merge default into stable for 5.2 release (branch stable, tag 5.2rc0)
author Augie Fackler <augie@google.com>
date Mon, 21 Oct 2019 11:09:48 -0400
branch stable
changeset 43306 59338f956109
parent 43029 c5dc122fdc2b (current diff)
parent 43305 d782cce137fd (diff)
child 43307 9086071498dd
merge default into stable for 5.2 release
mercurial/statprof.py
--- a/Makefile	Wed Oct 02 12:20:36 2019 -0400
+++ b/Makefile	Mon Oct 21 11:09:48 2019 -0400
@@ -113,7 +113,7 @@
 tests:
         # Run Rust tests if cargo is installed
 	if command -v $(CARGO) >/dev/null 2>&1; then \
-		cd $(HGROOT)/rust/hg-cpython && $(CARGO) test --quiet --all; \
+		$(MAKE) rust-tests; \
 	fi
 	cd tests && $(PYTHON) run-tests.py $(TESTFLAGS)
 
@@ -127,6 +127,13 @@
         $(MAKE) -f $(HGROOT)/contrib/Makefile.python PYTHONVER=$* PREFIX=$(HGPYTHONS)/$* python )
 	cd tests && $(HGPYTHONS)/$*/bin/python run-tests.py $(TESTFLAGS)
 
+rust-tests: py_feature = $(shell $(PYTHON) -c \
+ 'import sys; print(["python27-bin", "python3-bin"][sys.version_info[0] >= 3])')
+rust-tests:
+	cd $(HGROOT)/rust/hg-cpython \
+		&& $(CARGO) test --quiet --all \
+			--no-default-features --features "$(py_feature)"
+
 check-code:
 	hg manifest | xargs python contrib/check-code.py
 
@@ -248,6 +255,7 @@
 
 .PHONY: help all local build doc cleanbutpackages clean install install-bin \
 	install-doc install-home install-home-bin install-home-doc \
-	dist dist-notests check tests check-code format-c update-pot \
+	dist dist-notests check tests rust-tests check-code format-c \
+	update-pot \
 	$(packaging_targets) \
 	osx
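
The new ``rust-tests`` target above picks the ``hg-cpython`` cargo feature
from the running Python's major version. A minimal Python sketch of the same
selection performed by the inline ``$(PYTHON) -c`` expression::

   import sys

   # hg-cpython builds against exactly one Python binding feature;
   # choose it from the interpreter that will drive the tests.
   py_feature = ["python27-bin", "python3-bin"][sys.version_info[0] >= 3]
   print(py_feature)  # "python3-bin" under Python 3
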
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/black.toml	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,17 @@
+[tool.black]
+line-length = 80
+exclude = '''
+build/
+| wheelhouse/
+| dist/
+| packages/
+| \.hg/
+| \.mypy_cache/
+| \.venv/
+| mercurial/thirdparty/
+| hgext/fsmonitor/pywatchman/
+| contrib/python-zstandard/
+| contrib/grey.py
+'''
+skip-string-normalization = true
+quiet = true
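
``black`` treats a multi-line ``exclude`` value as a verbose regular
expression matched against file paths. A small sketch of how the pattern
above behaves, assuming verbose-mode compilation and using a trimmed
subset of the alternatives::

   import re

   # Trimmed subset of the exclude pattern from black.toml; under
   # re.VERBOSE, whitespace and newlines in the pattern are ignored.
   EXCLUDE = re.compile(
       r"""
       build/
       | wheelhouse/
       | mercurial/thirdparty/
       """,
       re.VERBOSE,
   )

   assert EXCLUDE.search("mercurial/thirdparty/zope/interface.py")
   assert not EXCLUDE.search("mercurial/util.py")
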
--- a/contrib/automation/README.rst	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/README.rst	Mon Oct 21 11:09:48 2019 -0400
@@ -33,6 +33,46 @@
 into a remote machine, we create a temporary directory for the SSH
 config so the user's known hosts file isn't updated.
 
+Try Server
+==========
+
+There is a *Try Server* which runs the automation against an arbitrary
+Mercurial changeset and displays the results via the web.
+
+.. note::
+
+   The *Try Server* is still experimental infrastructure.
+
+To use the *Try Server*::
+
+   $ ./automation.py try
+
+With a custom AWS profile::
+
+   $ AWS_PROFILE=hg contrib/automation/automation.py try
+
+By default, the ``.`` revision is submitted. **Any uncommitted changes
+are not submitted.**
+
+To switch which revision is used::
+
+   $ ./automation.py try -r abcdef
+
+Use of the *Try Server* requires access to a special AWS account.
+This account is currently run by Gregory Szorc. Here is the procedure
+for obtaining access to the *Try Server*:
+
+1. Email Gregory Szorc at gregory.szorc@gmail.com and request a
+   username. This username will be publicly visible.
+2. Wait for an email reply containing your temporary AWS credentials.
+3. Log in at https://gregoryszorc-hg.signin.aws.amazon.com/console
+   and set a new, secure password.
+4. Go to https://console.aws.amazon.com/iam/home?region=us-west-2#/security_credentials
+5. Under ``Access keys for CLI, SDK, & API access``, click the
+   ``Create access key`` button.
+6. See the ``AWS Integration`` section for instructions on
+   configuring your local client to use the generated credentials.
+
 AWS Integration
 ===============
 
@@ -47,12 +87,25 @@
 for how ``boto3`` works. Once you have configured your environment such
 that ``boto3`` can find credentials, interaction with AWS should *just work*.
 
-.. hint::
+To configure ``boto3``, you can use the ``aws configure`` command to
+write out configuration files. (The ``aws`` command is typically provided
+by an ``awscli`` package, available from your system package manager or
+via ``pip``.) Alternatively, you can write the files in ``~/.aws/``
+directly, e.g.::
+
+   # ~/.aws/config
+   [default]
+   region = us-west-2
 
-   Typically you have a ``~/.aws/credentials`` file containing AWS
-   credentials. If you manage multiple credentials, you can override which
-   *profile* to use at run-time by setting the ``AWS_PROFILE`` environment
-   variable.
+   # ~/.aws/credentials
+   [default]
+   aws_access_key_id = XXXX
+   aws_secret_access_key = YYYY
+
+If you have multiple AWS accounts, you can name the profile something
+other than ``default``, e.g. ``hg``. You can control which profile
+``boto3`` uses by setting the ``AWS_PROFILE`` environment variable,
+e.g. ``AWS_PROFILE=hg``.
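
``boto3`` resolves these files and the ``AWS_PROFILE`` variable on its own;
nothing in the automation has to parse them. A minimal sketch of the same
profile selection done explicitly in Python, reusing the example ``hg``
profile from above::

   import boto3

   # Equivalent to exporting AWS_PROFILE=hg before running automation.py:
   # boto3 reads ~/.aws/config and ~/.aws/credentials and uses the
   # sections for the named profile.
   session = boto3.Session(profile_name='hg')
   ec2 = session.resource('ec2', region_name='us-west-2')

   for instance in ec2.instances.all():
       print(instance.id, instance.state['Name'])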
 
 Resource Management
 -------------------
@@ -181,3 +234,25 @@
 Documenting them is beyond the scope of this document. Various tests
 also require other optional dependencies and missing dependencies will
 be printed by the test runner when a test is skipped.
+
+Releasing Windows Artifacts
+===========================
+
+The ``automation.py`` script can be used to automate the release of Windows
+artifacts::
+
+   $ ./automation.py build-all-windows-packages --revision 5.1.1
+   $ ./automation.py publish-windows-artifacts 5.1.1
+
+The first command will launch an EC2 instance to build all Windows packages
+and copy them into the ``dist`` directory relative to the repository root.
+The second command will then attempt to upload these files to PyPI (via
+``twine``) and to ``mercurial-scm.org`` (via SSH).
+
+Uploading to PyPI requires a PyPI account with write access to the
+``Mercurial`` package. You can skip the PyPI upload by passing ``--no-pypi``.
+
+Uploading to ``mercurial-scm.org`` requires an SSH account on that server
+with ``windows`` group membership, and the SSH key for that account must be
+either the default SSH key (e.g. ``~/.ssh/id_rsa``) or available in a running
+SSH agent. You can skip the ``mercurial-scm.org`` upload by passing
+``--no-mercurial-scm-org``.
--- a/contrib/automation/automation.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/automation.py	Mon Oct 21 11:09:48 2019 -0400
@@ -36,8 +36,13 @@
         pip = venv_bin / 'pip'
         python = venv_bin / 'python'
 
-    args = [str(pip), 'install', '-r', str(REQUIREMENTS_TXT),
-            '--disable-pip-version-check']
+    args = [
+        str(pip),
+        'install',
+        '-r',
+        str(REQUIREMENTS_TXT),
+        '--disable-pip-version-check',
+    ]
 
     if not venv_created:
         args.append('-q')
@@ -45,8 +50,7 @@
     subprocess.run(args, check=True)
 
     os.environ['HGAUTOMATION_BOOTSTRAPPED'] = '1'
-    os.environ['PATH'] = '%s%s%s' % (
-        venv_bin, os.pathsep, os.environ['PATH'])
+    os.environ['PATH'] = '%s%s%s' % (venv_bin, os.pathsep, os.environ['PATH'])
 
     subprocess.run([str(python), __file__] + sys.argv[1:], check=True)
 
--- a/contrib/automation/hgautomation/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/hgautomation/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,9 +10,7 @@
 import pathlib
 import secrets
 
-from .aws import (
-    AWSConnection,
-)
+from .aws import AWSConnection
 
 
 class HGAutomation:
@@ -53,7 +51,7 @@
 
         return password
 
-    def aws_connection(self, region: str, ensure_ec2_state: bool=True):
+    def aws_connection(self, region: str, ensure_ec2_state: bool = True):
         """Obtain an AWSConnection instance bound to a specific region."""
 
         return AWSConnection(self, region, ensure_ec2_state=ensure_ec2_state)
--- a/contrib/automation/hgautomation/aws.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/hgautomation/aws.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,9 +19,7 @@
 import boto3
 import botocore.exceptions
 
-from .linux import (
-    BOOTSTRAP_DEBIAN,
-)
+from .linux import BOOTSTRAP_DEBIAN
 from .ssh import (
     exec_command as ssh_exec_command,
     wait_for_ssh,
@@ -32,10 +30,13 @@
 )
 
 
-SOURCE_ROOT = pathlib.Path(os.path.abspath(__file__)).parent.parent.parent.parent
+SOURCE_ROOT = pathlib.Path(
+    os.path.abspath(__file__)
+).parent.parent.parent.parent
 
-INSTALL_WINDOWS_DEPENDENCIES = (SOURCE_ROOT / 'contrib' /
-                                'install-windows-dependencies.ps1')
+INSTALL_WINDOWS_DEPENDENCIES = (
+    SOURCE_ROOT / 'contrib' / 'install-windows-dependencies.ps1'
+)
 
 
 INSTANCE_TYPES_WITH_STORAGE = {
@@ -54,6 +55,7 @@
 
 AMAZON_ACCOUNT_ID = '801119661308'
 DEBIAN_ACCOUNT_ID = '379101102735'
+DEBIAN_ACCOUNT_ID_2 = '136693071363'
 UBUNTU_ACCOUNT_ID = '099720109477'
 
 
@@ -106,7 +108,6 @@
                         'Description': 'RDP from entire Internet',
                     },
                 ],
-
             },
             {
                 'FromPort': 5985,
@@ -118,7 +119,7 @@
                         'Description': 'PowerShell Remoting (Windows Remote Management)',
                     },
                 ],
-            }
+            },
         ],
     },
 }
@@ -151,11 +152,7 @@
 
 
 IAM_INSTANCE_PROFILES = {
-    'ephemeral-ec2-1': {
-        'roles': [
-            'ephemeral-ec2-role-1',
-        ],
-    }
+    'ephemeral-ec2-1': {'roles': ['ephemeral-ec2-role-1',],}
 }
 
 
@@ -225,7 +222,7 @@
 class AWSConnection:
     """Manages the state of a connection with AWS."""
 
-    def __init__(self, automation, region: str, ensure_ec2_state: bool=True):
+    def __init__(self, automation, region: str, ensure_ec2_state: bool = True):
         self.automation = automation
         self.local_state_path = automation.state_path
 
@@ -256,10 +253,19 @@
 
     # TODO use rsa package.
     res = subprocess.run(
-        ['openssl', 'pkcs8', '-in', str(p), '-nocrypt', '-topk8',
-         '-outform', 'DER'],
+        [
+            'openssl',
+            'pkcs8',
+            '-in',
+            str(p),
+            '-nocrypt',
+            '-topk8',
+            '-outform',
+            'DER',
+        ],
         capture_output=True,
-        check=True)
+        check=True,
+    )
 
     sha1 = hashlib.sha1(res.stdout).hexdigest()
     return ':'.join(a + b for a, b in zip(sha1[::2], sha1[1::2]))
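
The final line above renders a hex digest in OpenSSH's colon-separated
fingerprint style by pairing adjacent hex digits, for example::

   # "deadbeef" -> "de:ad:be:ef"
   sha1 = "deadbeef"
   print(':'.join(a + b for a, b in zip(sha1[::2], sha1[1::2])))
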
@@ -270,7 +276,7 @@
 
     for kpi in ec2resource.key_pairs.all():
         if kpi.name.startswith(prefix):
-            remote_existing[kpi.name[len(prefix):]] = kpi.key_fingerprint
+            remote_existing[kpi.name[len(prefix) :]] = kpi.key_fingerprint
 
     # Validate that we have these keys locally.
     key_path = state_path / 'keys'
@@ -296,7 +302,7 @@
         if not f.startswith('keypair-') or not f.endswith('.pub'):
             continue
 
-        name = f[len('keypair-'):-len('.pub')]
+        name = f[len('keypair-') : -len('.pub')]
 
         pub_full = key_path / f
         priv_full = key_path / ('keypair-%s' % name)
@@ -305,8 +311,9 @@
             data = fh.read()
 
         if not data.startswith('ssh-rsa '):
-            print('unexpected format for key pair file: %s; removing' %
-                  pub_full)
+            print(
+                'unexpected format for key pair file: %s; removing' % pub_full
+            )
             pub_full.unlink()
             priv_full.unlink()
             continue
@@ -326,8 +333,10 @@
             del local_existing[name]
 
         elif remote_existing[name] != local_existing[name]:
-            print('key fingerprint mismatch for %s; '
-                  'removing from local and remote' % name)
+            print(
+                'key fingerprint mismatch for %s; '
+                'removing from local and remote' % name
+            )
             remove_local(name)
             remove_remote('%s%s' % (prefix, name))
             del local_existing[name]
@@ -355,15 +364,18 @@
             subprocess.run(
                 ['ssh-keygen', '-y', '-f', str(priv_full)],
                 stdout=fh,
-                check=True)
+                check=True,
+            )
 
         pub_full.chmod(0o0600)
 
 
 def delete_instance_profile(profile):
     for role in profile.roles:
-        print('removing role %s from instance profile %s' % (role.name,
-                                                             profile.name))
+        print(
+            'removing role %s from instance profile %s'
+            % (role.name, profile.name)
+        )
         profile.remove_role(RoleName=role.name)
 
     print('deleting instance profile %s' % profile.name)
@@ -377,7 +389,7 @@
 
     for profile in iamresource.instance_profiles.all():
         if profile.name.startswith(prefix):
-            remote_profiles[profile.name[len(prefix):]] = profile
+            remote_profiles[profile.name[len(prefix) :]] = profile
 
     for name in sorted(set(remote_profiles) - set(IAM_INSTANCE_PROFILES)):
         delete_instance_profile(remote_profiles[name])
@@ -387,7 +399,7 @@
 
     for role in iamresource.roles.all():
         if role.name.startswith(prefix):
-            remote_roles[role.name[len(prefix):]] = role
+            remote_roles[role.name[len(prefix) :]] = role
 
     for name in sorted(set(remote_roles) - set(IAM_ROLES)):
         role = remote_roles[name]
@@ -403,7 +415,8 @@
         print('creating IAM instance profile %s' % actual)
 
         profile = iamresource.create_instance_profile(
-            InstanceProfileName=actual)
+            InstanceProfileName=actual
+        )
         remote_profiles[name] = profile
 
         waiter = iamclient.get_waiter('instance_profile_exists')
@@ -452,23 +465,12 @@
 
     images = ec2resource.images.filter(
         Filters=[
-            {
-                'Name': 'owner-id',
-                'Values': [owner_id],
-            },
-            {
-                'Name': 'state',
-                'Values': ['available'],
-            },
-            {
-                'Name': 'image-type',
-                'Values': ['machine'],
-            },
-            {
-                'Name': 'name',
-                'Values': [name],
-            },
-        ])
+            {'Name': 'owner-id', 'Values': [owner_id],},
+            {'Name': 'state', 'Values': ['available'],},
+            {'Name': 'image-type', 'Values': ['machine'],},
+            {'Name': 'name', 'Values': [name],},
+        ]
+    )
 
     for image in images:
         return image
@@ -486,7 +488,7 @@
 
     for group in ec2resource.security_groups.all():
         if group.group_name.startswith(prefix):
-            existing[group.group_name[len(prefix):]] = group
+            existing[group.group_name[len(prefix) :]] = group
 
     purge = set(existing) - set(SECURITY_GROUPS)
 
@@ -506,13 +508,10 @@
         print('adding security group %s' % actual)
 
         group_res = ec2resource.create_security_group(
-            Description=group['description'],
-            GroupName=actual,
+            Description=group['description'], GroupName=actual,
         )
 
-        group_res.authorize_ingress(
-            IpPermissions=group['ingress'],
-        )
+        group_res.authorize_ingress(IpPermissions=group['ingress'],)
 
         security_groups[name] = group_res
 
@@ -576,8 +575,10 @@
                 instance.reload()
                 continue
 
-            print('public IP address for %s: %s' % (
-                instance.id, instance.public_ip_address))
+            print(
+                'public IP address for %s: %s'
+                % (instance.id, instance.public_ip_address)
+            )
             break
 
 
@@ -602,10 +603,7 @@
     while True:
         res = ssmclient.describe_instance_information(
             Filters=[
-                {
-                    'Key': 'InstanceIds',
-                    'Values': [i.id for i in instances],
-                },
+                {'Key': 'InstanceIds', 'Values': [i.id for i in instances],},
             ],
         )
 
@@ -627,9 +625,7 @@
         InstanceIds=[i.id for i in instances],
         DocumentName=document_name,
         Parameters=parameters,
-        CloudWatchOutputConfig={
-            'CloudWatchOutputEnabled': True,
-        },
+        CloudWatchOutputConfig={'CloudWatchOutputEnabled': True,},
     )
 
     command_id = res['Command']['CommandId']
@@ -638,8 +634,7 @@
         while True:
             try:
                 res = ssmclient.get_command_invocation(
-                    CommandId=command_id,
-                    InstanceId=instance.id,
+                    CommandId=command_id, InstanceId=instance.id,
                 )
             except botocore.exceptions.ClientError as e:
                 if e.response['Error']['Code'] == 'InvocationDoesNotExist':
@@ -654,8 +649,9 @@
             elif res['Status'] in ('Pending', 'InProgress', 'Delayed'):
                 time.sleep(2)
             else:
-                raise Exception('command failed on %s: %s' % (
-                    instance.id, res['Status']))
+                raise Exception(
+                    'command failed on %s: %s' % (instance.id, res['Status'])
+                )
 
 
 @contextlib.contextmanager
@@ -691,7 +687,9 @@
 
 
 @contextlib.contextmanager
-def create_temp_windows_ec2_instances(c: AWSConnection, config):
+def create_temp_windows_ec2_instances(
+    c: AWSConnection, config, bootstrap: bool = False
+):
     """Create temporary Windows EC2 instances.
 
     This is a higher-level wrapper around ``create_temp_ec2_instances()`` that
@@ -710,11 +708,15 @@
     config['IamInstanceProfile'] = {
         'Name': 'hg-ephemeral-ec2-1',
     }
-    config.setdefault('TagSpecifications', []).append({
-        'ResourceType': 'instance',
-        'Tags': [{'Key': 'Name', 'Value': 'hg-temp-windows'}],
-    })
-    config['UserData'] = WINDOWS_USER_DATA % password
+    config.setdefault('TagSpecifications', []).append(
+        {
+            'ResourceType': 'instance',
+            'Tags': [{'Key': 'Name', 'Value': 'hg-temp-windows'}],
+        }
+    )
+
+    if bootstrap:
+        config['UserData'] = WINDOWS_USER_DATA % password
 
     with temporary_ec2_instances(c.ec2resource, config) as instances:
         wait_for_ip_addresses(instances)
@@ -722,7 +724,9 @@
         print('waiting for Windows Remote Management service...')
 
         for instance in instances:
-            client = wait_for_winrm(instance.public_ip_address, 'Administrator', password)
+            client = wait_for_winrm(
+                instance.public_ip_address, 'Administrator', password
+            )
             print('established WinRM connection to %s' % instance.id)
             instance.winrm_client = client
 
@@ -747,14 +751,17 @@
     # Store a reference to a good image so it can be returned once the
     # image state is reconciled.
     images = ec2resource.images.filter(
-        Filters=[{'Name': 'name', 'Values': [name]}])
+        Filters=[{'Name': 'name', 'Values': [name]}]
+    )
 
     existing_image = None
 
     for image in images:
         if image.tags is None:
-            print('image %s for %s lacks required tags; removing' % (
-                image.id, image.name))
+            print(
+                'image %s for %s lacks required tags; removing'
+                % (image.id, image.name)
+            )
             remove_ami(ec2resource, image)
         else:
             tags = {t['Key']: t['Value'] for t in image.tags}
@@ -762,15 +769,18 @@
             if tags.get('HGIMAGEFINGERPRINT') == fingerprint:
                 existing_image = image
             else:
-                print('image %s for %s has wrong fingerprint; removing' % (
-                      image.id, image.name))
+                print(
+                    'image %s for %s has wrong fingerprint; removing'
+                    % (image.id, image.name)
+                )
                 remove_ami(ec2resource, image)
 
     return existing_image
 
 
-def create_ami_from_instance(ec2client, instance, name, description,
-                             fingerprint):
+def create_ami_from_instance(
+    ec2client, instance, name, description, fingerprint
+):
     """Create an AMI from a running instance.
 
     Returns the ``ec2resource.Image`` representing the created AMI.
@@ -778,36 +788,26 @@
     instance.stop()
 
     ec2client.get_waiter('instance_stopped').wait(
-        InstanceIds=[instance.id],
-        WaiterConfig={
-            'Delay': 5,
-        })
+        InstanceIds=[instance.id], WaiterConfig={'Delay': 5,}
+    )
     print('%s is stopped' % instance.id)
 
-    image = instance.create_image(
-        Name=name,
-        Description=description,
-    )
+    image = instance.create_image(Name=name, Description=description,)
 
-    image.create_tags(Tags=[
-        {
-            'Key': 'HGIMAGEFINGERPRINT',
-            'Value': fingerprint,
-        },
-    ])
+    image.create_tags(
+        Tags=[{'Key': 'HGIMAGEFINGERPRINT', 'Value': fingerprint,},]
+    )
 
     print('waiting for image %s' % image.id)
 
-    ec2client.get_waiter('image_available').wait(
-        ImageIds=[image.id],
-    )
+    ec2client.get_waiter('image_available').wait(ImageIds=[image.id],)
 
     print('image %s available as %s' % (image.id, image.name))
 
     return image
 
 
-def ensure_linux_dev_ami(c: AWSConnection, distro='debian9', prefix='hg-'):
+def ensure_linux_dev_ami(c: AWSConnection, distro='debian10', prefix='hg-'):
     """Ensures a Linux development AMI is available and up-to-date.
 
     Returns an ``ec2.Image`` of either an existing AMI or a newly-built one.
@@ -821,28 +821,26 @@
         image = find_image(
             ec2resource,
             DEBIAN_ACCOUNT_ID,
-            'debian-stretch-hvm-x86_64-gp2-2019-02-19-26620',
+            'debian-stretch-hvm-x86_64-gp2-2019-09-08-17994',
+        )
+        ssh_username = 'admin'
+    elif distro == 'debian10':
+        image = find_image(
+            ec2resource, DEBIAN_ACCOUNT_ID_2, 'debian-10-amd64-20190909-10',
         )
         ssh_username = 'admin'
     elif distro == 'ubuntu18.04':
         image = find_image(
             ec2resource,
             UBUNTU_ACCOUNT_ID,
-            'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190403',
-        )
-        ssh_username = 'ubuntu'
-    elif distro == 'ubuntu18.10':
-        image = find_image(
-            ec2resource,
-            UBUNTU_ACCOUNT_ID,
-            'ubuntu/images/hvm-ssd/ubuntu-cosmic-18.10-amd64-server-20190402',
+            'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190918',
         )
         ssh_username = 'ubuntu'
     elif distro == 'ubuntu19.04':
         image = find_image(
             ec2resource,
             UBUNTU_ACCOUNT_ID,
-            'ubuntu/images/hvm-ssd/ubuntu-disco-19.04-amd64-server-20190417',
+            'ubuntu/images/hvm-ssd/ubuntu-disco-19.04-amd64-server-20190918',
         )
         ssh_username = 'ubuntu'
     else:
@@ -854,7 +852,7 @@
                 'DeviceName': image.block_device_mappings[0]['DeviceName'],
                 'Ebs': {
                     'DeleteOnTermination': True,
-                    'VolumeSize': 8,
+                    'VolumeSize': 10,
                     'VolumeType': 'gp2',
                 },
             },
@@ -870,10 +868,12 @@
         'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
     }
 
-    requirements2_path = (pathlib.Path(__file__).parent.parent /
-                          'linux-requirements-py2.txt')
-    requirements3_path = (pathlib.Path(__file__).parent.parent /
-                          'linux-requirements-py3.txt')
+    requirements2_path = (
+        pathlib.Path(__file__).parent.parent / 'linux-requirements-py2.txt'
+    )
+    requirements3_path = (
+        pathlib.Path(__file__).parent.parent / 'linux-requirements-py3.txt'
+    )
     with requirements2_path.open('r', encoding='utf-8') as fh:
         requirements2 = fh.read()
     with requirements3_path.open('r', encoding='utf-8') as fh:
@@ -881,12 +881,14 @@
 
     # Compute a deterministic fingerprint to determine whether image needs to
     # be regenerated.
-    fingerprint = resolve_fingerprint({
-        'instance_config': config,
-        'bootstrap_script': BOOTSTRAP_DEBIAN,
-        'requirements_py2': requirements2,
-        'requirements_py3': requirements3,
-    })
+    fingerprint = resolve_fingerprint(
+        {
+            'instance_config': config,
+            'bootstrap_script': BOOTSTRAP_DEBIAN,
+            'requirements_py2': requirements2,
+            'requirements_py3': requirements3,
+        }
+    )
 
     existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
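
``resolve_fingerprint()`` itself is not shown in this diff. A plausible
sketch, assuming it hashes a deterministic serialization of its inputs so
that any change to the instance config or bootstrap scripts forces a new
AMI build::

   import hashlib
   import json

   def resolve_fingerprint(payload):
       # Sort keys so logically identical payloads hash identically.
       serialized = json.dumps(payload, sort_keys=True, default=str)
       return hashlib.sha256(serialized.encode('utf-8')).hexdigest()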
 
@@ -901,9 +903,11 @@
         instance = instances[0]
 
         client = wait_for_ssh(
-            instance.public_ip_address, 22,
+            instance.public_ip_address,
+            22,
             username=ssh_username,
-            key_filename=str(c.key_pair_path_private('automation')))
+            key_filename=str(c.key_pair_path_private('automation')),
+        )
 
         home = '/home/%s' % ssh_username
 
@@ -925,8 +929,9 @@
                 fh.chmod(0o0700)
 
             print('executing bootstrap')
-            chan, stdin, stdout = ssh_exec_command(client,
-                                                   '%s/bootstrap' % home)
+            chan, stdin, stdout = ssh_exec_command(
+                client, '%s/bootstrap' % home
+            )
             stdin.close()
 
             for line in stdout:
@@ -936,17 +941,28 @@
             if res:
                 raise Exception('non-0 exit from bootstrap: %d' % res)
 
-            print('bootstrap completed; stopping %s to create %s' % (
-                  instance.id, name))
+            print(
+                'bootstrap completed; stopping %s to create %s'
+                % (instance.id, name)
+            )
 
-        return create_ami_from_instance(ec2client, instance, name,
-                                        'Mercurial Linux development environment',
-                                        fingerprint)
+        return create_ami_from_instance(
+            ec2client,
+            instance,
+            name,
+            'Mercurial Linux development environment',
+            fingerprint,
+        )
 
 
 @contextlib.contextmanager
-def temporary_linux_dev_instances(c: AWSConnection, image, instance_type,
-                                  prefix='hg-', ensure_extra_volume=False):
+def temporary_linux_dev_instances(
+    c: AWSConnection,
+    image,
+    instance_type,
+    prefix='hg-',
+    ensure_extra_volume=False,
+):
     """Create temporary Linux development EC2 instances.
 
     Context manager resolves to a list of ``ec2.Instance`` that were created
@@ -970,7 +986,7 @@
             'DeviceName': image.block_device_mappings[0]['DeviceName'],
             'Ebs': {
                 'DeleteOnTermination': True,
-                'VolumeSize': 8,
+                'VolumeSize': 12,
                 'VolumeType': 'gp2',
             },
         }
@@ -978,8 +994,9 @@
 
     # This is not an exhaustive list of instance types having instance storage.
-    if (ensure_extra_volume
-        and not instance_type.startswith(tuple(INSTANCE_TYPES_WITH_STORAGE))):
+    if ensure_extra_volume and not instance_type.startswith(
+        tuple(INSTANCE_TYPES_WITH_STORAGE)
+    ):
         main_device = block_device_mappings[0]['DeviceName']
 
         if main_device == 'xvda':
@@ -987,17 +1004,20 @@
         elif main_device == '/dev/sda1':
             second_device = '/dev/sdb'
         else:
-            raise ValueError('unhandled primary EBS device name: %s' %
-                             main_device)
+            raise ValueError(
+                'unhandled primary EBS device name: %s' % main_device
+            )
 
-        block_device_mappings.append({
-            'DeviceName': second_device,
-            'Ebs': {
-                'DeleteOnTermination': True,
-                'VolumeSize': 8,
-                'VolumeType': 'gp2',
+        block_device_mappings.append(
+            {
+                'DeviceName': second_device,
+                'Ebs': {
+                    'DeleteOnTermination': True,
+                    'VolumeSize': 8,
+                    'VolumeType': 'gp2',
+                },
             }
-        })
+        )
 
     config = {
         'BlockDeviceMappings': block_device_mappings,
@@ -1018,9 +1038,11 @@
 
         for instance in instances:
             client = wait_for_ssh(
-                instance.public_ip_address, 22,
+                instance.public_ip_address,
+                22,
                 username='hg',
-                key_filename=ssh_private_key_path)
+                key_filename=ssh_private_key_path,
+            )
 
             instance.ssh_client = client
             instance.ssh_private_key_path = ssh_private_key_path
@@ -1032,8 +1054,9 @@
                 instance.ssh_client.close()
 
 
-def ensure_windows_dev_ami(c: AWSConnection, prefix='hg-',
-                           base_image_name=WINDOWS_BASE_IMAGE_NAME):
+def ensure_windows_dev_ami(
+    c: AWSConnection, prefix='hg-', base_image_name=WINDOWS_BASE_IMAGE_NAME
+):
     """Ensure Windows Development AMI is available and up-to-date.
 
     If necessary, a modern AMI will be built by starting a temporary EC2
@@ -1092,6 +1115,23 @@
     with INSTALL_WINDOWS_DEPENDENCIES.open('r', encoding='utf-8') as fh:
         commands.extend(l.rstrip() for l in fh)
 
+    # Schedule run of EC2Launch on next boot. This ensures that UserData
+    # is executed.
+    # We disable setComputerName because it forces a reboot.
+    # We set an explicit admin password because this causes UserData to run
+    # as Administrator instead of System.
+    commands.extend(
+        [
+            r'''Set-Content -Path C:\ProgramData\Amazon\EC2-Windows\Launch\Config\LaunchConfig.json '''
+            r'''-Value '{"setComputerName": false, "setWallpaper": true, "addDnsSuffixList": true, '''
+            r'''"extendBootVolumeSize": true, "handleUserData": true, '''
+            r'''"adminPasswordType": "Specify", "adminPassword": "%s"}' '''
+            % c.automation.default_password(),
+            r'C:\ProgramData\Amazon\EC2-Windows\Launch\Scripts\InitializeInstance.ps1 '
+            r'-Schedule',
+        ]
+    )
+
     # Disable Windows Defender when bootstrapping because it just slows
     # things down.
     commands.insert(0, 'Set-MpPreference -DisableRealtimeMonitoring $true')
@@ -1099,13 +1139,15 @@
 
     # Compute a deterministic fingerprint to determine whether image needs
     # to be regenerated.
-    fingerprint = resolve_fingerprint({
-        'instance_config': config,
-        'user_data': WINDOWS_USER_DATA,
-        'initial_bootstrap': WINDOWS_BOOTSTRAP_POWERSHELL,
-        'bootstrap_commands': commands,
-        'base_image_name': base_image_name,
-    })
+    fingerprint = resolve_fingerprint(
+        {
+            'instance_config': config,
+            'user_data': WINDOWS_USER_DATA,
+            'initial_bootstrap': WINDOWS_BOOTSTRAP_POWERSHELL,
+            'bootstrap_commands': commands,
+            'base_image_name': base_image_name,
+        }
+    )
 
     existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
 
@@ -1114,7 +1156,9 @@
 
     print('no suitable Windows development image found; creating one...')
 
-    with create_temp_windows_ec2_instances(c, config) as instances:
+    with create_temp_windows_ec2_instances(
+        c, config, bootstrap=True
+    ) as instances:
         assert len(instances) == 1
         instance = instances[0]
 
@@ -1130,9 +1174,7 @@
             ssmclient,
             [instance],
             'AWS-RunPowerShellScript',
-            {
-                'commands': WINDOWS_BOOTSTRAP_POWERSHELL.split('\n'),
-            },
+            {'commands': WINDOWS_BOOTSTRAP_POWERSHELL.split('\n'),},
         )
 
         # Reboot so all updates are fully applied.
@@ -1144,10 +1186,8 @@
         print('rebooting instance %s' % instance.id)
         instance.stop()
         ec2client.get_waiter('instance_stopped').wait(
-            InstanceIds=[instance.id],
-            WaiterConfig={
-                'Delay': 5,
-            })
+            InstanceIds=[instance.id], WaiterConfig={'Delay': 5,}
+        )
 
         instance.start()
         wait_for_ip_addresses([instance])
@@ -1158,8 +1198,11 @@
         # TODO figure out a workaround.
 
         print('waiting for Windows Remote Management to come back...')
-        client = wait_for_winrm(instance.public_ip_address, 'Administrator',
-                                c.automation.default_password())
+        client = wait_for_winrm(
+            instance.public_ip_address,
+            'Administrator',
+            c.automation.default_password(),
+        )
         print('established WinRM connection to %s' % instance.id)
         instance.winrm_client = client
 
@@ -1167,14 +1210,23 @@
         run_powershell(instance.winrm_client, '\n'.join(commands))
 
         print('bootstrap completed; stopping %s to create image' % instance.id)
-        return create_ami_from_instance(ec2client, instance, name,
-                                        'Mercurial Windows development environment',
-                                        fingerprint)
+        return create_ami_from_instance(
+            ec2client,
+            instance,
+            name,
+            'Mercurial Windows development environment',
+            fingerprint,
+        )
 
 
 @contextlib.contextmanager
-def temporary_windows_dev_instances(c: AWSConnection, image, instance_type,
-                                    prefix='hg-', disable_antivirus=False):
+def temporary_windows_dev_instances(
+    c: AWSConnection,
+    image,
+    instance_type,
+    prefix='hg-',
+    disable_antivirus=False,
+):
     """Create a temporary Windows development EC2 instance.
 
     Context manager resolves to the list of ``EC2.Instance`` that were created.
@@ -1204,6 +1256,7 @@
             for instance in instances:
                 run_powershell(
                     instance.winrm_client,
-                    'Set-MpPreference -DisableRealtimeMonitoring $true')
+                    'Set-MpPreference -DisableRealtimeMonitoring $true',
+                )
 
         yield instances
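
The temporary-instance helpers in this file (``temporary_ec2_instances``,
``temporary_linux_dev_instances``, ``temporary_windows_dev_instances``)
share one ``contextlib.contextmanager`` shape. A minimal sketch of the
pattern, with hypothetical ``create``/``destroy`` callables::

   import contextlib

   @contextlib.contextmanager
   def temporary_resource(create, destroy):
       # Acquire on entry; the finally block guarantees cleanup even
       # when the code using the resource raises.
       resource = create()
       try:
           yield resource
       finally:
           destroy(resource)
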
--- a/contrib/automation/hgautomation/cli.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/hgautomation/cli.py	Mon Oct 21 11:09:48 2019 -0400
@@ -17,16 +17,20 @@
     aws,
     HGAutomation,
     linux,
+    try_server,
     windows,
 )
 
 
-SOURCE_ROOT = pathlib.Path(os.path.abspath(__file__)).parent.parent.parent.parent
+SOURCE_ROOT = pathlib.Path(
+    os.path.abspath(__file__)
+).parent.parent.parent.parent
 DIST_PATH = SOURCE_ROOT / 'dist'
 
 
-def bootstrap_linux_dev(hga: HGAutomation, aws_region, distros=None,
-                        parallel=False):
+def bootstrap_linux_dev(
+    hga: HGAutomation, aws_region, distros=None, parallel=False
+):
     c = hga.aws_connection(aws_region)
 
     if distros:
@@ -58,8 +62,9 @@
     print('Windows development AMI available as %s' % image.id)
 
 
-def build_inno(hga: HGAutomation, aws_region, arch, revision, version,
-               base_image_name):
+def build_inno(
+    hga: HGAutomation, aws_region, arch, revision, version, base_image_name
+):
     c = hga.aws_connection(aws_region)
     image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
     DIST_PATH.mkdir(exist_ok=True)
@@ -70,13 +75,14 @@
         windows.synchronize_hg(SOURCE_ROOT, revision, instance)
 
         for a in arch:
-            windows.build_inno_installer(instance.winrm_client, a,
-                                         DIST_PATH,
-                                         version=version)
+            windows.build_inno_installer(
+                instance.winrm_client, a, DIST_PATH, version=version
+            )
 
 
-def build_wix(hga: HGAutomation, aws_region, arch, revision, version,
-              base_image_name):
+def build_wix(
+    hga: HGAutomation, aws_region, arch, revision, version, base_image_name
+):
     c = hga.aws_connection(aws_region)
     image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
     DIST_PATH.mkdir(exist_ok=True)
@@ -87,12 +93,14 @@
         windows.synchronize_hg(SOURCE_ROOT, revision, instance)
 
         for a in arch:
-            windows.build_wix_installer(instance.winrm_client, a,
-                                        DIST_PATH, version=version)
+            windows.build_wix_installer(
+                instance.winrm_client, a, DIST_PATH, version=version
+            )
 
 
-def build_windows_wheel(hga: HGAutomation, aws_region, arch, revision,
-                        base_image_name):
+def build_windows_wheel(
+    hga: HGAutomation, aws_region, arch, revision, base_image_name
+):
     c = hga.aws_connection(aws_region)
     image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
     DIST_PATH.mkdir(exist_ok=True)
@@ -106,8 +114,9 @@
             windows.build_wheel(instance.winrm_client, a, DIST_PATH)
 
 
-def build_all_windows_packages(hga: HGAutomation, aws_region, revision,
-                               version, base_image_name):
+def build_all_windows_packages(
+    hga: HGAutomation, aws_region, revision, version, base_image_name
+):
     c = hga.aws_connection(aws_region)
     image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
     DIST_PATH.mkdir(exist_ok=True)
@@ -123,11 +132,13 @@
             windows.purge_hg(winrm_client)
             windows.build_wheel(winrm_client, arch, DIST_PATH)
             windows.purge_hg(winrm_client)
-            windows.build_inno_installer(winrm_client, arch, DIST_PATH,
-                                         version=version)
+            windows.build_inno_installer(
+                winrm_client, arch, DIST_PATH, version=version
+            )
             windows.purge_hg(winrm_client)
-            windows.build_wix_installer(winrm_client, arch, DIST_PATH,
-                                        version=version)
+            windows.build_wix_installer(
+                winrm_client, arch, DIST_PATH, version=version
+            )
 
 
 def terminate_ec2_instances(hga: HGAutomation, aws_region):
@@ -140,8 +151,15 @@
     aws.remove_resources(c)
 
 
-def run_tests_linux(hga: HGAutomation, aws_region, instance_type,
-                    python_version, test_flags, distro, filesystem):
+def run_tests_linux(
+    hga: HGAutomation,
+    aws_region,
+    instance_type,
+    python_version,
+    test_flags,
+    distro,
+    filesystem,
+):
     c = hga.aws_connection(aws_region)
     image = aws.ensure_linux_dev_ami(c, distro=distro)
 
@@ -150,17 +168,17 @@
     ensure_extra_volume = filesystem not in ('default', 'tmpfs')
 
     with aws.temporary_linux_dev_instances(
-        c, image, instance_type,
-        ensure_extra_volume=ensure_extra_volume) as insts:
+        c, image, instance_type, ensure_extra_volume=ensure_extra_volume
+    ) as insts:
 
         instance = insts[0]
 
-        linux.prepare_exec_environment(instance.ssh_client,
-                                       filesystem=filesystem)
+        linux.prepare_exec_environment(
+            instance.ssh_client, filesystem=filesystem
+        )
         linux.synchronize_hg(SOURCE_ROOT, instance, '.')
         t_prepared = time.time()
-        linux.run_tests(instance.ssh_client, python_version,
-                        test_flags)
+        linux.run_tests(instance.ssh_client, python_version, test_flags)
         t_done = time.time()
 
     t_setup = t_prepared - t_start
@@ -168,21 +186,53 @@
 
     print(
         'total time: %.1fs; setup: %.1fs; tests: %.1fs; setup overhead: %.1f%%'
-        % (t_all, t_setup, t_done - t_prepared, t_setup / t_all * 100.0))
+        % (t_all, t_setup, t_done - t_prepared, t_setup / t_all * 100.0)
+    )
 
 
-def run_tests_windows(hga: HGAutomation, aws_region, instance_type,
-                      python_version, arch, test_flags, base_image_name):
+def run_tests_windows(
+    hga: HGAutomation,
+    aws_region,
+    instance_type,
+    python_version,
+    arch,
+    test_flags,
+    base_image_name,
+):
     c = hga.aws_connection(aws_region)
     image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
 
-    with aws.temporary_windows_dev_instances(c, image, instance_type,
-                                             disable_antivirus=True) as insts:
+    with aws.temporary_windows_dev_instances(
+        c, image, instance_type, disable_antivirus=True
+    ) as insts:
         instance = insts[0]
 
         windows.synchronize_hg(SOURCE_ROOT, '.', instance)
-        windows.run_tests(instance.winrm_client, python_version, arch,
-                          test_flags)
+        windows.run_tests(
+            instance.winrm_client, python_version, arch, test_flags
+        )
+
+
+def publish_windows_artifacts(
+    hg: HGAutomation,
+    aws_region,
+    version: str,
+    pypi: bool,
+    mercurial_scm_org: bool,
+    ssh_username: str,
+):
+    windows.publish_artifacts(
+        DIST_PATH,
+        version,
+        pypi=pypi,
+        mercurial_scm_org=mercurial_scm_org,
+        ssh_username=ssh_username,
+    )
+
+
+def run_try(hga: HGAutomation, aws_region: str, rev: str):
+    c = hga.aws_connection(aws_region, ensure_ec2_state=False)
+    try_server.trigger_try(c, rev=rev)
 
 
 def get_parser():
@@ -194,25 +244,21 @@
         help='Path for local state files',
     )
     parser.add_argument(
-        '--aws-region',
-        help='AWS region to use',
-        default='us-west-1',
+        '--aws-region', help='AWS region to use', default='us-west-2',
     )
 
     subparsers = parser.add_subparsers()
 
     sp = subparsers.add_parser(
-        'bootstrap-linux-dev',
-        help='Bootstrap Linux development environments',
+        'bootstrap-linux-dev', help='Bootstrap Linux development environments',
     )
     sp.add_argument(
-        '--distros',
-        help='Comma delimited list of distros to bootstrap',
+        '--distros', help='Comma delimited list of distros to bootstrap',
     )
     sp.add_argument(
         '--parallel',
         action='store_true',
-        help='Generate AMIs in parallel (not CTRL-c safe)'
+        help='Generate AMIs in parallel (not CTRL-c safe)',
     )
     sp.set_defaults(func=bootstrap_linux_dev)
 
@@ -228,17 +274,13 @@
     sp.set_defaults(func=bootstrap_windows_dev)
 
     sp = subparsers.add_parser(
-        'build-all-windows-packages',
-        help='Build all Windows packages',
+        'build-all-windows-packages', help='Build all Windows packages',
     )
     sp.add_argument(
-        '--revision',
-        help='Mercurial revision to build',
-        default='.',
+        '--revision', help='Mercurial revision to build', default='.',
     )
     sp.add_argument(
-        '--version',
-        help='Mercurial version string to use',
+        '--version', help='Mercurial version string to use',
     )
     sp.add_argument(
         '--base-image-name',
@@ -248,8 +290,7 @@
     sp.set_defaults(func=build_all_windows_packages)
 
     sp = subparsers.add_parser(
-        'build-inno',
-        help='Build Inno Setup installer(s)',
+        'build-inno', help='Build Inno Setup installer(s)',
     )
     sp.add_argument(
         '--arch',
@@ -259,13 +300,10 @@
         default=['x64'],
     )
     sp.add_argument(
-        '--revision',
-        help='Mercurial revision to build',
-        default='.',
+        '--revision', help='Mercurial revision to build', default='.',
     )
     sp.add_argument(
-        '--version',
-        help='Mercurial version string to use in installer',
+        '--version', help='Mercurial version string to use in installer',
     )
     sp.add_argument(
         '--base-image-name',
@@ -275,8 +313,7 @@
     sp.set_defaults(func=build_inno)
 
     sp = subparsers.add_parser(
-        'build-windows-wheel',
-        help='Build Windows wheel(s)',
+        'build-windows-wheel', help='Build Windows wheel(s)',
     )
     sp.add_argument(
         '--arch',
@@ -286,9 +323,7 @@
         default=['x64'],
     )
     sp.add_argument(
-        '--revision',
-        help='Mercurial revision to build',
-        default='.',
+        '--revision', help='Mercurial revision to build', default='.',
     )
     sp.add_argument(
         '--base-image-name',
@@ -297,10 +332,7 @@
     )
     sp.set_defaults(func=build_windows_wheel)
 
-    sp = subparsers.add_parser(
-        'build-wix',
-        help='Build WiX installer(s)'
-    )
+    sp = subparsers.add_parser('build-wix', help='Build WiX installer(s)')
     sp.add_argument(
         '--arch',
         help='Architecture to build for',
@@ -309,13 +341,10 @@
         default=['x64'],
     )
     sp.add_argument(
-        '--revision',
-        help='Mercurial revision to build',
-        default='.',
+        '--revision', help='Mercurial revision to build', default='.',
     )
     sp.add_argument(
-        '--version',
-        help='Mercurial version string to use in installer',
+        '--version', help='Mercurial version string to use in installer',
     )
     sp.add_argument(
         '--base-image-name',
@@ -331,20 +360,16 @@
     sp.set_defaults(func=terminate_ec2_instances)
 
     sp = subparsers.add_parser(
-        'purge-ec2-resources',
-        help='Purge all EC2 resources managed by us',
+        'purge-ec2-resources', help='Purge all EC2 resources managed by us',
     )
     sp.set_defaults(func=purge_ec2_resources)
 
-    sp = subparsers.add_parser(
-        'run-tests-linux',
-        help='Run tests on Linux',
-    )
+    sp = subparsers.add_parser('run-tests-linux', help='Run tests on Linux',)
     sp.add_argument(
         '--distro',
         help='Linux distribution to run tests on',
         choices=linux.DISTROS,
-        default='debian9',
+        default='debian10',
     )
     sp.add_argument(
         '--filesystem',
@@ -360,8 +385,18 @@
     sp.add_argument(
         '--python-version',
         help='Python version to use',
-        choices={'system2', 'system3', '2.7', '3.5', '3.6', '3.7', '3.8',
-                 'pypy', 'pypy3.5', 'pypy3.6'},
+        choices={
+            'system2',
+            'system3',
+            '2.7',
+            '3.5',
+            '3.6',
+            '3.7',
+            '3.8',
+            'pypy',
+            'pypy3.5',
+            'pypy3.6',
+        },
         default='system2',
     )
     sp.add_argument(
@@ -372,13 +407,10 @@
     sp.set_defaults(func=run_tests_linux)
 
     sp = subparsers.add_parser(
-        'run-tests-windows',
-        help='Run tests on Windows',
+        'run-tests-windows', help='Run tests on Windows',
     )
     sp.add_argument(
-        '--instance-type',
-        help='EC2 instance type to use',
-        default='t3.medium',
+        '--instance-type', help='EC2 instance type to use', default='t3.medium',
     )
     sp.add_argument(
         '--python-version',
@@ -393,8 +425,7 @@
         default='x64',
     )
     sp.add_argument(
-        '--test-flags',
-        help='Extra command line flags to pass to run-tests.py',
+        '--test-flags', help='Extra command line flags to pass to run-tests.py',
     )
     sp.add_argument(
         '--base-image-name',
@@ -403,6 +434,38 @@
     )
     sp.set_defaults(func=run_tests_windows)
 
+    sp = subparsers.add_parser(
+        'publish-windows-artifacts',
+        help='Publish built Windows artifacts (wheels, installers, etc)',
+    )
+    sp.add_argument(
+        '--no-pypi',
+        dest='pypi',
+        action='store_false',
+        default=True,
+        help='Skip uploading to PyPI',
+    )
+    sp.add_argument(
+        '--no-mercurial-scm-org',
+        dest='mercurial_scm_org',
+        action='store_false',
+        default=True,
+        help='Skip uploading to www.mercurial-scm.org',
+    )
+    sp.add_argument(
+        '--ssh-username', help='SSH username for mercurial-scm.org',
+    )
+    sp.add_argument(
+        'version', help='Mercurial version string to locate local packages',
+    )
+    sp.set_defaults(func=publish_windows_artifacts)
+
+    sp = subparsers.add_parser(
+        'try', help='Run CI automation against a custom changeset'
+    )
+    sp.add_argument('-r', '--rev', default='.', help='Revision to run CI on')
+    sp.set_defaults(func=run_try)
+
     return parser
 
 
--- a/contrib/automation/hgautomation/linux.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/hgautomation/linux.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,39 +13,37 @@
 import subprocess
 import tempfile
 
-from .ssh import (
-    exec_command,
-)
+from .ssh import exec_command
 
 
 # Linux distributions that are supported.
 DISTROS = {
     'debian9',
+    'debian10',
     'ubuntu18.04',
-    'ubuntu18.10',
     'ubuntu19.04',
 }
 
 INSTALL_PYTHONS = r'''
 PYENV2_VERSIONS="2.7.16 pypy2.7-7.1.1"
-PYENV3_VERSIONS="3.5.7 3.6.8 3.7.3 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"
+PYENV3_VERSIONS="3.5.7 3.6.9 3.7.4 3.8.0 pypy3.5-7.0.0 pypy3.6-7.1.1"
 
 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
 pushd /hgdev/pyenv
-git checkout 3faeda67bb33e07750d1a104271369a7384ca45c
+git checkout d6d6bc8bb08bcdcbf4eb79509aa7061011ade1c4
 popd
 
 export PYENV_ROOT="/hgdev/pyenv"
 export PATH="$PYENV_ROOT/bin:$PATH"
 
-# pip 19.0.3.
-PIP_SHA256=efe99298f3fbb1f56201ce6b81d2658067d2f7d7dfc2d412e0d3cacc9a397c61
-wget -O get-pip.py --progress dot:mega https://github.com/pypa/get-pip/raw/fee32c376da1ff6496a798986d7939cd51e1644f/get-pip.py
+# pip 19.2.3.
+PIP_SHA256=57e3643ff19f018f8a00dfaa6b7e4620e3c1a7a2171fd218425366ec006b3bfe
+wget -O get-pip.py --progress dot:mega https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py
 echo "${PIP_SHA256} get-pip.py" | sha256sum --check -
 
-VIRTUALENV_SHA256=984d7e607b0a5d1329425dd8845bd971b957424b5ba664729fab51ab8c11bc39
-VIRTUALENV_TARBALL=virtualenv-16.4.3.tar.gz
-wget -O ${VIRTUALENV_TARBALL} --progress dot:mega https://files.pythonhosted.org/packages/37/db/89d6b043b22052109da35416abc3c397655e4bd3cff031446ba02b9654fa/${VIRTUALENV_TARBALL}
+VIRTUALENV_SHA256=f78d81b62d3147396ac33fc9d77579ddc42cc2a98dd9ea38886f616b33bc7fb2
+VIRTUALENV_TARBALL=virtualenv-16.7.5.tar.gz
+wget -O ${VIRTUALENV_TARBALL} --progress dot:mega https://files.pythonhosted.org/packages/66/f0/6867af06d2e2f511e4e1d7094ff663acdebc4f15d4a0cb0fed1007395124/${VIRTUALENV_TARBALL}
 echo "${VIRTUALENV_SHA256} ${VIRTUALENV_TARBALL}" | sha256sum --check -
 
 for v in ${PYENV2_VERSIONS}; do
@@ -62,23 +60,40 @@
 done
 
 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
-'''.lstrip().replace('\r\n', '\n')
+'''.lstrip().replace(
+    '\r\n', '\n'
+)
+
+
+INSTALL_RUST = r'''
+RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
+wget -O rustup-init --progress dot:mega https://static.rust-lang.org/rustup/archive/1.18.3/x86_64-unknown-linux-gnu/rustup-init
+echo "${RUSTUP_INIT_SHA256} rustup-init" | sha256sum --check -
+
+chmod +x rustup-init
+sudo -H -u hg -g hg ./rustup-init -y
+sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.34.2
+sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
+'''
 
 
 BOOTSTRAP_VIRTUALENV = r'''
 /usr/bin/virtualenv /hgdev/venv-bootstrap
 
-HG_SHA256=1bdd21bb87d1e05fb5cd395d488d0e0cc2f2f90ce0fd248e31a03595da5ccb47
-HG_TARBALL=mercurial-4.9.1.tar.gz
+HG_SHA256=35fc8ba5e0379c1b3affa2757e83fb0509e8ac314cbd9f1fd133cf265d16e49f
+HG_TARBALL=mercurial-5.1.1.tar.gz
 
 wget -O ${HG_TARBALL} --progress dot:mega https://www.mercurial-scm.org/release/${HG_TARBALL}
 echo "${HG_SHA256} ${HG_TARBALL}" | sha256sum --check -
 
 /hgdev/venv-bootstrap/bin/pip install ${HG_TARBALL}
-'''.lstrip().replace('\r\n', '\n')
+'''.lstrip().replace(
+    '\r\n', '\n'
+)
 
 
-BOOTSTRAP_DEBIAN = r'''
+BOOTSTRAP_DEBIAN = (
+    r'''
 #!/bin/bash
 
 set -ex
@@ -175,18 +190,22 @@
 
 sudo apt-key add docker-apt-key
 
-if [ "$DEBIAN_VERSION" = "9.8" ]; then
+if [ "$LSB_RELEASE" = "stretch" ]; then
 cat << EOF | sudo tee -a /etc/apt/sources.list
 # Need backports for clang-format-6.0
 deb http://deb.debian.org/debian stretch-backports main
+EOF
+fi
 
+if [ "$LSB_RELEASE" = "stretch" -o "$LSB_RELEASE" = "buster" ]; then
+cat << EOF | sudo tee -a /etc/apt/sources.list
 # Sources are useful if we want to compile things locally.
-deb-src http://deb.debian.org/debian stretch main
-deb-src http://security.debian.org/debian-security stretch/updates main
-deb-src http://deb.debian.org/debian stretch-updates main
-deb-src http://deb.debian.org/debian stretch-backports main
+deb-src http://deb.debian.org/debian $LSB_RELEASE main
+deb-src http://security.debian.org/debian-security $LSB_RELEASE/updates main
+deb-src http://deb.debian.org/debian $LSB_RELEASE-updates main
+deb-src http://deb.debian.org/debian $LSB_RELEASE-backports main
 
-deb [arch=amd64] https://download.docker.com/linux/debian stretch stable
+deb [arch=amd64] https://download.docker.com/linux/debian $LSB_RELEASE stable
 EOF
 
 elif [ "$DISTRO" = "Ubuntu" ]; then
@@ -199,6 +218,7 @@
 sudo apt-get update
 
 PACKAGES="\
+    awscli \
     btrfs-progs \
     build-essential \
     bzr \
@@ -207,6 +227,7 @@
     darcs \
     debhelper \
     devscripts \
+    docker-ce \
     dpkg-dev \
     dstat \
     emacs \
@@ -239,6 +260,7 @@
     python-pygments \
     python-subversion \
     python-vcr \
+    python3-boto3 \
     python3-dev \
     python3-docutils \
     python3-fuzzywuzzy \
@@ -259,23 +281,17 @@
     zip \
     zlib1g-dev"
 
-if [ "$DEBIAN_VERSION" = "9.8" ]; then
+if [ "LSB_RELEASE" = "stretch" ]; then
     PACKAGES="$PACKAGES linux-perf"
 elif [ "$DISTRO" = "Ubuntu" ]; then
     PACKAGES="$PACKAGES linux-tools-common"
 fi
 
-# Ubuntu 19.04 removes monotone.
-if [ "$LSB_RELEASE" != "disco" ]; then
+# Monotone only available in older releases.
+if [ "$LSB_RELEASE" = "stretch" -o "$LSB_RELEASE" = "xenial" ]; then
     PACKAGES="$PACKAGES monotone"
 fi
 
-# As of April 27, 2019, Docker hasn't published packages for
-# Ubuntu 19.04 yet.
-if [ "$LSB_RELEASE" != "disco" ]; then
-    PACKAGES="$PACKAGES docker-ce"
-fi
-
 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends $PACKAGES
 
 # Create clang-format symlink so test harness finds it.
@@ -286,6 +302,8 @@
 # Will be normalized to hg:hg later.
 sudo chown `whoami` /hgdev
 
+{install_rust}
+
 cp requirements-py2.txt /hgdev/requirements-py2.txt
 cp requirements-py3.txt /hgdev/requirements-py3.txt
 
@@ -308,10 +326,14 @@
 EOF
 
 sudo chown -R hg:hg /hgdev
-'''.lstrip().format(
-    install_pythons=INSTALL_PYTHONS,
-    bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV
-).replace('\r\n', '\n')
+'''.lstrip()
+    .format(
+        install_rust=INSTALL_RUST,
+        install_pythons=INSTALL_PYTHONS,
+        bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV,
+    )
+    .replace('\r\n', '\n')
+)
 
 
 # Prepares /hgdev for operations.
@@ -393,7 +415,9 @@
 chown hg:hg /hgwork/tmp
 
 rsync -a /hgdev/src /hgwork/
-'''.lstrip().replace('\r\n', '\n')
+'''.lstrip().replace(
+    '\r\n', '\n'
+)
 
 
 HG_UPDATE_CLEAN = '''
@@ -405,7 +429,9 @@
 ${HG} --config extensions.purge= purge --all
 ${HG} update -C $1
 ${HG} log -r .
-'''.lstrip().replace('\r\n', '\n')
+'''.lstrip().replace(
+    '\r\n', '\n'
+)
 
 
 def prepare_exec_environment(ssh_client, filesystem='default'):
@@ -440,11 +466,12 @@
     res = chan.recv_exit_status()
 
     if res:
-        raise Exception('non-0 exit code updating working directory; %d'
-                        % res)
+        raise Exception('non-0 exit code updating working directory; %d' % res)
 
 
-def synchronize_hg(source_path: pathlib.Path, ec2_instance, revision: str=None):
+def synchronize_hg(
+    source_path: pathlib.Path, ec2_instance, revision: str = None
+):
     """Synchronize a local Mercurial source path to remote EC2 instance."""
 
     with tempfile.TemporaryDirectory() as temp_dir:
@@ -466,8 +493,10 @@
             fh.write('  IdentityFile %s\n' % ec2_instance.ssh_private_key_path)
 
         if not (source_path / '.hg').is_dir():
-            raise Exception('%s is not a Mercurial repository; synchronization '
-                            'not yet supported' % source_path)
+            raise Exception(
+                '%s is not a Mercurial repository; synchronization '
+                'not yet supported' % source_path
+            )
 
         env = dict(os.environ)
         env['HGPLAIN'] = '1'
@@ -477,17 +506,29 @@
 
         res = subprocess.run(
             ['python2.7', str(hg_bin), 'log', '-r', revision, '-T', '{node}'],
-            cwd=str(source_path), env=env, check=True, capture_output=True)
+            cwd=str(source_path),
+            env=env,
+            check=True,
+            capture_output=True,
+        )
 
         full_revision = res.stdout.decode('ascii')
 
         args = [
-            'python2.7', str(hg_bin),
-            '--config', 'ui.ssh=ssh -F %s' % ssh_config,
-            '--config', 'ui.remotecmd=/hgdev/venv-bootstrap/bin/hg',
+            'python2.7',
+            str(hg_bin),
+            '--config',
+            'ui.ssh=ssh -F %s' % ssh_config,
+            '--config',
+            'ui.remotecmd=/hgdev/venv-bootstrap/bin/hg',
             # Also ensure .hgtags changes are present so auto version
             # calculation works.
-            'push', '-f', '-r', full_revision, '-r', 'file(.hgtags)',
+            'push',
+            '-f',
+            '-r',
+            full_revision,
+            '-r',
+            'file(.hgtags)',
             'ssh://%s//hgwork/src' % public_ip,
         ]
 
@@ -506,7 +547,8 @@
             fh.chmod(0o0700)
 
         chan, stdin, stdout = exec_command(
-            ec2_instance.ssh_client, '/hgdev/hgup %s' % full_revision)
+            ec2_instance.ssh_client, '/hgdev/hgup %s' % full_revision
+        )
         stdin.close()
 
         for line in stdout:
@@ -515,8 +557,9 @@
         res = chan.recv_exit_status()
 
         if res:
-            raise Exception('non-0 exit code updating working directory; %d'
-                            % res)
+            raise Exception(
+                'non-0 exit code updating working directory; %d' % res
+            )
 
 
 def run_tests(ssh_client, python_version, test_flags=None):
@@ -538,8 +581,8 @@
 
     command = (
         '/bin/sh -c "export TMPDIR=/hgwork/tmp; '
-        'cd /hgwork/src/tests && %s run-tests.py %s"' % (
-            python, test_flags))
+        'cd /hgwork/src/tests && %s run-tests.py %s"' % (python, test_flags)
+    )
 
     chan, stdin, stdout = exec_command(ssh_client, command)
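
A hedged sketch of driving this entry point directly. The instance address,
``python_version`` selector, and flags below are illustrative assumptions;
``ssh_client`` is presumed to come from ``hgautomation.ssh.wait_for_ssh()``::

   from hgautomation.linux import run_tests
   from hgautomation.ssh import wait_for_ssh

   # Hypothetical instance address; in practice the EC2 helpers supply this.
   ssh_client = wait_for_ssh(
       '203.0.113.7', 22, username='hg', key_filename='/path/to/key'
   )

   # 'system3' as a selector is an assumption; test_flags is passed through
   # to run-tests.py verbatim.
   run_tests(ssh_client, 'system3', test_flags='--jobs 2')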
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/automation/hgautomation/pypi.py	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,21 @@
+# pypi.py - Automation around PyPI
+#
+# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# no-check-code because Python 3 native.
+
+from twine.commands.upload import upload as twine_upload
+from twine.settings import Settings
+
+
+def upload(paths):
+    """Upload files to PyPI.
+
+    `paths` is an iterable of `pathlib.Path`.
+    """
+    settings = Settings()
+
+    twine_upload(settings, [str(p) for p in paths])
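
A minimal usage sketch for this helper, assuming the wheels already exist.
The directory and glob are hypothetical; ``Settings()`` with no arguments
defers to twine's normal credential resolution (e.g. ``~/.pypirc`` or an
interactive prompt)::

   import pathlib

   from hgautomation.pypi import upload

   # Hypothetical build output directory containing the wheels to publish.
   dist_path = pathlib.Path('dist')

   # upload() stringifies each path and hands the batch to twine in one call.
   upload(sorted(dist_path.glob('*.whl')))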
--- a/contrib/automation/hgautomation/ssh.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/hgautomation/ssh.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,14 +11,13 @@
 import time
 import warnings
 
-from cryptography.utils import (
-    CryptographyDeprecationWarning,
-)
+from cryptography.utils import CryptographyDeprecationWarning
 import paramiko
 
 
 def wait_for_ssh(hostname, port, timeout=60, username=None, key_filename=None):
     """Wait for an SSH server to start on the specified host and port."""
+
     class IgnoreHostKeyPolicy(paramiko.MissingHostKeyPolicy):
         def missing_host_key(self, client, hostname, key):
             return
@@ -28,17 +27,23 @@
     # paramiko triggers a CryptographyDeprecationWarning in the cryptography
     # package. Let's suppress it.
     with warnings.catch_warnings():
-        warnings.filterwarnings('ignore',
-                                category=CryptographyDeprecationWarning)
+        warnings.filterwarnings(
+            'ignore', category=CryptographyDeprecationWarning
+        )
 
         while True:
             client = paramiko.SSHClient()
             client.set_missing_host_key_policy(IgnoreHostKeyPolicy())
             try:
-                client.connect(hostname, port=port, username=username,
-                               key_filename=key_filename,
-                               timeout=5.0, allow_agent=False,
-                               look_for_keys=False)
+                client.connect(
+                    hostname,
+                    port=port,
+                    username=username,
+                    key_filename=key_filename,
+                    timeout=5.0,
+                    allow_agent=False,
+                    look_for_keys=False,
+                )
 
                 return client
             except socket.error:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/automation/hgautomation/try_server.py	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,99 @@
+# try_server.py - Interact with Try server
+#
+# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# no-check-code because Python 3 native.
+
+import base64
+import json
+import os
+import subprocess
+import tempfile
+
+from .aws import AWSConnection
+
+LAMBDA_FUNCTION = "ci-try-server-upload"
+
+
+def trigger_try(c: AWSConnection, rev="."):
+    """Trigger a new Try run."""
+    lambda_client = c.session.client("lambda")
+
+    cset, bundle = generate_bundle(rev=rev)
+
+    payload = {
+        "bundle": base64.b64encode(bundle).decode("utf-8"),
+        "node": cset["node"],
+        "branch": cset["branch"],
+        "user": cset["user"],
+        "message": cset["desc"],
+    }
+
+    print("resolved revision:")
+    print("node: %s" % cset["node"])
+    print("branch: %s" % cset["branch"])
+    print("user: %s" % cset["user"])
+    print("desc: %s" % cset["desc"].splitlines()[0])
+    print()
+
+    print("sending to Try...")
+    res = lambda_client.invoke(
+        FunctionName=LAMBDA_FUNCTION,
+        InvocationType="RequestResponse",
+        Payload=json.dumps(payload).encode("utf-8"),
+    )
+
+    body = json.load(res["Payload"])
+    for message in body:
+        print("remote: %s" % message)
+
+
+def generate_bundle(rev="."):
+    """Generate a bundle suitable for use by the Try service.
+
+    Returns a tuple of revision metadata and raw Mercurial bundle data.
+    """
+    # `hg bundle` doesn't support streaming to stdout. So we use a temporary
+    # file.
+    path = None
+    try:
+        fd, path = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
+        os.close(fd)
+
+        args = [
+            "hg",
+            "bundle",
+            "--type",
+            "gzip-v2",
+            "--base",
+            "public()",
+            "--rev",
+            rev,
+            path,
+        ]
+
+        print("generating bundle...")
+        subprocess.run(args, check=True)
+
+        with open(path, "rb") as fh:
+            bundle_data = fh.read()
+
+    finally:
+        if path:
+            os.unlink(path)
+
+    args = [
+        "hg",
+        "log",
+        "-r",
+        rev,
+        # The payload is uploaded as JSON, which can't carry raw binary,
+        # so request templated JSON output, which normalizes to UTF-8.
+        "-T",
+        "json",
+    ]
+    res = subprocess.run(args, check=True, capture_output=True)
+    return json.loads(res.stdout)[0], bundle_data
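
For orientation, a sketch of exercising ``generate_bundle`` on its own. The
revision is illustrative; the metadata keys printed are the ones
``trigger_try`` consumes from the ``hg log -T json`` output::

   from hgautomation.try_server import generate_bundle

   # Resolve '.' into changeset metadata plus a gzip-v2 bundle against
   # public() ancestors, exactly as trigger_try() does internally.
   cset, bundle = generate_bundle(rev='.')

   print(cset['node'], cset['branch'], cset['user'])
   print(cset['desc'].splitlines()[0])
   print('%d bundle bytes' % len(bundle))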
--- a/contrib/automation/hgautomation/windows.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/hgautomation/windows.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,15 +7,16 @@
 
 # no-check-code because Python 3 native.
 
+import datetime
 import os
+import paramiko
 import pathlib
 import re
 import subprocess
 import tempfile
 
-from .winrm import (
-    run_powershell,
-)
+from .pypi import upload as pypi_upload
+from .winrm import run_powershell
 
 
 # PowerShell commands to activate a Visual Studio 2008 environment.
@@ -100,6 +101,33 @@
 }}
 '''
 
+X86_WHEEL_FILENAME = 'mercurial-{version}-cp27-cp27m-win32.whl'
+X64_WHEEL_FILENAME = 'mercurial-{version}-cp27-cp27m-win_amd64.whl'
+X86_EXE_FILENAME = 'Mercurial-{version}.exe'
+X64_EXE_FILENAME = 'Mercurial-{version}-x64.exe'
+X86_MSI_FILENAME = 'mercurial-{version}-x86.msi'
+X64_MSI_FILENAME = 'mercurial-{version}-x64.msi'
+
+MERCURIAL_SCM_BASE_URL = 'https://mercurial-scm.org/release/windows'
+
+X86_USER_AGENT_PATTERN = '.*Windows.*'
+X64_USER_AGENT_PATTERN = '.*Windows.*(WOW|x)64.*'
+
+X86_EXE_DESCRIPTION = (
+    'Mercurial {version} Inno Setup installer - x86 Windows '
+    '- does not require admin rights'
+)
+X64_EXE_DESCRIPTION = (
+    'Mercurial {version} Inno Setup installer - x64 Windows '
+    '- does not require admin rights'
+)
+X86_MSI_DESCRIPTION = (
+    'Mercurial {version} MSI installer - x86 Windows ' '- requires admin rights'
+)
+X64_MSI_DESCRIPTION = (
+    'Mercurial {version} MSI installer - x64 Windows ' '- requires admin rights'
+)
+
 
 def get_vc_prefix(arch):
     if arch == 'x86':
@@ -133,10 +161,21 @@
         ssh_dir.chmod(0o0700)
 
         # Generate SSH key to use for communication.
-        subprocess.run([
-            'ssh-keygen', '-t', 'rsa', '-b', '4096', '-N', '',
-            '-f', str(ssh_dir / 'id_rsa')],
-            check=True, capture_output=True)
+        subprocess.run(
+            [
+                'ssh-keygen',
+                '-t',
+                'rsa',
+                '-b',
+                '4096',
+                '-N',
+                '',
+                '-f',
+                str(ssh_dir / 'id_rsa'),
+            ],
+            check=True,
+            capture_output=True,
+        )
 
         # Add it to ~/.ssh/authorized_keys on remote.
         # This assumes the file doesn't already exist.
@@ -157,8 +196,10 @@
             fh.write('  IdentityFile %s\n' % (ssh_dir / 'id_rsa'))
 
         if not (hg_repo / '.hg').is_dir():
-            raise Exception('%s is not a Mercurial repository; '
-                            'synchronization not yet supported' % hg_repo)
+            raise Exception(
+                '%s is not a Mercurial repository; '
+                'synchronization not yet supported' % hg_repo
+            )
 
         env = dict(os.environ)
         env['HGPLAIN'] = '1'
@@ -168,17 +209,29 @@
 
         res = subprocess.run(
             ['python2.7', str(hg_bin), 'log', '-r', revision, '-T', '{node}'],
-            cwd=str(hg_repo), env=env, check=True, capture_output=True)
+            cwd=str(hg_repo),
+            env=env,
+            check=True,
+            capture_output=True,
+        )
 
         full_revision = res.stdout.decode('ascii')
 
         args = [
-            'python2.7', hg_bin,
-            '--config', 'ui.ssh=ssh -F %s' % ssh_config,
-            '--config', 'ui.remotecmd=c:/hgdev/venv-bootstrap/Scripts/hg.exe',
+            'python2.7',
+            hg_bin,
+            '--config',
+            'ui.ssh=ssh -F %s' % ssh_config,
+            '--config',
+            'ui.remotecmd=c:/hgdev/venv-bootstrap/Scripts/hg.exe',
             # Also ensure .hgtags changes are present so auto version
             # calculation works.
-            'push', '-f', '-r', full_revision, '-r', 'file(.hgtags)',
+            'push',
+            '-f',
+            '-r',
+            full_revision,
+            '-r',
+            'file(.hgtags)',
             'ssh://%s/c:/hgdev/src' % public_ip,
         ]
 
@@ -188,8 +241,9 @@
         if res.returncode not in (0, 1):
             res.check_returncode()
 
-        run_powershell(winrm_client,
-                       HG_UPDATE_CLEAN.format(revision=full_revision))
+        run_powershell(
+            winrm_client, HG_UPDATE_CLEAN.format(revision=full_revision)
+        )
 
         # TODO detect dirty local working directory and synchronize accordingly.
 
@@ -225,8 +279,9 @@
     winrm_client.fetch(source, str(dest))
 
 
-def build_inno_installer(winrm_client, arch: str, dest_path: pathlib.Path,
-                         version=None):
+def build_inno_installer(
+    winrm_client, arch: str, dest_path: pathlib.Path, version=None
+):
     """Build the Inno Setup installer on a remote machine.
 
     Using a WinRM client, remote commands are executed to build
@@ -238,8 +293,9 @@
     if version:
         extra_args.extend(['--version', version])
 
-    ps = get_vc_prefix(arch) + BUILD_INNO.format(arch=arch,
-                                                 extra_args=' '.join(extra_args))
+    ps = get_vc_prefix(arch) + BUILD_INNO.format(
+        arch=arch, extra_args=' '.join(extra_args)
+    )
     run_powershell(winrm_client, ps)
     copy_latest_dist(winrm_client, '*.exe', dest_path)
 
@@ -256,8 +312,9 @@
     copy_latest_dist(winrm_client, '*.whl', dest_path)
 
 
-def build_wix_installer(winrm_client, arch: str, dest_path: pathlib.Path,
-                        version=None):
+def build_wix_installer(
+    winrm_client, arch: str, dest_path: pathlib.Path, version=None
+):
     """Build the WiX installer on a remote machine.
 
     Using a WinRM client, remote commands are executed to build a WiX installer.
@@ -267,8 +324,9 @@
     if version:
         extra_args.extend(['--version', version])
 
-    ps = get_vc_prefix(arch) + BUILD_WIX.format(arch=arch,
-                                                extra_args=' '.join(extra_args))
+    ps = get_vc_prefix(arch) + BUILD_WIX.format(
+        arch=arch, extra_args=' '.join(extra_args)
+    )
     run_powershell(winrm_client, ps)
     copy_latest_dist(winrm_client, '*.msi', dest_path)
 
@@ -282,17 +340,171 @@
     ``run-tests.py``.
     """
     if not re.match(r'\d\.\d', python_version):
-        raise ValueError(r'python_version must be \d.\d; got %s' %
-                         python_version)
+        raise ValueError(
+            r'python_version must be \d.\d; got %s' % python_version
+        )
 
     if arch not in ('x86', 'x64'):
         raise ValueError('arch must be x86 or x64; got %s' % arch)
 
     python_path = 'python%s-%s' % (python_version.replace('.', ''), arch)
 
-    ps = RUN_TESTS.format(
-        python_path=python_path,
-        test_flags=test_flags or '',
+    ps = RUN_TESTS.format(python_path=python_path, test_flags=test_flags or '',)
+
+    run_powershell(winrm_client, ps)
+
+
+def resolve_wheel_artifacts(dist_path: pathlib.Path, version: str):
+    return (
+        dist_path / X86_WHEEL_FILENAME.format(version=version),
+        dist_path / X64_WHEEL_FILENAME.format(version=version),
+    )
+
+
+def resolve_all_artifacts(dist_path: pathlib.Path, version: str):
+    return (
+        dist_path / X86_WHEEL_FILENAME.format(version=version),
+        dist_path / X64_WHEEL_FILENAME.format(version=version),
+        dist_path / X86_EXE_FILENAME.format(version=version),
+        dist_path / X64_EXE_FILENAME.format(version=version),
+        dist_path / X86_MSI_FILENAME.format(version=version),
+        dist_path / X64_MSI_FILENAME.format(version=version),
+    )
+
+
+def generate_latest_dat(version: str):
+    x86_exe_filename = X86_EXE_FILENAME.format(version=version)
+    x64_exe_filename = X64_EXE_FILENAME.format(version=version)
+    x86_msi_filename = X86_MSI_FILENAME.format(version=version)
+    x64_msi_filename = X64_MSI_FILENAME.format(version=version)
+
+    entries = (
+        (
+            '10',
+            version,
+            X86_USER_AGENT_PATTERN,
+            '%s/%s' % (MERCURIAL_SCM_BASE_URL, x86_exe_filename),
+            X86_EXE_DESCRIPTION.format(version=version),
+        ),
+        (
+            '10',
+            version,
+            X64_USER_AGENT_PATTERN,
+            '%s/%s' % (MERCURIAL_SCM_BASE_URL, x64_exe_filename),
+            X64_EXE_DESCRIPTION.format(version=version),
+        ),
+        (
+            '10',
+            version,
+            X86_USER_AGENT_PATTERN,
+            '%s/%s' % (MERCURIAL_SCM_BASE_URL, x86_msi_filename),
+            X86_MSI_DESCRIPTION.format(version=version),
+        ),
+        (
+            '10',
+            version,
+            X64_USER_AGENT_PATTERN,
+            '%s/%s' % (MERCURIAL_SCM_BASE_URL, x64_msi_filename),
+            X64_MSI_DESCRIPTION.format(version=version),
+        ),
     )
 
-    run_powershell(winrm_client, ps)
+    lines = ['\t'.join(e) for e in entries]
+
+    return '\n'.join(lines) + '\n'
+
+
+def publish_artifacts_pypi(dist_path: pathlib.Path, version: str):
+    """Publish Windows release artifacts to PyPI."""
+
+    wheel_paths = resolve_wheel_artifacts(dist_path, version)
+
+    for p in wheel_paths:
+        if not p.exists():
+            raise Exception('%s not found' % p)
+
+    print('uploading wheels to PyPI (you may be prompted for credentials)')
+    pypi_upload(wheel_paths)
+
+
+def publish_artifacts_mercurial_scm_org(
+    dist_path: pathlib.Path, version: str, ssh_username=None
+):
+    """Publish Windows release artifacts to mercurial-scm.org."""
+    all_paths = resolve_all_artifacts(dist_path, version)
+
+    for p in all_paths:
+        if not p.exists():
+            raise Exception('%s not found' % p)
+
+    client = paramiko.SSHClient()
+    client.load_system_host_keys()
+    # We assume the system SSH configuration knows how to connect.
+    print('connecting to mercurial-scm.org via ssh...')
+    try:
+        client.connect('mercurial-scm.org', username=ssh_username)
+    except paramiko.AuthenticationException:
+        print('error authenticating; is an SSH key available in an SSH agent?')
+        raise
+
+    print('SSH connection established')
+
+    print('opening SFTP client...')
+    sftp = client.open_sftp()
+    print('SFTP client obtained')
+
+    for p in all_paths:
+        dest_path = '/var/www/release/windows/%s' % p.name
+        print('uploading %s to %s' % (p, dest_path))
+
+        with p.open('rb') as fh:
+            data = fh.read()
+
+        with sftp.open(dest_path, 'wb') as fh:
+            fh.write(data)
+            fh.chmod(0o0664)
+
+    latest_dat_path = '/var/www/release/windows/latest.dat'
+
+    now = datetime.datetime.utcnow()
+    backup_path = dist_path / (
+        'latest-windows-%s.dat' % now.strftime('%Y%m%dT%H%M%S')
+    )
+    print('backing up %s to %s' % (latest_dat_path, backup_path))
+
+    with sftp.open(latest_dat_path, 'rb') as fh:
+        latest_dat_old = fh.read()
+
+    with backup_path.open('wb') as fh:
+        fh.write(latest_dat_old)
+
+    print('writing %s with content:' % latest_dat_path)
+    latest_dat_content = generate_latest_dat(version)
+    print(latest_dat_content)
+
+    with sftp.open(latest_dat_path, 'wb') as fh:
+        fh.write(latest_dat_content.encode('ascii'))
+
+
+def publish_artifacts(
+    dist_path: pathlib.Path,
+    version: str,
+    pypi=True,
+    mercurial_scm_org=True,
+    ssh_username=None,
+):
+    """Publish Windows release artifacts.
+
+    Files are found in `dist_path`. We will look for files with version string
+    `version`.
+
+    `pypi` controls whether we upload to PyPI.
+    `mercurial_scm_org` controls whether we upload to mercurial-scm.org.
+    """
+    if pypi:
+        publish_artifacts_pypi(dist_path, version)
+
+    if mercurial_scm_org:
+        publish_artifacts_mercurial_scm_org(
+            dist_path, version, ssh_username=ssh_username
+        )
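
A hedged sketch of invoking the new publishing entry point end to end. The
paths, version, and SSH username are hypothetical; either upload target can
be disabled via its keyword flag::

   import pathlib

   from hgautomation.windows import publish_artifacts

   # Hypothetical artifact directory; all six wheel/EXE/MSI files for this
   # version must already exist there, or an Exception is raised.
   publish_artifacts(
       pathlib.Path('dist'),
       '5.2',
       pypi=True,
       mercurial_scm_org=True,
       ssh_username='releaser',  # hypothetical mercurial-scm.org account
   )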
--- a/contrib/automation/hgautomation/winrm.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/hgautomation/winrm.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,9 +11,7 @@
 import pprint
 import time
 
-from pypsrp.client import (
-    Client,
-)
+from pypsrp.client import Client
 from pypsrp.powershell import (
     PowerShell,
     PSInvocationState,
@@ -35,8 +33,13 @@
 
     while True:
         try:
-            client = Client(host, username=username, password=password,
-                            ssl=ssl, connection_timeout=5)
+            client = Client(
+                host,
+                username=username,
+                password=password,
+                ssl=ssl,
+                connection_timeout=5,
+            )
             client.execute_ps("Write-Host 'Hello, World!'")
             return client
         except requests.exceptions.ConnectionError:
@@ -52,7 +55,7 @@
 
     try:
         o = str(o)
-    except TypeError:
+    except (AttributeError, TypeError):
         o = pprint.pformat(o.extended_properties)
 
     return o
@@ -78,5 +81,7 @@
             print(format_object(o))
 
         if ps.state == PSInvocationState.FAILED:
-            raise Exception('PowerShell execution failed: %s' %
-                            ' '.join(map(format_object, ps.streams.error)))
+            raise Exception(
+                'PowerShell execution failed: %s'
+                % ' '.join(map(format_object, ps.streams.error))
+            )
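
For completeness, a sketch of the two helpers this module exposes. The host
and credentials are placeholders, and ``wait_for_winrm``'s positional
parameters are inferred from the hunk above::

   from hgautomation.winrm import run_powershell, wait_for_winrm

   # Polls until the WinRM endpoint answers a trivial Write-Host, then
   # returns a connected pypsrp Client.
   client = wait_for_winrm('192.0.2.10', 'Administrator', 's3cret')

   # Scripts run via run_powershell() raise on PSInvocationState.FAILED,
   # with stream output rendered through format_object().
   run_powershell(client, "Write-Host 'hello from WinRM'")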
--- a/contrib/automation/linux-requirements-py2.txt	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/linux-requirements-py2.txt	Mon Oct 21 11:09:48 2019 -0400
@@ -2,7 +2,7 @@
 # This file is autogenerated by pip-compile
 # To update, run:
 #
-#    pip-compile -U --generate-hashes --output-file contrib/automation/linux-requirements-py2.txt contrib/automation/linux-requirements.txt.in
+#    pip-compile --generate-hashes --output-file=contrib/automation/linux-requirements-py2.txt contrib/automation/linux-requirements.txt.in
 #
 astroid==1.6.6 \
     --hash=sha256:87de48a92e29cedf7210ffa853d11441e7ad94cb47bacd91b023499b51cbc756 \
@@ -22,10 +22,10 @@
     --hash=sha256:509f9419ee91cdd00ba34443217d5ca51f5a364a404e1dce9e8979cea969ca48 \
     --hash=sha256:f5260a6e679d2ff42ec91ec5252f4eeffdcf21053db9113bd0a8e4d953769c00 \
     # via vcrpy
-docutils==0.14 \
-    --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
-    --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
-    --hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6
+docutils==0.15.2 \
+    --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
+    --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
+    --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99
 enum34==1.1.6 \
     --hash=sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850 \
     --hash=sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a \
@@ -36,83 +36,70 @@
     --hash=sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca \
     --hash=sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50 \
     # via mock
-futures==3.2.0 \
-    --hash=sha256:9ec02aa7d674acb8618afb127e27fde7fc68994c0437ad759fa094a574adb265 \
-    --hash=sha256:ec0a6cb848cc212002b9828c3e34c675e0c9ff6741dc445cab6fdd4e1085d1f1 \
+futures==3.3.0 \
+    --hash=sha256:49b3f5b064b6e3afc3316421a3f25f66c137ae88f068abbf72830170033c5e16 \
+    --hash=sha256:7e033af76a5e35f58e56da7a91e687706faf4e7bdfb2cbc3f2cca6b9bcda9794 \
     # via isort
 fuzzywuzzy==0.17.0 \
     --hash=sha256:5ac7c0b3f4658d2743aa17da53a55598144edbc5bee3c6863840636e6926f254 \
     --hash=sha256:6f49de47db00e1c71d40ad16da42284ac357936fa9b66bea1df63fed07122d62
-isort==4.3.17 \
-    --hash=sha256:01cb7e1ca5e6c5b3f235f0385057f70558b70d2f00320208825fa62887292f43 \
-    --hash=sha256:268067462aed7eb2a1e237fcb287852f22077de3fb07964e87e00f829eea2d1a \
+isort==4.3.21 \
+    --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
+    --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd \
     # via pylint
-lazy-object-proxy==1.3.1 \
-    --hash=sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33 \
-    --hash=sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39 \
-    --hash=sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019 \
-    --hash=sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088 \
-    --hash=sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b \
-    --hash=sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e \
-    --hash=sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6 \
-    --hash=sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b \
-    --hash=sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5 \
-    --hash=sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff \
-    --hash=sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd \
-    --hash=sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7 \
-    --hash=sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff \
-    --hash=sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d \
-    --hash=sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2 \
-    --hash=sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35 \
-    --hash=sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4 \
-    --hash=sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514 \
-    --hash=sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252 \
-    --hash=sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109 \
-    --hash=sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f \
-    --hash=sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c \
-    --hash=sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92 \
-    --hash=sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577 \
-    --hash=sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d \
-    --hash=sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d \
-    --hash=sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f \
-    --hash=sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a \
-    --hash=sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b \
+lazy-object-proxy==1.4.1 \
+    --hash=sha256:159a745e61422217881c4de71f9eafd9d703b93af95618635849fe469a283661 \
+    --hash=sha256:23f63c0821cc96a23332e45dfaa83266feff8adc72b9bcaef86c202af765244f \
+    --hash=sha256:3b11be575475db2e8a6e11215f5aa95b9ec14de658628776e10d96fa0b4dac13 \
+    --hash=sha256:3f447aff8bc61ca8b42b73304f6a44fa0d915487de144652816f950a3f1ab821 \
+    --hash=sha256:4ba73f6089cd9b9478bc0a4fa807b47dbdb8fad1d8f31a0f0a5dbf26a4527a71 \
+    --hash=sha256:4f53eadd9932055eac465bd3ca1bd610e4d7141e1278012bd1f28646aebc1d0e \
+    --hash=sha256:64483bd7154580158ea90de5b8e5e6fc29a16a9b4db24f10193f0c1ae3f9d1ea \
+    --hash=sha256:6f72d42b0d04bfee2397aa1862262654b56922c20a9bb66bb76b6f0e5e4f9229 \
+    --hash=sha256:7c7f1ec07b227bdc561299fa2328e85000f90179a2f44ea30579d38e037cb3d4 \
+    --hash=sha256:7c8b1ba1e15c10b13cad4171cfa77f5bb5ec2580abc5a353907780805ebe158e \
+    --hash=sha256:8559b94b823f85342e10d3d9ca4ba5478168e1ac5658a8a2f18c991ba9c52c20 \
+    --hash=sha256:a262c7dfb046f00e12a2bdd1bafaed2408114a89ac414b0af8755c696eb3fc16 \
+    --hash=sha256:acce4e3267610c4fdb6632b3886fe3f2f7dd641158a843cf6b6a68e4ce81477b \
+    --hash=sha256:be089bb6b83fac7f29d357b2dc4cf2b8eb8d98fe9d9ff89f9ea6012970a853c7 \
+    --hash=sha256:bfab710d859c779f273cc48fb86af38d6e9210f38287df0069a63e40b45a2f5c \
+    --hash=sha256:c10d29019927301d524a22ced72706380de7cfc50f767217485a912b4c8bd82a \
+    --hash=sha256:dd6e2b598849b3d7aee2295ac765a578879830fb8966f70be8cd472e6069932e \
+    --hash=sha256:e408f1eacc0a68fed0c08da45f31d0ebb38079f043328dce69ff133b95c29dc1 \
     # via astroid
 mccabe==0.6.1 \
     --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
     --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \
     # via pylint
-mock==2.0.0 \
-    --hash=sha256:5ce3c71c5545b472da17b72268978914d0252980348636840bd34a00b5cc96c1 \
-    --hash=sha256:b158b6df76edd239b8208d481dc46b6afd45a846b7812ff0ce58971cf5bc8bba \
+mock==3.0.5 \
+    --hash=sha256:83657d894c90d5681d62155c82bda9c1187827525880eda8ff5df4ec813437c3 \
+    --hash=sha256:d157e52d4e5b938c550f39eb2fd15610db062441a9c2747d3dbfa9298211d0f8 \
     # via vcrpy
-pbr==5.1.3 \
-    --hash=sha256:8257baf496c8522437e8a6cfe0f15e00aedc6c0e0e7c9d55eeeeab31e0853843 \
-    --hash=sha256:8c361cc353d988e4f5b998555c88098b9d5964c2e11acf7b0d21925a66bb5824 \
-    # via mock
 pyflakes==2.1.1 \
     --hash=sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0 \
     --hash=sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2
-pygments==2.3.1 \
-    --hash=sha256:5ffada19f6203563680669ee7f53b64dabbeb100eb51b61996085e99c03b284a \
-    --hash=sha256:e8218dd399a61674745138520d0d4cf2621d7e032439341bc3f647bff125818d
-pylint==1.9.4 \
-    --hash=sha256:02c2b6d268695a8b64ad61847f92e611e6afcff33fd26c3a2125370c4662905d \
-    --hash=sha256:ee1e85575587c5b58ddafa25e1c1b01691ef172e139fc25585e5d3f02451da93
+pygments==2.4.2 \
+    --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \
+    --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297
+pylint==1.9.5 \
+    --hash=sha256:367e3d49813d349a905390ac27989eff82ab84958731c5ef0bef867452cfdc42 \
+    --hash=sha256:97a42df23d436c70132971d1dcb9efad2fe5c0c6add55b90161e773caf729300
 python-levenshtein==0.12.0 \
     --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1
-pyyaml==5.1 \
-    --hash=sha256:1adecc22f88d38052fb787d959f003811ca858b799590a5eaa70e63dca50308c \
-    --hash=sha256:436bc774ecf7c103814098159fbb84c2715d25980175292c648f2da143909f95 \
-    --hash=sha256:460a5a4248763f6f37ea225d19d5c205677d8d525f6a83357ca622ed541830c2 \
-    --hash=sha256:5a22a9c84653debfbf198d02fe592c176ea548cccce47553f35f466e15cf2fd4 \
-    --hash=sha256:7a5d3f26b89d688db27822343dfa25c599627bc92093e788956372285c6298ad \
-    --hash=sha256:9372b04a02080752d9e6f990179a4ab840227c6e2ce15b95e1278456664cf2ba \
-    --hash=sha256:a5dcbebee834eaddf3fa7366316b880ff4062e4bcc9787b78c7fbb4a26ff2dd1 \
-    --hash=sha256:aee5bab92a176e7cd034e57f46e9df9a9862a71f8f37cad167c6fc74c65f5b4e \
-    --hash=sha256:c51f642898c0bacd335fc119da60baae0824f2cde95b0330b56c0553439f0673 \
-    --hash=sha256:c68ea4d3ba1705da1e0d85da6684ac657912679a649e8868bd850d2c299cce13 \
-    --hash=sha256:e23d0cc5299223dcc37885dae624f382297717e459ea24053709675a976a3e19 \
+pyyaml==5.1.2 \
+    --hash=sha256:0113bc0ec2ad727182326b61326afa3d1d8280ae1122493553fd6f4397f33df9 \
+    --hash=sha256:01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4 \
+    --hash=sha256:5124373960b0b3f4aa7df1707e63e9f109b5263eca5976c66e08b1c552d4eaf8 \
+    --hash=sha256:5ca4f10adbddae56d824b2c09668e91219bb178a1eee1faa56af6f99f11bf696 \
+    --hash=sha256:7907be34ffa3c5a32b60b95f4d95ea25361c951383a894fec31be7252b2b6f34 \
+    --hash=sha256:7ec9b2a4ed5cad025c2278a1e6a19c011c80a3caaac804fd2d329e9cc2c287c9 \
+    --hash=sha256:87ae4c829bb25b9fe99cf71fbb2140c448f534e24c998cc60f39ae4f94396a73 \
+    --hash=sha256:9de9919becc9cc2ff03637872a440195ac4241c80536632fffeb6a1e25a74299 \
+    --hash=sha256:a5a85b10e450c66b49f98846937e8cfca1db3127a9d5d1e31ca45c3d0bef4c5b \
+    --hash=sha256:b0997827b4f6a7c286c01c5f60384d218dca4ed7d9efa945c3e1aa623d5709ae \
+    --hash=sha256:b631ef96d3222e62861443cc89d6563ba3eeb816eeb96b2629345ab795e53681 \
+    --hash=sha256:bf47c0607522fdbca6c9e817a6e81b08491de50f3766a7a0e6a5be7905961b41 \
+    --hash=sha256:f81025eddd0327c7d4cfe9b62cf33190e1e736cc6e97502b3ec425f574b3e7a8 \
     # via vcrpy
 singledispatch==3.4.0.3 \
     --hash=sha256:5b06af87df13818d14f08a028e42f566640aef80805c3b50c5056b086e3c2b9c \
@@ -125,6 +112,10 @@
 vcrpy==2.0.1 \
     --hash=sha256:127e79cf7b569d071d1bd761b83f7b62b2ce2a2eb63ceca7aa67cba8f2602ea3 \
     --hash=sha256:57be64aa8e9883a4117d0b15de28af62275c001abcdb00b6dc2d4406073d9a4f
-wrapt==1.11.1 \
-    --hash=sha256:4aea003270831cceb8a90ff27c4031da6ead7ec1886023b80ce0dfe0adf61533 \
+wrapt==1.11.2 \
+    --hash=sha256:565a021fd19419476b9362b05eeaa094178de64f8361e44468f9e9d7843901e1 \
     # via astroid, vcrpy
+
+# WARNING: The following packages were not pinned, but pip requires them to be
+# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
+# setuptools==41.0.1        # via python-levenshtein
--- a/contrib/automation/linux-requirements-py3.txt	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/linux-requirements-py3.txt	Mon Oct 21 11:09:48 2019 -0400
@@ -2,16 +2,16 @@
 # This file is autogenerated by pip-compile
 # To update, run:
 #
-#    pip-compile -U --generate-hashes --output-file contrib/automation/linux-requirements-py3.txt contrib/automation/linux-requirements.txt.in
+#    pip-compile --generate-hashes --output-file=contrib/automation/linux-requirements-py3.txt contrib/automation/linux-requirements.txt.in
 #
 astroid==2.2.5 \
     --hash=sha256:6560e1e1749f68c64a4b5dee4e091fce798d2f0d84ebe638cf0e0585a343acf4 \
     --hash=sha256:b65db1bbaac9f9f4d190199bb8680af6f6f84fd3769a5ea883df8a91fe68b4c4 \
     # via pylint
-docutils==0.14 \
-    --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
-    --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
-    --hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6
+docutils==0.15.2 \
+    --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
+    --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
+    --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99
 fuzzywuzzy==0.17.0 \
     --hash=sha256:5ac7c0b3f4658d2743aa17da53a55598144edbc5bee3c6863840636e6926f254 \
     --hash=sha256:6f49de47db00e1c71d40ad16da42284ac357936fa9b66bea1df63fed07122d62
@@ -19,40 +19,29 @@
     --hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \
     --hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \
     # via yarl
-isort==4.3.17 \
-    --hash=sha256:01cb7e1ca5e6c5b3f235f0385057f70558b70d2f00320208825fa62887292f43 \
-    --hash=sha256:268067462aed7eb2a1e237fcb287852f22077de3fb07964e87e00f829eea2d1a \
+isort==4.3.21 \
+    --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
+    --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd \
     # via pylint
-lazy-object-proxy==1.3.1 \
-    --hash=sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33 \
-    --hash=sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39 \
-    --hash=sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019 \
-    --hash=sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088 \
-    --hash=sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b \
-    --hash=sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e \
-    --hash=sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6 \
-    --hash=sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b \
-    --hash=sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5 \
-    --hash=sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff \
-    --hash=sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd \
-    --hash=sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7 \
-    --hash=sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff \
-    --hash=sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d \
-    --hash=sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2 \
-    --hash=sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35 \
-    --hash=sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4 \
-    --hash=sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514 \
-    --hash=sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252 \
-    --hash=sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109 \
-    --hash=sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f \
-    --hash=sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c \
-    --hash=sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92 \
-    --hash=sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577 \
-    --hash=sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d \
-    --hash=sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d \
-    --hash=sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f \
-    --hash=sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a \
-    --hash=sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b \
+lazy-object-proxy==1.4.1 \
+    --hash=sha256:159a745e61422217881c4de71f9eafd9d703b93af95618635849fe469a283661 \
+    --hash=sha256:23f63c0821cc96a23332e45dfaa83266feff8adc72b9bcaef86c202af765244f \
+    --hash=sha256:3b11be575475db2e8a6e11215f5aa95b9ec14de658628776e10d96fa0b4dac13 \
+    --hash=sha256:3f447aff8bc61ca8b42b73304f6a44fa0d915487de144652816f950a3f1ab821 \
+    --hash=sha256:4ba73f6089cd9b9478bc0a4fa807b47dbdb8fad1d8f31a0f0a5dbf26a4527a71 \
+    --hash=sha256:4f53eadd9932055eac465bd3ca1bd610e4d7141e1278012bd1f28646aebc1d0e \
+    --hash=sha256:64483bd7154580158ea90de5b8e5e6fc29a16a9b4db24f10193f0c1ae3f9d1ea \
+    --hash=sha256:6f72d42b0d04bfee2397aa1862262654b56922c20a9bb66bb76b6f0e5e4f9229 \
+    --hash=sha256:7c7f1ec07b227bdc561299fa2328e85000f90179a2f44ea30579d38e037cb3d4 \
+    --hash=sha256:7c8b1ba1e15c10b13cad4171cfa77f5bb5ec2580abc5a353907780805ebe158e \
+    --hash=sha256:8559b94b823f85342e10d3d9ca4ba5478168e1ac5658a8a2f18c991ba9c52c20 \
+    --hash=sha256:a262c7dfb046f00e12a2bdd1bafaed2408114a89ac414b0af8755c696eb3fc16 \
+    --hash=sha256:acce4e3267610c4fdb6632b3886fe3f2f7dd641158a843cf6b6a68e4ce81477b \
+    --hash=sha256:be089bb6b83fac7f29d357b2dc4cf2b8eb8d98fe9d9ff89f9ea6012970a853c7 \
+    --hash=sha256:bfab710d859c779f273cc48fb86af38d6e9210f38287df0069a63e40b45a2f5c \
+    --hash=sha256:c10d29019927301d524a22ced72706380de7cfc50f767217485a912b4c8bd82a \
+    --hash=sha256:dd6e2b598849b3d7aee2295ac765a578879830fb8966f70be8cd472e6069932e \
+    --hash=sha256:e408f1eacc0a68fed0c08da45f31d0ebb38079f043328dce69ff133b95c29dc1 \
     # via astroid
 mccabe==0.6.1 \
     --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
@@ -92,57 +81,54 @@
 pyflakes==2.1.1 \
     --hash=sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0 \
     --hash=sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2
-pygments==2.3.1 \
-    --hash=sha256:5ffada19f6203563680669ee7f53b64dabbeb100eb51b61996085e99c03b284a \
-    --hash=sha256:e8218dd399a61674745138520d0d4cf2621d7e032439341bc3f647bff125818d
+pygments==2.4.2 \
+    --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \
+    --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297
 pylint==2.3.1 \
     --hash=sha256:5d77031694a5fb97ea95e828c8d10fc770a1df6eb3906067aaed42201a8a6a09 \
     --hash=sha256:723e3db49555abaf9bf79dc474c6b9e2935ad82230b10c1138a71ea41ac0fff1
 python-levenshtein==0.12.0 \
     --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1
-pyyaml==5.1 \
-    --hash=sha256:1adecc22f88d38052fb787d959f003811ca858b799590a5eaa70e63dca50308c \
-    --hash=sha256:436bc774ecf7c103814098159fbb84c2715d25980175292c648f2da143909f95 \
-    --hash=sha256:460a5a4248763f6f37ea225d19d5c205677d8d525f6a83357ca622ed541830c2 \
-    --hash=sha256:5a22a9c84653debfbf198d02fe592c176ea548cccce47553f35f466e15cf2fd4 \
-    --hash=sha256:7a5d3f26b89d688db27822343dfa25c599627bc92093e788956372285c6298ad \
-    --hash=sha256:9372b04a02080752d9e6f990179a4ab840227c6e2ce15b95e1278456664cf2ba \
-    --hash=sha256:a5dcbebee834eaddf3fa7366316b880ff4062e4bcc9787b78c7fbb4a26ff2dd1 \
-    --hash=sha256:aee5bab92a176e7cd034e57f46e9df9a9862a71f8f37cad167c6fc74c65f5b4e \
-    --hash=sha256:c51f642898c0bacd335fc119da60baae0824f2cde95b0330b56c0553439f0673 \
-    --hash=sha256:c68ea4d3ba1705da1e0d85da6684ac657912679a649e8868bd850d2c299cce13 \
-    --hash=sha256:e23d0cc5299223dcc37885dae624f382297717e459ea24053709675a976a3e19 \
+pyyaml==5.1.2 \
+    --hash=sha256:0113bc0ec2ad727182326b61326afa3d1d8280ae1122493553fd6f4397f33df9 \
+    --hash=sha256:01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4 \
+    --hash=sha256:5124373960b0b3f4aa7df1707e63e9f109b5263eca5976c66e08b1c552d4eaf8 \
+    --hash=sha256:5ca4f10adbddae56d824b2c09668e91219bb178a1eee1faa56af6f99f11bf696 \
+    --hash=sha256:7907be34ffa3c5a32b60b95f4d95ea25361c951383a894fec31be7252b2b6f34 \
+    --hash=sha256:7ec9b2a4ed5cad025c2278a1e6a19c011c80a3caaac804fd2d329e9cc2c287c9 \
+    --hash=sha256:87ae4c829bb25b9fe99cf71fbb2140c448f534e24c998cc60f39ae4f94396a73 \
+    --hash=sha256:9de9919becc9cc2ff03637872a440195ac4241c80536632fffeb6a1e25a74299 \
+    --hash=sha256:a5a85b10e450c66b49f98846937e8cfca1db3127a9d5d1e31ca45c3d0bef4c5b \
+    --hash=sha256:b0997827b4f6a7c286c01c5f60384d218dca4ed7d9efa945c3e1aa623d5709ae \
+    --hash=sha256:b631ef96d3222e62861443cc89d6563ba3eeb816eeb96b2629345ab795e53681 \
+    --hash=sha256:bf47c0607522fdbca6c9e817a6e81b08491de50f3766a7a0e6a5be7905961b41 \
+    --hash=sha256:f81025eddd0327c7d4cfe9b62cf33190e1e736cc6e97502b3ec425f574b3e7a8 \
     # via vcrpy
 six==1.12.0 \
     --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
     --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
     # via astroid, vcrpy
-typed-ast==1.3.4 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
-    --hash=sha256:04894d268ba6eab7e093d43107869ad49e7b5ef40d1a94243ea49b352061b200 \
-    --hash=sha256:16616ece19daddc586e499a3d2f560302c11f122b9c692bc216e821ae32aa0d0 \
-    --hash=sha256:252fdae740964b2d3cdfb3f84dcb4d6247a48a6abe2579e8029ab3be3cdc026c \
-    --hash=sha256:2af80a373af123d0b9f44941a46df67ef0ff7a60f95872412a145f4500a7fc99 \
-    --hash=sha256:2c88d0a913229a06282b285f42a31e063c3bf9071ff65c5ea4c12acb6977c6a7 \
-    --hash=sha256:2ea99c029ebd4b5a308d915cc7fb95b8e1201d60b065450d5d26deb65d3f2bc1 \
-    --hash=sha256:3d2e3ab175fc097d2a51c7a0d3fda442f35ebcc93bb1d7bd9b95ad893e44c04d \
-    --hash=sha256:4766dd695548a15ee766927bf883fb90c6ac8321be5a60c141f18628fb7f8da8 \
-    --hash=sha256:56b6978798502ef66625a2e0f80cf923da64e328da8bbe16c1ff928c70c873de \
-    --hash=sha256:5cddb6f8bce14325b2863f9d5ac5c51e07b71b462361fd815d1d7706d3a9d682 \
-    --hash=sha256:644ee788222d81555af543b70a1098f2025db38eaa99226f3a75a6854924d4db \
-    --hash=sha256:64cf762049fc4775efe6b27161467e76d0ba145862802a65eefc8879086fc6f8 \
-    --hash=sha256:68c362848d9fb71d3c3e5f43c09974a0ae319144634e7a47db62f0f2a54a7fa7 \
-    --hash=sha256:6c1f3c6f6635e611d58e467bf4371883568f0de9ccc4606f17048142dec14a1f \
-    --hash=sha256:b213d4a02eec4ddf622f4d2fbc539f062af3788d1f332f028a2e19c42da53f15 \
-    --hash=sha256:bb27d4e7805a7de0e35bd0cb1411bc85f807968b2b0539597a49a23b00a622ae \
-    --hash=sha256:c9d414512eaa417aadae7758bc118868cd2396b0e6138c1dd4fda96679c079d3 \
-    --hash=sha256:f0937165d1e25477b01081c4763d2d9cdc3b18af69cb259dd4f640c9b900fe5e \
-    --hash=sha256:fb96a6e2c11059ecf84e6741a319f93f683e440e341d4489c9b161eca251cf2a \
-    --hash=sha256:fc71d2d6ae56a091a8d94f33ec9d0f2001d1cb1db423d8b4355debfe9ce689b7
+typed-ast==1.4.0 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
+    --hash=sha256:18511a0b3e7922276346bcb47e2ef9f38fb90fd31cb9223eed42c85d1312344e \
+    --hash=sha256:262c247a82d005e43b5b7f69aff746370538e176131c32dda9cb0f324d27141e \
+    --hash=sha256:2b907eb046d049bcd9892e3076c7a6456c93a25bebfe554e931620c90e6a25b0 \
+    --hash=sha256:354c16e5babd09f5cb0ee000d54cfa38401d8b8891eefa878ac772f827181a3c \
+    --hash=sha256:4e0b70c6fc4d010f8107726af5fd37921b666f5b31d9331f0bd24ad9a088e631 \
+    --hash=sha256:630968c5cdee51a11c05a30453f8cd65e0cc1d2ad0d9192819df9978984529f4 \
+    --hash=sha256:66480f95b8167c9c5c5c87f32cf437d585937970f3fc24386f313a4c97b44e34 \
+    --hash=sha256:71211d26ffd12d63a83e079ff258ac9d56a1376a25bc80b1cdcdf601b855b90b \
+    --hash=sha256:95bd11af7eafc16e829af2d3df510cecfd4387f6453355188342c3e79a2ec87a \
+    --hash=sha256:bc6c7d3fa1325a0c6613512a093bc2a2a15aeec350451cbdf9e1d4bffe3e3233 \
+    --hash=sha256:cc34a6f5b426748a507dd5d1de4c1978f2eb5626d51326e43280941206c209e1 \
+    --hash=sha256:d755f03c1e4a51e9b24d899561fec4ccaf51f210d52abdf8c07ee2849b212a36 \
+    --hash=sha256:d7c45933b1bdfaf9f36c579671fec15d25b06c8398f113dab64c18ed1adda01d \
+    --hash=sha256:d896919306dd0aa22d0132f62a1b78d11aaf4c9fc5b3410d3c666b818191630a \
+    --hash=sha256:ffde2fbfad571af120fcbfbbc61c72469e72f550d676c3342492a9dfdefb8f12
 vcrpy==2.0.1 \
     --hash=sha256:127e79cf7b569d071d1bd761b83f7b62b2ce2a2eb63ceca7aa67cba8f2602ea3 \
     --hash=sha256:57be64aa8e9883a4117d0b15de28af62275c001abcdb00b6dc2d4406073d9a4f
-wrapt==1.11.1 \
-    --hash=sha256:4aea003270831cceb8a90ff27c4031da6ead7ec1886023b80ce0dfe0adf61533 \
+wrapt==1.11.2 \
+    --hash=sha256:565a021fd19419476b9362b05eeaa094178de64f8361e44468f9e9d7843901e1 \
     # via astroid, vcrpy
 yarl==1.3.0 \
     --hash=sha256:024ecdc12bc02b321bc66b41327f930d1c2c543fa9a561b39861da9388ba7aa9 \
@@ -157,3 +143,7 @@
     --hash=sha256:c9bb7c249c4432cd47e75af3864bc02d26c9594f49c82e2a28624417f0ae63b8 \
     --hash=sha256:e060906c0c585565c718d1c3841747b61c5439af2211e185f6739a9412dfbde1 \
     # via vcrpy
+
+# WARNING: The following packages were not pinned, but pip requires them to be
+# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
+# setuptools==41.0.1        # via python-levenshtein
--- a/contrib/automation/requirements.txt	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/requirements.txt	Mon Oct 21 11:09:48 2019 -0400
@@ -2,43 +2,44 @@
 # This file is autogenerated by pip-compile
 # To update, run:
 #
-#    pip-compile -U --generate-hashes --output-file contrib/automation/requirements.txt contrib/automation/requirements.txt.in
+#    pip-compile --generate-hashes --output-file=contrib/automation/requirements.txt contrib/automation/requirements.txt.in
 #
-asn1crypto==0.24.0 \
-    --hash=sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87 \
-    --hash=sha256:9d5c20441baf0cb60a4ac34cc447c6c189024b6b4c6cd7877034f4965c464e49 \
+asn1crypto==1.0.1 \
+    --hash=sha256:0b199f211ae690df3db4fd6c1c4ff976497fb1da689193e368eedbadc53d9292 \
+    --hash=sha256:bca90060bd995c3f62c4433168eab407e44bdbdb567b3f3a396a676c1a4c4a3f \
     # via cryptography
-bcrypt==3.1.6 \
-    --hash=sha256:0ba875eb67b011add6d8c5b76afbd92166e98b1f1efab9433d5dc0fafc76e203 \
-    --hash=sha256:21ed446054c93e209434148ef0b362432bb82bbdaf7beef70a32c221f3e33d1c \
-    --hash=sha256:28a0459381a8021f57230954b9e9a65bb5e3d569d2c253c5cac6cb181d71cf23 \
-    --hash=sha256:2aed3091eb6f51c26b7c2fad08d6620d1c35839e7a362f706015b41bd991125e \
-    --hash=sha256:2fa5d1e438958ea90eaedbf8082c2ceb1a684b4f6c75a3800c6ec1e18ebef96f \
-    --hash=sha256:3a73f45484e9874252002793518da060fb11eaa76c30713faa12115db17d1430 \
-    --hash=sha256:3e489787638a36bb466cd66780e15715494b6d6905ffdbaede94440d6d8e7dba \
-    --hash=sha256:44636759d222baa62806bbceb20e96f75a015a6381690d1bc2eda91c01ec02ea \
-    --hash=sha256:678c21b2fecaa72a1eded0cf12351b153615520637efcadc09ecf81b871f1596 \
-    --hash=sha256:75460c2c3786977ea9768d6c9d8957ba31b5fbeb0aae67a5c0e96aab4155f18c \
-    --hash=sha256:8ac06fb3e6aacb0a95b56eba735c0b64df49651c6ceb1ad1cf01ba75070d567f \
-    --hash=sha256:8fdced50a8b646fff8fa0e4b1c5fd940ecc844b43d1da5a980cb07f2d1b1132f \
-    --hash=sha256:9b2c5b640a2da533b0ab5f148d87fb9989bf9bcb2e61eea6a729102a6d36aef9 \
-    --hash=sha256:a9083e7fa9adb1a4de5ac15f9097eb15b04e2c8f97618f1b881af40abce382e1 \
-    --hash=sha256:b7e3948b8b1a81c5a99d41da5fb2dc03ddb93b5f96fcd3fd27e643f91efa33e1 \
-    --hash=sha256:b998b8ca979d906085f6a5d84f7b5459e5e94a13fc27c28a3514437013b6c2f6 \
-    --hash=sha256:dd08c50bc6f7be69cd7ba0769acca28c846ec46b7a8ddc2acf4b9ac6f8a7457e \
-    --hash=sha256:de5badee458544ab8125e63e39afeedfcf3aef6a6e2282ac159c95ae7472d773 \
-    --hash=sha256:ede2a87333d24f55a4a7338a6ccdccf3eaa9bed081d1737e0db4dbd1a4f7e6b6 \
+bcrypt==3.1.7 \
+    --hash=sha256:0258f143f3de96b7c14f762c770f5fc56ccd72f8a1857a451c1cd9a655d9ac89 \
+    --hash=sha256:0b0069c752ec14172c5f78208f1863d7ad6755a6fae6fe76ec2c80d13be41e42 \
+    --hash=sha256:19a4b72a6ae5bb467fea018b825f0a7d917789bcfe893e53f15c92805d187294 \
+    --hash=sha256:5432dd7b34107ae8ed6c10a71b4397f1c853bd39a4d6ffa7e35f40584cffd161 \
+    --hash=sha256:69361315039878c0680be456640f8705d76cb4a3a3fe1e057e0f261b74be4b31 \
+    --hash=sha256:6fe49a60b25b584e2f4ef175b29d3a83ba63b3a4df1b4c0605b826668d1b6be5 \
+    --hash=sha256:74a015102e877d0ccd02cdeaa18b32aa7273746914a6c5d0456dd442cb65b99c \
+    --hash=sha256:763669a367869786bb4c8fcf731f4175775a5b43f070f50f46f0b59da45375d0 \
+    --hash=sha256:8b10acde4e1919d6015e1df86d4c217d3b5b01bb7744c36113ea43d529e1c3de \
+    --hash=sha256:9fe92406c857409b70a38729dbdf6578caf9228de0aef5bc44f859ffe971a39e \
+    --hash=sha256:a190f2a5dbbdbff4b74e3103cef44344bc30e61255beb27310e2aec407766052 \
+    --hash=sha256:a595c12c618119255c90deb4b046e1ca3bcfad64667c43d1166f2b04bc72db09 \
+    --hash=sha256:c9457fa5c121e94a58d6505cadca8bed1c64444b83b3204928a866ca2e599105 \
+    --hash=sha256:cb93f6b2ab0f6853550b74e051d297c27a638719753eb9ff66d1e4072be67133 \
+    --hash=sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7 \
+    --hash=sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc \
     # via paramiko
-boto3==1.9.137 \
-    --hash=sha256:882cc4869b47b51dae4b4a900769e72171ff00e0b6bca644b2d7a7ad7378f324 \
-    --hash=sha256:cd503a7e7a04f1c14d2801f9727159dfa88c393b4004e98940fa4aa205d920c8
-botocore==1.12.137 \
-    --hash=sha256:0d95794f6b1239c75e2c5f966221bcd4b68020fddb5676f757531eedbb612ed8 \
-    --hash=sha256:3213cf48cf2ceee10fc3b93221f2cd1c38521cca7584f547d5c086213cc60f35 \
+bleach==3.1.0 \
+    --hash=sha256:213336e49e102af26d9cde77dd2d0397afabc5a6bf2fed985dc35b5d1e285a16 \
+    --hash=sha256:3fdf7f77adcf649c9911387df51254b813185e32b2c6619f690b593a617e19fa \
+    # via readme-renderer
+boto3==1.9.243 \
+    --hash=sha256:404acbecef8f4912f18312fcfaffe7eba7f10b3b7adf7853bdba59cdf2275ebb \
+    --hash=sha256:c6e5a7e4548ce7586c354ff633f2a66ba3c471d15a8ae6a30f873122ab04e1cf
+botocore==1.12.243 \
+    --hash=sha256:397585a7881230274afb8d1877ef69a661b0a311745cd324f14a052fb2a2863a \
+    --hash=sha256:4496f8da89cb496462a831897ad248e13e431d9fa7e41e06d426fd6658ab6e59 \
     # via boto3, s3transfer
-certifi==2019.3.9 \
-    --hash=sha256:59b7658e26ca9c7339e00f8f4636cdfe59d34fa37b9b04f6f9e9926b3cece1a5 \
-    --hash=sha256:b26104d6835d1f5e49452a26eb2ff87fe7090b89dfcaee5ea2212697e1e1d7ae \
+certifi==2019.9.11 \
+    --hash=sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50 \
+    --hash=sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef \
     # via requests
 cffi==1.12.3 \
     --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \
@@ -74,32 +75,29 @@
     --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
     --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \
     # via requests
-cryptography==2.6.1 \
-    --hash=sha256:066f815f1fe46020877c5983a7e747ae140f517f1b09030ec098503575265ce1 \
-    --hash=sha256:210210d9df0afba9e000636e97810117dc55b7157c903a55716bb73e3ae07705 \
-    --hash=sha256:26c821cbeb683facb966045e2064303029d572a87ee69ca5a1bf54bf55f93ca6 \
-    --hash=sha256:2afb83308dc5c5255149ff7d3fb9964f7c9ee3d59b603ec18ccf5b0a8852e2b1 \
-    --hash=sha256:2db34e5c45988f36f7a08a7ab2b69638994a8923853dec2d4af121f689c66dc8 \
-    --hash=sha256:409c4653e0f719fa78febcb71ac417076ae5e20160aec7270c91d009837b9151 \
-    --hash=sha256:45a4f4cf4f4e6a55c8128f8b76b4c057027b27d4c67e3fe157fa02f27e37830d \
-    --hash=sha256:48eab46ef38faf1031e58dfcc9c3e71756a1108f4c9c966150b605d4a1a7f659 \
-    --hash=sha256:6b9e0ae298ab20d371fc26e2129fd683cfc0cfde4d157c6341722de645146537 \
-    --hash=sha256:6c4778afe50f413707f604828c1ad1ff81fadf6c110cb669579dea7e2e98a75e \
-    --hash=sha256:8c33fb99025d353c9520141f8bc989c2134a1f76bac6369cea060812f5b5c2bb \
-    --hash=sha256:9873a1760a274b620a135054b756f9f218fa61ca030e42df31b409f0fb738b6c \
-    --hash=sha256:9b069768c627f3f5623b1cbd3248c5e7e92aec62f4c98827059eed7053138cc9 \
-    --hash=sha256:9e4ce27a507e4886efbd3c32d120db5089b906979a4debf1d5939ec01b9dd6c5 \
-    --hash=sha256:acb424eaca214cb08735f1a744eceb97d014de6530c1ea23beb86d9c6f13c2ad \
-    --hash=sha256:c8181c7d77388fe26ab8418bb088b1a1ef5fde058c6926790c8a0a3d94075a4a \
-    --hash=sha256:d4afbb0840f489b60f5a580a41a1b9c3622e08ecb5eec8614d4fb4cd914c4460 \
-    --hash=sha256:d9ed28030797c00f4bc43c86bf819266c76a5ea61d006cd4078a93ebf7da6bfd \
-    --hash=sha256:e603aa7bb52e4e8ed4119a58a03b60323918467ef209e6ff9db3ac382e5cf2c6 \
+cryptography==2.7 \
+    --hash=sha256:24b61e5fcb506424d3ec4e18bca995833839bf13c59fc43e530e488f28d46b8c \
+    --hash=sha256:25dd1581a183e9e7a806fe0543f485103232f940fcfc301db65e630512cce643 \
+    --hash=sha256:3452bba7c21c69f2df772762be0066c7ed5dc65df494a1d53a58b683a83e1216 \
+    --hash=sha256:41a0be220dd1ed9e998f5891948306eb8c812b512dc398e5a01846d855050799 \
+    --hash=sha256:5751d8a11b956fbfa314f6553d186b94aa70fdb03d8a4d4f1c82dcacf0cbe28a \
+    --hash=sha256:5f61c7d749048fa6e3322258b4263463bfccefecb0dd731b6561cb617a1d9bb9 \
+    --hash=sha256:72e24c521fa2106f19623a3851e9f89ddfdeb9ac63871c7643790f872a305dfc \
+    --hash=sha256:7b97ae6ef5cba2e3bb14256625423413d5ce8d1abb91d4f29b6d1a081da765f8 \
+    --hash=sha256:961e886d8a3590fd2c723cf07be14e2a91cf53c25f02435c04d39e90780e3b53 \
+    --hash=sha256:96d8473848e984184b6728e2c9d391482008646276c3ff084a1bd89e15ff53a1 \
+    --hash=sha256:ae536da50c7ad1e002c3eee101871d93abdc90d9c5f651818450a0d3af718609 \
+    --hash=sha256:b0db0cecf396033abb4a93c95d1602f268b3a68bb0a9cc06a7cff587bb9a7292 \
+    --hash=sha256:cfee9164954c186b191b91d4193989ca994703b2fff406f71cf454a2d3c7327e \
+    --hash=sha256:e6347742ac8f35ded4a46ff835c60e68c22a536a8ae5c4422966d06946b6d4c6 \
+    --hash=sha256:f27d93f0139a3c056172ebb5d4f9056e770fdf0206c2f422ff2ebbad142e09ed \
+    --hash=sha256:f57b76e46a58b63d1c6375017f4564a28f19a5ca912691fd2e4261b3414b618d \
     # via paramiko, pypsrp
-docutils==0.14 \
-    --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
-    --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
-    --hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6 \
-    # via botocore
+docutils==0.15.2 \
+    --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
+    --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
+    --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99 \
+    # via botocore, readme-renderer
 idna==2.8 \
     --hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \
     --hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \
@@ -108,20 +106,24 @@
     --hash=sha256:3720a4b1bd659dd2eecad0666459b9788813e032b83e7ba58578e48254e0a0e6 \
     --hash=sha256:bde2aef6f44302dfb30320115b17d030798de8c4110e28d5cf6cf91a7a31074c \
     # via boto3, botocore
-ntlm-auth==1.3.0 \
-    --hash=sha256:bb2fd03c665f0f62c5f65695b62dcdb07fb7a45df6ebc86c770be2054d6902dd \
-    --hash=sha256:ce5b4483ed761f341a538a426a71a52e5a9cf5fd834ebef1d2090f9eef14b3f8 \
+ntlm-auth==1.4.0 \
+    --hash=sha256:11f7a3cec38155b7cecdd9bbc8c37cd738d8012f0523b3f98d8caefe394feb97 \
+    --hash=sha256:350f2389c8ee5517f47db55a36ac2f8efc9742a60a678d6e2caa92385bdcaa9a \
     # via pypsrp
-paramiko==2.4.2 \
-    --hash=sha256:3c16b2bfb4c0d810b24c40155dbfd113c0521e7e6ee593d704e84b4c658a1f3b \
-    --hash=sha256:a8975a7df3560c9f1e2b43dc54ebd40fd00a7017392ca5445ce7df409f900fcb
-pyasn1==0.4.5 \
-    --hash=sha256:da2420fe13a9452d8ae97a0e478adde1dee153b11ba832a95b223a2ba01c10f7 \
-    --hash=sha256:da6b43a8c9ae93bc80e2739efb38cc776ba74a886e3e9318d65fe81a8b8a2c6e \
-    # via paramiko
+paramiko==2.6.0 \
+    --hash=sha256:99f0179bdc176281d21961a003ffdb2ec369daac1a1007241f53374e376576cf \
+    --hash=sha256:f4b2edfa0d226b70bd4ca31ea7e389325990283da23465d572ed1f70a7583041
+pkginfo==1.5.0.1 \
+    --hash=sha256:7424f2c8511c186cd5424bbf31045b77435b37a8d604990b79d4e70d741148bb \
+    --hash=sha256:a6d9e40ca61ad3ebd0b72fbadd4fba16e4c0e4df0428c041e01e06eb6ee71f32 \
+    # via twine
 pycparser==2.19 \
     --hash=sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3 \
     # via cffi
+pygments==2.4.2 \
+    --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \
+    --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297 \
+    # via readme-renderer
 pynacl==1.3.0 \
     --hash=sha256:05c26f93964373fc0abe332676cb6735f0ecad27711035b9472751faa8521255 \
     --hash=sha256:0c6100edd16fefd1557da078c7a31e7b7d7a52ce39fdca2bec29d4f7b6e7600c \
@@ -143,26 +145,49 @@
     --hash=sha256:e2da3c13307eac601f3de04887624939aca8ee3c9488a0bb0eca4fb9401fc6b1 \
     --hash=sha256:f67814c38162f4deb31f68d590771a29d5ae3b1bd64b75cf232308e5c74777e0 \
     # via paramiko
-pypsrp==0.3.1 \
-    --hash=sha256:309853380fe086090a03cc6662a778ee69b1cae355ae4a932859034fd76e9d0b \
-    --hash=sha256:90f946254f547dc3493cea8493c819ab87e152a755797c93aa2668678ba8ae85
+pypsrp==0.4.0 \
+    --hash=sha256:64b5bdd725a9744c821483b05ecd266f6417f4c6e90ee961a08838480f7d025e \
+    --hash=sha256:f42919247fb80f7dc24c552560d7c24e754d15326030c9e3b7b94f51cfa4dc69
 python-dateutil==2.8.0 \
     --hash=sha256:7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb \
     --hash=sha256:c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e \
     # via botocore
-requests==2.21.0 \
-    --hash=sha256:502a824f31acdacb3a35b6690b5fbf0bc41d63a24a45c4004352b0242707598e \
-    --hash=sha256:7bf2a778576d825600030a110f3c0e3e8edc51dfaafe1c146e39a2027784957b \
-    # via pypsrp
-s3transfer==0.2.0 \
-    --hash=sha256:7b9ad3213bff7d357f888e0fab5101b56fa1a0548ee77d121c3a3dbfbef4cb2e \
-    --hash=sha256:f23d5cb7d862b104401d9021fc82e5fa0e0cf57b7660a1331425aab0c691d021 \
+readme-renderer==24.0 \
+    --hash=sha256:bb16f55b259f27f75f640acf5e00cf897845a8b3e4731b5c1a436e4b8529202f \
+    --hash=sha256:c8532b79afc0375a85f10433eca157d6b50f7d6990f337fa498c96cd4bfc203d \
+    # via twine
+requests-toolbelt==0.9.1 \
+    --hash=sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f \
+    --hash=sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0 \
+    # via twine
+requests==2.22.0 \
+    --hash=sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4 \
+    --hash=sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31 \
+    # via pypsrp, requests-toolbelt, twine
+s3transfer==0.2.1 \
+    --hash=sha256:6efc926738a3cd576c2a79725fed9afde92378aa5c6a957e3af010cb019fac9d \
+    --hash=sha256:b780f2411b824cb541dbcd2c713d0cb61c7d1bcadae204cdddda2b35cef493ba \
     # via boto3
 six==1.12.0 \
     --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
     --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
-    # via bcrypt, cryptography, pynacl, pypsrp, python-dateutil
-urllib3==1.24.2 \
-    --hash=sha256:4c291ca23bbb55c76518905869ef34bdd5f0e46af7afe6861e8375643ffee1a0 \
-    --hash=sha256:9a247273df709c4fedb38c711e44292304f73f39ab01beda9f6b9fc375669ac3 \
+    # via bcrypt, bleach, cryptography, pynacl, pypsrp, python-dateutil, readme-renderer
+tqdm==4.36.1 \
+    --hash=sha256:abc25d0ce2397d070ef07d8c7e706aede7920da163c64997585d42d3537ece3d \
+    --hash=sha256:dd3fcca8488bb1d416aa7469d2f277902f26260c45aa86b667b074cd44b3b115 \
+    # via twine
+twine==2.0.0 \
+    --hash=sha256:5319dd3e02ac73fcddcd94f035b9631589ab5d23e1f4699d57365199d85261e1 \
+    --hash=sha256:9fe7091715c7576df166df8ef6654e61bada39571783f2fd415bdcba867c6993
+urllib3==1.25.6 \
+    --hash=sha256:3de946ffbed6e6746608990594d08faac602528ac7015ac28d33cee6a45b7398 \
+    --hash=sha256:9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86 \
     # via botocore, requests
+webencodings==0.5.1 \
+    --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \
+    --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 \
+    # via bleach
+
+# WARNING: The following packages were not pinned, but pip requires them to be
+# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
+# setuptools==41.2.0        # via twine
--- a/contrib/automation/requirements.txt.in	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/automation/requirements.txt.in	Mon Oct 21 11:09:48 2019 -0400
@@ -1,3 +1,4 @@
 boto3
 paramiko
 pypsrp
+twine
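
The .in file lists only the direct dependencies; the hash-pinned
requirements.txt above is generated from it. A minimal sketch of the
regeneration step, assuming pip-tools is installed (the exact invocation
used by the automation is not part of this diff):

    # a hedged sketch: rebuild the hash-pinned file after adding twine,
    # assuming pip-tools provides the pip-compile entry point
    import subprocess

    subprocess.check_call([
        'pip-compile',
        '--generate-hashes',
        'contrib/automation/requirements.txt.in',
        '--output-file', 'contrib/automation/requirements.txt',
    ])
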
--- a/contrib/bdiff-torture.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/bdiff-torture.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,17 +6,23 @@
 
 from mercurial import (
     mdiff,
+    pycompat,
 )
 
+
 def reducetest(a, b):
     tries = 0
     reductions = 0
     print("reducing...")
     while tries < 1000:
-        a2 = "\n".join(l for l in a.splitlines()
-                       if random.randint(0, 100) > 0) + "\n"
-        b2 = "\n".join(l for l in b.splitlines()
-                       if random.randint(0, 100) > 0) + "\n"
+        a2 = (
+            "\n".join(l for l in a.splitlines() if random.randint(0, 100) > 0)
+            + "\n"
+        )
+        b2 = (
+            "\n".join(l for l in b.splitlines() if random.randint(0, 100) > 0)
+            + "\n"
+        )
         if a2 == a and b2 == b:
             continue
         if a2 == b2:
@@ -31,8 +37,7 @@
             a = a2
             b = b2
 
-    print("reduced:", reductions, len(a) + len(b),
-          repr(a), repr(b))
+    print("reduced:", reductions, len(a) + len(b), repr(a), repr(b))
     try:
         test1(a, b)
     except Exception as inst:
@@ -40,6 +45,7 @@
 
     sys.exit(0)
 
+
 def test1(a, b):
     d = mdiff.textdiff(a, b)
     if not d:
@@ -48,23 +54,25 @@
     if c != b:
         raise ValueError("bad")
 
+
 def testwrap(a, b):
     try:
         test1(a, b)
         return
     except Exception as inst:
-        pass
-    print("exception:", inst)
+        print("exception:", inst)
     reducetest(a, b)
 
+
 def test(a, b):
     testwrap(a, b)
     testwrap(b, a)
 
+
 def rndtest(size, noise):
     a = []
     src = "                aaaaaaaabbbbccd"
-    for x in xrange(size):
+    for x in pycompat.xrange(size):
         a.append(src[random.randint(0, len(src) - 1)])
 
     while True:
@@ -82,6 +90,7 @@
 
     test(a, b)
 
+
 maxvol = 10000
 startsize = 2
 while True:
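
For context, test1() above checks Mercurial's binary-delta round trip:
mdiff.textdiff() produces a delta and mdiff.patches() applies it. A minimal
standalone sketch of that invariant, assuming the mercurial package is
importable:

    from mercurial import mdiff

    a = b"one\ntwo\nthree\n"
    b = b"one\nTWO\nthree\n"
    d = mdiff.textdiff(a, b)           # binary delta transforming a into b
    assert mdiff.patches(a, [d]) == b  # applying it must reproduce b exactly
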
--- a/contrib/benchmarks/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/benchmarks/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -44,15 +44,24 @@
     util,
 )
 
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                          os.path.pardir, os.path.pardir))
+basedir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
+)
 reposdir = os.environ['REPOS_DIR']
-reposnames = [name for name in os.listdir(reposdir)
-              if os.path.isdir(os.path.join(reposdir, name, ".hg"))]
+reposnames = [
+    name
+    for name in os.listdir(reposdir)
+    if os.path.isdir(os.path.join(reposdir, name, ".hg"))
+]
 if not reposnames:
     raise ValueError("No repositories found in $REPO_DIR")
-outputre = re.compile((r'! wall (\d+.\d+) comb \d+.\d+ user \d+.\d+ sys '
-                       r'\d+.\d+ \(best of \d+\)'))
+outputre = re.compile(
+    (
+        r'! wall (\d+.\d+) comb \d+.\d+ user \d+.\d+ sys '
+        r'\d+.\d+ \(best of \d+\)'
+    )
+)
+
 
 def runperfcommand(reponame, command, *args, **kwargs):
     os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
@@ -63,8 +72,9 @@
     else:
         ui = uimod.ui()
     repo = hg.repository(ui, os.path.join(reposdir, reponame))
-    perfext = extensions.load(ui, 'perfext',
-                              os.path.join(basedir, 'contrib', 'perf.py'))
+    perfext = extensions.load(
+        ui, 'perfext', os.path.join(basedir, 'contrib', 'perf.py')
+    )
     cmd = getattr(perfext, command)
     ui.pushbuffer()
     cmd(ui, repo, *args, **kwargs)
@@ -74,6 +84,7 @@
         raise ValueError("Invalid output {0}".format(output))
     return float(match.group(1))
 
+
 def perfbench(repos=reposnames, name=None, params=None):
     """decorator to declare ASV benchmark based on contrib/perf.py extension
 
@@ -104,10 +115,12 @@
         def wrapped(repo, *args):
             def perf(command, *a, **kw):
                 return runperfcommand(repo, command, *a, **kw)
+
             return func(perf, *args)
 
         wrapped.params = [p[1] for p in params]
         wrapped.param_names = [p[0] for p in params]
         wrapped.pretty_name = name
         return wrapped
+
     return decorator
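
The outputre above scrapes the wall time out of contrib/perf.py's
human-readable output. An illustrative parse with the same pattern (the
sample timings are made up):

    import re

    outputre = re.compile(
        r'! wall (\d+.\d+) comb \d+.\d+ user \d+.\d+ sys '
        r'\d+.\d+ \(best of \d+\)'
    )
    line = "! wall 0.000512 comb 0.010000 user 0.010000 sys 0.000000 (best of 25)"
    match = outputre.search(line)
    assert match is not None and float(match.group(1)) == 0.000512
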
--- a/contrib/benchmarks/perf.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/benchmarks/perf.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,18 +9,22 @@
 
 from . import perfbench
 
+
 @perfbench()
 def track_tags(perf):
     return perf("perftags")
 
+
 @perfbench()
 def track_status(perf):
     return perf("perfstatus", unknown=False)
 
+
 @perfbench(params=[('rev', ['1000', '10000', 'tip'])])
 def track_manifest(perf, rev):
     return perf("perfmanifest", rev)
 
+
 @perfbench()
 def track_heads(perf):
     return perf("perfheads")
--- a/contrib/benchmarks/revset.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/benchmarks/revset.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,15 +18,16 @@
 
 from . import basedir, perfbench
 
+
 def createrevsetbenchmark(baseset, variants=None):
     if variants is None:
         # Default variants
-        variants = ["plain", "first", "last", "sort", "sort+first",
-                    "sort+last"]
-    fname = "track_" + "_".join("".join([
-        c if c in string.digits + string.letters else " "
-        for c in baseset
-    ]).split())
+        variants = ["plain", "first", "last", "sort", "sort+first", "sort+last"]
+    fname = "track_" + "_".join(
+        "".join(
+            [c if c in string.digits + string.letters else " " for c in baseset]
+        ).split()
+    )
 
     def wrap(fname, baseset):
         @perfbench(name=baseset, params=[("variant", variants)])
@@ -36,18 +37,21 @@
                 for var in variant.split("+"):
                     revset = "%s(%s)" % (var, revset)
             return perf("perfrevset", revset)
+
         f.__name__ = fname
         return f
+
     return wrap(fname, baseset)
 
+
 def initializerevsetbenchmarks():
     mod = sys.modules[__name__]
-    with open(os.path.join(basedir, 'contrib', 'base-revsets.txt'),
-              'rb') as fh:
+    with open(os.path.join(basedir, 'contrib', 'base-revsets.txt'), 'rb') as fh:
         for line in fh:
             baseset = line.strip()
             if baseset and not baseset.startswith('#'):
                 func = createrevsetbenchmark(baseset)
                 setattr(mod, func.__name__, func)
 
+
 initializerevsetbenchmarks()
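
createrevsetbenchmark() mangles each revset into a valid Python identifier
so ASV can discover it: any character that is not a letter or digit becomes
a word separator. A worked example of the derivation (the revset is invented
for illustration; note the code above keeps the Python 2 spelling
string.letters, so the sketch hedges for both versions):

    import string

    letters = getattr(string, 'letters', string.ascii_letters)
    baseset = "max(::(tip~20) - obsolete())"
    fname = "track_" + "_".join(
        "".join(
            [c if c in string.digits + letters else " " for c in baseset]
        ).split()
    )
    assert fname == "track_max_tip_20_obsolete"
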
--- a/contrib/byteify-strings.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/byteify-strings.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,10 +18,13 @@
 import token
 import tokenize
 
+
 def adjusttokenpos(t, ofs):
     """Adjust start/end column of the given token"""
-    return t._replace(start=(t.start[0], t.start[1] + ofs),
-                      end=(t.end[0], t.end[1] + ofs))
+    return t._replace(
+        start=(t.start[0], t.start[1] + ofs), end=(t.end[0], t.end[1] + ofs)
+    )
+
 
 def replacetokens(tokens, opts):
     """Transform a stream of tokens from raw to Python 3.
@@ -78,23 +81,68 @@
         already been done.
 
         """
-        st = tokens[j]
-        if st.type == token.STRING and st.string.startswith(("'", '"')):
-            sysstrtokens.add(st)
+        k = j
+        currtoken = tokens[k]
+        while currtoken.type in (token.STRING, token.NEWLINE, tokenize.NL):
+            k += 1
+            if currtoken.type == token.STRING and currtoken.string.startswith(
+                ("'", '"')
+            ):
+                sysstrtokens.add(currtoken)
+            try:
+                currtoken = tokens[k]
+            except IndexError:
+                break
+
+    def _isitemaccess(j):
+        """Assert the next tokens form an item access on `tokens[j]` and that
+        `tokens[j]` is a name.
+        """
+        try:
+            return (
+                tokens[j].type == token.NAME
+                and _isop(j + 1, '[')
+                and tokens[j + 2].type == token.STRING
+                and _isop(j + 3, ']')
+            )
+        except IndexError:
+            return False
+
+    def _ismethodcall(j, *methodnames):
+        """Assert the next tokens form a call to `methodname` with a string
+        as first argument on `tokens[j]` and that `tokens[j]` is a name.
+        """
+        try:
+            return (
+                tokens[j].type == token.NAME
+                and _isop(j + 1, '.')
+                and tokens[j + 2].type == token.NAME
+                and tokens[j + 2].string in methodnames
+                and _isop(j + 3, '(')
+                and tokens[j + 4].type == token.STRING
+            )
+        except IndexError:
+            return False
 
     coldelta = 0  # column increment for new opening parens
     coloffset = -1  # column offset for the current line (-1: TBD)
-    parens = [(0, 0, 0)]  # stack of (line, end-column, column-offset)
+    parens = [(0, 0, 0, -1)]  # stack of (line, end-column, column-offset, type)
+    ignorenextline = False  # don't transform the next line
+    insideignoreblock = False  # don't transform until turned off
     for i, t in enumerate(tokens):
         # Compute the column offset for the current line, such that
         # the current line will be aligned to the last opening paren
         # as before.
         if coloffset < 0:
-            if t.start[1] == parens[-1][1]:
-                coloffset = parens[-1][2]
-            elif t.start[1] + 1 == parens[-1][1]:
+            lastparen = parens[-1]
+            if t.start[1] == lastparen[1]:
+                coloffset = lastparen[2]
+            elif t.start[1] + 1 == lastparen[1] and lastparen[3] not in (
+                token.NEWLINE,
+                tokenize.NL,
+            ):
                 # fix misaligned indent of s/util.Abort/error.Abort/
-                coloffset = parens[-1][2] + (parens[-1][1] - t.start[1])
+                coloffset = lastparen[2] + (lastparen[1] - t.start[1])
             else:
                 coloffset = 0
 
@@ -103,11 +151,26 @@
             yield adjusttokenpos(t, coloffset)
             coldelta = 0
             coloffset = -1
+            if not insideignoreblock:
+                ignorenextline = (
+                    tokens[i - 1].type == token.COMMENT
+                    and tokens[i - 1].string == "# no-py3-transform"
+                )
+            continue
+
+        if t.type == token.COMMENT:
+            if t.string == "# py3-transform: off":
+                insideignoreblock = True
+            if t.string == "# py3-transform: on":
+                insideignoreblock = False
+
+        if ignorenextline or insideignoreblock:
+            yield adjusttokenpos(t, coloffset)
             continue
 
         # Remember the last paren position.
         if _isop(i, '(', '[', '{'):
-            parens.append(t.end + (coloffset + coldelta,))
+            parens.append(t.end + (coloffset + coldelta, tokens[i + 1].type))
         elif _isop(i, ')', ']', '}'):
             parens.pop()
 
@@ -129,8 +192,10 @@
             # components touching docstrings need to handle unicode,
             # unfortunately.
             if s[0:3] in ("'''", '"""'):
-                yield adjusttokenpos(t, coloffset)
-                continue
+                # If it's assigned to something, it's not a docstring
+                if not _isop(i - 1, '='):
+                    yield adjusttokenpos(t, coloffset)
+                    continue
 
             # If the first character isn't a quote, it is likely a string
             # prefixing character (such as 'b', 'u', or 'r'. Ignore.
@@ -139,8 +204,7 @@
                 continue
 
             # String literal. Prefix to make a b'' string.
-            yield adjusttokenpos(t._replace(string='b%s' % t.string),
-                                 coloffset)
+            yield adjusttokenpos(t._replace(string='b%s' % t.string), coloffset)
             coldelta += 1
             continue
 
@@ -149,8 +213,15 @@
             fn = t.string
 
             # *attr() builtins don't accept byte strings to 2nd argument.
-            if (fn in ('getattr', 'setattr', 'hasattr', 'safehasattr') and
-                    not _isop(i - 1, '.')):
+            if fn in (
+                'getattr',
+                'setattr',
+                'hasattr',
+                'safehasattr',
+                'wrapfunction',
+                'wrapclass',
+                'addattr',
+            ) and (opts['allow-attr-methods'] or not _isop(i - 1, '.')):
                 arg1idx = _findargnofcall(1)
                 if arg1idx is not None:
                     _ensuresysstr(arg1idx)
@@ -169,19 +240,30 @@
                 yield adjusttokenpos(t._replace(string=fn[4:]), coloffset)
                 continue
 
+        if t.type == token.NAME and t.string in opts['treat-as-kwargs']:
+            if _isitemaccess(i):
+                _ensuresysstr(i + 2)
+            if _ismethodcall(i, 'get', 'pop', 'setdefault', 'popitem'):
+                _ensuresysstr(i + 4)
+
         # Looks like "if __name__ == '__main__'".
-        if (t.type == token.NAME and t.string == '__name__'
-            and _isop(i + 1, '==')):
+        if (
+            t.type == token.NAME
+            and t.string == '__name__'
+            and _isop(i + 1, '==')
+        ):
             _ensuresysstr(i + 2)
 
         # Emit unmodified token.
         yield adjusttokenpos(t, coloffset)
 
+
 def process(fin, fout, opts):
     tokens = tokenize.tokenize(fin.readline)
     tokens = replacetokens(list(tokens), opts)
     fout.write(tokenize.untokenize(tokens))
 
+
 def tryunlink(fname):
     try:
         os.unlink(fname)
@@ -189,12 +271,14 @@
         if err.errno != errno.ENOENT:
             raise
 
+
 @contextlib.contextmanager
 def editinplace(fname):
     n = os.path.basename(fname)
     d = os.path.dirname(fname)
-    fp = tempfile.NamedTemporaryFile(prefix='.%s-' % n, suffix='~', dir=d,
-                                     delete=False)
+    fp = tempfile.NamedTemporaryFile(
+        prefix='.%s-' % n, suffix='~', dir=d, delete=False
+    )
     try:
         yield fp
         fp.close()
@@ -205,16 +289,43 @@
         fp.close()
         tryunlink(fp.name)
 
+
 def main():
     ap = argparse.ArgumentParser()
-    ap.add_argument('-i', '--inplace', action='store_true', default=False,
-                    help='edit files in place')
-    ap.add_argument('--dictiter', action='store_true', default=False,
-                    help='rewrite iteritems() and itervalues()'),
+    ap.add_argument(
+        '--version', action='version', version='Byteify strings 1.0'
+    )
+    ap.add_argument(
+        '-i',
+        '--inplace',
+        action='store_true',
+        default=False,
+        help='edit files in place',
+    )
+    ap.add_argument(
+        '--dictiter',
+        action='store_true',
+        default=False,
+        help='rewrite iteritems() and itervalues()',
+    )
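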
+    ap.add_argument(
+        '--allow-attr-methods',
+        action='store_true',
+        default=False,
+        help='also handle attr*() when they are methods',
+    )
+    ap.add_argument(
+        '--treat-as-kwargs',
+        nargs="+",
+        default=[],
+        help="ignore kwargs-like objects",
+    )
     ap.add_argument('files', metavar='FILE', nargs='+', help='source file')
     args = ap.parse_args()
     opts = {
         'dictiter': args.dictiter,
+        'treat-as-kwargs': set(args.treat_as_kwargs),
+        'allow-attr-methods': args.allow_attr_methods,
     }
     for fname in args.files:
         if args.inplace:
@@ -226,6 +337,7 @@
                 fout = sys.stdout.buffer
                 process(fin, fout, opts)
 
+
 if __name__ == '__main__':
     if sys.version_info.major < 3:
         print('This script must be run under Python 3.')
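
Putting the new knobs together, a hypothetical input fragment and the
transforms it would receive when the script is run as
python3 contrib/byteify-strings.py -i --dictiter --treat-as-kwargs kwargs FILE
(the file and the kwargs-object name are invented for illustration):

    d['key'] = 1            # ordinary literal: becomes d[b'key'] = 1
    kwargs.get('flag')      # stays native: 'kwargs' named via --treat-as-kwargs
    # no-py3-transform
    x = 'kept native'       # the single line after the marker is skipped
    # py3-transform: off
    legacy = 'kept native'  # everything in this block is skipped ...
    # py3-transform: on
    y = 'converted'         # ... until the "on" marker; becomes y = b'converted'
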
--- a/contrib/casesmash.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/casesmash.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,12 +1,12 @@
 from __future__ import absolute_import
 import __builtin__
 import os
-from mercurial import (
-    util,
-)
+from mercurial import util
+
 
 def lowerwrap(scope, funcname):
     f = getattr(scope, funcname)
+
     def wrap(fname, *args, **kwargs):
         d, base = os.path.split(fname)
         try:
@@ -19,11 +19,14 @@
             if fn.lower() == base.lower():
                 return f(os.path.join(d, fn), *args, **kwargs)
         return f(fname, *args, **kwargs)
+
     scope.__dict__[funcname] = wrap
 
+
 def normcase(path):
     return path.lower()
 
+
 os.path.normcase = normcase
 
 for f in 'file open'.split():
--- a/contrib/catapipe.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/catapipe.py	Mon Oct 21 11:09:48 2019 -0400
@@ -53,15 +53,28 @@
 # Python version and OS
 timer = timeit.default_timer
 
+
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('pipe', type=str, nargs=1,
-                        help='Path of named pipe to create and listen on.')
-    parser.add_argument('output', default='trace.json', type=str, nargs='?',
-                        help='Path of json file to create where the traces '
-                             'will be stored.')
-    parser.add_argument('--debug', default=False, action='store_true',
-                        help='Print useful debug messages')
+    parser.add_argument(
+        'pipe',
+        type=str,
+        nargs=1,
+        help='Path of named pipe to create and listen on.',
+    )
+    parser.add_argument(
+        'output',
+        default='trace.json',
+        type=str,
+        nargs='?',
+        help='Path of json file to create where the traces ' 'will be stored.',
+    )
+    parser.add_argument(
+        '--debug',
+        default=False,
+        action='store_true',
+        help='Print useful debug messages',
+    )
     args = parser.parse_args()
     fn = args.pipe[0]
     os.mkfifo(fn)
@@ -86,19 +99,23 @@
                     payload_args = {}
                 pid = _threadmap[session]
                 ts_micros = (now - start) * 1000000
-                out.write(json.dumps(
-                    {
-                        "name": label,
-                        "cat": "misc",
-                        "ph": _TYPEMAP[verb],
-                        "ts": ts_micros,
-                        "pid": pid,
-                        "tid": 1,
-                        "args": payload_args,
-                    }))
+                out.write(
+                    json.dumps(
+                        {
+                            "name": label,
+                            "cat": "misc",
+                            "ph": _TYPEMAP[verb],
+                            "ts": ts_micros,
+                            "pid": pid,
+                            "tid": 1,
+                            "args": payload_args,
+                        }
+                    )
+                )
                 out.write(',\n')
     finally:
         os.unlink(fn)
 
+
 if __name__ == '__main__':
     main()
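
Each record the loop above writes is one event in the Chrome trace-event
format, so the resulting trace.json can be opened in a trace viewer. An
illustrative record, assuming a begin-phase "B" mapping in _TYPEMAP (that
table is outside this hunk):

    import json

    event = {
        "name": "pull",  # the event label read from the named pipe
        "cat": "misc",
        "ph": "B",       # assumed begin marker from _TYPEMAP
        "ts": 1523.0,    # microseconds since the script's start time
        "pid": 1,        # session id assigned via _threadmap
        "tid": 1,
        "args": {},
    }
    print(json.dumps(event) + ',')  # the script likewise appends ',\n'
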
--- a/contrib/check-code.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/check-code.py	Mon Oct 21 11:09:48 2019 -0400
@@ -26,11 +26,15 @@
 import os
 import re
 import sys
+
 if sys.version_info[0] < 3:
     opentext = open
 else:
+
     def opentext(f):
         return open(f, encoding='latin1')
+
+
 try:
     xrange
 except NameError:
@@ -42,6 +46,7 @@
 
 import testparseutil
 
+
 def compilere(pat, multiline=False):
     if multiline:
         pat = '(?m)' + pat
@@ -52,10 +57,22 @@
             pass
     return re.compile(pat)
 
+
 # check "rules depending on implementation of repquote()" in each
 # patterns (especially pypats), before changing around repquote()
-_repquotefixedmap = {' ': ' ', '\n': '\n', '.': 'p', ':': 'q',
-                     '%': '%', '\\': 'b', '*': 'A', '+': 'P', '-': 'M'}
+_repquotefixedmap = {
+    ' ': ' ',
+    '\n': '\n',
+    '.': 'p',
+    ':': 'q',
+    '%': '%',
+    '\\': 'b',
+    '*': 'A',
+    '+': 'P',
+    '-': 'M',
+}
+
+
 def _repquoteencodechr(i):
     if i > 255:
         return 'u'
@@ -67,13 +84,17 @@
     if c.isdigit():
         return 'n'
     return 'o'
+
+
 _repquotett = ''.join(_repquoteencodechr(i) for i in xrange(256))
 
+
 def repquote(m):
     t = m.group('text')
     t = t.translate(_repquotett)
     return m.group('quote') + t + m.group('quote')
 
+
 def reppython(m):
     comment = m.group('comment')
     if comment:
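
The cryptic [xpqo%APM] character classes in the patterns below decode
against repquote() above: quoted text is rewritten character by character so
rules can match the shape of string contents. A worked example of the
mapping (letters -> 'o', digits -> 'n', '.' -> 'p', ':' -> 'q', '*' -> 'A',
'+' -> 'P', '-' -> 'M'; spaces and '%' map to themselves):

    before = '"wall 3.2: *"'
    after = '"oooo npnq A"'  # what repquote() yields; the quotes survive
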
@@ -81,87 +102,103 @@
         return "#" * l + comment[l:]
     return repquote(m)
 
+
 def repcomment(m):
     return m.group(1) + "#" * len(m.group(2))
 
+
 def repccomment(m):
     t = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
     return m.group(1) + t + "*/"
 
+
 def repcallspaces(m):
     t = re.sub(r"\n\s+", "\n", m.group(2))
     return m.group(1) + t
 
+
 def repinclude(m):
     return m.group(1) + "<foo>"
 
+
 def rephere(m):
     t = re.sub(r"\S", "x", m.group(2))
     return m.group(1) + t
 
 
 testpats = [
-  [
-    (r'\b(push|pop)d\b', "don't use 'pushd' or 'popd', use 'cd'"),
-    (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
-    (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
-    (r'(?<!hg )grep.* -a', "don't use 'grep -a', use in-line python"),
-    (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
-    (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
-    (r'echo -n', "don't use 'echo -n', use printf"),
-    (r'(^|\|\s*)\bwc\b[^|]*$\n(?!.*\(re\))', "filter wc output"),
-    (r'head -c', "don't use 'head -c', use 'dd'"),
-    (r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
-    (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
-    (r'\bls\b.*-\w*R', "don't use 'ls -R', use 'find'"),
-    (r'printf.*[^\\]\\([1-9]|0\d)', r"don't use 'printf \NNN', use Python"),
-    (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
-    (r'\$\(.*\)', "don't use $(expr), use `expr`"),
-    (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
-    (r'\[[^\]]+==', '[ foo == bar ] is a bashism, use [ foo = bar ] instead'),
-    (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
-     "use egrep for extended grep syntax"),
-    (r'(^|\|\s*)e?grep .*\\S', "don't use \\S in regular expression"),
-    (r'(?<!!)/bin/', "don't use explicit paths for tools"),
-    (r'#!.*/bash', "don't use bash in shebang, use sh"),
-    (r'[^\n]\Z', "no trailing newline"),
-    (r'export .*=', "don't export and assign at once"),
-    (r'^source\b', "don't use 'source', use '.'"),
-    (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
-    (r'\bls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
-    (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
-    (r'^stop\(\)', "don't use 'stop' as a shell function name"),
-    (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
-    (r'\[\[\s+[^\]]*\]\]', "don't use '[[ ]]', use '[ ]'"),
-    (r'^alias\b.*=', "don't use alias, use a function"),
-    (r'if\s*!', "don't use '!' to negate exit status"),
-    (r'/dev/u?random', "don't use entropy, use /dev/zero"),
-    (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
-    (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
-     "put a backslash-escaped newline after sed 'i' command"),
-    (r'^diff *-\w*[uU].*$\n(^  \$ |^$)', "prefix diff -u/-U with cmp"),
-    (r'^\s+(if)? diff *-\w*[uU]', "prefix diff -u/-U with cmp"),
-    (r'[\s="`\']python\s(?!bindings)', "don't use 'python', use '$PYTHON'"),
-    (r'seq ', "don't use 'seq', use $TESTDIR/seq.py"),
-    (r'\butil\.Abort\b', "directly use error.Abort"),
-    (r'\|&', "don't use |&, use 2>&1"),
-    (r'\w =  +\w', "only one space after = allowed"),
-    (r'\bsed\b.*[^\\]\\n', "don't use 'sed ... \\n', use a \\ and a newline"),
-    (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'"),
-    (r'cp.* -r ', "don't use 'cp -r', use 'cp -R'"),
-    (r'grep.* -[ABC]', "don't use grep's context flags"),
-    (r'find.*-printf',
-     "don't use 'find -printf', it doesn't exist on BSD find(1)"),
-    (r'\$RANDOM ', "don't use bash-only $RANDOM to generate random values"),
-  ],
-  # warnings
-  [
-    (r'^function', "don't use 'function', use old style"),
-    (r'^diff.*-\w*N', "don't use 'diff -N'"),
-    (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`"),
-    (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
-    (r'kill (`|\$\()', "don't use kill, use killdaemons.py")
-  ]
+    [
+        (r'\b(push|pop)d\b', "don't use 'pushd' or 'popd', use 'cd'"),
+        (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
+        (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
+        (r'(?<!hg )grep.* -a', "don't use 'grep -a', use in-line python"),
+        (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
+        (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
+        (r'echo -n', "don't use 'echo -n', use printf"),
+        (r'(^|\|\s*)\bwc\b[^|]*$\n(?!.*\(re\))', "filter wc output"),
+        (r'head -c', "don't use 'head -c', use 'dd'"),
+        (r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
+        (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
+        (r'\bls\b.*-\w*R', "don't use 'ls -R', use 'find'"),
+        (r'printf.*[^\\]\\([1-9]|0\d)', r"don't use 'printf \NNN', use Python"),
+        (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
+        (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
+        (
+            r'\[[^\]]+==',
+            '[ foo == bar ] is a bashism, use [ foo = bar ] instead',
+        ),
+        (
+            r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
+            "use egrep for extended grep syntax",
+        ),
+        (r'(^|\|\s*)e?grep .*\\S', "don't use \\S in regular expression"),
+        (r'(?<!!)/bin/', "don't use explicit paths for tools"),
+        (r'#!.*/bash', "don't use bash in shebang, use sh"),
+        (r'[^\n]\Z', "no trailing newline"),
+        (r'export .*=', "don't export and assign at once"),
+        (r'^source\b', "don't use 'source', use '.'"),
+        (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
+        (r'\bls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
+        (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
+        (r'^stop\(\)', "don't use 'stop' as a shell function name"),
+        (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
+        (r'\[\[\s+[^\]]*\]\]', "don't use '[[ ]]', use '[ ]'"),
+        (r'^alias\b.*=', "don't use alias, use a function"),
+        (r'if\s*!', "don't use '!' to negate exit status"),
+        (r'/dev/u?random', "don't use entropy, use /dev/zero"),
+        (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
+        (
+            r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
+            "put a backslash-escaped newline after sed 'i' command",
+        ),
+        (r'^diff *-\w*[uU].*$\n(^  \$ |^$)', "prefix diff -u/-U with cmp"),
+        (r'^\s+(if)? diff *-\w*[uU]', "prefix diff -u/-U with cmp"),
+        (r'[\s="`\']python\s(?!bindings)', "don't use 'python', use '$PYTHON'"),
+        (r'seq ', "don't use 'seq', use $TESTDIR/seq.py"),
+        (r'\butil\.Abort\b', "directly use error.Abort"),
+        (r'\|&', "don't use |&, use 2>&1"),
+        (r'\w =  +\w', "only one space after = allowed"),
+        (
+            r'\bsed\b.*[^\\]\\n',
+            "don't use 'sed ... \\n', use a \\ and a newline",
+        ),
+        (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'"),
+        (r'cp.* -r ', "don't use 'cp -r', use 'cp -R'"),
+        (r'grep.* -[ABC]', "don't use grep's context flags"),
+        (
+            r'find.*-printf',
+            "don't use 'find -printf', it doesn't exist on BSD find(1)",
+        ),
+        (r'\$RANDOM ', "don't use bash-only $RANDOM to generate random values"),
+    ],
+    # warnings
+    [
+        (r'^function', "don't use 'function', use old style"),
+        (r'^diff.*-\w*N', "don't use 'diff -N'"),
+        (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`"),
+        (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
+        (r'kill (`|\$\()', "don't use kill, use killdaemons.py"),
+    ],
 ]
 
 testfilters = [
@@ -171,45 +208,72 @@
 
 uprefix = r"^  \$ "
 utestpats = [
-  [
-    (r'^(\S.*||  [$>] \S.*)[ \t]\n', "trailing whitespace on non-output"),
-    (uprefix + r'.*\|\s*sed[^|>\n]*\n',
-     "use regex test output patterns instead of sed"),
-    (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
-    (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"),
-    (uprefix + r'.*\|\| echo.*(fail|error)',
-     "explicit exit code checks unnecessary"),
-    (uprefix + r'set -e', "don't use set -e"),
-    (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"),
-    (uprefix + r'.*:\.\S*/', "x:.y in a path does not work on msys, rewrite "
-     "as x://.y, or see `hg log -k msys` for alternatives", r'-\S+:\.|' #-Rxxx
-     '# no-msys'), # in test-pull.t which is skipped on windows
-    (r'^  [^$>].*27\.0\.0\.1',
-     'use $LOCALIP not an explicit loopback address'),
-    (r'^  (?![>$] ).*\$LOCALIP.*[^)]$',
-     'mark $LOCALIP output lines with (glob) to help tests in BSD jails'),
-    (r'^  (cat|find): .*: \$ENOENT\$',
-     'use test -f to test for file existence'),
-    (r'^  diff -[^ -]*p',
-     "don't use (external) diff with -p for portability"),
-    (r' readlink ', 'use readlink.py instead of readlink'),
-    (r'^  [-+][-+][-+] .* [-+]0000 \(glob\)',
-     "glob timezone field in diff output for portability"),
-    (r'^  @@ -[0-9]+ [+][0-9]+,[0-9]+ @@',
-     "use '@@ -N* +N,n @@ (glob)' style chunk header for portability"),
-    (r'^  @@ -[0-9]+,[0-9]+ [+][0-9]+ @@',
-     "use '@@ -N,n +N* @@ (glob)' style chunk header for portability"),
-    (r'^  @@ -[0-9]+ [+][0-9]+ @@',
-     "use '@@ -N* +N* @@ (glob)' style chunk header for portability"),
-    (uprefix + r'hg( +-[^ ]+( +[^ ]+)?)* +extdiff'
-     r'( +(-[^ po-]+|--(?!program|option)[^ ]+|[^-][^ ]*))*$',
-     "use $RUNTESTDIR/pdiff via extdiff (or -o/-p for false-positives)"),
-  ],
-  # warnings
-  [
-    (r'^  (?!.*\$LOCALIP)[^*?/\n]* \(glob\)$',
-     "glob match with no glob string (?, *, /, and $LOCALIP)"),
-  ]
+    [
+        (r'^(\S.*||  [$>] \S.*)[ \t]\n', "trailing whitespace on non-output"),
+        (
+            uprefix + r'.*\|\s*sed[^|>\n]*\n',
+            "use regex test output patterns instead of sed",
+        ),
+        (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
+        (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"),
+        (
+            uprefix + r'.*\|\| echo.*(fail|error)',
+            "explicit exit code checks unnecessary",
+        ),
+        (uprefix + r'set -e', "don't use set -e"),
+        (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"),
+        (
+            uprefix + r'.*:\.\S*/',
+            "x:.y in a path does not work on msys, rewrite "
+            "as x://.y, or see `hg log -k msys` for alternatives",
+            r'-\S+:\.|' '# no-msys',  # -Rxxx
+        ),  # in test-pull.t which is skipped on windows
+        (
+            r'^  [^$>].*27\.0\.0\.1',
+            'use $LOCALIP not an explicit loopback address',
+        ),
+        (
+            r'^  (?![>$] ).*\$LOCALIP.*[^)]$',
+            'mark $LOCALIP output lines with (glob) to help tests in BSD jails',
+        ),
+        (
+            r'^  (cat|find): .*: \$ENOENT\$',
+            'use test -f to test for file existence',
+        ),
+        (
+            r'^  diff -[^ -]*p',
+            "don't use (external) diff with -p for portability",
+        ),
+        (r' readlink ', 'use readlink.py instead of readlink'),
+        (
+            r'^  [-+][-+][-+] .* [-+]0000 \(glob\)',
+            "glob timezone field in diff output for portability",
+        ),
+        (
+            r'^  @@ -[0-9]+ [+][0-9]+,[0-9]+ @@',
+            "use '@@ -N* +N,n @@ (glob)' style chunk header for portability",
+        ),
+        (
+            r'^  @@ -[0-9]+,[0-9]+ [+][0-9]+ @@',
+            "use '@@ -N,n +N* @@ (glob)' style chunk header for portability",
+        ),
+        (
+            r'^  @@ -[0-9]+ [+][0-9]+ @@',
+            "use '@@ -N* +N* @@ (glob)' style chunk header for portability",
+        ),
+        (
+            uprefix + r'hg( +-[^ ]+( +[^ ]+)?)* +extdiff'
+            r'( +(-[^ po-]+|--(?!program|option)[^ ]+|[^-][^ ]*))*$',
+            "use $RUNTESTDIR/pdiff via extdiff (or -o/-p for false-positives)",
+        ),
+    ],
+    # warnings
+    [
+        (
+            r'^  (?!.*\$LOCALIP)[^*?/\n]* \(glob\)$',
+            "glob match with no glob string (?, *, /, and $LOCALIP)",
+        ),
+    ],
 ]
 
 # transform plain test rules to unified test's
@@ -235,157 +299,214 @@
 
 # common patterns to check *.py
 commonpypats = [
-  [
-    (r'\\$', 'Use () to wrap long lines in Python, not \\'),
-    (r'^\s*def\s*\w+\s*\(.*,\s*\(',
-     "tuple parameter unpacking not available in Python 3+"),
-    (r'lambda\s*\(.*,.*\)',
-     "tuple parameter unpacking not available in Python 3+"),
-    (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
-    (r'(?<!\.)\breduce\s*\(.*', "reduce is not available in Python 3+"),
-    (r'\bdict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}',
-     'dict-from-generator'),
-    (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
-    (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
-    (r'^\s*\t', "don't use tabs"),
-    (r'\S;\s*\n', "semicolon"),
-    (r'[^_]_\([ \t\n]*(?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
-    (r"[^_]_\([ \t\n]*(?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
-    (r'(\w|\)),\w', "missing whitespace after ,"),
-    (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
-    (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
-    (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
-    ((
-        # a line ending with a colon, potentially with trailing comments
-        r':([ \t]*#[^\n]*)?\n'
-        # one that is not a pass and not only a comment
-        r'(?P<indent>[ \t]+)[^#][^\n]+\n'
-        # more lines at the same indent level
-        r'((?P=indent)[^\n]+\n)*'
-        # a pass at the same indent level, which is bogus
-        r'(?P=indent)pass[ \t\n#]'
-      ), 'omit superfluous pass'),
-    (r'[^\n]\Z', "no trailing newline"),
-    (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
-#    (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=',
-#     "don't use underbars in identifiers"),
-    (r'^\s+(self\.)?[A-Za-z][a-z0-9]+[A-Z]\w* = ',
-     "don't use camelcase in identifiers", r'#.*camelcase-required'),
-    (r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
-     "linebreak after :"),
-    (r'class\s[^( \n]+:', "old-style class, use class foo(object)",
-     r'#.*old-style'),
-    (r'class\s[^( \n]+\(\):',
-     "class foo() creates old style object, use class foo(object)",
-     r'#.*old-style'),
-    (r'\b(%s)\(' % '|'.join(k for k in keyword.kwlist
-                            if k not in ('print', 'exec')),
-     "Python keyword is not a function"),
-    (r',]', "unneeded trailing ',' in list"),
-#    (r'class\s[A-Z][^\(]*\((?!Exception)',
-#     "don't capitalize non-exception classes"),
-#    (r'in range\(', "use xrange"),
-#    (r'^\s*print\s+', "avoid using print in core and extensions"),
-    (r'[\x80-\xff]', "non-ASCII character literal"),
-    (r'("\')\.format\(', "str.format() has no bytes counterpart, use %"),
-    (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
-     "gratuitous whitespace after Python keyword"),
-    (r'([\(\[][ \t]\S)|(\S[ \t][\)\]])', "gratuitous whitespace in () or []"),
-#    (r'\s\s=', "gratuitous whitespace before ="),
-    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
-     "missing whitespace around operator"),
-    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
-     "missing whitespace around operator"),
-    (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
-     "missing whitespace around operator"),
-    (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]',
-     "wrong whitespace around ="),
-    (r'\([^()]*( =[^=]|[^<>!=]= )',
-     "no whitespace around = for named parameters"),
-    (r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
-     "don't use old-style two-argument raise, use Exception(message)"),
-    (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
-    (r' [=!]=\s+(True|False|None)',
-     "comparison with singleton, use 'is' or 'is not' instead"),
-    (r'^\s*(while|if) [01]:',
-     "use True/False for constant Boolean expression"),
-    (r'^\s*if False(:| +and)', 'Remove code instead of using `if False`'),
-    (r'(?:(?<!def)\s+|\()hasattr\(',
-     'hasattr(foo, bar) is broken on py2, use util.safehasattr(foo, bar) '
-     'instead', r'#.*hasattr-py3-only'),
-    (r'opener\([^)]*\).read\(',
-     "use opener.read() instead"),
-    (r'opener\([^)]*\).write\(',
-     "use opener.write() instead"),
-    (r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
-    (r'\.debug\(\_', "don't mark debug messages for translation"),
-    (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
-    (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
-    (r'^\s*except\s([^\(,]+|\([^\)]+\))\s*,',
-     'legacy exception syntax; use "as" instead of ","'),
-    (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
-    (r'\bdef\s+__bool__\b', "__bool__ should be __nonzero__ in Python 2"),
-    (r'os\.path\.join\(.*, *(""|\'\')\)',
-     "use pathutil.normasprefix(path) instead of os.path.join(path, '')"),
-    (r'\s0[0-7]+\b', 'legacy octal syntax; use "0o" prefix instead of "0"'),
-    # XXX only catch mutable arguments on the first line of the definition
-    (r'def.*[( ]\w+=\{\}', "don't use mutable default arguments"),
-    (r'\butil\.Abort\b', "directly use error.Abort"),
-    (r'^@(\w*\.)?cachefunc', "module-level @cachefunc is risky, please avoid"),
-    (r'^import Queue', "don't use Queue, use pycompat.queue.Queue + "
-                       "pycompat.queue.Empty"),
-    (r'^import cStringIO', "don't use cStringIO.StringIO, use util.stringio"),
-    (r'^import urllib', "don't use urllib, use util.urlreq/util.urlerr"),
-    (r'^import SocketServer', "don't use SockerServer, use util.socketserver"),
-    (r'^import urlparse', "don't use urlparse, use util.urlreq"),
-    (r'^import xmlrpclib', "don't use xmlrpclib, use util.xmlrpclib"),
-    (r'^import cPickle', "don't use cPickle, use util.pickle"),
-    (r'^import pickle', "don't use pickle, use util.pickle"),
-    (r'^import httplib', "don't use httplib, use util.httplib"),
-    (r'^import BaseHTTPServer', "use util.httpserver instead"),
-    (r'^(from|import) mercurial\.(cext|pure|cffi)',
-     "use mercurial.policy.importmod instead"),
-    (r'\.next\(\)', "don't use .next(), use next(...)"),
-    (r'([a-z]*).revision\(\1\.node\(',
-     "don't convert rev to node before passing to revision(nodeorrev)"),
-    (r'platform\.system\(\)', "don't use platform.system(), use pycompat"),
-
-  ],
-  # warnings
-  [
-  ]
+    [
+        (r'\\$', 'Use () to wrap long lines in Python, not \\'),
+        (
+            r'^\s*def\s*\w+\s*\(.*,\s*\(',
+            "tuple parameter unpacking not available in Python 3+",
+        ),
+        (
+            r'lambda\s*\(.*,.*\)',
+            "tuple parameter unpacking not available in Python 3+",
+        ),
+        (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
+        (r'(?<!\.)\breduce\s*\(.*', "reduce is not available in Python 3+"),
+        (
+            r'\bdict\(.*=',
+            'dict() is different in Py2 and 3 and is slower than {}',
+            'dict-from-generator',
+        ),
+        (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
+        (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
+        (r'^\s*\t', "don't use tabs"),
+        (r'\S;\s*\n', "semicolon"),
+        (r'[^_]_\([ \t\n]*(?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
+        (r"[^_]_\([ \t\n]*(?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
+        (r'(\w|\)),\w', "missing whitespace after ,"),
+        (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
+        (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
+        (
+            (
+                # a line ending with a colon, potentially with trailing comments
+                r':([ \t]*#[^\n]*)?\n'
+                # one that is not a pass and not only a comment
+                r'(?P<indent>[ \t]+)[^#][^\n]+\n'
+                # more lines at the same indent level
+                r'((?P=indent)[^\n]+\n)*'
+                # a pass at the same indent level, which is bogus
+                r'(?P=indent)pass[ \t\n#]'
+            ),
+            'omit superfluous pass',
+        ),
+        (r'[^\n]\Z', "no trailing newline"),
+        (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
+        (
+            r'^\s+(self\.)?[A-Za-z][a-z0-9]+[A-Z]\w* = ',
+            "don't use camelcase in identifiers",
+            r'#.*camelcase-required',
+        ),
+        (
+            r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
+            "linebreak after :",
+        ),
+        (
+            r'class\s[^( \n]+:',
+            "old-style class, use class foo(object)",
+            r'#.*old-style',
+        ),
+        (
+            r'class\s[^( \n]+\(\):',
+            "class foo() creates old style object, use class foo(object)",
+            r'#.*old-style',
+        ),
+        (
+            r'\b(%s)\('
+            % '|'.join(k for k in keyword.kwlist if k not in ('print', 'exec')),
+            "Python keyword is not a function",
+        ),
+        #    (r'class\s[A-Z][^\(]*\((?!Exception)',
+        #     "don't capitalize non-exception classes"),
+        #    (r'in range\(', "use xrange"),
+        #    (r'^\s*print\s+', "avoid using print in core and extensions"),
+        (r'[\x80-\xff]', "non-ASCII character literal"),
+        (r'("\')\.format\(', "str.format() has no bytes counterpart, use %"),
+        (
+            r'([\(\[][ \t]\S)|(\S[ \t][\)\]])',
+            "gratuitous whitespace in () or []",
+        ),
+        #    (r'\s\s=', "gratuitous whitespace before ="),
+        (
+            r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
+            "missing whitespace around operator",
+        ),
+        (
+            r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
+            "missing whitespace around operator",
+        ),
+        (
+            r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
+            "missing whitespace around operator",
+        ),
+        (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]', "wrong whitespace around ="),
+        (
+            r'\([^()]*( =[^=]|[^<>!=]= )',
+            "no whitespace around = for named parameters",
+        ),
+        (
+            r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
+            "don't use old-style two-argument raise, use Exception(message)",
+        ),
+        (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
+        (
+            r' [=!]=\s+(True|False|None)',
+            "comparison with singleton, use 'is' or 'is not' instead",
+        ),
+        (
+            r'^\s*(while|if) [01]:',
+            "use True/False for constant Boolean expression",
+        ),
+        (r'^\s*if False(:| +and)', 'Remove code instead of using `if False`'),
+        (
+            r'(?:(?<!def)\s+|\()hasattr\(',
+            'hasattr(foo, bar) is broken on py2, use util.safehasattr(foo, bar) '
+            'instead',
+            r'#.*hasattr-py3-only',
+        ),
+        (r'opener\([^)]*\).read\(', "use opener.read() instead"),
+        (r'opener\([^)]*\).write\(', "use opener.write() instead"),
+        (r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
+        (r'\.debug\(\_', "don't mark debug messages for translation"),
+        (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
+        (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
+        (
+            r'^\s*except\s([^\(,]+|\([^\)]+\))\s*,',
+            'legacy exception syntax; use "as" instead of ","',
+        ),
+        (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
+        (r'\bdef\s+__bool__\b', "__bool__ should be __nonzero__ in Python 2"),
+        (
+            r'os\.path\.join\(.*, *(""|\'\')\)',
+            "use pathutil.normasprefix(path) instead of os.path.join(path, '')",
+        ),
+        (r'\s0[0-7]+\b', 'legacy octal syntax; use "0o" prefix instead of "0"'),
+        # XXX only catch mutable arguments on the first line of the definition
+        (r'def.*[( ]\w+=\{\}', "don't use mutable default arguments"),
+        (r'\butil\.Abort\b', "directly use error.Abort"),
+        (
+            r'^@(\w*\.)?cachefunc',
+            "module-level @cachefunc is risky, please avoid",
+        ),
+        (
+            r'^import Queue',
+            "don't use Queue, use pycompat.queue.Queue + "
+            "pycompat.queue.Empty",
+        ),
+        (
+            r'^import cStringIO',
+            "don't use cStringIO.StringIO, use util.stringio",
+        ),
+        (r'^import urllib', "don't use urllib, use util.urlreq/util.urlerr"),
+        (
+            r'^import SocketServer',
+            "don't use SockerServer, use util.socketserver",
+        ),
+        (r'^import urlparse', "don't use urlparse, use util.urlreq"),
+        (r'^import xmlrpclib', "don't use xmlrpclib, use util.xmlrpclib"),
+        (r'^import cPickle', "don't use cPickle, use util.pickle"),
+        (r'^import pickle', "don't use pickle, use util.pickle"),
+        (r'^import httplib', "don't use httplib, use util.httplib"),
+        (r'^import BaseHTTPServer', "use util.httpserver instead"),
+        (
+            r'^(from|import) mercurial\.(cext|pure|cffi)',
+            "use mercurial.policy.importmod instead",
+        ),
+        (r'\.next\(\)', "don't use .next(), use next(...)"),
+        (
+            r'([a-z]*).revision\(\1\.node\(',
+            "don't convert rev to node before passing to revision(nodeorrev)",
+        ),
+        (r'platform\.system\(\)', "don't use platform.system(), use pycompat"),
+    ],
+    # warnings
+    [],
 ]
 
 # patterns to check normal *.py files
 pypats = [
-  [
-    # Ideally, these should be placed in "commonpypats" for
-    # consistency of coding rules in Mercurial source tree.
-    # But on the other hand, these are not so seriously required for
-    # python code fragments embedded in test scripts. Fixing test
-    # scripts for these patterns requires many changes, and has less
-    # profit than effort.
-    (r'.{81}', "line too long"),
-    (r'raise Exception', "don't raise generic exceptions"),
-    (r'[\s\(](open|file)\([^)]*\)\.read\(',
-     "use util.readfile() instead"),
-    (r'[\s\(](open|file)\([^)]*\)\.write\(',
-     "use util.writefile() instead"),
-    (r'^[\s\(]*(open(er)?|file)\([^)]*\)(?!\.close\(\))',
-     "always assign an opened file to a variable, and close it afterwards"),
-    (r'[\s\(](open|file)\([^)]*\)\.(?!close\(\))',
-     "always assign an opened file to a variable, and close it afterwards"),
-    (r':\n(    )*( ){1,3}[^ ]', "must indent 4 spaces"),
-    (r'^import atexit', "don't use atexit, use ui.atexit"),
-
-    # rules depending on implementation of repquote()
-    (r' x+[xpqo%APM][\'"]\n\s+[\'"]x',
-     'string join across lines with no space'),
-    (r'''(?x)ui\.(status|progress|write|note|warn)\(
+    [
+        # Ideally, these should be placed in "commonpypats" for
+        # consistency of coding rules in Mercurial source tree.
+        # But on the other hand, these are not so seriously required for
+        # python code fragments embedded in test scripts. Fixing test
+        # scripts for these patterns requires many changes, and has less
+        # profit than effort.
+        (r'raise Exception', "don't raise generic exceptions"),
+        (r'[\s\(](open|file)\([^)]*\)\.read\(', "use util.readfile() instead"),
+        (
+            r'[\s\(](open|file)\([^)]*\)\.write\(',
+            "use util.writefile() instead",
+        ),
+        (
+            r'^[\s\(]*(open(er)?|file)\([^)]*\)(?!\.close\(\))',
+            "always assign an opened file to a variable, and close it afterwards",
+        ),
+        (
+            r'[\s\(](open|file)\([^)]*\)\.(?!close\(\))',
+            "always assign an opened file to a variable, and close it afterwards",
+        ),
+        (r':\n(    )*( ){1,3}[^ ]', "must indent 4 spaces"),
+        (r'^import atexit', "don't use atexit, use ui.atexit"),
+        # rules depending on implementation of repquote()
+        (
+            r' x+[xpqo%APM][\'"]\n\s+[\'"]x',
+            'string join across lines with no space',
+        ),
+        (
+            r'''(?x)ui\.(status|progress|write|note|warn)\(
          [ \t\n#]*
          (?# any strings/comments might precede a string, which
            # contains translatable message)
-         ((['"]|\'\'\'|""")[ \npq%bAPMxno]*(['"]|\'\'\'|""")[ \t\n#]+)*
+         b?((['"]|\'\'\'|""")[ \npq%bAPMxno]*(['"]|\'\'\'|""")[ \t\n#]+)*
          (?# sequence consisting of below might precede translatable message
            # - formatting string: "% 10s", "%05d", "% -3.2f", "%*s", "%%" ...
            # - escaped character: "\\", "\n", "\0" ...
@@ -395,51 +516,55 @@
          (?# this regexp can't use [^...] style,
            # because _preparepats forcibly adds "\n" into [^...],
            # even though this regexp wants match it against "\n")''',
-     "missing _() in ui message (use () to hide false-positives)"),
-  ] + commonpypats[0],
-  # warnings
-  [
-    # rules depending on implementation of repquote()
-    (r'(^| )pp +xxxxqq[ \n][^\n]', "add two newlines after '.. note::'"),
-  ] + commonpypats[1]
+            "missing _() in ui message (use () to hide false-positives)",
+        ),
+    ]
+    + commonpypats[0],
+    # warnings
+    [
+        # rules depending on implementation of repquote()
+        (r'(^| )pp +xxxxqq[ \n][^\n]', "add two newlines after '.. note::'"),
+    ]
+    + commonpypats[1],
 ]
 
 # patterns to check *.py for embedded ones in test script
 embeddedpypats = [
-  [
-  ] + commonpypats[0],
-  # warnings
-  [
-  ] + commonpypats[1]
+    [] + commonpypats[0],
+    # warnings
+    [] + commonpypats[1],
 ]
 
 # common filters to convert *.py
 commonpyfilters = [
-    (r"""(?msx)(?P<comment>\#.*?$)|
+    (
+        r"""(?msx)(?P<comment>\#.*?$)|
          ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
           (?P<text>(([^\\]|\\.)*?))
-          (?P=quote))""", reppython),
+          (?P=quote))""",
+        reppython,
+    ),
 ]
 
 # filters to convert normal *.py files
-pyfilters = [
-] + commonpyfilters
+pyfilters = [] + commonpyfilters
 
 # non-filter patterns
 pynfpats = [
     [
-    (r'pycompat\.osname\s*[=!]=\s*[\'"]nt[\'"]', "use pycompat.iswindows"),
-    (r'pycompat\.osname\s*[=!]=\s*[\'"]posix[\'"]', "use pycompat.isposix"),
-    (r'pycompat\.sysplatform\s*[!=]=\s*[\'"]darwin[\'"]',
-     "use pycompat.isdarwin"),
+        (r'pycompat\.osname\s*[=!]=\s*[\'"]nt[\'"]', "use pycompat.iswindows"),
+        (r'pycompat\.osname\s*[=!]=\s*[\'"]posix[\'"]', "use pycompat.isposix"),
+        (
+            r'pycompat\.sysplatform\s*[!=]=\s*[\'"]darwin[\'"]',
+            "use pycompat.isdarwin",
+        ),
     ],
     # warnings
     [],
 ]
 
 # filters to convert *.py for embedded ones in test script
-embeddedpyfilters = [
-] + commonpyfilters
+embeddedpyfilters = [] + commonpyfilters
 
 # extension non-filter patterns
 pyextnfpats = [
@@ -451,42 +576,40 @@
 txtfilters = []
 
 txtpats = [
-  [
-    (r'\s$', 'trailing whitespace'),
-    ('.. note::[ \n][^\n]', 'add two newlines after note::')
-  ],
-  []
+    [
+        (r'\s$', 'trailing whitespace'),
+        ('.. note::[ \n][^\n]', 'add two newlines after note::'),
+    ],
+    [],
 ]
 
 cpats = [
-  [
-    (r'//', "don't use //-style comments"),
-    (r'\S\t', "don't use tabs except for indent"),
-    (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
-    (r'.{81}', "line too long"),
-    (r'(while|if|do|for)\(', "use space after while/if/do/for"),
-    (r'return\(', "return is not a function"),
-    (r' ;', "no space before ;"),
-    (r'[^;] \)', "no space before )"),
-    (r'[)][{]', "space between ) and {"),
-    (r'\w+\* \w+', "use int *foo, not int* foo"),
-    (r'\W\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
-    (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
-    (r'\w,\w', "missing whitespace after ,"),
-    (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
-    (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
-    (r'^#\s+\w', "use #foo, not # foo"),
-    (r'[^\n]\Z', "no trailing newline"),
-    (r'^\s*#import\b', "use only #include in standard C code"),
-    (r'strcpy\(', "don't use strcpy, use strlcpy or memcpy"),
-    (r'strcat\(', "don't use strcat"),
-
-    # rules depending on implementation of repquote()
-  ],
-  # warnings
-  [
-    # rules depending on implementation of repquote()
-  ]
+    [
+        (r'//', "don't use //-style comments"),
+        (r'\S\t', "don't use tabs except for indent"),
+        (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
+        (r'(while|if|do|for)\(', "use space after while/if/do/for"),
+        (r'return\(', "return is not a function"),
+        (r' ;', "no space before ;"),
+        (r'[^;] \)', "no space before )"),
+        (r'[)][{]', "space between ) and {"),
+        (r'\w+\* \w+', "use int *foo, not int* foo"),
+        (r'\W\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
+        (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
+        (r'\w,\w', "missing whitespace after ,"),
+        (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
+        (r'\w\s=\s\s+\w', "gratuitous whitespace after ="),
+        (r'^#\s+\w', "use #foo, not # foo"),
+        (r'[^\n]\Z', "no trailing newline"),
+        (r'^\s*#import\b', "use only #include in standard C code"),
+        (r'strcpy\(', "don't use strcpy, use strlcpy or memcpy"),
+        (r'strcat\(', "don't use strcat"),
+        # rules depending on implementation of repquote()
+    ],
+    # warnings
+    [
+        # rules depending on implementation of repquote()
+    ],
 ]
 
 cfilters = [
@@ -497,82 +620,109 @@
 ]
 
 inutilpats = [
-  [
-    (r'\bui\.', "don't use ui in util"),
-  ],
-  # warnings
-  []
+    [(r'\bui\.', "don't use ui in util"),],
+    # warnings
+    [],
 ]
 
 inrevlogpats = [
-  [
-    (r'\brepo\.', "don't use repo in revlog"),
-  ],
-  # warnings
-  []
+    [(r'\brepo\.', "don't use repo in revlog"),],
+    # warnings
+    [],
 ]
 
 webtemplatefilters = []
 
 webtemplatepats = [
-  [],
-  [
-    (r'{desc(\|(?!websub|firstline)[^\|]*)+}',
-     'follow desc keyword with either firstline or websub'),
-  ]
+    [],
+    [
+        (
+            r'{desc(\|(?!websub|firstline)[^\|]*)+}',
+            'follow desc keyword with either firstline or websub',
+        ),
+    ],
 ]
 
 allfilesfilters = []
 
 allfilespats = [
-  [
-    (r'(http|https)://[a-zA-Z0-9./]*selenic.com/',
-     'use mercurial-scm.org domain URL'),
-    (r'mercurial@selenic\.com',
-     'use mercurial-scm.org domain for mercurial ML address'),
-    (r'mercurial-devel@selenic\.com',
-     'use mercurial-scm.org domain for mercurial-devel ML address'),
-  ],
-  # warnings
-  [],
+    [
+        (
+            r'(http|https)://[a-zA-Z0-9./]*selenic.com/',
+            'use mercurial-scm.org domain URL',
+        ),
+        (
+            r'mercurial@selenic\.com',
+            'use mercurial-scm.org domain for mercurial ML address',
+        ),
+        (
+            r'mercurial-devel@selenic\.com',
+            'use mercurial-scm.org domain for mercurial-devel ML address',
+        ),
+    ],
+    # warnings
+    [],
 ]
 
 py3pats = [
-  [
-    (r'os\.environ', "use encoding.environ instead (py3)", r'#.*re-exports'),
-    (r'os\.name', "use pycompat.osname instead (py3)"),
-    (r'os\.getcwd', "use encoding.getcwd instead (py3)", r'#.*re-exports'),
-    (r'os\.sep', "use pycompat.ossep instead (py3)"),
-    (r'os\.pathsep', "use pycompat.ospathsep instead (py3)"),
-    (r'os\.altsep', "use pycompat.osaltsep instead (py3)"),
-    (r'sys\.platform', "use pycompat.sysplatform instead (py3)"),
-    (r'getopt\.getopt', "use pycompat.getoptb instead (py3)"),
-    (r'os\.getenv', "use encoding.environ.get instead"),
-    (r'os\.setenv', "modifying the environ dict is not preferred"),
-    (r'(?<!pycompat\.)xrange', "use pycompat.xrange instead (py3)"),
-  ],
-  # warnings
-  [],
+    [
+        (
+            r'os\.environ',
+            "use encoding.environ instead (py3)",
+            r'#.*re-exports',
+        ),
+        (r'os\.name', "use pycompat.osname instead (py3)"),
+        (r'os\.getcwd', "use encoding.getcwd instead (py3)", r'#.*re-exports'),
+        (r'os\.sep', "use pycompat.ossep instead (py3)"),
+        (r'os\.pathsep', "use pycompat.ospathsep instead (py3)"),
+        (r'os\.altsep', "use pycompat.osaltsep instead (py3)"),
+        (r'sys\.platform', "use pycompat.sysplatform instead (py3)"),
+        (r'getopt\.getopt', "use pycompat.getoptb instead (py3)"),
+        (r'os\.getenv', "use encoding.environ.get instead"),
+        (r'os\.setenv', "modifying the environ dict is not preferred"),
+        (r'(?<!pycompat\.)xrange', "use pycompat.xrange instead (py3)"),
+    ],
+    # warnings
+    [],
 ]
 
 checks = [
     ('python', r'.*\.(py|cgi)$', r'^#!.*python', pyfilters, pypats),
     ('python', r'.*\.(py|cgi)$', r'^#!.*python', [], pynfpats),
     ('python', r'.*hgext.*\.py$', '', [], pyextnfpats),
-    ('python 3', r'.*(hgext|mercurial)/(?!demandimport|policy|pycompat).*\.py',
-     '', pyfilters, py3pats),
+    (
+        'python 3',
+        r'.*(hgext|mercurial)/(?!demandimport|policy|pycompat).*\.py',
+        '',
+        pyfilters,
+        py3pats,
+    ),
     ('test script', r'(.*/)?test-[^.~]*$', '', testfilters, testpats),
     ('c', r'.*\.[ch]$', '', cfilters, cpats),
     ('unified test', r'.*\.t$', '', utestfilters, utestpats),
-    ('layering violation repo in revlog', r'mercurial/revlog\.py', '',
-     pyfilters, inrevlogpats),
-    ('layering violation ui in util', r'mercurial/util\.py', '', pyfilters,
-     inutilpats),
+    (
+        'layering violation repo in revlog',
+        r'mercurial/revlog\.py',
+        '',
+        pyfilters,
+        inrevlogpats,
+    ),
+    (
+        'layering violation ui in util',
+        r'mercurial/util\.py',
+        '',
+        pyfilters,
+        inutilpats,
+    ),
     ('txt', r'.*\.txt$', '', txtfilters, txtpats),
-    ('web template', r'mercurial/templates/.*\.tmpl', '',
-     webtemplatefilters, webtemplatepats),
-    ('all except for .po', r'.*(?<!\.po)$', '',
-     allfilesfilters, allfilespats),
+    (
+        'web template',
+        r'mercurial/templates/.*\.tmpl',
+        '',
+        webtemplatefilters,
+        webtemplatepats,
+    ),
+    ('all except for .po', r'.*(?<!\.po)$', '', allfilesfilters, allfilespats),
 ]
 
 # (desc,
@@ -580,10 +730,15 @@
 #  list of patterns to convert target files
 #  list of patterns to detect errors/warnings)
 embeddedchecks = [
-    ('embedded python',
-     testparseutil.pyembedded, embeddedpyfilters, embeddedpypats)
+    (
+        'embedded python',
+        testparseutil.pyembedded,
+        embeddedpyfilters,
+        embeddedpypats,
+    )
 ]
 
+
 def _preparepats():
     def preparefailandwarn(failandwarn):
         for pats in failandwarn:
@@ -612,6 +767,7 @@
             filters = c[-2]
             preparefilters(filters)
 
+
 class norepeatlogger(object):
     def __init__(self):
         self._lastseen = None
@@ -637,8 +793,10 @@
             self._lastseen = msgid
         print(" " + msg)
 
+
 _defaultlogger = norepeatlogger()
 
+
 def getblame(f):
     lines = []
     for l in os.popen('hg annotate -un %s' % f):
@@ -647,8 +805,16 @@
         lines.append((line[1:-1], user, rev))
     return lines
 
-def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
-              blame=False, debug=False, lineno=True):
+
+def checkfile(
+    f,
+    logfunc=_defaultlogger.log,
+    maxerr=None,
+    warnings=False,
+    blame=False,
+    debug=False,
+    lineno=True,
+):
     """checks style and portability of a given file
 
     :f: filepath
@@ -680,8 +846,9 @@
             print(name, f)
         if not (re.match(match, f) or (magic and re.search(magic, pre))):
             if debug:
-                print("Skipping %s for %s it doesn't match %s" % (
-                       name, match, f))
+                print(
+                    "Skipping %s for %s it doesn't match %s" % (name, match, f)
+                )
             continue
         if "no-" "check-code" in pre:
             # If you're looking at this line, it's because a file has:
@@ -691,16 +858,28 @@
             # spelling, we write it with the expected spelling from
             # tests/test-check-code.t
             print("Skipping %s it has no-che?k-code (glob)" % f)
-            return "Skip" # skip checking this file
+            return "Skip"  # skip checking this file
 
-        fc = _checkfiledata(name, f, pre, filters, pats, context,
-                            logfunc, maxerr, warnings, blame, debug, lineno)
+        fc = _checkfiledata(
+            name,
+            f,
+            pre,
+            filters,
+            pats,
+            context,
+            logfunc,
+            maxerr,
+            warnings,
+            blame,
+            debug,
+            lineno,
+        )
         if fc:
             result = False
 
     if f.endswith('.t') and "no-" "check-code" not in pre:
         if debug:
-            print("Checking embedded code in %s" % (f))
+            print("Checking embedded code in %s" % f)
 
         prelines = pre.splitlines()
         embeddederros = []
@@ -712,9 +891,21 @@
 
             for found in embedded(f, prelines, embeddederros):
                 filename, starts, ends, code = found
-                fc = _checkfiledata(name, f, code, filters, pats, context,
-                                    logfunc, curmaxerr, warnings, blame, debug,
-                                    lineno, offset=starts - 1)
+                fc = _checkfiledata(
+                    name,
+                    f,
+                    code,
+                    filters,
+                    pats,
+                    context,
+                    logfunc,
+                    curmaxerr,
+                    warnings,
+                    blame,
+                    debug,
+                    lineno,
+                    offset=starts - 1,
+                )
                 if fc:
                     result = False
                     if curmaxerr:
@@ -724,9 +915,22 @@
 
     return result
 
-def _checkfiledata(name, f, filedata, filters, pats, context,
-                   logfunc, maxerr, warnings, blame, debug, lineno,
-                   offset=None):
+
+def _checkfiledata(
+    name,
+    f,
+    filedata,
+    filters,
+    pats,
+    context,
+    logfunc,
+    maxerr,
+    warnings,
+    blame,
+    debug,
+    lineno,
+    offset=None,
+):
     """Execute actual error check for file data
 
     :name: of the checking category
@@ -759,10 +963,10 @@
     fc = 0
     pre = post = filedata
 
-    if True: # TODO: get rid of this redundant 'if' block
+    if True:  # TODO: get rid of this redundant 'if' block
         for p, r in filters:
             post = re.sub(p, r, post)
-        nerrs = len(pats[0]) # nerr elements are errors
+        nerrs = len(pats[0])  # nerr elements are errors
         if warnings:
             pats = pats[0] + pats[1]
         else:
@@ -801,8 +1005,10 @@
 
                 if ignore and re.search(ignore, l, re.MULTILINE):
                     if debug:
-                        print("Skipping %s for %s:%s (ignore pattern)" % (
-                            name, f, (n + lineoffset)))
+                        print(
+                            "Skipping %s for %s:%s (ignore pattern)"
+                            % (name, f, (n + lineoffset))
+                        )
                     continue
                 bd = ""
                 if blame:
@@ -837,21 +1043,38 @@
 
     return fc
 
+
 def main():
     parser = optparse.OptionParser("%prog [options] [files | -]")
-    parser.add_option("-w", "--warnings", action="store_true",
-                      help="include warning-level checks")
-    parser.add_option("-p", "--per-file", type="int",
-                      help="max warnings per file")
-    parser.add_option("-b", "--blame", action="store_true",
-                      help="use annotate to generate blame info")
-    parser.add_option("", "--debug", action="store_true",
-                      help="show debug information")
-    parser.add_option("", "--nolineno", action="store_false",
-                      dest='lineno', help="don't show line numbers")
+    parser.add_option(
+        "-w",
+        "--warnings",
+        action="store_true",
+        help="include warning-level checks",
+    )
+    parser.add_option(
+        "-p", "--per-file", type="int", help="max warnings per file"
+    )
+    parser.add_option(
+        "-b",
+        "--blame",
+        action="store_true",
+        help="use annotate to generate blame info",
+    )
+    parser.add_option(
+        "", "--debug", action="store_true", help="show debug information"
+    )
+    parser.add_option(
+        "",
+        "--nolineno",
+        action="store_false",
+        dest='lineno',
+        help="don't show line numbers",
+    )
 
-    parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
-                        lineno=True)
+    parser.set_defaults(
+        per_file=15, warnings=False, blame=False, debug=False, lineno=True
+    )
     (options, args) = parser.parse_args()
 
     if len(args) == 0:
@@ -866,11 +1089,17 @@
 
     ret = 0
     for f in check:
-        if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
-                         blame=options.blame, debug=options.debug,
-                         lineno=options.lineno):
+        if not checkfile(
+            f,
+            maxerr=options.per_file,
+            warnings=options.warnings,
+            blame=options.blame,
+            debug=options.debug,
+            lineno=options.lineno,
+        ):
             ret = 1
     return ret
 
+
 if __name__ == "__main__":
     sys.exit(main())
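
A minimal sketch of the checking model used above, with a made-up rule
table (the real ones are pypats, cpats, and friends): filters rewrite the
text first, then every error pattern is tried against each surviving
line::

    import re

    filters = [(r'#.*$', '')]                    # e.g. strip comments first
    rules = [(r'\t', "don't use tabs"),
             (r' +$', 'trailing whitespace')]

    def check(text):
        for pat, repl in filters:
            text = re.sub(pat, repl, text, flags=re.MULTILINE)
        errors = []
        for n, line in enumerate(text.splitlines(), 1):
            for pat, msg in rules:
                if re.search(pat, line):
                    errors.append((n, msg))
        return errors

    print(check("x = 1\t# tab\ny = 2  \n"))
    # [(1, "don't use tabs"), (2, 'trailing whitespace')]
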
--- a/contrib/check-commit	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/check-commit	Mon Oct 21 11:09:48 2019 -0400
@@ -39,12 +39,6 @@
      "summary keyword should be most user-relevant one-word command or topic"),
     (afterheader + r".*\.\s*\n", "don't add trailing period on summary line"),
     (afterheader + r".{79,}", "summary line too long (limit is 78)"),
-    # Forbid "_" in function name.
-    #
-    # We skip the check for cffi related functions. They use names mapping the
-    # name of the C function. C function names may contain "_".
-    (r"\n\+[ \t]+def (?!cffi)[a-z]+_[a-z]",
-     "adds a function with foo_bar naming"),
 ]
 
 word = re.compile(r'\S')
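
check-commit reads an exported patch, so its regexes are anchored with
afterheader; a standalone sketch of just the two summary-line rules kept
above, applied to a plain commit message (checksummary is an illustrative
name, not part of the script)::

    import re

    def checksummary(message):
        summary = message.splitlines()[0]
        problems = []
        if re.search(r'\.\s*$', summary):
            problems.append("don't add trailing period on summary line")
        if len(summary) > 78:
            problems.append('summary line too long (limit is 78)')
        return problems

    print(checksummary('fuzz: add a dirs fuzzer.\n\nlong description here'))
    # ["don't add trailing period on summary line"]
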
--- a/contrib/check-config.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/check-config.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,7 +15,8 @@
 documented = {}
 allowinconsistent = set()
 
-configre = re.compile(br'''
+configre = re.compile(
+    br'''
     # Function call
     ui\.config(?P<ctype>|int|bool|list)\(
         # First argument.
@@ -23,9 +24,12 @@
         # Second argument
         ['"](?P<option>\S+)['"](,\s+
         (?:default=)?(?P<default>\S+?))?
-    \)''', re.VERBOSE | re.MULTILINE)
+    \)''',
+    re.VERBOSE | re.MULTILINE,
+)
 
-configwithre = re.compile(br'''
+configwithre = re.compile(
+    br'''
     ui\.config(?P<ctype>with)\(
         # First argument is callback function. This doesn't parse robustly
         # if it is e.g. a function call.
@@ -33,23 +37,32 @@
         ['"](?P<section>\S+)['"],\s*
         ['"](?P<option>\S+)['"](,\s+
         (?:default=)?(?P<default>\S+?))?
-    \)''', re.VERBOSE | re.MULTILINE)
+    \)''',
+    re.VERBOSE | re.MULTILINE,
+)
 
-configpartialre = (br"""ui\.config""")
+configpartialre = br"""ui\.config"""
 
-ignorere = re.compile(br'''
+ignorere = re.compile(
+    br'''
     \#\s(?P<reason>internal|experimental|deprecated|developer|inconsistent)\s
     config:\s(?P<config>\S+\.\S+)$
-    ''', re.VERBOSE | re.MULTILINE)
+    ''',
+    re.VERBOSE | re.MULTILINE,
+)
 
 if sys.version_info[0] > 2:
+
     def mkstr(b):
         if isinstance(b, str):
             return b
         return b.decode('utf8')
+
+
 else:
     mkstr = lambda x: x
 
+
 def main(args):
     for f in args:
         sect = b''
@@ -115,18 +128,32 @@
                 name = m.group('section') + b"." + m.group('option')
                 default = m.group('default')
                 if default in (
-                        None, b'False', b'None', b'0', b'[]', b'""', b"''"):
+                    None,
+                    b'False',
+                    b'None',
+                    b'0',
+                    b'[]',
+                    b'""',
+                    b"''",
+                ):
                     default = b''
                 if re.match(b'[a-z.]+$', default):
                     default = b'<variable>'
-                if (name in foundopts and (ctype, default) != foundopts[name]
-                    and name not in allowinconsistent):
+                if (
+                    name in foundopts
+                    and (ctype, default) != foundopts[name]
+                    and name not in allowinconsistent
+                ):
                     print(mkstr(l.rstrip()))
                     fctype, fdefault = foundopts[name]
-                    print("conflict on %s: %r != %r" % (
-                        mkstr(name),
-                        (mkstr(ctype), mkstr(default)),
-                        (mkstr(fctype), mkstr(fdefault))))
+                    print(
+                        "conflict on %s: %r != %r"
+                        % (
+                            mkstr(name),
+                            (mkstr(ctype), mkstr(default)),
+                            (mkstr(fctype), mkstr(fdefault)),
+                        )
+                    )
                     print("at %s:%d:" % (mkstr(f), linenum))
                 foundopts[name] = (ctype, default)
                 carryover = b''
@@ -139,9 +166,11 @@
 
     for name in sorted(foundopts):
         if name not in documented:
-            if not (name.startswith(b"devel.") or
-                    name.startswith(b"experimental.") or
-                    name.startswith(b"debug.")):
+            if not (
+                name.startswith(b"devel.")
+                or name.startswith(b"experimental.")
+                or name.startswith(b"debug.")
+            ):
                 ctype, default = foundopts[name]
                 if default:
                     if isinstance(default, bytes):
@@ -149,8 +178,11 @@
                     default = ' [%s]' % default
                 elif isinstance(default, bytes):
                     default = mkstr(default)
-                print("undocumented: %s (%s)%s" % (
-                    mkstr(name), mkstr(ctype), default))
+                print(
+                    "undocumented: %s (%s)%s"
+                    % (mkstr(name), mkstr(ctype), default)
+                )
+
 
 if __name__ == "__main__":
     if len(sys.argv) > 1:
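
To make the captures of the verbose regexes above concrete, here is a
trimmed copy of configre (its inline comments dropped) run against a
typical call::

    import re

    configre = re.compile(br'''
        ui\.config(?P<ctype>|int|bool|list)\(
            ['"](?P<section>\S+)['"],\s*
            ['"](?P<option>\S+)['"](,\s+
            (?:default=)?(?P<default>\S+?))?
        \)''', re.VERBOSE | re.MULTILINE)

    m = configre.search(b"ui.configbool('ui', 'debugger', default=False)")
    print(m.group('ctype'), m.group('section'),
          m.group('option'), m.group('default'))
    # b'bool' b'ui' b'debugger' b'False'
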
--- a/contrib/check-py3-compat.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/check-py3-compat.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,6 +16,7 @@
 import traceback
 import warnings
 
+
 def check_compat_py2(f):
     """Check Python 3 compatibility for a file with Python 2"""
     with open(f, 'rb') as fh:
@@ -40,6 +41,7 @@
     if haveprint and 'print_function' not in futures:
         print('%s requires print_function' % f)
 
+
 def check_compat_py3(f):
     """Check Python 3 compatibility of a file with Python 3."""
     with open(f, 'rb') as fh:
@@ -54,8 +56,9 @@
     # Try to import the module.
     # For now we only support modules in packages because figuring out module
     # paths for things not in a package can be confusing.
-    if (f.startswith(('hgdemandimport/', 'hgext/', 'mercurial/'))
-        and not f.endswith('__init__.py')):
+    if f.startswith(
+        ('hgdemandimport/', 'hgext/', 'mercurial/')
+    ) and not f.endswith('__init__.py'):
         assert f.endswith('.py')
         name = f.replace('/', '.')[:-3]
         try:
@@ -79,11 +82,16 @@
 
             if frame.filename:
                 filename = os.path.basename(frame.filename)
-                print('%s: error importing: <%s> %s (error at %s:%d)' % (
-                      f, type(e).__name__, e, filename, frame.lineno))
+                print(
+                    '%s: error importing: <%s> %s (error at %s:%d)'
+                    % (f, type(e).__name__, e, filename, frame.lineno)
+                )
             else:
-                print('%s: error importing module: <%s> %s (line %d)' % (
-                      f, type(e).__name__, e, frame.lineno))
+                print(
+                    '%s: error importing module: <%s> %s (line %d)'
+                    % (f, type(e).__name__, e, frame.lineno)
+                )
+
 
 if __name__ == '__main__':
     if sys.version_info[0] == 2:
@@ -96,7 +104,10 @@
             fn(f)
 
         for w in warns:
-            print(warnings.formatwarning(w.message, w.category,
-                                         w.filename, w.lineno).rstrip())
+            print(
+                warnings.formatwarning(
+                    w.message, w.category, w.filename, w.lineno
+                ).rstrip()
+            )
 
     sys.exit(0)
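
The Python 2 path above works from the set of __future__ imports in each
file; a self-contained sketch of that collection step (the function name
is made up)::

    import ast

    def futures(source):
        tree = ast.parse(source)
        return {alias.name
                for node in ast.walk(tree)
                if isinstance(node, ast.ImportFrom)
                and node.module == '__future__'
                for alias in node.names}

    print(futures('from __future__ import absolute_import\nprint("x")\n'))
    # {'absolute_import'}
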
--- a/contrib/clang-format-ignorelist	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/clang-format-ignorelist	Mon Oct 21 11:09:48 2019 -0400
@@ -1,6 +1,5 @@
 # Files that just need to be migrated to the formatter.
 # Do not add new files here!
-mercurial/cext/dirs.c
 mercurial/cext/manifest.c
 mercurial/cext/osutil.c
 # Vendored code that we should never format:
@@ -49,6 +48,10 @@
 contrib/python-zstandard/zstd/compress/huf_compress.c
 contrib/python-zstandard/zstd/compress/zstd_compress.c
 contrib/python-zstandard/zstd/compress/zstd_compress_internal.h
+contrib/python-zstandard/zstd/compress/zstd_compress_literals.c
+contrib/python-zstandard/zstd/compress/zstd_compress_literals.h
+contrib/python-zstandard/zstd/compress/zstd_compress_sequences.c
+contrib/python-zstandard/zstd/compress/zstd_compress_sequences.h
 contrib/python-zstandard/zstd/compress/zstd_double_fast.c
 contrib/python-zstandard/zstd/compress/zstd_double_fast.h
 contrib/python-zstandard/zstd/compress/zstd_fast.c
--- a/contrib/debugcmdserver.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/debugcmdserver.py	Mon Oct 21 11:09:48 2019 -0400
@@ -23,6 +23,7 @@
 else:
     log = open(sys.argv[1], 'a')
 
+
 def read(size):
     data = sys.stdin.read(size)
     if not data:
@@ -31,6 +32,7 @@
     sys.stdout.flush()
     return data
 
+
 try:
     while True:
         header = read(outputfmtsize)
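
The header being read above is struct-packed; assuming the script's usual
'>cI' frame format (one channel byte, then a big-endian 32-bit payload
length), decoding one buffered frame looks like this sketch::

    import struct

    def readframe(buf):
        channel, length = struct.unpack('>cI', buf[:5])
        return channel, buf[5:5 + length]

    print(readframe(b'o\x00\x00\x00\x05hello'))   # (b'o', b'hello')
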
--- a/contrib/debugshell.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/debugshell.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,6 +14,7 @@
 cmdtable = {}
 command = registrar.command(cmdtable)
 
+
 def pdb(ui, repo, msg, **opts):
     objects = {
         'mercurial': mercurial,
@@ -24,25 +25,25 @@
 
     code.interact(msg, local=objects)
 
+
 def ipdb(ui, repo, msg, **opts):
     import IPython
 
     cl = repo.changelog
     mf = repo.manifestlog
-    cl, mf # use variables to appease pyflakes
+    cl, mf  # use variables to appease pyflakes
 
     IPython.embed()
 
+
 @command(b'debugshell|dbsh', [])
 def debugshell(ui, repo, **opts):
-    bannermsg = ("loaded repo : %s\n"
-                 "using source: %s" % (pycompat.sysstr(repo.root),
-                                       mercurial.__path__[0]))
+    bannermsg = "loaded repo : %s\n" "using source: %s" % (
+        pycompat.sysstr(repo.root),
+        mercurial.__path__[0],
+    )
 
-    pdbmap = {
-        'pdb'  : 'code',
-        'ipdb' : 'IPython'
-    }
+    pdbmap = {'pdb': 'code', 'ipdb': 'IPython'}
 
     debugger = ui.config(b"ui", b"debugger")
     if not debugger:
@@ -55,8 +56,10 @@
         with demandimport.deactivated():
             __import__(pdbmap[debugger])
     except ImportError:
-        ui.warn((b"%s debugger specified but %s module was not found\n")
-                % (debugger, pdbmap[debugger]))
+        ui.warnnoi18n(
+            b"%s debugger specified but %s module was not found\n"
+            % (debugger, pdbmap[debugger])
+        )
         debugger = b'pdb'
 
     getattr(sys.modules[__name__], debugger)(ui, repo, bannermsg, **opts)
--- a/contrib/dirstatenonnormalcheck.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/dirstatenonnormalcheck.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,6 +13,7 @@
     extensions,
 )
 
+
 def nonnormalentries(dmap):
     """Compute nonnormal entries from dirstate's dmap"""
     res = set()
@@ -21,6 +22,7 @@
             res.add(f)
     return res
 
+
 def checkconsistency(ui, orig, dmap, _nonnormalset, label):
     """Compute nonnormalset from dmap, check that it matches _nonnormalset"""
     nonnormalcomputedmap = nonnormalentries(dmap)
@@ -30,15 +32,19 @@
         ui.develwarn(b"[nonnormalset] %s\n" % _nonnormalset, config=b'dirstate')
         ui.develwarn(b"[map] %s\n" % nonnormalcomputedmap, config=b'dirstate')
 
+
 def _checkdirstate(orig, self, arg):
     """Check nonnormal set consistency before and after the call to orig"""
-    checkconsistency(self._ui, orig, self._map, self._map.nonnormalset,
-                     b"before")
+    checkconsistency(
+        self._ui, orig, self._map, self._map.nonnormalset, b"before"
+    )
     r = orig(self, arg)
-    checkconsistency(self._ui, orig, self._map, self._map.nonnormalset,
-                     b"after")
+    checkconsistency(
+        self._ui, orig, self._map, self._map.nonnormalset, b"after"
+    )
     return r
 
+
 def extsetup(ui):
     """Wrap functions modifying dirstate to check nonnormalset consistency"""
     dirstatecl = dirstate.dirstate
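
extsetup() goes on to wrap the dirstate mutators with _checkdirstate; the
generic shape of that before/after invariant wrapper, detached from
dirstate (all names below are illustrative)::

    import functools

    def checked(invariant):
        def wrap(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                invariant('before')
                result = func(*args, **kwargs)
                invariant('after')
                return result
            return wrapper
        return wrap

    @checked(lambda when: print('invariant checked', when))
    def mutate():
        return 42

    mutate()
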
--- a/contrib/dumprevlog	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/dumprevlog	Mon Oct 21 11:09:48 2019 -0400
@@ -22,6 +22,7 @@
     if b'b' not in mode:
         mode = mode + b'b'
     return open(path, pycompat.sysstr(mode))
+binopen.options = {}
 
 def printb(data, end=b'\n'):
     sys.stdout.flush()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/examples/fix.hgrc	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,15 @@
+[fix]
+clang-format:command = clang-format --style file -i
+clang-format:pattern = (**.c or **.cc or **.h) and not "listfile:contrib/clang-format-ignorelist"
+
+rustfmt:command = rustfmt {rootpath}
+rustfmt:pattern = set:**.rs
+
+# We use black, but currently with
+# https://github.com/psf/black/pull/826 applied. For now,
+# contrib/grey.py is our fork of black. To get grey's dependencies, run
+# pip install
+# git+https://github.com/python/black/@d9e71a75ccfefa3d9156a64c03313a0d4ad981e5
+#
+# black:command = python3.7 contrib/grey.py --config=black.toml -
+# black:pattern = set:**.py - hgext/fsmonitor/pywatchman/** - mercurial/thirdparty/** - contrib/python-zstandard/** - contrib/grey.py
--- a/contrib/fuzz/Makefile	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/fuzz/Makefile	Mon Oct 21 11:09:48 2019 -0400
@@ -105,6 +105,33 @@
 	  -I../../mercurial \
 	  -c -o revlog.o ../../mercurial/cext/revlog.c
 
+dirs_fuzzer: dirs.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
+	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -Wno-register -Wno-macro-redefined \
+	  -I../../mercurial dirs.cc \
+	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
+	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  -o $$OUT/dirs_fuzzer
+
+fncache_fuzzer: fncache.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
+	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -Wno-register -Wno-macro-redefined \
+	  -I../../mercurial fncache.cc \
+	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
+	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  -o $$OUT/fncache_fuzzer
+
+jsonescapeu8fast_fuzzer: jsonescapeu8fast.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
+	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -Wno-register -Wno-macro-redefined \
+	  -I../../mercurial jsonescapeu8fast.cc \
+	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
+	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  -o $$OUT/jsonescapeu8fast_fuzzer
+
+manifest_corpus.zip:
+	python manifest_corpus.py $$OUT/manifest_fuzzer_seed_corpus.zip
+
 manifest_fuzzer: manifest.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
 	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
 	  -Wno-register -Wno-macro-redefined \
@@ -113,9 +140,6 @@
 	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
 	  -o $$OUT/manifest_fuzzer
 
-manifest_corpus.zip:
-	python manifest_corpus.py $$OUT/manifest_fuzzer_seed_corpus.zip
-
 revlog_fuzzer: revlog.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
 	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
 	  -Wno-register -Wno-macro-redefined \
@@ -155,6 +179,6 @@
 	  mpatch \
 	  xdiff
 
-oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer manifest_fuzzer manifest_corpus.zip revlog_fuzzer revlog_corpus.zip dirstate_fuzzer dirstate_corpus.zip fm1readmarkers_fuzzer fm1readmarkers_corpus.zip
+oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer dirs_fuzzer fncache_fuzzer jsonescapeu8fast_fuzzer manifest_fuzzer manifest_corpus.zip revlog_fuzzer revlog_corpus.zip dirstate_fuzzer dirstate_corpus.zip fm1readmarkers_fuzzer fm1readmarkers_corpus.zip
 
 .PHONY: all clean oss-fuzz
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/dirs.cc	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,56 @@
+#include <Python.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "pyutil.h"
+
+#include <string>
+
+extern "C" {
+
+static PyCodeObject *code;
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+	contrib::initpy(*argv[0]);
+	code = (PyCodeObject *)Py_CompileString(R"py(
+from parsers import dirs
+try:
+  files = mdata.split('\n')
+  d = dirs(files)
+  list(d)
+  'a' in d
+  if files:
+    files[0] in d
+except Exception as e:
+  pass
+  # uncomment this print if you're editing this Python code
+  # to debug failures.
+  # print e
+)py",
+	                                        "fuzzer", Py_file_input);
+	return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+	// Don't allow fuzzer inputs larger than 100k, since we'll just bog
+	// down and not accomplish much.
+	if (Size > 100000) {
+		return 0;
+	}
+	PyObject *mtext =
+	    PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
+	PyObject *locals = PyDict_New();
+	PyDict_SetItemString(locals, "mdata", mtext);
+	PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
+	if (!res) {
+		PyErr_Print();
+	}
+	Py_XDECREF(res);
+	Py_DECREF(locals);
+	Py_DECREF(mtext);
+	return 0; // Non-zero return values are reserved for future use.
+}
+}
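
The harness compiles its probe snippet once in LLVMFuzzerInitialize and
then evaluates it per input with the fuzz bytes bound to mdata. The same
two-step pattern in pure Python, with a stand-in probe body::

    probe = compile(
        "try:\n"
        "    files = mdata.split(b'\\n')\n"
        "except Exception:\n"
        "    pass\n",
        'fuzzer', 'exec')

    def one_input(data):
        eval(probe, {}, {'mdata': data})

    one_input(b'a/b\nc')
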
--- a/contrib/fuzz/dirstate_corpus.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/fuzz/dirstate_corpus.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,8 +8,7 @@
 ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
 args = ap.parse_args()
 
-reporoot = os.path.normpath(os.path.join(os.path.dirname(__file__),
-                                         '..', '..'))
+reporoot = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..'))
 dirstate = os.path.join(reporoot, '.hg', 'dirstate')
 
 with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
--- a/contrib/fuzz/fm1readmarkers_corpus.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/fuzz/fm1readmarkers_corpus.py	Mon Oct 21 11:09:48 2019 -0400
@@ -33,4 +33,6 @@
             'nhistedituserAugie Fackler <raf@durin42.com>\x00\x00\x00yA\xd7\x02'
             'MtA\xd4\xe1\x01,\x00\x00\x01\x03\x03"\xa5\xcb\x86\xb6\xf4\xbaO\xa0'
             'sH\xe7?\xcb\x9b\xc2n\xcfI\x9e\x14\xf0D\xf0!\x18DN\xcd\x97\x016\xa5'
-            '\xef\xa06\xcb\x884\x8a\x03\x01\t\x08\x04\x1fef14operationhisted'))
+            '\xef\xa06\xcb\x884\x8a\x03\x01\t\x08\x04\x1fef14operationhisted'
+        ),
+    )
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/fncache.cc	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,78 @@
+#include <Python.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "pyutil.h"
+
+#include <iostream>
+#include <string>
+
+extern "C" {
+
+static PyCodeObject *code;
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+	contrib::initpy(*argv[0]);
+	code = (PyCodeObject *)Py_CompileString(R"py(
+from parsers import (
+    isasciistr,
+    asciilower,
+    asciiupper,
+    encodedir,
+    pathencode,
+    lowerencode,
+)
+
+try:
+    for fn in (
+        isasciistr,
+        asciilower,
+        asciiupper,
+        encodedir,
+        pathencode,
+        lowerencode,
+    ):
+        try:
+            fn(data)
+        except UnicodeDecodeError:
+            pass  # some functions emit this exception
+        except AttributeError:
+            # pathencode needs hashlib, which fails to import because the time
+            # module fails to import. We should try and fix that some day, but
+            # for now we at least get coverage on non-hashencoded codepaths.
+            if fn != pathencode:
+                raise
+        # uncomment this for debugging exceptions
+        # except Exception as e:
+        #     raise Exception('%r: %r' % (fn, e))
+except Exception as e:
+    pass
+    # uncomment this print if you're editing this Python code
+    # to debug failures.
+    # print(e)
+)py",
+	                                        "fuzzer", Py_file_input);
+	if (!code) {
+		std::cerr << "failed to compile Python code!" << std::endl;
+	}
+	return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+	PyObject *mtext =
+	    PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
+	PyObject *locals = PyDict_New();
+	PyDict_SetItemString(locals, "data", mtext);
+	PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
+	if (!res) {
+		PyErr_Print();
+	}
+	Py_XDECREF(res);
+	Py_DECREF(locals);
+	Py_DECREF(mtext);
+	return 0; // Non-zero return values are reserved for future use.
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/jsonescapeu8fast.cc	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,57 @@
+#include <Python.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "pyutil.h"
+
+#include <fuzzer/FuzzedDataProvider.h>
+#include <iostream>
+#include <string>
+
+extern "C" {
+
+static PyCodeObject *code;
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+	contrib::initpy(*argv[0]);
+	code = (PyCodeObject *)Py_CompileString(R"py(
+from parsers import jsonescapeu8fast
+
+try:
+    jsonescapeu8fast(data, paranoid)
+except Exception as e:
+    pass
+    # uncomment this print if you're editing this Python code
+    # to debug failures.
+    # print(e)
+)py",
+	                                        "fuzzer", Py_file_input);
+	if (!code) {
+		std::cerr << "failed to compile Python code!" << std::endl;
+	}
+	return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+	FuzzedDataProvider provider(Data, Size);
+	bool paranoid = provider.ConsumeBool();
+	std::string remainder = provider.ConsumeRemainingBytesAsString();
+
+	PyObject *mtext = PyBytes_FromStringAndSize(
+	    (const char *)remainder.c_str(), remainder.size());
+	PyObject *locals = PyDict_New();
+	PyDict_SetItemString(locals, "data", mtext);
+	PyDict_SetItemString(locals, "paranoid", paranoid ? Py_True : Py_False);
+	PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
+	if (!res) {
+		PyErr_Print();
+	}
+	Py_XDECREF(res);
+	Py_DECREF(locals);
+	Py_DECREF(mtext);
+	return 0; // Non-zero return values are reserved for future use.
+}
+}
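
FuzzedDataProvider carves typed values out of the raw input: scalars such
as the bool come off the tail of the buffer and the leftover front bytes
become the payload. A hedged Python rendering of that split::

    def split_input(data):
        # scalars are consumed from the tail, remaining bytes from the front
        if not data:
            return False, b''
        return bool(data[-1] & 1), data[:-1]

    print(split_input(b'"a\\tb"\x01'))   # (True, b'"a\\tb"')
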
--- a/contrib/fuzz/manifest_corpus.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/fuzz/manifest_corpus.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,8 +8,9 @@
 args = ap.parse_args()
 
 with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
-    zf.writestr("manifest_zero",
-'''PKG-INFO\09b3ed8f2b81095a13064402e930565f083346e9a
+    zf.writestr(
+        "manifest_zero",
+        '''PKG-INFO\09b3ed8f2b81095a13064402e930565f083346e9a
 README\080b6e76643dcb44d4bc729e932fc464b3e36dbe3
 hg\0b6444347c629cc058d478023905cfb83b7f5bb9d
 mercurial/__init__.py\0b80de5d138758541c5f05265ad144ab9fa86d1db
@@ -22,9 +23,11 @@
 notes.txt\0703afcec5edb749cf5cec67831f554d6da13f2fb
 setup.py\0ccf3f6daf0f13101ca73631f7a1769e328b472c9
 tkmerge\03c922edb43a9c143682f7bc7b00f98b3c756ebe7
-''')
-    zf.writestr("badmanifest_shorthashes",
-                "narf\0aa\nnarf2\0aaa\n")
-    zf.writestr("badmanifest_nonull",
-                "narf\0cccccccccccccccccccccccccccccccccccccccc\n"
-                "narf2aaaaaaaaaaaaaaaaaaaa\n")
+''',
+    )
+    zf.writestr("badmanifest_shorthashes", "narf\0aa\nnarf2\0aaa\n")
+    zf.writestr(
+        "badmanifest_nonull",
+        "narf\0cccccccccccccccccccccccccccccccccccccccc\n"
+        "narf2aaaaaaaaaaaaaaaaaaaa\n",
+    )
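
Each well-formed line in manifest_zero is path, NUL, then a 40-character
hex node, optionally followed by a one-byte flag; the badmanifest_* seeds
violate exactly those rules. A sketch of the per-line shape (parseline is
a made-up name)::

    def parseline(line):
        path, rest = line.split(b'\0')
        hexnode, flags = rest[:40], rest[40:]
        return path, hexnode, flags

    print(parseline(b'hg\x00b6444347c629cc058d478023905cfb83b7f5bb9d'))
    # (b'hg', b'b6444347c629cc058d478023905cfb83b7f5bb9d', b'')
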
--- a/contrib/fuzz/mpatch_corpus.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/fuzz/mpatch_corpus.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,6 +13,7 @@
 ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
 args = ap.parse_args()
 
+
 class deltafrag(object):
     def __init__(self, start, end, data):
         self.start = start
@@ -20,8 +21,11 @@
         self.data = data
 
     def __str__(self):
-        return struct.pack(
-            ">lll", self.start, self.end, len(self.data)) + self.data
+        return (
+            struct.pack(">lll", self.start, self.end, len(self.data))
+            + self.data
+        )
+
 
 class delta(object):
     def __init__(self, frags):
@@ -30,8 +34,8 @@
     def __str__(self):
         return ''.join(str(f) for f in self.frags)
 
+
 class corpus(object):
-
     def __init__(self, base, deltas):
         self.base = base
         self.deltas = deltas
@@ -49,19 +53,19 @@
         )
         return "".join(parts)
 
+
 with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
     # Manually constructed entries
     zf.writestr(
-        "one_delta_applies",
-        str(corpus('a', [delta([deltafrag(0, 1, 'b')])]))
+        "one_delta_applies", str(corpus('a', [delta([deltafrag(0, 1, 'b')])]))
     )
     zf.writestr(
         "one_delta_starts_late",
-        str(corpus('a', [delta([deltafrag(3, 1, 'b')])]))
+        str(corpus('a', [delta([deltafrag(3, 1, 'b')])])),
     )
     zf.writestr(
         "one_delta_ends_late",
-        str(corpus('a', [delta([deltafrag(0, 20, 'b')])]))
+        str(corpus('a', [delta([deltafrag(0, 20, 'b')])])),
     )
 
     try:
@@ -70,9 +74,8 @@
         fl = r.file('mercurial/manifest.py')
         rl = getattr(fl, '_revlog', fl)
         bins = rl._chunks(rl._deltachain(10)[0])
-        zf.writestr('manifest_py_rev_10',
-                    str(corpus(bins[0], bins[1:])))
-    except: # skip this, so no re-raises
+        zf.writestr('manifest_py_rev_10', str(corpus(bins[0], bins[1:])))
+    except:  # skip this, so no re-raises
         print('skipping seed file from repo data')
     # Automatically discovered by running the fuzzer
     zf.writestr(
@@ -81,7 +84,8 @@
     # https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=8876
     zf.writestr(
         "mpatch_ossfuzz_getbe32_ubsan",
-        "\x02\x00\x00\x00\x0c    \xff\xff\xff\xff    ")
+        "\x02\x00\x00\x00\x0c    \xff\xff\xff\xff    ",
+    )
     zf.writestr(
         "mpatch_apply_over_memcpy",
         '\x13\x01\x00\x05\xd0\x00\x00\x00\x00\x00\x00\x00\x00\n \x00\x00\x00'
@@ -342,4 +346,5 @@
         '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
         '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00se\x00\x00'
         '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00')
+        '\x00\x00\x00\x00',
+    )
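
deltafrag.__str__ above packs each fragment as a 12-byte '>lll' header
(start, end, data length) followed by the replacement bytes; the inverse
operation, for reading a corpus entry back (readfrag is a made-up name)::

    import struct

    def readfrag(buf):
        start, end, length = struct.unpack('>lll', buf[:12])
        return start, end, buf[12:12 + length]

    print(readfrag(struct.pack('>lll', 0, 1, 1) + b'b'))   # (0, 1, b'b')
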
--- a/contrib/fuzz/revlog.cc	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/fuzz/revlog.cc	Mon Oct 21 11:09:48 2019 -0400
@@ -20,10 +20,15 @@
     try:
         index, cache = parse_index2(data, inline)
         index.slicechunktodensity(list(range(len(index))), 0.5, 262144)
+        index.stats()
+        index.findsnapshots({}, 0)
+        10 in index
         for rev in range(len(index)):
+            index.reachableroots(0, [len(index)-1], [rev])
             node = index[rev][7]
             partial = index.shortest(node)
             index.partialmatch(node[:partial])
+            index.deltachain(rev, None, True)
     except Exception as e:
         pass
         # uncomment this print if you're editing this Python code
--- a/contrib/fuzz/revlog_corpus.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/fuzz/revlog_corpus.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,13 +8,13 @@
 ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
 args = ap.parse_args()
 
-reporoot = os.path.normpath(os.path.join(os.path.dirname(__file__),
-                                         '..', '..'))
+reporoot = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..'))
 # typically a standalone index
 changelog = os.path.join(reporoot, '.hg', 'store', '00changelog.i')
 # an inline revlog with only a few revisions
 contributing = os.path.join(
-    reporoot, '.hg', 'store', 'data', 'contrib', 'fuzz', 'mpatch.cc.i')
+    reporoot, '.hg', 'store', 'data', 'contrib', 'fuzz', 'mpatch.cc.i'
+)
 
 print(changelog, os.path.exists(changelog))
 print(contributing, os.path.exists(contributing))
--- a/contrib/genosxversion.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/genosxversion.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,27 +2,30 @@
 from __future__ import absolute_import, print_function
 
 import argparse
-import json
 import os
 import subprocess
 import sys
 
 # Always load hg libraries from the hg we can find on $PATH.
-hglib = json.loads(subprocess.check_output(
-    ['hg', 'debuginstall', '-Tjson']))[0]['hgmodules']
+hglib = subprocess.check_output(['hg', 'debuginstall', '-T', '{hgmodules}'])
 sys.path.insert(0, os.path.dirname(hglib))
 
 from mercurial import util
 
 ap = argparse.ArgumentParser()
-ap.add_argument('--paranoid',
-                action='store_true',
-                help=("Be paranoid about how version numbers compare and "
-                      "produce something that's more likely to sort "
-                      "reasonably."))
+ap.add_argument(
+    '--paranoid',
+    action='store_true',
+    help=(
+        "Be paranoid about how version numbers compare and "
+        "produce something that's more likely to sort "
+        "reasonably."
+    ),
+)
 ap.add_argument('--selftest', action='store_true', help='Run self-tests.')
 ap.add_argument('versionfile', help='Path to a valid mercurial __version__.py')
 
+
 def paranoidver(ver):
     """Given an hg version produce something that distutils can sort.
 
@@ -109,22 +112,25 @@
         extra = ''
     return '%d.%d.%d%s' % (major, minor, micro, extra)
 
+
 def main(argv):
     opts = ap.parse_args(argv[1:])
     if opts.selftest:
         import doctest
+
         doctest.testmod()
         return
     with open(opts.versionfile) as f:
         for l in f:
             if l.startswith('version = b'):
                 # version number is entire line minus the quotes
-                ver = l[len('version = b') + 1:-2]
+                ver = l[len('version = b') + 1 : -2]
                 break
     if opts.paranoid:
         print(paranoidver(ver))
     else:
         print(ver)
 
+
 if __name__ == '__main__':
     main(sys.argv)
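
Why --paranoid exists: plain string comparison misorders version numbers
in both directions, so paranoidver() rewrites them into a form the
packaging tools sort correctly. The failure mode, in one line::

    print(sorted(['4.9', '4.9rc0', '4.10']))
    # ['4.10', '4.9', '4.9rc0'] -- 4.10 sorts first, the rc after the release
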
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/grey.py	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,4094 @@
+# no-check-code because 3rd party
+import ast
+import asyncio
+from concurrent.futures import Executor, ProcessPoolExecutor
+from contextlib import contextmanager
+from datetime import datetime
+from enum import Enum
+from functools import lru_cache, partial, wraps
+import io
+import itertools
+import logging
+from multiprocessing import Manager, freeze_support
+import os
+from pathlib import Path
+import pickle
+import re
+import signal
+import sys
+import tempfile
+import tokenize
+import traceback
+from typing import (
+    Any,
+    Callable,
+    Collection,
+    Dict,
+    Generator,
+    Generic,
+    Iterable,
+    Iterator,
+    List,
+    Optional,
+    Pattern,
+    Sequence,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+    cast,
+)
+
+from appdirs import user_cache_dir
+from attr import dataclass, evolve, Factory
+import click
+import toml
+from typed_ast import ast3, ast27
+
+# lib2to3 fork
+from blib2to3.pytree import Node, Leaf, type_repr
+from blib2to3 import pygram, pytree
+from blib2to3.pgen2 import driver, token
+from blib2to3.pgen2.grammar import Grammar
+from blib2to3.pgen2.parse import ParseError
+
+__version__ = '19.3b1.dev95+gdc1add6.d20191005'
+
+DEFAULT_LINE_LENGTH = 88
+DEFAULT_EXCLUDES = (
+    r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)/"
+)
+DEFAULT_INCLUDES = r"\.pyi?$"
+CACHE_DIR = Path(user_cache_dir("black", version=__version__))
+
+
+# types
+FileContent = str
+Encoding = str
+NewLine = str
+Depth = int
+NodeType = int
+LeafID = int
+Priority = int
+Index = int
+LN = Union[Leaf, Node]
+SplitFunc = Callable[["Line", Collection["Feature"]], Iterator["Line"]]
+Timestamp = float
+FileSize = int
+CacheInfo = Tuple[Timestamp, FileSize]
+Cache = Dict[Path, CacheInfo]
+out = partial(click.secho, bold=True, err=True)
+err = partial(click.secho, fg="red", err=True)
+
+pygram.initialize(CACHE_DIR)
+syms = pygram.python_symbols
+
+
+class NothingChanged(UserWarning):
+    """Raised when reformatted code is the same as source."""
+
+
+class CannotSplit(Exception):
+    """A readable split that fits the allotted line length is impossible."""
+
+
+class InvalidInput(ValueError):
+    """Raised when input source code fails all parse attempts."""
+
+
+class WriteBack(Enum):
+    NO = 0
+    YES = 1
+    DIFF = 2
+    CHECK = 3
+
+    @classmethod
+    def from_configuration(cls, *, check: bool, diff: bool) -> "WriteBack":
+        if check and not diff:
+            return cls.CHECK
+
+        return cls.DIFF if diff else cls.YES
+
+
+class Changed(Enum):
+    NO = 0
+    CACHED = 1
+    YES = 2
+
+
+class TargetVersion(Enum):
+    PY27 = 2
+    PY33 = 3
+    PY34 = 4
+    PY35 = 5
+    PY36 = 6
+    PY37 = 7
+    PY38 = 8
+
+    def is_python2(self) -> bool:
+        return self is TargetVersion.PY27
+
+
+PY36_VERSIONS = {TargetVersion.PY36, TargetVersion.PY37, TargetVersion.PY38}
+
+
+class Feature(Enum):
+    # All string literals are unicode
+    UNICODE_LITERALS = 1
+    F_STRINGS = 2
+    NUMERIC_UNDERSCORES = 3
+    TRAILING_COMMA_IN_CALL = 4
+    TRAILING_COMMA_IN_DEF = 5
+    # The following two feature-flags are mutually exclusive, and exactly one should be
+    # set for every version of python.
+    ASYNC_IDENTIFIERS = 6
+    ASYNC_KEYWORDS = 7
+    ASSIGNMENT_EXPRESSIONS = 8
+    POS_ONLY_ARGUMENTS = 9
+
+
+VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
+    TargetVersion.PY27: {Feature.ASYNC_IDENTIFIERS},
+    TargetVersion.PY33: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
+    TargetVersion.PY34: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
+    TargetVersion.PY35: {
+        Feature.UNICODE_LITERALS,
+        Feature.TRAILING_COMMA_IN_CALL,
+        Feature.ASYNC_IDENTIFIERS,
+    },
+    TargetVersion.PY36: {
+        Feature.UNICODE_LITERALS,
+        Feature.F_STRINGS,
+        Feature.NUMERIC_UNDERSCORES,
+        Feature.TRAILING_COMMA_IN_CALL,
+        Feature.TRAILING_COMMA_IN_DEF,
+        Feature.ASYNC_IDENTIFIERS,
+    },
+    TargetVersion.PY37: {
+        Feature.UNICODE_LITERALS,
+        Feature.F_STRINGS,
+        Feature.NUMERIC_UNDERSCORES,
+        Feature.TRAILING_COMMA_IN_CALL,
+        Feature.TRAILING_COMMA_IN_DEF,
+        Feature.ASYNC_KEYWORDS,
+    },
+    TargetVersion.PY38: {
+        Feature.UNICODE_LITERALS,
+        Feature.F_STRINGS,
+        Feature.NUMERIC_UNDERSCORES,
+        Feature.TRAILING_COMMA_IN_CALL,
+        Feature.TRAILING_COMMA_IN_DEF,
+        Feature.ASYNC_KEYWORDS,
+        Feature.ASSIGNMENT_EXPRESSIONS,
+        Feature.POS_ONLY_ARGUMENTS,
+    },
+}
+
+
+@dataclass
+class FileMode:
+    target_versions: Set[TargetVersion] = Factory(set)
+    line_length: int = DEFAULT_LINE_LENGTH
+    string_normalization: bool = True
+    is_pyi: bool = False
+
+    def get_cache_key(self) -> str:
+        if self.target_versions:
+            version_str = ",".join(
+                str(version.value)
+                for version in sorted(self.target_versions, key=lambda v: v.value)
+            )
+        else:
+            version_str = "-"
+        parts = [
+            version_str,
+            str(self.line_length),
+            str(int(self.string_normalization)),
+            str(int(self.is_pyi)),
+        ]
+        return ".".join(parts)
+
+
+def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
+    return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
+
+
+def read_pyproject_toml(
+    ctx: click.Context, param: click.Parameter, value: Union[str, int, bool, None]
+) -> Optional[str]:
+    """Inject Black configuration from "pyproject.toml" into defaults in `ctx`.
+
+    Returns the path to a successfully found and read configuration file, None
+    otherwise.
+    """
+    assert not isinstance(value, (int, bool)), "Invalid parameter type passed"
+    if not value:
+        root = find_project_root(ctx.params.get("src", ()))
+        path = root / "pyproject.toml"
+        if path.is_file():
+            value = str(path)
+        else:
+            return None
+
+    try:
+        pyproject_toml = toml.load(value)
+        config = pyproject_toml.get("tool", {}).get("black", {})
+    except (toml.TomlDecodeError, OSError) as e:
+        raise click.FileError(
+            filename=value, hint=f"Error reading configuration file: {e}"
+        )
+
+    if not config:
+        return None
+
+    if ctx.default_map is None:
+        ctx.default_map = {}
+    ctx.default_map.update(  # type: ignore  # bad types in .pyi
+        {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
+    )
+    return value
+
+
+@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
+@click.option("-c", "--code", type=str, help="Format the code passed in as a string.")
+@click.option(
+    "-l",
+    "--line-length",
+    type=int,
+    default=DEFAULT_LINE_LENGTH,
+    help="How many characters per line to allow.",
+    show_default=True,
+)
+@click.option(
+    "-t",
+    "--target-version",
+    type=click.Choice([v.name.lower() for v in TargetVersion]),
+    callback=lambda c, p, v: [TargetVersion[val.upper()] for val in v],
+    multiple=True,
+    help=(
+        "Python versions that should be supported by Black's output. [default: "
+        "per-file auto-detection]"
+    ),
+)
+@click.option(
+    "--py36",
+    is_flag=True,
+    help=(
+        "Allow using Python 3.6-only syntax on all input files.  This will put "
+        "trailing commas in function signatures and calls also after *args and "
+        "**kwargs. Deprecated; use --target-version instead. "
+        "[default: per-file auto-detection]"
+    ),
+)
+@click.option(
+    "--pyi",
+    is_flag=True,
+    help=(
+        "Format all input files like typing stubs regardless of file extension "
+        "(useful when piping source on standard input)."
+    ),
+)
+@click.option(
+    "-S",
+    "--skip-string-normalization",
+    is_flag=True,
+    help="Don't normalize string quotes or prefixes.",
+)
+@click.option(
+    "--check",
+    is_flag=True,
+    help=(
+        "Don't write the files back, just return the status.  Return code 0 "
+        "means nothing would change.  Return code 1 means some files would be "
+        "reformatted.  Return code 123 means there was an internal error."
+    ),
+)
+@click.option(
+    "--diff",
+    is_flag=True,
+    help="Don't write the files back, just output a diff for each file on stdout.",
+)
+@click.option(
+    "--fast/--safe",
+    is_flag=True,
+    help="If --fast given, skip temporary sanity checks. [default: --safe]",
+)
+@click.option(
+    "--include",
+    type=str,
+    default=DEFAULT_INCLUDES,
+    help=(
+        "A regular expression that matches files and directories that should be "
+        "included on recursive searches.  An empty value means all files are "
+        "included regardless of the name.  Use forward slashes for directories on "
+        "all platforms (Windows, too).  Exclusions are calculated first, inclusions "
+        "later."
+    ),
+    show_default=True,
+)
+@click.option(
+    "--exclude",
+    type=str,
+    default=DEFAULT_EXCLUDES,
+    help=(
+        "A regular expression that matches files and directories that should be "
+        "excluded on recursive searches.  An empty value means no paths are excluded. "
+        "Use forward slashes for directories on all platforms (Windows, too).  "
+        "Exclusions are calculated first, inclusions later."
+    ),
+    show_default=True,
+)
+@click.option(
+    "-q",
+    "--quiet",
+    is_flag=True,
+    help=(
+        "Don't emit non-error messages to stderr. Errors are still emitted; "
+        "silence those with 2>/dev/null."
+    ),
+)
+@click.option(
+    "-v",
+    "--verbose",
+    is_flag=True,
+    help=(
+        "Also emit messages to stderr about files that were not changed or were "
+        "ignored due to --exclude=."
+    ),
+)
+@click.version_option(version=__version__)
+@click.argument(
+    "src",
+    nargs=-1,
+    type=click.Path(
+        exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
+    ),
+    is_eager=True,
+)
+@click.option(
+    "--config",
+    type=click.Path(
+        exists=False, file_okay=True, dir_okay=False, readable=True, allow_dash=False
+    ),
+    is_eager=True,
+    callback=read_pyproject_toml,
+    help="Read configuration from PATH.",
+)
+@click.pass_context
+def main(
+    ctx: click.Context,
+    code: Optional[str],
+    line_length: int,
+    target_version: List[TargetVersion],
+    check: bool,
+    diff: bool,
+    fast: bool,
+    pyi: bool,
+    py36: bool,
+    skip_string_normalization: bool,
+    quiet: bool,
+    verbose: bool,
+    include: str,
+    exclude: str,
+    src: Tuple[str],
+    config: Optional[str],
+) -> None:
+    """The uncompromising code formatter."""
+    write_back = WriteBack.from_configuration(check=check, diff=diff)
+    if target_version:
+        if py36:
+            err(f"Cannot use both --target-version and --py36")
+            ctx.exit(2)
+        else:
+            versions = set(target_version)
+    elif py36:
+        err(
+            "--py36 is deprecated and will be removed in a future version. "
+            "Use --target-version py36 instead."
+        )
+        versions = PY36_VERSIONS
+    else:
+        # We'll autodetect later.
+        versions = set()
+    mode = FileMode(
+        target_versions=versions,
+        line_length=line_length,
+        is_pyi=pyi,
+        string_normalization=not skip_string_normalization,
+    )
+    if config and verbose:
+        out(f"Using configuration from {config}.", bold=False, fg="blue")
+    if code is not None:
+        print(format_str(code, mode=mode))
+        ctx.exit(0)
+    try:
+        include_regex = re_compile_maybe_verbose(include)
+    except re.error:
+        err(f"Invalid regular expression for include given: {include!r}")
+        ctx.exit(2)
+    try:
+        exclude_regex = re_compile_maybe_verbose(exclude)
+    except re.error:
+        err(f"Invalid regular expression for exclude given: {exclude!r}")
+        ctx.exit(2)
+    report = Report(check=check, quiet=quiet, verbose=verbose)
+    root = find_project_root(src)
+    sources: Set[Path] = set()
+    path_empty(src, quiet, verbose, ctx)
+    for s in src:
+        p = Path(s)
+        if p.is_dir():
+            sources.update(
+                gen_python_files_in_dir(p, root, include_regex, exclude_regex, report)
+            )
+        elif p.is_file() or s == "-":
+            # if a file was explicitly given, we don't care about its extension
+            sources.add(p)
+        else:
+            err(f"invalid path: {s}")
+    if len(sources) == 0:
+        if verbose or not quiet:
+            out("No Python files are present to be formatted. Nothing to do 😴")
+        ctx.exit(0)
+
+    if len(sources) == 1:
+        reformat_one(
+            src=sources.pop(),
+            fast=fast,
+            write_back=write_back,
+            mode=mode,
+            report=report,
+        )
+    else:
+        reformat_many(
+            sources=sources, fast=fast, write_back=write_back, mode=mode, report=report
+        )
+
+    if verbose or not quiet:
+        out("Oh no! 💥 💔 💥" if report.return_code else "All done! ✨ 🍰 ✨")
+        click.secho(str(report), err=True)
+    ctx.exit(report.return_code)
+
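+# Illustrative, doctest-style sketch (for exposition only; assumes the
+# `--code` option bound to the `code` parameter above, and click's bundled
+# test runner). Driving the entry point in-process:
+#
+#     >>> from click.testing import CliRunner
+#     >>> result = CliRunner().invoke(main, ["--code", "x=1"])
+#     >>> result.exit_code
+#     0
+#     >>> result.output.startswith("x = 1")
+#     True
+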
+
+def path_empty(src: Tuple[str], quiet: bool, verbose: bool, ctx: click.Context) -> None:
+    """
+    Exit if no `src` was provided for formatting.
+    """
+    if not src:
+        if verbose or not quiet:
+            out("No Path provided. Nothing to do 😴")
+        ctx.exit(0)
+
+
+def reformat_one(
+    src: Path, fast: bool, write_back: WriteBack, mode: FileMode, report: "Report"
+) -> None:
+    """Reformat a single file under `src` without spawning child processes.
+
+    `fast`, `write_back`, and `mode` options are passed to
+    :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
+    """
+    try:
+        changed = Changed.NO
+        if not src.is_file() and str(src) == "-":
+            if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode):
+                changed = Changed.YES
+        else:
+            cache: Cache = {}
+            if write_back != WriteBack.DIFF:
+                cache = read_cache(mode)
+                res_src = src.resolve()
+                if res_src in cache and cache[res_src] == get_cache_info(res_src):
+                    changed = Changed.CACHED
+            if changed is not Changed.CACHED and format_file_in_place(
+                src, fast=fast, write_back=write_back, mode=mode
+            ):
+                changed = Changed.YES
+            if (write_back is WriteBack.YES and changed is not Changed.CACHED) or (
+                write_back is WriteBack.CHECK and changed is Changed.NO
+            ):
+                write_cache(cache, [src], mode)
+        report.done(src, changed)
+    except Exception as exc:
+        report.failed(src, str(exc))
+
+
+def reformat_many(
+    sources: Set[Path],
+    fast: bool,
+    write_back: WriteBack,
+    mode: FileMode,
+    report: "Report",
+) -> None:
+    """Reformat multiple files using a ProcessPoolExecutor."""
+    loop = asyncio.get_event_loop()
+    # os.cpu_count() can return None; fall back to a single worker then.
+    worker_count = os.cpu_count() or 1
+    if sys.platform == "win32":
+        # Work around https://bugs.python.org/issue26903
+        worker_count = min(worker_count, 61)
+    executor = ProcessPoolExecutor(max_workers=worker_count)
+    try:
+        loop.run_until_complete(
+            schedule_formatting(
+                sources=sources,
+                fast=fast,
+                write_back=write_back,
+                mode=mode,
+                report=report,
+                loop=loop,
+                executor=executor,
+            )
+        )
+    finally:
+        shutdown(loop)
+        executor.shutdown()
+
+
+async def schedule_formatting(
+    sources: Set[Path],
+    fast: bool,
+    write_back: WriteBack,
+    mode: FileMode,
+    report: "Report",
+    loop: asyncio.AbstractEventLoop,
+    executor: Executor,
+) -> None:
+    """Run formatting of `sources` in parallel using the provided `executor`.
+
+    (Use ProcessPoolExecutors for actual parallelism.)
+
+    `write_back`, `fast`, and `mode` options are passed to
+    :func:`format_file_in_place`.
+    """
+    cache: Cache = {}
+    if write_back != WriteBack.DIFF:
+        cache = read_cache(mode)
+        sources, cached = filter_cached(cache, sources)
+        for src in sorted(cached):
+            report.done(src, Changed.CACHED)
+    if not sources:
+        return
+
+    cancelled = []
+    sources_to_cache = []
+    lock = None
+    if write_back == WriteBack.DIFF:
+        # For diff output, we need locks to ensure we don't interleave output
+        # from different processes.
+        manager = Manager()
+        lock = manager.Lock()
+    tasks = {
+        asyncio.ensure_future(
+            loop.run_in_executor(
+                executor, format_file_in_place, src, fast, mode, write_back, lock
+            )
+        ): src
+        for src in sorted(sources)
+    }
+    pending: Iterable[asyncio.Future] = tasks.keys()
+    try:
+        loop.add_signal_handler(signal.SIGINT, cancel, pending)
+        loop.add_signal_handler(signal.SIGTERM, cancel, pending)
+    except NotImplementedError:
+        # There are no good alternatives for these on Windows.
+        pass
+    while pending:
+        done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
+        for task in done:
+            src = tasks.pop(task)
+            if task.cancelled():
+                cancelled.append(task)
+            elif task.exception():
+                report.failed(src, str(task.exception()))
+            else:
+                changed = Changed.YES if task.result() else Changed.NO
+                # If the file was written back or was successfully checked as
+                # well-formatted, store this information in the cache.
+                if write_back is WriteBack.YES or (
+                    write_back is WriteBack.CHECK and changed is Changed.NO
+                ):
+                    sources_to_cache.append(src)
+                report.done(src, changed)
+    if cancelled:
+        await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
+    if sources_to_cache:
+        write_cache(cache, sources_to_cache, mode)
+
+
+def format_file_in_place(
+    src: Path,
+    fast: bool,
+    mode: FileMode,
+    write_back: WriteBack = WriteBack.NO,
+    lock: Any = None,  # multiprocessing.Manager().Lock() is some crazy proxy
+) -> bool:
+    """Format file under `src` path. Return True if changed.
+
+    If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted
+    code to the file.
+    `mode` and `fast` options are passed to :func:`format_file_contents`.
+    """
+    if src.suffix == ".pyi":
+        mode = evolve(mode, is_pyi=True)
+
+    then = datetime.utcfromtimestamp(src.stat().st_mtime)
+    with open(src, "rb") as buf:
+        src_contents, encoding, newline = decode_bytes(buf.read())
+    try:
+        dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
+    except NothingChanged:
+        return False
+
+    if write_back == WriteBack.YES:
+        with open(src, "w", encoding=encoding, newline=newline) as f:
+            f.write(dst_contents)
+    elif write_back == WriteBack.DIFF:
+        now = datetime.utcnow()
+        src_name = f"{src}\t{then} +0000"
+        dst_name = f"{src}\t{now} +0000"
+        diff_contents = diff(src_contents, dst_contents, src_name, dst_name)
+
+        with lock or nullcontext():
+            f = io.TextIOWrapper(
+                sys.stdout.buffer,
+                encoding=encoding,
+                newline=newline,
+                write_through=True,
+            )
+            f.write(diff_contents)
+            f.detach()
+
+    return True
+
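+# Illustrative, doctest-style sketch (for exposition only; the `tempfile`
+# usage is an assumption of the example): writing a reformatted file back
+# in place and observing the result.
+#
+#     >>> import tempfile
+#     >>> tmp = Path(tempfile.mkdtemp()) / "demo.py"
+#     >>> _ = tmp.write_text("x=1\n")
+#     >>> format_file_in_place(
+#     ...     tmp, fast=True, mode=FileMode(), write_back=WriteBack.YES
+#     ... )
+#     True
+#     >>> tmp.read_text()
+#     'x = 1\n'
+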
+
+def format_stdin_to_stdout(
+    fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: FileMode
+) -> bool:
+    """Format file on stdin. Return True if changed.
+
+    If `write_back` is YES, write reformatted code back to stdout. If it is DIFF,
+    write a diff to stdout. The `mode` argument is passed to
+    :func:`format_file_contents`.
+    """
+    then = datetime.utcnow()
+    src, encoding, newline = decode_bytes(sys.stdin.buffer.read())
+    dst = src
+    try:
+        dst = format_file_contents(src, fast=fast, mode=mode)
+        return True
+
+    except NothingChanged:
+        return False
+
+    finally:
+        f = io.TextIOWrapper(
+            sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
+        )
+        if write_back == WriteBack.YES:
+            f.write(dst)
+        elif write_back == WriteBack.DIFF:
+            now = datetime.utcnow()
+            src_name = f"STDIN\t{then} +0000"
+            dst_name = f"STDOUT\t{now} +0000"
+            f.write(diff(src, dst, src_name, dst_name))
+        f.detach()
+
+
+def format_file_contents(
+    src_contents: str, *, fast: bool, mode: FileMode
+) -> FileContent:
+    """Reformat contents a file and return new contents.
+
+    If `fast` is False, additionally confirm that the reformatted code is
+    valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
+    `mode` is passed to :func:`format_str`.
+    """
+    if src_contents.strip() == "":
+        raise NothingChanged
+
+    dst_contents = format_str(src_contents, mode=mode)
+    if src_contents == dst_contents:
+        raise NothingChanged
+
+    if not fast:
+        assert_equivalent(src_contents, dst_contents)
+        assert_stable(src_contents, dst_contents, mode=mode)
+    return dst_contents
+
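+# Illustrative, doctest-style sketch of the NothingChanged contract
+# (assumes FileMode() defaults; for exposition only):
+#
+#     >>> format_file_contents("x=1", fast=True, mode=FileMode())
+#     'x = 1\n'
+#     >>> try:
+#     ...     format_file_contents("x = 1\n", fast=True, mode=FileMode())
+#     ... except NothingChanged:
+#     ...     print("already formatted")
+#     already formatted
+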
+
+def format_str(src_contents: str, *, mode: FileMode) -> FileContent:
+    """Reformat a string and return new contents.
+
+    `mode` determines formatting options, such as how many characters per line are
+    allowed.
+    """
+    src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
+    dst_contents = []
+    future_imports = get_future_imports(src_node)
+    if mode.target_versions:
+        versions = mode.target_versions
+    else:
+        versions = detect_target_versions(src_node)
+    normalize_fmt_off(src_node)
+    lines = LineGenerator(
+        remove_u_prefix="unicode_literals" in future_imports
+        or supports_feature(versions, Feature.UNICODE_LITERALS),
+        is_pyi=mode.is_pyi,
+        normalize_strings=mode.string_normalization,
+    )
+    elt = EmptyLineTracker(is_pyi=mode.is_pyi)
+    empty_line = Line()
+    after = 0
+    split_line_features = {
+        feature
+        for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF}
+        if supports_feature(versions, feature)
+    }
+    for current_line in lines.visit(src_node):
+        for _ in range(after):
+            dst_contents.append(str(empty_line))
+        before, after = elt.maybe_empty_lines(current_line)
+        for _ in range(before):
+            dst_contents.append(str(empty_line))
+        for line in split_line(
+            current_line, line_length=mode.line_length, features=split_line_features
+        ):
+            dst_contents.append(str(line))
+    return "".join(dst_contents)
+
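+# Illustrative, doctest-style sketch (assumes FileMode() defaults):
+# format_str is the pure string-to-string core that the file-level
+# helpers above build on.
+#
+#     >>> format_str("l = [1,2,3]", mode=FileMode())
+#     'l = [1, 2, 3]\n'
+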
+
+def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
+    """Return a tuple of (decoded_contents, encoding, newline).
+
+    `newline` is either CRLF or LF but `decoded_contents` is decoded with
+    universal newlines (i.e. only contains LF).
+    """
+    srcbuf = io.BytesIO(src)
+    encoding, lines = tokenize.detect_encoding(srcbuf.readline)
+    if not lines:
+        return "", encoding, "\n"
+
+    newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n"
+    srcbuf.seek(0)
+    with io.TextIOWrapper(srcbuf, encoding) as tiow:
+        return tiow.read(), encoding, newline
+
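+# Illustrative, doctest-style sketch (for exposition only): the dominant
+# newline is reported separately while the decoded text is normalized to LF.
+#
+#     >>> decode_bytes(b"x = 1\r\ny = 2\r\n")
+#     ('x = 1\ny = 2\n', 'utf-8', '\r\n')
+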
+
+def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
+    if not target_versions:
+        # No target_version specified, so try all grammars.
+        return [
+            # Python 3.7+
+            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,
+            # Python 3.0-3.6
+            pygram.python_grammar_no_print_statement_no_exec_statement,
+            # Python 2.7 with future print_function import
+            pygram.python_grammar_no_print_statement,
+            # Python 2.7
+            pygram.python_grammar,
+        ]
+    elif all(version.is_python2() for version in target_versions):
+        # Python 2-only code, so try Python 2 grammars.
+        return [
+            # Python 2.7 with future print_function import
+            pygram.python_grammar_no_print_statement,
+            # Python 2.7
+            pygram.python_grammar,
+        ]
+    else:
+        # Python 3-compatible code, so only try Python 3 grammar.
+        grammars = []
+        # If we have to parse both, try to parse async as a keyword first
+        if not supports_feature(target_versions, Feature.ASYNC_IDENTIFIERS):
+            # Python 3.7+
+            grammars.append(
+                pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords  # noqa: B950
+            )
+        if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
+            # Python 3.0-3.6
+            grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
+        # At least one of the above branches must have been taken, because every Python
+        # version has exactly one of the two 'ASYNC_*' flags
+        return grammars
+
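+# Illustrative, doctest-style sketch (for exposition only): Python 2-only
+# targets select exactly the two Python 2 grammars listed above.
+#
+#     >>> get_grammars({TargetVersion.PY27}) == [
+#     ...     pygram.python_grammar_no_print_statement,
+#     ...     pygram.python_grammar,
+#     ... ]
+#     True
+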
+
+def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
+    """Given a string with source, return the lib2to3 Node."""
+    if src_txt[-1:] != "\n":
+        src_txt += "\n"
+
+    for grammar in get_grammars(set(target_versions)):
+        drv = driver.Driver(grammar, pytree.convert)
+        try:
+            result = drv.parse_string(src_txt, True)
+            break
+
+        except ParseError as pe:
+            lineno, column = pe.context[1]
+            lines = src_txt.splitlines()
+            try:
+                faulty_line = lines[lineno - 1]
+            except IndexError:
+                faulty_line = "<line number missing in source>"
+            exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}")
+    else:
+        raise exc from None
+
+    if isinstance(result, Leaf):
+        result = Node(syms.file_input, [result])
+    return result
+
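+# Illustrative, doctest-style sketch (for exposition only; lib2to3_unparse is
+# defined just below): parsing and unparsing round-trips, with a trailing
+# newline added when the source lacked one.
+#
+#     >>> lib2to3_unparse(lib2to3_parse("x = 1"))
+#     'x = 1\n'
+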
+
+def lib2to3_unparse(node: Node) -> str:
+    """Given a lib2to3 node, return its string representation."""
+    code = str(node)
+    return code
+
+
+T = TypeVar("T")
+
+
+class Visitor(Generic[T]):
+    """Basic lib2to3 visitor that yields things of type `T` on `visit()`."""
+
+    def visit(self, node: LN) -> Iterator[T]:
+        """Main method to visit `node` and its children.
+
+        It tries to find a `visit_*()` method for the given `node.type`, like
+        `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects.
+        If no dedicated `visit_*()` method is found, chooses `visit_default()`
+        instead.
+
+        Then yields objects of type `T` from the selected visitor.
+        """
+        if node.type < 256:
+            name = token.tok_name[node.type]
+        else:
+            name = type_repr(node.type)
+        yield from getattr(self, f"visit_{name}", self.visit_default)(node)
+
+    def visit_default(self, node: LN) -> Iterator[T]:
+        """Default `visit_*()` implementation. Recurses to children of `node`."""
+        if isinstance(node, Node):
+            for child in node.children:
+                yield from self.visit(child)
+
+
+@dataclass
+class DebugVisitor(Visitor[T]):
+    tree_depth: int = 0
+
+    def visit_default(self, node: LN) -> Iterator[T]:
+        indent = " " * (2 * self.tree_depth)
+        if isinstance(node, Node):
+            _type = type_repr(node.type)
+            out(f"{indent}{_type}", fg="yellow")
+            self.tree_depth += 1
+            for child in node.children:
+                yield from self.visit(child)
+
+            self.tree_depth -= 1
+            out(f"{indent}/{_type}", fg="yellow", bold=False)
+        else:
+            _type = token.tok_name.get(node.type, str(node.type))
+            out(f"{indent}{_type}", fg="blue", nl=False)
+            if node.prefix:
+                # We don't have to handle prefixes for `Node` objects since
+                # that delegates to the first child anyway.
+                out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
+            out(f" {node.value!r}", fg="blue", bold=False)
+
+    @classmethod
+    def show(cls, code: Union[str, Leaf, Node]) -> None:
+        """Pretty-print the lib2to3 AST of a given string of `code`.
+
+        Convenience method for debugging.
+        """
+        v: DebugVisitor[None] = DebugVisitor()
+        if isinstance(code, str):
+            code = lib2to3_parse(code)
+        list(v.visit(code))
+
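+# Illustrative usage (for exposition only): pretty-print a parse tree while
+# debugging. Output is colored and written through out(), so it is skipped
+# in doctest form.
+#
+#     >>> DebugVisitor.show("x = 1\n")  # doctest: +SKIP
+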
+
+WHITESPACE = {token.DEDENT, token.INDENT, token.NEWLINE}
+STATEMENT = {
+    syms.if_stmt,
+    syms.while_stmt,
+    syms.for_stmt,
+    syms.try_stmt,
+    syms.except_clause,
+    syms.with_stmt,
+    syms.funcdef,
+    syms.classdef,
+}
+STANDALONE_COMMENT = 153
+token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT"
+LOGIC_OPERATORS = {"and", "or"}
+COMPARATORS = {
+    token.LESS,
+    token.GREATER,
+    token.EQEQUAL,
+    token.NOTEQUAL,
+    token.LESSEQUAL,
+    token.GREATEREQUAL,
+}
+MATH_OPERATORS = {
+    token.VBAR,
+    token.CIRCUMFLEX,
+    token.AMPER,
+    token.LEFTSHIFT,
+    token.RIGHTSHIFT,
+    token.PLUS,
+    token.MINUS,
+    token.STAR,
+    token.SLASH,
+    token.DOUBLESLASH,
+    token.PERCENT,
+    token.AT,
+    token.TILDE,
+    token.DOUBLESTAR,
+}
+STARS = {token.STAR, token.DOUBLESTAR}
+VARARGS_SPECIALS = STARS | {token.SLASH}
+VARARGS_PARENTS = {
+    syms.arglist,
+    syms.argument,  # double star in arglist
+    syms.trailer,  # single argument to call
+    syms.typedargslist,
+    syms.varargslist,  # lambdas
+}
+UNPACKING_PARENTS = {
+    syms.atom,  # single element of a list or set literal
+    syms.dictsetmaker,
+    syms.listmaker,
+    syms.testlist_gexp,
+    syms.testlist_star_expr,
+}
+TEST_DESCENDANTS = {
+    syms.test,
+    syms.lambdef,
+    syms.or_test,
+    syms.and_test,
+    syms.not_test,
+    syms.comparison,
+    syms.star_expr,
+    syms.expr,
+    syms.xor_expr,
+    syms.and_expr,
+    syms.shift_expr,
+    syms.arith_expr,
+    syms.trailer,
+    syms.term,
+    syms.power,
+}
+ASSIGNMENTS = {
+    "=",
+    "+=",
+    "-=",
+    "*=",
+    "@=",
+    "/=",
+    "%=",
+    "&=",
+    "|=",
+    "^=",
+    "<<=",
+    ">>=",
+    "**=",
+    "//=",
+}
+COMPREHENSION_PRIORITY = 20
+COMMA_PRIORITY = 18
+TERNARY_PRIORITY = 16
+LOGIC_PRIORITY = 14
+STRING_PRIORITY = 12
+COMPARATOR_PRIORITY = 10
+MATH_PRIORITIES = {
+    token.VBAR: 9,
+    token.CIRCUMFLEX: 8,
+    token.AMPER: 7,
+    token.LEFTSHIFT: 6,
+    token.RIGHTSHIFT: 6,
+    token.PLUS: 5,
+    token.MINUS: 5,
+    token.STAR: 4,
+    token.SLASH: 4,
+    token.DOUBLESLASH: 4,
+    token.PERCENT: 4,
+    token.AT: 4,
+    token.TILDE: 3,
+    token.DOUBLESTAR: 2,
+}
+DOT_PRIORITY = 1
+
+
+@dataclass
+class BracketTracker:
+    """Keeps track of brackets on a line."""
+
+    depth: int = 0
+    bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = Factory(dict)
+    delimiters: Dict[LeafID, Priority] = Factory(dict)
+    previous: Optional[Leaf] = None
+    _for_loop_depths: List[int] = Factory(list)
+    _lambda_argument_depths: List[int] = Factory(list)
+
+    def mark(self, leaf: Leaf) -> None:
+        """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
+
+        All leaves receive an int `bracket_depth` field that stores how deep
+        within brackets a given leaf is. 0 means there are no enclosing brackets
+        that started on this line.
+
+        If a leaf is itself a closing bracket, it receives an `opening_bracket`
+        field that it forms a pair with. This is a one-directional link to
+        avoid reference cycles.
+
+        If a leaf is a delimiter (a token on which Black can split the line if
+        needed) and it's on depth 0, its `id()` is stored in the tracker's
+        `delimiters` field.
+        """
+        if leaf.type == token.COMMENT:
+            return
+
+        self.maybe_decrement_after_for_loop_variable(leaf)
+        self.maybe_decrement_after_lambda_arguments(leaf)
+        if leaf.type in CLOSING_BRACKETS:
+            self.depth -= 1
+            opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
+            leaf.opening_bracket = opening_bracket
+        leaf.bracket_depth = self.depth
+        if self.depth == 0:
+            delim = is_split_before_delimiter(leaf, self.previous)
+            if delim and self.previous is not None:
+                self.delimiters[id(self.previous)] = delim
+            else:
+                delim = is_split_after_delimiter(leaf, self.previous)
+                if delim:
+                    self.delimiters[id(leaf)] = delim
+        if leaf.type in OPENING_BRACKETS:
+            self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
+            self.depth += 1
+        self.previous = leaf
+        self.maybe_increment_lambda_arguments(leaf)
+        self.maybe_increment_for_loop_variable(leaf)
+
+    def any_open_brackets(self) -> bool:
+        """Return True if there is an yet unmatched open bracket on the line."""
+        return bool(self.bracket_match)
+
+    def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
+        """Return the highest priority of a delimiter found on the line.
+
+        Values are consistent with what `is_split_*_delimiter()` return.
+        Raises ValueError on no delimiters.
+        """
+        return max(v for k, v in self.delimiters.items() if k not in exclude)
+
+    def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
+        """Return the number of delimiters with the given `priority`.
+
+        If no `priority` is passed, defaults to max priority on the line.
+        """
+        if not self.delimiters:
+            return 0
+
+        priority = priority or self.max_delimiter_priority()
+        return sum(1 for p in self.delimiters.values() if p == priority)
+
+    def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
+        """In a for loop, or comprehension, the variables are often unpacks.
+
+        To avoid splitting on the comma in this situation, increase the depth of
+        tokens between `for` and `in`.
+        """
+        if leaf.type == token.NAME and leaf.value == "for":
+            self.depth += 1
+            self._for_loop_depths.append(self.depth)
+            return True
+
+        return False
+
+    def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
+        """See `maybe_increment_for_loop_variable` above for explanation."""
+        if (
+            self._for_loop_depths
+            and self._for_loop_depths[-1] == self.depth
+            and leaf.type == token.NAME
+            and leaf.value == "in"
+        ):
+            self.depth -= 1
+            self._for_loop_depths.pop()
+            return True
+
+        return False
+
+    def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
+        """In a lambda expression, there might be more than one argument.
+
+        To avoid splitting on the comma in this situation, increase the depth of
+        tokens between `lambda` and `:`.
+        """
+        if leaf.type == token.NAME and leaf.value == "lambda":
+            self.depth += 1
+            self._lambda_argument_depths.append(self.depth)
+            return True
+
+        return False
+
+    def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
+        """See `maybe_increment_lambda_arguments` above for explanation."""
+        if (
+            self._lambda_argument_depths
+            and self._lambda_argument_depths[-1] == self.depth
+            and leaf.type == token.COLON
+        ):
+            self.depth -= 1
+            self._lambda_argument_depths.pop()
+            return True
+
+        return False
+
+    def get_open_lsqb(self) -> Optional[Leaf]:
+        """Return the most recent opening square bracket (if any)."""
+        return self.bracket_match.get((self.depth - 1, token.RSQB))
+
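+# Illustrative, doctest-style sketch (for exposition only; assumes
+# is_split_after_delimiter, defined later in this file, reports
+# COMMA_PRIORITY for commas): depth-0 commas become recorded delimiters,
+# and a fully marked line leaves the tracker with no open brackets.
+#
+#     >>> bt = BracketTracker()
+#     >>> for leaf in lib2to3_parse("a, b").leaves():
+#     ...     bt.mark(leaf)
+#     >>> bt.max_delimiter_priority() == COMMA_PRIORITY
+#     True
+#     >>> bt.any_open_brackets()
+#     False
+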
+
+@dataclass
+class Line:
+    """Holds leaves and comments. Can be printed with `str(line)`."""
+
+    depth: int = 0
+    leaves: List[Leaf] = Factory(list)
+    comments: Dict[LeafID, List[Leaf]] = Factory(dict)  # keys ordered like `leaves`
+    bracket_tracker: BracketTracker = Factory(BracketTracker)
+    inside_brackets: bool = False
+    should_explode: bool = False
+
+    def append(self, leaf: Leaf, preformatted: bool = False) -> None:
+        """Add a new `leaf` to the end of the line.
+
+        Unless `preformatted` is True, the `leaf` will receive a new consistent
+        whitespace prefix and metadata applied by :class:`BracketTracker`.
+        Trailing commas are maybe removed, unpacked for loop variables are
+        demoted from being delimiters.
+
+        Inline comments are put aside.
+        """
+        has_value = leaf.type in BRACKETS or bool(leaf.value.strip())
+        if not has_value:
+            return
+
+        if token.COLON == leaf.type and self.is_class_paren_empty:
+            del self.leaves[-2:]
+        if self.leaves and not preformatted:
+            # Note: at this point leaf.prefix should be empty except for
+            # imports, for which we only preserve newlines.
+            leaf.prefix += whitespace(
+                leaf, complex_subscript=self.is_complex_subscript(leaf)
+            )
+        if self.inside_brackets or not preformatted:
+            self.bracket_tracker.mark(leaf)
+            self.maybe_remove_trailing_comma(leaf)
+        if not self.append_comment(leaf):
+            self.leaves.append(leaf)
+
+    def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
+        """Like :func:`append()` but disallow invalid standalone comment structure.
+
+        Raises ValueError when any `leaf` is appended after a standalone comment
+        or when a standalone comment is not the first leaf on the line.
+        """
+        if self.bracket_tracker.depth == 0:
+            if self.is_comment:
+                raise ValueError("cannot append to standalone comments")
+
+            if self.leaves and leaf.type == STANDALONE_COMMENT:
+                raise ValueError(
+                    "cannot append standalone comments to a populated line"
+                )
+
+        self.append(leaf, preformatted=preformatted)
+
+    @property
+    def is_comment(self) -> bool:
+        """Is this line a standalone comment?"""
+        return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
+
+    @property
+    def is_decorator(self) -> bool:
+        """Is this line a decorator?"""
+        return bool(self) and self.leaves[0].type == token.AT
+
+    @property
+    def is_import(self) -> bool:
+        """Is this an import line?"""
+        return bool(self) and is_import(self.leaves[0])
+
+    @property
+    def is_class(self) -> bool:
+        """Is this line a class definition?"""
+        return (
+            bool(self)
+            and self.leaves[0].type == token.NAME
+            and self.leaves[0].value == "class"
+        )
+
+    @property
+    def is_stub_class(self) -> bool:
+        """Is this line a class definition with a body consisting only of "..."?"""
+        return self.is_class and self.leaves[-3:] == [
+            Leaf(token.DOT, ".") for _ in range(3)
+        ]
+
+    @property
+    def is_collection_with_optional_trailing_comma(self) -> bool:
+        """Is this line a collection literal with a trailing comma that's optional?
+
+        Note that the trailing comma in a 1-tuple is not optional.
+        """
+        if len(self.leaves) < 4:
+            return False
+        # Look for and address a trailing colon.
+        if self.leaves[-1].type == token.COLON:
+            closer = self.leaves[-2]
+            close_index = -2
+        else:
+            closer = self.leaves[-1]
+            close_index = -1
+        if closer.type not in CLOSING_BRACKETS or self.inside_brackets:
+            return False
+        if closer.type == token.RPAR:
+            # Tuples require an extra check, because if there's only
+            # one element in the tuple removing the comma unmakes the
+            # tuple.
+            #
+            # We also check for parens before looking for the trailing
+            # comma because in some cases (eg assigning a dict
+            # literal) the literal gets wrapped in temporary parens
+            # during parsing. This case is covered by the
+            # collections.py test data.
+            opener = closer.opening_bracket
+            for _open_index, leaf in enumerate(self.leaves):
+                if leaf is opener:
+                    break
+            else:
+                # Couldn't find the matching opening paren, play it safe.
+                return False
+            commas = 0
+            comma_depth = self.leaves[close_index - 1].bracket_depth
+            for leaf in self.leaves[_open_index + 1 : close_index]:
+                if leaf.bracket_depth == comma_depth and leaf.type == token.COMMA:
+                    commas += 1
+            if commas > 1:
+                # We haven't looked yet for the trailing comma because
+                # we might also have caught noop parens.
+                return self.leaves[close_index - 1].type == token.COMMA
+            elif commas == 1:
+                return False  # it's either a one-tuple or didn't have a trailing comma
+            if self.leaves[close_index - 1].type in CLOSING_BRACKETS:
+                close_index -= 1
+                closer = self.leaves[close_index]
+                if closer.type == token.RPAR:
+                    # TODO: this is a gut feeling. Will we ever see this?
+                    return False
+        if self.leaves[close_index - 1].type != token.COMMA:
+            return False
+        return True
+
+    @property
+    def is_def(self) -> bool:
+        """Is this a function definition? (Also returns True for async defs.)"""
+        try:
+            first_leaf = self.leaves[0]
+        except IndexError:
+            return False
+
+        try:
+            second_leaf: Optional[Leaf] = self.leaves[1]
+        except IndexError:
+            second_leaf = None
+        return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
+            first_leaf.type == token.ASYNC
+            and second_leaf is not None
+            and second_leaf.type == token.NAME
+            and second_leaf.value == "def"
+        )
+
+    @property
+    def is_class_paren_empty(self) -> bool:
+        """Is this a class with no base classes but using parentheses?
+
+        Those are unnecessary and should be removed.
+        """
+        return (
+            bool(self)
+            and len(self.leaves) == 4
+            and self.is_class
+            and self.leaves[2].type == token.LPAR
+            and self.leaves[2].value == "("
+            and self.leaves[3].type == token.RPAR
+            and self.leaves[3].value == ")"
+        )
+
+    @property
+    def is_triple_quoted_string(self) -> bool:
+        """Is the line a triple quoted string?"""
+        return (
+            bool(self)
+            and self.leaves[0].type == token.STRING
+            and self.leaves[0].value.startswith(('"""', "'''"))
+        )
+
+    def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
+        """If so, needs to be split before emitting."""
+        for leaf in self.leaves:
+            if leaf.type == STANDALONE_COMMENT:
+                if leaf.bracket_depth <= depth_limit:
+                    return True
+        return False
+
+    def contains_uncollapsable_type_comments(self) -> bool:
+        ignored_ids = set()
+        try:
+            last_leaf = self.leaves[-1]
+            ignored_ids.add(id(last_leaf))
+            if last_leaf.type == token.COMMA or (
+                last_leaf.type == token.RPAR and not last_leaf.value
+            ):
+                # When trailing commas or optional parens are inserted by Black for
+                # consistency, comments after the previous last element are not moved
+                # (they don't have to be; rendering will still be correct).  So we
+                # ignore trailing commas and invisible parens here.
+                last_leaf = self.leaves[-2]
+                ignored_ids.add(id(last_leaf))
+        except IndexError:
+            return False
+
+        # A type comment is uncollapsable if it is attached to a leaf
+        # that isn't at the end of the line (since that could cause it
+        # to get associated to a different argument) or if there are
+        # comments before it (since that could cause it to get hidden
+        # behind a comment).
+        comment_seen = False
+        for leaf_id, comments in self.comments.items():
+            for comment in comments:
+                if is_type_comment(comment):
+                    if leaf_id not in ignored_ids or comment_seen:
+                        return True
+
+            comment_seen = True
+
+        return False
+
+    def contains_unsplittable_type_ignore(self) -> bool:
+        if not self.leaves:
+            return False
+
+        # If a 'type: ignore' is attached to the end of a line, we
+        # can't split the line, because we can't know which of the
+        # subexpressions the ignore was meant to apply to.
+        #
+        # We only want this to apply to actual physical lines from the
+        # original source, though: we don't want the presence of a
+        # 'type: ignore' at the end of a multiline expression to
+        # justify pushing it all onto one line. Thus we
+        # (unfortunately) need to check the actual source lines and
+        # only report an unsplittable 'type: ignore' if this line was
+        # one line in the original code.
+        if self.leaves[0].lineno == self.leaves[-1].lineno:
+            for comment in self.comments.get(id(self.leaves[-1]), []):
+                if is_type_comment(comment, " ignore"):
+                    return True
+
+        return False
+
+    def contains_multiline_strings(self) -> bool:
+        for leaf in self.leaves:
+            if is_multiline_string(leaf):
+                return True
+
+        return False
+
+    def maybe_remove_trailing_comma(self, closing: Leaf) -> bool:
+        """Remove trailing comma if there is one and it's safe."""
+        if not (self.leaves and self.leaves[-1].type == token.COMMA):
+            return False
+        # We remove trailing commas only in the case of importing a
+        # single name from a module.
+        if not (
+            self.leaves
+            and self.is_import
+            and len(self.leaves) > 4
+            and self.leaves[-1].type == token.COMMA
+            and closing.type in CLOSING_BRACKETS
+            and self.leaves[-4].type == token.NAME
+            and (
+                # regular `from foo import bar,`
+                self.leaves[-4].value == "import"
+                # `from foo import (bar as baz,)`
+                or (
+                    len(self.leaves) > 6
+                    and self.leaves[-6].value == "import"
+                    and self.leaves[-3].value == "as"
+                )
+                # `from foo import bar as baz,`
+                or (
+                    len(self.leaves) > 5
+                    and self.leaves[-5].value == "import"
+                    and self.leaves[-3].value == "as"
+                )
+            )
+            and closing.type == token.RPAR
+        ):
+            return False
+
+        self.remove_trailing_comma()
+        return True
+
+    def append_comment(self, comment: Leaf) -> bool:
+        """Add an inline or standalone comment to the line."""
+        if (
+            comment.type == STANDALONE_COMMENT
+            and self.bracket_tracker.any_open_brackets()
+        ):
+            comment.prefix = ""
+            return False
+
+        if comment.type != token.COMMENT:
+            return False
+
+        if not self.leaves:
+            comment.type = STANDALONE_COMMENT
+            comment.prefix = ""
+            return False
+
+        last_leaf = self.leaves[-1]
+        if (
+            last_leaf.type == token.RPAR
+            and not last_leaf.value
+            and last_leaf.parent
+            and len(list(last_leaf.parent.leaves())) <= 3
+            and not is_type_comment(comment)
+        ):
+            # Comments on optional parens wrapping a single leaf should belong to
+            # the wrapped node, except if it's a type comment. Pinning the comment
+            # like this avoids unstable formatting caused by comment migration.
+            if len(self.leaves) < 2:
+                comment.type = STANDALONE_COMMENT
+                comment.prefix = ""
+                return False
+            last_leaf = self.leaves[-2]
+        self.comments.setdefault(id(last_leaf), []).append(comment)
+        return True
+
+    def comments_after(self, leaf: Leaf) -> List[Leaf]:
+        """Generate comments that should appear directly after `leaf`."""
+        return self.comments.get(id(leaf), [])
+
+    def remove_trailing_comma(self) -> None:
+        """Remove the trailing comma and moves the comments attached to it."""
+        trailing_comma = self.leaves.pop()
+        trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
+        self.comments.setdefault(id(self.leaves[-1]), []).extend(
+            trailing_comma_comments
+        )
+
+    def is_complex_subscript(self, leaf: Leaf) -> bool:
+        """Return True iff `leaf` is part of a slice with non-trivial exprs."""
+        open_lsqb = self.bracket_tracker.get_open_lsqb()
+        if open_lsqb is None:
+            return False
+
+        subscript_start = open_lsqb.next_sibling
+
+        if isinstance(subscript_start, Node):
+            if subscript_start.type == syms.listmaker:
+                return False
+
+            if subscript_start.type == syms.subscriptlist:
+                subscript_start = child_towards(subscript_start, leaf)
+        return subscript_start is not None and any(
+            n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
+        )
+
+    def __str__(self) -> str:
+        """Render the line."""
+        if not self:
+            return "\n"
+
+        indent = "    " * self.depth
+        leaves = iter(self.leaves)
+        first = next(leaves)
+        res = f"{first.prefix}{indent}{first.value}"
+        for leaf in leaves:
+            res += str(leaf)
+        for comment in itertools.chain.from_iterable(self.comments.values()):
+            res += str(comment)
+        return res + "\n"
+
+    def __bool__(self) -> bool:
+        """Return True if the line has leaves or comments."""
+        return bool(self.leaves or self.comments)
+
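+# Illustrative, doctest-style sketch (for exposition only): an empty Line
+# renders as a bare newline, and a hand-made leaf renders followed by one.
+#
+#     >>> str(Line())
+#     '\n'
+#     >>> line = Line()
+#     >>> line.append(Leaf(token.NAME, "pass"))
+#     >>> str(line)
+#     'pass\n'
+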
+
+@dataclass
+class EmptyLineTracker:
+    """Provides a stateful method that returns the number of potential extra
+    empty lines needed before and after the currently processed line.
+
+    Note: this tracker works on lines that haven't been split yet.  It assumes
+    the prefix of the first leaf consists of optional newlines.  Those newlines
+    are consumed by `maybe_empty_lines()` and included in the computation.
+    """
+
+    is_pyi: bool = False
+    previous_line: Optional[Line] = None
+    previous_after: int = 0
+    previous_defs: List[int] = Factory(list)
+
+    def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
+        """Return the number of extra empty lines before and after the `current_line`.
+
+        This is for separating `def`, `async def` and `class` with extra empty
+        lines (two on module-level).
+        """
+        before, after = self._maybe_empty_lines(current_line)
+        before = (
+            # Black should not insert empty lines at the beginning
+            # of the file
+            0
+            if self.previous_line is None
+            else before - self.previous_after
+        )
+        self.previous_after = after
+        self.previous_line = current_line
+        return before, after
+
+    def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
+        max_allowed = 1
+        if current_line.depth == 0:
+            max_allowed = 1 if self.is_pyi else 2
+        if current_line.leaves:
+            # Consume the first leaf's extra newlines.
+            first_leaf = current_line.leaves[0]
+            before = first_leaf.prefix.count("\n")
+            before = min(before, max_allowed)
+            first_leaf.prefix = ""
+        else:
+            before = 0
+        depth = current_line.depth
+        while self.previous_defs and self.previous_defs[-1] >= depth:
+            self.previous_defs.pop()
+            if self.is_pyi:
+                before = 0 if depth else 1
+            else:
+                before = 1 if depth else 2
+        if current_line.is_decorator or current_line.is_def or current_line.is_class:
+            return self._maybe_empty_lines_for_class_or_def(current_line, before)
+
+        if (
+            self.previous_line
+            and self.previous_line.is_import
+            and not current_line.is_import
+            and depth == self.previous_line.depth
+        ):
+            return (before or 1), 0
+
+        if (
+            self.previous_line
+            and self.previous_line.is_class
+            and current_line.is_triple_quoted_string
+        ):
+            return before, 1
+
+        return before, 0
+
+    def _maybe_empty_lines_for_class_or_def(
+        self, current_line: Line, before: int
+    ) -> Tuple[int, int]:
+        if not current_line.is_decorator:
+            self.previous_defs.append(current_line.depth)
+        if self.previous_line is None:
+            # Don't insert empty lines before the first line in the file.
+            return 0, 0
+
+        if self.previous_line.is_decorator:
+            return 0, 0
+
+        if self.previous_line.depth < current_line.depth and (
+            self.previous_line.is_class or self.previous_line.is_def
+        ):
+            return 0, 0
+
+        if (
+            self.previous_line.is_comment
+            and self.previous_line.depth == current_line.depth
+            and before == 0
+        ):
+            return 0, 0
+
+        if self.is_pyi:
+            if self.previous_line.depth > current_line.depth:
+                newlines = 1
+            elif current_line.is_class or self.previous_line.is_class:
+                if current_line.is_stub_class and self.previous_line.is_stub_class:
+                    # No blank line between classes with an empty body
+                    newlines = 0
+                else:
+                    newlines = 1
+            elif current_line.is_def and not self.previous_line.is_def:
+                # Blank line between a block of functions and a block of non-functions
+                newlines = 1
+            else:
+                newlines = 0
+        else:
+            newlines = 2
+        if current_line.depth and newlines:
+            newlines -= 1
+        return newlines, 0
+
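+# Illustrative, doctest-style sketch of the two-blank-lines rule for
+# module-level defs (for exposition only; assumes the LineGenerator defined
+# below and default, non-pyi settings):
+#
+#     >>> elt = EmptyLineTracker()
+#     >>> src = lib2to3_parse("x = 1\ndef f():\n    pass\n")
+#     >>> [elt.maybe_empty_lines(line) for line in LineGenerator().visit(src)]
+#     [(0, 0), (2, 0), (0, 0)]
+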
+
+@dataclass
+class LineGenerator(Visitor[Line]):
+    """Generates reformatted Line objects.  Empty lines are not emitted.
+
+    Note: destroys the tree it's visiting by mutating prefixes of its leaves,
+    so the tree will no longer stringify back to valid Python code.
+    """
+
+    is_pyi: bool = False
+    normalize_strings: bool = True
+    current_line: Line = Factory(Line)
+    remove_u_prefix: bool = False
+
+    def line(self, indent: int = 0) -> Iterator[Line]:
+        """Generate a line.
+
+        If the line is empty, only emit if it makes sense.
+        If the line is too long, split it first and then generate.
+
+        If any lines were generated, set up a new current_line.
+        """
+        if not self.current_line:
+            self.current_line.depth += indent
+            return  # Line is empty, don't emit. Creating a new one is unnecessary.
+
+        complete_line = self.current_line
+        self.current_line = Line(depth=complete_line.depth + indent)
+        yield complete_line
+
+    def visit_default(self, node: LN) -> Iterator[Line]:
+        """Default `visit_*()` implementation. Recurses to children of `node`."""
+        if isinstance(node, Leaf):
+            any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
+            for comment in generate_comments(node):
+                if any_open_brackets:
+                    # any comment within brackets is subject to splitting
+                    self.current_line.append(comment)
+                elif comment.type == token.COMMENT:
+                    # regular trailing comment
+                    self.current_line.append(comment)
+                    yield from self.line()
+
+                else:
+                    # regular standalone comment
+                    yield from self.line()
+
+                    self.current_line.append(comment)
+                    yield from self.line()
+
+            normalize_prefix(node, inside_brackets=any_open_brackets)
+            if self.normalize_strings and node.type == token.STRING:
+                normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix)
+                normalize_string_quotes(node)
+            if node.type == token.NUMBER:
+                normalize_numeric_literal(node)
+            if node.type not in WHITESPACE:
+                self.current_line.append(node)
+        yield from super().visit_default(node)
+
+    def visit_atom(self, node: Node) -> Iterator[Line]:
+        # Always make parentheses invisible around a single node, because it should
+        # not be needed (except in the case of yield, where removing the parentheses
+        # produces a SyntaxError).
+        if (
+            len(node.children) == 3
+            and isinstance(node.children[0], Leaf)
+            and node.children[0].type == token.LPAR
+            and isinstance(node.children[2], Leaf)
+            and node.children[2].type == token.RPAR
+            and isinstance(node.children[1], Leaf)
+            and not (
+                node.children[1].type == token.NAME
+                and node.children[1].value == "yield"
+            )
+        ):
+            node.children[0].value = ""
+            node.children[2].value = ""
+        yield from super().visit_default(node)
+
+    def visit_factor(self, node: Node) -> Iterator[Line]:
+        """Force parentheses between a unary op and a binary power:
+
+        -2 ** 8 -> -(2 ** 8)
+        """
+        child = node.children[1]
+        if child.type == syms.power and len(child.children) == 3:
+            lpar = Leaf(token.LPAR, "(")
+            rpar = Leaf(token.RPAR, ")")
+            index = child.remove() or 0
+            node.insert_child(index, Node(syms.atom, [lpar, child, rpar]))
+        yield from self.visit_default(node)
+
+    def visit_INDENT(self, node: Node) -> Iterator[Line]:
+        """Increase indentation level, maybe yield a line."""
+        # In blib2to3 INDENT never holds comments.
+        yield from self.line(+1)
+        yield from self.visit_default(node)
+
+    def visit_DEDENT(self, node: Node) -> Iterator[Line]:
+        """Decrease indentation level, maybe yield a line."""
+        # The current line might still wait for trailing comments.  At DEDENT time
+        # there won't be any (they would be prefixes on the preceding NEWLINE).
+        # Emit the line then.
+        yield from self.line()
+
+        # While DEDENT has no value, its prefix may contain standalone comments
+        # that belong to the current indentation level.  Get 'em.
+        yield from self.visit_default(node)
+
+        # Finally, emit the dedent.
+        yield from self.line(-1)
+
+    def visit_stmt(
+        self, node: Node, keywords: Set[str], parens: Set[str]
+    ) -> Iterator[Line]:
+        """Visit a statement.
+
+        This implementation is shared for `if`, `while`, `for`, `try`, `except`,
+        `def`, `with`, `class`, `assert` and assignments.
+
+        The relevant Python language `keywords` for a given statement will be
+        NAME leaves within it. This method puts those on a separate line.
+
+        `parens` holds a set of string leaf values immediately after which
+        invisible parens should be put.
+        """
+        normalize_invisible_parens(node, parens_after=parens)
+        for child in node.children:
+            if child.type == token.NAME and child.value in keywords:  # type: ignore
+                yield from self.line()
+
+            yield from self.visit(child)
+
+    def visit_suite(self, node: Node) -> Iterator[Line]:
+        """Visit a suite."""
+        if self.is_pyi and is_stub_suite(node):
+            yield from self.visit(node.children[2])
+        else:
+            yield from self.visit_default(node)
+
+    def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
+        """Visit a statement without nested statements."""
+        is_suite_like = node.parent and node.parent.type in STATEMENT
+        if is_suite_like:
+            if self.is_pyi and is_stub_body(node):
+                yield from self.visit_default(node)
+            else:
+                yield from self.line(+1)
+                yield from self.visit_default(node)
+                yield from self.line(-1)
+
+        else:
+            if not self.is_pyi or not node.parent or not is_stub_suite(node.parent):
+                yield from self.line()
+            yield from self.visit_default(node)
+
+    def visit_async_stmt(self, node: Node) -> Iterator[Line]:
+        """Visit `async def`, `async for`, `async with`."""
+        yield from self.line()
+
+        children = iter(node.children)
+        for child in children:
+            yield from self.visit(child)
+
+            if child.type == token.ASYNC:
+                break
+
+        internal_stmt = next(children)
+        for child in internal_stmt.children:
+            yield from self.visit(child)
+
+    def visit_decorators(self, node: Node) -> Iterator[Line]:
+        """Visit decorators."""
+        for child in node.children:
+            yield from self.line()
+            yield from self.visit(child)
+
+    def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
+        """Remove a semicolon and put the other statement on a separate line."""
+        yield from self.line()
+
+    def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
+        """End of file. Process outstanding comments and end with a newline."""
+        yield from self.visit_default(leaf)
+        yield from self.line()
+
+    def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
+        if not self.current_line.bracket_tracker.any_open_brackets():
+            yield from self.line()
+        yield from self.visit_default(leaf)
+
+    def __attrs_post_init__(self) -> None:
+        """You are in a twisty little maze of passages."""
+        v = self.visit_stmt
+        Ø: Set[str] = set()
+        self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
+        self.visit_if_stmt = partial(
+            v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
+        )
+        self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
+        self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
+        self.visit_try_stmt = partial(
+            v, keywords={"try", "except", "else", "finally"}, parens=Ø
+        )
+        self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
+        self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
+        self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
+        self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
+        self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
+        self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
+        self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
+        self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
+        self.visit_async_funcdef = self.visit_async_stmt
+        self.visit_decorated = self.visit_decorators
+
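+# Illustrative, doctest-style sketch (for exposition only): visiting a parsed
+# tree yields one Line per logical line, with indentation encoded in `depth`.
+#
+#     >>> src = lib2to3_parse("if x:\n    pass\n")
+#     >>> [str(line) for line in LineGenerator().visit(src)]
+#     ['if x:\n', '    pass\n']
+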
+
+IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist}
+BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
+OPENING_BRACKETS = set(BRACKET.keys())
+CLOSING_BRACKETS = set(BRACKET.values())
+BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
+ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}
+
+
+def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str:  # noqa: C901
+    """Return whitespace prefix if needed for the given `leaf`.
+
+    `complex_subscript` signals whether the given leaf is part of a subscription
+    which has non-trivial arguments, like arithmetic expressions or function calls.
+    """
+    NO = ""
+    SPACE = " "
+    DOUBLESPACE = "  "
+    t = leaf.type
+    p = leaf.parent
+    v = leaf.value
+    if t in ALWAYS_NO_SPACE:
+        return NO
+
+    if t == token.COMMENT:
+        return DOUBLESPACE
+
+    assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
+    if t == token.COLON and p.type not in {
+        syms.subscript,
+        syms.subscriptlist,
+        syms.sliceop,
+    }:
+        return NO
+
+    prev = leaf.prev_sibling
+    if not prev:
+        prevp = preceding_leaf(p)
+        if not prevp or prevp.type in OPENING_BRACKETS:
+            return NO
+
+        if t == token.COLON:
+            if prevp.type == token.COLON:
+                return NO
+
+            elif prevp.type != token.COMMA and not complex_subscript:
+                return NO
+
+            return SPACE
+
+        if prevp.type == token.EQUAL:
+            if prevp.parent:
+                if prevp.parent.type in {
+                    syms.arglist,
+                    syms.argument,
+                    syms.parameters,
+                    syms.varargslist,
+                }:
+                    return NO
+
+                elif prevp.parent.type == syms.typedargslist:
+                    # A bit hacky: if the equal sign has whitespace, it means we
+                    # previously found it's a typed argument.  So, we're using
+                    # that, too.
+                    return prevp.prefix
+
+        elif prevp.type in VARARGS_SPECIALS:
+            if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS):
+                return NO
+
+        elif prevp.type == token.COLON:
+            if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
+                return SPACE if complex_subscript else NO
+
+        elif (
+            prevp.parent
+            and prevp.parent.type == syms.factor
+            and prevp.type in MATH_OPERATORS
+        ):
+            return NO
+
+        elif (
+            prevp.type == token.RIGHTSHIFT
+            and prevp.parent
+            and prevp.parent.type == syms.shift_expr
+            and prevp.prev_sibling
+            and prevp.prev_sibling.type == token.NAME
+            and prevp.prev_sibling.value == "print"  # type: ignore
+        ):
+            # Python 2 print chevron
+            return NO
+
+    elif prev.type in OPENING_BRACKETS:
+        return NO
+
+    if p.type in {syms.parameters, syms.arglist}:
+        # untyped function signatures or calls
+        if not prev or prev.type != token.COMMA:
+            return NO
+
+    elif p.type == syms.varargslist:
+        # lambdas
+        if prev and prev.type != token.COMMA:
+            return NO
+
+    elif p.type == syms.typedargslist:
+        # typed function signatures
+        if not prev:
+            return NO
+
+        if t == token.EQUAL:
+            if prev.type != syms.tname:
+                return NO
+
+        elif prev.type == token.EQUAL:
+            # A bit hacky: if the equal sign has whitespace, it means we
+            # previously found it's a typed argument.  So, we're using that, too.
+            return prev.prefix
+
+        elif prev.type != token.COMMA:
+            return NO
+
+    elif p.type == syms.tname:
+        # type names
+        if not prev:
+            prevp = preceding_leaf(p)
+            if not prevp or prevp.type != token.COMMA:
+                return NO
+
+    elif p.type == syms.trailer:
+        # attributes and calls
+        if t == token.LPAR or t == token.RPAR:
+            return NO
+
+        if not prev:
+            if t == token.DOT:
+                prevp = preceding_leaf(p)
+                if not prevp or prevp.type != token.NUMBER:
+                    return NO
+
+            elif t == token.LSQB:
+                return NO
+
+        elif prev.type != token.COMMA:
+            return NO
+
+    elif p.type == syms.argument:
+        # single argument
+        if t == token.EQUAL:
+            return NO
+
+        if not prev:
+            prevp = preceding_leaf(p)
+            if not prevp or prevp.type == token.LPAR:
+                return NO
+
+        elif prev.type in {token.EQUAL} | VARARGS_SPECIALS:
+            return NO
+
+    elif p.type == syms.decorator:
+        # decorators
+        return NO
+
+    elif p.type == syms.dotted_name:
+        if prev:
+            return NO
+
+        prevp = preceding_leaf(p)
+        if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
+            return NO
+
+    elif p.type == syms.classdef:
+        if t == token.LPAR:
+            return NO
+
+        if prev and prev.type == token.LPAR:
+            return NO
+
+    elif p.type in {syms.subscript, syms.sliceop}:
+        # indexing
+        if not prev:
+            assert p.parent is not None, "subscripts are always parented"
+            if p.parent.type == syms.subscriptlist:
+                return SPACE
+
+            return NO
+
+        elif not complex_subscript:
+            return NO
+
+    elif p.type == syms.atom:
+        if prev and t == token.DOT:
+            # dots, but not the first one.
+            return NO
+
+    elif p.type == syms.dictsetmaker:
+        # dict unpacking
+        if prev and prev.type == token.DOUBLESTAR:
+            return NO
+
+    elif p.type in {syms.factor, syms.star_expr}:
+        # unary ops
+        if not prev:
+            prevp = preceding_leaf(p)
+            if not prevp or prevp.type in OPENING_BRACKETS:
+                return NO
+
+            prevp_parent = prevp.parent
+            assert prevp_parent is not None
+            if prevp.type == token.COLON and prevp_parent.type in {
+                syms.subscript,
+                syms.sliceop,
+            }:
+                return NO
+
+            elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
+                return NO
+
+        elif t in {token.NAME, token.NUMBER, token.STRING}:
+            return NO
+
+    elif p.type == syms.import_from:
+        if t == token.DOT:
+            if prev and prev.type == token.DOT:
+                return NO
+
+        elif t == token.NAME:
+            if v == "import":
+                return SPACE
+
+            if prev and prev.type == token.DOT:
+                return NO
+
+    elif p.type == syms.sliceop:
+        return NO
+
+    return SPACE
+
+
+def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
+    """Return the first leaf that precedes `node`, if any."""
+    while node:
+        res = node.prev_sibling
+        if res:
+            if isinstance(res, Leaf):
+                return res
+
+            try:
+                return list(res.leaves())[-1]
+
+            except IndexError:
+                return None
+
+        node = node.parent
+    return None
+
+
+def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]:
+    """Return the child of `ancestor` that contains `descendant`."""
+    node: Optional[LN] = descendant
+    while node and node.parent != ancestor:
+        node = node.parent
+    return node
+
+
+def container_of(leaf: Leaf) -> LN:
+    """Return `leaf` or one of its ancestors that is the topmost container of it.
+
+    By "container" we mean a node where `leaf` is the very first child.
+    """
+    same_prefix = leaf.prefix
+    container: LN = leaf
+    while container:
+        parent = container.parent
+        if parent is None:
+            break
+
+        if parent.children[0].prefix != same_prefix:
+            break
+
+        if parent.type == syms.file_input:
+            break
+
+        if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
+            break
+
+        container = parent
+    return container
+
+
+def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
+    """Return the priority of the `leaf` delimiter, given a line break after it.
+
+    The delimiter priorities returned here are from those delimiters that would
+    cause a line break after themselves.
+
+    Higher numbers are higher priority.
+    """
+    if leaf.type == token.COMMA:
+        return COMMA_PRIORITY
+
+    return 0
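+
+
+# Editor's illustration (not part of upstream grey/Black): a minimal, runnable
+# sketch of the rule above -- only commas ask for a break after themselves.
+def _example_is_split_after_delimiter() -> None:
+    assert is_split_after_delimiter(Leaf(token.COMMA, ",")) == COMMA_PRIORITY
+    assert is_split_after_delimiter(Leaf(token.NAME, "x")) == 0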
+
+
+def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
+    """Return the priority of the `leaf` delimiter, given a line break before it.
+
+    The delimiter priorities returned here are from those delimiters that would
+    cause a line break before themselves.
+
+    Higher numbers are higher priority.
+    """
+    if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
+        # * and ** might also be MATH_OPERATORS but in this case they are not.
+        # Don't treat them as a delimiter.
+        return 0
+
+    if (
+        leaf.type == token.DOT
+        and leaf.parent
+        and leaf.parent.type not in {syms.import_from, syms.dotted_name}
+        and (previous is None or previous.type in CLOSING_BRACKETS)
+    ):
+        return DOT_PRIORITY
+
+    if (
+        leaf.type in MATH_OPERATORS
+        and leaf.parent
+        and leaf.parent.type not in {syms.factor, syms.star_expr}
+    ):
+        return MATH_PRIORITIES[leaf.type]
+
+    if leaf.type in COMPARATORS:
+        return COMPARATOR_PRIORITY
+
+    if (
+        leaf.type == token.STRING
+        and previous is not None
+        and previous.type == token.STRING
+    ):
+        return STRING_PRIORITY
+
+    if leaf.type not in {token.NAME, token.ASYNC}:
+        return 0
+
+    if (
+        leaf.value == "for"
+        and leaf.parent
+        and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
+        or leaf.type == token.ASYNC
+    ):
+        if (
+            not isinstance(leaf.prev_sibling, Leaf)
+            or leaf.prev_sibling.value != "async"
+        ):
+            return COMPREHENSION_PRIORITY
+
+    if (
+        leaf.value == "if"
+        and leaf.parent
+        and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
+    ):
+        return COMPREHENSION_PRIORITY
+
+    if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
+        return TERNARY_PRIORITY
+
+    if leaf.value == "is":
+        return COMPARATOR_PRIORITY
+
+    if (
+        leaf.value == "in"
+        and leaf.parent
+        and leaf.parent.type in {syms.comp_op, syms.comparison}
+        and not (
+            previous is not None
+            and previous.type == token.NAME
+            and previous.value == "not"
+        )
+    ):
+        return COMPARATOR_PRIORITY
+
+    if (
+        leaf.value == "not"
+        and leaf.parent
+        and leaf.parent.type == syms.comp_op
+        and not (
+            previous is not None
+            and previous.type == token.NAME
+            and previous.value == "is"
+        )
+    ):
+        return COMPARATOR_PRIORITY
+
+    if leaf.value in LOGIC_OPERATORS and leaf.parent:
+        return LOGIC_PRIORITY
+
+    return 0
+
+
+FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"}
+FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"}
+
+
+def generate_comments(leaf: LN) -> Iterator[Leaf]:
+    """Clean the prefix of the `leaf` and generate comments from it, if any.
+
+    Comments in lib2to3 are shoved into the whitespace prefix.  This happens
+    in `pgen2/driver.py:Driver.parse_tokens()`.  This was a brilliant implementation
+    move because it does away with modifying the grammar to include all the
+    possible places in which comments can be placed.
+
+    The sad consequence for us though is that comments don't "belong" anywhere.
+    This is why this function generates simple parentless Leaf objects for
+    comments.  We simply don't know what the correct parent should be.
+
+    No matter though, we can live without this.  We really only need to
+    differentiate between inline and standalone comments.  The latter don't
+    share the line with any code.
+
+    Inline comments are emitted as regular token.COMMENT leaves.  Standalone
+    are emitted with a fake STANDALONE_COMMENT token identifier.
+    """
+    for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
+        yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)
+
+
+@dataclass
+class ProtoComment:
+    """Describes a piece of syntax that is a comment.
+
+    It's not a :class:`blib2to3.pytree.Leaf` so that:
+
+    * it can be cached (`Leaf` objects should not be reused more than once as
+      they store their lineno, column, prefix, and parent information);
+    * `newlines` and `consumed` fields are kept separate from the `value`. This
+      simplifies handling of special marker comments like ``# fmt: off/on``.
+    """
+
+    type: int  # token.COMMENT or STANDALONE_COMMENT
+    value: str  # content of the comment
+    newlines: int  # how many newlines before the comment
+    consumed: int  # how many characters of the original leaf's prefix did we consume
+
+
+@lru_cache(maxsize=4096)
+def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
+    """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
+    result: List[ProtoComment] = []
+    if not prefix or "#" not in prefix:
+        return result
+
+    consumed = 0
+    nlines = 0
+    ignored_lines = 0
+    for index, line in enumerate(prefix.split("\n")):
+        consumed += len(line) + 1  # adding the length of the split '\n'
+        line = line.lstrip()
+        if not line:
+            nlines += 1
+        if not line.startswith("#"):
+            # Escaped newlines outside of a comment are not really newlines at
+            # all. We treat a single-line comment following an escaped newline
+            # as a simple trailing comment.
+            if line.endswith("\\"):
+                ignored_lines += 1
+            continue
+
+        if index == ignored_lines and not is_endmarker:
+            comment_type = token.COMMENT  # simple trailing comment
+        else:
+            comment_type = STANDALONE_COMMENT
+        comment = make_comment(line)
+        result.append(
+            ProtoComment(
+                type=comment_type, value=comment, newlines=nlines, consumed=consumed
+            )
+        )
+        nlines = 0
+    return result
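+
+
+# Editor's illustration (not part of upstream grey/Black): a comment on the
+# same line as code parses as a trailing COMMENT, while one preceded by blank
+# lines becomes a STANDALONE_COMMENT that remembers the newlines before it.
+def _example_list_comments() -> None:
+    trailing = list_comments("  # trailing", is_endmarker=False)
+    assert trailing[0].type == token.COMMENT
+    standalone = list_comments("\n\n# standalone\n", is_endmarker=False)
+    assert standalone[0].type == STANDALONE_COMMENT
+    assert standalone[0].newlines == 2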
+
+
+def make_comment(content: str) -> str:
+    """Return a consistently formatted comment from the given `content` string.
+
+    All comments (except for "##", "#!", "#:", "#'", "#%%") should have a single
+    space between the hash sign and the content.
+
+    If `content` didn't start with a hash sign, one is provided.
+    """
+    content = content.rstrip()
+    if not content:
+        return "#"
+
+    if content[0] == "#":
+        content = content[1:]
+    if content and content[0] not in " !:#'%":
+        content = " " + content
+    return "#" + content
+
+
+def split_line(
+    line: Line,
+    line_length: int,
+    inner: bool = False,
+    features: Collection[Feature] = (),
+) -> Iterator[Line]:
+    """Split a `line` into potentially many lines.
+
+    They should fit in the allotted `line_length` but might not be able to.
+    `inner` signifies that there was a pair of brackets somewhere around the
+    current `line`, possibly transitively. This means we can fall back to
+    splitting by delimiters if the LHS/RHS don't yield any results.
+
+    `features` are syntactical features that may be used in the output.
+    """
+    if line.is_comment:
+        yield line
+        return
+
+    line_str = str(line).strip("\n")
+
+    if (
+        not line.contains_uncollapsable_type_comments()
+        and not line.should_explode
+        and not line.is_collection_with_optional_trailing_comma
+        and (
+            is_line_short_enough(line, line_length=line_length, line_str=line_str)
+            or line.contains_unsplittable_type_ignore()
+        )
+    ):
+        yield line
+        return
+
+    split_funcs: List[SplitFunc]
+    if line.is_def:
+        split_funcs = [left_hand_split]
+    else:
+
+        def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]:
+            for omit in generate_trailers_to_omit(line, line_length):
+                lines = list(right_hand_split(line, line_length, features, omit=omit))
+                if is_line_short_enough(lines[0], line_length=line_length):
+                    yield from lines
+                    return
+
+            # All splits failed, best effort split with no omits.
+            # This mostly happens to multiline strings that are by definition
+            # reported as not fitting a single line.
+            yield from right_hand_split(line, line_length, features=features)
+
+        if line.inside_brackets:
+            split_funcs = [delimiter_split, standalone_comment_split, rhs]
+        else:
+            split_funcs = [rhs]
+    for split_func in split_funcs:
+        # We are accumulating lines in `result` because we might want to abort
+        # mission and return the original line in the end, or attempt a different
+        # split altogether.
+        result: List[Line] = []
+        try:
+            for l in split_func(line, features):
+                if str(l).strip("\n") == line_str:
+                    raise CannotSplit("Split function returned an unchanged result")
+
+                result.extend(
+                    split_line(
+                        l, line_length=line_length, inner=True, features=features
+                    )
+                )
+        except CannotSplit:
+            continue
+
+        else:
+            yield from result
+            break
+
+    else:
+        yield line
+
+
+def left_hand_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
+    """Split line into many lines, starting with the first matching bracket pair.
+
+    Note: this usually looks weird, only use this for function definitions.
+    Prefer RHS otherwise.  This is why this function is not symmetrical with
+    :func:`right_hand_split` which also handles optional parentheses.
+    """
+    tail_leaves: List[Leaf] = []
+    body_leaves: List[Leaf] = []
+    head_leaves: List[Leaf] = []
+    current_leaves = head_leaves
+    matching_bracket = None
+    for leaf in line.leaves:
+        if (
+            current_leaves is body_leaves
+            and leaf.type in CLOSING_BRACKETS
+            and leaf.opening_bracket is matching_bracket
+        ):
+            current_leaves = tail_leaves if body_leaves else head_leaves
+        current_leaves.append(leaf)
+        if current_leaves is head_leaves:
+            if leaf.type in OPENING_BRACKETS:
+                matching_bracket = leaf
+                current_leaves = body_leaves
+    if not matching_bracket:
+        raise CannotSplit("No brackets found")
+
+    head = bracket_split_build_line(head_leaves, line, matching_bracket)
+    body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
+    tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
+    bracket_split_succeeded_or_raise(head, body, tail)
+    for result in (head, body, tail):
+        if result:
+            yield result
+
+
+def right_hand_split(
+    line: Line,
+    line_length: int,
+    features: Collection[Feature] = (),
+    omit: Collection[LeafID] = (),
+) -> Iterator[Line]:
+    """Split line into many lines, starting with the last matching bracket pair.
+
+    If the split was by optional parentheses, attempt splitting without them, too.
+    `omit` is a collection of closing bracket IDs that shouldn't be considered for
+    this split.
+
+    Note: running this function modifies `bracket_depth` on the leaves of `line`.
+    """
+    tail_leaves: List[Leaf] = []
+    body_leaves: List[Leaf] = []
+    head_leaves: List[Leaf] = []
+    current_leaves = tail_leaves
+    opening_bracket = None
+    closing_bracket = None
+    for leaf in reversed(line.leaves):
+        if current_leaves is body_leaves:
+            if leaf is opening_bracket:
+                current_leaves = head_leaves if body_leaves else tail_leaves
+        current_leaves.append(leaf)
+        if current_leaves is tail_leaves:
+            if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
+                opening_bracket = leaf.opening_bracket
+                closing_bracket = leaf
+                current_leaves = body_leaves
+    if not (opening_bracket and closing_bracket and head_leaves):
+        # If there is no `opening_bracket` or `closing_bracket`, the split failed and
+        # all content is in the tail.  Otherwise, if `head_leaves` are empty, it means
+        # the matching `opening_bracket` wasn't available on `line` anymore.
+        raise CannotSplit("No brackets found")
+
+    tail_leaves.reverse()
+    body_leaves.reverse()
+    head_leaves.reverse()
+    head = bracket_split_build_line(head_leaves, line, opening_bracket)
+    body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
+    tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
+    bracket_split_succeeded_or_raise(head, body, tail)
+    if (
+        # the body shouldn't be exploded
+        not body.should_explode
+        # the opening bracket is an optional paren
+        and opening_bracket.type == token.LPAR
+        and not opening_bracket.value
+        # the closing bracket is an optional paren
+        and closing_bracket.type == token.RPAR
+        and not closing_bracket.value
+        # it's not an import (optional parens are the only thing we can split on
+        # in this case; attempting a split without them is a waste of time)
+        and not line.is_import
+        # there are no standalone comments in the body
+        and not body.contains_standalone_comments(0)
+        # and we can actually remove the parens
+        and can_omit_invisible_parens(body, line_length)
+    ):
+        omit = {id(closing_bracket), *omit}
+        try:
+            yield from right_hand_split(line, line_length, features=features, omit=omit)
+            return
+
+        except CannotSplit:
+            if not (
+                can_be_split(body)
+                or is_line_short_enough(body, line_length=line_length)
+            ):
+                raise CannotSplit(
+                    "Splitting failed, body is still too long and can't be split."
+                )
+
+            elif head.contains_multiline_strings() or tail.contains_multiline_strings():
+                raise CannotSplit(
+                    "The current optional pair of parentheses is bound to fail to "
+                    "satisfy the splitting algorithm because the head or the tail "
+                    "contains multiline strings which by definition never fit one "
+                    "line."
+                )
+
+    ensure_visible(opening_bracket)
+    ensure_visible(closing_bracket)
+    for result in (head, body, tail):
+        if result:
+            yield result
+
+
+def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
+    """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
+
+    Do nothing otherwise.
+
+    A left- or right-hand split is based on a pair of brackets. Content before
+    (and including) the opening bracket is left on one line, content inside the
+    brackets is put on a separate line, and finally content starting with and
+    following the closing bracket is put on a separate line.
+
+    Those are called `head`, `body`, and `tail`, respectively. If the split
+    produced the same line (all content in `head`) or ended up with an empty `body`
+    and the `tail` is just the closing bracket, then it's considered failed.
+    """
+    tail_len = len(str(tail).strip())
+    if not body:
+        if tail_len == 0:
+            raise CannotSplit("Splitting brackets produced the same line")
+
+        elif tail_len < 3:
+            raise CannotSplit(
+                f"Splitting brackets on an empty body to save "
+                f"{tail_len} characters is not worth it"
+            )
+
+
+def bracket_split_build_line(
+    leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False
+) -> Line:
+    """Return a new line with given `leaves` and respective comments from `original`.
+
+    If `is_body` is True, the result line is one-indented inside brackets and as such
+    has its first leaf's prefix normalized and a trailing comma added when expected.
+    """
+    result = Line(depth=original.depth)
+    if is_body:
+        result.inside_brackets = True
+        result.depth += 1
+        if leaves:
+            # Since body is a new indent level, remove spurious leading whitespace.
+            normalize_prefix(leaves[0], inside_brackets=True)
+            # Ensure a trailing comma for imports and standalone function arguments, but
+            # be careful not to add one after any comments.
+            no_commas = original.is_def and not any(
+                l.type == token.COMMA for l in leaves
+            )
+
+            if original.is_import or no_commas:
+                for i in range(len(leaves) - 1, -1, -1):
+                    if leaves[i].type == STANDALONE_COMMENT:
+                        continue
+                    elif leaves[i].type == token.COMMA:
+                        break
+                    else:
+                        leaves.insert(i + 1, Leaf(token.COMMA, ","))
+                        break
+    # Populate the line
+    for leaf in leaves:
+        result.append(leaf, preformatted=True)
+        for comment_after in original.comments_after(leaf):
+            result.append(comment_after, preformatted=True)
+    if is_body:
+        result.should_explode = should_explode(result, opening_bracket)
+    return result
+
+
+def dont_increase_indentation(split_func: SplitFunc) -> SplitFunc:
+    """Normalize prefix of the first leaf in every line returned by `split_func`.
+
+    This is a decorator over relevant split functions.
+    """
+
+    @wraps(split_func)
+    def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
+        for l in split_func(line, features):
+            normalize_prefix(l.leaves[0], inside_brackets=True)
+            yield l
+
+    return split_wrapper
+
+
+@dont_increase_indentation
+def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
+    """Split according to delimiters of the highest priority.
+
+    If the appropriate Features are given, the split will add trailing commas
+    also in function signatures and calls that contain `*` and `**`.
+    """
+    try:
+        last_leaf = line.leaves[-1]
+    except IndexError:
+        raise CannotSplit("Line empty")
+
+    bt = line.bracket_tracker
+    try:
+        delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
+    except ValueError:
+        raise CannotSplit("No delimiters found")
+
+    if delimiter_priority == DOT_PRIORITY:
+        if bt.delimiter_count_with_priority(delimiter_priority) == 1:
+            raise CannotSplit("Splitting a single attribute from its owner looks wrong")
+
+    current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+    lowest_depth = sys.maxsize
+    trailing_comma_safe = True
+
+    def append_to_line(leaf: Leaf) -> Iterator[Line]:
+        """Append `leaf` to current line or to new line if appending impossible."""
+        nonlocal current_line
+        try:
+            current_line.append_safe(leaf, preformatted=True)
+        except ValueError:
+            yield current_line
+
+            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+            current_line.append(leaf)
+
+    for leaf in line.leaves:
+        yield from append_to_line(leaf)
+
+        for comment_after in line.comments_after(leaf):
+            yield from append_to_line(comment_after)
+
+        lowest_depth = min(lowest_depth, leaf.bracket_depth)
+        if leaf.bracket_depth == lowest_depth:
+            if is_vararg(leaf, within={syms.typedargslist}):
+                trailing_comma_safe = (
+                    trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
+                )
+            elif is_vararg(leaf, within={syms.arglist, syms.argument}):
+                trailing_comma_safe = (
+                    trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
+                )
+
+        leaf_priority = bt.delimiters.get(id(leaf))
+        if leaf_priority == delimiter_priority:
+            yield current_line
+
+            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+    if current_line:
+        if (
+            trailing_comma_safe
+            and delimiter_priority == COMMA_PRIORITY
+            and current_line.leaves[-1].type != token.COMMA
+            and current_line.leaves[-1].type != STANDALONE_COMMENT
+        ):
+            current_line.append(Leaf(token.COMMA, ","))
+        yield current_line
+
+
+@dont_increase_indentation
+def standalone_comment_split(
+    line: Line, features: Collection[Feature] = ()
+) -> Iterator[Line]:
+    """Split standalone comments from the rest of the line."""
+    if not line.contains_standalone_comments(0):
+        raise CannotSplit("Line does not have any standalone comments")
+
+    current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+
+    def append_to_line(leaf: Leaf) -> Iterator[Line]:
+        """Append `leaf` to current line or to new line if appending impossible."""
+        nonlocal current_line
+        try:
+            current_line.append_safe(leaf, preformatted=True)
+        except ValueError:
+            yield current_line
+
+            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+            current_line.append(leaf)
+
+    for leaf in line.leaves:
+        yield from append_to_line(leaf)
+
+        for comment_after in line.comments_after(leaf):
+            yield from append_to_line(comment_after)
+
+    if current_line:
+        yield current_line
+
+
+def is_import(leaf: Leaf) -> bool:
+    """Return True if the given leaf starts an import statement."""
+    p = leaf.parent
+    t = leaf.type
+    v = leaf.value
+    return bool(
+        t == token.NAME
+        and (
+            (v == "import" and p and p.type == syms.import_name)
+            or (v == "from" and p and p.type == syms.import_from)
+        )
+    )
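+
+
+# Editor's illustration (not part of upstream grey/Black): the leaf must be
+# the "import" or "from" keyword *and* sit under the matching import node.
+def _example_is_import() -> None:
+    stmt = Node(
+        syms.import_name, [Leaf(token.NAME, "import"), Leaf(token.NAME, "os")]
+    )
+    assert is_import(stmt.children[0])
+    assert not is_import(Leaf(token.NAME, "import"))  # no parent node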
+
+
+def is_type_comment(leaf: Leaf, suffix: str = "") -> bool:
+    """Return True if the given leaf is a special comment.
+    Only returns true for type comments for now."""
+    t = leaf.type
+    v = leaf.value
+    return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith(
+        "# type:" + suffix
+    )
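+
+
+# Editor's illustration (not part of upstream grey/Black): only comments that
+# start with "# type:" qualify, regardless of their content otherwise.
+def _example_is_type_comment() -> None:
+    assert is_type_comment(Leaf(token.COMMENT, "# type: int"))
+    assert not is_type_comment(Leaf(token.COMMENT, "# regular comment"))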
+
+
+def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
+    """Leave existing extra newlines if not `inside_brackets`. Remove everything
+    else.
+
+    Note: don't use backslashes for formatting or you'll lose your voting rights.
+    """
+    if not inside_brackets:
+        spl = leaf.prefix.split("#")
+        if "\\" not in spl[0]:
+            nl_count = spl[-1].count("\n")
+            if len(spl) > 1:
+                nl_count -= 1
+            leaf.prefix = "\n" * nl_count
+            return
+
+    leaf.prefix = ""
+
+
+def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None:
+    """Make all string prefixes lowercase.
+
+    If remove_u_prefix is given, also removes any u prefix from the string.
+
+    Note: Mutates its argument.
+    """
+    match = re.match(r"^([furbFURB]*)(.*)$", leaf.value, re.DOTALL)
+    assert match is not None, f"failed to match string {leaf.value!r}"
+    orig_prefix = match.group(1)
+    new_prefix = orig_prefix.lower()
+    if remove_u_prefix:
+        new_prefix = new_prefix.replace("u", "")
+    leaf.value = f"{new_prefix}{match.group(2)}"
+
+
+def normalize_string_quotes(leaf: Leaf) -> None:
+    """Prefer double quotes but only if it doesn't cause more escaping.
+
+    Adds or removes backslashes as appropriate. Doesn't parse and fix
+    strings nested in f-strings (yet).
+
+    Note: Mutates its argument.
+    """
+    value = leaf.value.lstrip("furbFURB")
+    if value[:3] == '"""':
+        return
+
+    elif value[:3] == "'''":
+        orig_quote = "'''"
+        new_quote = '"""'
+    elif value[0] == '"':
+        orig_quote = '"'
+        new_quote = "'"
+    else:
+        orig_quote = "'"
+        new_quote = '"'
+    first_quote_pos = leaf.value.find(orig_quote)
+    if first_quote_pos == -1:
+        return  # There's an internal error
+
+    prefix = leaf.value[:first_quote_pos]
+    unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
+    escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
+    escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
+    body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)]
+    if "r" in prefix.casefold():
+        if unescaped_new_quote.search(body):
+            # There's at least one unescaped new_quote in this raw string
+            # so converting is impossible
+            return
+
+        # Do not introduce or remove backslashes in raw strings
+        new_body = body
+    else:
+        # remove unnecessary escapes
+        new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
+        if body != new_body:
+            # Consider the string without unnecessary escapes as the original
+            body = new_body
+            leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}"
+        new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
+        new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
+    if "f" in prefix.casefold():
+        matches = re.findall(
+            r"""
+            (?:[^{]|^)\{  # start of the string or a non-{ followed by a single {
+                ([^{].*?)  # contents of the brackets, except when it begins with {{
+            \}(?:[^}]|$)  # A } followed by end of the string or a non-}
+            """,
+            new_body,
+            re.VERBOSE,
+        )
+        for m in matches:
+            if "\\" in str(m):
+                # Do not introduce backslashes in interpolated expressions
+                return
+    if new_quote == '"""' and new_body[-1:] == '"':
+        # edge case: the body ends with '"', which would merge with the
+        # closing '"""'; escape it so the result stays well-formed
+        new_body = new_body[:-1] + '\\"'
+    orig_escape_count = body.count("\\")
+    new_escape_count = new_body.count("\\")
+    if new_escape_count > orig_escape_count:
+        return  # Do not introduce more escaping
+
+    if new_escape_count == orig_escape_count and orig_quote == '"':
+        return  # Prefer double quotes
+
+    leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}"
+
+
+def normalize_numeric_literal(leaf: Leaf) -> None:
+    """Normalizes numeric (float, int, and complex) literals.
+
+    All letters used in the representation are normalized to lowercase, except
+    hex digits, which are uppercased, and the Python 2 long suffix, which is
+    capitalized to "L" so it isn't confused with the digit "1".
+    """
+    text = leaf.value.lower()
+    if text.startswith(("0o", "0b")):
+        # Leave octal and binary literals alone.
+        pass
+    elif text.startswith("0x"):
+        # Change hex literals to upper case.
+        before, after = text[:2], text[2:]
+        text = f"{before}{after.upper()}"
+    elif "e" in text:
+        before, after = text.split("e")
+        sign = ""
+        if after.startswith("-"):
+            after = after[1:]
+            sign = "-"
+        elif after.startswith("+"):
+            after = after[1:]
+        before = format_float_or_int_string(before)
+        text = f"{before}e{sign}{after}"
+    elif text.endswith(("j", "l")):
+        number = text[:-1]
+        suffix = text[-1]
+        # Capitalize in "2L" because "l" looks too similar to "1".
+        if suffix == "l":
+            suffix = "L"
+        text = f"{format_float_or_int_string(number)}{suffix}"
+    else:
+        text = format_float_or_int_string(text)
+    leaf.value = text
+
+
+def format_float_or_int_string(text: str) -> str:
+    """Formats a float string like "1.0"."""
+    if "." not in text:
+        return text
+
+    before, after = text.split(".")
+    return f"{before or 0}.{after or 0}"
+
+
+def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
+    """Make existing optional parentheses invisible or create new ones.
+
+    `parens_after` is a set of string leaf values immediately after which parens
+    should be put.
+
+    Standardizes on visible parentheses for single-element tuples, and keeps
+    existing visible parentheses for other tuples and generator expressions.
+    """
+    for pc in list_comments(node.prefix, is_endmarker=False):
+        if pc.value in FMT_OFF:
+            # This `node` has a prefix with `# fmt: off`, don't mess with parens.
+            return
+
+    check_lpar = False
+    for index, child in enumerate(list(node.children)):
+        # Add parentheses around long tuple unpacking in assignments.
+        if (
+            index == 0
+            and isinstance(child, Node)
+            and child.type == syms.testlist_star_expr
+        ):
+            check_lpar = True
+
+        if check_lpar:
+            if is_walrus_assignment(child):
+                continue
+            if child.type == syms.atom:
+                # Determines if the underlying atom should be surrounded with
+                # invisible parens - also makes parens invisible recursively
+                # within the atom and removes repeated invisible parens within
+                # the atom
+                should_surround_with_parens = maybe_make_parens_invisible_in_atom(
+                    child, parent=node
+                )
+
+                if should_surround_with_parens:
+                    lpar = Leaf(token.LPAR, "")
+                    rpar = Leaf(token.RPAR, "")
+                    index = child.remove() or 0
+                    node.insert_child(index, Node(syms.atom, [lpar, child, rpar]))
+            elif is_one_tuple(child):
+                # wrap child in visible parentheses
+                lpar = Leaf(token.LPAR, "(")
+                rpar = Leaf(token.RPAR, ")")
+                child.remove()
+                node.insert_child(index, Node(syms.atom, [lpar, child, rpar]))
+            elif node.type == syms.import_from:
+                # "import from" nodes store parentheses directly as part of
+                # the statement
+                if child.type == token.LPAR:
+                    # make parentheses invisible
+                    child.value = ""  # type: ignore
+                    node.children[-1].value = ""  # type: ignore
+                elif child.type != token.STAR:
+                    # insert invisible parentheses
+                    node.insert_child(index, Leaf(token.LPAR, ""))
+                    node.append_child(Leaf(token.RPAR, ""))
+                break
+
+            elif not (isinstance(child, Leaf) and is_multiline_string(child)):
+                # wrap child in invisible parentheses
+                lpar = Leaf(token.LPAR, "")
+                rpar = Leaf(token.RPAR, "")
+                index = child.remove() or 0
+                prefix = child.prefix
+                child.prefix = ""
+                new_child = Node(syms.atom, [lpar, child, rpar])
+                new_child.prefix = prefix
+                node.insert_child(index, new_child)
+
+        check_lpar = isinstance(child, Leaf) and child.value in parens_after
+
+
+def normalize_fmt_off(node: Node) -> None:
+    """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
+    try_again = True
+    while try_again:
+        try_again = convert_one_fmt_off_pair(node)
+
+
+def convert_one_fmt_off_pair(node: Node) -> bool:
+    """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
+
+    Returns True if a pair was converted.
+    """
+    for leaf in node.leaves():
+        previous_consumed = 0
+        for comment in list_comments(leaf.prefix, is_endmarker=False):
+            if comment.value in FMT_OFF:
+                # We only want standalone comments. If there's no previous leaf or
+                # the previous leaf is indentation, it's a standalone comment in
+                # disguise.
+                if comment.type != STANDALONE_COMMENT:
+                    prev = preceding_leaf(leaf)
+                    if prev and prev.type not in WHITESPACE:
+                        continue
+
+                ignored_nodes = list(generate_ignored_nodes(leaf))
+                if not ignored_nodes:
+                    continue
+
+                first = ignored_nodes[0]  # Can be a container node with the `leaf`.
+                parent = first.parent
+                prefix = first.prefix
+                first.prefix = prefix[comment.consumed :]
+                hidden_value = (
+                    comment.value + "\n" + "".join(str(n) for n in ignored_nodes)
+                )
+                if hidden_value.endswith("\n"):
+                    # That happens when one of the `ignored_nodes` ended with a NEWLINE
+                    # leaf (possibly followed by a DEDENT).
+                    hidden_value = hidden_value[:-1]
+                first_idx = None
+                for ignored in ignored_nodes:
+                    index = ignored.remove()
+                    if first_idx is None:
+                        first_idx = index
+                assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
+                assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
+                parent.insert_child(
+                    first_idx,
+                    Leaf(
+                        STANDALONE_COMMENT,
+                        hidden_value,
+                        prefix=prefix[:previous_consumed] + "\n" * comment.newlines,
+                    ),
+                )
+                return True
+
+            previous_consumed = comment.consumed
+
+    return False
+
+
+def generate_ignored_nodes(leaf: Leaf) -> Iterator[LN]:
+    """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
+
+    Stops at the end of the block.
+    """
+    container: Optional[LN] = container_of(leaf)
+    while container is not None and container.type != token.ENDMARKER:
+        for comment in list_comments(container.prefix, is_endmarker=False):
+            if comment.value in FMT_ON:
+                return
+
+        yield container
+
+        container = container.next_sibling
+
+
+def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool:
+    """If it's safe, make the parens in the atom `node` invisible, recursively.
+    Additionally, remove repeated, adjacent invisible parens from the atom `node`
+    as they are redundant.
+
+    Returns whether the node should itself be wrapped in invisible parentheses.
+    """
+    if (
+        node.type != syms.atom
+        or is_empty_tuple(node)
+        or is_one_tuple(node)
+        or (is_yield(node) and parent.type != syms.expr_stmt)
+        or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
+    ):
+        return False
+
+    first = node.children[0]
+    last = node.children[-1]
+    if first.type == token.LPAR and last.type == token.RPAR:
+        middle = node.children[1]
+        # make parentheses invisible
+        first.value = ""  # type: ignore
+        last.value = ""  # type: ignore
+        maybe_make_parens_invisible_in_atom(middle, parent=parent)
+
+        if is_atom_with_invisible_parens(middle):
+            # Strip the invisible parens from `middle` by replacing
+            # it with the child in-between the invisible parens
+            middle.replace(middle.children[1])
+
+        return False
+
+    return True
+
+
+def is_atom_with_invisible_parens(node: LN) -> bool:
+    """Given a `LN`, determines whether it's an atom `node` with invisible
+    parens. Useful in deduplicating and normalizing parens.
+    """
+    if isinstance(node, Leaf) or node.type != syms.atom:
+        return False
+
+    first, last = node.children[0], node.children[-1]
+    return (
+        isinstance(first, Leaf)
+        and first.type == token.LPAR
+        and first.value == ""
+        and isinstance(last, Leaf)
+        and last.type == token.RPAR
+        and last.value == ""
+    )
+
+
+def is_empty_tuple(node: LN) -> bool:
+    """Return True if `node` holds an empty tuple."""
+    return (
+        node.type == syms.atom
+        and len(node.children) == 2
+        and node.children[0].type == token.LPAR
+        and node.children[1].type == token.RPAR
+    )
+
+
+def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]:
+    """Returns `wrapped` if `node` is of the shape ( wrapped ).
+
+    Parentheses can be optional. Returns None otherwise."""
+    if len(node.children) != 3:
+        return None
+    lpar, wrapped, rpar = node.children
+    if not (lpar.type == token.LPAR and rpar.type == token.RPAR):
+        return None
+
+    return wrapped
+
+
+def is_one_tuple(node: LN) -> bool:
+    """Return True if `node` holds a tuple with one element, with or without parens."""
+    if node.type == syms.atom:
+        gexp = unwrap_singleton_parenthesis(node)
+        if gexp is None or gexp.type != syms.testlist_gexp:
+            return False
+
+        return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
+
+    return (
+        node.type in IMPLICIT_TUPLE
+        and len(node.children) == 2
+        and node.children[1].type == token.COMMA
+    )
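+
+
+# Editor's illustration (not part of upstream grey/Black): hand-built atoms
+# standing in for the parsed forms of "()" and "(1,)".
+def _example_tuple_helpers() -> None:
+    empty = Node(syms.atom, [Leaf(token.LPAR, "("), Leaf(token.RPAR, ")")])
+    assert is_empty_tuple(empty)
+    one = Node(
+        syms.atom,
+        [
+            Leaf(token.LPAR, "("),
+            Node(
+                syms.testlist_gexp,
+                [Leaf(token.NUMBER, "1"), Leaf(token.COMMA, ",")],
+            ),
+            Leaf(token.RPAR, ")"),
+        ],
+    )
+    assert is_one_tuple(one)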
+
+
+def is_walrus_assignment(node: LN) -> bool:
+    """Return True iff `node` is of the shape ( test := test )"""
+    inner = unwrap_singleton_parenthesis(node)
+    return inner is not None and inner.type == syms.namedexpr_test
+
+
+def is_yield(node: LN) -> bool:
+    """Return True if `node` holds a `yield` or `yield from` expression."""
+    if node.type == syms.yield_expr:
+        return True
+
+    if node.type == token.NAME and node.value == "yield":  # type: ignore
+        return True
+
+    if node.type != syms.atom:
+        return False
+
+    if len(node.children) != 3:
+        return False
+
+    lpar, expr, rpar = node.children
+    if lpar.type == token.LPAR and rpar.type == token.RPAR:
+        return is_yield(expr)
+
+    return False
+
+
+def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
+    """Return True if `leaf` is a star or double star in a vararg or kwarg.
+
+    If `within` includes VARARGS_PARENTS, this applies to function signatures.
+    If `within` includes UNPACKING_PARENTS, it applies to right hand-side
+    extended iterable unpacking (PEP 3132) and additional unpacking
+    generalizations (PEP 448).
+    """
+    if leaf.type not in VARARGS_SPECIALS or not leaf.parent:
+        return False
+
+    p = leaf.parent
+    if p.type == syms.star_expr:
+        # Star expressions are also used as assignment targets in extended
+        # iterable unpacking (PEP 3132).  See what its parent is instead.
+        if not p.parent:
+            return False
+
+        p = p.parent
+
+    return p.type in within
+
+
+def is_multiline_string(leaf: Leaf) -> bool:
+    """Return True if `leaf` is a multiline string that actually spans many lines."""
+    value = leaf.value.lstrip("furbFURB")
+    return value[:3] in {'"""', "'''"} and "\n" in value
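+
+
+# Editor's illustration (not part of upstream grey/Black): triple quotes alone
+# aren't enough; the string must actually contain a newline.
+def _example_is_multiline_string() -> None:
+    assert is_multiline_string(Leaf(token.STRING, '"""a\nb"""'))
+    assert not is_multiline_string(Leaf(token.STRING, '"""one line"""'))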
+
+
+def is_stub_suite(node: Node) -> bool:
+    """Return True if `node` is a suite with a stub body."""
+    if (
+        len(node.children) != 4
+        or node.children[0].type != token.NEWLINE
+        or node.children[1].type != token.INDENT
+        or node.children[3].type != token.DEDENT
+    ):
+        return False
+
+    return is_stub_body(node.children[2])
+
+
+def is_stub_body(node: LN) -> bool:
+    """Return True if `node` is a simple statement containing an ellipsis."""
+    if not isinstance(node, Node) or node.type != syms.simple_stmt:
+        return False
+
+    if len(node.children) != 2:
+        return False
+
+    child = node.children[0]
+    return (
+        child.type == syms.atom
+        and len(child.children) == 3
+        and all(leaf == Leaf(token.DOT, ".") for leaf in child.children)
+    )
+
+
+def max_delimiter_priority_in_atom(node: LN) -> Priority:
+    """Return maximum delimiter priority inside `node`.
+
+    This is specific to atoms with contents contained in a pair of parentheses.
+    If `node` isn't an atom or there are no enclosing parentheses, returns 0.
+    """
+    if node.type != syms.atom:
+        return 0
+
+    first = node.children[0]
+    last = node.children[-1]
+    if not (first.type == token.LPAR and last.type == token.RPAR):
+        return 0
+
+    bt = BracketTracker()
+    for c in node.children[1:-1]:
+        if isinstance(c, Leaf):
+            bt.mark(c)
+        else:
+            for leaf in c.leaves():
+                bt.mark(leaf)
+    try:
+        return bt.max_delimiter_priority()
+
+    except ValueError:
+        return 0
+
+
+def ensure_visible(leaf: Leaf) -> None:
+    """Make sure parentheses are visible.
+
+    They could be invisible as part of some statements (see
+    :func:`normalize_invisible_parens` and :func:`visit_import_from`).
+    """
+    if leaf.type == token.LPAR:
+        leaf.value = "("
+    elif leaf.type == token.RPAR:
+        leaf.value = ")"
+
+
+def should_explode(line: Line, opening_bracket: Leaf) -> bool:
+    """Should `line` immediately be split with `delimiter_split()` after RHS?"""
+
+    if not (
+        opening_bracket.parent
+        and opening_bracket.parent.type in {syms.atom, syms.import_from}
+        and opening_bracket.value in "[{("
+    ):
+        return False
+
+    try:
+        last_leaf = line.leaves[-1]
+        exclude = {id(last_leaf)} if last_leaf.type == token.COMMA else set()
+        max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
+    except (IndexError, ValueError):
+        return False
+
+    return max_priority == COMMA_PRIORITY
+
+
+def get_features_used(node: Node) -> Set[Feature]:
+    """Return a set of (relatively) new Python features used in this file.
+
+    Currently looking for:
+    - f-strings;
+    - underscores in numeric literals;
+    - trailing commas after * or ** in function signatures and calls;
+    - positional only arguments in function signatures and lambdas;
+    """
+    features: Set[Feature] = set()
+    for n in node.pre_order():
+        if n.type == token.STRING:
+            value_head = n.value[:2]  # type: ignore
+            if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}:
+                features.add(Feature.F_STRINGS)
+
+        elif n.type == token.NUMBER:
+            if "_" in n.value:  # type: ignore
+                features.add(Feature.NUMERIC_UNDERSCORES)
+
+        elif n.type == token.SLASH:
+            if n.parent and n.parent.type in {syms.typedargslist, syms.arglist}:
+                features.add(Feature.POS_ONLY_ARGUMENTS)
+
+        elif n.type == token.COLONEQUAL:
+            features.add(Feature.ASSIGNMENT_EXPRESSIONS)
+
+        elif (
+            n.type in {syms.typedargslist, syms.arglist}
+            and n.children
+            and n.children[-1].type == token.COMMA
+        ):
+            if n.type == syms.typedargslist:
+                feature = Feature.TRAILING_COMMA_IN_DEF
+            else:
+                feature = Feature.TRAILING_COMMA_IN_CALL
+
+            for ch in n.children:
+                if ch.type in STARS:
+                    features.add(feature)
+
+                if ch.type == syms.argument:
+                    for argch in ch.children:
+                        if argch.type in STARS:
+                            features.add(feature)
+
+    return features
+
+
+def detect_target_versions(node: Node) -> Set[TargetVersion]:
+    """Detect the version to target based on the nodes used."""
+    features = get_features_used(node)
+    return {
+        version for version in TargetVersion if features <= VERSION_TO_FEATURES[version]
+    }
+
+
+def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
+    """Generate sets of closing bracket IDs that should be omitted in a RHS.
+
+    Brackets can be omitted if the entire trailer up to and including
+    a preceding closing bracket fits in one line.
+
+    Yielded sets are cumulative (contain results of previous yields, too).  First
+    set is empty.
+    """
+
+    omit: Set[LeafID] = set()
+    yield omit
+
+    length = 4 * line.depth
+    opening_bracket = None
+    closing_bracket = None
+    inner_brackets: Set[LeafID] = set()
+    for index, leaf, leaf_length in enumerate_with_length(line, reversed=True):
+        length += leaf_length
+        if length > line_length:
+            break
+
+        has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
+        if leaf.type == STANDALONE_COMMENT or has_inline_comment:
+            break
+
+        if opening_bracket:
+            if leaf is opening_bracket:
+                opening_bracket = None
+            elif leaf.type in CLOSING_BRACKETS:
+                inner_brackets.add(id(leaf))
+        elif leaf.type in CLOSING_BRACKETS:
+            if index > 0 and line.leaves[index - 1].type in OPENING_BRACKETS:
+                # Empty brackets would fail a split so treat them as "inner"
+                # brackets (i.e. only add them to the `omit` set if another
+                # pair of brackets was good enough).
+                inner_brackets.add(id(leaf))
+                continue
+
+            if closing_bracket:
+                omit.add(id(closing_bracket))
+                omit.update(inner_brackets)
+                inner_brackets.clear()
+                yield omit
+
+            if leaf.value:
+                opening_bracket = leaf.opening_bracket
+                closing_bracket = leaf
+
+
+def get_future_imports(node: Node) -> Set[str]:
+    """Return a set of __future__ imports in the file."""
+    imports: Set[str] = set()
+
+    def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
+        for child in children:
+            if isinstance(child, Leaf):
+                if child.type == token.NAME:
+                    yield child.value
+            elif child.type == syms.import_as_name:
+                orig_name = child.children[0]
+                assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
+                assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
+                yield orig_name.value
+            elif child.type == syms.import_as_names:
+                yield from get_imports_from_children(child.children)
+            else:
+                raise AssertionError("Invalid syntax parsing imports")
+
+    for child in node.children:
+        if child.type != syms.simple_stmt:
+            break
+        first_child = child.children[0]
+        if isinstance(first_child, Leaf):
+            # Continue looking if we see a docstring; otherwise stop.
+            if (
+                len(child.children) == 2
+                and first_child.type == token.STRING
+                and child.children[1].type == token.NEWLINE
+            ):
+                continue
+            else:
+                break
+        elif first_child.type == syms.import_from:
+            module_name = first_child.children[1]
+            if not isinstance(module_name, Leaf) or module_name.value != "__future__":
+                break
+            imports |= set(get_imports_from_children(first_child.children[3:]))
+        else:
+            break
+    return imports
+
+
+def gen_python_files_in_dir(
+    path: Path,
+    root: Path,
+    include: Pattern[str],
+    exclude: Pattern[str],
+    report: "Report",
+) -> Iterator[Path]:
+    """Generate all files under `path` whose paths are not excluded by the
+    `exclude` regex, but are included by the `include` regex.
+
+    Symbolic links pointing outside of the `root` directory are ignored.
+
+    `report` is where output about exclusions goes.
+    """
+    assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
+    for child in path.iterdir():
+        try:
+            normalized_path = "/" + child.resolve().relative_to(root).as_posix()
+        except ValueError:
+            if child.is_symlink():
+                report.path_ignored(
+                    child, f"is a symbolic link that points outside {root}"
+                )
+                continue
+
+            raise
+
+        if child.is_dir():
+            normalized_path += "/"
+        exclude_match = exclude.search(normalized_path)
+        if exclude_match and exclude_match.group(0):
+            report.path_ignored(child, f"matches the --exclude regular expression")
+            continue
+
+        if child.is_dir():
+            yield from gen_python_files_in_dir(child, root, include, exclude, report)
+
+        elif child.is_file():
+            include_match = include.search(normalized_path)
+            if include_match:
+                yield child
+
+
+@lru_cache()
+def find_project_root(srcs: Iterable[str]) -> Path:
+    """Return a directory containing .git, .hg, or pyproject.toml.
+
+    That directory can be one of the directories passed in `srcs` or their
+    common parent.
+
+    If no directory in the tree contains a marker that would specify it's the
+    project root, the root of the file system is returned.
+    """
+    if not srcs:
+        return Path("/").resolve()
+
+    common_base = min(Path(src).resolve() for src in srcs)
+    if common_base.is_dir():
+        # Append a fake file so `parents` below returns `common_base`, too.
+        common_base /= "fake-file"
+    for directory in common_base.parents:
+        if (directory / ".git").is_dir():
+            return directory
+
+        if (directory / ".hg").is_dir():
+            return directory
+
+        if (directory / "pyproject.toml").is_file():
+            return directory
+
+    return directory
+
+
+@dataclass
+class Report:
+    """Provides a reformatting counter. Can be rendered with `str(report)`."""
+
+    check: bool = False
+    quiet: bool = False
+    verbose: bool = False
+    change_count: int = 0
+    same_count: int = 0
+    failure_count: int = 0
+
+    def done(self, src: Path, changed: Changed) -> None:
+        """Increment the counter for successful reformatting. Write out a message."""
+        if changed is Changed.YES:
+            reformatted = "would reformat" if self.check else "reformatted"
+            if self.verbose or not self.quiet:
+                out(f"{reformatted} {src}")
+            self.change_count += 1
+        else:
+            if self.verbose:
+                if changed is Changed.NO:
+                    msg = f"{src} already well formatted, good job."
+                else:
+                    msg = f"{src} wasn't modified on disk since last run."
+                out(msg, bold=False)
+            self.same_count += 1
+
+    def failed(self, src: Path, message: str) -> None:
+        """Increment the counter for failed reformatting. Write out a message."""
+        err(f"error: cannot format {src}: {message}")
+        self.failure_count += 1
+
+    def path_ignored(self, path: Path, message: str) -> None:
+        if self.verbose:
+            out(f"{path} ignored: {message}", bold=False)
+
+    @property
+    def return_code(self) -> int:
+        """Return the exit code that the app should use.
+
+        This considers the current state of changed files and failures:
+        - if there were any failures, return 123;
+        - if any files were changed and --check is being used, return 1;
+        - otherwise return 0.
+        """
+        # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
+        # 126 we have special return codes reserved by the shell.
+        if self.failure_count:
+            return 123
+
+        elif self.change_count and self.check:
+            return 1
+
+        return 0
+
+    def __str__(self) -> str:
+        """Render a color report of the current state.
+
+        Use `click.unstyle` to remove colors.
+        """
+        if self.check:
+            reformatted = "would be reformatted"
+            unchanged = "would be left unchanged"
+            failed = "would fail to reformat"
+        else:
+            reformatted = "reformatted"
+            unchanged = "left unchanged"
+            failed = "failed to reformat"
+        report = []
+        if self.change_count:
+            s = "s" if self.change_count > 1 else ""
+            report.append(
+                click.style(f"{self.change_count} file{s} {reformatted}", bold=True)
+            )
+        if self.same_count:
+            s = "s" if self.same_count > 1 else ""
+            report.append(f"{self.same_count} file{s} {unchanged}")
+        if self.failure_count:
+            s = "s" if self.failure_count > 1 else ""
+            report.append(
+                click.style(f"{self.failure_count} file{s} {failed}", fg="red")
+            )
+        return ", ".join(report) + "."
+
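+# Example (illustrative; not in upstream Black): how a Report renders and
+# which exit code it yields under --check:
+#
+#   report = Report(check=True)
+#   report.change_count = 2
+#   report.same_count = 1
+#   click.unstyle(str(report))
+#   # -> '2 files would be reformatted, 1 file would be left unchanged.'
+#   report.return_code  # -> 1, because changes were found during --check
+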
+
+def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]:
+    filename = "<unknown>"
+    if sys.version_info >= (3, 8):
+        # TODO: support Python 4+ ;)
+        for minor_version in range(sys.version_info[1], 4, -1):
+            try:
+                return ast.parse(src, filename, feature_version=(3, minor_version))
+            except SyntaxError:
+                continue
+    else:
+        for feature_version in (7, 6):
+            try:
+                return ast3.parse(src, filename, feature_version=feature_version)
+            except SyntaxError:
+                continue
+
+    return ast27.parse(src)
+
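+# Note (editorial): parse_ast() tries the newest grammar first and falls
+# back to progressively older feature_version values, so source that only
+# parses under an older grammar still succeeds; Python 2 source is handled
+# by the final ast27 fallback.
+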
+
+def _fixup_ast_constants(
+    node: Union[ast.AST, ast3.AST, ast27.AST]
+) -> Union[ast.AST, ast3.AST, ast27.AST]:
+    """Map ast nodes deprecated in 3.8 to Constant."""
+    # casts are required until this is released:
+    # https://github.com/python/typeshed/pull/3142
+    if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)):
+        return cast(ast.AST, ast.Constant(value=node.s))
+    elif isinstance(node, (ast.Num, ast3.Num, ast27.Num)):
+        return cast(ast.AST, ast.Constant(value=node.n))
+    elif isinstance(node, (ast.NameConstant, ast3.NameConstant)):
+        return cast(ast.AST, ast.Constant(value=node.value))
+    return node
+
+
+def assert_equivalent(src: str, dst: str) -> None:
+    """Raise AssertionError if `src` and `dst` aren't equivalent."""
+
+    def _v(node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0) -> Iterator[str]:
+        """Simple visitor generating strings to compare ASTs by content."""
+
+        node = _fixup_ast_constants(node)
+
+        yield f"{'  ' * depth}{node.__class__.__name__}("
+
+        for field in sorted(node._fields):
+            # TypeIgnore has only one field 'lineno' which breaks this comparison
+            type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore)
+            if sys.version_info >= (3, 8):
+                type_ignore_classes += (ast.TypeIgnore,)
+            if isinstance(node, type_ignore_classes):
+                break
+
+            try:
+                value = getattr(node, field)
+            except AttributeError:
+                continue
+
+            yield f"{'  ' * (depth+1)}{field}="
+
+            if isinstance(value, list):
+                for item in value:
+                    # Ignore nested tuples within del statements, because we may insert
+                    # parentheses and they change the AST.
+                    if (
+                        field == "targets"
+                        and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete))
+                        and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple))
+                    ):
+                        for item in item.elts:
+                            yield from _v(item, depth + 2)
+                    elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)):
+                        yield from _v(item, depth + 2)
+
+            elif isinstance(value, (ast.AST, ast3.AST, ast27.AST)):
+                yield from _v(value, depth + 2)
+
+            else:
+                yield f"{'  ' * (depth+2)}{value!r},  # {value.__class__.__name__}"
+
+        yield f"{'  ' * depth})  # /{node.__class__.__name__}"
+
+    try:
+        src_ast = parse_ast(src)
+    except Exception as exc:
+        raise AssertionError(
+            f"cannot use --safe with this file; failed to parse source file.  "
+            f"AST error message: {exc}"
+        )
+
+    try:
+        dst_ast = parse_ast(dst)
+    except Exception as exc:
+        log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
+        raise AssertionError(
+            f"INTERNAL ERROR: Black produced invalid code: {exc}. "
+            f"Please report a bug on https://github.com/psf/black/issues.  "
+            f"This invalid output might be helpful: {log}"
+        ) from None
+
+    src_ast_str = "\n".join(_v(src_ast))
+    dst_ast_str = "\n".join(_v(dst_ast))
+    if src_ast_str != dst_ast_str:
+        log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
+        raise AssertionError(
+            f"INTERNAL ERROR: Black produced code that is not equivalent to "
+            f"the source.  "
+            f"Please report a bug on https://github.com/psf/black/issues.  "
+            f"This diff might be helpful: {log}"
+        ) from None
+
+
+def assert_stable(src: str, dst: str, mode: FileMode) -> None:
+    """Raise AssertionError if `dst` reformats differently the second time."""
+    newdst = format_str(dst, mode=mode)
+    if dst != newdst:
+        log = dump_to_file(
+            diff(src, dst, "source", "first pass"),
+            diff(dst, newdst, "first pass", "second pass"),
+        )
+        raise AssertionError(
+            f"INTERNAL ERROR: Black produced different code on the second pass "
+            f"of the formatter.  "
+            f"Please report a bug on https://github.com/psf/black/issues.  "
+            f"This diff might be helpful: {log}"
+        ) from None
+
+
+def dump_to_file(*output: str) -> str:
+    """Dump `output` to a temporary file. Return path to the file."""
+    with tempfile.NamedTemporaryFile(
+        mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
+    ) as f:
+        for lines in output:
+            f.write(lines)
+            if lines and lines[-1] != "\n":
+                f.write("\n")
+    return f.name
+
+
+@contextmanager
+def nullcontext() -> Iterator[None]:
+    """Return context manager that does nothing.
+    Similar to `nullcontext` from python 3.7"""
+    yield
+
+
+def diff(a: str, b: str, a_name: str, b_name: str) -> str:
+    """Return a unified diff string between strings `a` and `b`."""
+    import difflib
+
+    a_lines = [line + "\n" for line in a.split("\n")]
+    b_lines = [line + "\n" for line in b.split("\n")]
+    return "".join(
+        difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5)
+    )
+
+
+def cancel(tasks: Iterable[asyncio.Task]) -> None:
+    """asyncio signal handler that cancels all `tasks` and reports to stderr."""
+    err("Aborted!")
+    for task in tasks:
+        task.cancel()
+
+
+def shutdown(loop: asyncio.AbstractEventLoop) -> None:
+    """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
+    try:
+        if sys.version_info[:2] >= (3, 7):
+            all_tasks = asyncio.all_tasks
+        else:
+            all_tasks = asyncio.Task.all_tasks
+        # This part is borrowed from asyncio/runners.py in Python 3.7b2.
+        to_cancel = [task for task in all_tasks(loop) if not task.done()]
+        if not to_cancel:
+            return
+
+        for task in to_cancel:
+            task.cancel()
+        loop.run_until_complete(
+            asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
+        )
+    finally:
+        # `concurrent.futures.Future` objects cannot be cancelled once they
+        # are already running; some may still be running when `shutdown()`
+        # happens.  Silence their logger's spew about the event loop being
+        # closed.
+        cf_logger = logging.getLogger("concurrent.futures")
+        cf_logger.setLevel(logging.CRITICAL)
+        loop.close()
+
+
+def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
+    """Replace `regex` with `replacement` twice on `original`.
+
+    This is used by string normalization to perform replaces on
+    overlapping matches.
+    """
+    return regex.sub(replacement, regex.sub(replacement, original))
+
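+# Example (illustrative; not in upstream Black): the second pass catches a
+# match that overlapped one already replaced on the first pass:
+#
+#   re.compile(r"''").sub("'", "''''")         # one pass   -> "''"
+#   sub_twice(re.compile(r"''"), "'", "''''")  # two passes -> "'"
+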
+
+def re_compile_maybe_verbose(regex: str) -> Pattern[str]:
+    """Compile a regular expression string in `regex`.
+
+    If it contains newlines, use verbose mode.
+    """
+    if "\n" in regex:
+        regex = "(?x)" + regex
+    return re.compile(regex)
+
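+# Example (illustrative): a pattern containing a newline is compiled in
+# verbose mode, where unescaped whitespace is ignored, so "foo\n|bar"
+# behaves like "foo|bar":
+#
+#   re_compile_maybe_verbose("foo\n|bar").match("bar")  # -> a match object
+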
+
+def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
+    """Like `reversed(enumerate(sequence))` if that were possible."""
+    index = len(sequence) - 1
+    for element in reversed(sequence):
+        yield (index, element)
+        index -= 1
+
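+# Example (illustrative): enumerate_reversed(["a", "b", "c"]) yields
+# (2, 'c'), (1, 'b'), (0, 'a'), keeping each element paired with its
+# original index while iterating from the end.
+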
+
+def enumerate_with_length(
+    line: Line, reversed: bool = False
+) -> Iterator[Tuple[Index, Leaf, int]]:
+    """Return an enumeration of leaves with their length.
+
+    Stops prematurely on multiline strings and standalone comments.
+    """
+    op = cast(
+        Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
+        enumerate_reversed if reversed else enumerate,
+    )
+    for index, leaf in op(line.leaves):
+        length = len(leaf.prefix) + len(leaf.value)
+        if "\n" in leaf.value:
+            return  # Multiline strings, we can't continue.
+
+        for comment in line.comments_after(leaf):
+            length += len(comment.value)
+
+        yield index, leaf, length
+
+
+def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool:
+    """Return True if `line` is no longer than `line_length`.
+
+    Uses the provided `line_str` rendering, if any, otherwise computes a new one.
+    """
+    if not line_str:
+        line_str = str(line).strip("\n")
+    return (
+        len(line_str) <= line_length
+        and "\n" not in line_str  # multiline strings
+        and not line.contains_standalone_comments()
+    )
+
+
+def can_be_split(line: Line) -> bool:
+    """Return False if the line cannot be split *for sure*.
+
+    This is not an exhaustive search but a cheap heuristic that we can use to
+    avoid some unfortunate formattings (mostly around wrapping unsplittable code
+    in unnecessary parentheses).
+    """
+    leaves = line.leaves
+    if len(leaves) < 2:
+        return False
+
+    if leaves[0].type == token.STRING and leaves[1].type == token.DOT:
+        call_count = 0
+        dot_count = 0
+        next = leaves[-1]
+        for leaf in leaves[-2::-1]:
+            if leaf.type in OPENING_BRACKETS:
+                if next.type not in CLOSING_BRACKETS:
+                    return False
+
+                call_count += 1
+            elif leaf.type == token.DOT:
+                dot_count += 1
+            elif leaf.type == token.NAME:
+                if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
+                    return False
+
+            elif leaf.type not in CLOSING_BRACKETS:
+                return False
+
+            if dot_count > 1 and call_count > 1:
+                return False
+
+    return True
+
+
+def can_omit_invisible_parens(line: Line, line_length: int) -> bool:
+    """Does `line` have a shape safe to reformat without optional parens around it?
+
+    Returns True for only a subset of potentially nice-looking formattings;
+    the point is to avoid false positives that end up producing lines that
+    are too long.
+    """
+    bt = line.bracket_tracker
+    if not bt.delimiters:
+        # Without delimiters the optional parentheses are useless.
+        return True
+
+    max_priority = bt.max_delimiter_priority()
+    if bt.delimiter_count_with_priority(max_priority) > 1:
+        # With more than one delimiter of a kind the optional parentheses read better.
+        return False
+
+    if max_priority == DOT_PRIORITY:
+        # A single stranded method call doesn't require optional parentheses.
+        return True
+
+    assert len(line.leaves) >= 2, "Stranded delimiter"
+
+    first = line.leaves[0]
+    second = line.leaves[1]
+    penultimate = line.leaves[-2]
+    last = line.leaves[-1]
+
+    # With a single delimiter, omit if the expression starts or ends with
+    # a bracket.
+    if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
+        remainder = False
+        length = 4 * line.depth
+        for _index, leaf, leaf_length in enumerate_with_length(line):
+            if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:
+                remainder = True
+            if remainder:
+                length += leaf_length
+                if length > line_length:
+                    break
+
+                if leaf.type in OPENING_BRACKETS:
+                    # There are brackets we can further split on.
+                    remainder = False
+
+        else:
+            # checked the entire string and line length wasn't exceeded
+            if len(line.leaves) == _index + 1:
+                return True
+
+        # Note: we are not returning False here because a line might have *both*
+        # a leading opening bracket and a trailing closing bracket.  If the
+        # opening bracket doesn't match our rule, maybe the closing will.
+
+    if (
+        last.type == token.RPAR
+        or last.type == token.RBRACE
+        or (
+            # don't use indexing for omitting optional parentheses;
+            # it looks weird
+            last.type == token.RSQB
+            and last.parent
+            and last.parent.type != syms.trailer
+        )
+    ):
+        if penultimate.type in OPENING_BRACKETS:
+            # Empty brackets don't help.
+            return False
+
+        if is_multiline_string(first):
+            # Additional wrapping of a multiline string in this situation is
+            # unnecessary.
+            return True
+
+        length = 4 * line.depth
+        seen_other_brackets = False
+        for _index, leaf, leaf_length in enumerate_with_length(line):
+            length += leaf_length
+            if leaf is last.opening_bracket:
+                if seen_other_brackets or length <= line_length:
+                    return True
+
+            elif leaf.type in OPENING_BRACKETS:
+                # There are brackets we can further split on.
+                seen_other_brackets = True
+
+    return False
+
+
+def get_cache_file(mode: FileMode) -> Path:
+    return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
+
+
+def read_cache(mode: FileMode) -> Cache:
+    """Read the cache if it exists and is well formed.
+
+    If it is not well formed, the call to write_cache later should resolve the issue.
+    """
+    cache_file = get_cache_file(mode)
+    if not cache_file.exists():
+        return {}
+
+    with cache_file.open("rb") as fobj:
+        try:
+            cache: Cache = pickle.load(fobj)
+        except pickle.UnpicklingError:
+            return {}
+
+    return cache
+
+
+def get_cache_info(path: Path) -> CacheInfo:
+    """Return the information used to check if a file is already formatted or not."""
+    stat = path.stat()
+    return stat.st_mtime, stat.st_size
+
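+# Note (editorial): as write_cache() below shows, a Cache maps each
+# resolved source Path to this (st_mtime, st_size) pair; filter_cached()
+# compares the stored pair against a fresh stat to decide whether a file
+# must be reformatted again.
+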
+
+def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
+    """Split an iterable of paths in `sources` into two sets.
+
+    The first contains paths of files that were modified on disk or are not
+    in the cache. The other contains paths to unmodified files.
+    """
+    todo, done = set(), set()
+    for src in sources:
+        src = src.resolve()
+        if cache.get(src) != get_cache_info(src):
+            todo.add(src)
+        else:
+            done.add(src)
+    return todo, done
+
+
+def write_cache(cache: Cache, sources: Iterable[Path], mode: FileMode) -> None:
+    """Update the cache file."""
+    cache_file = get_cache_file(mode)
+    try:
+        CACHE_DIR.mkdir(parents=True, exist_ok=True)
+        new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}}
+        with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:
+            pickle.dump(new_cache, f, protocol=pickle.HIGHEST_PROTOCOL)
+        os.replace(f.name, cache_file)
+    except OSError:
+        pass
+
+
+def patch_click() -> None:
+    """Make Click not crash.
+
+    In certain misconfigured environments, Python 3 selects the ASCII encoding as the
+    default which restricts paths that it can access during the lifetime of the
+    application.  Click refuses to work in this scenario by raising a RuntimeError.
+
+    In Black's case, the likelihood that non-ASCII characters are going to be used in
+    file paths is minimal since it's Python source code.  Moreover, this crash was
+    spurious on Python 3.7 thanks to PEP 538 and PEP 540.
+    """
+    try:
+        from click import core
+        from click import _unicodefun  # type: ignore
+    except ModuleNotFoundError:
+        return
+
+    for module in (core, _unicodefun):
+        if hasattr(module, "_verify_python3_env"):
+            module._verify_python3_env = lambda: None
+
+
+def patched_main() -> None:
+    freeze_support()
+    patch_click()
+    main()
+
+
+if __name__ == "__main__":
+    patched_main()
--- a/contrib/hgclient.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/hgclient.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,17 +16,22 @@
     stdout = sys.stdout.buffer
     stderr = sys.stderr.buffer
     stringio = io.BytesIO
+
     def bprint(*args):
         # remove b'' as well for ease of test migration
         pargs = [re.sub(br'''\bb(['"])''', br'\1', b'%s' % a) for a in args]
         stdout.write(b' '.join(pargs) + b'\n')
+
+
 else:
     import cStringIO
+
     stdout = sys.stdout
     stderr = sys.stderr
     stringio = cStringIO.StringIO
     bprint = print
 
+
 def connectpipe(path=None, extraargs=()):
     cmdline = [b'hg', b'serve', b'--cmdserver', b'pipe']
     if path:
@@ -38,11 +43,13 @@
             return cmdline
         return [arg.decode("utf-8") for arg in cmdline]
 
-    server = subprocess.Popen(tonative(cmdline), stdin=subprocess.PIPE,
-                              stdout=subprocess.PIPE)
+    server = subprocess.Popen(
+        tonative(cmdline), stdin=subprocess.PIPE, stdout=subprocess.PIPE
+    )
 
     return server
 
+
 class unixconnection(object):
     def __init__(self, sockpath):
         self.sock = sock = socket.socket(socket.AF_UNIX)
@@ -55,6 +62,7 @@
         self.stdout.close()
         self.sock.close()
 
+
 class unixserver(object):
     def __init__(self, sockpath, logpath=None, repopath=None):
         self.sockpath = sockpath
@@ -80,11 +88,13 @@
         os.kill(self.server.pid, signal.SIGTERM)
         self.server.wait()
 
+
 def writeblock(server, data):
     server.stdin.write(struct.pack(b'>I', len(data)))
     server.stdin.write(data)
     server.stdin.flush()
 
+
 def readchannel(server):
     data = server.stdout.read(5)
     if not data:
@@ -95,11 +105,14 @@
     else:
         return channel, server.stdout.read(length)
 
+
 def sep(text):
     return text.replace(b'\\', b'/')
 
-def runcommand(server, args, output=stdout, error=stderr, input=None,
-               outfilter=lambda x: x):
+
+def runcommand(
+    server, args, output=stdout, error=stderr, input=None, outfilter=lambda x: x
+):
     bprint(b'*** runcommand', b' '.join(args))
     stdout.flush()
     server.stdin.write(b'runcommand\n')
@@ -123,7 +136,7 @@
         elif ch == b'm':
             bprint(b"message: %r" % data)
         elif ch == b'r':
-            ret, = struct.unpack('>i', data)
+            (ret,) = struct.unpack('>i', data)
             if ret != 0:
                 bprint(b' [%d]' % ret)
             return ret
@@ -132,6 +145,7 @@
             if ch.isupper():
                 return
 
+
 def check(func, connect=connectpipe):
     stdout.flush()
     server = connect()
@@ -141,7 +155,9 @@
         server.stdin.close()
         server.wait()
 
+
 def checkwith(connect=connectpipe, **kwargs):
     def wrap(func):
         return check(func, lambda: connect(**kwargs))
+
     return wrap
--- a/contrib/import-checker.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/import-checker.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,7 +10,7 @@
 # Import a minimal set of stdlib modules needed for list_stdlib_modules()
 # to work when run from a virtualenv.  The modules were chosen empirically
 # so that the return value matches the return value without virtualenv.
-if True: # disable lexical sorting checks
+if True:  # disable lexical sorting checks
     try:
         import BaseHTTPServer as basehttpserver
     except ImportError:
@@ -28,9 +28,12 @@
     'mercurial.hgweb.common',
     'mercurial.hgweb.request',
     'mercurial.i18n',
+    'mercurial.interfaces',
     'mercurial.node',
+    'mercurial.pycompat',
     # for revlog to re-export constant to extensions
     'mercurial.revlogutils.constants',
+    'mercurial.revlogutils.flagutil',
     # for cffi modules to re-export pure functions
     'mercurial.pure.base85',
     'mercurial.pure.bdiff',
@@ -45,9 +48,7 @@
 )
 
 # Whitelist of symbols that can be directly imported.
-directsymbols = (
-    'demandimport',
-)
+directsymbols = ('demandimport',)
 
 # Modules that must be aliased because they are commonly confused with
 # common variables and can create aliasing and readability issues.
@@ -55,6 +56,7 @@
     'ui': 'uimod',
 }
 
+
 def usingabsolute(root):
     """Whether absolute imports are being used."""
     if sys.version_info[0] >= 3:
@@ -69,6 +71,7 @@
 
     return False
 
+
 def walklocal(root):
     """Recursively yield all descendant nodes but not in a different scope"""
     todo = collections.deque(ast.iter_child_nodes(root))
@@ -80,6 +83,7 @@
             todo.extend(ast.iter_child_nodes(node))
         yield node, newscope
 
+
 def dotted_name_of_path(path):
     """Given a relative path to a source file, return its dotted module name.
 
@@ -89,11 +93,12 @@
     'zlib'
     """
     parts = path.replace(os.sep, '/').split('/')
-    parts[-1] = parts[-1].split('.', 1)[0] # remove .py and .so and .ARCH.so
+    parts[-1] = parts[-1].split('.', 1)[0]  # remove .py and .so and .ARCH.so
     if parts[-1].endswith('module'):
         parts[-1] = parts[-1][:-6]
     return '.'.join(parts)
 
+
 def fromlocalfunc(modulename, localmods):
     """Get a function to examine which locally defined module the
     target source imports via a specified name.
@@ -162,6 +167,7 @@
     prefix = '.'.join(modulename.split('.')[:-1])
     if prefix:
         prefix += '.'
+
     def fromlocal(name, level=0):
         # name is false value when relative imports are used.
         if not name:
@@ -173,8 +179,9 @@
                 # Check relative name first.
                 candidates = [prefix + name, name]
             else:
-                candidates = ['.'.join(modulename.split('.')[:-level]) +
-                              '.' + name]
+                candidates = [
+                    '.'.join(modulename.split('.')[:-level]) + '.' + name
+                ]
 
         for n in candidates:
             if n in localmods:
@@ -183,18 +190,21 @@
             if dottedpath in localmods:
                 return (n, dottedpath, True)
         return False
+
     return fromlocal
 
+
 def populateextmods(localmods):
     """Populate C extension modules based on pure modules"""
     newlocalmods = set(localmods)
     for n in localmods:
         if n.startswith('mercurial.pure.'):
-            m = n[len('mercurial.pure.'):]
+            m = n[len('mercurial.pure.') :]
             newlocalmods.add('mercurial.cext.' + m)
             newlocalmods.add('mercurial.cffi._' + m)
     return newlocalmods
 
+
 def list_stdlib_modules():
     """List the modules present in the stdlib.
 
@@ -230,13 +240,13 @@
     for m in ['msvcrt', '_winreg']:
         yield m
     yield '__builtin__'
-    yield 'builtins' # python3 only
-    yield 'importlib.abc' # python3 only
-    yield 'importlib.machinery' # python3 only
-    yield 'importlib.util' # python3 only
+    yield 'builtins'  # python3 only
+    yield 'importlib.abc'  # python3 only
+    yield 'importlib.machinery'  # python3 only
+    yield 'importlib.util'  # python3 only
     for m in 'fcntl', 'grp', 'pwd', 'termios':  # Unix only
         yield m
-    for m in 'cPickle', 'datetime': # in Python (not C) on PyPy
+    for m in 'cPickle', 'datetime':  # in Python (not C) on PyPy
         yield m
     for m in ['cffi']:
         yield m
@@ -262,14 +272,17 @@
     for libpath in sys.path:
         # We want to walk everything in sys.path that starts with something in
         # stdlib_prefixes, but not directories from the hg sources.
-        if (os.path.abspath(libpath).startswith(sourceroot)
-            or not any(libpath.startswith(p) for p in stdlib_prefixes)):
+        if os.path.abspath(libpath).startswith(sourceroot) or not any(
+            libpath.startswith(p) for p in stdlib_prefixes
+        ):
             continue
         for top, dirs, files in os.walk(libpath):
             for i, d in reversed(list(enumerate(dirs))):
-                if (not os.path.exists(os.path.join(top, d, '__init__.py'))
-                    or top == libpath and d in ('hgdemandimport', 'hgext',
-                                                'mercurial')):
+                if (
+                    not os.path.exists(os.path.join(top, d, '__init__.py'))
+                    or top == libpath
+                    and d in ('hgdemandimport', 'hgext', 'mercurial')
+                ):
                     del dirs[i]
             for name in files:
                 if not name.endswith(('.py', '.so', '.pyc', '.pyo', '.pyd')):
@@ -278,12 +291,14 @@
                     full_path = top
                 else:
                     full_path = os.path.join(top, name)
-                rel_path = full_path[len(libpath) + 1:]
+                rel_path = full_path[len(libpath) + 1 :]
                 mod = dotted_name_of_path(rel_path)
                 yield mod
 
+
 stdlib_modules = set(list_stdlib_modules())
 
+
 def imported_modules(source, modulename, f, localmods, ignore_nested=False):
     """Given the source of a file as a string, yield the names
     imported by that file.
@@ -381,6 +396,7 @@
                 # lookup
                 yield dottedpath
 
+
 def verify_import_convention(module, source, localmods):
     """Verify imports match our established coding convention.
 
@@ -398,6 +414,7 @@
     else:
         return verify_stdlib_on_own_line(root)
 
+
 def verify_modern_convention(module, root, localmods, root_col_offset=0):
     """Verify a file conforms to the modern import convention rules.
 
@@ -441,19 +458,24 @@
     seenlevels = set()
 
     for node, newscope in walklocal(root):
+
         def msg(fmt, *args):
             return (fmt % args, node.lineno)
+
         if newscope:
             # Check for local imports in function
-            for r in verify_modern_convention(module, node, localmods,
-                                              node.col_offset + 4):
+            for r in verify_modern_convention(
+                module, node, localmods, node.col_offset + 4
+            ):
                 yield r
         elif isinstance(node, ast.Import):
             # Disallow "import foo, bar" and require separate imports
             # for each module.
             if len(node.names) > 1:
-                yield msg('multiple imported names: %s',
-                          ', '.join(n.name for n in node.names))
+                yield msg(
+                    'multiple imported names: %s',
+                    ', '.join(n.name for n in node.names),
+                )
 
             name = node.names[0].name
             asname = node.names[0].asname
@@ -463,16 +485,20 @@
             # Ignore sorting rules on imports inside blocks.
             if node.col_offset == root_col_offset:
                 if lastname and name < lastname and laststdlib == stdlib:
-                    yield msg('imports not lexically sorted: %s < %s',
-                              name, lastname)
+                    yield msg(
+                        'imports not lexically sorted: %s < %s', name, lastname
+                    )
 
             lastname = name
             laststdlib = stdlib
 
             # stdlib imports should be before local imports.
             if stdlib and seenlocal and node.col_offset == root_col_offset:
-                yield msg('stdlib import "%s" follows local import: %s',
-                          name, seenlocal)
+                yield msg(
+                    'stdlib import "%s" follows local import: %s',
+                    name,
+                    seenlocal,
+                )
 
             if not stdlib:
                 seenlocal = name
@@ -483,13 +509,16 @@
                 yield msg('import should be relative: %s', name)
 
             if name in requirealias and asname != requirealias[name]:
-                yield msg('%s module must be "as" aliased to %s',
-                          name, requirealias[name])
+                yield msg(
+                    '%s module must be "as" aliased to %s',
+                    name,
+                    requirealias[name],
+                )
 
         elif isinstance(node, ast.ImportFrom):
             # Resolve the full imported module name.
             if node.level > 0:
-                fullname = '.'.join(module.split('.')[:-node.level])
+                fullname = '.'.join(module.split('.')[: -node.level])
                 if node.module:
                     fullname += '.%s' % node.module
             else:
@@ -506,7 +535,8 @@
                 if not fullname or (
                     fullname in stdlib_modules
                     and fullname not in localmods
-                    and fullname + '.__init__' not in localmods):
+                    and fullname + '.__init__' not in localmods
+                ):
                     yield msg('relative import of stdlib module')
                 else:
                     seenlocal = fullname
@@ -516,19 +546,24 @@
             found = fromlocal(node.module, node.level)
             if found and found[2]:  # node.module is a package
                 prefix = found[0] + '.'
-                symbols = (n.name for n in node.names
-                           if not fromlocal(prefix + n.name))
+                symbols = (
+                    n.name for n in node.names if not fromlocal(prefix + n.name)
+                )
             else:
                 symbols = (n.name for n in node.names)
             symbols = [sym for sym in symbols if sym not in directsymbols]
             if node.module and node.col_offset == root_col_offset:
                 if symbols and fullname not in allowsymbolimports:
-                    yield msg('direct symbol import %s from %s',
-                              ', '.join(symbols), fullname)
+                    yield msg(
+                        'direct symbol import %s from %s',
+                        ', '.join(symbols),
+                        fullname,
+                    )
 
                 if symbols and seennonsymbollocal:
-                    yield msg('symbol import follows non-symbol import: %s',
-                              fullname)
+                    yield msg(
+                        'symbol import follows non-symbol import: %s', fullname
+                    )
             if not symbols and fullname not in stdlib_modules:
                 seennonsymbollocal = True
 
@@ -536,15 +571,19 @@
                 assert node.level
 
                 # Only allow 1 group per level.
-                if (node.level in seenlevels
-                    and node.col_offset == root_col_offset):
-                    yield msg('multiple "from %s import" statements',
-                              '.' * node.level)
+                if (
+                    node.level in seenlevels
+                    and node.col_offset == root_col_offset
+                ):
+                    yield msg(
+                        'multiple "from %s import" statements', '.' * node.level
+                    )
 
                 # Higher-level groups come before lower-level groups.
                 if any(node.level > l for l in seenlevels):
-                    yield msg('higher-level import should come first: %s',
-                              fullname)
+                    yield msg(
+                        'higher-level import should come first: %s', fullname
+                    )
 
                 seenlevels.add(node.level)
 
@@ -554,14 +593,23 @@
 
             for n in node.names:
                 if lastentryname and n.name < lastentryname:
-                    yield msg('imports from %s not lexically sorted: %s < %s',
-                              fullname, n.name, lastentryname)
+                    yield msg(
+                        'imports from %s not lexically sorted: %s < %s',
+                        fullname,
+                        n.name,
+                        lastentryname,
+                    )
 
                 lastentryname = n.name
 
                 if n.name in requirealias and n.asname != requirealias[n.name]:
-                    yield msg('%s from %s must be "as" aliased to %s',
-                              n.name, fullname, requirealias[n.name])
+                    yield msg(
+                        '%s from %s must be "as" aliased to %s',
+                        n.name,
+                        fullname,
+                        requirealias[n.name],
+                    )
+
 
 def verify_stdlib_on_own_line(root):
     """Given some python source, verify that stdlib imports are done
@@ -580,13 +628,20 @@
             for n in node.names:
                 from_stdlib[n.name in stdlib_modules].append(n.name)
             if from_stdlib[True] and from_stdlib[False]:
-                yield ('mixed imports\n   stdlib:    %s\n   relative:  %s' %
-                       (', '.join(sorted(from_stdlib[True])),
-                        ', '.join(sorted(from_stdlib[False]))), node.lineno)
+                yield (
+                    'mixed imports\n   stdlib:    %s\n   relative:  %s'
+                    % (
+                        ', '.join(sorted(from_stdlib[True])),
+                        ', '.join(sorted(from_stdlib[False])),
+                    ),
+                    node.lineno,
+                )
+
 
 class CircularImport(Exception):
     pass
 
+
 def checkmod(mod, imports):
     shortest = {}
     visit = [[mod]]
@@ -601,6 +656,7 @@
                     continue
                 visit.append(path + [i])
 
+
 def rotatecycle(cycle):
     """arrange a cycle so that the lexicographically first module listed first
 
@@ -611,6 +667,7 @@
     idx = cycle.index(lowest)
     return cycle[idx:] + cycle[:idx] + [lowest]
 
+
 def find_cycles(imports):
     """Find cycles in an already-loaded import graph.
 
@@ -634,9 +691,11 @@
             cycles.add(" -> ".join(rotatecycle(cycle)))
     return cycles
 
+
 def _cycle_sortkey(c):
     return len(c), c
 
+
 def embedded(f, modname, src):
     """Extract embedded python code
 
@@ -678,6 +737,7 @@
             modname = modname.decode('utf8')
         yield code, "%s[%d]" % (modname, starts), name, starts - 1
 
+
 def sources(f, modname):
     """Yields possibly multiple sources from a filepath
 
@@ -698,6 +758,7 @@
             for script, modname, t, line in embedded(f, modname, src):
                 yield script, modname.encode('utf8'), t, line
 
+
 def main(argv):
     if len(argv) < 2 or (argv[1] == '-' and len(argv) > 2):
         print('Usage: %s {-|file [file] [file] ...}')
@@ -719,15 +780,19 @@
         for src, modname, name, line in sources(source_path, localmodname):
             try:
                 used_imports[modname] = sorted(
-                    imported_modules(src, modname, name, localmods,
-                                     ignore_nested=True))
-                for error, lineno in verify_import_convention(modname, src,
-                                                              localmods):
+                    imported_modules(
+                        src, modname, name, localmods, ignore_nested=True
+                    )
+                )
+                for error, lineno in verify_import_convention(
+                    modname, src, localmods
+                ):
                     any_errors = True
                     print('%s:%d: %s' % (source_path, lineno + line, error))
             except SyntaxError as e:
-                print('%s:%d: SyntaxError: %s' %
-                      (source_path, e.lineno + line, e))
+                print(
+                    '%s:%d: SyntaxError: %s' % (source_path, e.lineno + line, e)
+                )
     cycles = find_cycles(used_imports)
     if cycles:
         firstmods = set()
@@ -743,5 +808,6 @@
         any_errors = True
     return any_errors != 0
 
+
 if __name__ == '__main__':
     sys.exit(int(main(sys.argv)))
--- a/contrib/install-windows-dependencies.ps1	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/install-windows-dependencies.ps1	Mon Oct 21 11:09:48 2019 -0400
@@ -37,22 +37,22 @@
 $PYTHON36_x64_URL = "https://www.python.org/ftp/python/3.6.8/python-3.6.8-amd64.exe"
 $PYTHON36_x64_SHA256 = "96088A58B7C43BC83B84E6B67F15E8706C614023DD64F9A5A14E81FF824ADADC"
 
-$PYTHON37_x86_URL = "https://www.python.org/ftp/python/3.7.2/python-3.7.2.exe"
-$PYTHON37_x86_SHA256 = "8BACE330FB409E428B04EEEE083DD9CA7F6C754366D07E23B3853891D8F8C3D0"
-$PYTHON37_x64_URL = "https://www.python.org/ftp/python/3.7.2/python-3.7.2-amd64.exe"
-$PYTHON37_x64_SHA256 = "0FE2A696F5A3E481FED795EF6896ED99157BCEF273EF3C4A96F2905CBDB3AA13"
+$PYTHON37_x86_URL = "https://www.python.org/ftp/python/3.7.4/python-3.7.4.exe"
+$PYTHON37_x86_SHA256 = "9a30ab5568ba37bfbcae5cdee19e9dc30765c42cf066f605221563ff8b20ee34"
+$PYTHON37_X64_URL = "https://www.python.org/ftp/python/3.7.4/python-3.7.4-amd64.exe"
+$PYTHON37_x64_SHA256 = "bab92f987320975c7826171a072bfd64f8f0941aaf2cdeba6924b7025c9968a3"
 
-$PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.0/python-3.8.0b2.exe"
-$PYTHON38_x86_SHA256 = "efa37ff7a239332bd5cf8b6e6ff15e3f183da942fd8c8d3e4b6bd11fa5e07e23"
-$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.0/python-3.8.0b2-amd64.exe"
-$PYTHON38_x64_SHA256 = "4e151f7dfa3605e6f400a3b01acfc2517468d71afb1e20f9299149356b79d8e9"
+$PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.0/python-3.8.0.exe"
+$PYTHON38_x86_SHA256 = "b471908de5e10d8fb5c3351a5affb1172da7790c533e0c9ffbaeec9c11611b15"
+$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.0/python-3.8.0-amd64.exe"
+$PYTHON38_x64_SHA256 = "a9bbc6088a3e4c7112826e21bfee6277f7b6d93259f7c57176139231bb7071e4"
 
-# PIP 19.0.3.
-$PIP_URL = "https://github.com/pypa/get-pip/raw/fee32c376da1ff6496a798986d7939cd51e1644f/get-pip.py"
-$PIP_SHA256 = "efe99298f3fbb1f56201ce6b81d2658067d2f7d7dfc2d412e0d3cacc9a397c61"
+# PIP 19.2.3.
+$PIP_URL = "https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py"
+$PIP_SHA256 = "57e3643ff19f018f8a00dfaa6b7e4620e3c1a7a2171fd218425366ec006b3bfe"
 
-$VIRTUALENV_URL = "https://files.pythonhosted.org/packages/37/db/89d6b043b22052109da35416abc3c397655e4bd3cff031446ba02b9654fa/virtualenv-16.4.3.tar.gz"
-$VIRTUALENV_SHA256 = "984d7e607b0a5d1329425dd8845bd971b957424b5ba664729fab51ab8c11bc39"
+$VIRTUALENV_URL = "https://files.pythonhosted.org/packages/66/f0/6867af06d2e2f511e4e1d7094ff663acdebc4f15d4a0cb0fed1007395124/virtualenv-16.7.5.tar.gz"
+$VIRTUALENV_SHA256 = "f78d81b62d3147396ac33fc9d77579ddc42cc2a98dd9ea38886f616b33bc7fb2"
 
 $INNO_SETUP_URL = "http://files.jrsoftware.org/is/5/innosetup-5.6.1-unicode.exe"
 $INNO_SETUP_SHA256 = "27D49E9BC769E9D1B214C153011978DB90DC01C2ACD1DDCD9ED7B3FE3B96B538"
@@ -60,9 +60,9 @@
 $MINGW_BIN_URL = "https://osdn.net/frs/redir.php?m=constant&f=mingw%2F68260%2Fmingw-get-0.6.3-mingw32-pre-20170905-1-bin.zip"
 $MINGW_BIN_SHA256 = "2AB8EFD7C7D1FC8EAF8B2FA4DA4EEF8F3E47768284C021599BC7435839A046DF"
 
-$MERCURIAL_WHEEL_FILENAME = "mercurial-4.9-cp27-cp27m-win_amd64.whl"
-$MERCURIAL_WHEEL_URL = "https://files.pythonhosted.org/packages/fe/e8/b872d53dfbbf986bdc46af0b30f580b227fb59bddd2587152a55e205b0cc/$MERCURIAL_WHEEL_FILENAME"
-$MERCURIAL_WHEEL_SHA256 = "218cc2e7c3f1d535007febbb03351663897edf27df0e57d6842e3b686492b429"
+$MERCURIAL_WHEEL_FILENAME = "mercurial-5.1.2-cp27-cp27m-win_amd64.whl"
+$MERCURIAL_WHEEL_URL = "https://files.pythonhosted.org/packages/6d/47/e031e47f7fe9b16e4e3383da47e2b0a7eae6e603996bc67a03ec4fa1b3f4/$MERCURIAL_WHEEL_FILENAME"
+$MERCURIAL_WHEEL_SHA256 = "1d18c7f6ca1456f0f62ee65c9a50c14cbba48ce6e924930cdb10537f5c9eaf5f"
 
 # Writing progress slows down downloads substantially. So disable it.
 $progressPreference = 'silentlyContinue'
--- a/contrib/memory.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/memory.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,6 +13,7 @@
 
 from __future__ import absolute_import
 
+
 def memusage(ui):
     """Report memory usage of the current process."""
     result = {'peak': 0, 'rss': 0}
@@ -24,8 +25,13 @@
             key = parts[0][2:-1].lower()
             if key in result:
                 result[key] = int(parts[1])
-    ui.write_err(", ".join(["%s: %.1f MiB" % (k, v / 1024.0)
-                            for k, v in result.iteritems()]) + "\n")
+    ui.write_err(
+        ", ".join(
+            ["%s: %.1f MiB" % (k, v / 1024.0) for k, v in result.iteritems()]
+        )
+        + "\n"
+    )
+
 
 def extsetup(ui):
     ui.atexit(memusage, ui)
--- a/contrib/packaging/hgpackaging/downloads.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/packaging/hgpackaging/downloads.py	Mon Oct 21 11:09:48 2019 -0400
@@ -98,7 +98,10 @@
     length = 0
 
     with urllib.request.urlopen(url) as fh:
-        if not url.endswith('.gz') and fh.info().get('Content-Encoding') == 'gzip':
+        if (
+            not url.endswith('.gz')
+            and fh.info().get('Content-Encoding') == 'gzip'
+        ):
             fh = gzip.GzipFile(fileobj=fh)
 
         while True:
@@ -114,12 +117,14 @@
     digest = h.hexdigest()
 
     if length != size:
-        raise IntegrityError('size mismatch on %s: wanted %d; got %d' % (
-            url, size, length))
+        raise IntegrityError(
+            'size mismatch on %s: wanted %d; got %d' % (url, size, length)
+        )
 
     if digest != sha256:
-        raise IntegrityError('sha256 mismatch on %s: wanted %s; got %s' % (
-            url, sha256, digest))
+        raise IntegrityError(
+            'sha256 mismatch on %s: wanted %s; got %s' % (url, sha256, digest)
+        )
 
 
 def download_to_path(url: str, path: pathlib.Path, size: int, sha256: str):
@@ -162,12 +167,14 @@
     print('successfully downloaded %s' % url)
 
 
-def download_entry(name: dict, dest_path: pathlib.Path, local_name=None) -> pathlib.Path:
+def download_entry(
+    name: dict, dest_path: pathlib.Path, local_name=None
+) -> pathlib.Path:
     entry = DOWNLOADS[name]
 
     url = entry['url']
 
-    local_name = local_name or url[url.rindex('/') + 1:]
+    local_name = local_name or url[url.rindex('/') + 1 :]
 
     local_path = dest_path / local_name
     download_to_path(url, local_path, entry['size'], entry['sha256'])
--- a/contrib/packaging/hgpackaging/inno.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/packaging/hgpackaging/inno.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,12 +12,8 @@
 import shutil
 import subprocess
 
-from .py2exe import (
-    build_py2exe,
-)
-from .util import (
-    find_vc_runtime_files,
-)
+from .py2exe import build_py2exe
+from .util import find_vc_runtime_files
 
 
 EXTRA_PACKAGES = {
@@ -28,9 +24,13 @@
 }
 
 
-def build(source_dir: pathlib.Path, build_dir: pathlib.Path,
-          python_exe: pathlib.Path, iscc_exe: pathlib.Path,
-          version=None):
+def build(
+    source_dir: pathlib.Path,
+    build_dir: pathlib.Path,
+    python_exe: pathlib.Path,
+    iscc_exe: pathlib.Path,
+    version=None,
+):
     """Build the Inno installer.
 
     Build files will be placed in ``build_dir``.
@@ -44,11 +44,18 @@
 
     vc_x64 = r'\x64' in os.environ.get('LIB', '')
 
-    requirements_txt = (source_dir / 'contrib' / 'packaging' /
-                        'inno' / 'requirements.txt')
+    requirements_txt = (
+        source_dir / 'contrib' / 'packaging' / 'inno' / 'requirements.txt'
+    )
 
-    build_py2exe(source_dir, build_dir, python_exe, 'inno',
-                 requirements_txt, extra_packages=EXTRA_PACKAGES)
+    build_py2exe(
+        source_dir,
+        build_dir,
+        python_exe,
+        'inno',
+        requirements_txt,
+        extra_packages=EXTRA_PACKAGES,
+    )
 
     # hg.exe depends on VC9 runtime DLLs. Copy those into place.
     for f in find_vc_runtime_files(vc_x64):
--- a/contrib/packaging/hgpackaging/py2exe.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/packaging/hgpackaging/py2exe.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,9 +11,7 @@
 import pathlib
 import subprocess
 
-from .downloads import (
-    download_entry,
-)
+from .downloads import download_entry
 from .util import (
     extract_tar_to_directory,
     extract_zip_to_directory,
@@ -21,12 +19,17 @@
 )
 
 
-def build_py2exe(source_dir: pathlib.Path, build_dir: pathlib.Path,
-                 python_exe: pathlib.Path, build_name: str,
-                 venv_requirements_txt: pathlib.Path,
-                 extra_packages=None, extra_excludes=None,
-                 extra_dll_excludes=None,
-                 extra_packages_script=None):
+def build_py2exe(
+    source_dir: pathlib.Path,
+    build_dir: pathlib.Path,
+    python_exe: pathlib.Path,
+    build_name: str,
+    venv_requirements_txt: pathlib.Path,
+    extra_packages=None,
+    extra_excludes=None,
+    extra_dll_excludes=None,
+    extra_packages_script=None,
+):
     """Build Mercurial with py2exe.
 
     Build files will be placed in ``build_dir``.
@@ -36,9 +39,11 @@
     to already be configured with an active toolchain.
     """
     if 'VCINSTALLDIR' not in os.environ:
-        raise Exception('not running from a Visual C++ build environment; '
-                        'execute the "Visual C++ <version> Command Prompt" '
-                        'application shortcut or a vcsvarsall.bat file')
+        raise Exception(
+            'not running from a Visual C++ build environment; '
+            'execute the "Visual C++ <version> Command Prompt" '
+            'application shortcut or a vcvarsall.bat file'
+        )
 
     # Identify x86/x64 and validate the environment matches the Python
     # architecture.
@@ -48,12 +53,16 @@
 
     if vc_x64:
         if py_info['arch'] != '64bit':
-            raise Exception('architecture mismatch: Visual C++ environment '
-                            'is configured for 64-bit but Python is 32-bit')
+            raise Exception(
+                'architecture mismatch: Visual C++ environment '
+                'is configured for 64-bit but Python is 32-bit'
+            )
     else:
         if py_info['arch'] != '32bit':
-            raise Exception('architecture mismatch: Visual C++ environment '
-                            'is configured for 32-bit but Python is 64-bit')
+            raise Exception(
+                'architecture mismatch: Visual C++ environment '
+                'is configured for 32-bit but Python is 64-bit'
+            )
 
     if py_info['py3']:
         raise Exception('Only Python 2 is currently supported')
@@ -65,11 +74,11 @@
     virtualenv_pkg, virtualenv_entry = download_entry('virtualenv', build_dir)
     py2exe_pkg, py2exe_entry = download_entry('py2exe', build_dir)
 
-    venv_path = build_dir / ('venv-%s-%s' % (build_name,
-                                             'x64' if vc_x64 else 'x86'))
+    venv_path = build_dir / (
+        'venv-%s-%s' % (build_name, 'x64' if vc_x64 else 'x86')
+    )
 
-    gettext_root = build_dir / (
-        'gettext-win-%s' % gettext_entry['version'])
+    gettext_root = build_dir / ('gettext-win-%s' % gettext_entry['version'])
 
     if not gettext_root.exists():
         extract_zip_to_directory(gettext_pkg, gettext_root)
@@ -77,7 +86,8 @@
 
     # This assumes Python 2. We don't need virtualenv on Python 3.
     virtualenv_src_path = build_dir / (
-        'virtualenv-%s' % virtualenv_entry['version'])
+        'virtualenv-%s' % virtualenv_entry['version']
+    )
     virtualenv_py = virtualenv_src_path / 'virtualenv.py'
 
     if not virtualenv_src_path.exists():
@@ -91,14 +101,15 @@
     if not venv_path.exists():
         print('creating virtualenv with dependencies')
         subprocess.run(
-            [str(python_exe), str(virtualenv_py), str(venv_path)],
-            check=True)
+            [str(python_exe), str(virtualenv_py), str(venv_path)], check=True
+        )
 
     venv_python = venv_path / 'Scripts' / 'python.exe'
     venv_pip = venv_path / 'Scripts' / 'pip.exe'
 
-    subprocess.run([str(venv_pip), 'install', '-r', str(venv_requirements_txt)],
-                   check=True)
+    subprocess.run(
+        [str(venv_pip), 'install', '-r', str(venv_requirements_txt)], check=True
+    )
 
     # Force distutils to use VC++ settings from environment, which was
     # validated above.
@@ -107,9 +118,13 @@
     env['MSSdk'] = '1'
 
     if extra_packages_script:
-        more_packages = set(subprocess.check_output(
-            extra_packages_script,
-            cwd=build_dir).split(b'\0')[-1].strip().decode('utf-8').splitlines())
+        more_packages = set(
+            subprocess.check_output(extra_packages_script, cwd=build_dir)
+            .split(b'\0')[-1]
+            .strip()
+            .decode('utf-8')
+            .splitlines()
+        )
         if more_packages:
             if not extra_packages:
                 extra_packages = more_packages
@@ -119,32 +134,38 @@
     if extra_packages:
         env['HG_PY2EXE_EXTRA_PACKAGES'] = ' '.join(sorted(extra_packages))
         hgext3rd_extras = sorted(
-            e for e in extra_packages if e.startswith('hgext3rd.'))
+            e for e in extra_packages if e.startswith('hgext3rd.')
+        )
         if hgext3rd_extras:
             env['HG_PY2EXE_EXTRA_INSTALL_PACKAGES'] = ' '.join(hgext3rd_extras)
     if extra_excludes:
         env['HG_PY2EXE_EXTRA_EXCLUDES'] = ' '.join(sorted(extra_excludes))
     if extra_dll_excludes:
         env['HG_PY2EXE_EXTRA_DLL_EXCLUDES'] = ' '.join(
-            sorted(extra_dll_excludes))
+            sorted(extra_dll_excludes)
+        )
 
     py2exe_py_path = venv_path / 'Lib' / 'site-packages' / 'py2exe'
     if not py2exe_py_path.exists():
         print('building py2exe')
-        subprocess.run([str(venv_python), 'setup.py', 'install'],
-                       cwd=py2exe_source_path,
-                       env=env,
-                       check=True)
+        subprocess.run(
+            [str(venv_python), 'setup.py', 'install'],
+            cwd=py2exe_source_path,
+            env=env,
+            check=True,
+        )
 
     # Register location of msgfmt and other binaries.
     env['PATH'] = '%s%s%s' % (
-        env['PATH'], os.pathsep, str(gettext_root / 'bin'))
+        env['PATH'],
+        os.pathsep,
+        str(gettext_root / 'bin'),
+    )
 
     print('building Mercurial')
     subprocess.run(
-        [str(venv_python), 'setup.py',
-         'py2exe',
-         'build_doc', '--html'],
+        [str(venv_python), 'setup.py', 'py2exe', 'build_doc', '--html'],
         cwd=str(source_dir),
         env=env,
-        check=True)
+        check=True,
+    )
--- a/contrib/packaging/hgpackaging/util.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/packaging/hgpackaging/util.py	Mon Oct 21 11:09:48 2019 -0400
@@ -32,8 +32,11 @@
 
     prefix = 'amd64' if x64 else 'x86'
 
-    candidates = sorted(p for p in os.listdir(winsxs)
-                  if p.lower().startswith('%s_microsoft.vc90.crt_' % prefix))
+    candidates = sorted(
+        p
+        for p in os.listdir(winsxs)
+        if p.lower().startswith('%s_microsoft.vc90.crt_' % prefix)
+    )
 
     for p in candidates:
         print('found candidate VC runtime: %s' % p)
@@ -72,7 +75,7 @@
         'version': version,
         'bin_root': bin_version,
         'bin_x86': bin_version / 'x86',
-        'bin_x64': bin_version / 'x64'
+        'bin_x64': bin_version / 'x64',
     }
 
 
@@ -89,9 +92,14 @@
     raise Exception('could not find signtool.exe in Windows 10 SDK')
 
 
-def sign_with_signtool(file_path, description, subject_name=None,
-                       cert_path=None, cert_password=None,
-                       timestamp_url=None):
+def sign_with_signtool(
+    file_path,
+    description,
+    subject_name=None,
+    cert_path=None,
+    cert_password=None,
+    timestamp_url=None,
+):
     """Digitally sign a file with signtool.exe.
 
     ``file_path`` is file to sign.
@@ -114,10 +122,13 @@
         cert_password = getpass.getpass('password for %s: ' % cert_path)
 
     args = [
-        str(find_signtool()), 'sign',
+        str(find_signtool()),
+        'sign',
         '/v',
-        '/fd', 'sha256',
-        '/d', description,
+        '/fd',
+        'sha256',
+        '/d',
+        description,
     ]
 
     if cert_path:
--- a/contrib/packaging/hgpackaging/wix.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/packaging/hgpackaging/wix.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,12 +15,8 @@
 import typing
 import xml.dom.minidom
 
-from .downloads import (
-    download_entry,
-)
-from .py2exe import (
-    build_py2exe,
-)
+from .downloads import download_entry
+from .py2exe import build_py2exe
 from .util import (
     extract_zip_to_directory,
     sign_with_signtool,
@@ -84,17 +80,29 @@
 
 def ensure_vc90_merge_modules(build_dir):
     x86 = (
-        download_entry('vc9-crt-x86-msm', build_dir,
-                       local_name='microsoft.vcxx.crt.x86_msm.msm')[0],
-        download_entry('vc9-crt-x86-msm-policy', build_dir,
-                       local_name='policy.x.xx.microsoft.vcxx.crt.x86_msm.msm')[0]
+        download_entry(
+            'vc9-crt-x86-msm',
+            build_dir,
+            local_name='microsoft.vcxx.crt.x86_msm.msm',
+        )[0],
+        download_entry(
+            'vc9-crt-x86-msm-policy',
+            build_dir,
+            local_name='policy.x.xx.microsoft.vcxx.crt.x86_msm.msm',
+        )[0],
     )
 
     x64 = (
-        download_entry('vc9-crt-x64-msm', build_dir,
-                       local_name='microsoft.vcxx.crt.x64_msm.msm')[0],
-        download_entry('vc9-crt-x64-msm-policy', build_dir,
-                       local_name='policy.x.xx.microsoft.vcxx.crt.x64_msm.msm')[0]
+        download_entry(
+            'vc9-crt-x64-msm',
+            build_dir,
+            local_name='microsoft.vcxx.crt.x64_msm.msm',
+        )[0],
+        download_entry(
+            'vc9-crt-x64-msm-policy',
+            build_dir,
+            local_name='policy.x.xx.microsoft.vcxx.crt.x64_msm.msm',
+        )[0],
     )
     return {
         'x86': x86,
@@ -116,17 +124,26 @@
     subprocess.run(args, cwd=str(cwd), check=True)
 
 
-def make_post_build_signing_fn(name, subject_name=None, cert_path=None,
-                               cert_password=None, timestamp_url=None):
+def make_post_build_signing_fn(
+    name,
+    subject_name=None,
+    cert_path=None,
+    cert_password=None,
+    timestamp_url=None,
+):
     """Create a callable that will use signtool to sign hg.exe."""
 
     def post_build_sign(source_dir, build_dir, dist_dir, version):
         description = '%s %s' % (name, version)
 
-        sign_with_signtool(dist_dir / 'hg.exe', description,
-                           subject_name=subject_name, cert_path=cert_path,
-                           cert_password=cert_password,
-                           timestamp_url=timestamp_url)
+        sign_with_signtool(
+            dist_dir / 'hg.exe',
+            description,
+            subject_name=subject_name,
+            cert_path=cert_path,
+            cert_password=cert_password,
+            timestamp_url=timestamp_url,
+        )
 
     return post_build_sign
 
@@ -155,7 +172,8 @@
     # We can't use ElementTree because it doesn't handle the
     # <?include ?> directives.
     doc = xml.dom.minidom.parseString(
-        LIBRARIES_XML.format(wix_dir=str(wix_dir)))
+        LIBRARIES_XML.format(wix_dir=str(wix_dir))
+    )
 
     component = doc.getElementsByTagName('Component')[0]
 
@@ -177,11 +195,16 @@
     return doc.toprettyxml()
 
 
-def build_installer(source_dir: pathlib.Path, python_exe: pathlib.Path,
-                    msi_name='mercurial', version=None, post_build_fn=None,
-                    extra_packages_script=None,
-                    extra_wxs:typing.Optional[typing.Dict[str,str]]=None,
-                    extra_features:typing.Optional[typing.List[str]]=None):
+def build_installer(
+    source_dir: pathlib.Path,
+    python_exe: pathlib.Path,
+    msi_name='mercurial',
+    version=None,
+    post_build_fn=None,
+    extra_packages_script=None,
+    extra_wxs: typing.Optional[typing.Dict[str, str]] = None,
+    extra_features: typing.Optional[typing.List[str]] = None,
+):
     """Build a WiX MSI installer.
 
     ``source_dir`` is the path to the Mercurial source tree to use.
@@ -209,10 +232,15 @@
 
     requirements_txt = wix_dir / 'requirements.txt'
 
-    build_py2exe(source_dir, hg_build_dir,
-                 python_exe, 'wix', requirements_txt,
-                 extra_packages=EXTRA_PACKAGES,
-                 extra_packages_script=extra_packages_script)
+    build_py2exe(
+        source_dir,
+        hg_build_dir,
+        python_exe,
+        'wix',
+        requirements_txt,
+        extra_packages=EXTRA_PACKAGES,
+        extra_packages_script=extra_packages_script,
+    )
 
     version = version or normalize_version(find_version(source_dir))
     print('using version string: %s' % version)
@@ -265,16 +293,19 @@
 
     run_candle(wix_path, build_dir, source, source_build_rel, defines=defines)
 
-    msi_path = source_dir / 'dist' / (
-        '%s-%s-%s.msi' % (msi_name, version, arch))
+    msi_path = (
+        source_dir / 'dist' / ('%s-%s-%s.msi' % (msi_name, version, arch))
+    )
 
     args = [
         str(wix_path / 'light.exe'),
         '-nologo',
-        '-ext', 'WixUIExtension',
+        '-ext',
+        'WixUIExtension',
         '-sw1076',
         '-spdb',
-        '-o', str(msi_path),
+        '-o',
+        str(msi_path),
     ]
 
     for source, rel_path in SUPPORT_WXS:
@@ -286,10 +317,12 @@
         source = os.path.basename(source)
         args.append(str(build_dir / ('%s.wixobj' % source[:-4])))
 
-    args.extend([
-        str(build_dir / 'library.wixobj'),
-        str(build_dir / 'mercurial.wixobj'),
-    ])
+    args.extend(
+        [
+            str(build_dir / 'library.wixobj'),
+            str(build_dir / 'mercurial.wixobj'),
+        ]
+    )
 
     subprocess.run(args, cwd=str(source_dir), check=True)
 
@@ -300,11 +333,19 @@
     }
 
 
-def build_signed_installer(source_dir: pathlib.Path, python_exe: pathlib.Path,
-                           name: str, version=None, subject_name=None,
-                           cert_path=None, cert_password=None,
-                           timestamp_url=None, extra_packages_script=None,
-                           extra_wxs=None, extra_features=None):
+def build_signed_installer(
+    source_dir: pathlib.Path,
+    python_exe: pathlib.Path,
+    name: str,
+    version=None,
+    subject_name=None,
+    cert_path=None,
+    cert_password=None,
+    timestamp_url=None,
+    extra_packages_script=None,
+    extra_wxs=None,
+    extra_features=None,
+):
     """Build an installer with signed executables."""
 
     post_build_fn = make_post_build_signing_fn(
@@ -312,16 +353,27 @@
         subject_name=subject_name,
         cert_path=cert_path,
         cert_password=cert_password,
-        timestamp_url=timestamp_url)
+        timestamp_url=timestamp_url,
+    )
 
-    info = build_installer(source_dir, python_exe=python_exe,
-                           msi_name=name.lower(), version=version,
-                           post_build_fn=post_build_fn,
-                           extra_packages_script=extra_packages_script,
-                           extra_wxs=extra_wxs, extra_features=extra_features)
+    info = build_installer(
+        source_dir,
+        python_exe=python_exe,
+        msi_name=name.lower(),
+        version=version,
+        post_build_fn=post_build_fn,
+        extra_packages_script=extra_packages_script,
+        extra_wxs=extra_wxs,
+        extra_features=extra_features,
+    )
 
     description = '%s %s' % (name, version)
 
-    sign_with_signtool(info['msi_path'], description,
-                       subject_name=subject_name, cert_path=cert_path,
-                       cert_password=cert_password, timestamp_url=timestamp_url)
+    sign_with_signtool(
+        info['msi_path'],
+        description,
+        subject_name=subject_name,
+        cert_path=cert_path,
+        cert_password=cert_password,
+        timestamp_url=timestamp_url,
+    )
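
make_post_build_signing_fn() above is a closure factory: it captures the
signing configuration once and hands back a callback that only needs the
per-build paths. A minimal sketch of that shape, using illustrative names
rather than Mercurial's real helpers::

   def make_post_build_fn(tool, cert_path=None):
       # Configuration is captured by the closure; callers supply only
       # the per-build paths and version.
       def post_build(source_dir, build_dir, dist_dir, version):
           print('%s signing %s/hg.exe (%s)' % (tool, dist_dir, version))

       return post_build

   hook = make_post_build_fn('signtool', cert_path='cert.pfx')
   hook('src', 'build', 'dist', '5.2')
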
--- a/contrib/packaging/inno/build.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/packaging/inno/build.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,14 +19,15 @@
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
 
-    parser.add_argument('--python',
-                        required=True,
-                        help='path to python.exe to use')
-    parser.add_argument('--iscc',
-                        help='path to iscc.exe to use')
-    parser.add_argument('--version',
-                        help='Mercurial version string to use '
-                             '(detected from __version__.py if not defined')
+    parser.add_argument(
+        '--python', required=True, help='path to python.exe to use'
+    )
+    parser.add_argument('--iscc', help='path to iscc.exe to use')
+    parser.add_argument(
+        '--version',
+        help='Mercurial version string to use '
+        '(detected from __version__.py if not defined)',
+    )
 
     args = parser.parse_args()
 
@@ -36,8 +37,11 @@
     if args.iscc:
         iscc = pathlib.Path(args.iscc)
     else:
-        iscc = (pathlib.Path(os.environ['ProgramFiles(x86)']) / 'Inno Setup 5' /
-            'ISCC.exe')
+        iscc = (
+            pathlib.Path(os.environ['ProgramFiles(x86)'])
+            / 'Inno Setup 5'
+            / 'ISCC.exe'
+        )
 
     here = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
     source_dir = here.parent.parent.parent
@@ -47,5 +51,10 @@
 
     from hgpackaging.inno import build
 
-    build(source_dir, build_dir, pathlib.Path(args.python), iscc,
-          version=args.version)
+    build(
+        source_dir,
+        build_dir,
+        pathlib.Path(args.python),
+        iscc,
+        version=args.version,
+    )
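
The default ISCC.exe lookup above relies on pathlib's ``/`` operator, which
joins one path segment per application and keeps a long default readable
when wrapped. A small sketch (the Program Files path is a stand-in value)::

   import pathlib

   program_files = pathlib.Path('C:/Program Files (x86)')  # stand-in value
   iscc = program_files / 'Inno Setup 5' / 'ISCC.exe'
   print(iscc)
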
--- a/contrib/packaging/wix/build.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/packaging/wix/build.py	Mon Oct 21 11:09:48 2019 -0400
@@ -17,31 +17,42 @@
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
 
-    parser.add_argument('--name',
-                        help='Application name',
-                        default='Mercurial')
-    parser.add_argument('--python',
-                        help='Path to Python executable to use',
-                        required=True)
-    parser.add_argument('--sign-sn',
-                        help='Subject name (or fragment thereof) of certificate '
-                             'to use for signing')
-    parser.add_argument('--sign-cert',
-                        help='Path to certificate to use for signing')
-    parser.add_argument('--sign-password',
-                        help='Password for signing certificate')
-    parser.add_argument('--sign-timestamp-url',
-                        help='URL of timestamp server to use for signing')
-    parser.add_argument('--version',
-                        help='Version string to use')
-    parser.add_argument('--extra-packages-script',
-                        help=('Script to execute to include extra packages in '
-                              'py2exe binary.'))
-    parser.add_argument('--extra-wxs',
-                        help='CSV of path_to_wxs_file=working_dir_for_wxs_file')
-    parser.add_argument('--extra-features',
-                        help=('CSV of extra feature names to include '
-                              'in the installer from the extra wxs files'))
+    parser.add_argument('--name', help='Application name', default='Mercurial')
+    parser.add_argument(
+        '--python', help='Path to Python executable to use', required=True
+    )
+    parser.add_argument(
+        '--sign-sn',
+        help='Subject name (or fragment thereof) of certificate '
+        'to use for signing',
+    )
+    parser.add_argument(
+        '--sign-cert', help='Path to certificate to use for signing'
+    )
+    parser.add_argument(
+        '--sign-password', help='Password for signing certificate'
+    )
+    parser.add_argument(
+        '--sign-timestamp-url',
+        help='URL of timestamp server to use for signing',
+    )
+    parser.add_argument('--version', help='Version string to use')
+    parser.add_argument(
+        '--extra-packages-script',
+        help=(
+            'Script to execute to include extra packages in py2exe binary.'
+        ),
+    )
+    parser.add_argument(
+        '--extra-wxs', help='CSV of path_to_wxs_file=working_dir_for_wxs_file'
+    )
+    parser.add_argument(
+        '--extra-features',
+        help=(
+            'CSV of extra feature names to include '
+            'in the installer from the extra wxs files'
+        ),
+    )
 
     args = parser.parse_args()
 
@@ -69,7 +80,8 @@
         kwargs['extra_packages_script'] = args.extra_packages_script
     if args.extra_wxs:
         kwargs['extra_wxs'] = dict(
-            thing.split("=") for thing in args.extra_wxs.split(','))
+            thing.split("=") for thing in args.extra_wxs.split(',')
+        )
     if args.extra_features:
         kwargs['extra_features'] = args.extra_features.split(',')
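
The ``--extra-wxs`` handling above turns a CSV of ``path=workdir`` pairs
into a dict in one expression. A runnable sketch with made-up values::

   extra_wxs = 'a.wxs=/tmp/a,b.wxs=/tmp/b'  # illustrative flag value
   mapping = dict(thing.split('=') for thing in extra_wxs.split(','))
   print(mapping)  # {'a.wxs': '/tmp/a', 'b.wxs': '/tmp/b'}
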
 
--- a/contrib/perf-utils/perf-revlog-write-plot.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/perf-utils/perf-revlog-write-plot.py	Mon Oct 21 11:09:48 2019 -0400
@@ -44,18 +44,12 @@
     comb_plt = fig.add_subplot(211)
     other_plt = fig.add_subplot(212)
 
-    comb_plt.plot(ary[0],
-                  np.cumsum(ary[1]),
-                  color='red',
-                  linewidth=1,
-                  label='comb')
+    comb_plt.plot(
+        ary[0], np.cumsum(ary[1]), color='red', linewidth=1, label='comb'
+    )
 
     plots = []
-    p = other_plt.plot(ary[0],
-                       ary[1],
-                       color='red',
-                       linewidth=1,
-                       label='wall')
+    p = other_plt.plot(ary[0], ary[1], color='red', linewidth=1, label='wall')
     plots.append(p)
 
     colors = {
@@ -64,20 +58,24 @@
         1000: ('purple', 'xkcd:dark pink'),
     }
     for n, color in colors.items():
-        avg_n = np.convolve(ary[1], np.full(n, 1. / n), 'valid')
-        p = other_plt.plot(ary[0][n - 1:],
-                           avg_n,
-                           color=color[0],
-                           linewidth=1,
-                           label='avg time last %d' % n)
+        avg_n = np.convolve(ary[1], np.full(n, 1.0 / n), 'valid')
+        p = other_plt.plot(
+            ary[0][n - 1 :],
+            avg_n,
+            color=color[0],
+            linewidth=1,
+            label='avg time last %d' % n,
+        )
         plots.append(p)
 
         med_n = scipy.signal.medfilt(ary[1], n + 1)
-        p = other_plt.plot(ary[0],
-                           med_n,
-                           color=color[1],
-                           linewidth=1,
-                           label='median time last %d' % n)
+        p = other_plt.plot(
+            ary[0],
+            med_n,
+            color=color[1],
+            linewidth=1,
+            label='median time last %d' % n,
+        )
         plots.append(p)
 
     formatter = mticker.ScalarFormatter()
@@ -108,6 +106,7 @@
         else:
             legline.set_alpha(0.2)
         fig.canvas.draw()
+
     if title is not None:
         fig.canvas.set_window_title(title)
     fig.canvas.mpl_connect('pick_event', onpick)
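
The rolling average above convolves the timings with a length-n kernel of
1/n in 'valid' mode, which yields len(y) - n + 1 points; that is why the x
values are sliced with [n - 1:] to stay aligned. A tiny sketch::

   import numpy as np

   y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
   n = 3
   avg = np.convolve(y, np.full(n, 1.0 / n), 'valid')
   print(avg)  # [2. 3. 4.], aligned with the x values sliced by [n - 1:]
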
--- a/contrib/perf.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/perf.py	Mon Oct 21 11:09:48 2019 -0400
@@ -84,32 +84,33 @@
 # try to import modules separately (in dict order), and ignore
 # failure, because these aren't available with early Mercurial
 try:
-    from mercurial import branchmap # since 2.5 (or bcee63733aad)
+    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
 except ImportError:
     pass
 try:
-    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
+    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
 except ImportError:
     pass
 try:
-    from mercurial import registrar # since 3.7 (or 37d50250b696)
-    dir(registrar) # forcibly load it
+    from mercurial import registrar  # since 3.7 (or 37d50250b696)
+
+    dir(registrar)  # forcibly load it
 except ImportError:
     registrar = None
 try:
-    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
+    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
 except ImportError:
     pass
 try:
-    from mercurial.utils import repoviewutil # since 5.0
+    from mercurial.utils import repoviewutil  # since 5.0
 except ImportError:
     repoviewutil = None
 try:
-    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
+    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
 except ImportError:
     pass
 try:
-    from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
+    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
 except ImportError:
     pass
 
@@ -118,41 +119,48 @@
 except ImportError:
     profiling = None
 
+
 def identity(a):
     return a
 
+
 try:
     from mercurial import pycompat
+
     getargspec = pycompat.getargspec  # added to module after 4.5
     _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
-    _sysstr = pycompat.sysstr         # since 4.0 (or 2219f4f82ede)
-    _xrange = pycompat.xrange         # since 4.8 (or 7eba8f83129b)
-    fsencode = pycompat.fsencode      # since 3.9 (or f4a5e0e86a7e)
+    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
+    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
+    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
+    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
     if pycompat.ispy3:
         _maxint = sys.maxsize  # per py3 docs for replacing maxint
     else:
         _maxint = sys.maxint
-except (ImportError, AttributeError):
+except (NameError, ImportError, AttributeError):
     import inspect
+
     getargspec = inspect.getargspec
     _byteskwargs = identity
-    fsencode = identity               # no py3 support
-    _maxint = sys.maxint              # no py3 support
-    _sysstr = lambda x: x             # no py3 support
+    _bytestr = str
+    fsencode = identity  # no py3 support
+    _maxint = sys.maxint  # no py3 support
+    _sysstr = lambda x: x  # no py3 support
     _xrange = xrange
 
 try:
     # 4.7+
     queue = pycompat.queue.Queue
-except (AttributeError, ImportError):
+except (NameError, AttributeError, ImportError):
     # <4.7.
     try:
         queue = pycompat.queue
-    except (AttributeError, ImportError):
-        queue = util.queue
+    except (NameError, AttributeError, ImportError):
+        import Queue as queue
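
Adding NameError to these except clauses matters because the fallback
expressions reference names such as ``pycompat`` that may themselves be
undefined when an earlier import failed. A self-contained sketch of the
probe-and-fall-back pattern (not Mercurial's code)::

   import sys

   try:
       maxint = sys.maxsize  # py3 spelling
   except (NameError, AttributeError):
       maxint = sys.maxint  # py2 only
   print(maxint)
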
 
 try:
     from mercurial import logcmdutil
+
     makelogtemplater = logcmdutil.maketemplater
 except (AttributeError, ImportError):
     try:
@@ -164,8 +172,12 @@
 # define util.safehasattr forcibly, because util.safehasattr has been
 # available since 1.9.3 (or 94b200a11cf7)
 _undefined = object()
+
+
 def safehasattr(thing, attr):
     return getattr(thing, _sysstr(attr), _undefined) is not _undefined
+
+
 setattr(util, 'safehasattr', safehasattr)
 
 # for "historical portability":
@@ -183,20 +195,28 @@
 # available, because commands.formatteropts has been available since
 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
 # available since 2.2 (or ae5f92e154d3)
-formatteropts = getattr(cmdutil, "formatteropts",
-                        getattr(commands, "formatteropts", []))
+formatteropts = getattr(
+    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
+)
 
 # for "historical portability":
 # use locally defined option list, if debugrevlogopts isn't available,
 # because commands.debugrevlogopts has been available since 3.7 (or
 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
 # since 1.9 (or a79fea6b3e77).
-revlogopts = getattr(cmdutil, "debugrevlogopts",
-                     getattr(commands, "debugrevlogopts", [
-        (b'c', b'changelog', False, (b'open changelog')),
-        (b'm', b'manifest', False, (b'open manifest')),
-        (b'', b'dir', False, (b'open directory manifest')),
-        ]))
+revlogopts = getattr(
+    cmdutil,
+    "debugrevlogopts",
+    getattr(
+        commands,
+        "debugrevlogopts",
+        [
+            (b'c', b'changelog', False, b'open changelog'),
+            (b'm', b'manifest', False, b'open manifest'),
+            (b'', b'dir', False, b'open directory manifest'),
+        ],
+    ),
+)
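
``revlogopts`` above chains two getattr() calls so the newest home of the
option list wins, with a local literal as the last resort. A sketch with
illustrative stand-in modules::

   class newmod(object):
       opts = ['from-newmod']

   class oldmod(object):
       pass

   # Both fallbacks are evaluated eagerly, but only the first hit is used.
   opts = getattr(newmod, 'opts', getattr(oldmod, 'opts', ['local-default']))
   print(opts)  # ['from-newmod']
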
 
 cmdtable = {}
 
@@ -206,6 +226,7 @@
 def parsealiases(cmd):
     return cmd.split(b"|")
 
+
 if safehasattr(registrar, 'command'):
     command = registrar.command(cmdtable)
 elif safehasattr(cmdutil, 'command'):
@@ -215,10 +236,13 @@
         # wrap original cmdutil.command, because "norepo" option has
         # been available since 3.1 (or 75a96326cecb)
         _command = command
+
         def command(name, options=(), synopsis=None, norepo=False):
             if norepo:
                 commands.norepo += b' %s' % b' '.join(parsealiases(name))
             return _command(name, list(options), synopsis)
+
+
 else:
     # for "historical portability":
     # define "@command" annotation locally, because cmdutil.command
@@ -232,51 +256,103 @@
             if norepo:
                 commands.norepo += b' %s' % b' '.join(parsealiases(name))
             return func
+
         return decorator
 
+
 try:
     import mercurial.registrar
     import mercurial.configitems
+
     configtable = {}
     configitem = mercurial.registrar.configitem(configtable)
-    configitem(b'perf', b'presleep',
+    configitem(
+        b'perf',
+        b'presleep',
         default=mercurial.configitems.dynamicdefault,
+        experimental=True,
+    )
+    configitem(
+        b'perf',
+        b'stub',
+        default=mercurial.configitems.dynamicdefault,
+        experimental=True,
     )
-    configitem(b'perf', b'stub',
+    configitem(
+        b'perf',
+        b'parentscount',
         default=mercurial.configitems.dynamicdefault,
+        experimental=True,
     )
-    configitem(b'perf', b'parentscount',
+    configitem(
+        b'perf',
+        b'all-timing',
+        default=mercurial.configitems.dynamicdefault,
+        experimental=True,
+    )
+    configitem(
+        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
+    )
+    configitem(
+        b'perf',
+        b'profile-benchmark',
         default=mercurial.configitems.dynamicdefault,
     )
-    configitem(b'perf', b'all-timing',
-        default=mercurial.configitems.dynamicdefault,
-    )
-    configitem(b'perf', b'pre-run',
+    configitem(
+        b'perf',
+        b'run-limits',
         default=mercurial.configitems.dynamicdefault,
-    )
-    configitem(b'perf', b'profile-benchmark',
-        default=mercurial.configitems.dynamicdefault,
-    )
-    configitem(b'perf', b'run-limits',
-        default=mercurial.configitems.dynamicdefault,
+        experimental=True,
     )
 except (ImportError, AttributeError):
     pass
+except TypeError:
+    # compatibility fix for a11fd395e83f
+    # hg version: 5.2
+    configitem(
+        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
+    )
+    configitem(
+        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
+    )
+    configitem(
+        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
+    )
+    configitem(
+        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
+    )
+    configitem(
+        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
+    )
+    configitem(
+        b'perf',
+        b'profile-benchmark',
+        default=mercurial.configitems.dynamicdefault,
+    )
+    configitem(
+        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
+    )
+
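
The TypeError branch above re-registers every config item without the new
``experimental=`` keyword, so the script still loads on Mercurial versions
whose configitem() predates it. A minimal sketch of that retry pattern,
with a stand-in register() function::

   def register(name, default=None):  # older signature, no `experimental`
       return (name, default)

   try:
       item = register('perf.presleep', default=1, experimental=True)
   except TypeError:
       # retry without the keyword the older signature rejects
       item = register('perf.presleep', default=1)
   print(item)
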
 
 def getlen(ui):
     if ui.configbool(b"perf", b"stub", False):
         return lambda x: 1
     return len
 
+
 class noop(object):
     """dummy context manager"""
+
     def __enter__(self):
         pass
+
     def __exit__(self, *args):
         pass
 
+
 NOOPCTX = noop()
 
+
 def gettimer(ui, opts=None):
     """return a timer function and formatter: (timer, formatter)
 
@@ -307,31 +383,42 @@
         # define formatter locally, because ui.formatter has been
         # available since 2.2 (or ae5f92e154d3)
         from mercurial import node
+
         class defaultformatter(object):
             """Minimized composition of baseformatter and plainformatter
             """
+
             def __init__(self, ui, topic, opts):
                 self._ui = ui
                 if ui.debugflag:
                     self.hexfunc = node.hex
                 else:
                     self.hexfunc = node.short
+
             def __nonzero__(self):
                 return False
+
             __bool__ = __nonzero__
+
             def startitem(self):
                 pass
+
             def data(self, **data):
                 pass
+
             def write(self, fields, deftext, *fielddata, **opts):
                 self._ui.write(deftext % fielddata, **opts)
+
             def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                 if cond:
                     self._ui.write(deftext % fielddata, **opts)
+
             def plain(self, text, **opts):
                 self._ui.write(text, **opts)
+
             def end(self):
                 pass
+
         fm = defaultformatter(ui, b'perf', opts)
 
     # stub function, runs code only once instead of in a loop
@@ -348,20 +435,27 @@
     for item in limitspec:
         parts = item.split(b'-', 1)
         if len(parts) < 2:
-            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
-                     % item))
+            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
             continue
         try:
-            time_limit = float(pycompat.sysstr(parts[0]))
+            time_limit = float(_sysstr(parts[0]))
         except ValueError as e:
-            ui.warn((b'malformatted run limit entry, %s: %s\n'
-                     % (pycompat.bytestr(e), item)))
+            ui.warn(
+                (
+                    b'malformatted run limit entry, %s: %s\n'
+                    % (_bytestr(e), item)
+                )
+            )
             continue
         try:
-            run_limit = int(pycompat.sysstr(parts[1]))
+            run_limit = int(_sysstr(parts[1]))
         except ValueError as e:
-            ui.warn((b'malformatted run limit entry, %s: %s\n'
-                     % (pycompat.bytestr(e), item)))
+            ui.warn(
+                (
+                    b'malformatted run limit entry, %s: %s\n'
+                    % (_bytestr(e), item)
+                )
+            )
             continue
         limits.append((time_limit, run_limit))
     if not limits:
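
Each run-limit entry is a ``<seconds>-<count>`` string, split once on "-"
so the numeric parts parse cleanly and malformed entries are skipped. A
condensed sketch of the same parsing (warnings omitted)::

   def parselimits(spec):
       limits = []
       for item in spec:
           parts = item.split('-', 1)
           if len(parts) < 2:
               continue  # missing "-"
           try:
               limits.append((float(parts[0]), int(parts[1])))
           except ValueError:
               continue  # non-numeric part
       return limits

   print(parselimits(['3.0-100', '10.0-3', 'bogus']))
   # -> [(3.0, 100), (10.0, 3)]
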
@@ -373,15 +467,23 @@
             profiler = profiling.profile(ui)
 
     prerun = getint(ui, b"perf", b"pre-run", 0)
-    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
-                          prerun=prerun, profiler=profiler)
+    t = functools.partial(
+        _timer,
+        fm,
+        displayall=displayall,
+        limits=limits,
+        prerun=prerun,
+        profiler=profiler,
+    )
     return t, fm
 
+
 def stub_timer(fm, func, setup=None, title=None):
     if setup is not None:
         setup()
     func()
 
+
 @contextlib.contextmanager
 def timeone():
     r = []
@@ -391,7 +493,7 @@
     cstop = util.timer()
     ostop = os.times()
     a, b = ostart, ostop
-    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
+    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
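
timeone() above is a context manager that appends one (wall, user, system)
tuple per use; the caller reads the list after the block exits. A runnable
sketch using time.perf_counter() in place of util.timer()::

   import contextlib
   import os
   import time

   @contextlib.contextmanager
   def timeone(results):
       ostart = os.times()
       cstart = time.perf_counter()
       yield
       cstop = time.perf_counter()
       ostop = os.times()
       # (wall, user delta, system delta)
       results.append(
           (cstop - cstart, ostop[0] - ostart[0], ostop[1] - ostart[1])
       )

   r = []
   with timeone(r):
       sum(range(100000))
   print(r)
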
 
 
 # list of stop conditions (elapsed time, minimal run count)
@@ -400,8 +502,17 @@
     (10.0, 3),
 )
 
-def _timer(fm, func, setup=None, title=None, displayall=False,
-           limits=DEFAULTLIMITS, prerun=0, profiler=None):
+
+def _timer(
+    fm,
+    func,
+    setup=None,
+    title=None,
+    displayall=False,
+    limits=DEFAULTLIMITS,
+    prerun=0,
+    profiler=None,
+):
     gc.collect()
     results = []
     begin = util.timer()
@@ -430,8 +541,8 @@
                 keepgoing = False
                 break
 
-    formatone(fm, results, title=title, result=r,
-              displayall=displayall)
+    formatone(fm, results, title=title, result=r, displayall=displayall)
+
 
 def formatone(fm, timings, title=None, result=None, displayall=False):
 
@@ -443,6 +554,7 @@
         fm.write(b'title', b'! %s\n', title)
     if result:
         fm.write(b'result', b'! result: %s\n', result)
+
     def display(role, entry):
         prefix = b''
         if role != b'best':
@@ -451,9 +563,10 @@
         fm.write(prefix + b'wall', b' wall %f', entry[0])
         fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
         fm.write(prefix + b'user', b' user %f', entry[1])
-        fm.write(prefix + b'sys',  b' sys %f', entry[2])
-        fm.write(prefix + b'count',  b' (%s of %%d)' % role, count)
+        fm.write(prefix + b'sys', b' sys %f', entry[2])
+        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
         fm.plain(b'\n')
+
     timings.sort()
     min_val = timings[0]
     display(b'best', min_val)
@@ -465,8 +578,10 @@
         median = timings[len(timings) // 2]
         display(b'median', median)
 
+
 # utilities for historical portability
 
+
 def getint(ui, section, name, default):
     # for "historical portability":
     # ui.configint has been available since 1.9 (or fa2b596db182)
@@ -476,8 +591,10 @@
     try:
         return int(v)
     except ValueError:
-        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
-                                % (section, name, v))
+        raise error.ConfigError(
+            b"%s.%s is not an integer ('%s')" % (section, name, v)
+        )
+
 
 def safeattrsetter(obj, name, ignoremissing=False):
     """Ensure that 'obj' has 'name' attribute before subsequent setattr
@@ -497,20 +614,29 @@
     if not util.safehasattr(obj, name):
         if ignoremissing:
             return None
-        raise error.Abort((b"missing attribute %s of %s might break assumption"
-                           b" of performance measurement") % (name, obj))
+        raise error.Abort(
+            (
+                b"missing attribute %s of %s might break assumption"
+                b" of performance measurement"
+            )
+            % (name, obj)
+        )
 
     origvalue = getattr(obj, _sysstr(name))
+
     class attrutil(object):
         def set(self, newvalue):
             setattr(obj, _sysstr(name), newvalue)
+
         def restore(self):
             setattr(obj, _sysstr(name), origvalue)
 
     return attrutil()
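
safeattrsetter() captures the original attribute value up front so a
benchmark can mutate it and restore it afterwards. A stripped-down sketch
of the same set/restore idea::

   class attrsetter(object):
       def __init__(self, obj, name):
           self.obj, self.name = obj, name
           self.orig = getattr(obj, name)  # fail fast if missing

       def set(self, value):
           setattr(self.obj, self.name, value)

       def restore(self):
           setattr(self.obj, self.name, self.orig)

   class thing(object):
       cache = 'warm'

   setter = attrsetter(thing, 'cache')
   setter.set(None)    # simulate clearing a cache
   setter.restore()
   print(thing.cache)  # 'warm'
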
 
+
 # utilities to examine each internal API changes
 
+
 def getbranchmapsubsettable():
     # for "historical portability":
     # subsettable is defined in:
@@ -525,8 +651,11 @@
     # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
     # branchmap and repoview modules exist, but subsettable attribute
     # doesn't)
-    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
-                      hint=b"use 2.5 or later")
+    raise error.Abort(
+        b"perfbranchmap not available with this Mercurial",
+        hint=b"use 2.5 or later",
+    )
+
 
 def getsvfs(repo):
     """Return appropriate object to access files under .hg/store
@@ -539,6 +668,7 @@
     else:
         return getattr(repo, 'sopener')
 
+
 def getvfs(repo):
     """Return appropriate object to access files under .hg
     """
@@ -550,10 +680,11 @@
     else:
         return getattr(repo, 'opener')
 
+
 def repocleartagscachefunc(repo):
     """Return the function to clear tags cache according to repo internal API
     """
-    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
+    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
         # in this case, setattr(repo, '_tagscache', None) or so isn't
         # correct way to clear tags cache, because existing code paths
         # expect _tagscache to be a structured object.
@@ -562,25 +693,28 @@
             # 98c867ac1330), and delattr() can't work in such case
             if b'_tagscache' in vars(repo):
                 del repo.__dict__[b'_tagscache']
+
         return clearcache
 
     repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
-    if repotags: # since 1.4 (or 5614a628d173)
-        return lambda : repotags.set(None)
+    if repotags:  # since 1.4 (or 5614a628d173)
+        return lambda: repotags.set(None)
 
     repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
-    if repotagscache: # since 0.6 (or d7df759d0e97)
-        return lambda : repotagscache.set(None)
+    if repotagscache:  # since 0.6 (or d7df759d0e97)
+        return lambda: repotagscache.set(None)
 
     # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
     # this point, but it isn't so problematic, because:
     # - repo.tags of such Mercurial isn't "callable", and repo.tags()
     #   in perftags() causes failure soon
     # - perf.py itself has been available since 1.1 (or eb240755386d)
-    raise error.Abort((b"tags API of this hg command is unknown"))
+    raise error.Abort(b"tags API of this hg command is unknown")
+
 
 # utilities to clear cache
 
+
 def clearfilecache(obj, attrname):
     unfiltered = getattr(obj, 'unfiltered', None)
     if unfiltered is not None:
@@ -589,23 +723,32 @@
         delattr(obj, attrname)
     obj._filecache.pop(attrname, None)
 
+
 def clearchangelog(repo):
     if repo is not repo.unfiltered():
         object.__setattr__(repo, r'_clcachekey', None)
         object.__setattr__(repo, r'_clcache', None)
     clearfilecache(repo.unfiltered(), 'changelog')
 
+
 # perf commands
 
+
 @command(b'perfwalk', formatteropts)
 def perfwalk(ui, repo, *pats, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     m = scmutil.match(repo[None], pats, {})
-    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
-                                              ignored=False))))
+    timer(
+        lambda: len(
+            list(
+                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
+            )
+        )
+    )
     fm.end()
 
+
 @command(b'perfannotate', formatteropts)
 def perfannotate(ui, repo, f, **opts):
     opts = _byteskwargs(opts)
@@ -614,18 +757,29 @@
     timer(lambda: len(fc.annotate(True)))
     fm.end()
 
-@command(b'perfstatus',
-         [(b'u', b'unknown', False,
-           b'ask status to look for unknown files')] + formatteropts)
+
+@command(
+    b'perfstatus',
+    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
+    + formatteropts,
+)
 def perfstatus(ui, repo, **opts):
+    """benchmark the performance of a single status call
+
+    The repository data are preserved between calls.
+
+    By default, only the status of the tracked files is requested. If
+    `--unknown` is passed, the "unknown" files are also requested.
+    """
     opts = _byteskwargs(opts)
-    #m = match.always(repo.root, repo.getcwd())
-    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
+    # m = match.always(repo.root, repo.getcwd())
+    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
     #                                                False))))
     timer, fm = gettimer(ui, opts)
     timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
     fm.end()
 
+
 @command(b'perfaddremove', formatteropts)
 def perfaddremove(ui, repo, **opts):
     opts = _byteskwargs(opts)
@@ -644,71 +798,89 @@
         repo.ui.quiet = oldquiet
         fm.end()
 
+
 def clearcaches(cl):
     # behave somewhat consistently across internal API changes
     if util.safehasattr(cl, b'clearcaches'):
         cl.clearcaches()
     elif util.safehasattr(cl, b'_nodecache'):
         from mercurial.node import nullid, nullrev
+
         cl._nodecache = {nullid: nullrev}
         cl._nodepos = None
 
+
 @command(b'perfheads', formatteropts)
 def perfheads(ui, repo, **opts):
     """benchmark the computation of a changelog heads"""
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     cl = repo.changelog
+
     def s():
         clearcaches(cl)
+
     def d():
         len(cl.headrevs())
+
     timer(d, setup=s)
     fm.end()
 
-@command(b'perftags', formatteropts+
-        [
-            (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
-        ])
+
+@command(
+    b'perftags',
+    formatteropts
+    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
+)
 def perftags(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     repocleartagscache = repocleartagscachefunc(repo)
     clearrevlogs = opts[b'clear_revlogs']
+
     def s():
         if clearrevlogs:
             clearchangelog(repo)
             clearfilecache(repo.unfiltered(), 'manifest')
         repocleartagscache()
+
     def t():
         return len(repo.tags())
+
     timer(t, setup=s)
     fm.end()
 
+
 @command(b'perfancestors', formatteropts)
 def perfancestors(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     heads = repo.changelog.headrevs()
+
     def d():
         for a in repo.changelog.ancestors(heads):
             pass
+
     timer(d)
     fm.end()
 
+
 @command(b'perfancestorset', formatteropts)
 def perfancestorset(ui, repo, revset, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     revs = repo.revs(revset)
     heads = repo.changelog.headrevs()
+
     def d():
         s = repo.changelog.ancestors(heads)
         for rev in revs:
             rev in s
+
     timer(d)
     fm.end()
 
+
 @command(b'perfdiscovery', formatteropts, b'PATH')
 def perfdiscovery(ui, repo, path, **opts):
     """benchmark discovery between local repo and the peer at given path
@@ -719,30 +891,38 @@
 
     def s():
         repos[1] = hg.peer(ui, opts, path)
+
     def d():
         setdiscovery.findcommonheads(ui, *repos)
+
     timer(d, setup=s)
     fm.end()
 
-@command(b'perfbookmarks', formatteropts +
-        [
-            (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
-        ])
+
+@command(
+    b'perfbookmarks',
+    formatteropts
+    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
+)
 def perfbookmarks(ui, repo, **opts):
     """benchmark parsing bookmarks from disk to memory"""
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
 
     clearrevlogs = opts[b'clear_revlogs']
+
     def s():
         if clearrevlogs:
             clearchangelog(repo)
         clearfilecache(repo, b'_bookmarks')
+
     def d():
         repo._bookmarks
+
     timer(d, setup=s)
     fm.end()
 
+
 @command(b'perfbundleread', formatteropts, b'BUNDLE')
 def perfbundleread(ui, repo, bundlepath, **opts):
     """Benchmark reading of bundle files.
@@ -832,25 +1012,32 @@
         bundle = exchange.readbundle(ui, fh, bundlepath)
 
         if isinstance(bundle, changegroup.cg1unpacker):
-            benches.extend([
-                (makebench(deltaiter), b'cg1 deltaiter()'),
-                (makebench(iterchunks), b'cg1 getchunks()'),
-                (makereadnbytes(8192), b'cg1 read(8k)'),
-                (makereadnbytes(16384), b'cg1 read(16k)'),
-                (makereadnbytes(32768), b'cg1 read(32k)'),
-                (makereadnbytes(131072), b'cg1 read(128k)'),
-            ])
+            benches.extend(
+                [
+                    (makebench(deltaiter), b'cg1 deltaiter()'),
+                    (makebench(iterchunks), b'cg1 getchunks()'),
+                    (makereadnbytes(8192), b'cg1 read(8k)'),
+                    (makereadnbytes(16384), b'cg1 read(16k)'),
+                    (makereadnbytes(32768), b'cg1 read(32k)'),
+                    (makereadnbytes(131072), b'cg1 read(128k)'),
+                ]
+            )
         elif isinstance(bundle, bundle2.unbundle20):
-            benches.extend([
-                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
-                (makebench(iterparts), b'bundle2 iterparts()'),
-                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
-                (makebench(seek), b'bundle2 part seek()'),
-                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
-                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
-                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
-                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
-            ])
+            benches.extend(
+                [
+                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
+                    (makebench(iterparts), b'bundle2 iterparts()'),
+                    (
+                        makebench(iterpartsseekable),
+                        b'bundle2 iterparts() seekable',
+                    ),
+                    (makebench(seek), b'bundle2 part seek()'),
+                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
+                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
+                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
+                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
+                ]
+            )
         elif isinstance(bundle, streamclone.streamcloneapplier):
             raise error.Abort(b'stream clone bundles not supported')
         else:
@@ -861,9 +1048,15 @@
         timer(fn, title=title)
         fm.end()
 
-@command(b'perfchangegroupchangelog', formatteropts +
-         [(b'', b'cgversion', b'02', b'changegroup version'),
-          (b'r', b'rev', b'', b'revisions to add to changegroup')])
+
+@command(
+    b'perfchangegroupchangelog',
+    formatteropts
+    + [
+        (b'', b'cgversion', b'02', b'changegroup version'),
+        (b'r', b'rev', b'', b'revisions to add to changegroup'),
+    ],
+)
 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
     """Benchmark producing a changelog group for a changegroup.
 
@@ -892,77 +1085,166 @@
 
     fm.end()
 
+
 @command(b'perfdirs', formatteropts)
 def perfdirs(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
     b'a' in dirstate
+
     def d():
         dirstate.hasdir(b'a')
         del dirstate._map._dirs
+
     timer(d)
     fm.end()
 
-@command(b'perfdirstate', formatteropts)
+
+@command(
+    b'perfdirstate',
+    [
+        (
+            b'',
+            b'iteration',
+            None,
+            b'benchmark a full iteration for the dirstate',
+        ),
+        (
+            b'',
+            b'contains',
+            None,
+            b'benchmark a large number of `nf in dirstate` calls',
+        ),
+    ]
+    + formatteropts,
+)
 def perfdirstate(ui, repo, **opts):
-    opts = _byteskwargs(opts)
-    timer, fm = gettimer(ui, opts)
-    b"a" in repo.dirstate
-    def d():
-        repo.dirstate.invalidate()
-        b"a" in repo.dirstate
-    timer(d)
-    fm.end()
-
-@command(b'perfdirstatedirs', formatteropts)
-def perfdirstatedirs(ui, repo, **opts):
+    """benchmap the time of various distate operations
+
+    By default benchmark the time necessary to load a dirstate from scratch.
+    The dirstate is loaded to the point were a "contains" request can be
+    answered.
+    """
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     b"a" in repo.dirstate
+
+    if opts[b'iteration'] and opts[b'contains']:
+        msg = b'only specify one of --iteration or --contains'
+        raise error.Abort(msg)
+
+    if opts[b'iteration']:
+        setup = None
+        dirstate = repo.dirstate
+
+        def d():
+            for f in dirstate:
+                pass
+
+    elif opts[b'contains']:
+        setup = None
+        dirstate = repo.dirstate
+        allfiles = list(dirstate)
+        # also add file paths that will be "missing" from the dirstate
+        allfiles.extend([f[::-1] for f in allfiles])
+
+        def d():
+            for f in allfiles:
+                f in dirstate
+
+    else:
+
+        def setup():
+            repo.dirstate.invalidate()
+
+        def d():
+            b"a" in repo.dirstate
+
+    timer(d, setup=setup)
+    fm.end()
+
+
+@command(b'perfdirstatedirs', formatteropts)
+def perfdirstatedirs(ui, repo, **opts):
+    """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
+    """
+    opts = _byteskwargs(opts)
+    timer, fm = gettimer(ui, opts)
+    repo.dirstate.hasdir(b"a")
+
+    def setup():
+        del repo.dirstate._map._dirs
+
     def d():
         repo.dirstate.hasdir(b"a")
-        del repo.dirstate._map._dirs
-    timer(d)
+
+    timer(d, setup=setup)
     fm.end()
 
+
 @command(b'perfdirstatefoldmap', formatteropts)
 def perfdirstatefoldmap(ui, repo, **opts):
+    """benchmap a `dirstate._map.filefoldmap.get()` request
+
+    The dirstate filefoldmap cache is dropped between every request.
+    """
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
-    b'a' in dirstate
+    dirstate._map.filefoldmap.get(b'a')
+
+    def setup():
+        del dirstate._map.filefoldmap
+
     def d():
         dirstate._map.filefoldmap.get(b'a')
-        del dirstate._map.filefoldmap
-    timer(d)
+
+    timer(d, setup=setup)
     fm.end()
 
+
 @command(b'perfdirfoldmap', formatteropts)
 def perfdirfoldmap(ui, repo, **opts):
+    """benchmap a `dirstate._map.dirfoldmap.get()` request
+
+    The dirstate dirfoldmap cache is dropped between every request.
+    """
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
-    b'a' in dirstate
+    dirstate._map.dirfoldmap.get(b'a')
+
+    def setup():
+        del dirstate._map.dirfoldmap
+        del dirstate._map._dirs
+
     def d():
         dirstate._map.dirfoldmap.get(b'a')
-        del dirstate._map.dirfoldmap
-        del dirstate._map._dirs
-    timer(d)
+
+    timer(d, setup=setup)
     fm.end()
 
+
 @command(b'perfdirstatewrite', formatteropts)
 def perfdirstatewrite(ui, repo, **opts):
+    """benchmap the time it take to write a dirstate on disk
+    """
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     ds = repo.dirstate
     b"a" in ds
+
+    def setup():
+        ds._dirty = True
+
     def d():
-        ds._dirty = True
         ds.write(repo.currenttransaction())
-    timer(d)
+
+    timer(d, setup=setup)
     fm.end()
 
+
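
The perfdirstate* rewrites above all follow one pattern: cache invalidation
moves out of the timed body and into a setup() callback, so the timer
measures only the operation of interest. A schematic sketch of that split::

   import time

   def timed(func, setup=None, runs=3):
       results = []
       for _ in range(runs):
           if setup is not None:
               setup()        # not measured
           start = time.perf_counter()
           func()             # measured
           results.append(time.perf_counter() - start)
       return min(results)

   cache = {}

   def setup():
       cache.clear()          # stands in for `del dirstate._map._dirs`

   def d():
       cache.setdefault('dirs', object())

   print('%.6f' % timed(d, setup=setup))
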
 def _getmergerevs(repo, opts):
     """parse command argument to return rev involved in merge
 
@@ -985,43 +1267,64 @@
         ancestor = wctx.ancestor(rctx)
     return (wctx, rctx, ancestor)
 
-@command(b'perfmergecalculate',
-         [
-             (b'r', b'rev', b'.', b'rev to merge against'),
-             (b'', b'from', b'', b'rev to merge from'),
-             (b'', b'base', b'', b'the revision to use as base'),
-         ] + formatteropts)
+
+@command(
+    b'perfmergecalculate',
+    [
+        (b'r', b'rev', b'.', b'rev to merge against'),
+        (b'', b'from', b'', b'rev to merge from'),
+        (b'', b'base', b'', b'the revision to use as base'),
+    ]
+    + formatteropts,
+)
 def perfmergecalculate(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
 
     wctx, rctx, ancestor = _getmergerevs(repo, opts)
+
     def d():
         # acceptremote is True because we don't want prompts in the middle of
         # our benchmark
-        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
-                               acceptremote=True, followcopies=True)
+        merge.calculateupdates(
+            repo,
+            wctx,
+            rctx,
+            [ancestor],
+            branchmerge=False,
+            force=False,
+            acceptremote=True,
+            followcopies=True,
+        )
+
     timer(d)
     fm.end()
 
-@command(b'perfmergecopies',
-         [
-             (b'r', b'rev', b'.', b'rev to merge against'),
-             (b'', b'from', b'', b'rev to merge from'),
-             (b'', b'base', b'', b'the revision to use as base'),
-         ] + formatteropts)
+
+@command(
+    b'perfmergecopies',
+    [
+        (b'r', b'rev', b'.', b'rev to merge against'),
+        (b'', b'from', b'', b'rev to merge from'),
+        (b'', b'base', b'', b'the revision to use as base'),
+    ]
+    + formatteropts,
+)
 def perfmergecopies(ui, repo, **opts):
     """measure runtime of `copies.mergecopies`"""
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     wctx, rctx, ancestor = _getmergerevs(repo, opts)
+
     def d():
         # acceptremote is True because we don't want prompts in the middle of
         # our benchmark
         copies.mergecopies(repo, wctx, rctx, ancestor)
+
     timer(d)
     fm.end()
 
+
 @command(b'perfpathcopies', [], b"REV REV")
 def perfpathcopies(ui, repo, rev1, rev2, **opts):
     """benchmark the copy tracing logic"""
@@ -1029,20 +1332,26 @@
     timer, fm = gettimer(ui, opts)
     ctx1 = scmutil.revsingle(repo, rev1, rev1)
     ctx2 = scmutil.revsingle(repo, rev2, rev2)
+
     def d():
         copies.pathcopies(ctx1, ctx2)
+
     timer(d)
     fm.end()
 
-@command(b'perfphases',
-         [(b'', b'full', False, b'include file reading time too'),
-          ], b"")
+
+@command(
+    b'perfphases',
+    [(b'', b'full', False, b'include file reading time too'),],
+    b"",
+)
 def perfphases(ui, repo, **opts):
     """benchmark phasesets computation"""
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     _phases = repo._phasecache
     full = opts.get(b'full')
+
     def d():
         phases = _phases
         if full:
@@ -1050,30 +1359,32 @@
             phases = repo._phasecache
         phases.invalidate()
         phases.loadphaserevs(repo)
+
     timer(d)
     fm.end()
 
-@command(b'perfphasesremote',
-         [], b"[DEST]")
+
+@command(b'perfphasesremote', [], b"[DEST]")
 def perfphasesremote(ui, repo, dest=None, **opts):
     """benchmark time needed to analyse phases of the remote server"""
-    from mercurial.node import (
-        bin,
-    )
+    from mercurial.node import bin
     from mercurial import (
         exchange,
         hg,
         phases,
     )
+
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
 
     path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
     if not path:
-        raise error.Abort((b'default repository not configured!'),
-                          hint=(b"see 'hg help config.paths'"))
+        raise error.Abort(
+            b'default repository not configured!',
+            hint=b"see 'hg help config.paths'",
+        )
     dest = path.pushloc or path.loc
-    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
+    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
     other = hg.peer(repo, opts, dest)
 
     # easier to perform discovery through the operation
@@ -1083,36 +1394,43 @@
     remotesubset = op.fallbackheads
 
     with other.commandexecutor() as e:
-        remotephases = e.callcommand(b'listkeys',
-                       {b'namespace': b'phases'}).result()
+        remotephases = e.callcommand(
+            b'listkeys', {b'namespace': b'phases'}
+        ).result()
     del other
     publishing = remotephases.get(b'publishing', False)
     if publishing:
-        ui.status((b'publishing: yes\n'))
+        ui.statusnoi18n(b'publishing: yes\n')
     else:
-        ui.status((b'publishing: no\n'))
+        ui.statusnoi18n(b'publishing: no\n')
 
     nodemap = repo.changelog.nodemap
     nonpublishroots = 0
     for nhex, phase in remotephases.iteritems():
-        if nhex == b'publishing': # ignore data related to publish option
+        if nhex == b'publishing':  # ignore data related to publish option
             continue
         node = bin(nhex)
         if node in nodemap and int(phase):
             nonpublishroots += 1
-    ui.status((b'number of roots: %d\n') % len(remotephases))
-    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
+    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
+    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
+
     def d():
-        phases.remotephasessummary(repo,
-                                   remotesubset,
-                                   remotephases)
+        phases.remotephasessummary(repo, remotesubset, remotephases)
+
     timer(d)
     fm.end()
 
-@command(b'perfmanifest',[
-            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
-            (b'', b'clear-disk', False, b'clear on-disk caches too'),
-         ] + formatteropts, b'REV|NODE')
+
+@command(
+    b'perfmanifest',
+    [
+        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
+        (b'', b'clear-disk', False, b'clear on-disk caches too'),
+    ]
+    + formatteropts,
+    b'REV|NODE',
+)
 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
     """benchmark the time to read a manifest from disk and return a usable
     dict-like object
@@ -1137,25 +1455,32 @@
                 else:
                     t = repo.manifestlog._revlog.lookup(rev)
             except ValueError:
-                raise error.Abort(b'manifest revision must be integer or full '
-                                  b'node')
+                raise error.Abort(
+                    b'manifest revision must be integer or full node'
+                )
+
     def d():
         repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
         repo.manifestlog[t].read()
+
     timer(d)
     fm.end()
 
+
 @command(b'perfchangeset', formatteropts)
 def perfchangeset(ui, repo, rev, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     n = scmutil.revsingle(repo, rev).node()
+
     def d():
         repo.changelog.read(n)
-        #repo.changelog._cache = None
+        # repo.changelog._cache = None
+
     timer(d)
     fm.end()
 
+
 @command(b'perfignore', formatteropts)
 def perfignore(ui, repo, **opts):
     """benchmark operation related to computing ignore"""
@@ -1173,10 +1498,15 @@
     timer(runone, setup=setupone, title=b"load")
     fm.end()
 
-@command(b'perfindex', [
-            (b'', b'rev', [], b'revision to be looked up (default tip)'),
-            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
-         ] + formatteropts)
+
+@command(
+    b'perfindex',
+    [
+        (b'', b'rev', [], b'revision to be looked up (default tip)'),
+        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
+    ]
+    + formatteropts,
+)
 def perfindex(ui, repo, **opts):
     """benchmark index creation time followed by a lookup
 
@@ -1199,9 +1529,10 @@
     It is not currently possible to check for lookup of a missing node. For
     deeper lookup benchmarking, check out the `perfnodemap` command."""
     import mercurial.revlog
+
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
+    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
     if opts[b'no_lookup']:
         if opts['rev']:
             raise error.Abort('--no-lookup and --rev are mutually exclusive')
@@ -1217,20 +1548,28 @@
     # find the filecache func directly
     # This avoid polluting the benchmark with the filecache logic
     makecl = unfi.__class__.changelog.func
+
     def setup():
         # probably not necessary, but for good measure
         clearchangelog(unfi)
+
     def d():
         cl = makecl(unfi)
         for n in nodes:
             cl.rev(n)
+
     timer(d, setup=setup)
     fm.end()
 
-@command(b'perfnodemap', [
-          (b'', b'rev', [], b'revision to be looked up (default tip)'),
-          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
-    ] + formatteropts)
+
+@command(
+    b'perfnodemap',
+    [
+        (b'', b'rev', [], b'revision to be looked up (default tip)'),
+        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
+    ]
+    + formatteropts,
+)
 def perfnodemap(ui, repo, **opts):
     """benchmark the time necessary to look up revision from a cold nodemap
 
@@ -1249,9 +1588,10 @@
     hexlookup, prefix lookup and missing lookup would also be valuable.
     """
     import mercurial.revlog
+
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
+    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
 
     unfi = repo.unfiltered()
     clearcaches = opts['clear_caches']
@@ -1266,6 +1606,7 @@
 
     # use a list to pass reference to a nodemap from one closure to the next
     nodeget = [None]
+
     def setnodeget():
         # probably not necessary, but for good measure
         clearchangelog(unfi)
@@ -1278,28 +1619,35 @@
 
     setup = None
     if clearcaches:
+
         def setup():
             setnodeget()
+
     else:
         setnodeget()
-        d() # prewarm the data structure
+        d()  # prewarm the data structure
     timer(d, setup=setup)
     fm.end()
 
+
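
perfnodemap passes the nodemap between closures through a one-element list:
a mutable cell works on Python 2, which has no ``nonlocal``. A small sketch
of the trick::

   nodeget = [None]

   def setnodeget():
       nodeget[0] = {'n1': 0, 'n2': 1}.get  # stand-in for nodemap.get

   def d():
       get = nodeget[0]
       for n in ('n1', 'n2'):
           get(n)

   setnodeget()
   d()
   print(nodeget[0]('n1'))  # 0
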
 @command(b'perfstartup', formatteropts)
 def perfstartup(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
+
     def d():
         if os.name != r'nt':
-            os.system(b"HGRCPATH= %s version -q > /dev/null" %
-                      fsencode(sys.argv[0]))
+            os.system(
+                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
+            )
         else:
             os.environ[r'HGRCPATH'] = r' '
             os.system(r"%s version -q > NUL" % sys.argv[0])
+
     timer(d)
     fm.end()
 
+
 @command(b'perfparents', formatteropts)
 def perfparents(ui, repo, **opts):
     """benchmark the time necessary to fetch one changeset's parents.
@@ -1318,33 +1666,42 @@
         raise error.Abort(b"repo needs %d commits for this test" % count)
     repo = repo.unfiltered()
     nl = [repo.changelog.node(i) for i in _xrange(count)]
+
     def d():
         for n in nl:
             repo.changelog.parents(n)
+
     timer(d)
     fm.end()
 
+
 @command(b'perfctxfiles', formatteropts)
 def perfctxfiles(ui, repo, x, **opts):
     opts = _byteskwargs(opts)
     x = int(x)
     timer, fm = gettimer(ui, opts)
+
     def d():
         len(repo[x].files())
+
     timer(d)
     fm.end()
 
+
 @command(b'perfrawfiles', formatteropts)
 def perfrawfiles(ui, repo, x, **opts):
     opts = _byteskwargs(opts)
     x = int(x)
     timer, fm = gettimer(ui, opts)
     cl = repo.changelog
+
     def d():
         len(cl.read(x)[3])
+
     timer(d)
     fm.end()
 
+
 @command(b'perflookup', formatteropts)
 def perflookup(ui, repo, rev, **opts):
     opts = _byteskwargs(opts)
@@ -1352,10 +1709,15 @@
     timer(lambda: len(repo.lookup(rev)))
     fm.end()
 
-@command(b'perflinelogedits',
-         [(b'n', b'edits', 10000, b'number of edits'),
-          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
-          ], norepo=True)
+
+@command(
+    b'perflinelogedits',
+    [
+        (b'n', b'edits', 10000, b'number of edits'),
+        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
+    ],
+    norepo=True,
+)
 def perflinelogedits(ui, **opts):
     from mercurial import linelog
 
@@ -1386,6 +1748,7 @@
     timer(d)
     fm.end()
 
+
 @command(b'perfrevrange', formatteropts)
 def perfrevrange(ui, repo, *specs, **opts):
     opts = _byteskwargs(opts)
@@ -1394,34 +1757,44 @@
     timer(lambda: len(revrange(repo, specs)))
     fm.end()
 
+
 @command(b'perfnodelookup', formatteropts)
 def perfnodelookup(ui, repo, rev, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     import mercurial.revlog
-    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
+
+    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
     n = scmutil.revsingle(repo, rev).node()
     cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
+
     def d():
         cl.rev(n)
         clearcaches(cl)
+
     timer(d)
     fm.end()
 
-@command(b'perflog',
-         [(b'', b'rename', False, b'ask log to follow renames')
-         ] + formatteropts)
+
+@command(
+    b'perflog',
+    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
+)
 def perflog(ui, repo, rev=None, **opts):
     opts = _byteskwargs(opts)
     if rev is None:
-        rev=[]
+        rev = []
     timer, fm = gettimer(ui, opts)
     ui.pushbuffer()
-    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
-                               copies=opts.get(b'rename')))
+    timer(
+        lambda: commands.log(
+            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
+        )
+    )
     ui.popbuffer()
     fm.end()
 
+
 @command(b'perfmoonwalk', formatteropts)
 def perfmoonwalk(ui, repo, **opts):
     """benchmark walking the changelog backwards
@@ -1430,21 +1803,27 @@
     """
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
+
     def moonwalk():
         for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
             ctx = repo[i]
-            ctx.branch() # read changelog data (in addition to the index)
+            ctx.branch()  # read changelog data (in addition to the index)
+
     timer(moonwalk)
     fm.end()
 
-@command(b'perftemplating',
-         [(b'r', b'rev', [], b'revisions to run the template on'),
-          ] + formatteropts)
+
+@command(
+    b'perftemplating',
+    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
+)
 def perftemplating(ui, repo, testedtemplate=None, **opts):
     """test the rendering time of a given template"""
     if makelogtemplater is None:
-        raise error.Abort((b"perftemplating not available with this Mercurial"),
-                          hint=b"use 4.3 or later")
+        raise error.Abort(
+            b"perftemplating not available with this Mercurial",
+            hint=b"use 4.3 or later",
+        )
 
     opts = _byteskwargs(opts)
 
@@ -1456,11 +1835,14 @@
         revs = [b'all()']
     revs = list(scmutil.revrange(repo, revs))
 
-    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
-                       b' {author|person}: {desc|firstline}\n')
+    defaulttemplate = (
+        b'{date|shortdate} [{rev}:{node|short}]'
+        b' {author|person}: {desc|firstline}\n'
+    )
     if testedtemplate is None:
         testedtemplate = defaulttemplate
     displayer = makelogtemplater(nullui, repo, testedtemplate)
+
     def format():
         for r in revs:
             ctx = repo[r]
@@ -1471,11 +1853,63 @@
     timer(format)
     fm.end()
 
-@command(b'perfhelper-mergecopies', formatteropts +
-         [
-          (b'r', b'revs', [], b'restrict search to these revisions'),
-          (b'', b'timing', False, b'provides extra data (costly)'),
-         ])
+
+def _displaystats(ui, opts, entries, data):
+    # use a second formatter because the data are quite different, not sure
+    # how it flies with the templater.
+    fm = ui.formatter(b'perf-stats', opts)
+    for key, title in entries:
+        values = data[key]
+        nbvalues = len(values)
+        values.sort()
+        stats = {
+            'key': key,
+            'title': title,
+            'nbitems': len(values),
+            'min': values[0][0],
+            '10%': values[(nbvalues * 10) // 100][0],
+            '25%': values[(nbvalues * 25) // 100][0],
+            '50%': values[(nbvalues * 50) // 100][0],
+            '75%': values[(nbvalues * 75) // 100][0],
+            '80%': values[(nbvalues * 80) // 100][0],
+            '85%': values[(nbvalues * 85) // 100][0],
+            '90%': values[(nbvalues * 90) // 100][0],
+            '95%': values[(nbvalues * 95) // 100][0],
+            '99%': values[(nbvalues * 99) // 100][0],
+            'max': values[-1][0],
+        }
+        fm.startitem()
+        fm.data(**stats)
+        # write a human-readable summary of the distribution
+        fm.plain('### %s (%d items)\n' % (title, len(values)))
+        lines = [
+            'min',
+            '10%',
+            '25%',
+            '50%',
+            '75%',
+            '80%',
+            '85%',
+            '90%',
+            '95%',
+            '99%',
+            'max',
+        ]
+        for l in lines:
+            fm.plain('%s: %s\n' % (l, stats[l]))
+    fm.end()
+
+
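(The percentile rows above use the plain ``(n * pct) // 100`` index into the sorted list. A toy, self-contained illustration of the same arithmetic; the data values are made up:

    values = sorted((v,) for v in [3, 1, 4, 1, 5, 9, 2, 6])
    n = len(values)
    for pct in (10, 25, 50, 75, 90, 99):
        print('%d%%: %s' % (pct, values[(n * pct) // 100][0]))
    print('max: %s' % values[-1][0])
)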
+@command(
+    b'perfhelper-mergecopies',
+    formatteropts
+    + [
+        (b'r', b'revs', [], b'restrict search to these revisions'),
+        (b'', b'timing', False, b'provides extra data (costly)'),
+        (b'', b'stats', False, b'provides statistics about the measured data'),
+    ],
+)
 def perfhelpermergecopies(ui, repo, revs=[], **opts):
     """find statistics about potential parameters for `perfmergecopies`
 
@@ -1494,6 +1928,7 @@
     opts = _byteskwargs(opts)
     fm = ui.formatter(b'perf', opts)
     dotiming = opts[b'timing']
+    dostats = opts[b'stats']
 
     output_template = [
         ("base", "%(base)12s"),
@@ -1509,10 +1944,13 @@
         ("p2.time", "%(p2.time)12.3f"),
         ("renames", "%(nbrenamedfiles)12d"),
         ("total.time", "%(time)12.3f"),
-        ]
+    ]
     if not dotiming:
-        output_template = [i for i in output_template
-                           if not ('time' in i[0] or 'renames' in i[0])]
+        output_template = [
+            i
+            for i in output_template
+            if not ('time' in i[0] or 'renames' in i[0])
+        ]
     header_names = [h for (h, v) in output_template]
     output = ' '.join([v for (h, v) in output_template]) + '\n'
     header = ' '.join(['%12s'] * len(header_names)) + '\n'
@@ -1522,6 +1960,17 @@
         revs = ['all()']
     revs = scmutil.revrange(repo, revs)
 
+    if dostats:
+        alldata = {
+            'nbrevs': [],
+            'nbmissingfiles': [],
+        }
+        if dotiming:
+            alldata['parentnbrenames'] = []
+            alldata['totalnbrenames'] = []
+            alldata['parenttime'] = []
+            alldata['totaltime'] = []
+
     roi = repo.revs('merge() and %ld', revs)
     for r in roi:
         ctx = repo[r]
@@ -1535,12 +1984,27 @@
             data = {
                 b'base': b.hex(),
                 b'p1.node': p1.hex(),
-                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
+                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                 b'p1.nbmissingfiles': len(p1missing),
                 b'p2.node': p2.hex(),
-                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
+                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                 b'p2.nbmissingfiles': len(p2missing),
             }
+            if dostats:
+                if p1missing:
+                    alldata['nbrevs'].append(
+                        (data['p1.nbrevs'], b.hex(), p1.hex())
+                    )
+                    alldata['nbmissingfiles'].append(
+                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
+                    )
+                if p2missing:
+                    alldata['nbrevs'].append(
+                        (data['p2.nbrevs'], b.hex(), p2.hex())
+                    )
+                    alldata['nbmissingfiles'].append(
+                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
+                    )
             if dotiming:
                 begin = util.timer()
                 mergedata = copies.mergecopies(repo, p1, p2, b)
@@ -1565,6 +2029,34 @@
                 end = util.timer()
                 data['p1.renamedfiles'] = len(p1renames)
                 data['p2.renamedfiles'] = len(p2renames)
+
+                if dostats:
+                    if p1missing:
+                        alldata['parentnbrenames'].append(
+                            (data['p1.renamedfiles'], b.hex(), p1.hex())
+                        )
+                        alldata['parenttime'].append(
+                            (data['p1.time'], b.hex(), p1.hex())
+                        )
+                    if p2missing:
+                        alldata['parentnbrenames'].append(
+                            (data['p2.renamedfiles'], b.hex(), p2.hex())
+                        )
+                        alldata['parenttime'].append(
+                            (data['p2.time'], b.hex(), p2.hex())
+                        )
+                    if p1missing or p2missing:
+                        alldata['totalnbrenames'].append(
+                            (
+                                data['nbrenamedfiles'],
+                                b.hex(),
+                                p1.hex(),
+                                p2.hex(),
+                            )
+                        )
+                        alldata['totaltime'].append(
+                            (data['time'], b.hex(), p1.hex(), p2.hex())
+                        )
             fm.startitem()
             fm.data(**data)
             # make node pretty for the human output
@@ -1575,12 +2067,32 @@
             fm.plain(output % out)
 
     fm.end()
-
-@command(b'perfhelper-pathcopies', formatteropts +
-         [
-          (b'r', b'revs', [], b'restrict search to these revisions'),
-          (b'', b'timing', False, b'provides extra data (costly)'),
-         ])
+    if dostats:
+        # use a second formatter because the data are quite different, not sure
+        # how it flies with the templater.
+        entries = [
+        ('nbrevs', 'number of revisions covered'),
+            ('nbmissingfiles', 'number of missing files at head'),
+        ]
+        if dotiming:
+            entries.append(
+                ('parentnbrenames', 'renames from one parent to base')
+            )
+            entries.append(('totalnbrenames', 'total number of renames'))
+            entries.append(('parenttime', 'time for one parent'))
+            entries.append(('totaltime', 'time for both parents'))
+        _displaystats(ui, opts, entries, alldata)
+
+
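(As a usage illustration only — the revset argument is an arbitrary example and the output shape depends on the repository — the new statistics are requested alongside timings with:

    $ hg perfhelper-mergecopies --revs 'merge()' --timing --stats
)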
+@command(
+    b'perfhelper-pathcopies',
+    formatteropts
+    + [
+        (b'r', b'revs', [], b'restrict search to these revisions'),
+        (b'', b'timing', False, b'provides extra data (costly)'),
+        (b'', b'stats', False, b'provides statistics about the measured data'),
+    ],
+)
 def perfhelperpathcopies(ui, repo, revs=[], **opts):
     """find statistic about potential parameters for the `perftracecopies`
 
@@ -1598,25 +2110,45 @@
     opts = _byteskwargs(opts)
     fm = ui.formatter(b'perf', opts)
     dotiming = opts[b'timing']
+    dostats = opts[b'stats']
 
     if dotiming:
         header = '%12s %12s %12s %12s %12s %12s\n'
-        output = ("%(source)12s %(destination)12s "
-                  "%(nbrevs)12d %(nbmissingfiles)12d "
-                  "%(nbrenamedfiles)12d %(time)18.5f\n")
-        header_names = ("source", "destination", "nb-revs", "nb-files",
-                        "nb-renames", "time")
+        output = (
+            "%(source)12s %(destination)12s "
+            "%(nbrevs)12d %(nbmissingfiles)12d "
+            "%(nbrenamedfiles)12d %(time)18.5f\n"
+        )
+        header_names = (
+            "source",
+            "destination",
+            "nb-revs",
+            "nb-files",
+            "nb-renames",
+            "time",
+        )
         fm.plain(header % header_names)
     else:
         header = '%12s %12s %12s %12s\n'
-        output = ("%(source)12s %(destination)12s "
-                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
+        output = (
+            "%(source)12s %(destination)12s "
+            "%(nbrevs)12d %(nbmissingfiles)12d\n"
+        )
         fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
 
     if not revs:
         revs = ['all()']
     revs = scmutil.revrange(repo, revs)
 
+    if dostats:
+        alldata = {
+            'nbrevs': [],
+            'nbmissingfiles': [],
+        }
+        if dotiming:
+            alldata['nbrenames'] = []
+            alldata['time'] = []
+
     roi = repo.revs('merge() and %ld', revs)
     for r in roi:
         ctx = repo[r]
@@ -1633,9 +2165,16 @@
                 data = {
                     b'source': base.hex(),
                     b'destination': parent.hex(),
-                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
+                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                     b'nbmissingfiles': len(missing),
                 }
+                if dostats:
+                    alldata['nbrevs'].append(
+                        (data['nbrevs'], base.hex(), parent.hex(),)
+                    )
+                    alldata['nbmissingfiles'].append(
+                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
+                    )
                 if dotiming:
                     begin = util.timer()
                     renames = copies.pathcopies(base, parent)
@@ -1643,6 +2182,13 @@
                     # not very stable timing since we did only one run
                     data['time'] = end - begin
                     data['nbrenamedfiles'] = len(renames)
+                    if dostats:
+                        alldata['time'].append(
+                            (data['time'], base.hex(), parent.hex(),)
+                        )
+                        alldata['nbrenames'].append(
+                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
+                        )
                 fm.startitem()
                 fm.data(**data)
                 out = data.copy()
@@ -1651,6 +2197,19 @@
                 fm.plain(output % out)
 
     fm.end()
+    if dostats:
+        # use a second formatter because the data are quite different, not sure
+        # how it flies with the templater.
+        entries = [
+            ('nbrevs', 'number of revisions covered'),
+            ('nbmissingfiles', 'number of missing files at head'),
+        ]
+        if dotiming:
+            entries.append(('nbrenames', 'renamed files'))
+            entries.append(('time', 'time'))
+        _displaystats(ui, opts, entries, alldata)
+
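(The switch from ``%d::%d`` to ``only(%d, %d)`` in this command and in ``perfhelper-mergecopies`` above matters for the revision counts: ``b::p`` is empty whenever ``p`` is not a descendant of ``b``, while ``only(p, b)`` counts the ancestors of ``p`` that are not ancestors of ``b``. A hedged sketch of the difference, where ``repo``, ``b`` and ``p`` stand for the objects used in the surrounding code:

    # empty when p is not a linear descendant of b
    linear = repo.revs(b'%d::%d', b, p)
    # ancestors of p not reachable from b; robust for merge topologies
    exclusive = repo.revs(b'only(%d, %d)', p, b)
)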
 
 @command(b'perfcca', formatteropts)
 def perfcca(ui, repo, **opts):
@@ -1659,16 +2218,20 @@
     timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
     fm.end()
 
+
 @command(b'perffncacheload', formatteropts)
 def perffncacheload(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     s = repo.store
+
     def d():
         s.fncache._load()
+
     timer(d)
     fm.end()
 
+
 @command(b'perffncachewrite', formatteropts)
 def perffncachewrite(ui, repo, **opts):
     opts = _byteskwargs(opts)
@@ -1678,26 +2241,32 @@
     s.fncache._load()
     tr = repo.transaction(b'perffncachewrite')
     tr.addbackup(b'fncache')
+
     def d():
         s.fncache._dirty = True
         s.fncache.write(tr)
+
     timer(d)
     tr.close()
     lock.release()
     fm.end()
 
+
 @command(b'perffncacheencode', formatteropts)
 def perffncacheencode(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     s = repo.store
     s.fncache._load()
+
     def d():
         for p in s.fncache.entries:
             s.encode(p)
+
     timer(d)
     fm.end()
 
+
 def _bdiffworker(q, blocks, xdiff, ready, done):
     while not done.is_set():
         pair = q.get()
@@ -1710,10 +2279,11 @@
                 mdiff.textdiff(*pair)
             q.task_done()
             pair = q.get()
-        q.task_done() # for the None one
+        q.task_done()  # for the None one
         with ready:
             ready.wait()
 
+
 def _manifestrevision(repo, mnode):
     ml = repo.manifestlog
 
@@ -1724,15 +2294,25 @@
 
     return store.revision(mnode)
 
-@command(b'perfbdiff', revlogopts + formatteropts + [
-    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
-    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
-    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
-    (b'', b'blocks', False, b'test computing diffs into blocks'),
-    (b'', b'xdiff', False, b'use xdiff algorithm'),
+
+@command(
+    b'perfbdiff',
+    revlogopts
+    + formatteropts
+    + [
+        (
+            b'',
+            b'count',
+            1,
+            b'number of revisions to test (when using --startrev)',
+        ),
+        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
+        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
+        (b'', b'blocks', False, b'test computing diffs into blocks'),
+        (b'', b'xdiff', False, b'use xdiff algorithm'),
     ],
-
-    b'-c|-m|FILE REV')
+    b'-c|-m|FILE REV',
+)
 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
     """benchmark a bdiff between revisions
 
@@ -1788,6 +2368,7 @@
 
     withthreads = threads > 0
     if not withthreads:
+
         def d():
             for pair in textpairs:
                 if xdiff:
@@ -1796,6 +2377,7 @@
                     mdiff.bdiff.blocks(*pair)
                 else:
                     mdiff.textdiff(*pair)
+
     else:
         q = queue()
         for i in _xrange(threads):
@@ -1803,9 +2385,11 @@
         ready = threading.Condition()
         done = threading.Event()
         for i in _xrange(threads):
-            threading.Thread(target=_bdiffworker,
-                             args=(q, blocks, xdiff, ready, done)).start()
+            threading.Thread(
+                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
+            ).start()
         q.join()
+
         def d():
             for pair in textpairs:
                 q.put(pair)
@@ -1814,6 +2398,7 @@
             with ready:
                 ready.notify_all()
             q.join()
+
     timer, fm = gettimer(ui, opts)
     timer(d)
     fm.end()
@@ -1825,10 +2410,22 @@
         with ready:
             ready.notify_all()
 
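(The worker loop in ``_bdiffworker`` drains pairs until it sees a ``None`` sentinel, acknowledges the sentinel with ``task_done()``, and then parks until the next round. A condensed, standalone sketch of that queue protocol; the work item here is a stand-in callable, not an mdiff pair:

    import threading
    try:
        from Queue import Queue  # Python 2
    except ImportError:
        from queue import Queue  # Python 3

    q = Queue()
    done = threading.Event()

    def worker():
        while not done.is_set():
            item = q.get()
            while item is not None:
                item()  # stand-in for mdiff.textdiff(*pair)
                q.task_done()
                item = q.get()
            q.task_done()  # account for the None sentinel

    threading.Thread(target=worker).start()
    q.put(lambda: None)
    q.put(None)  # sentinel ends this round of work
    q.join()  # returns once both items are accounted for
    done.set()
    q.put(None)  # wake the worker so it can observe 'done' and exit
)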
-@command(b'perfunidiff', revlogopts + formatteropts + [
-    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
-    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
-    ], b'-c|-m|FILE REV')
+
+@command(
+    b'perfunidiff',
+    revlogopts
+    + formatteropts
+    + [
+        (
+            b'',
+            b'count',
+            1,
+            b'number of revisions to test (when using --startrev)',
+        ),
+        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
+    ],
+    b'-c|-m|FILE REV',
+)
 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
     """benchmark a unified diff between revisions
 
@@ -1883,14 +2480,17 @@
         for left, right in textpairs:
             # The date strings don't matter, so we pass empty strings.
             headerlines, hunks = mdiff.unidiff(
-                left, b'', right, b'', b'left', b'right', binary=False)
+                left, b'', right, b'', b'left', b'right', binary=False
+            )
             # consume iterators in roughly the way patch.py does
             b'\n'.join(headerlines)
             b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
+
     timer, fm = gettimer(ui, opts)
     timer(d)
     fm.end()
 
+
 @command(b'perfdiffwd', formatteropts)
 def perfdiffwd(ui, repo, **opts):
     """Profile diff of working directory changes"""
@@ -1900,21 +2500,23 @@
         'w': 'ignore_all_space',
         'b': 'ignore_space_change',
         'B': 'ignore_blank_lines',
-        }
+    }
 
     for diffopt in ('', 'w', 'b', 'B', 'wB'):
         opts = dict((options[c], b'1') for c in diffopt)
+
         def d():
             ui.pushbuffer()
             commands.diff(ui, repo, **opts)
             ui.popbuffer()
+
         diffopt = diffopt.encode('ascii')
         title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
         timer(d, title=title)
     fm.end()
 
-@command(b'perfrevlogindex', revlogopts + formatteropts,
-         b'-c|-m|FILE')
+
+@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
 def perfrevlogindex(ui, repo, file_=None, **opts):
     """Benchmark operations against a revlog index.
 
@@ -1937,7 +2539,7 @@
         revlogio = revlog.revlogio()
         inline = header & (1 << 16)
     else:
-        raise error.Abort((b'unsupported revlog version: %d') % version)
+        raise error.Abort(b'unsupported revlog version: %d' % version)
 
     rllen = len(rl)
 
@@ -2008,22 +2610,26 @@
         (lambda: resolvenode(node75), b'look up node at 3/4 len'),
         (lambda: resolvenode(node100), b'look up node at tip'),
         # 2x variation is to measure caching impact.
-        (lambda: resolvenodes(allnodes),
-         b'look up all nodes (forward)'),
-        (lambda: resolvenodes(allnodes, 2),
-         b'look up all nodes 2x (forward)'),
-        (lambda: resolvenodes(allnodesrev),
-         b'look up all nodes (reverse)'),
-        (lambda: resolvenodes(allnodesrev, 2),
-         b'look up all nodes 2x (reverse)'),
-        (lambda: getentries(allrevs),
-         b'retrieve all index entries (forward)'),
-        (lambda: getentries(allrevs, 2),
-         b'retrieve all index entries 2x (forward)'),
-        (lambda: getentries(allrevsrev),
-         b'retrieve all index entries (reverse)'),
-        (lambda: getentries(allrevsrev, 2),
-         b'retrieve all index entries 2x (reverse)'),
+        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
+        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
+        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
+        (
+            lambda: resolvenodes(allnodesrev, 2),
+            b'look up all nodes 2x (reverse)',
+        ),
+        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
+        (
+            lambda: getentries(allrevs, 2),
+            b'retrieve all index entries 2x (forward)',
+        ),
+        (
+            lambda: getentries(allrevsrev),
+            b'retrieve all index entries (reverse)',
+        ),
+        (
+            lambda: getentries(allrevsrev, 2),
+            b'retrieve all index entries 2x (reverse)',
+        ),
     ]
 
     for fn, title in benches:
@@ -2031,13 +2637,21 @@
         timer(fn, title=title)
         fm.end()
 
-@command(b'perfrevlogrevisions', revlogopts + formatteropts +
-         [(b'd', b'dist', 100, b'distance between the revisions'),
-          (b's', b'startrev', 0, b'revision to start reading at'),
-          (b'', b'reverse', False, b'read in reverse')],
-         b'-c|-m|FILE')
-def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
-                        **opts):
+
+@command(
+    b'perfrevlogrevisions',
+    revlogopts
+    + formatteropts
+    + [
+        (b'd', b'dist', 100, b'distance between the revisions'),
+        (b's', b'startrev', 0, b'revision to start reading at'),
+        (b'', b'reverse', False, b'read in reverse'),
+    ],
+    b'-c|-m|FILE',
+)
+def perfrevlogrevisions(
+    ui, repo, file_=None, startrev=0, reverse=False, **opts
+):
     """Benchmark reading a series of revisions from a revlog.
 
     By default, we read every ``-d/--dist`` revision from 0 to tip of
@@ -2073,16 +2687,22 @@
     timer(d)
     fm.end()
 
-@command(b'perfrevlogwrite', revlogopts + formatteropts +
-         [(b's', b'startrev', 1000, b'revision to start writing at'),
-          (b'', b'stoprev', -1, b'last revision to write'),
-          (b'', b'count', 3, b'number of passes to perform'),
-          (b'', b'details', False, b'print timing for every revisions tested'),
-          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
-          (b'', b'lazydeltabase', True, b'try the provided delta first'),
-          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
-         ],
-         b'-c|-m|FILE')
+
+@command(
+    b'perfrevlogwrite',
+    revlogopts
+    + formatteropts
+    + [
+        (b's', b'startrev', 1000, b'revision to start writing at'),
+        (b'', b'stoprev', -1, b'last revision to write'),
+        (b'', b'count', 3, b'number of passes to perform'),
+        (b'', b'details', False, b'print timing for every revisions tested'),
+        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
+        (b'', b'lazydeltabase', True, b'try the provided delta first'),
+        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
+    ],
+    b'-c|-m|FILE',
+)
 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
     """Benchmark writing a series of revisions to a revlog.
 
@@ -2116,8 +2736,13 @@
     lazydeltabase = opts['lazydeltabase']
     source = opts['source']
     clearcaches = opts['clear_caches']
-    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
-                   b'storage')
+    validsource = (
+        b'full',
+        b'parent-1',
+        b'parent-2',
+        b'parent-smallest',
+        b'storage',
+    )
     if source not in validsource:
         raise error.Abort('invalid source type: %s' % source)
 
@@ -2127,9 +2752,16 @@
         raise error.Abort('invalid run count: %d' % count)
     allresults = []
     for c in range(count):
-        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
-                               lazydeltabase=lazydeltabase,
-                               clearcaches=clearcaches)
+        timing = _timeonewrite(
+            ui,
+            rl,
+            source,
+            startrev,
+            stoprev,
+            c + 1,
+            lazydeltabase=lazydeltabase,
+            clearcaches=clearcaches,
+        )
         allresults.append(timing)
 
     ### consolidate the results in a single list
@@ -2183,20 +2815,37 @@
     # for now
     totaltime = []
     for item in allresults:
-        totaltime.append((sum(x[1][0] for x in item),
-                          sum(x[1][1] for x in item),
-                          sum(x[1][2] for x in item),)
+        totaltime.append(
+            (
+                sum(x[1][0] for x in item),
+                sum(x[1][1] for x in item),
+                sum(x[1][2] for x in item),
+            )
         )
-    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
-              displayall=displayall)
+    formatone(
+        fm,
+        totaltime,
+        title="total time (%d revs)" % resultcount,
+        displayall=displayall,
+    )
     fm.end()
 
+
 class _faketr(object):
     def add(s, x, y, z=None):
         return None
 
-def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
-                  lazydeltabase=True, clearcaches=True):
+
+def _timeonewrite(
+    ui,
+    orig,
+    source,
+    startrev,
+    stoprev,
+    runidx=None,
+    lazydeltabase=True,
+    clearcaches=True,
+):
     timings = []
     tr = _faketr()
     with _temprevlog(ui, orig, startrev) as dest:
@@ -2206,16 +2855,21 @@
         topic = 'adding'
         if runidx is not None:
             topic += ' (run #%d)' % runidx
-         # Support both old and new progress API
+        # Support both old and new progress API
         if util.safehasattr(ui, 'makeprogress'):
             progress = ui.makeprogress(topic, unit='revs', total=total)
+
             def updateprogress(pos):
                 progress.update(pos)
+
             def completeprogress():
                 progress.complete()
+
         else:
+
             def updateprogress(pos):
                 ui.progress(topic, pos, unit='revs', total=total)
+
             def completeprogress():
                 ui.progress(topic, None, unit='revs', total=total)
 
@@ -2232,6 +2886,7 @@
         completeprogress()
     return timings
 
+
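(The progress branch above feature-detects ``ui.makeprogress()`` with ``util.safehasattr`` rather than ``hasattr``, because on Python 2 a bare ``hasattr`` swallows all exceptions. A sketch of the sentinel idiom such a helper typically uses — an assumed shape, not copied from util.py:

    _notset = object()

    def safehasattr(thing, attr):
        # getattr with a private sentinel only masks AttributeError,
        # unlike Python 2's hasattr() which hides every exception
        return getattr(thing, attr, _notset) is not _notset
)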
 def _getrevisionseed(orig, rev, tr, source):
     from mercurial.node import nullid
 
@@ -2268,8 +2923,11 @@
         baserev = orig.deltaparent(rev)
         cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
 
-    return ((text, tr, linkrev, p1, p2),
-            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
+    return (
+        (text, tr, linkrev, p1, p2),
+        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
+    )
+
 
 @contextlib.contextmanager
 def _temprevlog(ui, orig, truncaterev):
@@ -2310,9 +2968,9 @@
         vfs = vfsmod.vfs(tmpdir)
         vfs.options = getattr(orig.opener, 'options', None)
 
-        dest = revlog.revlog(vfs,
-                             indexfile=indexname,
-                             datafile=dataname, **revlogkwargs)
+        dest = revlog.revlog(
+            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
+        )
         if dest._inline:
             raise error.Abort('not supporting inline revlog (yet)')
         # make sure internals are initialized
@@ -2322,10 +2980,17 @@
     finally:
         shutil.rmtree(tmpdir, True)
 
-@command(b'perfrevlogchunks', revlogopts + formatteropts +
-         [(b'e', b'engines', b'', b'compression engines to use'),
-          (b's', b'startrev', 0, b'revision to start at')],
-         b'-c|-m|FILE')
+
+@command(
+    b'perfrevlogchunks',
+    revlogopts
+    + formatteropts
+    + [
+        (b'e', b'engines', b'', b'compression engines to use'),
+        (b's', b'startrev', 0, b'revision to start at'),
+    ],
+    b'-c|-m|FILE',
+)
 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
     """Benchmark operations on revlog chunks.
 
@@ -2432,17 +3097,26 @@
 
     for engine in sorted(engines):
         compressor = util.compengines[engine].revlogcompressor()
-        benches.append((functools.partial(docompress, compressor),
-                        b'compress w/ %s' % engine))
+        benches.append(
+            (
+                functools.partial(docompress, compressor),
+                b'compress w/ %s' % engine,
+            )
+        )
 
     for fn, title in benches:
         timer, fm = gettimer(ui, opts)
         timer(fn, title=title)
         fm.end()
 
-@command(b'perfrevlogrevision', revlogopts + formatteropts +
-         [(b'', b'cache', False, b'use caches instead of clearing')],
-         b'-c|-m|FILE REV')
+
+@command(
+    b'perfrevlogrevision',
+    revlogopts
+    + formatteropts
+    + [(b'', b'cache', False, b'use caches instead of clearing')],
+    b'-c|-m|FILE REV',
+)
 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
     """Benchmark obtaining a revlog revision.
 
@@ -2564,22 +3238,30 @@
         slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
         benches.append(slicing)
 
-    benches.extend([
-        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
-        (lambda: dodecompress(rawchunks), b'decompress'),
-        (lambda: dopatch(text, bins), b'patch'),
-        (lambda: dohash(text), b'hash'),
-    ])
+    benches.extend(
+        [
+            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
+            (lambda: dodecompress(rawchunks), b'decompress'),
+            (lambda: dopatch(text, bins), b'patch'),
+            (lambda: dohash(text), b'hash'),
+        ]
+    )
 
     timer, fm = gettimer(ui, opts)
     for fn, title in benches:
         timer(fn, title=title)
     fm.end()
 
-@command(b'perfrevset',
-         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
-          (b'', b'contexts', False, b'obtain changectx for each revision')]
-         + formatteropts, b"REVSET")
+
+@command(
+    b'perfrevset',
+    [
+        (b'C', b'clear', False, b'clear volatile cache between each call.'),
+        (b'', b'contexts', False, b'obtain changectx for each revision'),
+    ]
+    + formatteropts,
+    b"REVSET",
+)
 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
     """benchmark the execution time of a revset
 
@@ -2589,19 +3271,26 @@
     opts = _byteskwargs(opts)
 
     timer, fm = gettimer(ui, opts)
+
     def d():
         if clear:
             repo.invalidatevolatilesets()
         if contexts:
-            for ctx in repo.set(expr): pass
+            for ctx in repo.set(expr):
+                pass
         else:
-            for r in repo.revs(expr): pass
+            for r in repo.revs(expr):
+                pass
+
     timer(d)
     fm.end()
 
-@command(b'perfvolatilesets',
-         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
-          ] + formatteropts)
+
+@command(
+    b'perfvolatilesets',
+    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
+    + formatteropts,
+)
 def perfvolatilesets(ui, repo, *names, **opts):
     """benchmark the computation of various volatile set
 
@@ -2616,6 +3305,7 @@
             if opts[b'clear_obsstore']:
                 clearfilecache(repo, b'obsstore')
             obsolete.getrevs(repo, name)
+
         return d
 
     allobs = sorted(obsolete.cachefuncs)
@@ -2631,6 +3321,7 @@
             if opts[b'clear_obsstore']:
                 clearfilecache(repo, b'obsstore')
             repoview.filterrevs(repo, name)
+
         return d
 
     allfilter = sorted(repoview.filtertable)
@@ -2641,12 +3332,20 @@
         timer(getfiltered(name), title=name)
     fm.end()
 
-@command(b'perfbranchmap',
-         [(b'f', b'full', False,
-           b'Includes build time of subset'),
-          (b'', b'clear-revbranch', False,
-           b'purge the revbranch cache between computation'),
-          ] + formatteropts)
+
+@command(
+    b'perfbranchmap',
+    [
+        (b'f', b'full', False, b'Includes build time of subset'),
+        (
+            b'',
+            b'clear-revbranch',
+            False,
+            b'purge the revbranch cache between computation',
+        ),
+    ]
+    + formatteropts,
+)
 def perfbranchmap(ui, repo, *filternames, **opts):
     """benchmark the update of a branchmap
 
@@ -2656,6 +3355,7 @@
     full = opts.get(b"full", False)
     clear_revbranch = opts.get(b"clear_revbranch", False)
     timer, fm = gettimer(ui, opts)
+
     def getbranchmap(filtername):
         """generate a benchmark function for the filtername"""
         if filtername is None:
@@ -2667,6 +3367,7 @@
         else:
             # older versions
             filtered = view._branchcaches
+
         def d():
             if clear_revbranch:
                 repo.revbranchcache()._clear()
@@ -2675,7 +3376,9 @@
             else:
                 filtered.pop(filtername, None)
             view.branchmap()
+
         return d
+
     # add filter in smaller subset to bigger subset
     possiblefilters = set(repoview.filtertable)
     if filternames:
@@ -2720,11 +3423,16 @@
         branchcachewrite.restore()
     fm.end()
 
-@command(b'perfbranchmapupdate', [
-     (b'', b'base', [], b'subset of revision to start from'),
-     (b'', b'target', [], b'subset of revision to end with'),
-     (b'', b'clear-caches', False, b'clear cache between each runs')
-    ] + formatteropts)
+
+@command(
+    b'perfbranchmapupdate',
+    [
+        (b'', b'base', [], b'subset of revision to start from'),
+        (b'', b'target', [], b'subset of revision to end with'),
+        (b'', b'clear-caches', False, b'clear cache between each run'),
+    ]
+    + formatteropts,
+)
 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
     """benchmark branchmap update from for <base> revs to <target> revs
 
@@ -2743,11 +3451,12 @@
     """
     from mercurial import branchmap
     from mercurial import repoview
+
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     clearcaches = opts[b'clear_caches']
     unfi = repo.unfiltered()
-    x = [None] # used to pass data between closure
+    x = [None]  # used to pass data between closure
 
     # we use a `list` here to avoid possible side effect from smartset
     baserevs = list(scmutil.revrange(repo, base))
@@ -2824,12 +3533,16 @@
         repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
         repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
 
-@command(b'perfbranchmapload', [
-     (b'f', b'filter', b'', b'Specify repoview filter'),
-     (b'', b'list', False, b'List brachmap filter caches'),
-     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
-
-    ] + formatteropts)
+
+@command(
+    b'perfbranchmapload',
+    [
+        (b'f', b'filter', b'', b'Specify repoview filter'),
+        (b'', b'list', False, b'List branchmap filter caches'),
+        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
+    ]
+    + formatteropts,
+)
 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
     """benchmark reading the branchmap"""
     opts = _byteskwargs(opts)
@@ -2839,8 +3552,9 @@
         for name, kind, st in repo.cachevfs.readdir(stat=True):
             if name.startswith(b'branch2'):
                 filtername = name.partition(b'-')[2] or b'unfiltered'
-                ui.status(b'%s - %s\n'
-                          % (filtername, util.bytecount(st.st_size)))
+                ui.status(
+                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
+                )
         return
     if not filter:
         filter = None
@@ -2850,7 +3564,7 @@
     else:
         repo = repoview.repoview(repo, filter)
 
-    repo.branchmap() # make sure we have a relevant, up to date branchmap
+    repo.branchmap()  # make sure we have a relevant, up to date branchmap
 
     try:
         fromfile = branchmap.branchcache.fromfile
@@ -2863,18 +3577,23 @@
     while fromfile(repo) is None:
         currentfilter = subsettable.get(currentfilter)
         if currentfilter is None:
-            raise error.Abort(b'No branchmap cached for %s repo'
-                              % (filter or b'unfiltered'))
+            raise error.Abort(
+                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
+            )
         repo = repo.filtered(currentfilter)
     timer, fm = gettimer(ui, opts)
+
     def setup():
         if clearrevlogs:
             clearchangelog(repo)
+
     def bench():
         fromfile(repo)
+
     timer(bench, setup=setup)
     fm.end()
 
+
 @command(b'perfloadmarkers')
 def perfloadmarkers(ui, repo):
     """benchmark the time to parse the on-disk markers for a repo
@@ -2885,18 +3604,39 @@
     timer(lambda: len(obsolete.obsstore(svfs)))
     fm.end()
 
-@command(b'perflrucachedict', formatteropts +
-    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
-     (b'', b'mincost', 0, b'smallest cost of items in cache'),
-     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
-     (b'', b'size', 4, b'size of cache'),
-     (b'', b'gets', 10000, b'number of key lookups'),
-     (b'', b'sets', 10000, b'number of key sets'),
-     (b'', b'mixed', 10000, b'number of mixed mode operations'),
-     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
-    norepo=True)
-def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
-                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
+
+@command(
+    b'perflrucachedict',
+    formatteropts
+    + [
+        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
+        (b'', b'mincost', 0, b'smallest cost of items in cache'),
+        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
+        (b'', b'size', 4, b'size of cache'),
+        (b'', b'gets', 10000, b'number of key lookups'),
+        (b'', b'sets', 10000, b'number of key sets'),
+        (b'', b'mixed', 10000, b'number of mixed mode operations'),
+        (
+            b'',
+            b'mixedgetfreq',
+            50,
+            b'frequency of get vs set ops in mixed mode',
+        ),
+    ],
+    norepo=True,
+)
+def perflrucache(
+    ui,
+    mincost=0,
+    maxcost=100,
+    costlimit=0,
+    size=4,
+    gets=10000,
+    sets=10000,
+    mixed=10000,
+    mixedgetfreq=50,
+    **opts
+):
     opts = _byteskwargs(opts)
 
     def doinit():
@@ -2921,7 +3661,7 @@
             d[v] = v
         for key in getseq:
             value = d[key]
-            value # silence pyflakes warning
+            value  # silence pyflakes warning
 
     def dogetscost():
         d = util.lrucachedict(size, maxcost=costlimit)
@@ -2930,7 +3670,7 @@
         for key in getseq:
             try:
                 value = d[key]
-                value # silence pyflakes warning
+                value  # silence pyflakes warning
             except KeyError:
                 pass
 
@@ -2965,9 +3705,9 @@
         else:
             op = 1
 
-        mixedops.append((op,
-                         random.randint(0, size * 2),
-                         random.choice(costrange)))
+        mixedops.append(
+            (op, random.randint(0, size * 2), random.choice(costrange))
+        )
 
     def domixed():
         d = util.lrucachedict(size)
@@ -2998,24 +3738,29 @@
     ]
 
     if costlimit:
-        benches.extend([
-            (dogetscost, b'gets w/ cost limit'),
-            (doinsertscost, b'inserts w/ cost limit'),
-            (domixedcost, b'mixed w/ cost limit'),
-        ])
+        benches.extend(
+            [
+                (dogetscost, b'gets w/ cost limit'),
+                (doinsertscost, b'inserts w/ cost limit'),
+                (domixedcost, b'mixed w/ cost limit'),
+            ]
+        )
     else:
-        benches.extend([
-            (dogets, b'gets'),
-            (doinserts, b'inserts'),
-            (dosets, b'sets'),
-            (domixed, b'mixed')
-        ])
+        benches.extend(
+            [
+                (dogets, b'gets'),
+                (doinserts, b'inserts'),
+                (dosets, b'sets'),
+                (domixed, b'mixed'),
+            ]
+        )
 
     for fn, title in benches:
         timer, fm = gettimer(ui, opts)
         timer(fn, title=title)
         fm.end()
 
+
 @command(b'perfwrite', formatteropts)
 def perfwrite(ui, repo, **opts):
     """microbenchmark ui.write
@@ -3023,15 +3768,19 @@
     opts = _byteskwargs(opts)
 
     timer, fm = gettimer(ui, opts)
+
     def write():
         for i in range(100000):
-            ui.write((b'Testing write performance\n'))
+            ui.writenoi18n(b'Testing write performance\n')
+
     timer(write)
     fm.end()
 
+
 def uisetup(ui):
-    if (util.safehasattr(cmdutil, b'openrevlog') and
-        not util.safehasattr(commands, b'debugrevlogopts')):
+    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
+        commands, b'debugrevlogopts'
+    ):
         # for "historical portability":
         # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
         # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
@@ -3039,15 +3788,24 @@
         # available since 3.5 (or 49c583ca48c4).
         def openrevlog(orig, repo, cmd, file_, opts):
             if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
-                raise error.Abort(b"This version doesn't support --dir option",
-                                  hint=b"use 3.5 or later")
+                raise error.Abort(
+                    b"This version doesn't support --dir option",
+                    hint=b"use 3.5 or later",
+                )
             return orig(repo, cmd, file_, opts)
+
         extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
 
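(``extensions.wrapfunction`` hands the original callable to the wrapper as its first argument, which is why ``openrevlog`` above receives ``orig``. A simplified, hedged sketch of the mechanism; hg's real implementation also supports unwrapping and bound methods:

    import functools

    def wrapfunction(container, funcname, wrapper):
        origfn = getattr(container, funcname)
        setattr(container, funcname, functools.partial(wrapper, origfn))

    class mod(object):  # stands in for a module such as cmdutil
        @staticmethod
        def openrevlog(name):
            return 'revlog for %s' % name

    def checked(orig, name):
        if name == 'unsupported':
            raise ValueError('not supported by this version')
        return orig(name)

    wrapfunction(mod, 'openrevlog', checked)
    print(mod.openrevlog('00changelog.i'))  # -> revlog for 00changelog.i
)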
-@command(b'perfprogress', formatteropts + [
-    (b'', b'topic', b'topic', b'topic for progress messages'),
-    (b'c', b'total', 1000000, b'total value we are progressing to'),
-], norepo=True)
+
+@command(
+    b'perfprogress',
+    formatteropts
+    + [
+        (b'', b'topic', b'topic', b'topic for progress messages'),
+        (b'c', b'total', 1000000, b'total value we are progressing to'),
+    ],
+    norepo=True,
+)
 def perfprogress(ui, topic=None, total=None, **opts):
     """printing of progress bars"""
     opts = _byteskwargs(opts)
@@ -3056,7 +3814,7 @@
 
     def doprogress():
         with ui.makeprogress(topic, total=total) as progress:
-            for i in pycompat.xrange(total):
+            for i in _xrange(total):
                 progress.increment()
 
     timer(doprogress)
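(The change from ``pycompat.xrange`` to the module-local ``_xrange`` leans on an alias resolved once at import time; its definition lives near perf.py's imports, outside this hunk, so the following is an assumed shape rather than a quote:

    try:
        _xrange = xrange  # Python 2
    except NameError:
        _xrange = range  # Python 3
)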
--- a/contrib/python-hook-examples.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-hook-examples.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,6 +7,7 @@
     util,
 )
 
+
 def diffstat(ui, repo, **kwargs):
     '''Example usage:
 
--- a/contrib/python-zstandard/NEWS.rst	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/NEWS.rst	Mon Oct 21 11:09:48 2019 -0400
@@ -44,6 +44,7 @@
   zstd API.
 * Expose ``ZSTD_CLEVEL_DEFAULT`` constant.
 * Support ``ZSTD_p_forceAttachDict`` compression parameter.
+* Support ``ZSTD_c_literalCompressionMode`` compression parameter.
 * Use ``ZSTD_CCtx_getParameter()``/``ZSTD_CCtxParam_getParameter()`` for retrieving
   compression parameters.
 * Consider exposing ``ZSTDMT_toFlushNow()``.
@@ -66,10 +67,39 @@
 * API for ensuring max memory ceiling isn't exceeded.
 * Move off nose for testing.
 
+0.12.0 (released 2019-09-15)
+============================
+
+Backwards Compatibility Notes
+-----------------------------
+
+* Support for Python 3.4 has been dropped since Python 3.4 is no longer
+  a supported Python version upstream. (But it will likely continue to
+  work until Python 2.7 support is dropped and we port to Python 3.5+
+  APIs.)
+
+Bug Fixes
+---------
+
+* Fix ``ZstdDecompressor.__init__`` on 64-bit big-endian systems (#91).
+* Fix memory leak in ``ZstdDecompressionReader.seek()`` (#82).
+
+Changes
+-------
+
+* CI transitioned to Azure Pipelines (from AppVeyor and Travis CI).
+* Switched to ``pytest`` for running tests (from ``nose``).
+* Bundled zstandard library upgraded from 1.3.8 to 1.4.3.
+
+0.11.1 (released 2019-05-14)
+============================
+
+* Fix memory leak in ``ZstdDecompressionReader.seek()`` (#82).
+
 0.11.0 (released 2019-02-24)
 ============================
 
-Backwards Compatibility Nodes
+Backwards Compatibility Notes
 -----------------------------
 
 * ``ZstdDecompressor.read()`` now allows reading sizes of ``-1`` or ``0``
--- a/contrib/python-zstandard/README.rst	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/README.rst	Mon Oct 21 11:09:48 2019 -0400
@@ -15,7 +15,7 @@
 the author. For convenience, that repository is frequently synchronized to
 https://github.com/indygreg/python-zstandard.
 
-|  |ci-status| |win-ci-status|
+|  |ci-status|
 
 Requirements
 ============
@@ -1598,9 +1598,5 @@
     :target: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=gregory%2eszorc%40gmail%2ecom&lc=US&item_name=python%2dzstandard&currency_code=USD&bn=PP%2dDonationsBF%3abtn_donate_LG%2egif%3aNonHosted
     :alt: Donate via PayPal
 
-.. |ci-status| image:: https://travis-ci.org/indygreg/python-zstandard.svg?branch=master
-    :target: https://travis-ci.org/indygreg/python-zstandard
-
-.. |win-ci-status| image:: https://ci.appveyor.com/api/projects/status/github/indygreg/python-zstandard?svg=true
-    :target: https://ci.appveyor.com/project/indygreg/python-zstandard
-    :alt: Windows build status
+.. |ci-status| image:: https://dev.azure.com/gregoryszorc/python-zstandard/_apis/build/status/indygreg.python-zstandard?branchName=master
+    :target: https://dev.azure.com/gregoryszorc/python-zstandard/_apis/build/status/indygreg.python-zstandard?branchName=master
--- a/contrib/python-zstandard/c-ext/compressionparams.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/c-ext/compressionparams.c	Mon Oct 21 11:09:48 2019 -0400
@@ -11,7 +11,7 @@
 extern PyObject* ZstdError;
 
 int set_parameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value) {
-	size_t zresult = ZSTD_CCtxParam_setParameter(params, param, value);
+	size_t zresult = ZSTD_CCtxParams_setParameter(params, param, value);
 	if (ZSTD_isError(zresult)) {
 		PyErr_Format(ZstdError, "unable to set compression context parameter: %s",
 			ZSTD_getErrorName(zresult));
@@ -25,11 +25,11 @@
 
 #define TRY_COPY_PARAMETER(source, dest, param) { \
 	int result; \
-	size_t zresult = ZSTD_CCtxParam_getParameter(source, param, &result); \
+	size_t zresult = ZSTD_CCtxParams_getParameter(source, param, &result); \
 	if (ZSTD_isError(zresult)) { \
 		return 1; \
 	} \
-	zresult = ZSTD_CCtxParam_setParameter(dest, param, result); \
+	zresult = ZSTD_CCtxParams_setParameter(dest, param, result); \
 	if (ZSTD_isError(zresult)) { \
 		return 1; \
 	} \
@@ -78,7 +78,7 @@
 }
 
 #define TRY_GET_PARAMETER(params, param, value) { \
-    size_t zresult = ZSTD_CCtxParam_getParameter(params, param, value); \
+    size_t zresult = ZSTD_CCtxParams_getParameter(params, param, value); \
     if (ZSTD_isError(zresult)) { \
         PyErr_Format(ZstdError, "unable to retrieve parameter: %s", ZSTD_getErrorName(zresult)); \
         return 1; \
@@ -436,7 +436,7 @@
     int result; \
     size_t zresult; \
     ZstdCompressionParametersObject* p = (ZstdCompressionParametersObject*)(self); \
-    zresult = ZSTD_CCtxParam_getParameter(p->params, param, &result); \
+    zresult = ZSTD_CCtxParams_getParameter(p->params, param, &result); \
     if (ZSTD_isError(zresult)) { \
         PyErr_Format(ZstdError, "unable to get compression parameter: %s", \
             ZSTD_getErrorName(zresult)); \
--- a/contrib/python-zstandard/c-ext/decompressionreader.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/c-ext/decompressionreader.c	Mon Oct 21 11:09:48 2019 -0400
@@ -653,6 +653,8 @@
 
 		readSize = PyBytes_GET_SIZE(readResult);
 
+		Py_CLEAR(readResult);
+
 		/* Empty read means EOF. */
 		if (!readSize) {
 			break;
--- a/contrib/python-zstandard/c-ext/python-zstandard.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/c-ext/python-zstandard.h	Mon Oct 21 11:09:48 2019 -0400
@@ -16,7 +16,7 @@
 #include <zdict.h>
 
 /* Remember to change the string in zstandard/__init__ as well */
-#define PYTHON_ZSTANDARD_VERSION "0.11.0"
+#define PYTHON_ZSTANDARD_VERSION "0.12.0"
 
 typedef enum {
 	compressorobj_flush_finish,
--- a/contrib/python-zstandard/make_cffi.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/make_cffi.py	Mon Oct 21 11:09:48 2019 -0400
@@ -29,6 +29,8 @@
     'compress/hist.c',
     'compress/huf_compress.c',
     'compress/zstd_compress.c',
+    'compress/zstd_compress_literals.c',
+    'compress/zstd_compress_sequences.c',
     'compress/zstd_double_fast.c',
     'compress/zstd_fast.c',
     'compress/zstd_lazy.c',
@@ -119,7 +121,11 @@
     os.close(fd)
 
     try:
-        process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE)
+        env = dict(os.environ)
+        if getattr(compiler, '_paths', None):
+            env['PATH'] = compiler._paths
+        process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE,
+                                   env=env)
         output = process.communicate()[0]
         ret = process.poll()
         if ret:
--- a/contrib/python-zstandard/setup.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/setup.py	Mon Oct 21 11:09:48 2019 -0400
@@ -100,7 +100,6 @@
         'License :: OSI Approved :: BSD License',
         'Programming Language :: C',
         'Programming Language :: Python :: 2.7',
-        'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.5',
         'Programming Language :: Python :: 3.6',
         'Programming Language :: Python :: 3.7',
--- a/contrib/python-zstandard/setup_zstd.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/setup_zstd.py	Mon Oct 21 11:09:48 2019 -0400
@@ -22,6 +22,8 @@
     'compress/fse_compress.c',
     'compress/hist.c',
     'compress/huf_compress.c',
+    'compress/zstd_compress_literals.c',
+    'compress/zstd_compress_sequences.c',
     'compress/zstd_compress.c',
     'compress/zstd_double_fast.c',
     'compress/zstd_fast.c',
--- a/contrib/python-zstandard/tests/test_compressor.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/tests/test_compressor.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1038,7 +1038,7 @@
         d = zstd.train_dictionary(8192, samples)
 
         h = hashlib.sha1(d.as_bytes()).hexdigest()
-        self.assertEqual(h, '88ca0d38332aff379d4ced166a51c280a7679aad')
+        self.assertEqual(h, '7a2e59a876db958f74257141045af8f912e00d4e')
 
         buffer = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(level=9, dict_data=d)
@@ -1056,7 +1056,7 @@
         self.assertFalse(params.has_checksum)
 
         h = hashlib.sha1(compressed).hexdigest()
-        self.assertEqual(h, '8703b4316f274d26697ea5dd480f29c08e85d940')
+        self.assertEqual(h, '0a7c05635061f58039727cdbe76388c6f4cfef06')
 
         source = b'foo' + b'bar' + (b'foo' * 16384)
 
@@ -1091,7 +1091,7 @@
         self.assertFalse(params.has_checksum)
 
         h = hashlib.sha1(compressed).hexdigest()
-        self.assertEqual(h, '2a8111d72eb5004cdcecbdac37da9f26720d30ef')
+        self.assertEqual(h, 'dd4bb7d37c1a0235b38a2f6b462814376843ef0b')
 
     def test_write_checksum(self):
         no_checksum = NonClosingBytesIO()
--- a/contrib/python-zstandard/tests/test_data_structures.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/tests/test_data_structures.py	Mon Oct 21 11:09:48 2019 -0400
@@ -100,7 +100,7 @@
                                            strategy=zstd.STRATEGY_DFAST)
 
         # 32-bit has slightly different values from 64-bit.
-        self.assertAlmostEqual(p.estimated_compression_context_size(), 1294072,
+        self.assertAlmostEqual(p.estimated_compression_context_size(), 1294144,
                                delta=250)
 
     def test_strategy(self):
--- a/contrib/python-zstandard/tests/test_module_attributes.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/tests/test_module_attributes.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,9 +12,9 @@
 @make_cffi
 class TestModuleAttributes(unittest.TestCase):
     def test_version(self):
-        self.assertEqual(zstd.ZSTD_VERSION, (1, 3, 8))
+        self.assertEqual(zstd.ZSTD_VERSION, (1, 4, 3))
 
-        self.assertEqual(zstd.__version__, '0.11.0')
+        self.assertEqual(zstd.__version__, '0.12.0')
 
     def test_constants(self):
         self.assertEqual(zstd.MAX_COMPRESSION_LEVEL, 22)
--- a/contrib/python-zstandard/tests/test_train_dictionary.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/tests/test_train_dictionary.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,6 +7,7 @@
 from . common import (
     generate_samples,
     make_cffi,
+    random_input_data,
 )
 
 if sys.version_info[0] >= 3:
@@ -29,7 +30,7 @@
             zstd.train_dictionary(8192, [u'foo'])
 
     def test_no_params(self):
-        d = zstd.train_dictionary(8192, generate_samples())
+        d = zstd.train_dictionary(8192, random_input_data())
         self.assertIsInstance(d.dict_id(), int_type)
 
         # The dictionary ID may be different across platforms.
--- a/contrib/python-zstandard/zstandard/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstandard/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -62,4 +62,4 @@
                       'cext, or cffi' % _module_policy)
 
 # Keep this in sync with python-zstandard.h.
-__version__ = '0.11.0'
+__version__ = '0.12.0'
--- a/contrib/python-zstandard/zstandard/cffi.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstandard/cffi.py	Mon Oct 21 11:09:48 2019 -0400
@@ -416,7 +416,7 @@
 
 
 def _set_compression_parameter(params, param, value):
-    zresult = lib.ZSTD_CCtxParam_setParameter(params, param, value)
+    zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value)
     if lib.ZSTD_isError(zresult):
         raise ZstdError('unable to set compression context parameter: %s' %
                         _zstd_error(zresult))
@@ -425,7 +425,7 @@
 def _get_compression_parameter(params, param):
     result = ffi.new('int *')
 
-    zresult = lib.ZSTD_CCtxParam_getParameter(params, param, result)
+    zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result)
     if lib.ZSTD_isError(zresult):
         raise ZstdError('unable to get compression context parameter: %s' %
                         _zstd_error(zresult))
--- a/contrib/python-zstandard/zstd.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd.c	Mon Oct 21 11:09:48 2019 -0400
@@ -210,7 +210,7 @@
 	   We detect this mismatch here and refuse to load the module if this
 	   scenario is detected.
 	*/
-	if (ZSTD_VERSION_NUMBER != 10308 || ZSTD_versionNumber() != 10308) {
+	if (ZSTD_VERSION_NUMBER != 10403 || ZSTD_versionNumber() != 10403) {
 		PyErr_SetString(PyExc_ImportError, "zstd C API mismatch; Python bindings not compiled against expected zstd version");
 		return;
 	}
--- a/contrib/python-zstandard/zstd/common/bitstream.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/common/bitstream.h	Mon Oct 21 11:09:48 2019 -0400
@@ -57,6 +57,8 @@
 =========================================*/
 #if defined(__BMI__) && defined(__GNUC__)
 #  include <immintrin.h>   /* support for bextr (experimental) */
+#elif defined(__ICCARM__)
+#  include <intrinsics.h>
 #endif
 
 #define STREAM_ACCUMULATOR_MIN_32  25
@@ -163,6 +165,8 @@
         return (unsigned) r;
 #   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
         return 31 - __builtin_clz (val);
+#   elif defined(__ICCARM__)    /* IAR Intrinsic */
+        return 31 - __CLZ(val);
 #   else   /* Software version */
         static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
                                                  11, 14, 16, 18, 22, 25,  3, 30,
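
All branches of BIT_highbit32() compute the index of the highest set bit; the new IAR branch maps it onto the __CLZ intrinsic exactly as the GCC branch maps it onto __builtin_clz. A standalone sketch of the equivalence, with the portable loop included for comparison:

    #include <assert.h>

    /* index of the highest set bit, val != 0 */
    static unsigned highbit32_portable(unsigned val)
    {
        unsigned r = 0;
        while (val >>= 1) r++;
        return r;
    }

    int main(void)
    {
        assert(highbit32_portable(1u) == 0);
        assert(highbit32_portable(0x00010000u) == 16);
        assert(highbit32_portable(0x80000000u) == 31);
    #if defined(__GNUC__)
        assert(31 - __builtin_clz(0x00010000u) == 16);   /* same as 31 - __CLZ(val) on IAR */
    #endif
        return 0;
    }
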
--- a/contrib/python-zstandard/zstd/common/compiler.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/common/compiler.h	Mon Oct 21 11:09:48 2019 -0400
@@ -23,7 +23,7 @@
 #  define INLINE_KEYWORD
 #endif
 
-#if defined(__GNUC__)
+#if defined(__GNUC__) || defined(__ICCARM__)
 #  define FORCE_INLINE_ATTR __attribute__((always_inline))
 #elif defined(_MSC_VER)
 #  define FORCE_INLINE_ATTR __forceinline
@@ -40,7 +40,7 @@
 
 /**
  * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
- * parameters. They must be inlined for the compiler to elimininate the constant
+ * parameters. They must be inlined for the compiler to eliminate the constant
  * branches.
  */
 #define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
@@ -65,7 +65,7 @@
 #ifdef _MSC_VER
 #  define FORCE_NOINLINE static __declspec(noinline)
 #else
-#  ifdef __GNUC__
+#  if defined(__GNUC__) || defined(__ICCARM__)
 #    define FORCE_NOINLINE static __attribute__((__noinline__))
 #  else
 #    define FORCE_NOINLINE static
@@ -76,7 +76,7 @@
 #ifndef __has_attribute
   #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
 #endif
-#if defined(__GNUC__)
+#if defined(__GNUC__) || defined(__ICCARM__)
 #  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
 #else
 #  define TARGET_ATTRIBUTE(target)
@@ -127,6 +127,13 @@
     }                                     \
 }
 
+/* vectorization */
+#if !defined(__clang__) && defined(__GNUC__)
+#  define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
+#else
+#  define DONT_VECTORIZE
+#endif
+
 /* disable warnings */
 #ifdef _MSC_VER    /* Visual Studio */
 #  include <intrin.h>                    /* For Visual 2005 */
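
The new DONT_VECTORIZE macro lets a single function opt out of GCC's auto-vectorizer without touching global flags; the zstd_internal.h hunks below apply it to ZSTD_wildcopy(), apparently as a code-generation workaround, and it expands to nothing elsewhere. A minimal sketch of the pattern:

    #include <stdio.h>

    #if !defined(__clang__) && defined(__GNUC__)
    #  define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
    #else
    #  define DONT_VECTORIZE
    #endif

    /* the attribute pins this one function to scalar code under GCC -O3 */
    DONT_VECTORIZE
    static void byte_copy(unsigned char* dst, const unsigned char* src, int n)
    {
        int i;
        for (i = 0; i < n; i++) dst[i] = src[i];
    }

    int main(void)
    {
        unsigned char a[8] = "abcdefg", b[8];
        byte_copy(b, a, 8);
        printf("%s\n", (char*)b);
        return 0;
    }
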
--- a/contrib/python-zstandard/zstd/common/fse.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/common/fse.h	Mon Oct 21 11:09:48 2019 -0400
@@ -358,7 +358,7 @@
 typedef enum {
    FSE_repeat_none,  /**< Cannot use the previous table */
    FSE_repeat_check, /**< Can use the previous table but it must be checked */
-   FSE_repeat_valid  /**< Can use the previous table and it is asumed to be valid */
+   FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
  } FSE_repeat;
 
 /* *****************************************
--- a/contrib/python-zstandard/zstd/common/mem.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/common/mem.h	Mon Oct 21 11:09:48 2019 -0400
@@ -102,7 +102,7 @@
 #ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
 #  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
 #    define MEM_FORCE_MEMORY_ACCESS 2
-#  elif defined(__INTEL_COMPILER) || defined(__GNUC__)
+#  elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
 #    define MEM_FORCE_MEMORY_ACCESS 1
 #  endif
 #endif
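
MEM_FORCE_MEMORY_ACCESS selects how unaligned reads are performed: level 2 casts directly (only sound where the target tolerates it), level 1 goes through packed structs (the branch __ICCARM__ now joins), and the default is memcpy, which compilers optimize to a single load anyway. A standalone sketch of the memcpy and packed-struct forms, using GNU-C syntax for the packed attribute:

    #include <assert.h>
    #include <string.h>

    typedef unsigned int U32;

    /* default method: safe and standard everywhere */
    static U32 read32_memcpy(const void* p)
    {
        U32 v;
        memcpy(&v, p, sizeof(v));
        return v;
    }

    #if defined(__GNUC__)
    /* method 1: a packed struct lets the compiler emit an unaligned-capable load */
    typedef struct { U32 v; } __attribute__((packed)) unalign32;
    static U32 read32_packed(const void* p) { return ((const unalign32*)p)->v; }
    #endif

    int main(void)
    {
        unsigned char buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        U32 const a = read32_memcpy(buf + 1);   /* deliberately misaligned */
    #if defined(__GNUC__)
        assert(a == read32_packed(buf + 1));
    #endif
        (void)a;
        return 0;
    }
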
--- a/contrib/python-zstandard/zstd/common/threading.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/common/threading.c	Mon Oct 21 11:09:48 2019 -0400
@@ -14,8 +14,8 @@
  * This file will hold wrapper for systems, which do not support pthreads
  */
 
-/* create fake symbol to avoid empty trnaslation unit warning */
-int g_ZSTD_threading_useles_symbol;
+/* create fake symbol to avoid empty translation unit warning */
+int g_ZSTD_threading_useless_symbol;
 
 #if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
 
--- a/contrib/python-zstandard/zstd/common/xxhash.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/common/xxhash.c	Mon Oct 21 11:09:48 2019 -0400
@@ -53,7 +53,8 @@
 #  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
 #    define XXH_FORCE_MEMORY_ACCESS 2
 #  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
-  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
+  defined(__ICCARM__)
 #    define XXH_FORCE_MEMORY_ACCESS 1
 #  endif
 #endif
@@ -66,10 +67,10 @@
 /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
 
 /*!XXH_FORCE_NATIVE_FORMAT :
- * By default, xxHash library provides endian-independant Hash values, based on little-endian convention.
+ * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
  * Results are therefore identical for little-endian and big-endian CPU.
  * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
- * Should endian-independance be of no importance for your application, you may set the #define below to 1,
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
  * to improve speed for Big-endian CPU.
  * This option has no impact on Little_Endian CPU.
  */
@@ -120,7 +121,7 @@
 #  define INLINE_KEYWORD
 #endif
 
-#if defined(__GNUC__)
+#if defined(__GNUC__) || defined(__ICCARM__)
 #  define FORCE_INLINE_ATTR __attribute__((always_inline))
 #elif defined(_MSC_VER)
 #  define FORCE_INLINE_ATTR __forceinline
@@ -206,7 +207,12 @@
 #  define XXH_rotl32(x,r) _rotl(x,r)
 #  define XXH_rotl64(x,r) _rotl64(x,r)
 #else
+#if defined(__ICCARM__)
+#  include <intrinsics.h>
+#  define XXH_rotl32(x,r) __ROR(x,(32 - r))
+#else
 #  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+#endif
 #  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
 #endif
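
The IAR branch expresses a left-rotate through the __ROR (rotate-right) intrinsic, relying on the identity rotl(x, r) == rotr(x, 32 - r). A standalone check, restricted to r in 1..31 because a shift by 32 is undefined in C:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned r) { return (x << r) | (x >> (32 - r)); }
    static uint32_t rotr32(uint32_t x, unsigned r) { return (x >> r) | (x << (32 - r)); }

    int main(void)
    {
        uint32_t const x = 0x12345678u;
        unsigned r;
        for (r = 1; r < 32; r++)
            assert(rotl32(x, r) == rotr32(x, 32 - r));
        return 0;
    }
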
 
--- a/contrib/python-zstandard/zstd/common/zstd_internal.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/common/zstd_internal.h	Mon Oct 21 11:09:48 2019 -0400
@@ -34,7 +34,6 @@
 #endif
 #include "xxhash.h"                /* XXH_reset, update, digest */
 
-
 #if defined (__cplusplus)
 extern "C" {
 #endif
@@ -53,8 +52,50 @@
 #undef MAX
 #define MIN(a,b) ((a)<(b) ? (a) : (b))
 #define MAX(a,b) ((a)>(b) ? (a) : (b))
-#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; }  /* check and Forward error code */
-#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); }  /* check and send Error code */
+
+/**
+ * Return the specified error if the condition evaluates to true.
+ *
+ * In debug modes, prints additional information.
+ * In order to do that (particularly, printing the conditional that failed),
+ * this can't just wrap RETURN_ERROR().
+ */
+#define RETURN_ERROR_IF(cond, err, ...) \
+  if (cond) { \
+    RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
+    RAWLOG(3, ": " __VA_ARGS__); \
+    RAWLOG(3, "\n"); \
+    return ERROR(err); \
+  }
+
+/**
+ * Unconditionally return the specified error.
+ *
+ * In debug modes, prints additional information.
+ */
+#define RETURN_ERROR(err, ...) \
+  do { \
+    RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
+    RAWLOG(3, ": " __VA_ARGS__); \
+    RAWLOG(3, "\n"); \
+    return ERROR(err); \
+  } while(0);
+
+/**
+ * If the provided expression evaluates to an error code, returns that error code.
+ *
+ * In debug modes, prints additional information.
+ */
+#define FORWARD_IF_ERROR(err, ...) \
+  do { \
+    size_t const err_code = (err); \
+    if (ERR_isError(err_code)) { \
+      RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
+      RAWLOG(3, ": " __VA_ARGS__); \
+      RAWLOG(3, "\n"); \
+      return err_code; \
+    } \
+  } while(0);
 
 
 /*-*************************************
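
The new macros replace CHECK_F/CHECK_E and add debug logging on the failure path. At a call site, FORWARD_IF_ERROR keeps the old early-return behavior. A self-contained sketch with local stand-ins for ERR_isError()/ERROR(); the real macros also RAWLOG the failing expression:

    #include <stdio.h>
    #include <stddef.h>

    #define ERR_SENTINEL ((size_t)-1)                 /* stand-in for ERROR(err) */
    static int my_isError(size_t code) { return code == ERR_SENTINEL; }

    #define FORWARD_IF_ERROR(expr)              \
      do {                                      \
        size_t const err_code = (expr);         \
        if (my_isError(err_code))               \
            return err_code;                    \
      } while (0)

    static size_t step(int fail) { return fail ? ERR_SENTINEL : 42; }

    static size_t pipeline(int fail)
    {
        FORWARD_IF_ERROR(step(fail));   /* propagates the error code upward */
        return 0;                       /* success path */
    }

    int main(void)
    {
        printf("ok:  %zu\n", pipeline(0));              /* 0 */
        printf("err: %d\n", my_isError(pipeline(1)));   /* 1 */
        return 0;
    }
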
@@ -151,19 +192,72 @@
 *  Shared functions to include for inlining
 *********************************************/
 static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
 #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+static void ZSTD_copy16(void* dst, const void* src) { memcpy(dst, src, 16); }
+#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
+
+#define WILDCOPY_OVERLENGTH 8
+#define VECLEN 16
+
+typedef enum {
+    ZSTD_no_overlap,
+    ZSTD_overlap_src_before_dst,
+    /*  ZSTD_overlap_dst_before_src, */
+} ZSTD_overlap_e;
 
 /*! ZSTD_wildcopy() :
  *  custom version of memcpy(), can overwrite up to WILDCOPY_OVERLENGTH bytes (if length==0) */
-#define WILDCOPY_OVERLENGTH 8
-MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
+void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
 {
+    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
     const BYTE* ip = (const BYTE*)src;
     BYTE* op = (BYTE*)dst;
     BYTE* const oend = op + length;
-    do
-        COPY8(op, ip)
-    while (op < oend);
+
+    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
+    if (length < VECLEN || (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN)) {
+      do
+          COPY8(op, ip)
+      while (op < oend);
+    }
+    else {
+      if ((length & 8) == 0)
+        COPY8(op, ip);
+      do {
+        COPY16(op, ip);
+      }
+      while (op < oend);
+    }
+}
+
+/*! ZSTD_wildcopy_16min() :
+ *  same semantics as ZSTD_wildcopy() except guaranteed to be able to copy 16 bytes at the start */
+MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
+void ZSTD_wildcopy_16min(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
+{
+    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + length;
+
+    assert(length >= 8);
+    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
+
+    if (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN) {
+      do
+          COPY8(op, ip)
+      while (op < oend);
+    }
+    else {
+      if ((length & 8) == 0)
+        COPY8(op, ip);
+      do {
+        COPY16(op, ip);
+      }
+      while (op < oend);
+    }
 }
 
 MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd)   /* should be faster for decoding, but strangely, not verified on all platform */
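
The wildcopy contract both variants share: data moves in 8- or 16-byte chunks and the copy may run up to WILDCOPY_OVERLENGTH bytes past `length`, so destination buffers must carry that much slack. A simplified sketch with no overlap handling and 8-byte chunks only:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    #define WILDCOPY_OVERLENGTH 8

    static void wildcopy_sketch(unsigned char* op, const unsigned char* ip, ptrdiff_t length)
    {
        unsigned char* const oend = op + length;
        do {
            memcpy(op, ip, 8);   /* fixed-size copy: one unaligned load/store pair */
            op += 8; ip += 8;
        } while (op < oend);     /* may overshoot oend by up to 7 bytes */
    }

    int main(void)
    {
        unsigned char src[32] = "0123456789abcdefghijklmnopqrstu";
        unsigned char dst[13 + WILDCOPY_OVERLENGTH];   /* slack for the overshoot */
        wildcopy_sketch(dst, src, 13);                 /* actually copies 16 bytes */
        assert(memcmp(dst, src, 13) == 0);
        return 0;
    }
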
@@ -200,6 +294,17 @@
     U32   longLengthPos;
 } seqStore_t;
 
+/**
+ * Contains the compressed frame size and an upper bound for the decompressed frame size.
+ * Note: before using `compressedSize`, check for errors using ZSTD_isError().
+ *       Similarly, before using `decompressedBound`, check for errors using:
+ *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
+ */
+typedef struct {
+    size_t compressedSize;
+    unsigned long long decompressedBound;
+} ZSTD_frameSizeInfo;   /* decompress & legacy */
+
 const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
 void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
 
@@ -219,6 +324,8 @@
         return (unsigned)r;
 #   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
         return 31 - __builtin_clz(val);
+#   elif defined(__ICCARM__)    /* IAR Intrinsic */
+        return 31 - __CLZ(val);
 #   else   /* Software version */
         static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
         U32 v = val;
--- a/contrib/python-zstandard/zstd/compress/fse_compress.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/compress/fse_compress.c	Mon Oct 21 11:09:48 2019 -0400
@@ -129,9 +129,9 @@
     {   U32 position = 0;
         U32 symbol;
         for (symbol=0; symbol<=maxSymbolValue; symbol++) {
-            int nbOccurences;
+            int nbOccurrences;
             int const freq = normalizedCounter[symbol];
-            for (nbOccurences=0; nbOccurences<freq; nbOccurences++) {
+            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
                 tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
                 position = (position + step) & tableMask;
                 while (position > highThreshold)
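
For context on the loop being touched: FSE spreads each symbol `freq` times across the table, stepping by a fixed odd stride modulo the table size so occurrences interleave. A standalone sketch with a toy 16-slot table; the counts are made up, and the stride follows FSE's FSE_TABLESTEP formula:

    #include <stdio.h>

    int main(void)
    {
        enum { tableSize = 16, tableMask = tableSize - 1 };
        int const freq[3] = { 8, 5, 3 };   /* made-up normalized counts, sum == tableSize */
        unsigned const step = (tableSize >> 1) + (tableSize >> 3) + 3;   /* FSE_TABLESTEP */
        char table[tableSize + 1];
        unsigned position = 0;
        int symbol, nbOccurrences;

        for (symbol = 0; symbol < 3; symbol++)
            for (nbOccurrences = 0; nbOccurrences < freq[symbol]; nbOccurrences++) {
                table[position] = (char)('A' + symbol);
                position = (position + step) & tableMask;   /* odd stride visits every slot */
            }
        table[tableSize] = '\0';
        printf("%s\n", table);   /* interleaved layout: AABCABCABCAABAAB */
        return 0;
    }
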
--- a/contrib/python-zstandard/zstd/compress/zstd_compress.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress.c	Mon Oct 21 11:09:48 2019 -0400
@@ -21,6 +21,8 @@
 #define HUF_STATIC_LINKING_ONLY
 #include "huf.h"
 #include "zstd_compress_internal.h"
+#include "zstd_compress_sequences.h"
+#include "zstd_compress_literals.h"
 #include "zstd_fast.h"
 #include "zstd_double_fast.h"
 #include "zstd_lazy.h"
@@ -103,12 +105,31 @@
     return cctx;
 }
 
+/**
+ * Clears and frees all of the dictionaries in the CCtx.
+ */
+static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
+{
+    ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem);
+    ZSTD_freeCDict(cctx->localDict.cdict);
+    memset(&cctx->localDict, 0, sizeof(cctx->localDict));
+    memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
+    cctx->cdict = NULL;
+}
+
+static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
+{
+    size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
+    size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
+    return bufferSize + cdictSize;
+}
+
 static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
 {
     assert(cctx != NULL);
     assert(cctx->staticSize == 0);
     ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
-    ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL;
+    ZSTD_clearAllDicts(cctx);
 #ifdef ZSTD_MULTITHREAD
     ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
 #endif
@@ -117,7 +138,8 @@
 size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
 {
     if (cctx==NULL) return 0;   /* support free on NULL */
-    if (cctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static CCtx */
+    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+                    "not compatible with static CCtx");
     ZSTD_freeCCtxContent(cctx);
     ZSTD_free(cctx, cctx->customMem);
     return 0;
@@ -139,7 +161,7 @@
 {
     if (cctx==NULL) return 0;   /* support sizeof on NULL */
     return sizeof(*cctx) + cctx->workSpaceSize
-           + ZSTD_sizeof_CDict(cctx->cdictLocal)
+           + ZSTD_sizeof_localDict(cctx->localDict)
            + ZSTD_sizeof_mtctx(cctx);
 }
 
@@ -195,7 +217,7 @@
 }
 
 size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
-    if (!cctxParams) { return ERROR(GENERIC); }
+    RETURN_ERROR_IF(!cctxParams, GENERIC);
     memset(cctxParams, 0, sizeof(*cctxParams));
     cctxParams->compressionLevel = compressionLevel;
     cctxParams->fParams.contentSizeFlag = 1;
@@ -204,8 +226,8 @@
 
 size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
 {
-    if (!cctxParams) { return ERROR(GENERIC); }
-    CHECK_F( ZSTD_checkCParams(params.cParams) );
+    RETURN_ERROR_IF(!cctxParams, GENERIC);
+    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
     memset(cctxParams, 0, sizeof(*cctxParams));
     cctxParams->cParams = params.cParams;
     cctxParams->fParams = params.fParams;
@@ -359,6 +381,17 @@
         bounds.upperBound = ZSTD_dictForceCopy;       /* note : how to ensure at compile time that this is the highest value enum ? */
         return bounds;
 
+    case ZSTD_c_literalCompressionMode:
+        ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
+        bounds.lowerBound = ZSTD_lcm_auto;
+        bounds.upperBound = ZSTD_lcm_uncompressed;
+        return bounds;
+
+    case ZSTD_c_targetCBlockSize:
+        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
+        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
+        return bounds;
+
     default:
         {   ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 };
             return boundError;
@@ -366,22 +399,22 @@
     }
 }
 
-/* ZSTD_cParam_withinBounds:
- * @return 1 if value is within cParam bounds,
- * 0 otherwise */
-static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
+/* ZSTD_cParam_clampBounds:
+ * Clamps the value into the bounded range.
+ */
+static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
 {
     ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
-    if (ZSTD_isError(bounds.error)) return 0;
-    if (value < bounds.lowerBound) return 0;
-    if (value > bounds.upperBound) return 0;
-    return 1;
+    if (ZSTD_isError(bounds.error)) return bounds.error;
+    if (*value < bounds.lowerBound) *value = bounds.lowerBound;
+    if (*value > bounds.upperBound) *value = bounds.upperBound;
+    return 0;
 }
 
-#define BOUNDCHECK(cParam, val) {                  \
-    if (!ZSTD_cParam_withinBounds(cParam,val)) {   \
-        return ERROR(parameter_outOfBound);        \
-}   }
+#define BOUNDCHECK(cParam, val) { \
+    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
+                    parameter_outOfBound); \
+}
 
 
 static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
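
The shift from ZSTD_cParam_withinBounds() to ZSTD_cParam_clampBounds() changes setParameter semantics for the converted cases: out-of-range requests are capped rather than rejected. A sketch of the two behaviors side by side; the bounds values are illustrative, not zstd's real ones:

    #include <assert.h>

    typedef struct { int lowerBound; int upperBound; } bounds_sketch;

    static int within_bounds(bounds_sketch b, int v)
    {
        return v >= b.lowerBound && v <= b.upperBound;   /* old: reject when 0 */
    }

    static void clamp_bounds(bounds_sketch b, int* v)
    {
        if (*v < b.lowerBound) *v = b.lowerBound;        /* new: silently cap */
        if (*v > b.upperBound) *v = b.upperBound;
    }

    int main(void)
    {
        bounds_sketch const level = { -5, 22 };   /* illustrative bounds only */
        int v = 99;
        assert(!within_bounds(level, v));         /* old path: parameter_outOfBound */
        clamp_bounds(level, &v);
        assert(v == 22);                          /* new path: request accepted, capped */
        return 0;
    }
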
@@ -413,6 +446,8 @@
     case ZSTD_c_ldmBucketSizeLog:
     case ZSTD_c_ldmHashRateLog:
     case ZSTD_c_forceAttachDict:
+    case ZSTD_c_literalCompressionMode:
+    case ZSTD_c_targetCBlockSize:
     default:
         return 0;
     }
@@ -425,18 +460,17 @@
         if (ZSTD_isUpdateAuthorized(param)) {
             cctx->cParamsChanged = 1;
         } else {
-            return ERROR(stage_wrong);
+            RETURN_ERROR(stage_wrong);
     }   }
 
     switch(param)
     {
-    case ZSTD_c_format :
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+    case ZSTD_c_nbWorkers:
+        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
+                        "MT not compatible with static alloc");
+        break;
 
     case ZSTD_c_compressionLevel:
-        if (cctx->cdict) return ERROR(stage_wrong);
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
     case ZSTD_c_windowLog:
     case ZSTD_c_hashLog:
     case ZSTD_c_chainLog:
@@ -444,49 +478,33 @@
     case ZSTD_c_minMatch:
     case ZSTD_c_targetLength:
     case ZSTD_c_strategy:
-        if (cctx->cdict) return ERROR(stage_wrong);
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
+    case ZSTD_c_ldmHashRateLog:
+    case ZSTD_c_format:
     case ZSTD_c_contentSizeFlag:
     case ZSTD_c_checksumFlag:
     case ZSTD_c_dictIDFlag:
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
-    case ZSTD_c_forceMaxWindow :  /* Force back-references to remain < windowSize,
-                                   * even when referencing into Dictionary content.
-                                   * default : 0 when using a CDict, 1 when using a Prefix */
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
+    case ZSTD_c_forceMaxWindow:
     case ZSTD_c_forceAttachDict:
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
-    case ZSTD_c_nbWorkers:
-        if ((value!=0) && cctx->staticSize) {
-            return ERROR(parameter_unsupported);  /* MT not compatible with static alloc */
-        }
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
+    case ZSTD_c_literalCompressionMode:
     case ZSTD_c_jobSize:
     case ZSTD_c_overlapLog:
     case ZSTD_c_rsyncable:
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
     case ZSTD_c_enableLongDistanceMatching:
     case ZSTD_c_ldmHashLog:
     case ZSTD_c_ldmMinMatch:
     case ZSTD_c_ldmBucketSizeLog:
-    case ZSTD_c_ldmHashRateLog:
-        if (cctx->cdict) return ERROR(stage_wrong);
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
-    default: return ERROR(parameter_unsupported);
+    case ZSTD_c_targetCBlockSize:
+        break;
+
+    default: RETURN_ERROR(parameter_unsupported);
     }
+    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
 }
 
-size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
-                                   ZSTD_cParameter param, int value)
+size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+                                    ZSTD_cParameter param, int value)
 {
-    DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%i, %i)", (int)param, value);
+    DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
     switch(param)
     {
     case ZSTD_c_format :
@@ -495,11 +513,9 @@
         return (size_t)CCtxParams->format;
 
     case ZSTD_c_compressionLevel : {
-        int cLevel = value;
-        if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel();
-        if (cLevel < ZSTD_minCLevel()) cLevel = ZSTD_minCLevel();
-        if (cLevel) {  /* 0 : does not change current level */
-            CCtxParams->compressionLevel = cLevel;
+        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+        if (value) {  /* 0 : does not change current level */
+            CCtxParams->compressionLevel = value;
         }
         if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
         return 0;  /* return type (size_t) cannot represent negative values */
@@ -573,33 +589,55 @@
         return CCtxParams->attachDictPref;
     }
 
+    case ZSTD_c_literalCompressionMode : {
+        const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
+        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
+        CCtxParams->literalCompressionMode = lcm;
+        return CCtxParams->literalCompressionMode;
+    }
+
     case ZSTD_c_nbWorkers :
 #ifndef ZSTD_MULTITHREAD
-        if (value!=0) return ERROR(parameter_unsupported);
+        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
         return 0;
 #else
-        return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value);
+        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+        CCtxParams->nbWorkers = value;
+        return CCtxParams->nbWorkers;
 #endif
 
     case ZSTD_c_jobSize :
 #ifndef ZSTD_MULTITHREAD
-        return ERROR(parameter_unsupported);
+        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+        return 0;
 #else
-        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
+        /* Adjust to the minimum non-default value. */
+        if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
+            value = ZSTDMT_JOBSIZE_MIN;
+        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+        assert(value >= 0);
+        CCtxParams->jobSize = value;
+        return CCtxParams->jobSize;
 #endif
 
     case ZSTD_c_overlapLog :
 #ifndef ZSTD_MULTITHREAD
-        return ERROR(parameter_unsupported);
+        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+        return 0;
 #else
-        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapLog, value);
+        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
+        CCtxParams->overlapLog = value;
+        return CCtxParams->overlapLog;
 #endif
 
     case ZSTD_c_rsyncable :
 #ifndef ZSTD_MULTITHREAD
-        return ERROR(parameter_unsupported);
+        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+        return 0;
 #else
-        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_rsyncable, value);
+        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_rsyncable, &value));
+        CCtxParams->rsyncable = value;
+        return CCtxParams->rsyncable;
 #endif
 
     case ZSTD_c_enableLongDistanceMatching :
@@ -625,21 +663,27 @@
         return CCtxParams->ldmParams.bucketSizeLog;
 
     case ZSTD_c_ldmHashRateLog :
-        if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
-            return ERROR(parameter_outOfBound);
+        RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
+                        parameter_outOfBound);
         CCtxParams->ldmParams.hashRateLog = value;
         return CCtxParams->ldmParams.hashRateLog;
 
-    default: return ERROR(parameter_unsupported);
+    case ZSTD_c_targetCBlockSize :
+        if (value!=0)   /* 0 ==> default */
+            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
+        CCtxParams->targetCBlockSize = value;
+        return CCtxParams->targetCBlockSize;
+
+    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
     }
 }
 
 size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
 {
-    return ZSTD_CCtxParam_getParameter(&cctx->requestedParams, param, value);
+    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
 }
 
-size_t ZSTD_CCtxParam_getParameter(
+size_t ZSTD_CCtxParams_getParameter(
         ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
 {
     switch(param)
@@ -651,13 +695,13 @@
         *value = CCtxParams->compressionLevel;
         break;
     case ZSTD_c_windowLog :
-        *value = CCtxParams->cParams.windowLog;
+        *value = (int)CCtxParams->cParams.windowLog;
         break;
     case ZSTD_c_hashLog :
-        *value = CCtxParams->cParams.hashLog;
+        *value = (int)CCtxParams->cParams.hashLog;
         break;
     case ZSTD_c_chainLog :
-        *value = CCtxParams->cParams.chainLog;
+        *value = (int)CCtxParams->cParams.chainLog;
         break;
     case ZSTD_c_searchLog :
         *value = CCtxParams->cParams.searchLog;
@@ -686,6 +730,9 @@
     case ZSTD_c_forceAttachDict :
         *value = CCtxParams->attachDictPref;
         break;
+    case ZSTD_c_literalCompressionMode :
+        *value = CCtxParams->literalCompressionMode;
+        break;
     case ZSTD_c_nbWorkers :
 #ifndef ZSTD_MULTITHREAD
         assert(CCtxParams->nbWorkers == 0);
@@ -694,7 +741,7 @@
         break;
     case ZSTD_c_jobSize :
 #ifndef ZSTD_MULTITHREAD
-        return ERROR(parameter_unsupported);
+        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
 #else
         assert(CCtxParams->jobSize <= INT_MAX);
         *value = (int)CCtxParams->jobSize;
@@ -702,14 +749,14 @@
 #endif
     case ZSTD_c_overlapLog :
 #ifndef ZSTD_MULTITHREAD
-        return ERROR(parameter_unsupported);
+        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
 #else
         *value = CCtxParams->overlapLog;
         break;
 #endif
     case ZSTD_c_rsyncable :
 #ifndef ZSTD_MULTITHREAD
-        return ERROR(parameter_unsupported);
+        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
 #else
         *value = CCtxParams->rsyncable;
         break;
@@ -729,7 +776,10 @@
     case ZSTD_c_ldmHashRateLog :
         *value = CCtxParams->ldmParams.hashRateLog;
         break;
-    default: return ERROR(parameter_unsupported);
+    case ZSTD_c_targetCBlockSize :
+        *value = (int)CCtxParams->targetCBlockSize;
+        break;
+    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
     }
     return 0;
 }
@@ -745,8 +795,8 @@
         ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
 {
     DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
-    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
-    if (cctx->cdict) return ERROR(stage_wrong);
+    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+    RETURN_ERROR_IF(cctx->cdict, stage_wrong);
 
     cctx->requestedParams = *params;
     return 0;
@@ -755,33 +805,71 @@
 ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
-    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
     cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
     return 0;
 }
 
+/**
+ * Initializes the local dict using the requested parameters.
+ * NOTE: This does not use the pledged src size, because it may be used for more
+ * than one compression.
+ */
+static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
+{
+    ZSTD_localDict* const dl = &cctx->localDict;
+    ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(
+            &cctx->requestedParams, 0, dl->dictSize);
+    if (dl->dict == NULL) {
+        /* No local dictionary. */
+        assert(dl->dictBuffer == NULL);
+        assert(dl->cdict == NULL);
+        assert(dl->dictSize == 0);
+        return 0;
+    }
+    if (dl->cdict != NULL) {
+        assert(cctx->cdict == dl->cdict);
+        /* Local dictionary already initialized. */
+        return 0;
+    }
+    assert(dl->dictSize > 0);
+    assert(cctx->cdict == NULL);
+    assert(cctx->prefixDict.dict == NULL);
+
+    dl->cdict = ZSTD_createCDict_advanced(
+            dl->dict,
+            dl->dictSize,
+            ZSTD_dlm_byRef,
+            dl->dictContentType,
+            cParams,
+            cctx->customMem);
+    RETURN_ERROR_IF(!dl->cdict, memory_allocation);
+    cctx->cdict = dl->cdict;
+    return 0;
+}
+
 size_t ZSTD_CCtx_loadDictionary_advanced(
         ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
         ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
 {
-    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
-    if (cctx->staticSize) return ERROR(memory_allocation);  /* no malloc for static CCtx */
+    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+                    "no malloc for static CCtx");
     DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
-    ZSTD_freeCDict(cctx->cdictLocal);  /* in case one already exists */
-    if (dict==NULL || dictSize==0) {   /* no dictionary mode */
-        cctx->cdictLocal = NULL;
-        cctx->cdict = NULL;
+    ZSTD_clearAllDicts(cctx);  /* in case one already exists */
+    if (dict == NULL || dictSize == 0)  /* no dictionary mode */
+        return 0;
+    if (dictLoadMethod == ZSTD_dlm_byRef) {
+        cctx->localDict.dict = dict;
     } else {
-        ZSTD_compressionParameters const cParams =
-                ZSTD_getCParamsFromCCtxParams(&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, dictSize);
-        cctx->cdictLocal = ZSTD_createCDict_advanced(
-                                dict, dictSize,
-                                dictLoadMethod, dictContentType,
-                                cParams, cctx->customMem);
-        cctx->cdict = cctx->cdictLocal;
-        if (cctx->cdictLocal == NULL)
-            return ERROR(memory_allocation);
+        void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem);
+        RETURN_ERROR_IF(!dictBuffer, memory_allocation);
+        memcpy(dictBuffer, dict, dictSize);
+        cctx->localDict.dictBuffer = dictBuffer;
+        cctx->localDict.dict = dictBuffer;
     }
+    cctx->localDict.dictSize = dictSize;
+    cctx->localDict.dictContentType = dictContentType;
     return 0;
 }
 
@@ -801,9 +889,10 @@
 
 size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
 {
-    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+    /* Free the existing local cdict (if any) to save memory. */
+    ZSTD_clearAllDicts(cctx);
     cctx->cdict = cdict;
-    memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* exclusive */
     return 0;
 }
 
@@ -815,8 +904,8 @@
 size_t ZSTD_CCtx_refPrefix_advanced(
         ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
 {
-    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
-    cctx->cdict = NULL;   /* prefix discards any prior cdict */
+    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+    ZSTD_clearAllDicts(cctx);
     cctx->prefixDict.dict = prefix;
     cctx->prefixDict.dictSize = prefixSize;
     cctx->prefixDict.dictContentType = dictContentType;
@@ -834,8 +923,8 @@
     }
     if ( (reset == ZSTD_reset_parameters)
       || (reset == ZSTD_reset_session_and_parameters) ) {
-        if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
-        cctx->cdict = NULL;
+        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+        ZSTD_clearAllDicts(cctx);
         return ZSTD_CCtxParams_reset(&cctx->requestedParams);
     }
     return 0;
@@ -847,12 +936,12 @@
     @return : 0, or an error code if one value is beyond authorized range */
 size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
 {
-    BOUNDCHECK(ZSTD_c_windowLog, cParams.windowLog);
-    BOUNDCHECK(ZSTD_c_chainLog,  cParams.chainLog);
-    BOUNDCHECK(ZSTD_c_hashLog,   cParams.hashLog);
-    BOUNDCHECK(ZSTD_c_searchLog, cParams.searchLog);
-    BOUNDCHECK(ZSTD_c_minMatch,  cParams.minMatch);
-    BOUNDCHECK(ZSTD_c_targetLength,cParams.targetLength);
+    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
+    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
+    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
+    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
+    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
+    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
     BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
     return 0;
 }
@@ -868,7 +957,7 @@
         if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
         else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
     }
-#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, int)
+#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
     CLAMP(ZSTD_c_windowLog, cParams.windowLog);
     CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
     CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
@@ -888,10 +977,11 @@
 }
 
 /** ZSTD_adjustCParams_internal() :
-    optimize `cPar` for a given input (`srcSize` and `dictSize`).
-    mostly downsizing to reduce memory consumption and initialization latency.
-    Both `srcSize` and `dictSize` are optional (use 0 if unknown).
-    Note : cPar is assumed validated. Use ZSTD_checkCParams() to ensure this condition. */
+ *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).
+ *  mostly downsizes them to reduce memory consumption and initialization latency.
+ * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
+ *  note : for the time being, `srcSize==0` means "unknown" too, for compatibility with the older convention.
+ *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
 static ZSTD_compressionParameters
 ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
                             unsigned long long srcSize,
@@ -901,7 +991,7 @@
     static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
     assert(ZSTD_checkCParams(cPar)==0);
 
-    if (dictSize && (srcSize+1<2) /* srcSize unknown */ )
+    if (dictSize && (srcSize+1<2) /* ZSTD_CONTENTSIZE_UNKNOWN and 0 mean "unknown" */ )
         srcSize = minSrcSize;  /* presumed small when there is a dictionary */
     else if (srcSize == 0)
         srcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* 0 == unknown : presumed large */
@@ -922,7 +1012,7 @@
     }
 
     if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
-        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* required for frame header */
+        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */
 
     return cPar;
 }
@@ -932,7 +1022,7 @@
                    unsigned long long srcSize,
                    size_t dictSize)
 {
-    cPar = ZSTD_clampCParams(cPar);
+    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
     return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
 }
 
@@ -973,8 +1063,7 @@
 
 size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
 {
-    /* Estimate CCtx size is supported for single-threaded compression only. */
-    if (params->nbWorkers > 0) { return ERROR(GENERIC); }
+    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
     {   ZSTD_compressionParameters const cParams =
                 ZSTD_getCParamsFromCCtxParams(params, 0, 0);
         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
@@ -1022,10 +1111,12 @@
 
 size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
 {
-    if (params->nbWorkers > 0) { return ERROR(GENERIC); }
-    {   size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
-        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
-        size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
+    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CStream size is supported for single-threaded compression only.");
+    {   ZSTD_compressionParameters const cParams =
+                ZSTD_getCParamsFromCCtxParams(params, 0, 0);
+        size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
+        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+        size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
         size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
         size_t const streamingSize = inBuffSize + outBuffSize;
 
@@ -1197,15 +1288,14 @@
 }
 
 /*! ZSTD_invalidateMatchState()
- * Invalidate all the matches in the match finder tables.
- * Requires nextSrc and base to be set (can be NULL).
+ *  Invalidate all the matches in the match finder tables.
+ *  Requires nextSrc and base to be set (can be NULL).
  */
 static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
 {
     ZSTD_window_clear(&ms->window);
 
     ms->nextToUpdate = ms->window.dictLimit;
-    ms->nextToUpdate3 = ms->window.dictLimit;
     ms->loadedDictEnd = 0;
     ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
     ms->dictMatchState = NULL;
@@ -1242,15 +1332,17 @@
 
 typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
 
+typedef enum { ZSTD_resetTarget_CDict, ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e;
+
 static void*
 ZSTD_reset_matchState(ZSTD_matchState_t* ms,
                       void* ptr,
                 const ZSTD_compressionParameters* cParams,
-                      ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
+                      ZSTD_compResetPolicy_e const crp, ZSTD_resetTarget_e const forWho)
 {
     size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
     size_t const hSize = ((size_t)1) << cParams->hashLog;
-    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
     size_t const h3Size = ((size_t)1) << hashLog3;
     size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
 
@@ -1264,7 +1356,7 @@
     ZSTD_invalidateMatchState(ms);
 
     /* opt parser space */
-    if (forCCtx && (cParams->strategy >= ZSTD_btopt)) {
+    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
         DEBUGLOG(4, "reserving optimal parser space");
         ms->opt.litFreq = (unsigned*)ptr;
         ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
@@ -1292,6 +1384,19 @@
     return ptr;
 }
 
+/* ZSTD_indexTooCloseToMax() :
+ * minor optimization : prefer memset() rather than reduceIndex()
+ * which is measurably slow in some circumstances (reported for Visual Studio).
+ * Works when re-using a context for a lot of smallish inputs :
+ * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
+ * memset() will be triggered before reduceIndex().
+ */
+#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
+static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
+{
+    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
+}
+
 #define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
 #define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128  /* when workspace is continuously too large
                                          * during at least this number of times,
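
ZSTD_indexTooCloseToMax() trades a cheap memset-based reset against the per-entry reduceIndex() rescale: once the window has consumed almost the whole 32-bit index space, resetting wins. A sketch of the margin test; INDEX_MAX_SKETCH is a made-up ceiling standing in for ZSTD_CURRENT_MAX:

    #include <stdio.h>
    #include <stddef.h>

    #define MB (1u << 20)
    #define INDEX_MAX_SKETCH      (3500u * MB)   /* made-up stand-in for ZSTD_CURRENT_MAX */
    #define INDEXOVERFLOW_MARGIN  (16u * MB)

    static int indexTooCloseToMax_sketch(size_t consumed)   /* consumed = nextSrc - base */
    {
        return consumed > (size_t)(INDEX_MAX_SKETCH - INDEXOVERFLOW_MARGIN);
    }

    int main(void)
    {
        printf("%d\n", indexTooCloseToMax_sketch((size_t)INDEX_MAX_SKETCH - 4u * MB)); /* 1: reset */
        printf("%d\n", indexTooCloseToMax_sketch((size_t)64u * MB));                   /* 0: continue */
        return 0;
    }
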
@@ -1303,7 +1408,7 @@
     note : `params` are assumed fully validated at this stage */
 static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
                                       ZSTD_CCtx_params params,
-                                      U64 pledgedSrcSize,
+                                      U64 const pledgedSrcSize,
                                       ZSTD_compResetPolicy_e const crp,
                                       ZSTD_buffered_policy_e const zbuff)
 {
@@ -1315,13 +1420,21 @@
         if (ZSTD_equivalentParams(zc->appliedParams, params,
                                   zc->inBuffSize,
                                   zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
-                                  zbuff, pledgedSrcSize)) {
-            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%zu)",
-                        zc->appliedParams.cParams.windowLog, zc->blockSize);
+                                  zbuff, pledgedSrcSize) ) {
+            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> consider continue mode");
             zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0);   /* if it was too large, it still is */
-            if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION)
+            if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION) {
+                DEBUGLOG(4, "continue mode confirmed (wLog1=%u, blockSize1=%zu)",
+                            zc->appliedParams.cParams.windowLog, zc->blockSize);
+                if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) {
+                    /* prefer a reset, faster than a rescale */
+                    ZSTD_reset_matchState(&zc->blockState.matchState,
+                                           zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
+                                          &params.cParams,
+                                           crp, ZSTD_resetTarget_CCtx);
+                }
                 return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
-    }   }
+    }   }   }
     DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");
 
     if (params.ldmParams.enableLdm) {
@@ -1364,16 +1477,16 @@
             DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
 
             if (workSpaceTooSmall || workSpaceWasteful) {
-                DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB",
+                DEBUGLOG(4, "Resize workSpaceSize from %zuKB to %zuKB",
                             zc->workSpaceSize >> 10,
                             neededSpace >> 10);
-                /* static cctx : no resize, error out */
-                if (zc->staticSize) return ERROR(memory_allocation);
+
+                RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
 
                 zc->workSpaceSize = 0;
                 ZSTD_free(zc->workSpace, zc->customMem);
                 zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
-                if (zc->workSpace == NULL) return ERROR(memory_allocation);
+                RETURN_ERROR_IF(zc->workSpace == NULL, memory_allocation);
                 zc->workSpaceSize = neededSpace;
                 zc->workSpaceOversizedDuration = 0;
 
@@ -1406,7 +1519,10 @@
 
         ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
 
-        ptr = zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32;
+        ptr = ZSTD_reset_matchState(&zc->blockState.matchState,
+                                     zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
+                                    &params.cParams,
+                                     crp, ZSTD_resetTarget_CCtx);
 
         /* ldm hash table */
         /* initialize bucketOffsets table later for pointer alignment */
@@ -1424,8 +1540,6 @@
         }
         assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
 
-        ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, &params.cParams, crp, /* forCCtx */ 1);
-
         /* sequences storage */
         zc->seqStore.maxNbSeq = maxNbSeq;
         zc->seqStore.sequencesStart = (seqDef*)ptr;
@@ -1502,15 +1616,14 @@
                                  * handled in _enforceMaxDist */
 }
 
-static size_t ZSTD_resetCCtx_byAttachingCDict(
-    ZSTD_CCtx* cctx,
-    const ZSTD_CDict* cdict,
-    ZSTD_CCtx_params params,
-    U64 pledgedSrcSize,
-    ZSTD_buffered_policy_e zbuff)
+static size_t
+ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
+                        const ZSTD_CDict* cdict,
+                        ZSTD_CCtx_params params,
+                        U64 pledgedSrcSize,
+                        ZSTD_buffered_policy_e zbuff)
 {
-    {
-        const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
+    {   const ZSTD_compressionParameters* const cdict_cParams = &cdict->matchState.cParams;
         unsigned const windowLog = params.cParams.windowLog;
         assert(windowLog != 0);
         /* Resize working context table params for input only, since the dict
@@ -1522,8 +1635,7 @@
         assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
     }
 
-    {
-        const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
+    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
                                   - cdict->matchState.window.base);
         const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
         if (cdictLen == 0) {
@@ -1540,9 +1652,9 @@
                     cctx->blockState.matchState.window.base + cdictEnd;
                 ZSTD_window_clear(&cctx->blockState.matchState.window);
             }
+            /* loadedDictEnd is expressed within the index space of the active context */
             cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
-        }
-    }
+    }   }
 
     cctx->dictID = cdict->dictID;
 
@@ -1596,7 +1708,6 @@
         ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
         dstMatchState->window       = srcMatchState->window;
         dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
-        dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
         dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
     }
 
@@ -1644,7 +1755,7 @@
                             ZSTD_buffered_policy_e zbuff)
 {
     DEBUGLOG(5, "ZSTD_copyCCtx_internal");
-    if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
+    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong);
 
     memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
     {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
@@ -1676,7 +1787,6 @@
         ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
         dstMatchState->window       = srcMatchState->window;
         dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
-        dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
         dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
     }
     dstCCtx->dictID = srcCCtx->dictID;
@@ -1746,16 +1856,15 @@
 
 /*! ZSTD_reduceIndex() :
 *   rescale all indexes to avoid future overflow (indexes are U32) */
-static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
+static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
 {
-    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
-    {   U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
+    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
         ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
     }
 
-    if (zc->appliedParams.cParams.strategy != ZSTD_fast) {
-        U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog;
-        if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2)
+    if (params->cParams.strategy != ZSTD_fast) {
+        U32 const chainSize = (U32)1 << params->cParams.chainLog;
+        if (params->cParams.strategy == ZSTD_btlazy2)
             ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
         else
             ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
@@ -1777,161 +1886,13 @@
 static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
 {
     U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
-    if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
+                    dstSize_tooSmall);
     MEM_writeLE24(dst, cBlockHeader24);
     memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
     return ZSTD_blockHeaderSize + srcSize;
 }
 
-static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    BYTE* const ostart = (BYTE* const)dst;
-    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
-
-    if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
-
-    switch(flSize)
-    {
-        case 1: /* 2 - 1 - 5 */
-            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
-            break;
-        case 2: /* 2 - 2 - 12 */
-            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
-            break;
-        case 3: /* 2 - 2 - 20 */
-            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
-            break;
-        default:   /* not necessary : flSize is {1,2,3} */
-            assert(0);
-    }
-
-    memcpy(ostart + flSize, src, srcSize);
-    return srcSize + flSize;
-}
-
-static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
-    BYTE* const ostart = (BYTE* const)dst;
-    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
-
-    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
-
-    switch(flSize)
-    {
-        case 1: /* 2 - 1 - 5 */
-            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
-            break;
-        case 2: /* 2 - 2 - 12 */
-            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
-            break;
-        case 3: /* 2 - 2 - 20 */
-            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
-            break;
-        default:   /* not necessary : flSize is {1,2,3} */
-            assert(0);
-    }
-
-    ostart[flSize] = *(const BYTE*)src;
-    return flSize+1;
-}
-
-
-/* ZSTD_minGain() :
- * minimum compression required
- * to generate a compress block or a compressed literals section.
- * note : use same formula for both situations */
-static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
-{
-    U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
-    ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
-    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
-    return (srcSize >> minlog) + 2;
-}
-
-static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
-                                     ZSTD_hufCTables_t* nextHuf,
-                                     ZSTD_strategy strategy, int disableLiteralCompression,
-                                     void* dst, size_t dstCapacity,
-                               const void* src, size_t srcSize,
-                                     void* workspace, size_t wkspSize,
-                               const int bmi2)
-{
-    size_t const minGain = ZSTD_minGain(srcSize, strategy);
-    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
-    BYTE*  const ostart = (BYTE*)dst;
-    U32 singleStream = srcSize < 256;
-    symbolEncodingType_e hType = set_compressed;
-    size_t cLitSize;
-
-    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
-                disableLiteralCompression);
-
-    /* Prepare nextEntropy assuming reusing the existing table */
-    memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-
-    if (disableLiteralCompression)
-        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
-
-    /* small ? don't even attempt compression (speed opt) */
-#   define COMPRESS_LITERALS_SIZE_MIN 63
-    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
-        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
-    }
-
-    if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
-    {   HUF_repeat repeat = prevHuf->repeatMode;
-        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
-        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
-        cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
-                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
-                                : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
-                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
-        if (repeat != HUF_repeat_none) {
-            /* reused the existing table */
-            hType = set_repeat;
-        }
-    }
-
-    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
-        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
-    }
-    if (cLitSize==1) {
-        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
-    }
-
-    if (hType == set_compressed) {
-        /* using a newly constructed table */
-        nextHuf->repeatMode = HUF_repeat_check;
-    }
-
-    /* Build header */
-    switch(lhSize)
-    {
-    case 3: /* 2 - 2 - 10 - 10 */
-        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
-            MEM_writeLE24(ostart, lhc);
-            break;
-        }
-    case 4: /* 2 - 2 - 14 - 14 */
-        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
-            MEM_writeLE32(ostart, lhc);
-            break;
-        }
-    case 5: /* 2 - 2 - 18 - 18 */
-        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
-            MEM_writeLE32(ostart, lhc);
-            ostart[4] = (BYTE)(cLitSize >> 10);
-            break;
-        }
-    default:  /* not possible : lhSize is {3,4,5} */
-        assert(0);
-    }
-    return lhSize+cLitSize;
-}
-
-
 void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
 {
     const seqDef* const sequences = seqStorePtr->sequencesStart;
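
The literal-block helpers removed here move out of this file; the new #include "zstd_compress_literals.h" near the top of the diff points at their new home. Their header-sizing rule is worth keeping in mind when reading the removals: raw literals take 1, 2, or 3 flag bytes depending on whether the size fits in 5, 12, or 20 bits. A standalone restatement of the flSize expression from the removed ZSTD_noCompressLiterals():

    #include <assert.h>
    #include <stddef.h>

    /* same expression the removed code used for flSize */
    static unsigned rawLiteralsHeaderSize(size_t srcSize)
    {
        return 1 + (srcSize > 31) + (srcSize > 4095);
    }

    int main(void)
    {
        assert(rawLiteralsHeaderSize(10)    == 1);   /* 5-bit size field  */
        assert(rawLiteralsHeaderSize(1000)  == 2);   /* 12-bit size field */
        assert(rawLiteralsHeaderSize(10000) == 3);   /* 20-bit size field */
        return 0;
    }
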
@@ -1954,418 +1915,19 @@
         mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
 }
 
-
-/**
- * -log2(x / 256) lookup table for x in [0, 256).
- * If x == 0: Return 0
- * Else: Return floor(-log2(x / 256) * 256)
- */
-static unsigned const kInverseProbabiltyLog256[256] = {
-    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
-    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
-    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
-    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
-    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
-    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
-    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
-    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
-    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
-    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
-    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
-    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
-    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
-    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
-    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
-    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
-    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
-    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
-    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
-    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
-    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
-    5,    4,    2,    1,
-};
-
-
-/**
- * Returns the cost in bits of encoding the distribution described by count
- * using the entropy bound.
- */
-static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
-{
-    unsigned cost = 0;
-    unsigned s;
-    for (s = 0; s <= max; ++s) {
-        unsigned norm = (unsigned)((256 * count[s]) / total);
-        if (count[s] != 0 && norm == 0)
-            norm = 1;
-        assert(count[s] < total);
-        cost += count[s] * kInverseProbabiltyLog256[norm];
-    }
-    return cost >> 8;
-}
-
-
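A quick numeric check of the removed table and cost function: kInverseProbabiltyLog256[norm] stores floor(-log2(norm/256) * 256), so entry 64 (probability 1/4) is 512, i.e. 2 bits scaled by 256. A standalone sketch of how ZSTD_entropyCost used it, with a toy uniform distribution (the 512 is copied from the table above, not computed from zstd headers):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned const norm = 64;   /* probability 64/256 == 1/4 */
        unsigned const fromFormula = (unsigned)floor(-log2(norm / 256.0) * 256);
        unsigned count[4] = { 64, 64, 64, 64 };   /* toy distribution, total 256 */
        unsigned cost = 0;
        unsigned s;
        printf("floor(-log2(64/256)*256) = %u (table entry 64 is 512)\n",
               fromFormula);
        /* ZSTD_entropyCost in miniature: each count normalizes to 64/256 */
        for (s = 0; s < 4; s++)
            cost += count[s] * 512;   /* kInverseProbabiltyLog256[64] */
        printf("entropy bound = %u bits\n", cost >> 8);  /* 4*64*512 >> 8 == 512 */
        return 0;
    }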
-/**
- * Returns the cost in bits of encoding the distribution in count using the
- * table described by norm. The max symbol supported by norm is assumed >= max.
- * norm must be valid for every symbol with non-zero probability in count.
- */
-static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
-                                    unsigned const* count, unsigned const max)
+static int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
 {
-    unsigned const shift = 8 - accuracyLog;
-    size_t cost = 0;
-    unsigned s;
-    assert(accuracyLog <= 8);
-    for (s = 0; s <= max; ++s) {
-        unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
-        unsigned const norm256 = normAcc << shift;
-        assert(norm256 > 0);
-        assert(norm256 < 256);
-        cost += count[s] * kInverseProbabiltyLog256[norm256];
-    }
-    return cost >> 8;
-}
-
-
-static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
-  void const* ptr = ctable;
-  U16 const* u16ptr = (U16 const*)ptr;
-  U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
-  return maxSymbolValue;
-}
-
-
-/**
- * Returns the cost in bits of encoding the distribution in count using ctable.
- * Returns an error if ctable cannot represent all the symbols in count.
- */
-static size_t ZSTD_fseBitCost(
-    FSE_CTable const* ctable,
-    unsigned const* count,
-    unsigned const max)
-{
-    unsigned const kAccuracyLog = 8;
-    size_t cost = 0;
-    unsigned s;
-    FSE_CState_t cstate;
-    FSE_initCState(&cstate, ctable);
-    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
-        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
-                    ZSTD_getFSEMaxSymbolValue(ctable), max);
-        return ERROR(GENERIC);
-    }
-    for (s = 0; s <= max; ++s) {
-        unsigned const tableLog = cstate.stateLog;
-        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
-        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
-        if (count[s] == 0)
-            continue;
-        if (bitCost >= badCost) {
-            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
-            return ERROR(GENERIC);
-        }
-        cost += count[s] * bitCost;
-    }
-    return cost >> kAccuracyLog;
-}
-
-/**
- * Returns the cost in bytes of encoding the normalized count header.
- * Returns an error if any of the helper functions return an error.
- */
-static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
-                              size_t const nbSeq, unsigned const FSELog)
-{
-    BYTE wksp[FSE_NCOUNTBOUND];
-    S16 norm[MaxSeq + 1];
-    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
-    CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
-    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
-}
-
-
-typedef enum {
-    ZSTD_defaultDisallowed = 0,
-    ZSTD_defaultAllowed = 1
-} ZSTD_defaultPolicy_e;
-
-MEM_STATIC symbolEncodingType_e
-ZSTD_selectEncodingType(
-        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
-        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
-        FSE_CTable const* prevCTable,
-        short const* defaultNorm, U32 defaultNormLog,
-        ZSTD_defaultPolicy_e const isDefaultAllowed,
-        ZSTD_strategy const strategy)
-{
-    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
-    if (mostFrequent == nbSeq) {
-        *repeatMode = FSE_repeat_none;
-        if (isDefaultAllowed && nbSeq <= 2) {
-            /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
-             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
-             * If basic encoding isn't possible, always choose RLE.
-             */
-            DEBUGLOG(5, "Selected set_basic");
-            return set_basic;
-        }
-        DEBUGLOG(5, "Selected set_rle");
-        return set_rle;
+    switch (cctxParams->literalCompressionMode) {
+    case ZSTD_lcm_huffman:
+        return 0;
+    case ZSTD_lcm_uncompressed:
+        return 1;
+    default:
+        assert(0 /* impossible: pre-validated */);
+        /* fall-through */
+    case ZSTD_lcm_auto:
+        return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
     }
-    if (strategy < ZSTD_lazy) {
-        if (isDefaultAllowed) {
-            size_t const staticFse_nbSeq_max = 1000;
-            size_t const mult = 10 - strategy;
-            size_t const baseLog = 3;
-            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
-            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
-            assert(mult <= 9 && mult >= 7);
-            if ( (*repeatMode == FSE_repeat_valid)
-              && (nbSeq < staticFse_nbSeq_max) ) {
-                DEBUGLOG(5, "Selected set_repeat");
-                return set_repeat;
-            }
-            if ( (nbSeq < dynamicFse_nbSeq_min)
-              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
-                DEBUGLOG(5, "Selected set_basic");
-                /* The format allows default tables to be repeated, but it isn't useful.
-                 * When using simple heuristics to select encoding type, we don't want
-                 * to confuse these tables with dictionaries. When running more careful
-                 * analysis, we don't need to waste time checking both repeating tables
-                 * and default tables.
-                 */
-                *repeatMode = FSE_repeat_none;
-                return set_basic;
-            }
-        }
-    } else {
-        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
-        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
-        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
-        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
-
-        if (isDefaultAllowed) {
-            assert(!ZSTD_isError(basicCost));
-            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
-        }
-        assert(!ZSTD_isError(NCountCost));
-        assert(compressedCost < ERROR(maxCode));
-        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
-                    (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
-        if (basicCost <= repeatCost && basicCost <= compressedCost) {
-            DEBUGLOG(5, "Selected set_basic");
-            assert(isDefaultAllowed);
-            *repeatMode = FSE_repeat_none;
-            return set_basic;
-        }
-        if (repeatCost <= compressedCost) {
-            DEBUGLOG(5, "Selected set_repeat");
-            assert(!ZSTD_isError(repeatCost));
-            return set_repeat;
-        }
-        assert(compressedCost < basicCost && compressedCost < repeatCost);
-    }
-    DEBUGLOG(5, "Selected set_compressed");
-    *repeatMode = FSE_repeat_check;
-    return set_compressed;
-}
-
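The cost comparison driving the ZSTD_lazy+ branch of the removed ZSTD_selectEncodingType, reduced to a standalone sketch; the enum names and the bit-cost figures below are stand-ins for illustration, not the zstd API:

    #include <stdio.h>

    typedef enum { enc_basic, enc_rle, enc_compressed, enc_repeat } enc_e;

    static enc_e pick(size_t basicCost, size_t repeatCost, size_t compressedCost)
    {
        if (basicCost <= repeatCost && basicCost <= compressedCost)
            return enc_basic;       /* default table is cheapest */
        if (repeatCost <= compressedCost)
            return enc_repeat;      /* reuse previous block's table */
        return enc_compressed;      /* pay the NCount header, win on the body */
    }

    int main(void)
    {
        /* invented costs: the NCount header makes enc_compressed lose here */
        printf("%d\n", (int)pick(900, 850, 870));   /* prints 3 == enc_repeat */
        return 0;
    }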
-MEM_STATIC size_t
-ZSTD_buildCTable(void* dst, size_t dstCapacity,
-                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
-                unsigned* count, U32 max,
-                const BYTE* codeTable, size_t nbSeq,
-                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
-                const FSE_CTable* prevCTable, size_t prevCTableSize,
-                void* workspace, size_t workspaceSize)
-{
-    BYTE* op = (BYTE*)dst;
-    const BYTE* const oend = op + dstCapacity;
-    DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
-
-    switch (type) {
-    case set_rle:
-        CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max));
-        if (dstCapacity==0) return ERROR(dstSize_tooSmall);
-        *op = codeTable[0];
-        return 1;
-    case set_repeat:
-        memcpy(nextCTable, prevCTable, prevCTableSize);
-        return 0;
-    case set_basic:
-        CHECK_F(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));  /* note : could be pre-calculated */
-        return 0;
-    case set_compressed: {
-        S16 norm[MaxSeq + 1];
-        size_t nbSeq_1 = nbSeq;
-        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
-        if (count[codeTable[nbSeq-1]] > 1) {
-            count[codeTable[nbSeq-1]]--;
-            nbSeq_1--;
-        }
-        assert(nbSeq_1 > 1);
-        CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
-        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
-            if (FSE_isError(NCountSize)) return NCountSize;
-            CHECK_F(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
-            return NCountSize;
-        }
-    }
-    default: return assert(0), ERROR(GENERIC);
-    }
-}
-
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_encodeSequences_body(
-            void* dst, size_t dstCapacity,
-            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
-            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
-            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets)
-{
-    BIT_CStream_t blockStream;
-    FSE_CState_t  stateMatchLength;
-    FSE_CState_t  stateOffsetBits;
-    FSE_CState_t  stateLitLength;
-
-    CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
-    DEBUGLOG(6, "available space for bitstream : %i  (dstCapacity=%u)",
-                (int)(blockStream.endPtr - blockStream.startPtr),
-                (unsigned)dstCapacity);
-
-    /* first symbols */
-    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
-    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
-    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
-    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
-    if (MEM_32bits()) BIT_flushBits(&blockStream);
-    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
-    if (MEM_32bits()) BIT_flushBits(&blockStream);
-    if (longOffsets) {
-        U32 const ofBits = ofCodeTable[nbSeq-1];
-        int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
-        if (extraBits) {
-            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
-            BIT_flushBits(&blockStream);
-        }
-        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
-                    ofBits - extraBits);
-    } else {
-        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
-    }
-    BIT_flushBits(&blockStream);
-
-    {   size_t n;
-        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
-            BYTE const llCode = llCodeTable[n];
-            BYTE const ofCode = ofCodeTable[n];
-            BYTE const mlCode = mlCodeTable[n];
-            U32  const llBits = LL_bits[llCode];
-            U32  const ofBits = ofCode;
-            U32  const mlBits = ML_bits[mlCode];
-            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
-                        (unsigned)sequences[n].litLength,
-                        (unsigned)sequences[n].matchLength + MINMATCH,
-                        (unsigned)sequences[n].offset);
-                                                                            /* 32b*/  /* 64b*/
-                                                                            /* (7)*/  /* (7)*/
-            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
-            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
-            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
-            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
-            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
-                BIT_flushBits(&blockStream);                                /* (7)*/
-            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
-            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
-            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
-            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
-            if (longOffsets) {
-                int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
-                if (extraBits) {
-                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
-                    BIT_flushBits(&blockStream);                            /* (7)*/
-                }
-                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
-                            ofBits - extraBits);                            /* 31 */
-            } else {
-                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
-            }
-            BIT_flushBits(&blockStream);                                    /* (7)*/
-            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
-    }   }
-
-    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
-    FSE_flushCState(&blockStream, &stateMatchLength);
-    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
-    FSE_flushCState(&blockStream, &stateOffsetBits);
-    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
-    FSE_flushCState(&blockStream, &stateLitLength);
-
-    {   size_t const streamSize = BIT_closeCStream(&blockStream);
-        if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
-        return streamSize;
-    }
-}
-
-static size_t
-ZSTD_encodeSequences_default(
-            void* dst, size_t dstCapacity,
-            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
-            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
-            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets)
-{
-    return ZSTD_encodeSequences_body(dst, dstCapacity,
-                                    CTable_MatchLength, mlCodeTable,
-                                    CTable_OffsetBits, ofCodeTable,
-                                    CTable_LitLength, llCodeTable,
-                                    sequences, nbSeq, longOffsets);
-}
-
-
-#if DYNAMIC_BMI2
-
-static TARGET_ATTRIBUTE("bmi2") size_t
-ZSTD_encodeSequences_bmi2(
-            void* dst, size_t dstCapacity,
-            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
-            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
-            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets)
-{
-    return ZSTD_encodeSequences_body(dst, dstCapacity,
-                                    CTable_MatchLength, mlCodeTable,
-                                    CTable_OffsetBits, ofCodeTable,
-                                    CTable_LitLength, llCodeTable,
-                                    sequences, nbSeq, longOffsets);
-}
-
-#endif
-
-static size_t ZSTD_encodeSequences(
-            void* dst, size_t dstCapacity,
-            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
-            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
-            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
-{
-    DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
-#if DYNAMIC_BMI2
-    if (bmi2) {
-        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
-                                         CTable_MatchLength, mlCodeTable,
-                                         CTable_OffsetBits, ofCodeTable,
-                                         CTable_LitLength, llCodeTable,
-                                         sequences, nbSeq, longOffsets);
-    }
-#endif
-    (void)bmi2;
-    return ZSTD_encodeSequences_default(dst, dstCapacity,
-                                        CTable_MatchLength, mlCodeTable,
-                                        CTable_OffsetBits, ofCodeTable,
-                                        CTable_LitLength, llCodeTable,
-                                        sequences, nbSeq, longOffsets);
 }
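The new ZSTD_disableLiteralsCompression centralizes the check that ZSTD_compressSequences_internal previously computed inline: under ZSTD_lcm_auto, literals compression is disabled exactly when the strategy is ZSTD_fast with a positive targetLength. A minimal mirror of that decision table, with stand-in types rather than the zstd API:

    #include <stdio.h>

    typedef enum { lcm_auto, lcm_huffman, lcm_uncompressed } lcm_e;

    static int disable_literals(lcm_e mode, int strategyIsFast,
                                unsigned targetLength)
    {
        switch (mode) {
        case lcm_huffman:      return 0;    /* always compress literals */
        case lcm_uncompressed: return 1;    /* never compress literals */
        case lcm_auto:                      /* heuristic for the fast path */
        default:
            return strategyIsFast && (targetLength > 0);
        }
    }

    int main(void)
    {
        printf("%d %d %d\n",
               disable_literals(lcm_auto, 1, 999),        /* 1 */
               disable_literals(lcm_auto, 1, 0),          /* 0 */
               disable_literals(lcm_uncompressed, 0, 0)); /* 1 */
        return 0;
    }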
 
 /* ZSTD_compressSequences_internal():
@@ -2393,46 +1955,48 @@
     BYTE* const ostart = (BYTE*)dst;
     BYTE* const oend = ostart + dstCapacity;
     BYTE* op = ostart;
-    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+    size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
     BYTE* seqHead;
     BYTE* lastNCount = NULL;
 
+    DEBUGLOG(5, "ZSTD_compressSequences_internal (nbSeq=%zu)", nbSeq);
     ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
-    DEBUGLOG(5, "ZSTD_compressSequences_internal");
 
     /* Compress literals */
     {   const BYTE* const literals = seqStorePtr->litStart;
-        size_t const litSize = seqStorePtr->lit - literals;
-        int const disableLiteralCompression = (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
+        size_t const litSize = (size_t)(seqStorePtr->lit - literals);
         size_t const cSize = ZSTD_compressLiterals(
                                     &prevEntropy->huf, &nextEntropy->huf,
-                                    cctxParams->cParams.strategy, disableLiteralCompression,
+                                    cctxParams->cParams.strategy,
+                                    ZSTD_disableLiteralsCompression(cctxParams),
                                     op, dstCapacity,
                                     literals, litSize,
                                     workspace, wkspSize,
                                     bmi2);
-        if (ZSTD_isError(cSize))
-          return cSize;
+        FORWARD_IF_ERROR(cSize);
         assert(cSize <= dstCapacity);
         op += cSize;
     }
 
     /* Sequences Header */
-    if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall);
+    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+                    dstSize_tooSmall);
     if (nbSeq < 0x7F)
         *op++ = (BYTE)nbSeq;
     else if (nbSeq < LONGNBSEQ)
         op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
     else
         op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+    assert(op <= oend);
     if (nbSeq==0) {
         /* Copy the old tables over as if we repeated them */
         memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
-        return op - ostart;
+        return (size_t)(op - ostart);
     }
 
     /* seqHead : flags for FSE encoding type */
     seqHead = op++;
+    assert(op <= oend);
 
     /* convert length/distances into codes */
     ZSTD_seqToCodes(seqStorePtr);
@@ -2448,14 +2012,15 @@
                                         ZSTD_defaultAllowed, strategy);
         assert(set_basic < set_compressed && set_rle < set_compressed);
         assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
-        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
+        {   size_t const countSize = ZSTD_buildCTable(op, (size_t)(oend - op), CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
                                                     count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                                     prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
                                                     workspace, wkspSize);
-            if (ZSTD_isError(countSize)) return countSize;
+            FORWARD_IF_ERROR(countSize);
             if (LLtype == set_compressed)
                 lastNCount = op;
             op += countSize;
+            assert(op <= oend);
     }   }
     /* build CTable for Offsets */
     {   unsigned max = MaxOff;
@@ -2470,14 +2035,15 @@
                                         OF_defaultNorm, OF_defaultNormLog,
                                         defaultPolicy, strategy);
         assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
-        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
+        {   size_t const countSize = ZSTD_buildCTable(op, (size_t)(oend - op), CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
                                                     count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                                     prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
                                                     workspace, wkspSize);
-            if (ZSTD_isError(countSize)) return countSize;
+            FORWARD_IF_ERROR(countSize);
             if (Offtype == set_compressed)
                 lastNCount = op;
             op += countSize;
+            assert(op <= oend);
     }   }
     /* build CTable for MatchLengths */
     {   unsigned max = MaxML;
@@ -2490,29 +2056,31 @@
                                         ML_defaultNorm, ML_defaultNormLog,
                                         ZSTD_defaultAllowed, strategy);
         assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
-        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
+        {   size_t const countSize = ZSTD_buildCTable(op, (size_t)(oend - op), CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
                                                     count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
                                                     prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
                                                     workspace, wkspSize);
-            if (ZSTD_isError(countSize)) return countSize;
+            FORWARD_IF_ERROR(countSize);
             if (MLtype == set_compressed)
                 lastNCount = op;
             op += countSize;
+            assert(op <= oend);
     }   }
 
     *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
 
     {   size_t const bitstreamSize = ZSTD_encodeSequences(
-                                        op, oend - op,
+                                        op, (size_t)(oend - op),
                                         CTable_MatchLength, mlCodeTable,
                                         CTable_OffsetBits, ofCodeTable,
                                         CTable_LitLength, llCodeTable,
                                         sequences, nbSeq,
                                         longOffsets, bmi2);
-        if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
+        FORWARD_IF_ERROR(bitstreamSize);
         op += bitstreamSize;
+        assert(op <= oend);
         /* zstd versions <= 1.3.4 mistakenly report corruption when
-         * FSE_readNCount() recieves a buffer < 4 bytes.
+         * FSE_readNCount() receives a buffer < 4 bytes.
          * Fixed by https://github.com/facebook/zstd/pull/1146.
          * This can happen when the last set_compressed table present is 2
          * bytes and the bitstream is only one byte.
@@ -2529,7 +2097,7 @@
     }
 
     DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
-    return op - ostart;
+    return (size_t)(op - ostart);
 }
 
 MEM_STATIC size_t
@@ -2552,7 +2120,7 @@
      */
     if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
         return 0;  /* block not compressed */
-    if (ZSTD_isError(cSize)) return cSize;
+    FORWARD_IF_ERROR(cSize);
 
     /* Check compressibility */
     {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
@@ -2622,27 +2190,24 @@
     ssPtr->longLengthID = 0;
 }
 
-static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
-                                        void* dst, size_t dstCapacity,
-                                        const void* src, size_t srcSize)
+typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
+
+static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
 {
     ZSTD_matchState_t* const ms = &zc->blockState.matchState;
-    size_t cSize;
-    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
-                (unsigned)dstCapacity, (unsigned)ms->window.dictLimit, (unsigned)ms->nextToUpdate);
+    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
     assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
-
     /* Assert that we have correctly flushed the ctx params into the ms's copy */
     ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
-
     if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
         ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
-        cSize = 0;
-        goto out;  /* don't even attempt compression below a certain srcSize */
+        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
     }
     ZSTD_resetSeqStore(&(zc->seqStore));
-    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;   /* required for optimal parser to read stats from dictionary */
-
+    /* required for optimal parser to read stats from dictionary */
+    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
+    /* tell the optimal parser how we expect to compress literals */
+    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
     /* a gap between an attached dict and the current window is not safe,
      * they must remain adjacent,
      * and when that stops being the case, the dict must be unset */
@@ -2679,7 +2244,7 @@
             ldmSeqStore.seq = zc->ldmSequences;
             ldmSeqStore.capacity = zc->maxNbLdmSequences;
             /* Updates ldmSeqStore.size */
-            CHECK_F(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
+            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
                                                &zc->appliedParams.ldmParams,
                                                src, srcSize));
             /* Updates ldmSeqStore.pos */
@@ -2696,6 +2261,22 @@
         {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
             ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
     }   }
+    return ZSTDbss_compress;
+}
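ZSTD_buildSeqStore returns either a small ZSTD_buildSeqStore_e value or an error code, both carried in a size_t, so callers run FORWARD_IF_ERROR before comparing against ZSTDbss_noCompress. A sketch of that convention, with an invented stand-in for zstd's error band:

    #include <stdio.h>

    #define ERR_BASE ((size_t)-100)       /* stand-in for zstd's error range */
    #define IS_ERR(v) ((v) >= ERR_BASE)   /* analogue of ZSTD_isError */

    enum { bss_compress = 0, bss_noCompress = 1 };

    static size_t build_seq_store(int tooSmall)
    {
        if (tooSmall) return bss_noCompress;  /* legitimate small value */
        return bss_compress;
    }

    int main(void)
    {
        size_t const bss = build_seq_store(1);
        if (IS_ERR(bss)) { printf("error\n"); return 1; }
        if (bss == bss_noCompress) printf("emit raw block\n");
        return 0;
    }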
+
+static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+                                        void* dst, size_t dstCapacity,
+                                        const void* src, size_t srcSize)
+{
+    size_t cSize;
+    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
+                (unsigned)zc->blockState.matchState.nextToUpdate);
+
+    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+        FORWARD_IF_ERROR(bss);
+        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
+    }
 
     /* encode sequences and literals */
     cSize = ZSTD_compressSequences(&zc->seqStore,
@@ -2724,6 +2305,25 @@
 }
 
 
+static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, void const* ip, void const* iend)
+{
+    if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
+        U32 const maxDist = (U32)1 << params->cParams.windowLog;
+        U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
+        U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
+        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+        ZSTD_reduceIndex(ms, params, correction);
+        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
+        else ms->nextToUpdate -= correction;
+        /* invalidate dictionaries on overflow correction */
+        ms->loadedDictEnd = 0;
+        ms->dictMatchState = NULL;
+    }
+}
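The new helper folds the previously duplicated overflow-correction blocks into one place: after ZSTD_reduceIndex, every index kept by the match state is lowered by `correction`, saturating at zero, as the nextToUpdate adjustment above shows. An arithmetic sketch of that rescaling, with invented values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long const correction = 0x7E000000UL;  /* invented */
        unsigned long indices[3] = { 0x7E001000UL, 0x7E000000UL, 0x00000400UL };
        int i;
        for (i = 0; i < 3; i++) {
            unsigned long const adj =
                indices[i] < correction ? 0 : indices[i] - correction;
            printf("0x%08lX -> 0x%08lX\n", indices[i], adj);
        }
        return 0;
    }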
+
+
 /*! ZSTD_compress_frameChunk() :
 *   Compress a chunk of data into one or multiple blocks.
 *   All blocks will be terminated, all input will be consumed.
@@ -2742,7 +2342,7 @@
     BYTE* const ostart = (BYTE*)dst;
     BYTE* op = ostart;
     U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
-    assert(cctx->appliedParams.cParams.windowLog <= 31);
+    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
 
     DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
     if (cctx->appliedParams.fParams.checksumFlag && srcSize)
@@ -2752,33 +2352,25 @@
         ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
         U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
 
-        if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
-            return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
+        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
+                        dstSize_tooSmall,
+                        "not enough space to store compressed block");
         if (remaining < blockSize) blockSize = remaining;
 
-        if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) {
-            U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
-            U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
-            ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
-            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
-            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
-            ZSTD_reduceIndex(cctx, correction);
-            if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
-            else ms->nextToUpdate -= correction;
-            ms->loadedDictEnd = 0;
-            ms->dictMatchState = NULL;
-        }
-        ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
+        ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, ip, ip + blockSize);
+        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
+
+        /* Ensure hash/chain table insertion resumes no sooner than lowLimit */
         if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
 
         {   size_t cSize = ZSTD_compressBlock_internal(cctx,
                                 op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
                                 ip, blockSize);
-            if (ZSTD_isError(cSize)) return cSize;
+            FORWARD_IF_ERROR(cSize);
 
             if (cSize == 0) {  /* block is not compressible */
                 cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
-                if (ZSTD_isError(cSize)) return cSize;
+                FORWARD_IF_ERROR(cSize);
             } else {
                 U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                 MEM_writeLE24(op, cBlockHeader24);
@@ -2796,7 +2388,7 @@
     }   }
 
     if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
-    return op-ostart;
+    return (size_t)(op-ostart);
 }
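For reference, the 3-byte block header built by cBlockHeader24 above packs lastBlock into bit 0, the block type into bits 1-2, and cSize into bits 3-23, written little-endian. A standalone sketch (the bt_compressed value 2 matches the format spec but is hard-coded here as a stand-in):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned const lastBlock     = 1;
        unsigned const btCompressed  = 2;      /* stand-in for bt_compressed */
        unsigned const cSize         = 12345;  /* must fit in 21 bits */
        unsigned const hdr = lastBlock + (btCompressed << 1) + (cSize << 3);
        assert(hdr < (1u << 24));
        assert((hdr >> 3) == cSize);
        assert(((hdr >> 1) & 3) == btCompressed);
        assert((hdr & 1) == lastBlock);
        printf("block header = 0x%06X\n", hdr);
        return 0;
    }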
 
 
@@ -2811,11 +2403,11 @@
     BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
     U32   const fcsCode = params.fParams.contentSizeFlag ?
                      (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
-    BYTE  const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
     size_t pos=0;
 
     assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
-    if (dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX) return ERROR(dstSize_tooSmall);
+    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall);
     DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
                 !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
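The frameHeaderDescriptionByte assembled above packs the dictID size code into bits 0-1, the checksum flag into bit 2, the single-segment flag into bit 5, and the FCS field-size code into bits 6-7. A sketch with invented flag values:

    #include <stdio.h>

    int main(void)
    {
        unsigned const dictIDSizeCode = 2;  /* 2-byte dictID */
        unsigned const checksumFlag   = 1;
        unsigned const singleSegment  = 0;
        unsigned const fcsCode        = 1;  /* 2-byte frame content size */
        unsigned const fhd = dictIDSizeCode + (checksumFlag << 2)
                           + (singleSegment << 5) + (fcsCode << 6);
        printf("FHD byte = 0x%02X\n", fhd);  /* 0x46 */
        return 0;
    }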
 
@@ -2823,7 +2415,7 @@
         MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
         pos = 4;
     }
-    op[pos++] = frameHeaderDecriptionByte;
+    op[pos++] = frameHeaderDescriptionByte;
     if (!singleSegment) op[pos++] = windowLogByte;
     switch(dictIDSizeCode)
     {
@@ -2847,11 +2439,11 @@
 /* ZSTD_writeLastEmptyBlock() :
  * output an empty Block with end-of-frame mark to complete a frame
  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- *           or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
+ *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
  */
 size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
 {
-    if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
+    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall);
     {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
         MEM_writeLE24(dst, cBlockHeader24);
         return ZSTD_blockHeaderSize;
@@ -2860,10 +2452,9 @@
 
 size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
 {
-    if (cctx->stage != ZSTDcs_init)
-        return ERROR(stage_wrong);
-    if (cctx->appliedParams.ldmParams.enableLdm)
-        return ERROR(parameter_unsupported);
+    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong);
+    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
+                    parameter_unsupported);
     cctx->externSeqStore.seq = seq;
     cctx->externSeqStore.size = nbSeq;
     cctx->externSeqStore.capacity = nbSeq;
@@ -2882,12 +2473,14 @@
 
     DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
                 cctx->stage, (unsigned)srcSize);
-    if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */
+    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
+                    "missing init (ZSTD_compressBegin)");
 
     if (frame && (cctx->stage==ZSTDcs_init)) {
         fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
                                        cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
-        if (ZSTD_isError(fhSize)) return fhSize;
+        FORWARD_IF_ERROR(fhSize);
+        assert(fhSize <= dstCapacity);
         dstCapacity -= fhSize;
         dst = (char*)dst + fhSize;
         cctx->stage = ZSTDcs_ongoing;
@@ -2904,35 +2497,25 @@
 
     if (!frame) {
         /* overflow check and correction for block mode */
-        if (ZSTD_window_needOverflowCorrection(ms->window, (const char*)src + srcSize)) {
-            U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
-            U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, 1 << cctx->appliedParams.cParams.windowLog, src);
-            ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
-            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
-            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
-            ZSTD_reduceIndex(cctx, correction);
-            if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
-            else ms->nextToUpdate -= correction;
-            ms->loadedDictEnd = 0;
-            ms->dictMatchState = NULL;
-        }
+        ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, src, (BYTE const*)src + srcSize);
     }
 
     DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
     {   size_t const cSize = frame ?
                              ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                              ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
-        if (ZSTD_isError(cSize)) return cSize;
+        FORWARD_IF_ERROR(cSize);
         cctx->consumedSrcSize += srcSize;
         cctx->producedCSize += (cSize + fhSize);
         assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
         if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
             ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
-            if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
-                DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
-                    (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
-                return ERROR(srcSize_wrong);
-            }
+            RETURN_ERROR_IF(
+                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
+                srcSize_wrong,
+                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
+                (unsigned)cctx->pledgedSrcSizePlusOne-1,
+                (unsigned)cctx->consumedSrcSize);
         }
         return cSize + fhSize;
     }
@@ -2956,8 +2539,9 @@
 
 size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
-    if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
+    DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
+    { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
+      RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong); }
 
     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
 }
@@ -2970,7 +2554,7 @@
                                          const void* src, size_t srcSize,
                                          ZSTD_dictTableLoadMethod_e dtlm)
 {
-    const BYTE* const ip = (const BYTE*) src;
+    const BYTE* ip = (const BYTE*) src;
     const BYTE* const iend = ip + srcSize;
 
     ZSTD_window_update(&ms->window, src, srcSize);
@@ -2981,32 +2565,42 @@
 
     if (srcSize <= HASH_READ_SIZE) return 0;
 
-    switch(params->cParams.strategy)
-    {
-    case ZSTD_fast:
-        ZSTD_fillHashTable(ms, iend, dtlm);
-        break;
-    case ZSTD_dfast:
-        ZSTD_fillDoubleHashTable(ms, iend, dtlm);
-        break;
-
-    case ZSTD_greedy:
-    case ZSTD_lazy:
-    case ZSTD_lazy2:
-        if (srcSize >= HASH_READ_SIZE)
-            ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
-        break;
-
-    case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
-    case ZSTD_btopt:
-    case ZSTD_btultra:
-    case ZSTD_btultra2:
-        if (srcSize >= HASH_READ_SIZE)
-            ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
-        break;
-
-    default:
-        assert(0);  /* not possible : not a valid strategy id */
+    while (iend - ip > HASH_READ_SIZE) {
+        size_t const remaining = (size_t)(iend - ip);
+        size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
+        const BYTE* const ichunk = ip + chunk;
+
+        ZSTD_overflowCorrectIfNeeded(ms, params, ip, ichunk);
+
+        switch(params->cParams.strategy)
+        {
+        case ZSTD_fast:
+            ZSTD_fillHashTable(ms, ichunk, dtlm);
+            break;
+        case ZSTD_dfast:
+            ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
+            break;
+
+        case ZSTD_greedy:
+        case ZSTD_lazy:
+        case ZSTD_lazy2:
+            if (chunk >= HASH_READ_SIZE)
+                ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
+            break;
+
+        case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
+        case ZSTD_btopt:
+        case ZSTD_btultra:
+        case ZSTD_btultra2:
+            if (chunk >= HASH_READ_SIZE)
+                ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
+            break;
+
+        default:
+            assert(0);  /* not possible : not a valid strategy id */
+        }
+
+        ip = ichunk;
     }
 
     ms->nextToUpdate = (U32)(iend - ms->window.base);
@@ -3020,9 +2614,9 @@
    NOTE: This behavior is not standard and could be improved in the future. */
 static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
     U32 s;
-    if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
+    RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted);
     for (s = 0; s <= maxSymbolValue; ++s) {
-        if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted);
     }
     return 0;
 }
@@ -3060,53 +2654,56 @@
 
     {   unsigned maxSymbolValue = 255;
         size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
-        if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
-        if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted);
         dictPtr += hufHeaderSize;
     }
 
     {   unsigned offcodeLog;
         size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
-        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
         /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
         /* fill all offset symbols to avoid garbage at end of table */
-        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable,
-                                    offcodeNCount, MaxOff, offcodeLog,
-                                    workspace, HUF_WORKSPACE_SIZE),
-                 dictionary_corrupted);
+        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+                bs->entropy.fse.offcodeCTable,
+                offcodeNCount, MaxOff, offcodeLog,
+                workspace, HUF_WORKSPACE_SIZE)),
+            dictionary_corrupted);
         dictPtr += offcodeHeaderSize;
     }
 
     {   short matchlengthNCount[MaxML+1];
         unsigned matchlengthMaxValue = MaxML, matchlengthLog;
         size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
         /* Every match length code must have non-zero probability */
-        CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
-        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable,
-                                    matchlengthNCount, matchlengthMaxValue, matchlengthLog,
-                                    workspace, HUF_WORKSPACE_SIZE),
-                 dictionary_corrupted);
+        FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
+        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+                bs->entropy.fse.matchlengthCTable,
+                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
+                workspace, HUF_WORKSPACE_SIZE)),
+            dictionary_corrupted);
         dictPtr += matchlengthHeaderSize;
     }
 
     {   short litlengthNCount[MaxLL+1];
         unsigned litlengthMaxValue = MaxLL, litlengthLog;
         size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
         /* Every literal length code must have non-zero probability */
-        CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
-        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable,
-                                    litlengthNCount, litlengthMaxValue, litlengthLog,
-                                    workspace, HUF_WORKSPACE_SIZE),
-                 dictionary_corrupted);
+        FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
+        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+                bs->entropy.fse.litlengthCTable,
+                litlengthNCount, litlengthMaxValue, litlengthLog,
+                workspace, HUF_WORKSPACE_SIZE)),
+            dictionary_corrupted);
         dictPtr += litlengthHeaderSize;
     }
 
-    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
     bs->rep[0] = MEM_readLE32(dictPtr+0);
     bs->rep[1] = MEM_readLE32(dictPtr+4);
     bs->rep[2] = MEM_readLE32(dictPtr+8);
@@ -3119,19 +2716,19 @@
             offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
         }
         /* All offset values <= dictContentSize + 128 KB must be representable */
-        CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
+        FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
         /* All repCodes must be <= dictContentSize and != 0 */
         {   U32 u;
             for (u=0; u<3; u++) {
-                if (bs->rep[u] == 0) return ERROR(dictionary_corrupted);
-                if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
+                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted);
+                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted);
         }   }
 
         bs->entropy.huf.repeatMode = HUF_repeat_valid;
         bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
         bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
         bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
-        CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
+        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
         return dictID;
     }
 }
@@ -3161,8 +2758,7 @@
             DEBUGLOG(4, "raw content dictionary detected");
             return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
         }
-        if (dictContentType == ZSTD_dct_fullDict)
-            return ERROR(dictionary_wrong);
+        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);
         assert(0);   /* impossible */
     }
 
@@ -3189,14 +2785,13 @@
         return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
     }
 
-    CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                      ZSTDcrp_continue, zbuff) );
-    {
-        size_t const dictID = ZSTD_compress_insertDictionary(
+    {   size_t const dictID = ZSTD_compress_insertDictionary(
                 cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                 &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
-        if (ZSTD_isError(dictID)) return dictID;
-        assert(dictID <= (size_t)(U32)-1);
+        FORWARD_IF_ERROR(dictID);
+        assert(dictID <= UINT_MAX);
         cctx->dictID = (U32)dictID;
     }
     return 0;
@@ -3212,7 +2807,7 @@
 {
     DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
     /* compression parameters verification and optimization */
-    CHECK_F( ZSTD_checkCParams(params.cParams) );
+    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
     return ZSTD_compressBegin_internal(cctx,
                                        dict, dictSize, dictContentType, dtlm,
                                        cdict,
@@ -3260,12 +2855,12 @@
     size_t fhSize = 0;
 
     DEBUGLOG(4, "ZSTD_writeEpilogue");
-    if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */
+    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
 
     /* special case : empty frame */
     if (cctx->stage == ZSTDcs_init) {
         fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
-        if (ZSTD_isError(fhSize)) return fhSize;
+        FORWARD_IF_ERROR(fhSize);
         dstCapacity -= fhSize;
         op += fhSize;
         cctx->stage = ZSTDcs_ongoing;
@@ -3274,7 +2869,7 @@
     if (cctx->stage != ZSTDcs_ending) {
         /* write one last empty block, make it the "last" block */
         U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
-        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
+        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
         MEM_writeLE32(op, cBlockHeader24);
         op += ZSTD_blockHeaderSize;
         dstCapacity -= ZSTD_blockHeaderSize;
@@ -3282,7 +2877,7 @@
 
     if (cctx->appliedParams.fParams.checksumFlag) {
         U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
-        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
+        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
         DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
         MEM_writeLE32(op, checksum);
         op += 4;
@@ -3300,18 +2895,20 @@
     size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                 dst, dstCapacity, src, srcSize,
                                 1 /* frame mode */, 1 /* last chunk */);
-    if (ZSTD_isError(cSize)) return cSize;
+    FORWARD_IF_ERROR(cSize);
     endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
-    if (ZSTD_isError(endResult)) return endResult;
+    FORWARD_IF_ERROR(endResult);
     assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
     if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
         ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
         DEBUGLOG(4, "end of frame : controlling src size");
-        if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
-            DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
-                (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
-            return ERROR(srcSize_wrong);
-    }   }
+        RETURN_ERROR_IF(
+            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
+            srcSize_wrong,
+            "error : pledgedSrcSize = %u, while realSrcSize = %u",
+            (unsigned)cctx->pledgedSrcSizePlusOne-1,
+            (unsigned)cctx->consumedSrcSize);
+    }
     return cSize + endResult;
 }
 
@@ -3339,7 +2936,7 @@
                                ZSTD_parameters params)
 {
     DEBUGLOG(4, "ZSTD_compress_advanced");
-    CHECK_F(ZSTD_checkCParams(params.cParams));
+    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams));
     return ZSTD_compress_internal(cctx,
                                   dst, dstCapacity,
                                   src, srcSize,
@@ -3356,7 +2953,7 @@
         ZSTD_CCtx_params params)
 {
     DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
-    CHECK_F( ZSTD_compressBegin_internal(cctx,
+    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                          dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
                          params, srcSize, ZSTDb_not_buffered) );
     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
@@ -3440,17 +3037,17 @@
         void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
         cdict->dictBuffer = internalBuffer;
         cdict->dictContent = internalBuffer;
-        if (!internalBuffer) return ERROR(memory_allocation);
+        RETURN_ERROR_IF(!internalBuffer, memory_allocation);
         memcpy(internalBuffer, dictBuffer, dictSize);
     }
     cdict->dictContentSize = dictSize;
 
     /* Reset the state to no dictionary */
     ZSTD_reset_compressedBlockState(&cdict->cBlockState);
-    {   void* const end = ZSTD_reset_matchState(
-                &cdict->matchState,
-                (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
-                &cParams, ZSTDcrp_continue, /* forCCtx */ 0);
+    {   void* const end = ZSTD_reset_matchState(&cdict->matchState,
+                            (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
+                            &cParams,
+                            ZSTDcrp_continue, ZSTD_resetTarget_CDict);
         assert(end == (char*)cdict->workspace + cdict->workspaceSize);
         (void)end;
     }
@@ -3466,7 +3063,7 @@
                     &cdict->cBlockState, &cdict->matchState, &params,
                     cdict->dictContent, cdict->dictContentSize,
                     dictContentType, ZSTD_dtlm_full, cdict->workspace);
-            if (ZSTD_isError(dictID)) return dictID;
+            FORWARD_IF_ERROR(dictID);
             assert(dictID <= (size_t)(U32)-1);
             cdict->dictID = (U32)dictID;
         }
@@ -3596,7 +3193,7 @@
     ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
-    if (cdict==NULL) return ERROR(dictionary_wrong);
+    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong);
     {   ZSTD_CCtx_params params = cctx->requestedParams;
         params.cParams = ZSTD_getCParamsFromCDict(cdict);
         /* Increase window log to fit the entire dictionary and source if the
@@ -3632,7 +3229,7 @@
                                 const void* src, size_t srcSize,
                                 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
 {
-    CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
+    FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
 }
 
@@ -3700,7 +3297,7 @@
     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
 
-    CHECK_F( ZSTD_compressBegin_internal(cctx,
+    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                                          dict, dictSize, dictContentType, ZSTD_dtlm_fast,
                                          cdict,
                                          params, pledgedSrcSize,
@@ -3718,13 +3315,17 @@
 
 /* ZSTD_resetCStream():
  * pledgedSrcSize == 0 means "unknown" */
-size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
 {
-    ZSTD_CCtx_params params = zcs->requestedParams;
+    /* temporary : 0 interpreted as "unknown" during transition period.
+     * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+     * 0 will be interpreted as "empty" in the future.
+     */
+    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
     DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
-    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
-    params.fParams.contentSizeFlag = 1;
-    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+    return 0;
 }
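
With this change, ZSTD_resetCStream() becomes a thin shim over the newer parameter API. A sketch of the equivalent caller-side idiom (the helper name is illustrative, not part of zstd):

    #include <zstd.h>

    /* Reset the session, then pledge the source size explicitly.
     * Callers should pass ZSTD_CONTENTSIZE_UNKNOWN when the size is unknown;
     * a literal 0 will eventually mean "empty". */
    static size_t reset_for_next_frame(ZSTD_CCtx* cctx, unsigned long long srcSize)
    {
        size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
        if (ZSTD_isError(err)) return err;
        return ZSTD_CCtx_setPledgedSrcSize(cctx,
                    srcSize ? srcSize : ZSTD_CONTENTSIZE_UNKNOWN);
    }
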
 
 /*! ZSTD_initCStream_internal() :
@@ -3736,32 +3337,18 @@
                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_initCStream_internal");
-    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+    zcs->requestedParams = params;
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
-
-    if (dict && dictSize >= 8) {
-        DEBUGLOG(4, "loading dictionary of size %u", (unsigned)dictSize);
-        if (zcs->staticSize) {   /* static CCtx : never uses malloc */
-            /* incompatible with internal cdict creation */
-            return ERROR(memory_allocation);
-        }
-        ZSTD_freeCDict(zcs->cdictLocal);
-        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
-                                            ZSTD_dlm_byCopy, ZSTD_dct_auto,
-                                            params.cParams, zcs->customMem);
-        zcs->cdict = zcs->cdictLocal;
-        if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
+    if (dict) {
+        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
     } else {
-        if (cdict) {
-            params.cParams = ZSTD_getCParamsFromCDict(cdict);  /* cParams are enforced from cdict; it includes windowLog */
-        }
-        ZSTD_freeCDict(zcs->cdictLocal);
-        zcs->cdictLocal = NULL;
-        zcs->cdict = cdict;
+        /* Dictionary is cleared if !cdict */
+        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
     }
-
-    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
+    return 0;
 }
 
 /* ZSTD_initCStream_usingCDict_advanced() :
@@ -3772,22 +3359,20 @@
                                             unsigned long long pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
-    if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */
-    {   ZSTD_CCtx_params params = zcs->requestedParams;
-        params.cParams = ZSTD_getCParamsFromCDict(cdict);
-        params.fParams = fParams;
-        return ZSTD_initCStream_internal(zcs,
-                                NULL, 0, cdict,
-                                params, pledgedSrcSize);
-    }
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+    zcs->requestedParams.fParams = fParams;
+    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
+    return 0;
 }
 
 /* note : cdict must outlive compression session */
 size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
 {
-    ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksum */, 0 /* hideDictID */ };
     DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
-    return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);  /* note : will check that cdict != NULL */
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
+    return 0;
 }
 
 
@@ -3797,33 +3382,53 @@
  * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
                                  const void* dict, size_t dictSize,
-                                 ZSTD_parameters params, unsigned long long pledgedSrcSize)
+                                 ZSTD_parameters params, unsigned long long pss)
 {
-    DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
-                (unsigned)pledgedSrcSize, params.fParams.contentSizeFlag);
-    CHECK_F( ZSTD_checkCParams(params.cParams) );
-    if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
+    /* for compatibility with older programs relying on this behavior.
+     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
+     * This line will be removed in the future.
+     */
+    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+    DEBUGLOG(4, "ZSTD_initCStream_advanced");
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
     zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
-    return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, zcs->requestedParams, pledgedSrcSize);
+    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
+    return 0;
 }
 
 size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
 {
-    ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
-    return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, zcs->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN);
+    DEBUGLOG(4, "ZSTD_initCStream_usingDict");
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
+    return 0;
 }
 
 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
 {
-    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;  /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
-    ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
-    return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, zcs->requestedParams, pledgedSrcSize);
+    /* temporary : 0 interpreted as "unknown" during transition period.
+     * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+     * 0 will be interpreted as "empty" in the future.
+     */
+    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+    DEBUGLOG(4, "ZSTD_initCStream_srcSize");
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+    return 0;
 }
 
 size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
 {
     DEBUGLOG(4, "ZSTD_initCStream");
-    return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN);
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+    return 0;
 }
 
 /*======   Compression   ======*/
@@ -3847,10 +3452,10 @@
  *  internal function for all *compressStream*() variants
- *  non-static, because can be called from zstdmt_compress.c
+ *  static : only called from within this file
  * @return : hint size for next input */
-size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
-                                   ZSTD_outBuffer* output,
-                                   ZSTD_inBuffer* input,
-                                   ZSTD_EndDirective const flushMode)
+static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+                                          ZSTD_outBuffer* output,
+                                          ZSTD_inBuffer* input,
+                                          ZSTD_EndDirective const flushMode)
 {
     const char* const istart = (const char*)input->src;
     const char* const iend = istart + input->size;
@@ -3873,8 +3478,7 @@
         switch(zcs->streamStage)
         {
         case zcss_init:
-            /* call ZSTD_initCStream() first ! */
-            return ERROR(init_missing);
+            RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
 
         case zcss_load:
             if ( (flushMode == ZSTD_e_end)
@@ -3884,7 +3488,7 @@
                 size_t const cSize = ZSTD_compressEnd(zcs,
                                                 op, oend-op, ip, iend-ip);
                 DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
-                if (ZSTD_isError(cSize)) return cSize;
+                FORWARD_IF_ERROR(cSize);
                 ip = iend;
                 op += cSize;
                 zcs->frameEnded = 1;
@@ -3925,7 +3529,7 @@
                                     zcs->inBuff + zcs->inToCompress, iSize) :
                         ZSTD_compressContinue(zcs, cDst, oSize,
                                     zcs->inBuff + zcs->inToCompress, iSize);
-                if (ZSTD_isError(cSize)) return cSize;
+                FORWARD_IF_ERROR(cSize);
                 zcs->frameEnded = lastBlock;
                 /* prepare next block */
                 zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
@@ -3953,7 +3557,7 @@
         case zcss_flush:
             DEBUGLOG(5, "flush stage");
             {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
-                size_t const flushed = ZSTD_limitCopy(op, oend-op,
+                size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
                             zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
                 DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
                             (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
@@ -4001,7 +3605,7 @@
 
 size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
 {
-    CHECK_F( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
+    FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
     return ZSTD_nextInputSizeHint_MTorST(zcs);
 }
 
@@ -4013,14 +3617,15 @@
 {
     DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
     /* check conditions */
-    if (output->pos > output->size) return ERROR(GENERIC);
-    if (input->pos  > input->size)  return ERROR(GENERIC);
+    RETURN_ERROR_IF(output->pos > output->size, GENERIC);
+    RETURN_ERROR_IF(input->pos  > input->size, GENERIC);
     assert(cctx!=NULL);
 
     /* transparent initialization stage */
     if (cctx->streamStage == zcss_init) {
         ZSTD_CCtx_params params = cctx->requestedParams;
         ZSTD_prefixDict const prefixDict = cctx->prefixDict;
+        FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) ); /* Init the local dict if present. */
         memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
         assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
         DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
@@ -4039,11 +3644,11 @@
                 DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
                             params.nbWorkers);
                 cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
-                if (cctx->mtctx == NULL) return ERROR(memory_allocation);
+                RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation);
             }
             /* mt compression */
             DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
-            CHECK_F( ZSTDMT_initCStream_internal(
+            FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
                         cctx->mtctx,
                         prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
                         cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
@@ -4051,7 +3656,7 @@
             cctx->appliedParams.nbWorkers = params.nbWorkers;
         } else
 #endif
-        {   CHECK_F( ZSTD_resetCStream_internal(cctx,
+        {   FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx,
                             prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
                             cctx->cdict,
                             params, cctx->pledgedSrcSizePlusOne-1) );
@@ -4063,20 +3668,30 @@
     /* compression stage */
 #ifdef ZSTD_MULTITHREAD
     if (cctx->appliedParams.nbWorkers > 0) {
+        int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
+        size_t flushMin;
+        assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */);
         if (cctx->cParamsChanged) {
             ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
             cctx->cParamsChanged = 0;
         }
-        {   size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
+        do {
+            flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
             if ( ZSTD_isError(flushMin)
               || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
                 ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
             }
-            DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
-            return flushMin;
-    }   }
+            FORWARD_IF_ERROR(flushMin);
+        } while (forceMaxProgress && flushMin != 0 && output->pos < output->size);
+        DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
+        /* Either we don't require maximum forward progress, we've finished the
+         * flush, or we are out of output space.
+         */
+        assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size);
+        return flushMin;
+    }
 #endif
-    CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
+    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) );
     DEBUGLOG(5, "completed ZSTD_compressStream2");
     return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
 }
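
The do/while loop added above guarantees maximum forward progress for flush and end directives in multi-threaded mode: each call either completes the requested work or fills the output buffer. On the caller side this supports the usual drain loop, sketched here with simplified error handling:

    #include <stdio.h>
    #include <zstd.h>

    /* Finish the current frame: loop until ZSTD_compressStream2() reports
     * 0 bytes remaining, writing each filled output buffer to `out`. */
    static int finish_frame(ZSTD_CCtx* cctx, FILE* out)
    {
        char buf[1 << 16];
        ZSTD_inBuffer input = { NULL, 0, 0 };   /* no further input */
        size_t remaining;
        do {
            ZSTD_outBuffer output = { buf, sizeof buf, 0 };
            remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
            if (ZSTD_isError(remaining)) return -1;
            if (fwrite(buf, 1, output.pos, out) != output.pos) return -1;
        } while (remaining != 0);
        return 0;
    }
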
@@ -4107,10 +3722,10 @@
                                         dst, dstCapacity, &oPos,
                                         src, srcSize, &iPos,
                                         ZSTD_e_end);
-        if (ZSTD_isError(result)) return result;
+        FORWARD_IF_ERROR(result);
         if (result != 0) {  /* compression not completed, due to lack of output space */
             assert(oPos == dstCapacity);
-            return ERROR(dstSize_tooSmall);
+            RETURN_ERROR(dstSize_tooSmall);
         }
         assert(iPos == srcSize);   /* all input is expected consumed */
         return oPos;
@@ -4132,11 +3747,11 @@
 {
     ZSTD_inBuffer input = { NULL, 0, 0 };
     size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
-    CHECK_F( remainingToFlush );
+    FORWARD_IF_ERROR( remainingToFlush );
     if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */
     /* single thread mode : attempt to calculate remaining to flush more precisely */
     {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
-        size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
+        size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
         size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
         DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
         return toFlush;
@@ -4151,7 +3766,7 @@
 int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
 
 static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
-{   /* "default" - guarantees a monotonically increasing memory budget */
+{   /* "default" - for any srcSize > 256 KB */
     /* W,  C,  H,  S,  L, TL, strat */
     { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
     { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
@@ -4258,13 +3873,13 @@
 };
 
 /*! ZSTD_getCParams() :
-*  @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
-*   Size values are optional, provide 0 if not known or unused */
+ * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
+ *  Size values are optional, provide 0 if not known or unused */
 ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
 {
     size_t const addedSize = srcSizeHint ? 0 : 500;
-    U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1;
-    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);   /* intentional underflow for srcSizeHint == 0 */
+    U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : ZSTD_CONTENTSIZE_UNKNOWN;  /* intentional overflow for srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN */
+    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
     int row = compressionLevel;
     DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel);
     if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
@@ -4272,13 +3887,14 @@
     if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
     {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
         if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel);   /* acceleration factor */
-        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);
+        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);               /* refine parameters based on srcSize & dictSize */
     }
 }
 
 /*! ZSTD_getParams() :
-*   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
-*   All fields of `ZSTD_frameParameters` are set to default (0) */
+ *  same idea as ZSTD_getCParams()
+ * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
+ *  Fields of `ZSTD_frameParameters` are set to default values */
 ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
     ZSTD_parameters params;
     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
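
The tableID computation in ZSTD_getCParams() above sums three boolean comparisons, so the result directly selects one of the four parameter tables. A standalone check of that arithmetic:

    #include <assert.h>

    static unsigned table_id(unsigned long long rSize)
    {
        return (rSize <= 256*1024) + (rSize <= 128*1024) + (rSize <= 16*1024);
    }

    int main(void)
    {
        assert(table_id(1ULL << 20) == 0);   /* > 256 KB : "default" table */
        assert(table_id(200*1024)   == 1);   /* <= 256 KB */
        assert(table_id(100*1024)   == 2);   /* <= 128 KB */
        assert(table_id(8*1024)     == 3);   /* <= 16 KB  */
        return 0;
    }
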
--- a/contrib/python-zstandard/zstd/compress/zstd_compress_internal.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress_internal.h	Mon Oct 21 11:09:48 2019 -0400
@@ -33,13 +33,13 @@
 ***************************************/
 #define kSearchStrength      8
 #define HASH_READ_SIZE       8
-#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index 1 now means "unsorted".
+#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
                                        It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
                                        It's not a big deal though : candidate will just be sorted again.
-                                       Additionnally, candidate position 1 will be lost.
+                                       Additionally, candidate position 1 will be lost.
                                        But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
-                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be misdhandled after table re-use with a different strategy
-                                       Constant required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
+                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
+                                       This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
 
 
 /*-*************************************
@@ -55,6 +55,14 @@
 } ZSTD_prefixDict;
 
 typedef struct {
+    void* dictBuffer;
+    void const* dict;
+    size_t dictSize;
+    ZSTD_dictContentType_e dictContentType;
+    ZSTD_CDict* cdict;
+} ZSTD_localDict;
+
+typedef struct {
     U32 CTable[HUF_CTABLE_SIZE_U32(255)];
     HUF_repeat repeatMode;
 } ZSTD_hufCTables_t;
@@ -107,6 +115,7 @@
     U32  offCodeSumBasePrice;    /* to compare to log2(offreq)  */
     ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
     const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
+    ZSTD_literalCompressionMode_e literalCompressionMode;
 } optState_t;
 
 typedef struct {
@@ -119,21 +128,26 @@
     BYTE const* base;       /* All regular indexes relative to this position */
     BYTE const* dictBase;   /* extDict indexes relative to this position */
     U32 dictLimit;          /* below that point, need extDict */
-    U32 lowLimit;           /* below that point, no more data */
+    U32 lowLimit;           /* below that point, no more valid data */
 } ZSTD_window_t;
 
 typedef struct ZSTD_matchState_t ZSTD_matchState_t;
 struct ZSTD_matchState_t {
     ZSTD_window_t window;   /* State for window round buffer management */
-    U32 loadedDictEnd;      /* index of end of dictionary */
+    U32 loadedDictEnd;      /* index of end of dictionary, within the context's referential.
+                             * When loadedDictEnd != 0, a dictionary is in use, and still valid.
+                             * This relies on a mechanism to set loadedDictEnd=0 when the dictionary is no longer within distance.
+                             * Such a mechanism is provided by ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
+                             * When the dict referential is copied into the active context (i.e. not attached),
+                             * loadedDictEnd == dictSize, since the referential starts from zero.
+                             */
     U32 nextToUpdate;       /* index from which to continue table update */
-    U32 nextToUpdate3;      /* index from which to continue table update */
-    U32 hashLog3;           /* dispatch table : larger == faster, more memory */
+    U32 hashLog3;           /* dispatch table for matches of len==3 : larger == faster, more memory */
     U32* hashTable;
     U32* hashTable3;
     U32* chainTable;
     optState_t opt;         /* optimal parser state */
-    const ZSTD_matchState_t * dictMatchState;
+    const ZSTD_matchState_t* dictMatchState;
     ZSTD_compressionParameters cParams;
 };
 
@@ -186,8 +200,12 @@
     int compressionLevel;
     int forceWindow;           /* force back-references to respect limit of
                                 * 1<<wLog, even for dictionary */
+    size_t targetCBlockSize;   /* Tries to keep the compressed block size close to targetCBlockSize.
+                                * No target when targetCBlockSize == 0.
+                                * There is no guarantee on the compressed block size. */
 
     ZSTD_dictAttachPref_e attachDictPref;
+    ZSTD_literalCompressionMode_e literalCompressionMode;
 
     /* Multithreading: used to pass parameters to mtctx */
     int nbWorkers;
@@ -243,7 +261,7 @@
     U32    frameEnded;
 
     /* Dictionary */
-    ZSTD_CDict* cdictLocal;
+    ZSTD_localDict localDict;
     const ZSTD_CDict* cdict;
     ZSTD_prefixDict prefixDict;   /* single-usage dictionary */
 
@@ -295,6 +313,30 @@
     return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
 }
 
+/* ZSTD_cParam_withinBounds:
+ * @return 1 if value is within cParam bounds,
+ * 0 otherwise */
+MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
+{
+    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+    if (ZSTD_isError(bounds.error)) return 0;
+    if (value < bounds.lowerBound) return 0;
+    if (value > bounds.upperBound) return 0;
+    return 1;
+}
+
+/* ZSTD_minGain() :
+ * minimum compression required
+ * to generate a compressed block or a compressed literals section.
+ * note : use same formula for both situations */
+MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
+{
+    U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
+    ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
+    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+    return (srcSize >> minlog) + 2;
+}
+
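
A quick worked example of the ZSTD_minGain() formula above: for strategies below ZSTD_btultra, minlog is 6, so compression is only considered worthwhile if it saves at least (srcSize >> 6) + 2 bytes.

    #include <assert.h>

    int main(void)
    {
        unsigned const srcSize = 64 * 1024;   /* one 64 KB block */
        unsigned const minlog  = 6;           /* strat < ZSTD_btultra */
        /* the block must shrink by at least 1026 bytes to be emitted
         * as compressed rather than stored raw */
        assert((srcSize >> minlog) + 2 == 1026);
        return 0;
    }
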
 /*! ZSTD_storeSeq() :
  *  Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
  *  `offsetCode` : distance to match + 3 (values 1-3 are repCodes).
@@ -314,7 +356,7 @@
     /* copy Literals */
     assert(seqStorePtr->maxNbLit <= 128 KB);
     assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
-    ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
+    ZSTD_wildcopy(seqStorePtr->lit, literals, (ptrdiff_t)litLength, ZSTD_no_overlap);
     seqStorePtr->lit += litLength;
 
     /* literal Length */
@@ -554,6 +596,9 @@
 /*-*************************************
 *  Round buffer management
 ***************************************/
+#if (ZSTD_WINDOWLOG_MAX_64 > 31)
+# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
+#endif
 /* Max current allowed */
 #define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
 /* Maximum chunk size before overflow correction needs to be called again */
@@ -665,31 +710,49 @@
  * Updates lowLimit so that:
  *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
  *
- * This allows a simple check that index >= lowLimit to see if index is valid.
- * This must be called before a block compression call, with srcEnd as the block
- * source end.
+ * It ensures index is valid as long as index >= lowLimit.
+ * This must be called before a block compression call.
+ *
+ * loadedDictEnd is only defined if a dictionary is in use for the current compression.
+ * As the name implies, loadedDictEnd represents the index at the end of the dictionary.
+ * The value lies within the context's referential; it can be directly compared to blockEndIdx.
  *
- * If loadedDictEndPtr is not NULL, we set it to zero once we update lowLimit.
- * This is because dictionaries are allowed to be referenced as long as the last
- * byte of the dictionary is in the window, but once they are out of range,
- * they cannot be referenced. If loadedDictEndPtr is NULL, we use
- * loadedDictEnd == 0.
+ * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
+ * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
+ * This is because dictionaries are allowed to be referenced fully
+ * as long as the last byte of the dictionary is in the window.
+ * Once input has progressed beyond the window size, the dictionary cannot be referenced anymore.
  *
- * In normal dict mode, the dict is between lowLimit and dictLimit. In
- * dictMatchState mode, lowLimit and dictLimit are the same, and the dictionary
- * is below them. forceWindow and dictMatchState are therefore incompatible.
+ * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
+ * In dictMatchState mode, lowLimit and dictLimit are the same,
+ * and the dictionary is below them.
+ * forceWindow and dictMatchState are therefore incompatible.
  */
 MEM_STATIC void
 ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
-                           void const* srcEnd,
-                           U32 maxDist,
-                           U32* loadedDictEndPtr,
+                     const void* blockEnd,
+                           U32   maxDist,
+                           U32*  loadedDictEndPtr,
                      const ZSTD_matchState_t** dictMatchStatePtr)
 {
-    U32 const blockEndIdx = (U32)((BYTE const*)srcEnd - window->base);
-    U32 loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
-    DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u",
-                (unsigned)blockEndIdx, (unsigned)maxDist);
+    U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+    U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
+    DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+                (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+
+    /* - When there is no dictionary : loadedDictEnd == 0.
+         In that case, the test (blockEndIdx > maxDist) merely avoids
+         overflowing the next operation `newLowLimit = blockEndIdx - maxDist`.
+       - When there is a standard dictionary :
+         the index referential is copied from the dictionary,
+         which means it starts from 0.
+         In that case, loadedDictEnd == dictSize,
+         and it makes sense to compare `blockEndIdx > maxDist + dictSize`,
+         since `blockEndIdx` also starts from zero.
+       - When there is an attached dictionary :
+         loadedDictEnd is expressed within the referential of the context,
+         so it can be directly compared against blockEndIdx.
+    */
     if (blockEndIdx > maxDist + loadedDictEnd) {
         U32 const newLowLimit = blockEndIdx - maxDist;
         if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
@@ -698,11 +761,45 @@
                         (unsigned)window->dictLimit, (unsigned)window->lowLimit);
             window->dictLimit = window->lowLimit;
         }
-        if (loadedDictEndPtr)
+        /* On reaching window size, dictionaries are invalidated */
+        if (loadedDictEndPtr) *loadedDictEndPtr = 0;
+        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
+    }
+}
+
+/* Similar to ZSTD_window_enforceMaxDist(),
+ * but only invalidates the dictionary
+ * when input progresses beyond the window size.
+ * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non-NULL)
+ *              loadedDictEnd uses the same referential as window->base
+ *              maxDist is the window size */
+MEM_STATIC void
+ZSTD_checkDictValidity(const ZSTD_window_t* window,
+                       const void* blockEnd,
+                             U32   maxDist,
+                             U32*  loadedDictEndPtr,
+                       const ZSTD_matchState_t** dictMatchStatePtr)
+{
+    assert(loadedDictEndPtr != NULL);
+    assert(dictMatchStatePtr != NULL);
+    {   U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+        U32 const loadedDictEnd = *loadedDictEndPtr;
+        DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+                    (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+        assert(blockEndIdx >= loadedDictEnd);
+
+        if (blockEndIdx > loadedDictEnd + maxDist) {
+            /* On reaching the window size, dictionaries are invalidated.
+             * For simplicity, if the window size is reached anywhere within the next block,
+             * the dictionary is invalidated for the full block.
+             */
+            DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
             *loadedDictEndPtr = 0;
-        if (dictMatchStatePtr)
             *dictMatchStatePtr = NULL;
-    }
+        } else {
+            if (*loadedDictEndPtr != 0) {
+                DEBUGLOG(6, "dictionary considered valid for current block");
+    }   }   }
 }
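
The rule encoded by ZSTD_checkDictValidity() above reduces to a single comparison: the dictionary remains usable until the block's end index moves more than one window size past the dictionary's end. A small sketch of that condition:

    #include <assert.h>

    static int dict_still_valid(unsigned blockEndIdx,
                                unsigned loadedDictEnd, unsigned maxDist)
    {
        return blockEndIdx <= loadedDictEnd + maxDist;
    }

    int main(void)
    {
        unsigned const maxDist = 1u << 17;                  /* 128 KB window */
        assert( dict_still_valid(100000, 4096, maxDist));   /* still in range */
        assert(!dict_still_valid(4096 + maxDist + 1, 4096, maxDist));
        return 0;
    }
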
 
 /**
@@ -744,6 +841,17 @@
     return contiguous;
 }
 
+MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
+{
+    U32    const maxDistance = 1U << windowLog;
+    U32    const lowestValid = ms->window.lowLimit;
+    U32    const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
+    U32    const isDictionary = (ms->loadedDictEnd != 0);
+    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
+    return matchLowest;
+}
+
+
 
 /* debug functions */
 #if (DEBUGLEVEL>=2)
@@ -806,13 +914,6 @@
 
 void ZSTD_resetSeqStore(seqStore_t* ssPtr);
 
-/*! ZSTD_compressStream_generic() :
- *  Private use only. To be called from zstdmt_compress.c in single-thread mode. */
-size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
-                                   ZSTD_outBuffer* output,
-                                   ZSTD_inBuffer* input,
-                                   ZSTD_EndDirective const flushMode);
-
 /*! ZSTD_getCParamsFromCDict() :
  *  as the name implies */
 ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
@@ -839,7 +940,7 @@
 /* ZSTD_writeLastEmptyBlock() :
  * output an empty Block with end-of-frame mark to complete a frame
  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- *           or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
+ *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
  */
 size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress_literals.c	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*-*************************************
+ *  Dependencies
+ ***************************************/
+#include "zstd_compress_literals.h"
+
+size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    BYTE* const ostart = (BYTE* const)dst;
+    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
+
+    switch(flSize)
+    {
+        case 1: /* 2 - 1 - 5 */
+            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
+            break;
+        case 2: /* 2 - 2 - 12 */
+            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
+            break;
+        case 3: /* 2 - 2 - 20 */
+            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
+            break;
+        default:   /* not necessary : flSize is {1,2,3} */
+            assert(0);
+    }
+
+    memcpy(ostart + flSize, src, srcSize);
+    return srcSize + flSize;
+}
+
+size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    BYTE* const ostart = (BYTE* const)dst;
+    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
+
+    switch(flSize)
+    {
+        case 1: /* 2 - 1 - 5 */
+            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
+            break;
+        case 2: /* 2 - 2 - 12 */
+            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
+            break;
+        case 3: /* 2 - 2 - 20 */
+            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
+            break;
+        default:   /* not necessary : flSize is {1,2,3} */
+            assert(0);
+    }
+
+    ostart[flSize] = *(const BYTE*)src;
+    return flSize+1;
+}
+
+size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+                              ZSTD_hufCTables_t* nextHuf,
+                              ZSTD_strategy strategy, int disableLiteralCompression,
+                              void* dst, size_t dstCapacity,
+                        const void* src, size_t srcSize,
+                              void* workspace, size_t wkspSize,
+                        const int bmi2)
+{
+    size_t const minGain = ZSTD_minGain(srcSize, strategy);
+    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
+    BYTE*  const ostart = (BYTE*)dst;
+    U32 singleStream = srcSize < 256;
+    symbolEncodingType_e hType = set_compressed;
+    size_t cLitSize;
+
+    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
+                disableLiteralCompression);
+
+    /* Prepare nextEntropy assuming reusing the existing table */
+    memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+
+    if (disableLiteralCompression)
+        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+
+    /* small ? don't even attempt compression (speed opt) */
+#   define COMPRESS_LITERALS_SIZE_MIN 63
+    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+    }
+
+    RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
+    {   HUF_repeat repeat = prevHuf->repeatMode;
+        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
+        cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
+                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
+                                : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
+                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
+        if (repeat != HUF_repeat_none) {
+            /* reused the existing table */
+            hType = set_repeat;
+        }
+    }
+
+    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
+        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+    }
+    if (cLitSize==1) {
+        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+    }
+
+    if (hType == set_compressed) {
+        /* using a newly constructed table */
+        nextHuf->repeatMode = HUF_repeat_check;
+    }
+
+    /* Build header */
+    switch(lhSize)
+    {
+    case 3: /* 2 - 2 - 10 - 10 */
+        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+            MEM_writeLE24(ostart, lhc);
+            break;
+        }
+    case 4: /* 2 - 2 - 14 - 14 */
+        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
+            MEM_writeLE32(ostart, lhc);
+            break;
+        }
+    case 5: /* 2 - 2 - 18 - 18 */
+        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
+            MEM_writeLE32(ostart, lhc);
+            ostart[4] = (BYTE)(cLitSize >> 10);
+            break;
+        }
+    default:  /* not possible : lhSize is {3,4,5} */
+        assert(0);
+    }
+    return lhSize+cLitSize;
+}
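
The flSize switches above pack the literals-section header into 1, 2 or 3 bytes; the "2 - 1 - 5" style comments give the bit split (block type / size format / regenerated size). A sketch decoding the 1-byte raw-literals header under that layout:

    #include <assert.h>

    typedef unsigned char BYTE;

    int main(void)
    {
        unsigned const set_basic = 0;        /* raw literals block type */
        unsigned const srcSize   = 10;       /* must fit in 5 bits (<= 31) */
        BYTE const header = (BYTE)(set_basic + (srcSize << 3));
        assert((header & 3) == set_basic);   /* 2 bits : block type */
        assert(((header >> 2) & 1) == 0);    /* 1 bit  : size format */
        assert((header >> 3) == srcSize);    /* 5 bits : regenerated size */
        return 0;
    }
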
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress_literals.h	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPRESS_LITERALS_H
+#define ZSTD_COMPRESS_LITERALS_H
+
+#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */
+
+
+size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+                              ZSTD_hufCTables_t* nextHuf,
+                              ZSTD_strategy strategy, int disableLiteralCompression,
+                              void* dst, size_t dstCapacity,
+                        const void* src, size_t srcSize,
+                              void* workspace, size_t wkspSize,
+                        const int bmi2);
+
+#endif /* ZSTD_COMPRESS_LITERALS_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress_sequences.c	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*-*************************************
+ *  Dependencies
+ ***************************************/
+#include "zstd_compress_sequences.h"
+
+/**
+ * -log2(x / 256) lookup table for x in [0, 256).
+ * If x == 0: Return 0
+ * Else: Return floor(-log2(x / 256) * 256)
+ */
+static unsigned const kInverseProbabilityLog256[256] = {
+    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
+    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
+    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
+    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
+    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
+    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
+    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
+    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
+    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
+    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
+    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
+    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
+    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
+    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
+    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
+    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
+    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
+    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
+    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
+    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
+    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
+    5,    4,    2,    1,
+};
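
The table above stores floor(-log2(x/256) * 256): the cost, in 1/256ths of a bit, of a symbol whose normalized probability is x/256. A sketch verifying a couple of entries (C99, link with -lm):

    #include <assert.h>
    #include <math.h>

    int main(void)
    {
        /* probability 128/256 = 1/2 costs exactly 1 bit = 256/256ths */
        assert((unsigned)(-log2(128.0 / 256.0) * 256.0) == 256);
        /* probability 64/256 = 1/4 costs exactly 2 bits */
        assert((unsigned)(-log2(64.0 / 256.0) * 256.0) == 512);
        return 0;
    }
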
+
+static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
+  void const* ptr = ctable;
+  U16 const* u16ptr = (U16 const*)ptr;
+  U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
+  return maxSymbolValue;
+}
+
+/**
+ * Returns the cost in bytes of encoding the normalized count header.
+ * Returns an error if any of the helper functions return an error.
+ */
+static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
+                              size_t const nbSeq, unsigned const FSELog)
+{
+    BYTE wksp[FSE_NCOUNTBOUND];
+    S16 norm[MaxSeq + 1];
+    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
+    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
+}
+
+/**
+ * Returns the cost in bits of encoding the distribution described by count
+ * using the entropy bound.
+ */
+static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
+{
+    unsigned cost = 0;
+    unsigned s;
+    for (s = 0; s <= max; ++s) {
+        unsigned norm = (unsigned)((256 * count[s]) / total);
+        if (count[s] != 0 && norm == 0)
+            norm = 1;
+        assert(count[s] < total);
+        cost += count[s] * kInverseProbabilityLog256[norm];
+    }
+    return cost >> 8;
+}
+
+/**
+ * Returns the cost in bits of encoding the distribution in count using ctable.
+ * Returns an error if ctable cannot represent all the symbols in count.
+ */
+static size_t ZSTD_fseBitCost(
+    FSE_CTable const* ctable,
+    unsigned const* count,
+    unsigned const max)
+{
+    unsigned const kAccuracyLog = 8;
+    size_t cost = 0;
+    unsigned s;
+    FSE_CState_t cstate;
+    FSE_initCState(&cstate, ctable);
+    RETURN_ERROR_IF(ZSTD_getFSEMaxSymbolValue(ctable) < max, GENERIC,
+                    "Repeat FSE_CTable has maxSymbolValue %u < %u",
+                    ZSTD_getFSEMaxSymbolValue(ctable), max);
+    for (s = 0; s <= max; ++s) {
+        unsigned const tableLog = cstate.stateLog;
+        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
+        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
+        if (count[s] == 0)
+            continue;
+        RETURN_ERROR_IF(bitCost >= badCost, GENERIC,
+                        "Repeat FSE_CTable has Prob[%u] == 0", s);
+        cost += count[s] * bitCost;
+    }
+    return cost >> kAccuracyLog;
+}
+
+/**
+ * Returns the cost in bits of encoding the distribution in count using the
+ * table described by norm. The max symbol supported by norm is assumed >= max.
+ * norm must be valid for every symbol with non-zero probability in count.
+ */
+static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+                                    unsigned const* count, unsigned const max)
+{
+    unsigned const shift = 8 - accuracyLog;
+    size_t cost = 0;
+    unsigned s;
+    assert(accuracyLog <= 8);
+    for (s = 0; s <= max; ++s) {
+        unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
+        unsigned const norm256 = normAcc << shift;
+        assert(norm256 > 0);
+        assert(norm256 < 256);
+        cost += count[s] * kInverseProbabilityLog256[norm256];
+    }
+    return cost >> 8;
+}
+
+symbolEncodingType_e
+ZSTD_selectEncodingType(
+        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+        FSE_CTable const* prevCTable,
+        short const* defaultNorm, U32 defaultNormLog,
+        ZSTD_defaultPolicy_e const isDefaultAllowed,
+        ZSTD_strategy const strategy)
+{
+    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
+    if (mostFrequent == nbSeq) {
+        *repeatMode = FSE_repeat_none;
+        if (isDefaultAllowed && nbSeq <= 2) {
+            /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
+             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
+             * If basic encoding isn't possible, always choose RLE.
+             */
+            DEBUGLOG(5, "Selected set_basic");
+            return set_basic;
+        }
+        DEBUGLOG(5, "Selected set_rle");
+        return set_rle;
+    }
+    if (strategy < ZSTD_lazy) {
+        if (isDefaultAllowed) {
+            size_t const staticFse_nbSeq_max = 1000;
+            size_t const mult = 10 - strategy;
+            size_t const baseLog = 3;
+            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
+            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
+            assert(mult <= 9 && mult >= 7);
+            if ( (*repeatMode == FSE_repeat_valid)
+              && (nbSeq < staticFse_nbSeq_max) ) {
+                DEBUGLOG(5, "Selected set_repeat");
+                return set_repeat;
+            }
+            if ( (nbSeq < dynamicFse_nbSeq_min)
+              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
+                DEBUGLOG(5, "Selected set_basic");
+                /* The format allows default tables to be repeated, but it isn't useful.
+                 * When using simple heuristics to select encoding type, we don't want
+                 * to confuse these tables with dictionaries. When running more careful
+                 * analysis, we don't need to waste time checking both repeating tables
+                 * and default tables.
+                 */
+                *repeatMode = FSE_repeat_none;
+                return set_basic;
+            }
+        }
+    } else {
+        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
+        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
+        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
+        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
+
+        if (isDefaultAllowed) {
+            assert(!ZSTD_isError(basicCost));
+            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
+        }
+        assert(!ZSTD_isError(NCountCost));
+        assert(compressedCost < ERROR(maxCode));
+        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
+                    (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
+        if (basicCost <= repeatCost && basicCost <= compressedCost) {
+            DEBUGLOG(5, "Selected set_basic");
+            assert(isDefaultAllowed);
+            *repeatMode = FSE_repeat_none;
+            return set_basic;
+        }
+        if (repeatCost <= compressedCost) {
+            DEBUGLOG(5, "Selected set_repeat");
+            assert(!ZSTD_isError(repeatCost));
+            return set_repeat;
+        }
+        assert(compressedCost < basicCost && compressedCost < repeatCost);
+    }
+    DEBUGLOG(5, "Selected set_compressed");
+    *repeatMode = FSE_repeat_check;
+    return set_compressed;
+}
+
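
For the higher strategies, ZSTD_selectEncodingType() above picks whichever of the basic / repeat / freshly-built tables is estimated cheapest, with the compressed option also paying for its normalized-count header. A simplified sketch of that comparison (names and simplifications are mine, not zstd's):

    #include <stddef.h>

    typedef enum { enc_basic, enc_repeat, enc_compressed } enc_e;

    static enc_e pick_encoding(size_t basicCost, size_t repeatCost,
                               size_t headerBytes, size_t entropyBits)
    {
        /* the NCount header is charged in bits alongside the entropy bound,
         * mirroring `(NCountCost << 3) + ZSTD_entropyCost(...)` above */
        size_t const compressedCost = (headerBytes << 3) + entropyBits;
        if (basicCost <= repeatCost && basicCost <= compressedCost)
            return enc_basic;
        if (repeatCost <= compressedCost)
            return enc_repeat;
        return enc_compressed;
    }
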
+size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+                unsigned* count, U32 max,
+                const BYTE* codeTable, size_t nbSeq,
+                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+                const FSE_CTable* prevCTable, size_t prevCTableSize,
+                void* workspace, size_t workspaceSize)
+{
+    BYTE* op = (BYTE*)dst;
+    const BYTE* const oend = op + dstCapacity;
+    DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
+
+    switch (type) {
+    case set_rle:
+        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));
+        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);
+        *op = codeTable[0];
+        return 1;
+    case set_repeat:
+        memcpy(nextCTable, prevCTable, prevCTableSize);
+        return 0;
+    case set_basic:
+        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));  /* note : could be pre-calculated */
+        return 0;
+    case set_compressed: {
+        S16 norm[MaxSeq + 1];
+        size_t nbSeq_1 = nbSeq;
+        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+        if (count[codeTable[nbSeq-1]] > 1) {
+            count[codeTable[nbSeq-1]]--;
+            nbSeq_1--;
+        }
+        assert(nbSeq_1 > 1);
+        FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
+        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
+            FORWARD_IF_ERROR(NCountSize);
+            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
+            return NCountSize;
+        }
+    }
+    default: assert(0); RETURN_ERROR(GENERIC);
+    }
+}
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_encodeSequences_body(
+            void* dst, size_t dstCapacity,
+            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+            seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+    BIT_CStream_t blockStream;
+    FSE_CState_t  stateMatchLength;
+    FSE_CState_t  stateOffsetBits;
+    FSE_CState_t  stateLitLength;
+
+    RETURN_ERROR_IF(
+        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
+        dstSize_tooSmall, "not enough space remaining");
+    DEBUGLOG(6, "available space for bitstream : %i  (dstCapacity=%u)",
+                (int)(blockStream.endPtr - blockStream.startPtr),
+                (unsigned)dstCapacity);
+
+    /* first symbols */
+    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
+    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
+    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
+    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
+    if (MEM_32bits()) BIT_flushBits(&blockStream);
+    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
+    if (MEM_32bits()) BIT_flushBits(&blockStream);
+    if (longOffsets) {
+        U32 const ofBits = ofCodeTable[nbSeq-1];
+        int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+        if (extraBits) {
+            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
+            BIT_flushBits(&blockStream);
+        }
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
+                    ofBits - extraBits);
+    } else {
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
+    }
+    BIT_flushBits(&blockStream);
+
+    {   size_t n;
+        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
+            BYTE const llCode = llCodeTable[n];
+            BYTE const ofCode = ofCodeTable[n];
+            BYTE const mlCode = mlCodeTable[n];
+            U32  const llBits = LL_bits[llCode];
+            U32  const ofBits = ofCode;
+            U32  const mlBits = ML_bits[mlCode];
+            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
+                        (unsigned)sequences[n].litLength,
+                        (unsigned)sequences[n].matchLength + MINMATCH,
+                        (unsigned)sequences[n].offset);
+                                                                            /* 32b*/  /* 64b*/
+                                                                            /* (7)*/  /* (7)*/
+            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
+            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
+            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
+            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
+            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
+                BIT_flushBits(&blockStream);                                /* (7)*/
+            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
+            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
+            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
+            if (longOffsets) {
+                int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+                if (extraBits) {
+                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
+                    BIT_flushBits(&blockStream);                            /* (7)*/
+                }
+                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
+                            ofBits - extraBits);                            /* 31 */
+            } else {
+                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
+            }
+            BIT_flushBits(&blockStream);                                    /* (7)*/
+            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
+    }   }
+
+    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
+    FSE_flushCState(&blockStream, &stateMatchLength);
+    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
+    FSE_flushCState(&blockStream, &stateOffsetBits);
+    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
+    FSE_flushCState(&blockStream, &stateLitLength);
+
+    {   size_t const streamSize = BIT_closeCStream(&blockStream);
+        RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
+        return streamSize;
+    }
+}
+
+static size_t
+ZSTD_encodeSequences_default(
+            void* dst, size_t dstCapacity,
+            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+            seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+    return ZSTD_encodeSequences_body(dst, dstCapacity,
+                                    CTable_MatchLength, mlCodeTable,
+                                    CTable_OffsetBits, ofCodeTable,
+                                    CTable_LitLength, llCodeTable,
+                                    sequences, nbSeq, longOffsets);
+}
+
+
+#if DYNAMIC_BMI2
+
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_encodeSequences_bmi2(
+            void* dst, size_t dstCapacity,
+            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+            seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+    return ZSTD_encodeSequences_body(dst, dstCapacity,
+                                    CTable_MatchLength, mlCodeTable,
+                                    CTable_OffsetBits, ofCodeTable,
+                                    CTable_LitLength, llCodeTable,
+                                    sequences, nbSeq, longOffsets);
+}
+
+#endif
+
+size_t ZSTD_encodeSequences(
+            void* dst, size_t dstCapacity,
+            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
+{
+    DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
+#if DYNAMIC_BMI2
+    if (bmi2) {
+        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
+                                         CTable_MatchLength, mlCodeTable,
+                                         CTable_OffsetBits, ofCodeTable,
+                                         CTable_LitLength, llCodeTable,
+                                         sequences, nbSeq, longOffsets);
+    }
+#endif
+    (void)bmi2;
+    return ZSTD_encodeSequences_default(dst, dstCapacity,
+                                        CTable_MatchLength, mlCodeTable,
+                                        CTable_OffsetBits, ofCodeTable,
+                                        CTable_LitLength, llCodeTable,
+                                        sequences, nbSeq, longOffsets);
+}
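
The DYNAMIC_BMI2 dispatch above compiles the same FORCE_INLINE_TEMPLATE body
twice, once with baseline code generation and once with BMI2 instructions
enabled via a target attribute, then selects a variant at runtime. A minimal
self-contained sketch of that pattern, assuming an x86-64 GCC/Clang toolchain;
the process* names and the byte-sum body are illustrative, not zstd code:

    #include <stddef.h>

    /* Shared hot loop; inlined into both wrappers below so each copy
     * gets its own instruction selection. Byte sum is a stand-in body. */
    static inline size_t process_body(const unsigned char* src, size_t n)
    {
        size_t sum = 0, i;
        for (i = 0; i < n; i++) sum += src[i];
        return sum;
    }

    static size_t process_default(const unsigned char* src, size_t n)
    {
        return process_body(src, n);            /* baseline ISA */
    }

    #if defined(__GNUC__)
    __attribute__((target("bmi2")))             /* assumes x86-64 gcc/clang */
    static size_t process_bmi2(const unsigned char* src, size_t n)
    {
        return process_body(src, n);            /* BMI2 codegen allowed */
    }
    #endif

    size_t process(const unsigned char* src, size_t n, int bmi2)
    {
    #if defined(__GNUC__)
        if (bmi2) return process_bmi2(src, n);  /* caller passes CPU check result */
    #endif
        (void)bmi2;
        return process_default(src, n);
    }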
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress_sequences.h	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPRESS_SEQUENCES_H
+#define ZSTD_COMPRESS_SEQUENCES_H
+
+#include "fse.h" /* FSE_repeat, FSE_CTable */
+#include "zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
+
+typedef enum {
+    ZSTD_defaultDisallowed = 0,
+    ZSTD_defaultAllowed = 1
+} ZSTD_defaultPolicy_e;
+
+symbolEncodingType_e
+ZSTD_selectEncodingType(
+        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+        FSE_CTable const* prevCTable,
+        short const* defaultNorm, U32 defaultNormLog,
+        ZSTD_defaultPolicy_e const isDefaultAllowed,
+        ZSTD_strategy const strategy);
+
+size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+                unsigned* count, U32 max,
+                const BYTE* codeTable, size_t nbSeq,
+                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+                const FSE_CTable* prevCTable, size_t prevCTableSize,
+                void* workspace, size_t workspaceSize);
+
+size_t ZSTD_encodeSequences(
+            void* dst, size_t dstCapacity,
+            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
+
+#endif /* ZSTD_COMPRESS_SEQUENCES_H */
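
This header exposes the three-step sequence-encoding pipeline: choose a table
mode per symbol type, build the matching FSE table, then emit the bitstream.
A reduced sketch of the choice ZSTD_selectEncodingType makes, comparing the
estimated bit cost of the predefined table, the previous block's table, and a
freshly built one, and keeping the cheapest; the cost inputs and names here
are hypothetical stand-ins computed elsewhere:

    #include <stddef.h>

    typedef enum { MODE_BASIC, MODE_REPEAT, MODE_COMPRESSED } mode_e;

    static mode_e select_mode(size_t basicCost, size_t repeatCost,
                              size_t compressedCost, int defaultAllowed)
    {
        if (defaultAllowed
            && basicCost <= repeatCost && basicCost <= compressedCost)
            return MODE_BASIC;       /* predefined table: no header cost */
        if (repeatCost <= compressedCost)
            return MODE_REPEAT;      /* reuse the previous block's CTable */
        return MODE_COMPRESSED;      /* pay the NCount header for best fit */
    }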
--- a/contrib/python-zstandard/zstd/compress/zstd_double_fast.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_double_fast.c	Mon Oct 21 11:09:48 2019 -0400
@@ -43,8 +43,7 @@
             /* Only load extra positions for ZSTD_dtlm_full */
             if (dtlm == ZSTD_dtlm_fast)
                 break;
-        }
-    }
+    }   }
 }
 
 
@@ -63,7 +62,11 @@
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
-    const U32 prefixLowestIndex = ms->window.dictLimit;
+    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+    const U32 lowestValid = ms->window.dictLimit;
+    const U32 maxDistance = 1U << cParams->windowLog;
+    /* presumes that, if there is a dictionary, it must be using Attach mode */
+    const U32 prefixLowestIndex = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;
     const BYTE* const prefixLowest = base + prefixLowestIndex;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - HASH_READ_SIZE;
@@ -95,8 +98,15 @@
                                      dictCParams->chainLog : hBitsS;
     const U32 dictAndPrefixLength  = (U32)(ip - prefixLowest + dictEnd - dictStart);
 
+    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
+
     assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
 
+    /* if a dictionary is attached, it must be within window range */
+    if (dictMode == ZSTD_dictMatchState) {
+        assert(lowestValid + maxDistance >= endIndex);
+    }
+
     /* init */
     ip += (dictAndPrefixLength == 0);
     if (dictMode == ZSTD_noDict) {
@@ -138,7 +148,7 @@
             const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
             goto _match_stored;
         }
 
@@ -147,7 +157,7 @@
           && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
             mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
             goto _match_stored;
         }
 
@@ -170,8 +180,7 @@
                 offset = (U32)(current - dictMatchIndexL - dictIndexDelta);
                 while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
                 goto _match_found;
-            }
-        }
+        }   }
 
         if (matchIndexS > prefixLowestIndex) {
             /* check prefix short match */
@@ -186,16 +195,14 @@
 
             if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
                 goto _search_next_long;
-            }
-        }
+        }   }
 
         ip += ((ip-anchor) >> kSearchStrength) + 1;
         continue;
 
 _search_next_long:
 
-        {
-            size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
             size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
             U32 const matchIndexL3 = hashLong[hl3];
             const BYTE* matchL3 = base + matchIndexL3;
@@ -221,9 +228,7 @@
                     offset = (U32)(current + 1 - dictMatchIndexL3 - dictIndexDelta);
                     while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
                     goto _match_found;
-                }
-            }
-        }
+        }   }   }
 
         /* if no long +1 match, explore the short match we found */
         if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
@@ -242,7 +247,7 @@
         offset_2 = offset_1;
         offset_1 = offset;
 
-        ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
 
 _match_stored:
         /* match found */
@@ -250,11 +255,14 @@
         anchor = ip;
 
         if (ip <= ilimit) {
-            /* Fill Table */
-            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
-                hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;  /* here because current+2 could be > iend-8 */
-            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
-                hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+            /* Complementary insertion */
+            /* done after iLimit test, as candidates could be > iend-8 */
+            {   U32 const indexToInsert = current+2;
+                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+            }
 
             /* check immediate repcode */
             if (dictMode == ZSTD_dictMatchState) {
@@ -278,8 +286,7 @@
                         continue;
                     }
                     break;
-                }
-            }
+            }   }
 
             if (dictMode == ZSTD_noDict) {
                 while ( (ip <= ilimit)
@@ -294,14 +301,15 @@
                     ip += rLength;
                     anchor = ip;
                     continue;   /* faster when present ... (?) */
-    }   }   }   }
+        }   }   }
+    }   /* while (ip < ilimit) */
 
     /* save reps for next block */
     rep[0] = offset_1 ? offset_1 : offsetSaved;
     rep[1] = offset_2 ? offset_2 : offsetSaved;
 
     /* Return the last literals size */
-    return iend - anchor;
+    return (size_t)(iend - anchor);
 }
 
 
@@ -360,10 +368,13 @@
     const BYTE* anchor = istart;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - 8;
-    const U32   prefixStartIndex = ms->window.dictLimit;
     const BYTE* const base = ms->window.base;
+    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
+    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
+    const U32   dictStartIndex = lowLimit;
+    const U32   dictLimit = ms->window.dictLimit;
+    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
     const BYTE* const prefixStart = base + prefixStartIndex;
-    const U32   dictStartIndex = ms->window.lowLimit;
     const BYTE* const dictBase = ms->window.dictBase;
     const BYTE* const dictStart = dictBase + dictStartIndex;
     const BYTE* const dictEnd = dictBase + prefixStartIndex;
@@ -371,6 +382,10 @@
 
     DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
 
+    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
+    if (prefixStartIndex == dictStartIndex)
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);
+
     /* Search Loop */
     while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
         const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
@@ -396,7 +411,7 @@
             const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
         } else {
             if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
                 const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
@@ -407,7 +422,7 @@
                 while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
 
             } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                 size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
@@ -432,23 +447,27 @@
                 }
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
 
             } else {
                 ip += ((ip-anchor) >> kSearchStrength) + 1;
                 continue;
         }   }
 
-        /* found a match : store it */
+        /* move to next sequence start */
         ip += mLength;
         anchor = ip;
 
         if (ip <= ilimit) {
-            /* Fill Table */
-            hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
-            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
-            hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
-            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+            /* Complementary insertion */
+            /* done after iLimit test, as candidates could be > iend-8 */
+            {   U32 const indexToInsert = current+2;
+                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+            }
+
             /* check immediate repcode */
             while (ip <= ilimit) {
                 U32 const current2 = (U32)(ip-base);
@@ -475,7 +494,7 @@
     rep[1] = offset_2;
 
     /* Return the last literals size */
-    return iend - anchor;
+    return (size_t)(iend - anchor);
 }
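
The recurring change in this file is the window clamp: instead of trusting
window.dictLimit directly, each block derives the lowest index still inside
the 1<<windowLog window ending at the current position. A self-contained
sketch of that computation; the function name is hypothetical:

    #include <stdint.h>

    static uint32_t lowest_match_index(uint32_t endIndex,
                                       uint32_t lowestValid,
                                       unsigned windowLog)
    {
        uint32_t const maxDistance = 1U << windowLog;
        /* If the valid region extends further back than the window,
         * only the last maxDistance bytes may be referenced. */
        return (endIndex - lowestValid > maxDistance)
             ? endIndex - maxDistance
             : lowestValid;
    }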
 
 
--- a/contrib/python-zstandard/zstd/compress/zstd_fast.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_fast.c	Mon Oct 21 11:09:48 2019 -0400
@@ -13,7 +13,8 @@
 
 
 void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
-                        void const* end, ZSTD_dictTableLoadMethod_e dtlm)
+                        const void* const end,
+                        ZSTD_dictTableLoadMethod_e dtlm)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
@@ -41,11 +42,164 @@
     }   }   }   }
 }
 
+
 FORCE_INLINE_TEMPLATE
 size_t ZSTD_compressBlock_fast_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize,
-        U32 const mls, ZSTD_dictMode_e const dictMode)
+        U32 const mls)
+{
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
+    U32* const hashTable = ms->hashTable;
+    U32 const hlog = cParams->hashLog;
+    /* support stepSize of 0 */
+    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
+    const BYTE* const base = ms->window.base;
+    const BYTE* const istart = (const BYTE*)src;
+    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
+    const BYTE* ip0 = istart;
+    const BYTE* ip1;
+    const BYTE* anchor = istart;
+    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
+    const U32   maxDistance = 1U << cParams->windowLog;
+    const U32   validStartIndex = ms->window.dictLimit;
+    const U32   prefixStartIndex = (endIndex - validStartIndex > maxDistance) ? endIndex - maxDistance : validStartIndex;
+    const BYTE* const prefixStart = base + prefixStartIndex;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - HASH_READ_SIZE;
+    U32 offset_1=rep[0], offset_2=rep[1];
+    U32 offsetSaved = 0;
+
+    /* init */
+    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
+    ip0 += (ip0 == prefixStart);
+    ip1 = ip0 + 1;
+    {
+        U32 const maxRep = (U32)(ip0 - prefixStart);
+        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+    }
+
+    /* Main Search Loop */
+    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
+        size_t mLength;
+        BYTE const* ip2 = ip0 + 2;
+        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
+        U32 const val0 = MEM_read32(ip0);
+        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
+        U32 const val1 = MEM_read32(ip1);
+        U32 const current0 = (U32)(ip0-base);
+        U32 const current1 = (U32)(ip1-base);
+        U32 const matchIndex0 = hashTable[h0];
+        U32 const matchIndex1 = hashTable[h1];
+        BYTE const* repMatch = ip2-offset_1;
+        const BYTE* match0 = base + matchIndex0;
+        const BYTE* match1 = base + matchIndex1;
+        U32 offcode;
+        hashTable[h0] = current0;   /* update hash table */
+        hashTable[h1] = current1;   /* update hash table */
+
+        assert(ip0 + 1 == ip1);
+
+        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
+            mLength = ip2[-1] == repMatch[-1] ? 1 : 0;
+            ip0 = ip2 - mLength;
+            match0 = repMatch - mLength;
+            offcode = 0;
+            goto _match;
+        }
+        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
+            /* found a regular match */
+            goto _offset;
+        }
+        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
+            /* found a regular match after one literal */
+            ip0 = ip1;
+            match0 = match1;
+            goto _offset;
+        }
+        {
+            size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
+            assert(step >= 2);
+            ip0 += step;
+            ip1 += step;
+            continue;
+        }
+_offset: /* Requires: ip0, match0 */
+        /* Compute the offset code */
+        offset_2 = offset_1;
+        offset_1 = (U32)(ip0-match0);
+        offcode = offset_1 + ZSTD_REP_MOVE;
+        mLength = 0;
+        /* Count the backwards match length */
+        while (((ip0>anchor) & (match0>prefixStart))
+             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
+
+_match: /* Requires: ip0, match0, offcode */
+        /* Count the forward length */
+        mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
+        ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
+        /* match found */
+        ip0 += mLength;
+        anchor = ip0;
+        ip1 = ip0 + 1;
+
+        if (ip0 <= ilimit) {
+            /* Fill Table */
+            assert(base+current0+2 > istart);  /* check base overflow */
+            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
+            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+            while ( (ip0 <= ilimit)
+                 && ( (offset_2>0)
+                    & (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
+                /* store sequence */
+                size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
+                U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
+                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+                ip0 += rLength;
+                ip1 = ip0 + 1;
+                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+                anchor = ip0;
+                continue;   /* faster when present (confirmed on gcc-8) ... (?) */
+            }
+        }
+    }
+
+    /* save reps for next block */
+    rep[0] = offset_1 ? offset_1 : offsetSaved;
+    rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+    /* Return the last literals size */
+    return (size_t)(iend - anchor);
+}
+
+
+size_t ZSTD_compressBlock_fast(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
+    U32 const mls = cParams->minMatch;
+    assert(ms->dictMatchState == NULL);
+    switch(mls)
+    {
+    default: /* includes case 3 */
+    case 4 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
+    case 5 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
+    case 6 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
+    case 7 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
+    }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize, U32 const mls)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
@@ -64,46 +218,34 @@
     U32 offsetSaved = 0;
 
     const ZSTD_matchState_t* const dms = ms->dictMatchState;
-    const ZSTD_compressionParameters* const dictCParams =
-                                     dictMode == ZSTD_dictMatchState ?
-                                     &dms->cParams : NULL;
-    const U32* const dictHashTable = dictMode == ZSTD_dictMatchState ?
-                                     dms->hashTable : NULL;
-    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.dictLimit : 0;
-    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.base : NULL;
-    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?
-                                     dictBase + dictStartIndex : NULL;
-    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
-                                     dms->window.nextSrc : NULL;
-    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
-                                     prefixStartIndex - (U32)(dictEnd - dictBase) :
-                                     0;
+    const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
+    const U32* const dictHashTable = dms->hashTable;
+    const U32 dictStartIndex       = dms->window.dictLimit;
+    const BYTE* const dictBase     = dms->window.base;
+    const BYTE* const dictStart    = dictBase + dictStartIndex;
+    const BYTE* const dictEnd      = dms->window.nextSrc;
+    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
     const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);
-    const U32 dictHLog             = dictMode == ZSTD_dictMatchState ?
-                                     dictCParams->hashLog : hlog;
-
-    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+    const U32 dictHLog             = dictCParams->hashLog;
 
-    /* otherwise, we would get index underflow when translating a dict index
-     * into a local index */
-    assert(dictMode != ZSTD_dictMatchState
-        || prefixStartIndex >= (U32)(dictEnd - dictBase));
+    /* if a dictionary is still attached, it necessarily means that
+     * it is within window size. So we just check it. */
+    const U32 maxDistance = 1U << cParams->windowLog;
+    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
+    assert(endIndex - prefixStartIndex <= maxDistance);
+    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */
+
+    /* ensure there will be no underflow
+     * when translating a dict index into a local index */
+    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
 
     /* init */
+    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
     ip += (dictAndPrefixLength == 0);
-    if (dictMode == ZSTD_noDict) {
-        U32 const maxRep = (U32)(ip - prefixStart);
-        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
-        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
-    }
-    if (dictMode == ZSTD_dictMatchState) {
-        /* dictMatchState repCode checks don't currently handle repCode == 0
-         * disabling. */
-        assert(offset_1 <= dictAndPrefixLength);
-        assert(offset_2 <= dictAndPrefixLength);
-    }
+    /* dictMatchState repCode checks don't currently handle repCode == 0
+     * disabling. */
+    assert(offset_1 <= dictAndPrefixLength);
+    assert(offset_2 <= dictAndPrefixLength);
 
     /* Main Search Loop */
     while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
@@ -113,50 +255,37 @@
         U32 const matchIndex = hashTable[h];
         const BYTE* match = base + matchIndex;
         const U32 repIndex = current + 1 - offset_1;
-        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
-                            && repIndex < prefixStartIndex) ?
+        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
                                dictBase + (repIndex - dictIndexDelta) :
                                base + repIndex;
         hashTable[h] = current;   /* update hash table */
 
-        if ( (dictMode == ZSTD_dictMatchState)
-          && ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
             const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
-        } else if ( dictMode == ZSTD_noDict
-                 && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
-            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
-            ip++;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
         } else if ( (matchIndex <= prefixStartIndex) ) {
-            if (dictMode == ZSTD_dictMatchState) {
-                size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
-                U32 const dictMatchIndex = dictHashTable[dictHash];
-                const BYTE* dictMatch = dictBase + dictMatchIndex;
-                if (dictMatchIndex <= dictStartIndex ||
-                    MEM_read32(dictMatch) != MEM_read32(ip)) {
-                    assert(stepSize >= 1);
-                    ip += ((ip-anchor) >> kSearchStrength) + stepSize;
-                    continue;
-                } else {
-                    /* found a dict match */
-                    U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
-                    mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
-                    while (((ip>anchor) & (dictMatch>dictStart))
-                         && (ip[-1] == dictMatch[-1])) {
-                        ip--; dictMatch--; mLength++;
-                    } /* catch up */
-                    offset_2 = offset_1;
-                    offset_1 = offset;
-                    ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-                }
-            } else {
+            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
+            U32 const dictMatchIndex = dictHashTable[dictHash];
+            const BYTE* dictMatch = dictBase + dictMatchIndex;
+            if (dictMatchIndex <= dictStartIndex ||
+                MEM_read32(dictMatch) != MEM_read32(ip)) {
                 assert(stepSize >= 1);
                 ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                 continue;
+            } else {
+                /* found a dict match */
+                U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
+                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
+                while (((ip>anchor) & (dictMatch>dictStart))
+                     && (ip[-1] == dictMatch[-1])) {
+                    ip--; dictMatch--; mLength++;
+                } /* catch up */
+                offset_2 = offset_1;
+                offset_1 = offset;
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
             }
         } else if (MEM_read32(match) != MEM_read32(ip)) {
             /* it's not a match, and we're not going to check the dictionary */
@@ -171,7 +300,7 @@
                  && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
             offset_2 = offset_1;
             offset_1 = offset;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
         }
 
         /* match found */
@@ -185,70 +314,34 @@
             hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
 
             /* check immediate repcode */
-            if (dictMode == ZSTD_dictMatchState) {
-                while (ip <= ilimit) {
-                    U32 const current2 = (U32)(ip-base);
-                    U32 const repIndex2 = current2 - offset_2;
-                    const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
-                            dictBase - dictIndexDelta + repIndex2 :
-                            base + repIndex2;
-                    if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
-                       && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
-                        const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
-                        size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
-                        U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                        ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
-                        hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
-                        ip += repLength2;
-                        anchor = ip;
-                        continue;
-                    }
-                    break;
+            while (ip <= ilimit) {
+                U32 const current2 = (U32)(ip-base);
+                U32 const repIndex2 = current2 - offset_2;
+                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
+                        dictBase - dictIndexDelta + repIndex2 :
+                        base + repIndex2;
+                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+                    ip += repLength2;
+                    anchor = ip;
+                    continue;
                 }
+                break;
             }
-
-            if (dictMode == ZSTD_noDict) {
-                while ( (ip <= ilimit)
-                     && ( (offset_2>0)
-                        & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
-                    /* store sequence */
-                    size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
-                    U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
-                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
-                    ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
-                    ip += rLength;
-                    anchor = ip;
-                    continue;   /* faster when present ... (?) */
-    }   }   }   }
+        }
+    }
 
     /* save reps for next block */
     rep[0] = offset_1 ? offset_1 : offsetSaved;
     rep[1] = offset_2 ? offset_2 : offsetSaved;
 
     /* Return the last literals size */
-    return iend - anchor;
-}
-
-
-size_t ZSTD_compressBlock_fast(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize)
-{
-    ZSTD_compressionParameters const* cParams = &ms->cParams;
-    U32 const mls = cParams->minMatch;
-    assert(ms->dictMatchState == NULL);
-    switch(mls)
-    {
-    default: /* includes case 3 */
-    case 4 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
-    case 5 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
-    case 6 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
-    case 7 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
-    }
+    return (size_t)(iend - anchor);
 }
 
 size_t ZSTD_compressBlock_fast_dictMatchState(
@@ -262,13 +355,13 @@
     {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
+        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
     case 5 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
+        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
     case 6 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
+        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
     case 7 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
+        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
     }
 }
 
@@ -287,15 +380,24 @@
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
-    const U32   dictStartIndex = ms->window.lowLimit;
+    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
+    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
+    const U32   dictStartIndex = lowLimit;
     const BYTE* const dictStart = dictBase + dictStartIndex;
-    const U32   prefixStartIndex = ms->window.dictLimit;
+    const U32   dictLimit = ms->window.dictLimit;
+    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
     const BYTE* const prefixStart = base + prefixStartIndex;
     const BYTE* const dictEnd = dictBase + prefixStartIndex;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - 8;
     U32 offset_1=rep[0], offset_2=rep[1];
 
+    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic");
+
+    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
+    if (prefixStartIndex == dictStartIndex)
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
+
     /* Search Loop */
     while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
         const size_t h = ZSTD_hashPtr(ip, hlog, mls);
@@ -312,10 +414,10 @@
 
         if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
-            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
         } else {
             if ( (matchIndex < dictStartIndex) ||
                  (MEM_read32(match) != MEM_read32(ip)) ) {
@@ -323,15 +425,15 @@
                 ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                 continue;
             }
-            {   const BYTE* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
-                const BYTE* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+            {   const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+                const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                 U32 offset;
                 mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                 while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                 offset = current - matchIndex;
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
         }   }
 
         /* found a match : store it */
@@ -351,7 +453,7 @@
                    && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                     const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                     size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
-                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                     ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
                     hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                     ip += repLength2;
@@ -366,7 +468,7 @@
     rep[1] = offset_2;
 
     /* Return the last literals size */
-    return iend - anchor;
+    return (size_t)(iend - anchor);
 }
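
All three fast-mode loops above share the same core probe: hash the next few
bytes, read the candidate index from the slot, and overwrite the slot with
the current position in the same step. A minimal self-contained sketch of
that probe; the table size, hash constant, and names are illustrative rather
than the exact zstd values:

    #include <stdint.h>
    #include <string.h>

    #define HLOG 15
    static uint32_t hashTable[1u << HLOG];

    static uint32_t read32(const uint8_t* p)
    {
        uint32_t v; memcpy(&v, p, sizeof v); return v;
    }

    static uint32_t hash4(uint32_t v)
    {
        return (v * 2654435761u) >> (32 - HLOG);  /* multiplicative hash */
    }

    /* Returns the previous occupant of ip's hash slot (the match
     * candidate), then records ip as the newest occupant, mirroring
     * the "update hash table" step in the loops above. */
    static uint32_t probe_and_update(const uint8_t* base, const uint8_t* ip)
    {
        uint32_t const h = hash4(read32(ip));
        uint32_t const matchIndex = hashTable[h];
        hashTable[h] = (uint32_t)(ip - base);
        return matchIndex;
    }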
 
 
--- a/contrib/python-zstandard/zstd/compress/zstd_lazy.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_lazy.c	Mon Oct 21 11:09:48 2019 -0400
@@ -83,7 +83,10 @@
     U32* largerPtr  = smallerPtr + 1;
     U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
     U32 dummy32;   /* to be nullified at the end */
-    U32 const windowLow = ms->window.lowLimit;
+    U32 const windowValid = ms->window.lowLimit;
+    U32 const maxDistance = 1U << cParams->windowLog;
+    U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid;
+
 
     DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
                 current, dictLimit, windowLow);
@@ -239,7 +242,7 @@
 
     const BYTE* const base = ms->window.base;
     U32    const current = (U32)(ip-base);
-    U32    const windowLow = ms->window.lowLimit;
+    U32    const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
 
     U32*   const bt = ms->chainTable;
     U32    const btLog  = cParams->chainLog - 1;
@@ -490,8 +493,12 @@
     const U32 dictLimit = ms->window.dictLimit;
     const BYTE* const prefixStart = base + dictLimit;
     const BYTE* const dictEnd = dictBase + dictLimit;
-    const U32 lowLimit = ms->window.lowLimit;
     const U32 current = (U32)(ip-base);
+    const U32 maxDistance = 1U << cParams->windowLog;
+    const U32 lowestValid = ms->window.lowLimit;
+    const U32 withinMaxDistance = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
+    const U32 isDictionary = (ms->loadedDictEnd != 0);
+    const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
     const U32 minChain = current > chainSize ? current - chainSize : 0;
     U32 nbAttempts = 1U << cParams->searchLog;
     size_t ml=4-1;
@@ -612,12 +619,14 @@
 /* *******************************
 *  Common parser - lazy strategy
 *********************************/
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_lazy_generic(
+typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_lazy_generic(
                         ZSTD_matchState_t* ms, seqStore_t* seqStore,
                         U32 rep[ZSTD_REP_NUM],
                         const void* src, size_t srcSize,
-                        const U32 searchMethod, const U32 depth,
+                        const searchMethod_e searchMethod, const U32 depth,
                         ZSTD_dictMode_e const dictMode)
 {
     const BYTE* const istart = (const BYTE*)src;
@@ -633,8 +642,10 @@
                         ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
     searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
-        (searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
-        (searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS);
+        (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS
+                                         : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
+        (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_selectMLS
+                                         : ZSTD_HcFindBestMatch_selectMLS);
     U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
 
     const ZSTD_matchState_t* const dms = ms->dictMatchState;
@@ -653,7 +664,6 @@
 
     /* init */
     ip += (dictAndPrefixLength == 0);
-    ms->nextToUpdate3 = ms->nextToUpdate;
     if (dictMode == ZSTD_noDict) {
         U32 const maxRep = (U32)(ip - prefixLowest);
         if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
@@ -844,7 +854,7 @@
     rep[1] = offset_2 ? offset_2 : savedOffset;
 
     /* Return the last literals size */
-    return iend - anchor;
+    return (size_t)(iend - anchor);
 }
 
 
@@ -852,56 +862,56 @@
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_lazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_lazy(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_greedy(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_btlazy2_dictMatchState(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
 }
 
 size_t ZSTD_compressBlock_lazy2_dictMatchState(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
 }
 
 size_t ZSTD_compressBlock_lazy_dictMatchState(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
 }
 
 size_t ZSTD_compressBlock_greedy_dictMatchState(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
 }
 
 
@@ -910,7 +920,7 @@
                         ZSTD_matchState_t* ms, seqStore_t* seqStore,
                         U32 rep[ZSTD_REP_NUM],
                         const void* src, size_t srcSize,
-                        const U32 searchMethod, const U32 depth)
+                        const searchMethod_e searchMethod, const U32 depth)
 {
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
@@ -928,12 +938,11 @@
     typedef size_t (*searchMax_f)(
                         ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
-    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
+    searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
 
     U32 offset_1 = rep[0], offset_2 = rep[1];
 
     /* init */
-    ms->nextToUpdate3 = ms->nextToUpdate;
     ip += (ip == prefixStart);
 
     /* Match Loop */
@@ -1070,7 +1079,7 @@
     rep[1] = offset_2;
 
     /* Return the last literals size */
-    return iend - anchor;
+    return (size_t)(iend - anchor);
 }
 
 
@@ -1078,7 +1087,7 @@
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
 }
 
 size_t ZSTD_compressBlock_lazy_extDict(
@@ -1086,7 +1095,7 @@
         void const* src, size_t srcSize)
 
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
 }
 
 size_t ZSTD_compressBlock_lazy2_extDict(
@@ -1094,7 +1103,7 @@
         void const* src, size_t srcSize)
 
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
 }
 
 size_t ZSTD_compressBlock_btlazy2_extDict(
@@ -1102,5 +1111,5 @@
         void const* src, size_t srcSize)
 
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
 }
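
Replacing the bare U32 searchMethod flag with searchMethod_e makes each call
site self-describing; the parser still resolves its match finder once, up
front, through a function pointer. A reduced sketch of that selection, with
stub finder bodies for illustration only:

    #include <stddef.h>

    typedef enum { search_hashChain, search_binaryTree } searchMethod_e;

    typedef size_t (*searchMax_f)(const unsigned char* ip, size_t remaining);

    static size_t hc_find(const unsigned char* ip, size_t n)
    { (void)ip; return n > 0; }     /* stub hash-chain finder */

    static size_t bt_find(const unsigned char* ip, size_t n)
    { (void)ip; return n > 1; }     /* stub binary-tree finder */

    static searchMax_f select_search(searchMethod_e m)
    {
        return (m == search_binaryTree) ? bt_find : hc_find;
    }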
--- a/contrib/python-zstandard/zstd/compress/zstd_lazy.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_lazy.h	Mon Oct 21 11:09:48 2019 -0400
@@ -19,7 +19,7 @@
 
 U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
 
-void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). pre-emptively increase value of ZSTD_DUBT_UNSORTED_MARK */
+void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
 
 size_t ZSTD_compressBlock_btlazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
--- a/contrib/python-zstandard/zstd/compress/zstd_ldm.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_ldm.c	Mon Oct 21 11:09:48 2019 -0400
@@ -429,7 +429,7 @@
      */
     assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
     /* The input could be very large (in zstdmt), so it must be broken up into
-     * chunks to enforce the maximmum distance and handle overflow correction.
+     * chunks to enforce the maximum distance and handle overflow correction.
      */
     assert(sequences->pos <= sequences->size);
     assert(sequences->size <= sequences->capacity);
@@ -447,7 +447,7 @@
         if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
             U32 const ldmHSize = 1U << params->hashLog;
             U32 const correction = ZSTD_window_correctOverflow(
-                &ldmState->window, /* cycleLog */ 0, maxDist, src);
+                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
             ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
         }
         /* 2. We enforce the maximum offset allowed.
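
The one-line fix above matters because zstdmt feeds very large inputs
through this function in fixed-size chunks, so overflow correction must be
anchored to the chunk being processed (chunkStart) rather than the start of
the whole buffer (src). A sketch of the chunked walk, assuming an
illustrative chunk size, with the correction call shown only as a comment:

    #include <stddef.h>

    #define CHUNK_SIZE ((size_t)1 << 20)    /* illustrative chunk size */

    static void process_in_chunks(const unsigned char* src, size_t srcSize)
    {
        size_t pos = 0;
        while (pos < srcSize) {
            const unsigned char* const chunkStart = src + pos;
            size_t const chunkLen = (srcSize - pos < CHUNK_SIZE)
                                  ? (srcSize - pos) : CHUNK_SIZE;
            /* overflow correction is computed relative to chunkStart:
             * ZSTD_window_correctOverflow(&window, 0, maxDist, chunkStart); */
            /* ... match search over [chunkStart, chunkStart + chunkLen) ... */
            (void)chunkStart;
            pos += chunkLen;
        }
    }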
--- a/contrib/python-zstandard/zstd/compress/zstd_opt.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_opt.c	Mon Oct 21 11:09:48 2019 -0400
@@ -64,9 +64,15 @@
 }
 #endif
 
+static int ZSTD_compressedLiterals(optState_t const* const optPtr)
+{
+    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
+}
+
 static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
 {
-    optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
+    if (ZSTD_compressedLiterals(optPtr))
+        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
     optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
     optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
     optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
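
Note: ZSTD_compressedLiterals() gates every literal-statistics update in this file on the context's literalCompressionMode. The mode enum itself is not part of these hunks; a sketch of its assumed shape, based on zstd's advanced API (only ZSTD_lcm_uncompressed is referenced here)::

   typedef enum {
       ZSTD_lcm_auto = 0,          /* decide from compression parameters */
       ZSTD_lcm_huffman = 1,       /* always attempt Huffman compression */
       ZSTD_lcm_uncompressed = 2   /* store literals raw; stats are skipped */
   } ZSTD_literalCompressionMode_e;
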
@@ -99,6 +105,7 @@
             const BYTE* const src, size_t const srcSize,
                   int const optLevel)
 {
+    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
     DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
     optPtr->priceType = zop_dynamic;
 
@@ -113,9 +120,10 @@
             /* huffman table presumed generated by dictionary */
             optPtr->priceType = zop_dynamic;
 
-            assert(optPtr->litFreq != NULL);
-            optPtr->litSum = 0;
-            {   unsigned lit;
+            if (compressedLiterals) {
+                unsigned lit;
+                assert(optPtr->litFreq != NULL);
+                optPtr->litSum = 0;
                 for (lit=0; lit<=MaxLit; lit++) {
                     U32 const scaleLog = 11;   /* scale to 2K */
                     U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
@@ -163,10 +171,11 @@
         } else {  /* not a dictionary */
 
             assert(optPtr->litFreq != NULL);
-            {   unsigned lit = MaxLit;
+            if (compressedLiterals) {
+                unsigned lit = MaxLit;
                 HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
+                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
             }
-            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
 
             {   unsigned ll;
                 for (ll=0; ll<=MaxLL; ll++)
@@ -190,7 +199,8 @@
 
     } else {   /* new block : re-use previous statistics, scaled down */
 
-        optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+        if (compressedLiterals)
+            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
         optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
         optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
         optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
@@ -207,6 +217,10 @@
                                 int optLevel)
 {
     if (litLength == 0) return 0;
+
+    if (!ZSTD_compressedLiterals(optPtr))
+        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bits per literal. */
+
     if (optPtr->priceType == zop_predef)
         return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bits per literal - no statistics used */
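
Note: the new early return prices raw literals at exactly 8 bits each. A standalone check of the arithmetic, assuming BITCOST_MULTIPLIER is the 1/256th-bit fixed-point scale defined elsewhere in zstd::

   #include <assert.h>

   #define BITCOST_MULTIPLIER 256   /* assumed fixed-point scale */

   static int rawLiteralsCost_uncompressed(unsigned litLength)
   {
       return (int)((litLength << 3) * BITCOST_MULTIPLIER);  /* 8 bits per raw literal */
   }

   int main(void)
   {
       /* 10 raw literals price at 80 bits, versus the 60-bit (6 bits each)
        * zop_predef estimate used when no statistics are available. */
       assert(rawLiteralsCost_uncompressed(10) == 80 * BITCOST_MULTIPLIER);
       return 0;
   }
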
 
@@ -241,13 +255,13 @@
  * to provide a cost which is directly comparable to a match ending at same position */
 static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel)
 {
-    if (optPtr->priceType >= zop_predef) return WEIGHT(litLength, optLevel);
+    if (optPtr->priceType >= zop_predef) return (int)WEIGHT(litLength, optLevel);
 
     /* dynamic statistics */
     {   U32 const llCode = ZSTD_LLcode(litLength);
-        int const contribution = (LL_bits[llCode] * BITCOST_MULTIPLIER)
-                               + WEIGHT(optPtr->litLengthFreq[0], optLevel)   /* note: log2litLengthSum cancel out */
-                               - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
+        int const contribution = (int)(LL_bits[llCode] * BITCOST_MULTIPLIER)
+                               + (int)WEIGHT(optPtr->litLengthFreq[0], optLevel)   /* note: log2litLengthSum cancels out */
+                               - (int)WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
 #if 1
         return contribution;
 #else
@@ -264,7 +278,7 @@
                                      const optState_t* const optPtr,
                                      int optLevel)
 {
-    int const contribution = ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel)
+    int const contribution = (int)ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel)
                            + ZSTD_litLengthContribution(litLength, optPtr, optLevel);
     return contribution;
 }
@@ -310,7 +324,8 @@
                              U32 offsetCode, U32 matchLength)
 {
     /* literals */
-    {   U32 u;
+    if (ZSTD_compressedLiterals(optPtr)) {
+        U32 u;
         for (u=0; u < litLength; u++)
             optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
         optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
@@ -357,13 +372,15 @@
 
 /* Update hashTable3 up to ip (excluded)
    Assumption : always within prefix (i.e. not within extDict) */
-static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, const BYTE* const ip)
+static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
+                                              U32* nextToUpdate3,
+                                              const BYTE* const ip)
 {
     U32* const hashTable3 = ms->hashTable3;
     U32 const hashLog3 = ms->hashLog3;
     const BYTE* const base = ms->window.base;
-    U32 idx = ms->nextToUpdate3;
-    U32 const target = ms->nextToUpdate3 = (U32)(ip - base);
+    U32 idx = *nextToUpdate3;
+    U32 const target = (U32)(ip - base);
     size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
     assert(hashLog3 > 0);
 
@@ -372,6 +389,7 @@
         idx++;
     }
 
+    *nextToUpdate3 = target;
     return hashTable3[hash3];
 }
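
Note: nextToUpdate3 stops being a field of ZSTD_matchState_t and is threaded through as a caller-owned pointer (the matching field removals appear further down, in ZSTD_compressBlock_opt_generic and ZSTD_initStats_ultra). The shape of the refactor, reduced to essentials::

   /* sketch only: the cursor lives with the caller for one block,
    * instead of persisting inside the match state */
   static unsigned advance_cursor(unsigned *nextToUpdate3, unsigned target)
   {
       unsigned idx = *nextToUpdate3;   /* read the caller-owned cursor */
       while (idx < target) idx++;      /* stand-in for the hash insertions */
       *nextToUpdate3 = target;         /* write back through the parameter */
       return idx;
   }
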
 
@@ -488,9 +506,11 @@
     }   }
 
     *smallerPtr = *largerPtr = 0;
-    if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));   /* speed optimization */
-    assert(matchEndIdx > current + 8);
-    return matchEndIdx - (current + 8);
+    {   U32 positions = 0;
+        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
+        assert(matchEndIdx > current + 8);
+        return MAX(positions, matchEndIdx - (current + 8));
+    }
 }
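
Note: the rewritten return value keeps the bestLength speed shortcut but no longer lets it shrink the guaranteed forward step. A worked example with illustrative numbers::

   #include <assert.h>
   #define MIN(a,b) ((a) < (b) ? (a) : (b))
   #define MAX(a,b) ((a) > (b) ? (a) : (b))

   int main(void)
   {
       unsigned const current = 1000, bestLength = 400;
       unsigned const matchEndIdx = current + 100;
       unsigned const positions =
           (bestLength > 384) ? MIN(192u, bestLength - 384) : 0;  /* 16 */
       /* the old code returned `positions` (16) and skipped the second
        * term; the new code returns the larger of the two skips (92) */
       assert(MAX(positions, matchEndIdx - (current + 8)) == 92);
       return 0;
   }
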
 
 FORCE_INLINE_TEMPLATE
@@ -505,8 +525,13 @@
     DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)",
                 idx, target, dictMode);
 
-    while(idx < target)
-        idx += ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
+    while(idx < target) {
+        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
+        assert(idx < (U32)(idx + forward));
+        idx += forward;
+    }
+    assert((size_t)(ip - base) <= (size_t)(U32)(-1));
+    assert((size_t)(iend - base) <= (size_t)(U32)(-1));
     ms->nextToUpdate = target;
 }
 
@@ -516,11 +541,12 @@
 
 FORCE_INLINE_TEMPLATE
 U32 ZSTD_insertBtAndGetAllMatches (
+                    ZSTD_match_t* matches,   /* store result (found matches) in this table (presumed large enough) */
                     ZSTD_matchState_t* ms,
+                    U32* nextToUpdate3,
                     const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
-                    U32 rep[ZSTD_REP_NUM],
+                    const U32 rep[ZSTD_REP_NUM],
                     U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
-                    ZSTD_match_t* matches,
                     const U32 lengthToBeat,
                     U32 const mls /* template */)
 {
@@ -541,8 +567,8 @@
     U32 const dictLimit = ms->window.dictLimit;
     const BYTE* const dictEnd = dictBase + dictLimit;
     const BYTE* const prefixStart = base + dictLimit;
-    U32 const btLow = btMask >= current ? 0 : current - btMask;
-    U32 const windowLow = ms->window.lowLimit;
+    U32 const btLow = (btMask >= current) ? 0 : current - btMask;
+    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
     U32 const matchLow = windowLow ? windowLow : 1;
     U32* smallerPtr = bt + 2*(current&btMask);
     U32* largerPtr  = bt + 2*(current&btMask) + 1;
@@ -612,7 +638,7 @@
 
     /* HC3 match finder */
     if ((mls == 3) /*static*/ && (bestLength < mls)) {
-        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, ip);
+        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
         if ((matchIndex3 >= matchLow)
           & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
             size_t mlen;
@@ -638,9 +664,7 @@
                      (ip+mlen == iLimit) ) {  /* best possible length */
                     ms->nextToUpdate = current+1;  /* skip insertion */
                     return 1;
-                }
-            }
-        }
+        }   }   }
         /* no dictMatchState lookup: dicts don't have a populated HC3 table */
     }
 
@@ -648,19 +672,21 @@
 
     while (nbCompares-- && (matchIndex >= matchLow)) {
         U32* const nextPtr = bt + 2*(matchIndex & btMask);
+        const BYTE* match;
         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
-        const BYTE* match;
         assert(current > matchIndex);
 
         if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
             assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
             match = base + matchIndex;
+            if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0);  /* ensure the early section of the match is equal, as expected */
             matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
         } else {
             match = dictBase + matchIndex;
+            assert(memcmp(match, ip, matchLength) == 0);  /* ensure the early section of the match is equal, as expected */
             matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
             if (matchIndex+matchLength >= dictLimit)
-                match = base + matchIndex;   /* prepare for match[matchLength] */
+                match = base + matchIndex;   /* prepare for match[matchLength] read */
         }
 
         if (matchLength > bestLength) {
@@ -745,10 +771,13 @@
 
 
 FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
+                        ZSTD_match_t* matches,   /* store results (found matches, in increasing size order) in this table */
                         ZSTD_matchState_t* ms,
+                        U32* nextToUpdate3,
                         const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
-                        U32 rep[ZSTD_REP_NUM], U32 const ll0,
-                        ZSTD_match_t* matches, U32 const lengthToBeat)
+                        const U32 rep[ZSTD_REP_NUM],
+                        U32 const ll0,
+                        U32 const lengthToBeat)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32 const matchLengthSearch = cParams->minMatch;
@@ -757,12 +786,12 @@
     ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
     switch(matchLengthSearch)
     {
-    case 3 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 3);
+    case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
     default :
-    case 4 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 4);
-    case 5 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 5);
+    case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
+    case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
     case 7 :
-    case 6 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 6);
+    case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
     }
 }
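
Note: for context, the switch above is zstd's usual compile-time "template" idiom: a force-inlined worker takes mls as a constant, and the dispatcher instantiates one specialized copy per supported value. A reduced sketch::

   static inline unsigned worker(const unsigned char *ip, unsigned mls)
   {
       (void)ip;   /* the real code searches with a constant match length */
       return mls;
   }

   static unsigned dispatch(const unsigned char *ip, unsigned matchLengthSearch)
   {
       switch (matchLengthSearch) {
       case 3:  return worker(ip, 3);
       default:
       case 4:  return worker(ip, 4);
       case 5:  return worker(ip, 5);
       case 7:
       case 6:  return worker(ip, 6);
       }
   }
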
 
@@ -838,6 +867,7 @@
 
     U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
     U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
+    U32 nextToUpdate3 = ms->nextToUpdate;
 
     ZSTD_optimal_t* const opt = optStatePtr->priceTable;
     ZSTD_match_t* const matches = optStatePtr->matchTable;
@@ -847,7 +877,6 @@
     DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
                 (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
     assert(optLevel <= 2);
-    ms->nextToUpdate3 = ms->nextToUpdate;
     ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
     ip += (ip==prefixStart);
 
@@ -858,7 +887,7 @@
         /* find first match */
         {   U32 const litlen = (U32)(ip - anchor);
             U32 const ll0 = !litlen;
-            U32 const nbMatches = ZSTD_BtGetAllMatches(ms, ip, iend, dictMode, rep, ll0, matches, minMatch);
+            U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
             if (!nbMatches) { ip++; continue; }
 
             /* initialize opt[0] */
@@ -870,7 +899,7 @@
             /* large match -> immediate encoding */
             {   U32 const maxML = matches[nbMatches-1].len;
                 U32 const maxOffset = matches[nbMatches-1].off;
-                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new serie",
+                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
                             nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
 
                 if (maxML > sufficient_len) {
@@ -955,7 +984,7 @@
                 U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
                 U32 const previousPrice = opt[cur].price;
                 U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
-                U32 const nbMatches = ZSTD_BtGetAllMatches(ms, inr, iend, dictMode, opt[cur].rep, ll0, matches, minMatch);
+                U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
                 U32 matchNb;
                 if (!nbMatches) {
                     DEBUGLOG(7, "rPos:%u : no match found", cur);
@@ -1079,7 +1108,7 @@
     }   /* while (ip < ilimit) */
 
     /* Return the last literals size */
-    return iend - anchor;
+    return (size_t)(iend - anchor);
 }
 
 
@@ -1108,7 +1137,8 @@
 /* used in 2-pass strategy */
 MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
 {
-    optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
+    if (ZSTD_compressedLiterals(optPtr))
+        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
     optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
     optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
     optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
@@ -1117,7 +1147,7 @@
 /* ZSTD_initStats_ultra():
  * make a first compression pass, just to seed stats with more accurate starting values.
  * only works on first block, with no dictionary and no ldm.
- * this function cannot error, hence its constract must be respected.
+ * this function cannot error, hence its contract must be respected.
  */
 static void
 ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
@@ -1142,7 +1172,6 @@
     ms->window.dictLimit += (U32)srcSize;
     ms->window.lowLimit = ms->window.dictLimit;
     ms->nextToUpdate = ms->window.dictLimit;
-    ms->nextToUpdate3 = ms->window.dictLimit;
 
     /* re-inforce weight of collected statistics */
     ZSTD_upscaleStats(&ms->opt);
--- a/contrib/python-zstandard/zstd/compress/zstdmt_compress.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstdmt_compress.c	Mon Oct 21 11:09:48 2019 -0400
@@ -22,6 +22,7 @@
 /* ======   Dependencies   ====== */
 #include <string.h>      /* memcpy, memset */
 #include <limits.h>      /* INT_MAX, UINT_MAX */
+#include "mem.h"         /* MEM_STATIC */
 #include "pool.h"        /* threadpool */
 #include "threading.h"   /* mutex */
 #include "zstd_compress_internal.h"  /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
@@ -456,7 +457,7 @@
      * Must be acquired after the main mutex when acquiring both.
      */
     ZSTD_pthread_mutex_t ldmWindowMutex;
-    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is udpated */
+    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is updated */
     ZSTD_window_t ldmWindow;  /* A thread-safe copy of ldmState.window */
 } serialState_t;
 
@@ -647,7 +648,7 @@
     buffer_t dstBuff = job->dstBuff;
     size_t lastCBlockSize = 0;
 
-    /* ressources */
+    /* resources */
     if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
     if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
         dstBuff = ZSTDMT_getBuffer(job->bufPool);
@@ -672,7 +673,7 @@
         if (ZSTD_isError(initError)) JOB_ERROR(initError);
     } else {  /* srcStart points at reloaded section */
         U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
-        {   size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
+        {   size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
             if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
         }
         {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
@@ -864,14 +865,10 @@
  * Internal use only */
 size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
 {
-    if (nbWorkers > ZSTDMT_NBWORKERS_MAX) nbWorkers = ZSTDMT_NBWORKERS_MAX;
-    params->nbWorkers = nbWorkers;
-    params->overlapLog = ZSTDMT_OVERLAPLOG_DEFAULT;
-    params->jobSize = 0;
-    return nbWorkers;
+    return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
 }
 
-ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
 {
     ZSTDMT_CCtx* mtctx;
     U32 nbJobs = nbWorkers + 2;
@@ -906,6 +903,17 @@
     return mtctx;
 }
 
+ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+#ifdef ZSTD_MULTITHREAD
+    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem);
+#else
+    (void)nbWorkers;
+    (void)cMem;
+    return NULL;
+#endif
+}
+
 ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
 {
     return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
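
Note: with the wrapper above, single-threaded builds report failure through a NULL context rather than at link time. A hedged caller-side sketch (internal header, as in this file)::

   #include <stddef.h>
   #include "zstdmt_compress.h"

   void example(void)
   {
       ZSTDMT_CCtx *mtctx = ZSTDMT_createCCtx(4 /* workers */);
       if (mtctx == NULL) {
           /* built without ZSTD_MULTITHREAD: fall back to plain ZSTD_CCtx */
           return;
       }
       ZSTDMT_freeCCtx(mtctx);
   }
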
@@ -986,26 +994,13 @@
     {
     case ZSTDMT_p_jobSize :
         DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
-        if ( value != 0  /* default */
-          && value < ZSTDMT_JOBSIZE_MIN)
-            value = ZSTDMT_JOBSIZE_MIN;
-        assert(value >= 0);
-        if (value > ZSTDMT_JOBSIZE_MAX) value = ZSTDMT_JOBSIZE_MAX;
-        params->jobSize = value;
-        return value;
-
+        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value);
     case ZSTDMT_p_overlapLog :
         DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
-        if (value < ZSTD_OVERLAPLOG_MIN) value = ZSTD_OVERLAPLOG_MIN;
-        if (value > ZSTD_OVERLAPLOG_MAX) value = ZSTD_OVERLAPLOG_MAX;
-        params->overlapLog = value;
-        return value;
-
+        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value);
     case ZSTDMT_p_rsyncable :
-        value = (value != 0);
-        params->rsyncable = value;
-        return value;
-
+        DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value);
+        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value);
     default :
         return ERROR(parameter_unsupported);
     }
@@ -1021,32 +1016,29 @@
 {
     switch (parameter) {
     case ZSTDMT_p_jobSize:
-        assert(mtctx->params.jobSize <= INT_MAX);
-        *value = (int)(mtctx->params.jobSize);
-        break;
+        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value);
     case ZSTDMT_p_overlapLog:
-        *value = mtctx->params.overlapLog;
-        break;
+        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value);
     case ZSTDMT_p_rsyncable:
-        *value = mtctx->params.rsyncable;
-        break;
+        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value);
     default:
         return ERROR(parameter_unsupported);
     }
-    return 0;
 }
 
 /* Sets parameters relevant to the compression job,
  * initializing others to default values. */
 static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
 {
-    ZSTD_CCtx_params jobParams;
-    memset(&jobParams, 0, sizeof(jobParams));
-
-    jobParams.cParams = params.cParams;
-    jobParams.fParams = params.fParams;
-    jobParams.compressionLevel = params.compressionLevel;
-
+    ZSTD_CCtx_params jobParams = params;
+    /* Clear parameters related to multithreading */
+    jobParams.forceWindow = 0;
+    jobParams.nbWorkers = 0;
+    jobParams.jobSize = 0;
+    jobParams.overlapLog = 0;
+    jobParams.rsyncable = 0;
+    memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t));
+    memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem));
     return jobParams;
 }
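
Note: the copy direction flips from opt-in to opt-out: rather than hand-picking cParams/fParams/compressionLevel (and silently dropping any newer field), job parameters now start as a full copy and clear only what must not leak into a worker. The pattern, in miniature (field names illustrative)::

   typedef struct { int format; int level; int nbWorkers; } params_t;

   static params_t job_params(params_t const p)
   {
       params_t jp = p;    /* inherit everything by default... */
       jp.nbWorkers = 0;   /* ...then clear multithreading-only fields */
       return jp;
   }
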
 
@@ -1056,7 +1048,7 @@
 static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
 {
     if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
-    CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
+    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
     mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
     if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
     mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
@@ -1137,9 +1129,14 @@
             size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
             size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
             assert(flushed <= produced);
+            assert(jobPtr->consumed <= jobPtr->src.size);
             toFlush = produced - flushed;
-            if (toFlush==0 && (jobPtr->consumed >= jobPtr->src.size)) {
-                /* doneJobID is not-fully-flushed, but toFlush==0 : doneJobID should be compressing some more data */
+            /* if toFlush==0, nothing is available to flush.
+             * However, jobID is expected to still be active:
+             * if jobID was already completed and fully flushed,
+             * ZSTDMT_flushProduced() should have already moved on to the next job.
+             * Therefore, some input has not yet been consumed. */
+            if (toFlush==0) {
                 assert(jobPtr->consumed < jobPtr->src.size);
             }
         }
@@ -1156,12 +1153,16 @@
 
 static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
 {
-    if (params.ldmParams.enableLdm)
+    unsigned jobLog;
+    if (params.ldmParams.enableLdm) {
         /* In Long Range Mode, the windowLog is typically oversized.
          * In which case, it's preferable to determine the jobSize
          * based on chainLog instead. */
-        return MAX(21, params.cParams.chainLog + 4);
-    return MAX(20, params.cParams.windowLog + 2);
+        jobLog = MAX(21, params.cParams.chainLog + 4);
+    } else {
+        jobLog = MAX(20, params.cParams.windowLog + 2);
+    }
+    return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
 }
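
Note: the cap is the new part; the two MAX formulas are unchanged. Spot-checking the arithmetic on a 64-bit build (parameter values illustrative)::

   #include <assert.h>
   #define MAX(a,b) ((a) > (b) ? (a) : (b))
   #define MIN(a,b) ((a) < (b) ? (a) : (b))

   int main(void)
   {
       unsigned const jobLogMax = 30;   /* ZSTDMT_JOBLOG_MAX, 64-bit case */
       /* no LDM, windowLog 27: MAX(20, 29) = 29, already under the cap */
       assert(MIN(MAX(20u, 27u + 2), jobLogMax) == 29);
       /* LDM, chainLog 28: MAX(21, 32) = 32, now clamped to 30 */
       assert(MIN(MAX(21u, 28u + 4), jobLogMax) == 30);
       return 0;
   }
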
 
 static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
@@ -1205,7 +1206,7 @@
         ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
                 - overlapRLog;
     }
-    assert(0 <= ovLog && ovLog <= 30);
+    assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
     DEBUGLOG(4, "overlapLog : %i", params.overlapLog);
     DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
     return (ovLog==0) ? 0 : (size_t)1 << ovLog;
@@ -1263,7 +1264,7 @@
     if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
         return ERROR(memory_allocation);
 
-    CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbJobs) );  /* only expands if necessary */
+    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) );  /* only expands if necessary */
 
     {   unsigned u;
         for (u=0; u<nbJobs; u++) {
@@ -1396,10 +1397,10 @@
 
     /* init */
     if (params.nbWorkers != mtctx->params.nbWorkers)
-        CHECK_F( ZSTDMT_resize(mtctx, params.nbWorkers) );
+        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) );
 
     if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
-    if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
+    if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;
 
     mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
     if (mtctx->singleBlockingThread) {
@@ -1440,6 +1441,8 @@
     if (mtctx->targetSectionSize == 0) {
         mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
     }
+    assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);
+
     if (params.rsyncable) {
         /* Aim for the targetSectionSize as the average job size. */
         U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);
@@ -1547,7 +1550,7 @@
 /* ZSTDMT_writeLastEmptyBlock()
  * Write a single empty block with an end-of-frame to finish a frame.
  * Job must be created from streaming variant.
- * This function is always successfull if expected conditions are fulfilled.
+ * This function is always successful if expected conditions are fulfilled.
  */
 static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
 {
@@ -1987,7 +1990,7 @@
     assert(input->pos  <= input->size);
 
     if (mtctx->singleBlockingThread) {  /* delegate to single-thread (synchronous) */
-        return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
+        return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp);
     }
 
     if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
@@ -2051,7 +2054,7 @@
       || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
         size_t const jobSize = mtctx->inBuff.filled;
         assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
-        CHECK_F( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
+        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
     }
 
     /* check for potential compressed data ready to be flushed */
@@ -2065,7 +2068,7 @@
 
 size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
 {
-    CHECK_F( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
+    FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
 
     /* recommended next input size : fill current input buffer */
     return mtctx->targetSectionSize - mtctx->inBuff.filled;   /* note : could be zero when the input buffer is fully filled and no new job can be created yet */
@@ -2082,7 +2085,7 @@
       || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) {  /* need a last 0-size block to end frame */
            DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
                         (U32)srcSize, (U32)endFrame);
-        CHECK_F( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
+        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
     }
 
     /* check if there is any data available to flush */
--- a/contrib/python-zstandard/zstd/compress/zstdmt_compress.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstdmt_compress.h	Mon Oct 21 11:09:48 2019 -0400
@@ -17,10 +17,25 @@
 
 
 /* Note : This is an internal API.
- *        Some methods are still exposed (ZSTDLIB_API),
+ *        These APIs used to be exposed with ZSTDLIB_API,
  *        because it used to be the only way to invoke MT compression.
- *        Now, it's recommended to use ZSTD_compress_generic() instead.
- *        These methods will stop being exposed in a future version */
+ *        Now, it's recommended to use ZSTD_compress2() and ZSTD_compressStream2()
+ *        instead.
+ *
+ *        If you depend on these APIs and can't switch, then define
+ *        ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library.
+ *        However, we may completely remove these functions in a future
+ *        release, so please switch soon.
+ *
+ *        This API requires ZSTD_MULTITHREAD to be defined during compilation,
+ *        otherwise ZSTDMT_createCCtx*() will fail.
+ */
+
+#ifdef ZSTD_LEGACY_MULTITHREADED_API
+#  define ZSTDMT_API ZSTDLIB_API
+#else
+#  define ZSTDMT_API
+#endif
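
Note: a hedged consumer-side sketch: code that still calls the deprecated entry points can assert at compile time that the library was built with the opt-in define (the define name is from this hunk; the guard itself is illustrative)::

   #include "zstdmt_compress.h"

   #ifndef ZSTD_LEGACY_MULTITHREADED_API
   #  error "build with -DZSTD_LEGACY_MULTITHREADED_API to keep ZSTDMT_* exported"
   #endif
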
 
 /* ===   Dependencies   === */
 #include <stddef.h>                /* size_t */
@@ -35,22 +50,25 @@
 #ifndef ZSTDMT_JOBSIZE_MIN
 #  define ZSTDMT_JOBSIZE_MIN (1 MB)
 #endif
+#define ZSTDMT_JOBLOG_MAX   (MEM_32bits() ? 29 : 30)
 #define ZSTDMT_JOBSIZE_MAX  (MEM_32bits() ? (512 MB) : (1024 MB))
 
 
 /* ===   Memory management   === */
 typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
-ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
-ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
+/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
+ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
+/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
+ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
                                                     ZSTD_customMem cMem);
-ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
+ZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
 
-ZSTDLIB_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
+ZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
 
 
 /* ===   Simple one-pass compression function   === */
 
-ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                                        void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize,
                                        int compressionLevel);
@@ -59,31 +77,31 @@
 
 /* ===   Streaming functions   === */
 
-ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
-ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
+ZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
+ZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
 
-ZSTDLIB_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
-ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+ZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
+ZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
 
-ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);     /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+ZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+ZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);     /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
 
 
 /* ===   Advanced functions and parameters  === */
 
-ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
-                                           void* dst, size_t dstCapacity,
-                                     const void* src, size_t srcSize,
-                                     const ZSTD_CDict* cdict,
-                                           ZSTD_parameters params,
-                                           int overlapLog);
+ZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
+                                          void* dst, size_t dstCapacity,
+                                    const void* src, size_t srcSize,
+                                    const ZSTD_CDict* cdict,
+                                          ZSTD_parameters params,
+                                          int overlapLog);
 
-ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
                                         const void* dict, size_t dictSize,   /* dict can be released after init, a local copy is preserved within zcs */
                                         ZSTD_parameters params,
                                         unsigned long long pledgedSrcSize);  /* pledgedSrcSize is optional and can be zero == unknown */
 
-ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
                                         const ZSTD_CDict* cdict,
                                         ZSTD_frameParameters fparams,
                                         unsigned long long pledgedSrcSize);  /* note : zero means empty */
@@ -92,7 +110,7 @@
  * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
 typedef enum {
     ZSTDMT_p_jobSize,     /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
-    ZSTDMT_p_overlapLog,  /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
+    ZSTDMT_p_overlapLog,  /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
     ZSTDMT_p_rsyncable    /* Enables rsyncable mode. */
 } ZSTDMT_parameter;
 
@@ -101,12 +119,12 @@
  * The function must typically be called after ZSTD_createCCtx() but __before ZSTDMT_init*() !__
  * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
  * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
+ZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
 
 /* ZSTDMT_getMTCtxParameter() :
  * Query the ZSTDMT_CCtx for a parameter value.
  * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
+ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
 
 
 /*! ZSTDMT_compressStream_generic() :
@@ -116,7 +134,7 @@
  *           0 if fully flushed
  *           or an error code
  *  note : needs to be init using any ZSTD_initCStream*() variant */
-ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                                 ZSTD_outBuffer* output,
                                                 ZSTD_inBuffer* input,
                                                 ZSTD_EndDirective endOp);
--- a/contrib/python-zstandard/zstd/decompress/zstd_ddict.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/decompress/zstd_ddict.c	Mon Oct 21 11:09:48 2019 -0400
@@ -105,9 +105,9 @@
     ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
 
     /* load entropy tables */
-    CHECK_E( ZSTD_loadDEntropy(&ddict->entropy,
-                                ddict->dictContent, ddict->dictSize),
-             dictionary_corrupted );
+    RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
+            &ddict->entropy, ddict->dictContent, ddict->dictSize)),
+        dictionary_corrupted);
     ddict->entropyPresent = 1;
     return 0;
 }
@@ -133,7 +133,7 @@
     ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
 
     /* parse dictionary content */
-    CHECK_F( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
+    FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
 
     return 0;
 }
--- a/contrib/python-zstandard/zstd/decompress/zstd_decompress.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/decompress/zstd_decompress.c	Mon Oct 21 11:09:48 2019 -0400
@@ -106,6 +106,7 @@
     dctx->ddictLocal  = NULL;
     dctx->dictEnd     = NULL;
     dctx->ddictIsCold = 0;
+    dctx->dictUses = ZSTD_dont_use;
     dctx->inBuff      = NULL;
     dctx->inBuffSize  = 0;
     dctx->outBuffSize = 0;
@@ -147,13 +148,20 @@
     return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
 }
 
+static void ZSTD_clearDict(ZSTD_DCtx* dctx)
+{
+    ZSTD_freeDDict(dctx->ddictLocal);
+    dctx->ddictLocal = NULL;
+    dctx->ddict = NULL;
+    dctx->dictUses = ZSTD_dont_use;
+}
+
 size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
 {
     if (dctx==NULL) return 0;   /* support free on NULL */
-    if (dctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static DCtx */
+    RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
     {   ZSTD_customMem const cMem = dctx->customMem;
-        ZSTD_freeDDict(dctx->ddictLocal);
-        dctx->ddictLocal = NULL;
+        ZSTD_clearDict(dctx);
         ZSTD_free(dctx->inBuff, cMem);
         dctx->inBuff = NULL;
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
@@ -203,7 +211,7 @@
 static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
 {
     size_t const minInputSize = ZSTD_startingInputLength(format);
-    if (srcSize < minInputSize) return ERROR(srcSize_wrong);
+    RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong);
 
     {   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
         U32 const dictID= fhd & 3;
@@ -238,7 +246,7 @@
 
     memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr is only going to be read if the return value is zero, since they are 2 different signals */
     if (srcSize < minInputSize) return minInputSize;
-    if (src==NULL) return ERROR(GENERIC);   /* invalid parameter */
+    RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
 
     if ( (format != ZSTD_f_zstd1_magicless)
       && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
@@ -251,7 +259,7 @@
             zfhPtr->frameType = ZSTD_skippableFrame;
             return 0;
         }
-        return ERROR(prefix_unknown);
+        RETURN_ERROR(prefix_unknown);
     }
 
     /* ensure there is enough `srcSize` to fully read/decode frame header */
@@ -269,14 +277,13 @@
         U64 windowSize = 0;
         U32 dictID = 0;
         U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
-        if ((fhdByte & 0x08) != 0)
-            return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */
+        RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
+                        "reserved bits, must be zero");
 
         if (!singleSegment) {
             BYTE const wlByte = ip[pos++];
             U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
-            if (windowLog > ZSTD_WINDOWLOG_MAX)
-                return ERROR(frameParameter_windowTooLarge);
+            RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge);
             windowSize = (1ULL << windowLog);
             windowSize += (windowSize >> 3) * (wlByte&7);
         }
@@ -348,14 +355,16 @@
     size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
     U32 sizeU32;
 
-    if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
-        return ERROR(srcSize_wrong);
+    RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong);
 
     sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
-    if ((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32)
-        return ERROR(frameParameter_unsupported);
-
-    return skippableHeaderSize + sizeU32;
+    RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
+                    frameParameter_unsupported);
+    {
+        size_t const skippableSize = skippableHeaderSize + sizeU32;
+        RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong);
+        return skippableSize;
+    }
 }
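
Note: two hardening steps land here: the U32 wraparound guard and the new check that the declared skippable size actually fits in srcSize. The wraparound case, checked standalone (header size assumed to be the format's 4-byte magic plus 4-byte size field)::

   #include <assert.h>
   #include <stdint.h>

   #define ZSTD_SKIPPABLEHEADERSIZE 8   /* assumed: 4-byte magic + 4-byte size */

   int main(void)
   {
       uint32_t const sizeU32 = UINT32_MAX - 3;   /* hostile size field */
       /* adding the header size wraps around, which the guard detects */
       assert((uint32_t)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32);
       return 0;
   }
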
 
 /** ZSTD_findDecompressedSize() :
@@ -372,11 +381,10 @@
 
         if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
             size_t const skippableSize = readSkippableFrameSize(src, srcSize);
-            if (ZSTD_isError(skippableSize))
-                return skippableSize;
-            if (srcSize < skippableSize) {
+            if (ZSTD_isError(skippableSize)) {
                 return ZSTD_CONTENTSIZE_ERROR;
             }
+            assert(skippableSize <= srcSize);
 
             src = (const BYTE *)src + skippableSize;
             srcSize -= skippableSize;
@@ -428,13 +436,91 @@
 {
     size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
     if (ZSTD_isError(result)) return result;    /* invalid header */
-    if (result>0) return ERROR(srcSize_wrong);  /* headerSize too small */
-    if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
-        return ERROR(dictionary_wrong);
+    RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+    /* Skip the dictID check in fuzzing mode, because it makes the search
+     * harder.
+     */
+    RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
+                    dictionary_wrong);
+#endif
     if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
     return 0;
 }
 
+static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
+{
+    ZSTD_frameSizeInfo frameSizeInfo;
+    frameSizeInfo.compressedSize = ret;
+    frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
+    return frameSizeInfo;
+}
+
+static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
+{
+    ZSTD_frameSizeInfo frameSizeInfo;
+    memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+    if (ZSTD_isLegacy(src, srcSize))
+        return ZSTD_findFrameSizeInfoLegacy(src, srcSize);
+#endif
+
+    if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
+        && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+        frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
+        assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
+               frameSizeInfo.compressedSize <= srcSize);
+        return frameSizeInfo;
+    } else {
+        const BYTE* ip = (const BYTE*)src;
+        const BYTE* const ipstart = ip;
+        size_t remainingSize = srcSize;
+        size_t nbBlocks = 0;
+        ZSTD_frameHeader zfh;
+
+        /* Extract Frame Header */
+        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
+            if (ZSTD_isError(ret))
+                return ZSTD_errorFrameSizeInfo(ret);
+            if (ret > 0)
+                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+        }
+
+        ip += zfh.headerSize;
+        remainingSize -= zfh.headerSize;
+
+        /* Iterate over each block */
+        while (1) {
+            blockProperties_t blockProperties;
+            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+            if (ZSTD_isError(cBlockSize))
+                return ZSTD_errorFrameSizeInfo(cBlockSize);
+
+            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
+                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+
+            ip += ZSTD_blockHeaderSize + cBlockSize;
+            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
+            nbBlocks++;
+
+            if (blockProperties.lastBlock) break;
+        }
+
+        /* Final frame content checksum */
+        if (zfh.checksumFlag) {
+            if (remainingSize < 4)
+                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+            ip += 4;
+        }
+
+        frameSizeInfo.compressedSize = ip - ipstart;
+        frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
+                                        ? zfh.frameContentSize
+                                        : nbBlocks * zfh.blockSizeMax;
+        return frameSizeInfo;
+    }
+}
 
 /** ZSTD_findFrameCompressedSize() :
  *  compatible with legacy mode
@@ -443,52 +529,33 @@
  *  @return : the compressed size of the frame starting at `src` */
 size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
 {
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
-    if (ZSTD_isLegacy(src, srcSize))
-        return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
-#endif
-    if ( (srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
-      && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START ) {
-        return readSkippableFrameSize(src, srcSize);
-    } else {
-        const BYTE* ip = (const BYTE*)src;
-        const BYTE* const ipstart = ip;
-        size_t remainingSize = srcSize;
-        ZSTD_frameHeader zfh;
-
-        /* Extract Frame Header */
-        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
-            if (ZSTD_isError(ret)) return ret;
-            if (ret > 0) return ERROR(srcSize_wrong);
-        }
-
-        ip += zfh.headerSize;
-        remainingSize -= zfh.headerSize;
-
-        /* Loop on each block */
-        while (1) {
-            blockProperties_t blockProperties;
-            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
-            if (ZSTD_isError(cBlockSize)) return cBlockSize;
-
-            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
-                return ERROR(srcSize_wrong);
-
-            ip += ZSTD_blockHeaderSize + cBlockSize;
-            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
-
-            if (blockProperties.lastBlock) break;
-        }
-
-        if (zfh.checksumFlag) {   /* Final frame content checksum */
-            if (remainingSize < 4) return ERROR(srcSize_wrong);
-            ip += 4;
-        }
-
-        return ip - ipstart;
-    }
+    ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+    return frameSizeInfo.compressedSize;
 }
 
+/** ZSTD_decompressBound() :
+ *  compatible with legacy mode
+ *  `src` must point to the start of a ZSTD frame or a skippable frame
+ *  `srcSize` must be at least as large as the frame it contains
+ *  @return : the maximum decompressed size of the compressed source
+ */
+unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
+{
+    unsigned long long bound = 0;
+    /* Iterate over each frame */
+    while (srcSize > 0) {
+        ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+        size_t const compressedSize = frameSizeInfo.compressedSize;
+        unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
+        if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
+            return ZSTD_CONTENTSIZE_ERROR;
+        assert(srcSize >= compressedSize);
+        src = (const BYTE*)src + compressedSize;
+        srcSize -= compressedSize;
+        bound += decompressedBound;
+    }
+    return bound;
+}
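
Note: a hedged usage sketch for the new entry point: sizing a destination buffer when the frame header carries no content size (ZSTD_decompressBound is exposed through the advanced/static-linking API, so the define below is assumed)::

   #include <stdlib.h>
   #define ZSTD_STATIC_LINKING_ONLY
   #include <zstd.h>

   static int decompress_with_bound(const void *src, size_t srcSize)
   {
       unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
       if (bound == ZSTD_CONTENTSIZE_ERROR) return -1;   /* corrupt input */
       {
           void *dst = malloc((size_t)bound);   /* bound may be pessimistic */
           size_t dSize;
           if (dst == NULL) return -1;
           dSize = ZSTD_decompress(dst, (size_t)bound, src, srcSize);
           free(dst);
           return ZSTD_isError(dSize) ? -1 : 0;
       }
   }
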
 
 
 /*-*************************************************************
@@ -507,9 +574,10 @@
 }
 
 /** ZSTD_insertBlock() :
-    insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+ *  insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
 size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
 {
+    DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
     ZSTD_checkContinuity(dctx, blockStart);
     dctx->previousDstEnd = (const char*)blockStart + blockSize;
     return blockSize;
@@ -522,9 +590,9 @@
     DEBUGLOG(5, "ZSTD_copyRawBlock");
     if (dst == NULL) {
         if (srcSize == 0) return 0;
-        return ERROR(dstBuffer_null);
+        RETURN_ERROR(dstBuffer_null);
     }
-    if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall);
     memcpy(dst, src, srcSize);
     return srcSize;
 }
@@ -535,9 +603,9 @@
 {
     if (dst == NULL) {
         if (regenSize == 0) return 0;
-        return ERROR(dstBuffer_null);
+        RETURN_ERROR(dstBuffer_null);
     }
-    if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall);
     memset(dst, b, regenSize);
     return regenSize;
 }
@@ -560,15 +628,16 @@
     DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
 
     /* check */
-    if (remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize)
-        return ERROR(srcSize_wrong);
+    RETURN_ERROR_IF(
+        remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize,
+        srcSize_wrong);
 
     /* Frame Header */
     {   size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX);
         if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
-        if (remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize)
-            return ERROR(srcSize_wrong);
-        CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
+        RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
+                        srcSize_wrong);
+        FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
         ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
     }
 
@@ -581,7 +650,7 @@
 
         ip += ZSTD_blockHeaderSize;
         remainingSrcSize -= ZSTD_blockHeaderSize;
-        if (cBlockSize > remainingSrcSize) return ERROR(srcSize_wrong);
+        RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong);
 
         switch(blockProperties.blockType)
         {
@@ -596,7 +665,7 @@
             break;
         case bt_reserved :
         default:
-            return ERROR(corruption_detected);
+            RETURN_ERROR(corruption_detected);
         }
 
         if (ZSTD_isError(decodedSize)) return decodedSize;
@@ -609,15 +678,15 @@
     }
 
     if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
-        if ((U64)(op-ostart) != dctx->fParams.frameContentSize) {
-            return ERROR(corruption_detected);
-    }   }
+        RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
+                        corruption_detected);
+    }
     if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
         U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
         U32 checkRead;
-        if (remainingSrcSize<4) return ERROR(checksum_wrong);
+        RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong);
         checkRead = MEM_readLE32(ip);
-        if (checkRead != checkCalc) return ERROR(checksum_wrong);
+        RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong);
         ip += 4;
         remainingSrcSize -= 4;
     }
@@ -652,8 +721,8 @@
             size_t decodedSize;
             size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
             if (ZSTD_isError(frameSize)) return frameSize;
-            /* legacy support is not compatible with static dctx */
-            if (dctx->staticSize) return ERROR(memory_allocation);
+            RETURN_ERROR_IF(dctx->staticSize, memory_allocation,
+                "legacy support is not compatible with static dctx");
 
             decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
             if (ZSTD_isError(decodedSize)) return decodedSize;
@@ -674,9 +743,8 @@
                         (unsigned)magicNumber, ZSTD_MAGICNUMBER);
             if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
                 size_t const skippableSize = readSkippableFrameSize(src, srcSize);
-                if (ZSTD_isError(skippableSize))
-                    return skippableSize;
-                if (srcSize < skippableSize) return ERROR(srcSize_wrong);
+                FORWARD_IF_ERROR(skippableSize);
+                assert(skippableSize <= srcSize);
 
                 src = (const BYTE *)src + skippableSize;
                 srcSize -= skippableSize;
@@ -685,29 +753,29 @@
 
         if (ddict) {
             /* we were called from ZSTD_decompress_usingDDict */
-            CHECK_F(ZSTD_decompressBegin_usingDDict(dctx, ddict));
+            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict));
         } else {
             /* this will initialize correctly with no dict if dict == NULL, so
              * use this in all cases but ddict */
-            CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
+            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
         }
         ZSTD_checkContinuity(dctx, dst);
 
         {   const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
                                                     &src, &srcSize);
-            if ( (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
-              && (moreThan1Frame==1) ) {
-                /* at least one frame successfully completed,
-                 * but following bytes are garbage :
-                 * it's more likely to be a srcSize error,
-                 * specifying more bytes than compressed size of frame(s).
-                 * This error message replaces ERROR(prefix_unknown),
-                 * which would be confusing, as the first header is actually correct.
-                 * Note that one could be unlucky, it might be a corruption error instead,
-                 * happening right at the place where we expect zstd magic bytes.
-                 * But this is _much_ less likely than a srcSize field error. */
-                return ERROR(srcSize_wrong);
-            }
+            RETURN_ERROR_IF(
+                (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
+             && (moreThan1Frame==1),
+                srcSize_wrong,
+                "at least one frame successfully completed, but following "
+                "bytes are garbage: it's more likely to be a srcSize error, "
+                "specifying more bytes than compressed size of frame(s). This "
+                "error message replaces ERROR(prefix_unknown), which would be "
+                "confusing, as the first header is actually correct. Note that "
+                "one could be unlucky, it might be a corruption error instead, "
+                "happening right at the place where we expect zstd magic "
+                "bytes. But this is _much_ less likely than a srcSize field "
+                "error.");
             if (ZSTD_isError(res)) return res;
             assert(res <= dstCapacity);
             dst = (BYTE*)dst + res;
@@ -716,7 +784,7 @@
         moreThan1Frame = 1;
     }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
 
-    if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */
+    RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
 
     return (BYTE*)dst - (BYTE*)dststart;
 }
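
The function closed by the hunk above (ZSTD_decompressMultiFrame) walks a buffer that may hold several concatenated frames, including skippable frames, and returns the total decompressed size. A hedged usage sketch with the public one-shot API; these are standard zstd calls, with error handling trimmed for brevity:

    #include <stdio.h>
    #include <string.h>
    #include <zstd.h>

    /* Two independently compressed frames, concatenated back to back,
     * decompress in one call: the decoder loops over frames until the
     * input is fully consumed (leftover bytes are srcSize_wrong). */
    int main(void)
    {
        const char a[] = "hello ", b[] = "world";
        char comp[256], out[64];
        size_t ca = ZSTD_compress(comp, sizeof comp, a, strlen(a), 1);
        size_t cb = ZSTD_compress(comp + ca, sizeof comp - ca, b, strlen(b), 1);
        size_t got = ZSTD_decompress(out, sizeof out, comp, ca + cb);
        if (!ZSTD_isError(got))
            printf("%.*s\n", (int)got, out);   /* -> "hello world" */
        return 0;
    }
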
@@ -730,9 +798,26 @@
 }
 
 
+static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
+{
+    switch (dctx->dictUses) {
+    default:
+        assert(0 /* Impossible */);
+        /* fall-through */
+    case ZSTD_dont_use:
+        ZSTD_clearDict(dctx);
+        return NULL;
+    case ZSTD_use_indefinitely:
+        return dctx->ddict;
+    case ZSTD_use_once:
+        dctx->dictUses = ZSTD_dont_use;
+        return dctx->ddict;
+    }
+}
+
 size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
+    return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
 }
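
The new ZSTD_getDDict() consumes the attached dictionary according to dictUses: a prefix referenced via ZSTD_DCtx_refPrefix() is ZSTD_use_once (dropped after one frame), while ZSTD_DCtx_refDDict() and loadDictionary keep it ZSTD_use_indefinitely. A standalone sketch of the same consume-once pattern; the types and names below are illustrative, not the zstd structs:

    #include <stdio.h>

    typedef enum { use_indefinitely = -1, dont_use = 0, use_once = 1 } dict_uses;

    typedef struct { const char* dict; dict_uses uses; } ctx_t;

    /* Mirrors ZSTD_getDDict: return the dictionary to use for the next
     * frame, downgrading use_once to dont_use so it applies exactly once. */
    static const char* get_dict(ctx_t* c)
    {
        switch (c->uses) {
        case dont_use:          c->dict = NULL; return NULL;
        case use_indefinitely:  return c->dict;
        case use_once:          c->uses = dont_use; return c->dict;
        }
        return NULL;
    }

    int main(void)
    {
        ctx_t c = { "prefix-dict", use_once };
        const char* d1 = get_dict(&c);   /* "prefix-dict" */
        const char* d2 = get_dict(&c);   /* NULL: a prefix applies only once */
        printf("frame 1: %s, frame 2: %s\n",
               d1 ? d1 : "(none)", d2 ? d2 : "(none)");
        return 0;
    }
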
 
 
@@ -741,7 +826,7 @@
 #if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
     size_t regenSize;
     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
-    if (dctx==NULL) return ERROR(memory_allocation);
+    RETURN_ERROR_IF(dctx==NULL, memory_allocation);
     regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
     ZSTD_freeDCtx(dctx);
     return regenSize;
@@ -791,8 +876,7 @@
 {
     DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
     /* Sanity check */
-    if (srcSize != dctx->expected)
-        return ERROR(srcSize_wrong);  /* not allowed */
+    RETURN_ERROR_IF(srcSize != dctx->expected, srcSize_wrong, "not allowed");
     if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
 
     switch (dctx->stage)
@@ -817,7 +901,7 @@
     case ZSTDds_decodeFrameHeader:
         assert(src != NULL);
         memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
-        CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
+        FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
         dctx->expected = ZSTD_blockHeaderSize;
         dctx->stage = ZSTDds_decodeBlockHeader;
         return 0;
@@ -826,6 +910,7 @@
         {   blockProperties_t bp;
             size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
             if (ZSTD_isError(cBlockSize)) return cBlockSize;
+            RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum");
             dctx->expected = cBlockSize;
             dctx->bType = bp.blockType;
             dctx->rleSize = bp.origSize;
@@ -867,19 +952,20 @@
                 break;
             case bt_reserved :   /* should never happen */
             default:
-                return ERROR(corruption_detected);
+                RETURN_ERROR(corruption_detected);
             }
             if (ZSTD_isError(rSize)) return rSize;
+            RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
             DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
             dctx->decodedSize += rSize;
             if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
 
             if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
                 DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
-                if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
-                    if (dctx->decodedSize != dctx->fParams.frameContentSize) {
-                        return ERROR(corruption_detected);
-                }   }
+                RETURN_ERROR_IF(
+                    dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+                 && dctx->decodedSize != dctx->fParams.frameContentSize,
+                    corruption_detected);
                 if (dctx->fParams.checksumFlag) {  /* another round for frame checksum */
                     dctx->expected = 4;
                     dctx->stage = ZSTDds_checkChecksum;
@@ -900,7 +986,7 @@
         {   U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
             U32 const check32 = MEM_readLE32(src);
             DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
-            if (check32 != h32) return ERROR(checksum_wrong);
+            RETURN_ERROR_IF(check32 != h32, checksum_wrong);
             dctx->expected = 0;
             dctx->stage = ZSTDds_getFrameHeaderSize;
             return 0;
@@ -921,7 +1007,7 @@
 
     default:
         assert(0);   /* impossible */
-        return ERROR(GENERIC);   /* some compiler require default to do something */
+        RETURN_ERROR(GENERIC);   /* some compilers require default to do something */
     }
 }
 
@@ -945,7 +1031,7 @@
     const BYTE* dictPtr = (const BYTE*)dict;
     const BYTE* const dictEnd = dictPtr + dictSize;
 
-    if (dictSize <= 8) return ERROR(dictionary_corrupted);
+    RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted);
     assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
     dictPtr += 8;   /* skip header = magic + dictID */
 
@@ -964,16 +1050,16 @@
                                                 dictPtr, dictEnd - dictPtr,
                                                 workspace, workspaceSize);
 #endif
-        if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted);
         dictPtr += hSize;
     }
 
     {   short offcodeNCount[MaxOff+1];
         unsigned offcodeMaxValue = MaxOff, offcodeLog;
         size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
-        if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
-        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted);
+        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
         ZSTD_buildFSETable( entropy->OFTable,
                             offcodeNCount, offcodeMaxValue,
                             OF_base, OF_bits,
@@ -984,9 +1070,9 @@
     {   short matchlengthNCount[MaxML+1];
         unsigned matchlengthMaxValue = MaxML, matchlengthLog;
         size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted);
-        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted);
+        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
         ZSTD_buildFSETable( entropy->MLTable,
                             matchlengthNCount, matchlengthMaxValue,
                             ML_base, ML_bits,
@@ -997,9 +1083,9 @@
     {   short litlengthNCount[MaxLL+1];
         unsigned litlengthMaxValue = MaxLL, litlengthLog;
         size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted);
-        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted);
+        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
         ZSTD_buildFSETable( entropy->LLTable,
                             litlengthNCount, litlengthMaxValue,
                             LL_base, LL_bits,
@@ -1007,12 +1093,13 @@
         dictPtr += litlengthHeaderSize;
     }
 
-    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
     {   int i;
         size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
         for (i=0; i<3; i++) {
             U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
-            if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted);
+            RETURN_ERROR_IF(rep==0 || rep >= dictContentSize,
+                            dictionary_corrupted);
             entropy->rep[i] = rep;
     }   }
 
@@ -1030,7 +1117,7 @@
 
     /* load entropy tables */
     {   size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
-        if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted);
         dict = (const char*)dict + eSize;
         dictSize -= eSize;
     }
@@ -1064,9 +1151,11 @@
 
 size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
 {
-    CHECK_F( ZSTD_decompressBegin(dctx) );
+    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
     if (dict && dictSize)
-        CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
+        RETURN_ERROR_IF(
+            ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
+            dictionary_corrupted);
     return 0;
 }
 
@@ -1085,7 +1174,7 @@
         DEBUGLOG(4, "DDict is %s",
                     dctx->ddictIsCold ? "~cold~" : "hot!");
     }
-    CHECK_F( ZSTD_decompressBegin(dctx) );
+    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
     if (ddict) {   /* NULL ddict is equivalent to no dictionary */
         ZSTD_copyDDictParameters(dctx, ddict);
     }
@@ -1104,7 +1193,7 @@
 }
 
 /*! ZSTD_getDictID_fromFrame() :
- *  Provides the dictID required to decompresse frame stored within `src`.
+ *  Provides the dictID required to decompress the frame stored within `src`.
  *  If @return == 0, the dictID could not be decoded.
  *  This could for one of the following reasons :
  *  - The frame does not require a dictionary (most common case).
@@ -1176,15 +1265,14 @@
                                          ZSTD_dictLoadMethod_e dictLoadMethod,
                                          ZSTD_dictContentType_e dictContentType)
 {
-    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
-    ZSTD_freeDDict(dctx->ddictLocal);
+    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+    ZSTD_clearDict(dctx);
     if (dict && dictSize >= 8) {
         dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
-        if (dctx->ddictLocal == NULL) return ERROR(memory_allocation);
-    } else {
-        dctx->ddictLocal = NULL;
+        RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation);
+        dctx->ddict = dctx->ddictLocal;
+        dctx->dictUses = ZSTD_use_indefinitely;
     }
-    dctx->ddict = dctx->ddictLocal;
     return 0;
 }
 
@@ -1200,7 +1288,9 @@
 
 size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
 {
-    return ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType);
+    FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType));
+    dctx->dictUses = ZSTD_use_once;
+    return 0;
 }
 
 size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
@@ -1215,9 +1305,8 @@
 size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
 {
     DEBUGLOG(4, "ZSTD_initDStream_usingDict");
-    zds->streamStage = zdss_init;
-    zds->noForwardProgress = 0;
-    CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
+    FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
     return ZSTD_FRAMEHEADERSIZE_PREFIX;
 }
 
@@ -1225,7 +1314,7 @@
 size_t ZSTD_initDStream(ZSTD_DStream* zds)
 {
     DEBUGLOG(4, "ZSTD_initDStream");
-    return ZSTD_initDStream_usingDict(zds, NULL, 0);
+    return ZSTD_initDStream_usingDDict(zds, NULL);
 }
 
 /* ZSTD_initDStream_usingDDict() :
@@ -1233,9 +1322,9 @@
  * this function cannot fail */
 size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
 {
-    size_t const initResult = ZSTD_initDStream(dctx);
-    dctx->ddict = ddict;
-    return initResult;
+    FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) );
+    return ZSTD_FRAMEHEADERSIZE_PREFIX;
 }
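
After this change the legacy init entry points are thin wrappers over the advanced API: reset the session, (re)attach the dictionary, and stream. A hedged equivalent using only public zstd calls; demo_stream is a hypothetical helper, and the loop assumes src holds exactly one complete frame:

    #include <stdio.h>
    #include <zstd.h>

    /* ZSTD_initDStream_usingDDict(zds, ddict) is now equivalent to the
     * two advanced calls below, which is also the recommended spelling. */
    static size_t demo_stream(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict,
                              const void* src, size_t srcSize,
                              void* dst, size_t dstCap)
    {
        ZSTD_inBuffer in = { src, srcSize, 0 };
        ZSTD_outBuffer out = { dst, dstCap, 0 };
        size_t ret;
        ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only);
        ZSTD_DCtx_refDDict(dctx, ddict);        /* NULL means no dictionary */
        while ((ret = ZSTD_decompressStream(dctx, &out, &in)) != 0)
            if (ZSTD_isError(ret)) return ret;
        return out.pos;                          /* decompressed bytes */
    }

    int main(void)
    {
        const char msg[] = "streaming example";
        char comp[128], out[64];
        size_t csize = ZSTD_compress(comp, sizeof comp, msg, sizeof msg, 1);
        ZSTD_DCtx* dctx = ZSTD_createDCtx();
        size_t got = demo_stream(dctx, NULL, comp, csize, out, sizeof out);
        if (!ZSTD_isError(got))
            printf("%s\n", out);   /* NUL was compressed along with msg */
        ZSTD_freeDCtx(dctx);
        return 0;
    }
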
 
 /* ZSTD_resetDStream() :
@@ -1243,19 +1332,19 @@
  * this function cannot fail */
 size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
 {
-    DEBUGLOG(4, "ZSTD_resetDStream");
-    dctx->streamStage = zdss_loadHeader;
-    dctx->lhSize = dctx->inPos = dctx->outStart = dctx->outEnd = 0;
-    dctx->legacyVersion = 0;
-    dctx->hostageByte = 0;
+    FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only));
     return ZSTD_FRAMEHEADERSIZE_PREFIX;
 }
 
 
 size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
 {
-    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
-    dctx->ddict = ddict;
+    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+    ZSTD_clearDict(dctx);
+    if (ddict) {
+        dctx->ddict = ddict;
+        dctx->dictUses = ZSTD_use_indefinitely;
+    }
     return 0;
 }
 
@@ -1267,9 +1356,9 @@
     ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
     size_t const min = (size_t)1 << bounds.lowerBound;
     size_t const max = (size_t)1 << bounds.upperBound;
-    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
-    if (maxWindowSize < min) return ERROR(parameter_outOfBound);
-    if (maxWindowSize > max) return ERROR(parameter_outOfBound);
+    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+    RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound);
+    RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound);
     dctx->maxWindowSize = maxWindowSize;
     return 0;
 }
@@ -1311,15 +1400,15 @@
 }
 
 #define CHECK_DBOUNDS(p,v) {                \
-    if (!ZSTD_dParam_withinBounds(p, v))    \
-        return ERROR(parameter_outOfBound); \
+    RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound); \
 }
 
 size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
 {
-    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
     switch(dParam) {
         case ZSTD_d_windowLogMax:
+            if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
             CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
             dctx->maxWindowSize = ((size_t)1) << value;
             return 0;
@@ -1329,19 +1418,20 @@
             return 0;
         default:;
     }
-    return ERROR(parameter_unsupported);
+    RETURN_ERROR(parameter_unsupported);
 }
 
 size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
 {
     if ( (reset == ZSTD_reset_session_only)
       || (reset == ZSTD_reset_session_and_parameters) ) {
-        (void)ZSTD_initDStream(dctx);
+        dctx->streamStage = zdss_init;
+        dctx->noForwardProgress = 0;
     }
     if ( (reset == ZSTD_reset_parameters)
       || (reset == ZSTD_reset_session_and_parameters) ) {
-        if (dctx->streamStage != zdss_init)
-            return ERROR(stage_wrong);
+        RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+        ZSTD_clearDict(dctx);
         dctx->format = ZSTD_f_zstd1;
         dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
     }
@@ -1360,7 +1450,8 @@
     unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
     unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
     size_t const minRBSize = (size_t) neededSize;
-    if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge);
+    RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
+                    frameParameter_windowTooLarge);
     return minRBSize;
 }
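
ZSTD_decodingBufferSize_min() sizes the streaming ring buffer as min(frameContentSize, windowSize + blockSize + 2*WILDCOPY_OVERLENGTH), and the new RETURN_ERROR_IF guards the narrowing to size_t on 32-bit targets. A worked check of the formula, not part of the source; the WILDCOPY_OVERLENGTH value of 8 is an assumption about this vendored copy (see zstd_internal.h):

    #include <stdio.h>

    #define WILDCOPY_OVERLENGTH 8   /* assumed value for this vendored copy */

    static unsigned long long rb_min(unsigned long long windowSize,
                                     unsigned long long blockSize,
                                     unsigned long long frameContentSize)
    {
        unsigned long long needed = windowSize + blockSize + 2*WILDCOPY_OVERLENGTH;
        return frameContentSize < needed ? frameContentSize : needed;
    }

    int main(void)
    {
        /* 1 MiB window, 128 KiB max block -> 1179664-byte ring buffer,
         * but a tiny 100-byte frame only ever needs 100 bytes. */
        printf("%llu %llu\n",
               rb_min(1u << 20, 128u << 10, ~0ULL),
               rb_min(1u << 20, 128u << 10, 100));
        return 0;
    }
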
 
@@ -1378,9 +1469,9 @@
     ZSTD_frameHeader zfh;
     size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
     if (ZSTD_isError(err)) return err;
-    if (err>0) return ERROR(srcSize_wrong);
-    if (zfh.windowSize > windowSizeMax)
-        return ERROR(frameParameter_windowTooLarge);
+    RETURN_ERROR_IF(err>0, srcSize_wrong);
+    RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
+                    frameParameter_windowTooLarge);
     return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
 }
 
@@ -1406,16 +1497,16 @@
     U32 someMoreWork = 1;
 
     DEBUGLOG(5, "ZSTD_decompressStream");
-    if (input->pos > input->size) {  /* forbidden */
-        DEBUGLOG(5, "in: pos: %u   vs size: %u",
-                    (U32)input->pos, (U32)input->size);
-        return ERROR(srcSize_wrong);
-    }
-    if (output->pos > output->size) {  /* forbidden */
-        DEBUGLOG(5, "out: pos: %u   vs size: %u",
-                    (U32)output->pos, (U32)output->size);
-        return ERROR(dstSize_tooSmall);
-    }
+    RETURN_ERROR_IF(
+        input->pos > input->size,
+        srcSize_wrong,
+        "forbidden. in: pos: %u   vs size: %u",
+        (U32)input->pos, (U32)input->size);
+    RETURN_ERROR_IF(
+        output->pos > output->size,
+        dstSize_tooSmall,
+        "forbidden. out: pos: %u   vs size: %u",
+        (U32)output->pos, (U32)output->size);
     DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
 
     while (someMoreWork) {
@@ -1423,15 +1514,18 @@
         {
         case zdss_init :
             DEBUGLOG(5, "stage zdss_init => transparent reset ");
-            ZSTD_resetDStream(zds);   /* transparent reset on starting decoding a new frame */
+            zds->streamStage = zdss_loadHeader;
+            zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+            zds->legacyVersion = 0;
+            zds->hostageByte = 0;
             /* fall-through */
 
         case zdss_loadHeader :
             DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
             if (zds->legacyVersion) {
-                /* legacy support is incompatible with static dctx */
-                if (zds->staticSize) return ERROR(memory_allocation);
+                RETURN_ERROR_IF(zds->staticSize, memory_allocation,
+                    "legacy support is incompatible with static dctx");
                 {   size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
                     if (hint==0) zds->streamStage = zdss_init;
                     return hint;
@@ -1443,12 +1537,13 @@
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
                     U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
                     if (legacyVersion) {
-                        const void* const dict = zds->ddict ? ZSTD_DDict_dictContent(zds->ddict) : NULL;
-                        size_t const dictSize = zds->ddict ? ZSTD_DDict_dictSize(zds->ddict) : 0;
+                        ZSTD_DDict const* const ddict = ZSTD_getDDict(zds);
+                        const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL;
+                        size_t const dictSize = ddict ? ZSTD_DDict_dictSize(ddict) : 0;
                         DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
-                        /* legacy support is incompatible with static dctx */
-                        if (zds->staticSize) return ERROR(memory_allocation);
-                        CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext,
+                        RETURN_ERROR_IF(zds->staticSize, memory_allocation,
+                            "legacy support is incompatible with static dctx");
+                        FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext,
                                     zds->previousLegacyVersion, legacyVersion,
                                     dict, dictSize));
                         zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
@@ -1482,7 +1577,7 @@
                 size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
                 if (cSize <= (size_t)(iend-istart)) {
                     /* shortcut : using single-pass mode */
-                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, zds->ddict);
+                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, ZSTD_getDDict(zds));
                     if (ZSTD_isError(decompressedSize)) return decompressedSize;
                     DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
                     ip = istart + cSize;
@@ -1495,13 +1590,13 @@
 
             /* Consume header (see ZSTDds_decodeFrameHeader) */
             DEBUGLOG(4, "Consume header");
-            CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
+            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)));
 
             if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
                 zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
                 zds->stage = ZSTDds_skipFrame;
             } else {
-                CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
+                FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
                 zds->expected = ZSTD_blockHeaderSize;
                 zds->stage = ZSTDds_decodeBlockHeader;
             }
@@ -1511,7 +1606,8 @@
                         (U32)(zds->fParams.windowSize >>10),
                         (U32)(zds->maxWindowSize >> 10) );
             zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
-            if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
+            RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
+                            frameParameter_windowTooLarge);
 
             /* Adapt buffer sizes to frame header instructions */
             {   size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
@@ -1525,14 +1621,15 @@
                     if (zds->staticSize) {  /* static DCtx */
                         DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
                         assert(zds->staticSize >= sizeof(ZSTD_DCtx));  /* controlled at init */
-                        if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx))
-                            return ERROR(memory_allocation);
+                        RETURN_ERROR_IF(
+                            bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
+                            memory_allocation);
                     } else {
                         ZSTD_free(zds->inBuff, zds->customMem);
                         zds->inBuffSize = 0;
                         zds->outBuffSize = 0;
                         zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
-                        if (zds->inBuff == NULL) return ERROR(memory_allocation);
+                        RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation);
                     }
                     zds->inBuffSize = neededInBuffSize;
                     zds->outBuff = zds->inBuff + zds->inBuffSize;
@@ -1574,7 +1671,9 @@
                 if (isSkipFrame) {
                     loadedSize = MIN(toLoad, (size_t)(iend-ip));
                 } else {
-                    if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected);   /* should never happen */
+                    RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
+                                    corruption_detected,
+                                    "should never happen");
                     loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
                 }
                 ip += loadedSize;
@@ -1615,7 +1714,7 @@
 
         default:
             assert(0);    /* impossible */
-            return ERROR(GENERIC);   /* some compiler require default to do something */
+            RETURN_ERROR(GENERIC);   /* some compilers require default to do something */
     }   }
 
     /* result */
@@ -1624,8 +1723,8 @@
     if ((ip==istart) && (op==ostart)) {  /* no forward progress */
         zds->noForwardProgress ++;
         if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
-            if (op==oend) return ERROR(dstSize_tooSmall);
-            if (ip==iend) return ERROR(srcSize_wrong);
+            RETURN_ERROR_IF(op==oend, dstSize_tooSmall);
+            RETURN_ERROR_IF(ip==iend, srcSize_wrong);
             assert(0);
         }
     } else {
--- a/contrib/python-zstandard/zstd/decompress/zstd_decompress_block.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/decompress/zstd_decompress_block.c	Mon Oct 21 11:09:48 2019 -0400
@@ -56,14 +56,15 @@
 size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                           blockProperties_t* bpPtr)
 {
-    if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong);
+
     {   U32 const cBlockHeader = MEM_readLE24(src);
         U32 const cSize = cBlockHeader >> 3;
         bpPtr->lastBlock = cBlockHeader & 1;
         bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
         bpPtr->origSize = cSize;   /* only useful for RLE */
         if (bpPtr->blockType == bt_rle) return 1;
-        if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
+        RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected);
         return cSize;
     }
 }
@@ -78,7 +79,8 @@
 size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
                           const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
 {
-    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+    DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
+    RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);
 
     {   const BYTE* const istart = (const BYTE*) src;
         symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
@@ -86,11 +88,12 @@
         switch(litEncType)
         {
         case set_repeat:
-            if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
+            DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
+            RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);
             /* fall-through */
 
         case set_compressed:
-            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
+            RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
             {   size_t lhSize, litSize, litCSize;
                 U32 singleStream=0;
                 U32 const lhlCode = (istart[0] >> 2) & 3;
@@ -115,11 +118,11 @@
                     /* 2 - 2 - 18 - 18 */
                     lhSize = 5;
                     litSize  = (lhc >> 4) & 0x3FFFF;
-                    litCSize = (lhc >> 22) + (istart[4] << 10);
+                    litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
                     break;
                 }
-                if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
-                if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
+                RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected);
 
                 /* prefetch huffman table if cold */
                 if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
@@ -157,7 +160,7 @@
                     }
                 }
 
-                if (HUF_isError(hufSuccess)) return ERROR(corruption_detected);
+                RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected);
 
                 dctx->litPtr = dctx->litBuffer;
                 dctx->litSize = litSize;
@@ -187,7 +190,7 @@
                 }
 
                 if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
-                    if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
+                    RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected);
                     memcpy(dctx->litBuffer, istart+lhSize, litSize);
                     dctx->litPtr = dctx->litBuffer;
                     dctx->litSize = litSize;
@@ -216,17 +219,17 @@
                 case 3:
                     lhSize = 3;
                     litSize = MEM_readLE24(istart) >> 4;
-                    if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+                    RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
                     break;
                 }
-                if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
+                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
                 memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
                 dctx->litPtr = dctx->litBuffer;
                 dctx->litSize = litSize;
                 return lhSize+1;
             }
         default:
-            return ERROR(corruption_detected);   /* impossible */
+            RETURN_ERROR(corruption_detected, "impossible");
         }
     }
 }
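
The compressed-literals header packs sizes into 3 to 5 bytes; the `(size_t)istart[4] << 10` fix above keeps the fifth byte from being shifted as a plain int in the widest 2-2-18-18 layout. A sketch of decoding that layout only (field extraction mirrors the case 3 branch above; the encoder in main() is an illustrative inverse, not zstd code):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a 5-byte compressed-literals header (size-format 3):
     * 2 bits type | 2 bits format | 18 bits litSize | 18 bits litCSize, LE. */
    static void decode_lh5(const uint8_t* istart, size_t* litSize, size_t* litCSize)
    {
        uint32_t const lhc = (uint32_t)istart[0]
                           | ((uint32_t)istart[1] << 8)
                           | ((uint32_t)istart[2] << 16)
                           | ((uint32_t)istart[3] << 24);
        *litSize  = (lhc >> 4) & 0x3FFFF;          /* bits 4..21 */
        *litCSize = (lhc >> 22)                    /* low 10 bits of litCSize */
                  + ((size_t)istart[4] << 10);     /* high 8 bits: widen first */
    }

    int main(void)
    {
        /* Encode litSize=0x12345, litCSize=0xABCD, then decode them back. */
        size_t ls, lcs;
        uint32_t const lhc = 2u /* set_compressed */ | (3u << 2) /* format 3 */
                           | (0x12345u << 4) | ((0xABCDu & 0x3FFu) << 22);
        uint8_t h[5] = { (uint8_t)lhc, (uint8_t)(lhc >> 8), (uint8_t)(lhc >> 16),
                         (uint8_t)(lhc >> 24), (uint8_t)(0xABCDu >> 10) };
        decode_lh5(h, &ls, &lcs);
        printf("litSize=%zx litCSize=%zx\n", ls, lcs);   /* 12345 abcd */
        return 0;
    }
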
@@ -390,7 +393,8 @@
                     symbolNext[s] = 1;
                 } else {
                     if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
-                    symbolNext[s] = normalizedCounter[s];
+                    assert(normalizedCounter[s]>=0);
+                    symbolNext[s] = (U16)normalizedCounter[s];
         }   }   }
         memcpy(dt, &DTableH, sizeof(DTableH));
     }
@@ -436,8 +440,8 @@
     switch(type)
     {
     case set_rle :
-        if (!srcSize) return ERROR(srcSize_wrong);
-        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
+        RETURN_ERROR_IF(!srcSize, srcSize_wrong);
+        RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected);
         {   U32 const symbol = *(const BYTE*)src;
             U32 const baseline = baseValue[symbol];
             U32 const nbBits = nbAdditionalBits[symbol];
@@ -449,7 +453,7 @@
         *DTablePtr = defaultTable;
         return 0;
     case set_repeat:
-        if (!flagRepeatTable) return ERROR(corruption_detected);
+        RETURN_ERROR_IF(!flagRepeatTable, corruption_detected);
         /* prefetch FSE table if used */
         if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
             const void* const pStart = *DTablePtr;
@@ -461,15 +465,15 @@
         {   unsigned tableLog;
             S16 norm[MaxSeq+1];
             size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
-            if (FSE_isError(headerSize)) return ERROR(corruption_detected);
-            if (tableLog > maxLog) return ERROR(corruption_detected);
+            RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected);
+            RETURN_ERROR_IF(tableLog > maxLog, corruption_detected);
             ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
             *DTablePtr = DTableSpace;
             return headerSize;
         }
-    default :   /* impossible */
+    default :
         assert(0);
-        return ERROR(GENERIC);
+        RETURN_ERROR(GENERIC, "impossible");
     }
 }
 
@@ -483,28 +487,28 @@
     DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
 
     /* check */
-    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
+    RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong);
 
     /* SeqHead */
     nbSeq = *ip++;
     if (!nbSeq) {
         *nbSeqPtr=0;
-        if (srcSize != 1) return ERROR(srcSize_wrong);
+        RETURN_ERROR_IF(srcSize != 1, srcSize_wrong);
         return 1;
     }
     if (nbSeq > 0x7F) {
         if (nbSeq == 0xFF) {
-            if (ip+2 > iend) return ERROR(srcSize_wrong);
+            RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong);
             nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
         } else {
-            if (ip >= iend) return ERROR(srcSize_wrong);
+            RETURN_ERROR_IF(ip >= iend, srcSize_wrong);
             nbSeq = ((nbSeq-0x80)<<8) + *ip++;
         }
     }
     *nbSeqPtr = nbSeq;
 
     /* FSE table descriptors */
-    if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
+    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong); /* minimum possible size: 1 byte for symbol encoding types */
     {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
         symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
         symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
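
The sequences-section header above encodes the sequence count in 1-3 bytes, and the relaxed `ip+1 > iend` check now demands only the single byte that is always present. A round-trip sketch of that encoding; the reader mirrors the parsing above, and the LONGNBSEQ value of 0x7F00 is taken from the library's internal header (an assumption worth verifying against zstd_internal.h):

    #include <stdint.h>
    #include <stdio.h>

    #define LONGNBSEQ 0x7F00

    /* Read the variable-length sequence count:
     *   byte < 0x80          -> 1 byte,  nbSeq = byte
     *   0x80 <= byte < 0xFF  -> 2 bytes, nbSeq = ((byte-0x80)<<8) + next
     *   byte == 0xFF         -> 3 bytes, nbSeq = LE16(next two) + LONGNBSEQ */
    static int read_nbseq(const uint8_t* ip, const uint8_t* iend, unsigned* nbSeq)
    {
        if (ip >= iend) return -1;
        unsigned b = *ip++;
        if (b < 0x80) { *nbSeq = b; return 1; }
        if (b == 0xFF) {
            if (ip + 2 > iend) return -1;
            *nbSeq = (unsigned)(ip[0] | (ip[1] << 8)) + LONGNBSEQ;
            return 3;
        }
        if (ip >= iend) return -1;
        *nbSeq = ((b - 0x80) << 8) + *ip;
        return 2;
    }

    int main(void)
    {
        uint8_t one[] = { 0x42 }, two[] = { 0x81, 0x10 }, three[] = { 0xFF, 0x34, 0x12 };
        unsigned n;
        read_nbseq(one, one + 1, &n);     printf("%u\n", n);  /* 66 */
        read_nbseq(two, two + 2, &n);     printf("%u\n", n);  /* 272 */
        read_nbseq(three, three + 3, &n); printf("%u\n", n);  /* 0x1234 + 0x7F00 = 37172 */
        return 0;
    }
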
@@ -517,7 +521,7 @@
                                                       LL_base, LL_bits,
                                                       LL_defaultDTable, dctx->fseEntropy,
                                                       dctx->ddictIsCold, nbSeq);
-            if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
+            RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected);
             ip += llhSize;
         }
 
@@ -527,7 +531,7 @@
                                                       OF_base, OF_bits,
                                                       OF_defaultDTable, dctx->fseEntropy,
                                                       dctx->ddictIsCold, nbSeq);
-            if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
+            RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected);
             ip += ofhSize;
         }
 
@@ -537,7 +541,7 @@
                                                       ML_base, ML_bits,
                                                       ML_defaultDTable, dctx->fseEntropy,
                                                       dctx->ddictIsCold, nbSeq);
-            if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
+            RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected);
             ip += mlhSize;
         }
     }
@@ -590,8 +594,8 @@
     const BYTE* match = oLitEnd - sequence.offset;
 
     /* check */
-    if (oMatchEnd>oend) return ERROR(dstSize_tooSmall);   /* last match must fit within dstBuffer */
-    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* try to read beyond literal buffer */
+    RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
+    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
 
     /* copy literals */
     while (op < oLitEnd) *op++ = *(*litPtr)++;
@@ -599,7 +603,7 @@
     /* copy Match */
     if (sequence.offset > (size_t)(oLitEnd - base)) {
         /* offset beyond prefix */
-        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase), corruption_detected);
         match = dictEnd - (base-match);
         if (match + sequence.matchLength <= dictEnd) {
             memmove(oLitEnd, match, sequence.matchLength);
@@ -631,22 +635,22 @@
     const BYTE* match = oLitEnd - sequence.offset;
 
     /* check */
-    if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
-    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
+    RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
+    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
     if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
 
     /* copy Literals */
-    ZSTD_copy8(op, *litPtr);
     if (sequence.litLength > 8)
-        ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+        ZSTD_wildcopy_16min(op, (*litPtr), sequence.litLength, ZSTD_no_overlap);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+    else
+        ZSTD_copy8(op, *litPtr);
     op = oLitEnd;
     *litPtr = iLitEnd;   /* update for next sequence */
 
     /* copy Match */
     if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
         /* offset beyond prefix -> go into extDict */
-        if (sequence.offset > (size_t)(oLitEnd - virtualStart))
-            return ERROR(corruption_detected);
+        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
         match = dictEnd + (match - prefixStart);
         if (match + sequence.matchLength <= dictEnd) {
             memmove(oLitEnd, match, sequence.matchLength);
@@ -686,13 +690,13 @@
 
     if (oMatchEnd > oend-(16-MINMATCH)) {
         if (op < oend_w) {
-            ZSTD_wildcopy(op, match, oend_w - op);
+            ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
             match += oend_w - op;
             op = oend_w;
         }
         while (op < oMatchEnd) *op++ = *match++;
     } else {
-        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);   /* works even if matchLength < 8 */
     }
     return sequenceLength;
 }
@@ -712,21 +716,23 @@
     const BYTE* match = sequence.match;
 
     /* check */
-    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
-    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
+    RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
+    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
     if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
 
     /* copy Literals */
-    ZSTD_copy8(op, *litPtr);  /* note : op <= oLitEnd <= oend_w == oend - 8 */
     if (sequence.litLength > 8)
-        ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+        ZSTD_wildcopy_16min(op, *litPtr, sequence.litLength, ZSTD_no_overlap);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+    else
+        ZSTD_copy8(op, *litPtr);  /* note : op <= oLitEnd <= oend_w == oend - 8 */
+
     op = oLitEnd;
     *litPtr = iLitEnd;   /* update for next sequence */
 
     /* copy Match */
     if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
         /* offset beyond prefix */
-        if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected);
+        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
         if (match + sequence.matchLength <= dictEnd) {
             memmove(oLitEnd, match, sequence.matchLength);
             return sequenceLength;
@@ -766,13 +772,13 @@
 
     if (oMatchEnd > oend-(16-MINMATCH)) {
         if (op < oend_w) {
-            ZSTD_wildcopy(op, match, oend_w - op);
+            ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
             match += oend_w - op;
             op = oend_w;
         }
         while (op < oMatchEnd) *op++ = *match++;
     } else {
-        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);   /* works even if matchLength < 8 */
     }
     return sequenceLength;
 }
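
Both execSequence variants now tell ZSTD_wildcopy whether source and destination may overlap (ZSTD_overlap_src_before_dst for matches, ZSTD_no_overlap for literals): a match with a small offset overlaps its own output, so a plain block copy would read bytes it has not written yet. A byte-level sketch of why the match copy must tolerate overlap; this is illustrative, not the vendored fast path:

    #include <stdio.h>

    /* With offset=1 the match copy replicates the previous byte: dst and
     * src overlap, so memcpy is undefined and the copy must proceed
     * forward byte by byte (the real wildcopy uses offset-aware copies). */
    static void overlap_copy(unsigned char* op, size_t offset, size_t matchLength)
    {
        const unsigned char* match = op - offset;
        while (matchLength--) *op++ = *match++;
    }

    int main(void)
    {
        unsigned char buf[16] = "abc";
        overlap_copy(buf + 3, 1, 5);      /* replicate 'c' five times */
        buf[8] = '\0';
        printf("%s\n", buf);              /* abcccccc */
        return 0;
    }
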
@@ -801,7 +807,7 @@
 /* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
  * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
  * bits before reloading. This value is the maximum number of bytes we read
- * after reloading when we are decoding long offets.
+ * after reloading when we are decoding long offsets.
  */
 #define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
     (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
@@ -889,6 +895,7 @@
 }
 
 FORCE_INLINE_TEMPLATE size_t
+DONT_VECTORIZE
 ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
                                void* dst, size_t maxDstSize,
                          const void* seqStart, size_t seqSize, int nbSeq,
@@ -911,11 +918,18 @@
         seqState_t seqState;
         dctx->fseEntropy = 1;
         { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
-        CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
+        RETURN_ERROR_IF(
+            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+            corruption_detected);
         ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
         ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
         ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
 
+        ZSTD_STATIC_ASSERT(
+                BIT_DStream_unfinished < BIT_DStream_completed &&
+                BIT_DStream_endOfBuffer < BIT_DStream_completed &&
+                BIT_DStream_completed < BIT_DStream_overflow);
+
         for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
             nbSeq--;
             {   seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
@@ -927,14 +941,15 @@
 
         /* check if reached exact end */
         DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
-        if (nbSeq) return ERROR(corruption_detected);
+        RETURN_ERROR_IF(nbSeq, corruption_detected);
+        RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected);
         /* save reps for next block */
         { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
     }
 
     /* last literal segment */
     {   size_t const lastLLSize = litEnd - litPtr;
-        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
         memcpy(op, litPtr, lastLLSize);
         op += lastLLSize;
     }
@@ -1066,7 +1081,9 @@
         seqState.pos = (size_t)(op-prefixStart);
         seqState.dictEnd = dictEnd;
         assert(iend >= ip);
-        CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
+        RETURN_ERROR_IF(
+            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+            corruption_detected);
         ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
         ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
         ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
@@ -1076,7 +1093,7 @@
             sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
             PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
         }
-        if (seqNb<seqAdvance) return ERROR(corruption_detected);
+        RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected);
 
         /* decode and decompress */
         for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
@@ -1087,7 +1104,7 @@
             sequences[seqNb & STORED_SEQS_MASK] = sequence;
             op += oneSeqSize;
         }
-        if (seqNb<nbSeq) return ERROR(corruption_detected);
+        RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected);
 
         /* finish queue */
         seqNb -= seqAdvance;
@@ -1103,7 +1120,7 @@
 
     /* last literal segment */
     {   size_t const lastLLSize = litEnd - litPtr;
-        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
         memcpy(op, litPtr, lastLLSize);
         op += lastLLSize;
     }
@@ -1127,6 +1144,7 @@
 
 #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
 static TARGET_ATTRIBUTE("bmi2") size_t
+DONT_VECTORIZE
 ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
                                  void* dst, size_t maxDstSize,
                            const void* seqStart, size_t seqSize, int nbSeq,
@@ -1176,7 +1194,7 @@
 /* ZSTD_decompressSequencesLong() :
  * decompression function triggered when a minimum share of offsets is considered "long",
  * aka out of cache.
- * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes mearning "farther than memory cache distance".
+ * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
  * This function will try to mitigate main memory latency through the use of prefetching */
 static size_t
 ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
@@ -1240,7 +1258,7 @@
     ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
     DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
 
-    if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
+    RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong);
 
     /* Decode literals section */
     {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
--- a/contrib/python-zstandard/zstd/decompress/zstd_decompress_internal.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/decompress/zstd_decompress_internal.h	Mon Oct 21 11:09:48 2019 -0400
@@ -89,6 +89,12 @@
 typedef enum { zdss_init=0, zdss_loadHeader,
                zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
 
+typedef enum {
+    ZSTD_use_indefinitely = -1,  /* Use the dictionary indefinitely */
+    ZSTD_dont_use = 0,           /* Do not use the dictionary (if one exists free it) */
+    ZSTD_use_once = 1            /* Use the dictionary once and set to ZSTD_dont_use */
+} ZSTD_dictUses_e;
+
 struct ZSTD_DCtx_s
 {
     const ZSTD_seqSymbol* LLTptr;
@@ -123,6 +129,7 @@
     const ZSTD_DDict* ddict;     /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
     U32 dictID;
     int ddictIsCold;             /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
+    ZSTD_dictUses_e dictUses;
 
     /* streaming */
     ZSTD_dStreamStage streamStage;
--- a/contrib/python-zstandard/zstd/dictBuilder/cover.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/dictBuilder/cover.c	Mon Oct 21 11:09:48 2019 -0400
@@ -391,7 +391,7 @@
  *
  *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
  *
- * Once the dmer d is in the dictionay we set F(d) = 0.
+ * Once the dmer d is in the dictionary we set F(d) = 0.
  */
 static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
                                            COVER_map_t *activeDmers, U32 begin,
@@ -435,7 +435,7 @@
       U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
       activeSegment.begin += 1;
       *delDmerOcc -= 1;
-      /* If this is the last occurence of the dmer, subtract its score */
+      /* If this is the last occurrence of the dmer, subtract its score */
       if (*delDmerOcc == 0) {
         COVER_map_remove(activeDmers, delDmer);
         activeSegment.score -= freqs[delDmer];
@@ -526,10 +526,10 @@
  * Prepare a context for dictionary building.
  * The context is only dependent on the parameter `d` and can be used multiple
  * times.
- * Returns 1 on success or zero on error.
+ * Returns 0 on success or error code on error.
  * The context must be destroyed with `COVER_ctx_destroy()`.
  */
-static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
+static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
                           const size_t *samplesSizes, unsigned nbSamples,
                           unsigned d, double splitPoint) {
   const BYTE *const samples = (const BYTE *)samplesBuffer;
@@ -544,17 +544,17 @@
       totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
     DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
                  (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
-    return 0;
+    return ERROR(srcSize_wrong);
   }
   /* Check if there are at least 5 training samples */
   if (nbTrainSamples < 5) {
     DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples);
-    return 0;
+    return ERROR(srcSize_wrong);
   }
   /* Check if there's a testing sample */
   if (nbTestSamples < 1) {
     DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples);
-    return 0;
+    return ERROR(srcSize_wrong);
   }
   /* Zero the context */
   memset(ctx, 0, sizeof(*ctx));
@@ -577,7 +577,7 @@
   if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
     DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
     COVER_ctx_destroy(ctx);
-    return 0;
+    return ERROR(memory_allocation);
   }
   ctx->freqs = NULL;
   ctx->d = d;
@@ -624,7 +624,40 @@
                 (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
   ctx->freqs = ctx->suffix;
   ctx->suffix = NULL;
-  return 1;
+  return 0;
+}
+
+void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
+{
+  const double ratio = (double)nbDmers / maxDictSize;
+  if (ratio >= 10) {
+      return;
+  }
+  LOCALDISPLAYLEVEL(displayLevel, 1,
+                    "WARNING: The maximum dictionary size %u is too large "
+                    "compared to the source size %u! "
+                    "size(source)/size(dictionary) = %f, but it should be >= "
+                    "10! This may lead to a subpar dictionary! We recommend "
+                    "training on sources at least 10x, and up to 100x the "
+                    "size of the dictionary!\n", (U32)maxDictSize,
+                    (U32)nbDmers, ratio);
+}
+
+COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize,
+                                       U32 nbDmers, U32 k, U32 passes)
+{
+  const U32 minEpochSize = k * 10;
+  COVER_epoch_info_t epochs;
+  epochs.num = MAX(1, maxDictSize / k / passes);
+  epochs.size = nbDmers / epochs.num;
+  if (epochs.size >= minEpochSize) {
+      assert(epochs.size * epochs.num <= nbDmers);
+      return epochs;
+  }
+  epochs.size = MIN(minEpochSize, nbDmers);
+  epochs.num = nbDmers / epochs.size;
+  assert(epochs.size * epochs.num <= nbDmers);
+  return epochs;
 }
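
COVER_computeEpochs() first tries epochs.num = max(1, maxDictSize/k/passes) equal slices, then falls back to fewer, larger epochs whenever a slice would drop below k*10 dmers. A worked check of both branches; the function body is reproduced from the hunk above (asserts dropped), and the U32 field types are assumed for the sketch:

    #include <stdio.h>

    #define MAX(a,b) ((a)>(b)?(a):(b))
    #define MIN(a,b) ((a)<(b)?(a):(b))
    typedef unsigned U32;
    typedef struct { U32 num; U32 size; } COVER_epoch_info_t;

    COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers,
                                           U32 k, U32 passes)
    {
      const U32 minEpochSize = k * 10;
      COVER_epoch_info_t epochs;
      epochs.num = MAX(1, maxDictSize / k / passes);
      epochs.size = nbDmers / epochs.num;
      if (epochs.size >= minEpochSize) return epochs;
      epochs.size = MIN(minEpochSize, nbDmers);
      epochs.num = nbDmers / epochs.size;
      return epochs;
    }

    int main(void)
    {
      /* Plenty of data: 112640/512/4 = 55 epochs of 10000000/55 = 181818 dmers. */
      COVER_epoch_info_t a = COVER_computeEpochs(112640, 10000000, 512, 4);
      /* Tiny corpus: 55 epochs of 90 dmers < 512*10, so fall back to one
       * epoch of 5000 dmers. */
      COVER_epoch_info_t b = COVER_computeEpochs(112640, 5000, 512, 4);
      printf("%u x %u, %u x %u\n", a.num, a.size, b.num, b.size);
      return 0;
    }
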
 
 /**
@@ -636,28 +669,34 @@
                                     ZDICT_cover_params_t parameters) {
   BYTE *const dict = (BYTE *)dictBuffer;
   size_t tail = dictBufferCapacity;
-  /* Divide the data up into epochs of equal size.
-   * We will select at least one segment from each epoch.
-   */
-  const unsigned epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k / 4));
-  const unsigned epochSize = (U32)(ctx->suffixSize / epochs);
+  /* Divide the data into epochs. We will select one segment from each epoch. */
+  const COVER_epoch_info_t epochs = COVER_computeEpochs(
+      (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4);
+  const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3));
+  size_t zeroScoreRun = 0;
   size_t epoch;
   DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
-                epochs, epochSize);
+                (U32)epochs.num, (U32)epochs.size);
   /* Loop through the epochs until there are no more segments or the dictionary
    * is full.
    */
-  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
-    const U32 epochBegin = (U32)(epoch * epochSize);
-    const U32 epochEnd = epochBegin + epochSize;
+  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
+    const U32 epochBegin = (U32)(epoch * epochs.size);
+    const U32 epochEnd = epochBegin + epochs.size;
     size_t segmentSize;
     /* Select a segment */
     COVER_segment_t segment = COVER_selectSegment(
         ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
-    /* If the segment covers no dmers, then we are out of content */
+    /* If the segment covers no dmers, then we are out of content.
+     * There may be new content in other epochs, so continue for some time.
+     */
     if (segment.score == 0) {
-      break;
+      if (++zeroScoreRun >= maxZeroScoreRun) {
+          break;
+      }
+      continue;
     }
+    zeroScoreRun = 0;
     /* Trim the segment if necessary and if it is too small then we are done */
     segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
     if (segmentSize < parameters.d) {
@@ -690,11 +729,11 @@
   /* Checks */
   if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
     DISPLAYLEVEL(1, "Cover parameters incorrect\n");
-    return ERROR(GENERIC);
+    return ERROR(parameter_outOfBound);
   }
   if (nbSamples == 0) {
     DISPLAYLEVEL(1, "Cover must have at least one input file\n");
-    return ERROR(GENERIC);
+    return ERROR(srcSize_wrong);
   }
   if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
     DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
@@ -702,14 +741,18 @@
     return ERROR(dstSize_tooSmall);
   }
   /* Initialize context and activeDmers */
-  if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
-                      parameters.d, parameters.splitPoint)) {
-    return ERROR(GENERIC);
+  {
+    size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
+                      parameters.d, parameters.splitPoint);
+    if (ZSTD_isError(initVal)) {
+      return initVal;
+    }
   }
+  COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel);
   if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
     DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
     COVER_ctx_destroy(&ctx);
-    return ERROR(GENERIC);
+    return ERROR(memory_allocation);
   }
 
   DISPLAYLEVEL(2, "Building dictionary\n");
@@ -770,7 +813,7 @@
         cctx, dst, dstCapacity, samples + offsets[i],
         samplesSizes[i], cdict);
     if (ZSTD_isError(size)) {
-      totalCompressedSize = ERROR(GENERIC);
+      totalCompressedSize = size;
       goto _compressCleanup;
     }
     totalCompressedSize += size;
@@ -846,9 +889,11 @@
  * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
  * If this dictionary is the best so far save it and its parameters.
  */
-void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
-                              ZDICT_cover_params_t parameters, void *dict,
-                              size_t dictSize) {
+void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
+                              COVER_dictSelection_t selection) {
+  void* dict = selection.dictContent;
+  size_t compressedSize = selection.totalCompressedSize;
+  size_t dictSize = selection.dictSize;
   if (!best) {
     return;
   }
@@ -874,10 +919,12 @@
         }
       }
       /* Save the dictionary, parameters, and size */
-      memcpy(best->dict, dict, dictSize);
-      best->dictSize = dictSize;
-      best->parameters = parameters;
-      best->compressedSize = compressedSize;
+      if (dict) {
+        memcpy(best->dict, dict, dictSize);
+        best->dictSize = dictSize;
+        best->parameters = parameters;
+        best->compressedSize = compressedSize;
+      }
     }
     if (liveJobs == 0) {
       ZSTD_pthread_cond_broadcast(&best->cond);
@@ -886,6 +933,111 @@
   }
 }
 
+COVER_dictSelection_t COVER_dictSelectionError(size_t error) {
+    COVER_dictSelection_t selection = { NULL, 0, error };
+    return selection;
+}
+
+unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) {
+  return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent);
+}
+
+void COVER_dictSelectionFree(COVER_dictSelection_t selection){
+  free(selection.dictContent);
+}
+
+COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,
+        size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
+        size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) {
+
+  size_t largestDict = 0;
+  size_t largestCompressed = 0;
+  BYTE* customDictContentEnd = customDictContent + dictContentSize;
+
+  BYTE * largestDictbuffer = (BYTE *)malloc(dictContentSize);
+  BYTE * candidateDictBuffer = (BYTE *)malloc(dictContentSize);
+  double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00;
+
+  if (!largestDictbuffer || !candidateDictBuffer) {
+    free(largestDictbuffer);
+    free(candidateDictBuffer);
+    return COVER_dictSelectionError(dictContentSize);
+  }
+
+  /* Initial dictionary size and compressed size */
+  memcpy(largestDictbuffer, customDictContent, dictContentSize);
+  dictContentSize = ZDICT_finalizeDictionary(
+    largestDictbuffer, dictContentSize, customDictContent, dictContentSize,
+    samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);
+
+  if (ZDICT_isError(dictContentSize)) {
+    free(largestDictbuffer);
+    free(candidateDictBuffer);
+    return COVER_dictSelectionError(dictContentSize);
+  }
+
+  totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
+                                                       samplesBuffer, offsets,
+                                                       nbCheckSamples, nbSamples,
+                                                       largestDictbuffer, dictContentSize);
+
+  if (ZSTD_isError(totalCompressedSize)) {
+    free(largestDictbuffer);
+    free(candidateDictBuffer);
+    return COVER_dictSelectionError(totalCompressedSize);
+  }
+
+  if (params.shrinkDict == 0) {
+    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };
+    free(candidateDictBuffer);
+    return selection;
+  }
+
+  largestDict = dictContentSize;
+  largestCompressed = totalCompressedSize;
+  dictContentSize = ZDICT_DICTSIZE_MIN;
+
+  /* Largest dict is initially at least ZDICT_DICTSIZE_MIN; candidate sizes
+   * double from there, and the first candidate whose compressed size stays
+   * within the regression tolerance is selected. */
+  while (dictContentSize < largestDict) {
+    memcpy(candidateDictBuffer, largestDictbuffer, largestDict);
+    dictContentSize = ZDICT_finalizeDictionary(
+      candidateDictBuffer, dictContentSize, customDictContentEnd - dictContentSize, dictContentSize,
+      samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);
+
+    if (ZDICT_isError(dictContentSize)) {
+      free(largestDictbuffer);
+      free(candidateDictBuffer);
+      return COVER_dictSelectionError(dictContentSize);
+
+    }
+
+    totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
+                                                         samplesBuffer, offsets,
+                                                         nbCheckSamples, nbSamples,
+                                                         candidateDictBuffer, dictContentSize);
+
+    if (ZSTD_isError(totalCompressedSize)) {
+      free(largestDictbuffer);
+      free(candidateDictBuffer);
+      return COVER_dictSelectionError(totalCompressedSize);
+    }
+
+    if (totalCompressedSize <= largestCompressed * regressionTolerance) {
+      COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize };
+      free(largestDictbuffer);
+      return selection;
+    }
+    dictContentSize *= 2;
+  }
+  dictContentSize = largestDict;
+  totalCompressedSize = largestCompressed;
+  {
+    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };
+    free(candidateDictBuffer);
+    return selection;
+  }
+}
+
 /**
  * Parameters for COVER_tryParameters().
  */
@@ -911,6 +1063,7 @@
   /* Allocate space for hash table, dict, and freqs */
   COVER_map_t activeDmers;
   BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
+  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
   U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
   if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
     DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
@@ -926,29 +1079,21 @@
   {
     const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
                                               dictBufferCapacity, parameters);
-    dictBufferCapacity = ZDICT_finalizeDictionary(
-        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
-        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples,
-        parameters.zParams);
-    if (ZDICT_isError(dictBufferCapacity)) {
-      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
+    selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail,
+        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
+        totalCompressedSize);
+
+    if (COVER_dictSelectionIsError(selection)) {
+      DISPLAYLEVEL(1, "Failed to select dictionary\n");
       goto _cleanup;
     }
   }
-  /* Check total compressed size */
-  totalCompressedSize = COVER_checkTotalCompressedSize(parameters, ctx->samplesSizes,
-                                                       ctx->samples, ctx->offsets,
-                                                       ctx->nbTrainSamples, ctx->nbSamples,
-                                                       dict, dictBufferCapacity);
-
 _cleanup:
-  COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
-                    dictBufferCapacity);
+  free(dict);
+  COVER_best_finish(data->best, parameters, selection);
   free(data);
   COVER_map_destroy(&activeDmers);
-  if (dict) {
-    free(dict);
-  }
+  COVER_dictSelectionFree(selection);
   if (freqs) {
     free(freqs);
   }
@@ -970,6 +1115,7 @@
   const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
   const unsigned kIterations =
       (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
+  const unsigned shrinkDict = 0;
   /* Local variables */
   const int displayLevel = parameters->zParams.notificationLevel;
   unsigned iteration = 1;
@@ -977,19 +1123,20 @@
   unsigned k;
   COVER_best_t best;
   POOL_ctx *pool = NULL;
+  int warned = 0;
 
   /* Checks */
   if (splitPoint <= 0 || splitPoint > 1) {
     LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
-    return ERROR(GENERIC);
+    return ERROR(parameter_outOfBound);
   }
   if (kMinK < kMaxD || kMaxK < kMinK) {
     LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
-    return ERROR(GENERIC);
+    return ERROR(parameter_outOfBound);
   }
   if (nbSamples == 0) {
     DISPLAYLEVEL(1, "Cover must have at least one input file\n");
-    return ERROR(GENERIC);
+    return ERROR(srcSize_wrong);
   }
   if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
     DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
@@ -1013,11 +1160,18 @@
     /* Initialize the context for this value of d */
     COVER_ctx_t ctx;
     LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
-    if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint)) {
-      LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
-      COVER_best_destroy(&best);
-      POOL_free(pool);
-      return ERROR(GENERIC);
+    {
+      const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint);
+      if (ZSTD_isError(initVal)) {
+        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
+        COVER_best_destroy(&best);
+        POOL_free(pool);
+        return initVal;
+      }
+    }
+    if (!warned) {
+      COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);
+      warned = 1;
     }
     /* Loop through k reusing the same context */
     for (k = kMinK; k <= kMaxK; k += kStepSize) {
@@ -1030,7 +1184,7 @@
         COVER_best_destroy(&best);
         COVER_ctx_destroy(&ctx);
         POOL_free(pool);
-        return ERROR(GENERIC);
+        return ERROR(memory_allocation);
       }
       data->ctx = &ctx;
       data->best = &best;
@@ -1040,6 +1194,7 @@
       data->parameters.d = d;
       data->parameters.splitPoint = splitPoint;
       data->parameters.steps = kSteps;
+      data->parameters.shrinkDict = shrinkDict;
       data->parameters.zParams.notificationLevel = g_displayLevel;
       /* Check the parameters */
       if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
--- a/contrib/python-zstandard/zstd/dictBuilder/cover.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/dictBuilder/cover.h	Mon Oct 21 11:09:48 2019 -0400
@@ -39,6 +39,44 @@
 } COVER_segment_t;
 
 /**
+ * Number of epochs and size of each epoch.
+ */
+typedef struct {
+  U32 num;
+  U32 size;
+} COVER_epoch_info_t;
+
+/**
+ * Struct used for the dictionary selection function.
+ */
+typedef struct COVER_dictSelection {
+  BYTE* dictContent;
+  size_t dictSize;
+  size_t totalCompressedSize;
+} COVER_dictSelection_t;
+
+/**
+ * Computes the number of epochs and the size of each epoch.
+ * We will make sure that each epoch gets at least 10 * k bytes.
+ *
+ * The COVER algorithms divide the data up into epochs of equal size and
+ * select one segment from each epoch.
+ *
+ * @param maxDictSize The maximum allowed dictionary size.
+ * @param nbDmers     The number of dmers we are training on.
+ * @param k           The parameter k (segment size).
+ * @param passes      The target number of passes over the dmer corpus.
+ *                    More passes means a better dictionary.
+ */
+COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers,
+                                       U32 k, U32 passes);
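+/* A hedged sketch (hypothetical helper name, not necessarily the shipped
+ * implementation) of one way to satisfy the contract above: target roughly
+ * maxDictSize / (k * passes) epochs, then grow each epoch to at least
+ * 10 * k dmers when the corpus allows it. Assumes the usual MAX/MIN macros
+ * and nbDmers > 0.
+ *
+ *   COVER_epoch_info_t sketch_computeEpochs(U32 maxDictSize, U32 nbDmers,
+ *                                           U32 k, U32 passes)
+ *   {
+ *     const U32 minEpochSize = k * 10;
+ *     COVER_epoch_info_t epochs;
+ *     epochs.num = MAX(1, maxDictSize / k / passes);
+ *     epochs.size = nbDmers / epochs.num;
+ *     if (epochs.size >= minEpochSize)
+ *         return epochs;
+ *     // small corpus: keep each epoch at minEpochSize and reduce the count
+ *     epochs.size = MIN(minEpochSize, nbDmers);
+ *     epochs.num = nbDmers / epochs.size;
+ *     return epochs;
+ *   }
+ */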
+
+/**
+ * Warns the user when their corpus is too small.
+ */
+void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel);
+
+/**
  *  Checks total compressed size of a dictionary
  */
 size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
@@ -78,6 +116,32 @@
  * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
  * If this dictionary is the best so far save it and its parameters.
  */
-void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
-                       ZDICT_cover_params_t parameters, void *dict,
-                       size_t dictSize);
+void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
+                       COVER_dictSelection_t selection);
+/**
+ * Error-checking helper for COVER_selectDict(): returns nonzero when the
+ * selection is an error.
+ */
+unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection);
+
+/**
+ * Error constructor for COVER_selectDict(): returns a selection whose
+ * totalCompressedSize field holds the given ZSTD error code.
+ */
+COVER_dictSelection_t COVER_dictSelectionError(size_t error);
+
+/**
+ * Always call after COVER_selectDict() to free the memory owned by the
+ * returned selection.
+ */
+void COVER_dictSelectionFree(COVER_dictSelection_t selection);
+
+/**
+ * Finalizes the dictionary and selects one based on whether the shrink-dict
+ * flag was enabled. If enabled, the selected dictionary is the smallest one
+ * whose compressed size is within the specified regression of the largest
+ * dictionary's compressed size.
+ */
+COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,
+                       size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
+                       size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize);
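+/* A minimal caller-side sketch of the selection API above, mirroring how
+ * COVER_tryParameters() uses it in cover.c (consumer name hypothetical;
+ * error handling trimmed):
+ *
+ *   COVER_dictSelection_t sel = COVER_selectDict(dict + tail,
+ *       dictBufferCapacity - tail, samples, samplesSizes, nbFinalizeSamples,
+ *       nbCheckSamples, nbSamples, params, offsets, totalCompressedSize);
+ *   if (!COVER_dictSelectionIsError(sel)) {
+ *       useDictionary(sel.dictContent, sel.dictSize);  // hypothetical consumer
+ *   }
+ *   COVER_dictSelectionFree(sel);  // always free the selection
+ */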
--- a/contrib/python-zstandard/zstd/dictBuilder/fastcover.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/dictBuilder/fastcover.c	Mon Oct 21 11:09:48 2019 -0400
@@ -132,7 +132,7 @@
  *
  *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
  *
- * Once the dmer with hash value d is in the dictionay we set F(d) = 0.
+ * Once the dmer with hash value d is in the dictionary we set F(d) = 0.
  */
 static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx,
                                               U32 *freqs, U32 begin, U32 end,
@@ -161,7 +161,7 @@
     /* Get hash value of current dmer */
     const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);
 
-    /* Add frequency of this index to score if this is the first occurence of index in active segment */
+    /* Add frequency of this index to score if this is the first occurrence of index in active segment */
     if (segmentFreqs[idx] == 0) {
       activeSegment.score += freqs[idx];
     }
@@ -287,10 +287,10 @@
  * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
  * times.
- * Returns 1 on success or zero on error.
+ * Returns 0 on success or an error code on failure.
  * The context must be destroyed with `FASTCOVER_ctx_destroy()`.
  */
-static int
+static size_t
 FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,
                    const void* samplesBuffer,
                    const size_t* samplesSizes, unsigned nbSamples,
@@ -310,19 +310,19 @@
         totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {
         DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
                     (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
-        return 0;
+        return ERROR(srcSize_wrong);
     }
 
     /* Check if there are at least 5 training samples */
     if (nbTrainSamples < 5) {
         DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples);
-        return 0;
+        return ERROR(srcSize_wrong);
     }
 
     /* Check if there's testing sample */
     if (nbTestSamples < 1) {
         DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples);
-        return 0;
+        return ERROR(srcSize_wrong);
     }
 
     /* Zero the context */
@@ -347,7 +347,7 @@
     if (ctx->offsets == NULL) {
         DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n");
         FASTCOVER_ctx_destroy(ctx);
-        return 0;
+        return ERROR(memory_allocation);
     }
 
     /* Fill offsets from the samplesSizes */
@@ -364,13 +364,13 @@
     if (ctx->freqs == NULL) {
         DISPLAYLEVEL(1, "Failed to allocate frequency table \n");
         FASTCOVER_ctx_destroy(ctx);
-        return 0;
+        return ERROR(memory_allocation);
     }
 
     DISPLAYLEVEL(2, "Computing frequencies\n");
     FASTCOVER_computeFrequency(ctx->freqs, ctx);
 
-    return 1;
+    return 0;
 }
 
 
@@ -386,29 +386,35 @@
 {
   BYTE *const dict = (BYTE *)dictBuffer;
   size_t tail = dictBufferCapacity;
-  /* Divide the data up into epochs of equal size.
-   * We will select at least one segment from each epoch.
-   */
-  const unsigned epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k));
-  const unsigned epochSize = (U32)(ctx->nbDmers / epochs);
+  /* Divide the data into epochs. We will select one segment from each epoch. */
+  const COVER_epoch_info_t epochs = COVER_computeEpochs(
+      (U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1);
+  const size_t maxZeroScoreRun = 10;
+  size_t zeroScoreRun = 0;
   size_t epoch;
   DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
-                epochs, epochSize);
+                (U32)epochs.num, (U32)epochs.size);
   /* Loop through the epochs until there are no more segments or the dictionary
    * is full.
    */
-  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
-    const U32 epochBegin = (U32)(epoch * epochSize);
-    const U32 epochEnd = epochBegin + epochSize;
+  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
+    const U32 epochBegin = (U32)(epoch * epochs.size);
+    const U32 epochEnd = epochBegin + epochs.size;
     size_t segmentSize;
     /* Select a segment */
     COVER_segment_t segment = FASTCOVER_selectSegment(
         ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs);
 
-    /* If the segment covers no dmers, then we are out of content */
+    /* If the segment covers no dmers, then we are out of content.
+     * There may be new content in other epochs, so continue for some time.
+     */
     if (segment.score == 0) {
-      break;
+      if (++zeroScoreRun >= maxZeroScoreRun) {
+          break;
+      }
+      continue;
     }
+    zeroScoreRun = 0;
 
     /* Trim the segment if necessary and if it is too small then we are done */
     segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
@@ -429,7 +435,6 @@
   return tail;
 }
 
-
 /**
  * Parameters for FASTCOVER_tryParameters().
  */
@@ -458,6 +463,7 @@
   U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16));
   /* Allocate space for hash table, dict, and freqs */
   BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
+  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
   U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32));
   if (!segmentFreqs || !dict || !freqs) {
     DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
@@ -467,27 +473,24 @@
   memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32));
   /* Build the dictionary */
   { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity,
-                                                  parameters, segmentFreqs);
+                                                    parameters, segmentFreqs);
+
     const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
-    dictBufferCapacity = ZDICT_finalizeDictionary(
-        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
-        ctx->samples, ctx->samplesSizes, nbFinalizeSamples, parameters.zParams);
-    if (ZDICT_isError(dictBufferCapacity)) {
-      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
+    selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail,
+         ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
+         totalCompressedSize);
+
+    if (COVER_dictSelectionIsError(selection)) {
+      DISPLAYLEVEL(1, "Failed to select dictionary\n");
       goto _cleanup;
     }
   }
-  /* Check total compressed size */
-  totalCompressedSize = COVER_checkTotalCompressedSize(parameters, ctx->samplesSizes,
-                                                       ctx->samples, ctx->offsets,
-                                                       ctx->nbTrainSamples, ctx->nbSamples,
-                                                       dict, dictBufferCapacity);
 _cleanup:
-  COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
-                    dictBufferCapacity);
+  free(dict);
+  COVER_best_finish(data->best, parameters, selection);
   free(data);
   free(segmentFreqs);
-  free(dict);
+  COVER_dictSelectionFree(selection);
   free(freqs);
 }
 
@@ -502,6 +505,7 @@
     coverParams->nbThreads = fastCoverParams.nbThreads;
     coverParams->splitPoint = fastCoverParams.splitPoint;
     coverParams->zParams = fastCoverParams.zParams;
+    coverParams->shrinkDict = fastCoverParams.shrinkDict;
 }
 
 
@@ -518,6 +522,7 @@
     fastCoverParams->f = f;
     fastCoverParams->accel = accel;
     fastCoverParams->zParams = coverParams.zParams;
+    fastCoverParams->shrinkDict = coverParams.shrinkDict;
 }
 
 
@@ -544,11 +549,11 @@
     if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f,
                                    parameters.accel)) {
       DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
-      return ERROR(GENERIC);
+      return ERROR(parameter_outOfBound);
     }
     if (nbSamples == 0) {
       DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n");
-      return ERROR(GENERIC);
+      return ERROR(srcSize_wrong);
     }
     if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
       DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
@@ -558,12 +563,16 @@
     /* Assign corresponding FASTCOVER_accel_t to accelParams*/
     accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
     /* Initialize context */
-    if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
+    {
+      size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
                             coverParams.d, parameters.splitPoint, parameters.f,
-                            accelParams)) {
-      DISPLAYLEVEL(1, "Failed to initialize context\n");
-      return ERROR(GENERIC);
+                            accelParams);
+      if (ZSTD_isError(initVal)) {
+        DISPLAYLEVEL(1, "Failed to initialize context\n");
+        return initVal;
+      }
     }
+    COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel);
     /* Build the dictionary */
     DISPLAYLEVEL(2, "Building dictionary\n");
     {
@@ -609,6 +618,7 @@
         (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
     const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f;
     const unsigned accel = parameters->accel == 0 ? DEFAULT_ACCEL : parameters->accel;
+    const unsigned shrinkDict = 0;
     /* Local variables */
     const int displayLevel = parameters->zParams.notificationLevel;
     unsigned iteration = 1;
@@ -616,22 +626,23 @@
     unsigned k;
     COVER_best_t best;
     POOL_ctx *pool = NULL;
+    int warned = 0;
     /* Checks */
     if (splitPoint <= 0 || splitPoint > 1) {
       LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n");
-      return ERROR(GENERIC);
+      return ERROR(parameter_outOfBound);
     }
     if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) {
       LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n");
-      return ERROR(GENERIC);
+      return ERROR(parameter_outOfBound);
     }
     if (kMinK < kMaxD || kMaxK < kMinK) {
       LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n");
-      return ERROR(GENERIC);
+      return ERROR(parameter_outOfBound);
     }
     if (nbSamples == 0) {
       LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n");
-      return ERROR(GENERIC);
+      return ERROR(srcSize_wrong);
     }
     if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
       LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n",
@@ -658,11 +669,18 @@
       /* Initialize the context for this value of d */
       FASTCOVER_ctx_t ctx;
       LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
-      if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams)) {
-        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
-        COVER_best_destroy(&best);
-        POOL_free(pool);
-        return ERROR(GENERIC);
+      {
+        size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams);
+        if (ZSTD_isError(initVal)) {
+          LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
+          COVER_best_destroy(&best);
+          POOL_free(pool);
+          return initVal;
+        }
+      }
+      if (!warned) {
+        COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel);
+        warned = 1;
       }
       /* Loop through k reusing the same context */
       for (k = kMinK; k <= kMaxK; k += kStepSize) {
@@ -675,7 +693,7 @@
           COVER_best_destroy(&best);
           FASTCOVER_ctx_destroy(&ctx);
           POOL_free(pool);
-          return ERROR(GENERIC);
+          return ERROR(memory_allocation);
         }
         data->ctx = &ctx;
         data->best = &best;
@@ -685,6 +703,7 @@
         data->parameters.d = d;
         data->parameters.splitPoint = splitPoint;
         data->parameters.steps = kSteps;
+        data->parameters.shrinkDict = shrinkDict;
         data->parameters.zParams.notificationLevel = g_displayLevel;
         /* Check the parameters */
         if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity,
--- a/contrib/python-zstandard/zstd/dictBuilder/zdict.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.c	Mon Oct 21 11:09:48 2019 -0400
@@ -741,7 +741,7 @@
     /* analyze, build stats, starting with literals */
     {   size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
         if (HUF_isError(maxNbBits)) {
-            eSize = ERROR(GENERIC);
+            eSize = maxNbBits;
             DISPLAYLEVEL(1, " HUF_buildCTable error \n");
             goto _cleanup;
         }
@@ -764,7 +764,7 @@
     total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];
     errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax);
     if (FSE_isError(errorCode)) {
-        eSize = ERROR(GENERIC);
+        eSize = errorCode;
         DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");
         goto _cleanup;
     }
@@ -773,7 +773,7 @@
     total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];
     errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML);
     if (FSE_isError(errorCode)) {
-        eSize = ERROR(GENERIC);
+        eSize = errorCode;
         DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");
         goto _cleanup;
     }
@@ -782,7 +782,7 @@
     total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];
     errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL);
     if (FSE_isError(errorCode)) {
-        eSize = ERROR(GENERIC);
+        eSize = errorCode;
         DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");
         goto _cleanup;
     }
@@ -791,7 +791,7 @@
     /* write result to buffer */
     {   size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog);
         if (HUF_isError(hhSize)) {
-            eSize = ERROR(GENERIC);
+            eSize = hhSize;
             DISPLAYLEVEL(1, "HUF_writeCTable error \n");
             goto _cleanup;
         }
@@ -802,7 +802,7 @@
 
     {   size_t const ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, OFFCODE_MAX, Offlog);
         if (FSE_isError(ohSize)) {
-            eSize = ERROR(GENERIC);
+            eSize = ohSize;
             DISPLAYLEVEL(1, "FSE_writeNCount error with offcodeNCount \n");
             goto _cleanup;
         }
@@ -813,7 +813,7 @@
 
     {   size_t const mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, MaxML, mlLog);
         if (FSE_isError(mhSize)) {
-            eSize = ERROR(GENERIC);
+            eSize = mhSize;
             DISPLAYLEVEL(1, "FSE_writeNCount error with matchLengthNCount \n");
             goto _cleanup;
         }
@@ -824,7 +824,7 @@
 
     {   size_t const lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, MaxLL, llLog);
         if (FSE_isError(lhSize)) {
-            eSize = ERROR(GENERIC);
+            eSize = lhSize;
             DISPLAYLEVEL(1, "FSE_writeNCount error with litlengthNCount \n");
             goto _cleanup;
         }
@@ -834,7 +834,7 @@
     }
 
     if (maxDstSize<12) {
-        eSize = ERROR(GENERIC);
+        eSize = ERROR(dstSize_tooSmall);
         DISPLAYLEVEL(1, "not enough space to write RepOffsets \n");
         goto _cleanup;
     }
--- a/contrib/python-zstandard/zstd/dictBuilder/zdict.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.h	Mon Oct 21 11:09:48 2019 -0400
@@ -46,7 +46,12 @@
  *  The resulting dictionary will be saved into `dictBuffer`.
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
  *          or an error code, which can be tested with ZDICT_isError().
- *  Note: ZDICT_trainFromBuffer() requires about 9 bytes of memory for each input byte.
+ *  Note:  Dictionary training will fail if there are not enough samples to construct a
+ *         dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).
+ *         If dictionary training fails, you should use zstd without a dictionary, as the dictionary
+ *         would have been ineffective anyway. If you believe your samples would benefit from a dictionary,
+ *         please open an issue with details, and we can look into it.
+ *  Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.
  *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
  *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
  *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.
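+/* A minimal training sketch matching the failure-mode note above (buffers
+ * and sample arrays assumed prepared by the caller):
+ *
+ *   size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
+ *                               samplesBuffer, samplesSizes, nbSamples);
+ *   if (ZDICT_isError(dictSize)) {
+ *       // training failed => compress without a dictionary
+ *   }
+ */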
@@ -89,6 +94,8 @@
     unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
     unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
     double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */
+    unsigned shrinkDict;         /* Train dictionaries to shrink in size starting from the minimum size and select the smallest dictionary that is at most shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking (a configuration sketch follows this struct) */
+    unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at most shrinkDictMaxRegression% worse than the maximum-size dictionary. */
     ZDICT_params_t zParams;
 } ZDICT_cover_params_t;
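+/* A hedged configuration sketch for the shrinking fields above: allow a
+ * smaller dictionary to compress at most 5% worse than the largest one
+ * (values illustrative; zeroed fields keep their default meanings):
+ *
+ *   ZDICT_cover_params_t params;
+ *   memset(&params, 0, sizeof(params));
+ *   params.shrinkDict = 1;               // opt in to shrinking
+ *   params.shrinkDictMaxRegression = 5;  // tolerate <= 5% regression
+ */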
 
@@ -100,6 +107,9 @@
     unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
     double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */
     unsigned accel;              /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */
+    unsigned shrinkDict;         /* Train dictionaries to shrink in size starting from the minimum size and select the smallest dictionary that is at most shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */
+    unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at most shrinkDictMaxRegression% worse than the maximum-size dictionary. */
+
     ZDICT_params_t zParams;
 } ZDICT_fastCover_params_t;
 
@@ -110,6 +120,7 @@
  *  The resulting dictionary will be saved into `dictBuffer`.
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
  *          or an error code, which can be tested with ZDICT_isError().
+ *          See ZDICT_trainFromBuffer() for details on failure modes.
  *  Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.
  *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
  *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
@@ -133,8 +144,9 @@
  * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].
  *
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- *           or an error code, which can be tested with ZDICT_isError().
- *           On success `*parameters` contains the parameters selected.
+ *          or an error code, which can be tested with ZDICT_isError().
+ *          On success `*parameters` contains the parameters selected.
+ *          See ZDICT_trainFromBuffer() for details on failure modes.
  * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread.
  */
 ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
@@ -151,7 +163,8 @@
  *  The resulting dictionary will be saved into `dictBuffer`.
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
  *          or an error code, which can be tested with ZDICT_isError().
- *  Note: ZDICT_trainFromBuffer_fastCover() requires about 1 bytes of memory for each input byte and additionally another 6 * 2^f bytes of memory .
+ *          See ZDICT_trainFromBuffer() for details on failure modes.
+ *  Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.
  *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
  *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
  *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.
@@ -175,9 +188,10 @@
  * If accel is zero, default value of 1 is used.
  *
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- *           or an error code, which can be tested with ZDICT_isError().
- *           On success `*parameters` contains the parameters selected.
- * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 1 byte of memory for each input byte and additionally another 6 * 2^f bytes of memory for each thread.
+ *          or an error code, which can be tested with ZDICT_isError().
+ *          On success `*parameters` contains the parameters selected.
+ *          See ZDICT_trainFromBuffer() for details on failure modes.
+ * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.
  */
 ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer,
                     size_t dictBufferCapacity, const void* samplesBuffer,
@@ -195,7 +209,7 @@
  * maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
  *
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
- *           or an error code, which can be tested by ZDICT_isError().
+ *          or an error code, which can be tested by ZDICT_isError().
  * Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
  * Note 2: dictBuffer and dictContent can overlap
  */
@@ -219,6 +233,7 @@
  * `parameters` is optional and can be provided with values set to 0 to mean "default".
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
  *          or an error code, which can be tested with ZDICT_isError().
+ *          See ZDICT_trainFromBuffer() for details on failure modes.
  *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
  *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
  *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.
--- a/contrib/python-zstandard/zstd/zstd.h	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python-zstandard/zstd/zstd.h	Mon Oct 21 11:09:48 2019 -0400
@@ -70,8 +70,8 @@
 
 /*------   Version   ------*/
 #define ZSTD_VERSION_MAJOR    1
-#define ZSTD_VERSION_MINOR    3
-#define ZSTD_VERSION_RELEASE  8
+#define ZSTD_VERSION_MINOR    4
+#define ZSTD_VERSION_RELEASE  3
 
 #define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
 ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< to check runtime library version */
@@ -82,13 +82,28 @@
 #define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
 ZSTDLIB_API const char* ZSTD_versionString(void);   /* requires v1.3.0+ */
 
-/***************************************
-*  Default constant
-***************************************/
+/* *************************************
+ *  Default constant
+ ***************************************/
 #ifndef ZSTD_CLEVEL_DEFAULT
 #  define ZSTD_CLEVEL_DEFAULT 3
 #endif
 
+/* *************************************
+ *  Constants
+ ***************************************/
+
+/* All magic numbers are supposed to be read/written to/from files/memory using little-endian convention */
+#define ZSTD_MAGICNUMBER            0xFD2FB528    /* valid since v0.8.0 */
+#define ZSTD_MAGIC_DICTIONARY       0xEC30A437    /* valid since v0.7.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50    /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
+#define ZSTD_MAGIC_SKIPPABLE_MASK   0xFFFFFFF0
+
+#define ZSTD_BLOCKSIZELOG_MAX  17
+#define ZSTD_BLOCKSIZE_MAX     (1<<ZSTD_BLOCKSIZELOG_MAX)
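+/* A hedged sketch of classifying a frame with the constants above (assumes
+ * srcSize >= 4 and a hypothetical little-endian helper readLE32()):
+ *
+ *   U32 const magic = readLE32(src);
+ *   if (magic == ZSTD_MAGICNUMBER) {
+ *       // regular zstd frame
+ *   } else if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ *       // skippable frame
+ *   }
+ */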
+
+
+
 /***************************************
 *  Simple API
 ***************************************/
@@ -145,12 +160,21 @@
  * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
 ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
 
+/*! ZSTD_findFrameCompressedSize() :
+ * `src` should point to the start of a ZSTD frame or skippable frame.
+ * `srcSize` must be >= first frame size
+ * @return : the compressed size of the first frame starting at `src`,
+ *           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
+ *        or an error code if input is invalid */
+ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
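+/* A hedged sketch: walking a buffer of concatenated frames with the function
+ * above (src/srcSize assumed valid; per-frame handling elided):
+ *
+ *   const char* p = (const char*)src;
+ *   size_t remaining = srcSize;
+ *   while (remaining > 0) {
+ *       size_t const frameSize = ZSTD_findFrameCompressedSize(p, remaining);
+ *       if (ZSTD_isError(frameSize)) break;  // invalid input
+ *       // decompress or skip the frame at [p, p + frameSize) here
+ *       p += frameSize;
+ *       remaining -= frameSize;
+ *   }
+ */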
+
 
 /*======  Helper functions  ======*/
 #define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
 ZSTDLIB_API size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
 ZSTDLIB_API unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
 ZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
+ZSTDLIB_API int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed */
 ZSTDLIB_API int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
 
 
@@ -159,9 +183,14 @@
 ***************************************/
 /*= Compression context
  *  When compressing many times,
- *  it is recommended to allocate a context just once, and re-use it for each successive compression operation.
+ *  it is recommended to allocate a context just once,
+ *  and re-use it for each successive compression operation.
  *  This will make workload friendlier for system's memory.
- *  Use one context per thread for parallel execution in multi-threaded environments. */
+ *  Note : re-using context is just a speed / resource optimization.
+ *         It doesn't change the compression ratio, which remains identical.
+ *  Note 2 : In multi-threaded environments,
+ *         use one different context per thread for parallel execution.
+ */
 typedef struct ZSTD_CCtx_s ZSTD_CCtx;
 ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
 ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);
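+/* A minimal reuse sketch for the note above: one context serves many
+ * compressions (hypothetical nbInputs/srcs/srcSizes arrays; dst buffer
+ * assumed large enough; error checks elided):
+ *
+ *   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *   size_t i;
+ *   for (i = 0; i < nbInputs; i++) {
+ *       ZSTD_compressCCtx(cctx, dst, dstCapacity, srcs[i], srcSizes[i],
+ *                         ZSTD_CLEVEL_DEFAULT);
+ *   }
+ *   ZSTD_freeCCtx(cctx);
+ */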
@@ -195,279 +224,6 @@
                                  const void* src, size_t srcSize);
 
 
-/**************************
-*  Simple dictionary API
-***************************/
-/*! ZSTD_compress_usingDict() :
- *  Compression at an explicit compression level using a Dictionary.
- *  A dictionary can be any arbitrary data segment (also called a prefix),
- *  or a buffer with specified information (see dictBuilder/zdict.h).
- *  Note : This function loads the dictionary, resulting in significant startup delay.
- *         It's intended for a dictionary used only once.
- *  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
-ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
-                                           void* dst, size_t dstCapacity,
-                                     const void* src, size_t srcSize,
-                                     const void* dict,size_t dictSize,
-                                           int compressionLevel);
-
-/*! ZSTD_decompress_usingDict() :
- *  Decompression using a known Dictionary.
- *  Dictionary must be identical to the one used during compression.
- *  Note : This function loads the dictionary, resulting in significant startup delay.
- *         It's intended for a dictionary used only once.
- *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
-ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
-                                             void* dst, size_t dstCapacity,
-                                       const void* src, size_t srcSize,
-                                       const void* dict,size_t dictSize);
-
-
-/***********************************
- *  Bulk processing dictionary API
- **********************************/
-typedef struct ZSTD_CDict_s ZSTD_CDict;
-
-/*! ZSTD_createCDict() :
- *  When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once.
- *  ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost.
- *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- * `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict.
- *  Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content.
- *  Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
-                                         int compressionLevel);
-
-/*! ZSTD_freeCDict() :
- *  Function frees memory allocated by ZSTD_createCDict(). */
-ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
-
-/*! ZSTD_compress_usingCDict() :
- *  Compression using a digested Dictionary.
- *  Recommended when same dictionary is used multiple times.
- *  Note : compression level is _decided at dictionary creation time_,
- *     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
-ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
-                                            void* dst, size_t dstCapacity,
-                                      const void* src, size_t srcSize,
-                                      const ZSTD_CDict* cdict);
-
-
-typedef struct ZSTD_DDict_s ZSTD_DDict;
-
-/*! ZSTD_createDDict() :
- *  Create a digested dictionary, ready to start decompression operation without startup delay.
- *  dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
-
-/*! ZSTD_freeDDict() :
- *  Function frees memory allocated with ZSTD_createDDict() */
-ZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
-
-/*! ZSTD_decompress_usingDDict() :
- *  Decompression using a digested Dictionary.
- *  Recommended when same dictionary is used multiple times. */
-ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
-                                              void* dst, size_t dstCapacity,
-                                        const void* src, size_t srcSize,
-                                        const ZSTD_DDict* ddict);
-
-
-/****************************
-*  Streaming
-****************************/
-
-typedef struct ZSTD_inBuffer_s {
-  const void* src;    /**< start of input buffer */
-  size_t size;        /**< size of input buffer */
-  size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
-} ZSTD_inBuffer;
-
-typedef struct ZSTD_outBuffer_s {
-  void*  dst;         /**< start of output buffer */
-  size_t size;        /**< size of output buffer */
-  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
-} ZSTD_outBuffer;
-
-
-
-/*-***********************************************************************
-*  Streaming compression - HowTo
-*
-*  A ZSTD_CStream object is required to track streaming operation.
-*  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
-*  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
-*  It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
-*
-*  For parallel execution, use one separate ZSTD_CStream per thread.
-*
-*  note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
-*
-*  Parameters are sticky : when starting a new compression on the same context,
-*  it will re-use the same sticky parameters as previous compression session.
-*  When in doubt, it's recommended to fully initialize the context before usage.
-*  Use ZSTD_initCStream() to set the parameter to a selected compression level.
-*  Use advanced API (ZSTD_CCtx_setParameter(), etc.) to set more specific parameters.
-*
-*  Use ZSTD_compressStream() as many times as necessary to consume input stream.
-*  The function will automatically update both `pos` fields within `input` and `output`.
-*  Note that the function may not consume the entire input,
-*  for example, because the output buffer is already full,
-*  in which case `input.pos < input.size`.
-*  The caller must check if input has been entirely consumed.
-*  If not, the caller must make some room to receive more compressed data,
-*  and then present again remaining input data.
-* @return : a size hint, preferred nb of bytes to use as input for next function call
-*           or an error code, which can be tested using ZSTD_isError().
-*           Note 1 : it's just a hint, to help latency a little, any value will work fine.
-*           Note 2 : size hint is guaranteed to be <= ZSTD_CStreamInSize()
-*
-*  At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
-*  using ZSTD_flushStream(). `output->pos` will be updated.
-*  Note that, if `output->size` is too small, a single invocation of ZSTD_flushStream() might not be enough (return code > 0).
-*  In which case, make some room to receive more compressed data, and call again ZSTD_flushStream().
-*  @return : 0 if internal buffers are entirely flushed,
-*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
-*            or an error code, which can be tested using ZSTD_isError().
-*
-*  ZSTD_endStream() instructs to finish a frame.
-*  It will perform a flush and write frame epilogue.
-*  The epilogue is required for decoders to consider a frame completed.
-*  flush() operation is the same, and follows same rules as ZSTD_flushStream().
-*  @return : 0 if frame fully completed and fully flushed,
-*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
-*            or an error code, which can be tested using ZSTD_isError().
-*
-* *******************************************************************/
-
-typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
-                                 /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
-/*===== ZSTD_CStream management functions =====*/
-ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
-ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
-
-/*===== Streaming compression functions =====*/
-ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
-ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
-
-ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
-ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block in all circumstances. */
-
-
-
-/*-***************************************************************************
-*  Streaming decompression - HowTo
-*
-*  A ZSTD_DStream object is required to track streaming operations.
-*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
-*  ZSTD_DStream objects can be re-used multiple times.
-*
-*  Use ZSTD_initDStream() to start a new decompression operation.
-* @return : recommended first input size
-*  Alternatively, use advanced API to set specific properties.
-*
-*  Use ZSTD_decompressStream() repetitively to consume your input.
-*  The function will update both `pos` fields.
-*  If `input.pos < input.size`, some input has not been consumed.
-*  It's up to the caller to present again remaining data.
-*  The function tries to flush all data decoded immediately, respecting output buffer size.
-*  If `output.pos < output.size`, decoder has flushed everything it could.
-*  But if `output.pos == output.size`, there might be some data left within internal buffers.,
-*  In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
-*  Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
-* @return : 0 when a frame is completely decoded and fully flushed,
-*        or an error code, which can be tested using ZSTD_isError(),
-*        or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
-*                                the return value is a suggested next input size (just a hint for better latency)
-*                                that will never request more than the remaining frame size.
-* *******************************************************************************/
-
-typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
-                                 /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
-/*===== ZSTD_DStream management functions =====*/
-ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
-ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
-
-/*===== Streaming decompression functions =====*/
-ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
-ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-
-ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
-ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
-
-#endif  /* ZSTD_H_235446 */
-
-
-
-
-/****************************************************************************************
- *   ADVANCED AND EXPERIMENTAL FUNCTIONS
- ****************************************************************************************
- * The definitions in the following section are considered experimental.
- * They are provided for advanced scenarios.
- * They should never be used with a dynamic library, as prototypes may change in the future.
- * Use them only in association with static linking.
- * ***************************************************************************************/
-
-#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
-#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
-
-
-/****************************************************************************************
- *   Candidate API for promotion to stable status
- ****************************************************************************************
- * The following symbols and constants form the "staging area" :
- * they are considered to join "stable API" by v1.4.0.
- * The proposal is written so that it can be made stable "as is",
- * though it's still possible to suggest improvements.
- * Staging is in fact last chance for changes,
- * the API is locked once reaching "stable" status.
- * ***************************************************************************************/
-
-
-/* ===  Constants   === */
-
-/* all magic numbers are supposed read/written to/from files/memory using little-endian convention */
-#define ZSTD_MAGICNUMBER            0xFD2FB528    /* valid since v0.8.0 */
-#define ZSTD_MAGIC_DICTIONARY       0xEC30A437    /* valid since v0.7.0 */
-#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50    /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
-#define ZSTD_MAGIC_SKIPPABLE_MASK   0xFFFFFFF0
-
-#define ZSTD_BLOCKSIZELOG_MAX  17
-#define ZSTD_BLOCKSIZE_MAX     (1<<ZSTD_BLOCKSIZELOG_MAX)
-
-
-/* ===   query limits   === */
-
-ZSTDLIB_API int ZSTD_minCLevel(void);  /*!< minimum negative compression level allowed */
-
-
-/* ===   frame size   === */
-
-/*! ZSTD_findFrameCompressedSize() :
- * `src` should point to the start of a ZSTD frame or skippable frame.
- * `srcSize` must be >= first frame size
- * @return : the compressed size of the first frame starting at `src`,
- *           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
- *        or an error code if input is invalid */
-ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
-
-
-/* ===   Memory management   === */
-
-/*! ZSTD_sizeof_*() :
- *  These functions give the _current_ memory usage of selected object.
- *  Note that object memory usage can evolve (increase or decrease) over time. */
-ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
-ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
-ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
-ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
-ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
-ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
-
-
 /***************************************
 *  Advanced compression API
 ***************************************/
@@ -503,7 +259,10 @@
 
 typedef enum {
 
-    /* compression parameters */
+    /* compression parameters
+     * Note: When compressing with a ZSTD_CDict these parameters are superseded
+     * by the parameters used to construct the ZSTD_CDict. See ZSTD_CCtx_refCDict()
+     * for more info (superseded-by-cdict). */
     ZSTD_c_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
                               * Default level is ZSTD_CLEVEL_DEFAULT==3.
                               * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
@@ -625,6 +384,8 @@
      * ZSTD_c_format
      * ZSTD_c_forceMaxWindow
      * ZSTD_c_forceAttachDict
+     * ZSTD_c_literalCompressionMode
+     * ZSTD_c_targetCBlockSize
      * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
      * note : never ever use experimentalParam? names directly;
      *        also, the enums values themselves are unstable and can still change.
@@ -632,10 +393,11 @@
      ZSTD_c_experimentalParam1=500,
      ZSTD_c_experimentalParam2=10,
      ZSTD_c_experimentalParam3=1000,
-     ZSTD_c_experimentalParam4=1001
+     ZSTD_c_experimentalParam4=1001,
+     ZSTD_c_experimentalParam5=1002,
+     ZSTD_c_experimentalParam6=1003,
 } ZSTD_cParameter;
 
-
 typedef struct {
     size_t error;
     int lowerBound;
@@ -677,10 +439,443 @@
  *  Note 3 : Whenever all input data is provided and consumed in a single round,
  *           for example with ZSTD_compress2(),
  *           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
- *           this value is automatically overriden by srcSize instead.
+ *           this value is automatically overridden by srcSize instead.
  */
 ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
 
+typedef enum {
+    ZSTD_reset_session_only = 1,
+    ZSTD_reset_parameters = 2,
+    ZSTD_reset_session_and_parameters = 3
+} ZSTD_ResetDirective;
+
+/*! ZSTD_CCtx_reset() :
+ *  There are 2 different things that can be reset, independently or jointly :
+ *  - The session : will stop compressing current frame, and make CCtx ready to start a new one.
+ *                  Useful after an error, or to interrupt any ongoing compression.
+ *                  Any internal data not yet flushed is cancelled.
+ *                  Compression parameters and dictionary remain unchanged.
+ *                  They will be used to compress next frame.
+ *                  Resetting session never fails.
+ *  - The parameters : changes all parameters back to "default".
+ *                  This removes any reference to any dictionary too.
+ *                  Parameters can only be changed between 2 sessions (i.e. when no compression is ongoing);
+ *                  otherwise the reset fails, and the function returns an error value (which can be tested using ZSTD_isError())
+ *  - Both : similar to resetting the session, followed by resetting parameters.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
+
+/*! ZSTD_compress2() :
+ *  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
+ *  ZSTD_compress2() always starts a new frame.
+ *  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
+ *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ *  - The function is always blocking, returns when compression is completed.
+ *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ *           or an error code if it fails (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
+                                   void* dst, size_t dstCapacity,
+                             const void* src, size_t srcSize);
+
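A minimal sketch of the one-shot advanced path described above, assuming the input is already in memory; error handling is abbreviated to the final ZSTD_isError() check, and the level and checksum values are arbitrary examples:

    #include <zstd.h>

    size_t compress_once(void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        /* sticky parameters : set once, re-used by every following frame */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
        size_t const csize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_freeCCtx(cctx);
        return csize;   /* test with ZSTD_isError() */
    }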
+
+/***************************************
+*  Advanced decompression API
+***************************************/
+
+/* The advanced API pushes parameters one by one into an existing DCtx context.
+ * Parameters are sticky, and remain valid for all following frames
+ * using the same DCtx context.
+ * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
+ * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
+ *        Therefore, no new decompression function is necessary.
+ */
+
+typedef enum {
+
+    ZSTD_d_windowLogMax=100, /* Select a size limit (as a power of 2) beyond which
+                              * the streaming API will refuse to allocate a memory buffer,
+                              * in order to protect the host from unreasonable memory requirements.
+                              * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+                              * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
+                              * Special: value 0 means "use default maximum windowLog". */
+
+    /* note : additional experimental parameters are also available
+     * within the experimental section of the API.
+     * At the time of this writing, they include :
+     * ZSTD_d_format
+     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+     * note : never ever use experimentalParam? names directly
+     */
+     ZSTD_d_experimentalParam1=1000
+
+} ZSTD_dParameter;
+
+/*! ZSTD_dParam_getBounds() :
+ *  All parameters must belong to an interval with lower and upper bounds,
+ *  otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ *         - an error status field, which must be tested using ZSTD_isError()
+ *         - both lower and upper bounds, inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
+
+/*! ZSTD_DCtx_setParameter() :
+ *  Set one decompression parameter, selected by enum ZSTD_dParameter.
+ *  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
+ *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
+ *  Setting a parameter is only possible during frame initialization (before starting decompression).
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
+
+/*! ZSTD_DCtx_reset() :
+ *  Return a DCtx to clean state.
+ *  Session and parameters can be reset jointly or separately.
+ *  Parameters can only be reset when no active frame is being decompressed.
+ * @return : 0, or an error code, which can be tested with ZSTD_isError()
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
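A small sketch of the decompression parameter flow just described, assuming a caller-provided dctx; the value 27 is an arbitrary example within the documented bounds:

    #include <zstd.h>

    void demo_dctx_params(ZSTD_DCtx* dctx)
    {
        /* cap the window size accepted by streaming decompression at 1<<27 */
        size_t const r = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
        if (ZSTD_isError(r)) return;
        /* ... run one or more ZSTD_decompressStream() sessions here ... */
        /* return all parameters (and any dictionary) to their defaults */
        ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters);
    }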
+
+
+/****************************
+*  Streaming
+****************************/
+
+typedef struct ZSTD_inBuffer_s {
+  const void* src;    /**< start of input buffer */
+  size_t size;        /**< size of input buffer */
+  size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_inBuffer;
+
+typedef struct ZSTD_outBuffer_s {
+  void*  dst;         /**< start of output buffer */
+  size_t size;        /**< size of output buffer */
+  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_outBuffer;
+
+
+
+/*-***********************************************************************
+*  Streaming compression - HowTo
+*
+*  A ZSTD_CStream object is required to track streaming operation.
+*  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
+*  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
+*  It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
+*
+*  For parallel execution, use one separate ZSTD_CStream per thread.
+*
+*  note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
+*
+*  Parameters are sticky : when starting a new compression on the same context,
+*  it will re-use the same sticky parameters as previous compression session.
+*  When in doubt, it's recommended to fully initialize the context before usage.
+*  Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
+*  ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
+*  set more specific parameters, the pledged source size, or load a dictionary.
+*
+*  Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
+*  consume input stream. The function will automatically update both `pos`
+*  fields within `input` and `output`.
+*  Note that the function may not consume the entire input, for example, because
+*  the output buffer is already full, in which case `input.pos < input.size`.
+*  The caller must check if input has been entirely consumed.
+*  If not, the caller must make some room to receive more compressed data,
+*  and then present again remaining input data.
+*  note: ZSTD_e_continue is guaranteed to make some forward progress when called,
+*        but doesn't guarantee maximal forward progress. This is especially relevant
+*        when compressing with multiple threads. The call won't block if it can
+*        consume some input, but if it can't it will wait for some, but not all,
+*        output to be flushed.
+* @return : provides a minimum amount of data remaining to be flushed from internal buffers
+*           or an error code, which can be tested using ZSTD_isError().
+*
+*  At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
+*  using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
+*  Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
+*  In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
+*  You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
+*  operation.
+*  note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
+*        block until the flush is complete or the output buffer is full.
+*  @return : 0 if internal buffers are entirely flushed,
+*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+*            or an error code, which can be tested using ZSTD_isError().
+*
+*  Calling ZSTD_compressStream2() with ZSTD_e_end instructs it to finish a frame.
+*  It will perform a flush and write the frame epilogue.
+*  The epilogue is required for decoders to consider a frame completed.
+*  The flush operation is the same, and follows the same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
+*  You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
+*  start a new frame.
+*  note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
+*        block until the flush is complete or the output buffer is full.
+*  @return : 0 if frame fully completed and fully flushed,
+*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+*            or an error code, which can be tested using ZSTD_isError().
+*
+* *******************************************************************/
+
+typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
+                                 /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
+/*===== ZSTD_CStream management functions =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
+
+/*===== Streaming compression functions =====*/
+typedef enum {
+    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
+    ZSTD_e_flush=1,    /* flush any data provided so far,
+                        * it creates (at least) one new block, that can be decoded immediately on reception;
+                        * frame will continue: any future data can still reference previously compressed data, improving compression.
+                        * note : multithreaded compression will block to flush as much output as possible. */
+    ZSTD_e_end=2       /* flush any remaining data _and_ close current frame.
+                        * note that frame is only closed after compressed data is fully flushed (return value == 0).
+                        * After that point, any additional data starts a new frame.
+                        * note : each frame is independent (does not reference any content from previous frame).
+                        * note : multithreaded compression will block to flush as much output as possible. */
+} ZSTD_EndDirective;
+
+/*! ZSTD_compressStream2() :
+ *  Behaves about the same as ZSTD_compressStream, with additional control on end directive.
+ *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ *  - Compression parameters cannot be changed once compression is started (save for a list of exceptions in multi-threading mode)
+ *  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ *  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ *  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
+ *  - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, distributes jobs to internal worker threads, flushes whatever is available,
+ *                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.
+ *                                                  The function nonetheless guarantees forward progress : it will return only after it reads or writes at least one byte.
+ *  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
+ *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
+ *            or an error code, which can be tested using ZSTD_isError().
+ *            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
+ *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
+ *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
+ *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
+ *            only ZSTD_e_end or ZSTD_e_flush operations are allowed.
+ *            Before starting a new compression job, or changing compression parameters,
+ *            it is required to fully flush internal buffers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+                                         ZSTD_outBuffer* output,
+                                         ZSTD_inBuffer* input,
+                                         ZSTD_EndDirective endOp);
+
+
+/* These buffer sizes are softly recommended.
+ * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
+ * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
+ * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
+ *
+ * However, note that these recommendations are from the perspective of a C caller program.
+ * If the streaming interface is invoked from some other language,
+ * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
+ * a major performance rule is to reduce crossings of such interfaces to an absolute minimum.
+ * It's not rare that more time ends up being spent crossing the interface than on compression itself.
+ * In such cases, prefer using large buffers, as large as practical,
+ * for both input and output, to reduce the number of round trips.
+ */
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guaranteed to successfully flush at least one complete compressed block. */
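Putting the HowTo above together, a condensed file-to-file sketch using the recommended buffer sizes; allocation failures and I/O errors are not checked here:

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static void compress_file(FILE* fin, FILE* fout, int level)
    {
        size_t const inSize  = ZSTD_CStreamInSize();   /* recommended sizes */
        size_t const outSize = ZSTD_CStreamOutSize();
        void* const inBuf  = malloc(inSize);
        void* const outBuf = malloc(outSize);
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);

        for (;;) {
            size_t const read = fread(inBuf, 1, inSize, fin);
            /* a short read means end of input : close the frame with ZSTD_e_end */
            ZSTD_EndDirective const mode = (read < inSize) ? ZSTD_e_end : ZSTD_e_continue;
            ZSTD_inBuffer input = { inBuf, read, 0 };
            int finished;
            do {
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
                /* remaining is an error code, or the minimum data still buffered */
                fwrite(outBuf, 1, output.pos, fout);
                finished = (mode == ZSTD_e_end) ? (remaining == 0)
                                                : (input.pos == input.size);
            } while (!finished);
            if (mode == ZSTD_e_end) break;
        }
        ZSTD_freeCCtx(cctx);
        free(inBuf); free(outBuf);
    }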
+
+
+/* *****************************************************************************
+ * This following is a legacy streaming API.
+ * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
+ * It is redundant, but remains fully supported.
+ * Advanced parameters and dictionary compression can only be used through the
+ * new API.
+ ******************************************************************************/
+
+/*!
+ * Equivalent to:
+ *
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ */
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+/*!
+ * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
+ * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
+ * the next read size (if non-zero and not an error). ZSTD_compressStream2()
+ * returns the minimum nb of bytes left to flush (if non-zero and not an error).
+ */
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
+
+/*-***************************************************************************
+*  Streaming decompression - HowTo
+*
+*  A ZSTD_DStream object is required to track streaming operations.
+*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+*  ZSTD_DStream objects can be re-used multiple times.
+*
+*  Use ZSTD_initDStream() to start a new decompression operation.
+* @return : recommended first input size
+*  Alternatively, use advanced API to set specific properties.
+*
+*  Use ZSTD_decompressStream() repeatedly to consume your input.
+*  The function will update both `pos` fields.
+*  If `input.pos < input.size`, some input has not been consumed.
+*  It's up to the caller to present again remaining data.
+*  The function tries to flush all data decoded immediately, respecting output buffer size.
+*  If `output.pos < output.size`, the decoder has flushed everything it could.
+*  But if `output.pos == output.size`, there might be some data left within internal buffers.
+*  In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
+*  Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
+* @return : 0 when a frame is completely decoded and fully flushed,
+*        or an error code, which can be tested using ZSTD_isError(),
+*        or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
+*                                the return value is a suggested next input size (just a hint for better latency)
+*                                that will never request more than the remaining frame size.
+* *******************************************************************************/
+
+typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
+                                 /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
+/*===== ZSTD_DStream management functions =====*/
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
+
+/*===== Streaming decompression functions =====*/
+
+/* This function is redundant with the advanced API and equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *     ZSTD_DCtx_refDDict(zds, NULL);
+ */
+ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+
+ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guaranteed to successfully flush at least one complete block in all circumstances. */
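The matching decompression loop, condensed from the HowTo above; again a sketch, with error handling and cleanup on failure abbreviated:

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static void decompress_file(FILE* fin, FILE* fout)
    {
        size_t const inSize  = ZSTD_DStreamInSize();
        size_t const outSize = ZSTD_DStreamOutSize();
        void* const inBuf  = malloc(inSize);
        void* const outBuf = malloc(outSize);
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();

        size_t read;
        while ((read = fread(inBuf, 1, inSize, fin)) != 0) {
            ZSTD_inBuffer input = { inBuf, read, 0 };
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                size_t const ret = ZSTD_decompressStream(dctx, &output, &input);
                if (ZSTD_isError(ret)) { read = 0; break; }   /* abort on error */
                fwrite(outBuf, 1, output.pos, fout);
            }
            if (read == 0) break;
        }
        ZSTD_freeDCtx(dctx);
        free(inBuf); free(outBuf);
    }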
+
+
+/**************************
+*  Simple dictionary API
+***************************/
+/*! ZSTD_compress_usingDict() :
+ *  Compression at an explicit compression level using a Dictionary.
+ *  A dictionary can be any arbitrary data segment (also called a prefix),
+ *  or a buffer with specified information (see dictBuilder/zdict.h).
+ *  Note : This function loads the dictionary, resulting in significant startup delay.
+ *         It's intended for a dictionary used only once.
+ *  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
+                                           void* dst, size_t dstCapacity,
+                                     const void* src, size_t srcSize,
+                                     const void* dict,size_t dictSize,
+                                           int compressionLevel);
+
+/*! ZSTD_decompress_usingDict() :
+ *  Decompression using a known Dictionary.
+ *  Dictionary must be identical to the one used during compression.
+ *  Note : This function loads the dictionary, resulting in significant startup delay.
+ *         It's intended for a dictionary used only once.
+ *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+                                             void* dst, size_t dstCapacity,
+                                       const void* src, size_t srcSize,
+                                       const void* dict,size_t dictSize);
+
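A round-trip sketch of the simple dictionary API above, assuming `dictBuf` holds either a zdict-produced dictionary or raw prefix content, and using level 3 as an arbitrary example; error checks are abbreviated:

    #include <zstd.h>

    size_t dict_roundtrip(void* dst, size_t dstCap,
                          void* out, size_t outCap,
                          const void* src, size_t srcSize,
                          const void* dictBuf, size_t dictSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        size_t const csize = ZSTD_compress_usingDict(cctx, dst, dstCap,
                                                     src, srcSize,
                                                     dictBuf, dictSize, 3);
        size_t const dsize = ZSTD_decompress_usingDict(dctx, out, outCap,
                                                       dst, csize,
                                                       dictBuf, dictSize);
        ZSTD_freeCCtx(cctx);
        ZSTD_freeDCtx(dctx);
        return dsize;   /* both csize and dsize should be tested with ZSTD_isError() */
    }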
+
+/***********************************
+ *  Bulk processing dictionary API
+ **********************************/
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/*! ZSTD_createCDict() :
+ *  When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once.
+ *  ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost.
+ *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict.
+ *  Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content.
+ *  Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
+                                         int compressionLevel);
+
+/*! ZSTD_freeCDict() :
+ *  Function frees memory allocated by ZSTD_createCDict(). */
+ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
+
+/*! ZSTD_compress_usingCDict() :
+ *  Compression using a digested Dictionary.
+ *  Recommended when same dictionary is used multiple times.
+ *  Note : compression level is _decided at dictionary creation time_,
+ *     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+                                            void* dst, size_t dstCapacity,
+                                      const void* src, size_t srcSize,
+                                      const ZSTD_CDict* cdict);
+
+
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/*! ZSTD_createDDict() :
+ *  Create a digested dictionary, ready to start decompression operation without startup delay.
+ *  dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_freeDDict() :
+ *  Function frees memory allocated with ZSTD_createDDict() */
+ZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
+
+/*! ZSTD_decompress_usingDDict() :
+ *  Decompression using a digested Dictionary.
+ *  Recommended when same dictionary is used multiple times. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+                                              void* dst, size_t dstCapacity,
+                                        const void* src, size_t srcSize,
+                                        const ZSTD_DDict* ddict);
+
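A sketch of the bulk pattern these functions are designed for: digest the dictionary once, then re-use it across many small messages. The message arrays are hypothetical caller-supplied inputs, and error handling is abbreviated:

    #include <zstd.h>

    void compress_many(const void* dictBuf, size_t dictSize,
                       const void* msgs[], const size_t msgSizes[], size_t nbMsgs,
                       void* dst, size_t dstCap)
    {
        ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        size_t i;
        for (i = 0; i < nbMsgs; i++) {
            size_t const csize = ZSTD_compress_usingCDict(cctx, dst, dstCap,
                                                          msgs[i], msgSizes[i],
                                                          cdict);
            if (ZSTD_isError(csize)) break;
            /* ... store the first csize bytes of dst somewhere ... */
        }
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);
    }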
+
+/********************************
+ *  Dictionary helper functions
+ *******************************/
+
+/*! ZSTD_getDictID_fromDict() :
+ *  Provides the dictID stored within dictionary.
+ *  if @return == 0, the dictionary is not conformant with Zstandard specification.
+ *  It can still be loaded, but as a content-only dictionary. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromDDict() :
+ *  Provides the dictID of the dictionary loaded into `ddict`.
+ *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() :
+ *  Provides the dictID required to decompress the frame stored within `src`.
+ *  If @return == 0, the dictID could not be decoded.
+ *  This could be for one of the following reasons :
+ *  - The frame does not require a dictionary to be decoded (most common case).
+ *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ *    Note : this use case also happens when using a non-conformant dictionary.
+ *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ *  - This is not a Zstandard frame.
+ *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
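One way to use this, sketched with a hypothetical application-side registry; find_ddict_by_id is not a zstd function:

    #include <zstd.h>

    /* hypothetical registry mapping dictID -> digested dictionary */
    extern const ZSTD_DDict* find_ddict_by_id(unsigned dictID);

    const ZSTD_DDict* select_dictionary(const void* frame, size_t frameSize)
    {
        unsigned const id = ZSTD_getDictID_fromFrame(frame, frameSize);
        if (id == 0) return NULL;     /* no dictID present (see reasons above) */
        return find_ddict_by_id(id);  /* NULL if that dictionary isn't held */
    }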
+
+
+/*******************************************************************************
+ * Advanced dictionary and prefix API
+ *
+ * This API allows dictionaries to be used with ZSTD_compress2(),
+ * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
+ * only reset when the context is reset with ZSTD_reset_parameters or
+ * ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ ******************************************************************************/
+
+
 /*! ZSTD_CCtx_loadDictionary() :
  *  Create an internal CDict from `dict` buffer.
  *  Decompression will have to use same dictionary.
@@ -703,7 +898,9 @@
 /*! ZSTD_CCtx_refCDict() :
  *  Reference a prepared dictionary, to be used for all next compressed frames.
  *  Note that compression parameters are enforced from within CDict,
- *  and supercede any compression parameter previously set within CCtx.
+ *  and supersede any compression parameter previously set within CCtx.
+ *  The parameters ignored are labeled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
+ *  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
  *  The dictionary will remain valid for future compressed frames using same CCtx.
  * @result : 0, or an error code (which can be tested with ZSTD_isError()).
  *  Special : Referencing a NULL CDict means "return to no-dictionary mode".
@@ -733,136 +930,6 @@
 ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
                                  const void* prefix, size_t prefixSize);
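A minimal sketch of the sticky-dictionary flow, assuming a caller-provided cctx; the loaded dictionary stays attached for every following frame until it is replaced or the parameters are reset:

    #include <zstd.h>

    size_t compress_with_sticky_dict(ZSTD_CCtx* cctx,
                                     const void* dictBuf, size_t dictSize,
                                     void* dst, size_t dstCap,
                                     const void* src, size_t srcSize)
    {
        size_t const r = ZSTD_CCtx_loadDictionary(cctx, dictBuf, dictSize);
        if (ZSTD_isError(r)) return r;
        return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    }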
 
-
-typedef enum {
-    ZSTD_reset_session_only = 1,
-    ZSTD_reset_parameters = 2,
-    ZSTD_reset_session_and_parameters = 3
-} ZSTD_ResetDirective;
-
-/*! ZSTD_CCtx_reset() :
- *  There are 2 different things that can be reset, independently or jointly :
- *  - The session : will stop compressing current frame, and make CCtx ready to start a new one.
- *                  Useful after an error, or to interrupt any ongoing compression.
- *                  Any internal data not yet flushed is cancelled.
- *                  Compression parameters and dictionary remain unchanged.
- *                  They will be used to compress next frame.
- *                  Resetting session never fails.
- *  - The parameters : changes all parameters back to "default".
- *                  This removes any reference to any dictionary too.
- *                  Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
- *                  otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
- *  - Both : similar to resetting the session, followed by resetting parameters.
- */
-ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
-
-
-
-/*! ZSTD_compress2() :
- *  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
- *  ZSTD_compress2() always starts a new frame.
- *  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
- *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
- *  - The function is always blocking, returns when compression is completed.
- *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
- * @return : compressed size written into `dst` (<= `dstCapacity),
- *           or an error code if it fails (which can be tested using ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
-                                   void* dst, size_t dstCapacity,
-                             const void* src, size_t srcSize);
-
-typedef enum {
-    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
-    ZSTD_e_flush=1,    /* flush any data provided so far,
-                        * it creates (at least) one new block, that can be decoded immediately on reception;
-                        * frame will continue: any future data can still reference previously compressed data, improving compression. */
-    ZSTD_e_end=2       /* flush any remaining data _and_ close current frame.
-                        * note that frame is only closed after compressed data is fully flushed (return value == 0).
-                        * After that point, any additional data starts a new frame.
-                        * note : each frame is independent (does not reference any content from previous frame). */
-} ZSTD_EndDirective;
-
-/*! ZSTD_compressStream2() :
- *  Behaves about the same as ZSTD_compressStream, with additional control on end directive.
- *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
- *  - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
- *  - outpot->pos must be <= dstCapacity, input->pos must be <= srcSize
- *  - outpot->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
- *  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
- *  - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads, flush whatever is available,
- *                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.
- *                                                  The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.
- *  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
- *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
- *            or an error code, which can be tested using ZSTD_isError().
- *            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
- *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
- *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
- *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
- *            only ZSTD_e_end or ZSTD_e_flush operations are allowed.
- *            Before starting a new compression job, or changing compression parameters,
- *            it is required to fully flush internal buffers.
- */
-ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
-                                         ZSTD_outBuffer* output,
-                                         ZSTD_inBuffer* input,
-                                         ZSTD_EndDirective endOp);
-
-
-
-/* ============================== */
-/*   Advanced decompression API   */
-/* ============================== */
-
-/* The advanced API pushes parameters one by one into an existing DCtx context.
- * Parameters are sticky, and remain valid for all following frames
- * using the same DCtx context.
- * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
- * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
- *        Therefore, no new decompression function is necessary.
- */
-
-
-typedef enum {
-
-    ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
-                              * the streaming API will refuse to allocate memory buffer
-                              * in order to protect the host from unreasonable memory requirements.
-                              * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
-                              * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) */
-
-    /* note : additional experimental parameters are also available
-     * within the experimental section of the API.
-     * At the time of this writing, they include :
-     * ZSTD_c_format
-     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
-     * note : never ever use experimentalParam? names directly
-     */
-     ZSTD_d_experimentalParam1=1000
-
-} ZSTD_dParameter;
-
-
-/*! ZSTD_dParam_getBounds() :
- *  All parameters must belong to an interval with lower and upper bounds,
- *  otherwise they will either trigger an error or be automatically clamped.
- * @return : a structure, ZSTD_bounds, which contains
- *         - an error status field, which must be tested using ZSTD_isError()
- *         - both lower and upper bounds, inclusive
- */
-ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
-
-/*! ZSTD_DCtx_setParameter() :
- *  Set one compression parameter, selected by enum ZSTD_dParameter.
- *  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
- *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
- *  Setting a parameter is only possible during frame initialization (before starting decompression).
- * @return : 0, or an error code (which can be tested using ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
-
-
 /*! ZSTD_DCtx_loadDictionary() :
  *  Create an internal DDict from dict buffer,
  *  to be used to decompress next frames.
@@ -910,15 +977,32 @@
 ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
                                  const void* prefix, size_t prefixSize);
 
-/*! ZSTD_DCtx_reset() :
- *  Return a DCtx to clean state.
- *  Session and parameters can be reset jointly or separately.
- *  Parameters can only be reset when no active frame is being decompressed.
- * @return : 0, or an error code, which can be tested with ZSTD_isError()
- */
-ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
+/* ===   Memory management   === */
+
+/*! ZSTD_sizeof_*() :
+ *  These functions give the _current_ memory usage of selected object.
+ *  Note that object memory usage can evolve (increase or decrease) over time. */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
+#endif  /* ZSTD_H_235446 */
 
 
+/* **************************************************************************************
+ *   ADVANCED AND EXPERIMENTAL FUNCTIONS
+ ****************************************************************************************
+ * The definitions in the following section are considered experimental.
+ * They are provided for advanced scenarios.
+ * They should never be used with a dynamic library, as prototypes may change in the future.
+ * Use them only in association with static linking.
+ * ***************************************************************************************/
+
+#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
 
 /****************************************************************************************
  *   experimental API (static linking only)
@@ -962,7 +1046,7 @@
 #define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27   /* by default, the streaming decoder will refuse any frame
                                            * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
                                            * to preserve host's memory from unreasonable requirements.
-                                           * This limit can be overriden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
+                                           * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
                                            * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
 
 
@@ -976,6 +1060,10 @@
 #define ZSTD_LDM_HASHRATELOG_MIN     0
 #define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
 
+/* Advanced parameter bounds */
+#define ZSTD_TARGETCBLOCKSIZE_MIN   64
+#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
+
 /* internal */
 #define ZSTD_HASHLOG3_MAX           17
 
@@ -1064,15 +1152,24 @@
     ZSTD_dictForceCopy     = 2, /* Always copy the dictionary. */
 } ZSTD_dictAttachPref_e;
 
+typedef enum {
+  ZSTD_lcm_auto = 0,          /**< Automatically determine the literals compression mode based on the compression level.
+                               *   Negative compression levels will use uncompressed literals, while positive
+                               *   compression levels will compress them. */
+  ZSTD_lcm_huffman = 1,       /**< Always attempt Huffman compression. Uncompressed literals will still be
+                               *   emitted if Huffman compression is not profitable. */
+  ZSTD_lcm_uncompressed = 2,  /**< Always emit uncompressed literals. */
+} ZSTD_literalCompressionMode_e;
+
 
 /***************************************
 *  Frame size functions
 ***************************************/
 
 /*! ZSTD_findDecompressedSize() :
- *  `src` should point the start of a series of ZSTD encoded and/or skippable frames
+ *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
  *  `srcSize` must be the _exact_ size of this series
- *       (i.e. there should be a frame boundary exactly at `srcSize` bytes after `src`)
+ *       (i.e. there should be a frame boundary at `src + srcSize`)
  *  @return : - decompressed size of all data in all successive frames
  *            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
  *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
@@ -1092,6 +1189,21 @@
  *            however it does mean that all frame data must be present and valid. */
 ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
 
+/*! ZSTD_decompressBound() :
+ *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ *  `srcSize` must be the _exact_ size of this series
+ *       (i.e. there should be a frame boundary at `src + srcSize`)
+ *  @return : - upper-bound for the decompressed size of all data in all successive frames
+ *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ *  note 1  : an error can occur if `src` contains an invalid or incorrectly formatted frame.
+ *  note 2  : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
+ *            in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
+ *  note 3  : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
+ *              upper-bound = # blocks * min(128 KB, Window_Size)
+ */
+ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
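A typical use, sketched: size the destination buffer from the worst-case bound, then decompress in one pass; error handling is abbreviated:

    #include <stdlib.h>
    #include <zstd.h>

    void* decompress_with_bound(const void* src, size_t srcSize, size_t* dstSizePtr)
    {
        unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
        if (bound == ZSTD_CONTENTSIZE_ERROR) return NULL;
        void* const dst = malloc((size_t)bound);
        if (dst == NULL) return NULL;
        size_t const dsize = ZSTD_decompress(dst, (size_t)bound, src, srcSize);
        if (ZSTD_isError(dsize)) { free(dst); return NULL; }
        *dstSizePtr = dsize;   /* actual decompressed size */
        return dst;
    }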
+
 /*! ZSTD_frameHeaderSize() :
  *  srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
  * @return : size of the Frame Header,
@@ -1110,7 +1222,7 @@
  *  It will also consider src size to be arbitrarily "large", which is worst case.
  *  If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
  *  ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
- *  ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
+ *  ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
  *  Note : CCtx size estimation is only correct for single-threaded compression. */
 ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
 ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
@@ -1122,7 +1234,7 @@
  *  It will also consider src size to be arbitrarily "large", which is worst case.
  *  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
  *  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
- *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
+ *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
  *  Note : CStream size estimation is only correct for single-threaded compression.
  *  ZSTD_DStream memory budget depends on window Size.
  *  This information can be passed manually, using ZSTD_estimateDStreamSize,
@@ -1226,22 +1338,26 @@
 ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
 
 /*! ZSTD_getCParams() :
-*   @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
-*   `estimatedSrcSize` value is optional, select 0 if not known */
+ * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+ * `estimatedSrcSize` value is optional, select 0 if not known */
 ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
 
 /*! ZSTD_getParams() :
-*   same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
-*   All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
+ *  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+ *  All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
 ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
 
 /*! ZSTD_checkCParams() :
-*   Ensure param values remain within authorized range */
+ *  Ensure param values remain within authorized range.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
 ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
 
 /*! ZSTD_adjustCParams() :
  *  optimize params for a given `srcSize` and `dictSize`.
- *  both values are optional, select `0` if unknown. */
+ * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
+ * `dictSize` must be `0` when there is no dictionary.
+ *  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
+ *  This function never fails (wide contract) */
 ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
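For example, deriving cParams from a level and then letting the library clamp them to the actual source and dictionary sizes; a sketch combining the two functions declared above:

    #include <zstd.h>

    ZSTD_compressionParameters pick_cparams(int level,
                                            unsigned long long srcSize,
                                            size_t dictSize)
    {
        ZSTD_compressionParameters const cp = ZSTD_getCParams(level, srcSize, dictSize);
        /* never fails : out-of-range fields are clamped into valid range */
        return ZSTD_adjustCParams(cp, srcSize, dictSize);
    }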
 
 /*! ZSTD_compress_advanced() :
@@ -1314,6 +1430,17 @@
  * See the comments on that enum for an explanation of the feature. */
 #define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
 
+/* Controls how the literals are compressed (default is auto).
+ * The value must be of type ZSTD_literalCompressionMode_e.
+ * See the ZSTD_literalCompressionMode_e enum definition for details.
+ */
+#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
+
+/* Tries to fit compressed block size to be around targetCBlockSize.
+ * No target when targetCBlockSize == 0.
+ * There is no guarantee on compressed block size (default:0) */
+#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
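A sketch of setting these two experimental parameters; ZSTD_STATIC_LINKING_ONLY is required as noted above, and 1024 is an arbitrary value within the ZSTD_TARGETCBLOCKSIZE_MIN/MAX bounds:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    void tune_experimental(ZSTD_CCtx* cctx)
    {
        /* always attempt Huffman compression of literals */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_lcm_huffman);
        /* aim for compressed blocks of roughly 1 KB (best effort, no guarantee) */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 1024);
    }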
+
 /*! ZSTD_CCtx_getParameter() :
  *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,
  *  and store it into int* value.
@@ -1325,10 +1452,10 @@
 /*! ZSTD_CCtx_params :
  *  Quick howto :
  *  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
- *  - ZSTD_CCtxParam_setParameter() : Push parameters one by one into
- *                                    an existing ZSTD_CCtx_params structure.
- *                                    This is similar to
- *                                    ZSTD_CCtx_setParameter().
+ *  - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
+ *                                     an existing ZSTD_CCtx_params structure.
+ *                                     This is similar to
+ *                                     ZSTD_CCtx_setParameter().
  *  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
  *                                    an existing CCtx.
  *                                    These parameters will be applied to
@@ -1359,20 +1486,20 @@
  */
 ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
 
-/*! ZSTD_CCtxParam_setParameter() :
+/*! ZSTD_CCtxParams_setParameter() :
  *  Similar to ZSTD_CCtx_setParameter.
  *  Set one compression parameter, selected by enum ZSTD_cParameter.
  *  Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
  * @result : 0, or an error code (which can be tested with ZSTD_isError()).
  */
-ZSTDLIB_API size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
+ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
 
-/*! ZSTD_CCtxParam_getParameter() :
+/*! ZSTD_CCtxParams_getParameter() :
  * Similar to ZSTD_CCtx_getParameter.
  * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
  * @result : 0, or an error code (which can be tested with ZSTD_isError()).
  */
-ZSTDLIB_API size_t ZSTD_CCtxParam_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
+ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
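The workflow these functions enable, in a short sketch: build a parameter set once, then apply it to a context with ZSTD_CCtx_setParametersUsingCCtxParams(), documented just below (error checks abbreviated):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    size_t apply_params(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
        ZSTD_CCtxParams_setParameter(params, ZSTD_c_compressionLevel, 6);
        ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
        size_t const r = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
        ZSTD_freeCCtxParams(params);
        return r;   /* 0, or an error code testable with ZSTD_isError() */
    }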
 
 /*! ZSTD_CCtx_setParametersUsingCCtxParams() :
  *  Apply a set of ZSTD_CCtx_params to the compression context.
@@ -1415,31 +1542,6 @@
  *  it must remain read accessible throughout the lifetime of DDict */
 ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
 
-
-/*! ZSTD_getDictID_fromDict() :
- *  Provides the dictID stored within dictionary.
- *  if @return == 0, the dictionary is not conformant with Zstandard specification.
- *  It can still be loaded, but as a content-only dictionary. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
-
-/*! ZSTD_getDictID_fromDDict() :
- *  Provides the dictID of the dictionary loaded into `ddict`.
- *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
- *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
-
-/*! ZSTD_getDictID_fromFrame() :
- *  Provides the dictID required to decompressed the frame stored within `src`.
- *  If @return == 0, the dictID could not be decoded.
- *  This could for one of the following reasons :
- *  - The frame does not require a dictionary to be decoded (most common case).
- *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
- *    Note : this use case also happens when using a non-conformant dictionary.
- *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
- *  - This is not a Zstandard frame.
- *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
-
 /*! ZSTD_DCtx_loadDictionary_byReference() :
  *  Same as ZSTD_DCtx_loadDictionary(),
  *  but references `dict` content instead of copying it into `dctx`.
@@ -1501,14 +1603,68 @@
 ********************************************************************/
 
 /*=====   Advanced Streaming compression functions  =====*/
-ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct. If it is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, "0" also disables frame content size field. It may be enabled in the future. */
-ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< creates of an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.*/
+/**! ZSTD_initCStream_srcSize() :
+ * This function is deprecated, and equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * pledgedSrcSize must be correct. If it is not known at init time, use
+ * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
+ * "0" also disables frame content size field. It may be enabled in the future.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);
+/**! ZSTD_initCStream_usingDict() :
+ * This function is deprecated, and is equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * Creates an internal CDict (incompatible with static CCtx), except if
+ * dict == NULL or dictSize < 8, in which case no dict is used.
+ * Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if
+ * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
+/**! ZSTD_initCStream_advanced() :
+ * This function is deprecated, and is approximately equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
+ *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy.
+ */
 ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
-                                             ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy. */
-ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);  /**< note : cdict will just be referenced, and must outlive compression session */
-ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);  /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters. pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. */
+                                             ZSTD_parameters params, unsigned long long pledgedSrcSize);
+/**! ZSTD_initCStream_usingCDict() :
+ * This function is deprecated, and equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * note : cdict will just be referenced, and must outlive compression session
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+/**! ZSTD_initCStream_usingCDict_advanced() :
+ * This function is deprecated, and is approximately equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
+ *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *     ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);
 
 /*! ZSTD_resetCStream() :
+ * This function is deprecated, and is equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
  *  start a new frame, using same parameters from previous frame.
  *  This is typically useful to skip dictionary loading stage, since it will re-use it in-place.
 *  Note that zcs must be initialized at least once before using ZSTD_resetCStream().
@@ -1555,9 +1711,32 @@
 
 
 /*=====   Advanced Streaming decompression functions  =====*/
-ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: no dictionary will be used if dict == NULL or dictSize < 8 */
-ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);  /**< note : ddict is referenced, it must outlive decompression session */
-ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);  /**< re-use decompression parameters from previous init; saves dictionary loading */
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
+ *
+ * note: no dictionary will be used if dict == NULL or dictSize < 8
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *     ZSTD_DCtx_refDDict(zds, ddict);
+ *
+ * note : ddict is referenced, it must outlive decompression session
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *
+ * re-use decompression parameters from previous init; saves dictionary loading
+ */
+ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
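
The decompression side follows the same shape; a minimal sketch, again assuming libzstd >= 1.4.0 (begin_decode, dict and dictSize are illustrative names):

    #include <zstd.h>

    /* New-style equivalent of the deprecated ZSTD_initDStream_*() functions
     * above.  ZSTD_DStream is an alias of ZSTD_DCtx. */
    static size_t begin_decode(ZSTD_DStream *zds,
                               const void *dict, size_t dictSize)
    {
        size_t err = ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
        if (ZSTD_isError(err)) return err;
        /* no dictionary is used when dict == NULL or dictSize < 8;
         * ZSTD_DCtx_refDDict(zds, ddict) is the DDict-based variant, and
         * the reset alone is the ZSTD_resetDStream() replacement */
        return ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
    }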
 
 
 /*********************************************************************
@@ -1696,7 +1875,7 @@
     unsigned checksumFlag;
 } ZSTD_frameHeader;
 
-/** ZSTD_getFrameHeader() :
+/*! ZSTD_getFrameHeader() :
  *  decode Frame Header, or requires larger `srcSize`.
  * @return : 0, `zfhPtr` is correctly filled,
  *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
@@ -1730,7 +1909,7 @@
 /*!
     Block functions produce and decode raw zstd blocks, without frame metadata.
     Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
-    User will have to take in charge required information to regenerate data, such as compressed and content sizes.
+    But users will have to take charge of the metadata needed to regenerate the data, such as compressed and content sizes.
 
     A few rules to respect :
     - Compressing and decompressing require a context structure
@@ -1741,12 +1920,14 @@
       + copyCCtx() and copyDCtx() can be used too
     - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
       + If input is larger than a block size, it's necessary to split input data into multiple blocks
-      + For inputs larger than a single block, really consider using regular ZSTD_compress() instead.
-        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
-    - When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
-      In which case, nothing is produced into `dst` !
-      + User must test for such outcome and deal directly with uncompressed data
-      + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
+      + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
+        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
+    - When a block is considered not compressible enough, ZSTD_compressBlock() returns 0 (zero) !
+      ===> In that case, nothing is produced into `dst` !
+      + User __must__ test for such an outcome and deal directly with the uncompressed data
+      + A block must not be declared incompressible when ZSTD_compressBlock()'s return value was != 0.
+        Doing so would corrupt the statistics history, leading to potential data corruption.
+      + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
       + In case of multiple successive blocks, should some of them be uncompressed,
         decoder must be informed of their existence in order to follow proper history.
         Use ZSTD_insertBlock() for such a case.
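
The incompressible-block rules above are easy to get wrong, so here is a minimal sketch of both sides. It assumes ZSTD_STATIC_LINKING_ONLY (the block API is experimental) and a caller-defined framing that carries a per-block "raw" flag; put_block, get_block and the (size_t)-1 error value are illustrative, not zstd API:

    #define ZSTD_STATIC_LINKING_ONLY  /* block functions are experimental */
    #include <zstd.h>
    #include <string.h>

    /* Encoder side: a 0 return from ZSTD_compressBlock() means "not
     * compressible" -- nothing was written to dst, and the caller must
     * transmit the block raw, flagging it as such in its own framing. */
    static size_t put_block(ZSTD_CCtx *cctx, void *dst, size_t dstCap,
                            const void *src, size_t srcSize, int *is_raw)
    {
        size_t csize = ZSTD_compressBlock(cctx, dst, dstCap, src, srcSize);
        if (ZSTD_isError(csize)) return csize;
        *is_raw = (csize == 0);
        return csize;
    }

    /* Decoder side: raw blocks never go through ZSTD_decompressBlock();
     * copy them into the output window, then register them with
     * ZSTD_insertBlock() so back-reference history stays consistent. */
    static size_t get_block(ZSTD_DCtx *dctx, int is_raw,
                            void *dst, size_t dstCap,
                            const void *blk, size_t blkSize)
    {
        if (is_raw) {
            if (blkSize > dstCap) return (size_t)-1;  /* caller-defined error */
            memcpy(dst, blk, blkSize);
            return ZSTD_insertBlock(dctx, dst, blkSize);
        }
        return ZSTD_decompressBlock(dctx, dst, dstCap, blk, blkSize);
    }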
--- a/contrib/python3-ratchet.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python3-ratchet.py	Mon Oct 21 11:09:48 2019 -0400
@@ -25,65 +25,103 @@
 import sys
 
 _hgenv = dict(os.environ)
-_hgenv.update({
-    'HGPLAIN': '1',
-    })
+_hgenv.update(
+    {'HGPLAIN': '1',}
+)
 
 _HG_FIRST_CHANGE = '9117c6561b0bd7792fa13b50d28239d51b78e51f'
 
+
 def _runhg(*args):
     return subprocess.check_output(args, env=_hgenv)
 
+
 def _is_hg_repo(path):
-    return _runhg('hg', 'log', '-R', path,
-                  '-r0', '--template={node}').strip() == _HG_FIRST_CHANGE
+    return (
+        _runhg('hg', 'log', '-R', path, '-r0', '--template={node}').strip()
+        == _HG_FIRST_CHANGE
+    )
+
 
 def _py3default():
     if sys.version_info[0] >= 3:
         return sys.executable
     return 'python3'
 
+
 def main(argv=()):
     p = argparse.ArgumentParser()
-    p.add_argument('--working-tests',
-                   help='List of tests that already work in Python 3.')
-    p.add_argument('--commit-to-repo',
-                   help='If set, commit newly fixed tests to the given repo')
-    p.add_argument('-j', default=os.sysconf(r'SC_NPROCESSORS_ONLN'), type=int,
-                   help='Number of parallel tests to run.')
-    p.add_argument('--python3', default=_py3default(),
-                   help='python3 interpreter to use for test run')
-    p.add_argument('--commit-user',
-                   default='python3-ratchet@mercurial-scm.org',
-                   help='Username to specify when committing to a repo.')
+    p.add_argument(
+        '--working-tests', help='List of tests that already work in Python 3.'
+    )
+    p.add_argument(
+        '--commit-to-repo',
+        help='If set, commit newly fixed tests to the given repo',
+    )
+    p.add_argument(
+        '-j',
+        default=os.sysconf(r'SC_NPROCESSORS_ONLN'),
+        type=int,
+        help='Number of parallel tests to run.',
+    )
+    p.add_argument(
+        '--python3',
+        default=_py3default(),
+        help='python3 interpreter to use for test run',
+    )
+    p.add_argument(
+        '--commit-user',
+        default='python3-ratchet@mercurial-scm.org',
+        help='Username to specify when committing to a repo.',
+    )
     opts = p.parse_args(argv)
     if opts.commit_to_repo:
         if not _is_hg_repo(opts.commit_to_repo):
             print('abort: specified repository is not the hg repository')
             sys.exit(1)
     if not opts.working_tests or not os.path.isfile(opts.working_tests):
-        print('abort: --working-tests must exist and be a file (got %r)' %
-              opts.working_tests)
+        print(
+            'abort: --working-tests must exist and be a file (got %r)'
+            % opts.working_tests
+        )
         sys.exit(1)
     elif opts.commit_to_repo:
         root = _runhg('hg', 'root').strip()
         if not opts.working_tests.startswith(root):
-            print('abort: if --commit-to-repo is given, '
-                  '--working-tests must be from that repo')
+            print(
+                'abort: if --commit-to-repo is given, '
+                '--working-tests must be from that repo'
+            )
             sys.exit(1)
     try:
-        subprocess.check_call([opts.python3, '-c',
-                               'import sys ; '
-                               'assert ((3, 5) <= sys.version_info < (3, 6) '
-                               'or sys.version_info >= (3, 6, 2))'])
+        subprocess.check_call(
+            [
+                opts.python3,
+                '-c',
+                'import sys ; '
+                'assert ((3, 5) <= sys.version_info < (3, 6) '
+                'or sys.version_info >= (3, 6, 2))',
+            ]
+        )
     except subprocess.CalledProcessError:
-        print('warning: Python 3.6.0 and 3.6.1 have '
-              'a bug which breaks Mercurial')
+        print(
+            'warning: Python 3.6.0 and 3.6.1 have '
+            'a bug which breaks Mercurial'
+        )
         print('(see https://bugs.python.org/issue29714 for details)')
         sys.exit(1)
 
-    rt = subprocess.Popen([opts.python3, 'run-tests.py', '-j', str(opts.j),
-                           '--blacklist', opts.working_tests, '--json'])
+    rt = subprocess.Popen(
+        [
+            opts.python3,
+            'run-tests.py',
+            '-j',
+            str(opts.j),
+            '--blacklist',
+            opts.working_tests,
+            '--json',
+        ]
+    )
     rt.wait()
     with open('report.json') as f:
         data = f.read()
@@ -104,12 +142,20 @@
             with open(opts.working_tests, 'w') as f:
                 for p in sorted(oldpass | newpass):
                     f.write('%s\n' % p)
-            _runhg('hg', 'commit', '-R', opts.commit_to_repo,
-                   '--user', opts.commit_user,
-                   '--message', 'python3: expand list of passing tests')
+            _runhg(
+                'hg',
+                'commit',
+                '-R',
+                opts.commit_to_repo,
+                '--user',
+                opts.commit_user,
+                '--message',
+                'python3: expand list of passing tests',
+            )
         else:
             print('Newly passing tests:', '\n'.join(sorted(newpass)))
             sys.exit(2)
 
+
 if __name__ == '__main__':
     main(sys.argv[1:])
--- a/contrib/python3-whitelist	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/python3-whitelist	Mon Oct 21 11:09:48 2019 -0400
@@ -124,6 +124,7 @@
 test-convert-hg-sink.t
 test-convert-hg-source.t
 test-convert-hg-startrev.t
+test-convert-identity.t
 test-convert-mtn.t
 test-convert-splicemap.t
 test-convert-svn-sink.t
@@ -295,6 +296,7 @@
 test-hgwebdir-paths.py
 test-hgwebdir.t
 test-hgwebdirsym.t
+test-highlight.t
 test-histedit-arguments.t
 test-histedit-base.t
 test-histedit-bookmark-motion.t
--- a/contrib/revsetbenchmarks.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/revsetbenchmarks.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,9 +16,20 @@
 import subprocess
 import sys
 
-DEFAULTVARIANTS = ['plain', 'min', 'max', 'first', 'last',
-                   'reverse', 'reverse+first', 'reverse+last',
-                   'sort', 'sort+first', 'sort+last']
+DEFAULTVARIANTS = [
+    'plain',
+    'min',
+    'max',
+    'first',
+    'last',
+    'reverse',
+    'reverse+first',
+    'reverse+last',
+    'sort',
+    'sort+first',
+    'sort+last',
+]
+
 
 def check_output(*args, **kwargs):
     kwargs.setdefault('stderr', subprocess.PIPE)
@@ -29,14 +40,16 @@
         raise subprocess.CalledProcessError(proc.returncode, ' '.join(args[0]))
     return output
 
+
 def update(rev):
     """update the repo to a revision"""
     try:
         subprocess.check_call(['hg', 'update', '--quiet', '--check', str(rev)])
-        check_output(['make', 'local'],
-                     stderr=None)  # suppress output except for error/warning
+        check_output(
+            ['make', 'local'], stderr=None
+        )  # suppress output except for error/warning
     except subprocess.CalledProcessError as exc:
-        print('update to revision %s failed, aborting'%rev, file=sys.stderr)
+        print('update to revision %s failed, aborting' % rev, file=sys.stderr)
         sys.exit(exc.returncode)
 
 
@@ -48,11 +61,14 @@
     fullcmd = ['./hg']
     if repo is not None:
         fullcmd += ['-R', repo]
-    fullcmd += ['--config',
-                'extensions.perf=' + os.path.join(contribdir, 'perf.py')]
+    fullcmd += [
+        '--config',
+        'extensions.perf=' + os.path.join(contribdir, 'perf.py'),
+    ]
     fullcmd += cmd
     return check_output(fullcmd, stderr=subprocess.STDOUT)
 
+
 def perf(revset, target=None, contexts=False):
     """run benchmark for this very revset"""
     try:
@@ -64,15 +80,21 @@
         output = hg(args, repo=target)
         return parseoutput(output)
     except subprocess.CalledProcessError as exc:
-        print('abort: cannot run revset benchmark: %s'%exc.cmd, file=sys.stderr)
-        if getattr(exc, 'output', None) is None: # no output before 2.7
+        print(
+            'abort: cannot run revset benchmark: %s' % exc.cmd, file=sys.stderr
+        )
+        if getattr(exc, 'output', None) is None:  # no output before 2.7
             print('(no output)', file=sys.stderr)
         else:
             print(exc.output, file=sys.stderr)
         return None
 
-outputre = re.compile(br'! wall (\d+.\d+) comb (\d+.\d+) user (\d+.\d+) '
-                      br'sys (\d+.\d+) \(best of (\d+)\)')
+
+outputre = re.compile(
+    br'! wall (\d+.\d+) comb (\d+.\d+) user (\d+.\d+) '
+    br'sys (\d+.\d+) \(best of (\d+)\)'
+)
+
 
 def parseoutput(output):
     """parse a textual output into a dict
@@ -85,20 +107,30 @@
         print('abort: invalid output:', file=sys.stderr)
         print(output, file=sys.stderr)
         sys.exit(1)
-    return {'comb': float(match.group(2)),
-            'count': int(match.group(5)),
-            'sys': float(match.group(3)),
-            'user': float(match.group(4)),
-            'wall': float(match.group(1)),
-            }
+    return {
+        'comb': float(match.group(2)),
+        'count': int(match.group(5)),
+        'sys': float(match.group(3)),
+        'user': float(match.group(4)),
+        'wall': float(match.group(1)),
+    }
+
 
 def printrevision(rev):
     """print data about a revision"""
     sys.stdout.write("Revision ")
     sys.stdout.flush()
-    subprocess.check_call(['hg', 'log', '--rev', str(rev), '--template',
-                           '{if(tags, " ({tags})")} '
-                           '{rev}:{node|short}: {desc|firstline}\n'])
+    subprocess.check_call(
+        [
+            'hg',
+            'log',
+            '--rev',
+            str(rev),
+            '--template',
+            '{if(tags, " ({tags})")} ' '{rev}:{node|short}: {desc|firstline}\n',
+        ]
+    )
+
 
 def idxwidth(nbidx):
     """return the max width of number used for index
@@ -107,7 +139,7 @@
     because we start with zero and we'd rather not deal with all the
     extra rounding business that log10 would imply.
     """
-    nbidx -= 1 # starts at 0
+    nbidx -= 1  # starts at 0
     idxwidth = 0
     while nbidx:
         idxwidth += 1
@@ -116,6 +148,7 @@
         idxwidth = 1
     return idxwidth
 
+
 def getfactor(main, other, field, sensitivity=0.05):
     """return the relative factor between values for 'field' in main and other
 
@@ -125,10 +158,11 @@
     if main is not None:
         factor = other[field] / main[field]
     low, high = 1 - sensitivity, 1 + sensitivity
-    if (low < factor < high):
+    if low < factor < high:
         return None
     return factor
 
+
 def formatfactor(factor):
     """format a factor into a 4 char string
 
@@ -155,15 +189,19 @@
             factor //= 10
         return 'x%ix%i' % (factor, order)
 
+
 def formattiming(value):
     """format a value to strictly 8 char, dropping some precision if needed"""
-    if value < 10**7:
+    if value < 10 ** 7:
         return ('%.6f' % value)[:8]
     else:
         # value is HUGE, very unlikely to happen (4+ month run)
         return '%i' % value
 
+
 _marker = object()
+
+
 def printresult(variants, idx, data, maxidx, verbose=False, reference=_marker):
     """print a line of result to stdout"""
     mask = '%%0%ii) %%s' % idxwidth(maxidx)
@@ -184,9 +222,10 @@
             out.append(formattiming(data[var]['comb']))
             out.append(formattiming(data[var]['user']))
             out.append(formattiming(data[var]['sys']))
-            out.append('%6d'    % data[var]['count'])
+            out.append('%6d' % data[var]['count'])
     print(mask % (idx, ' '.join(out)))
 
+
 def printheader(variants, maxidx, verbose=False, relative=False):
     header = [' ' * (idxwidth(maxidx) + 1)]
     for var in variants:
@@ -204,12 +243,13 @@
             header.append('%6s' % 'count')
     print(' '.join(header))
 
+
 def getrevs(spec):
     """get the list of rev matched by a revset"""
     try:
         out = check_output(['hg', 'log', '--template={rev}\n', '--rev', spec])
     except subprocess.CalledProcessError as exc:
-        print("abort, can't get revision from %s"%spec, file=sys.stderr)
+        print("abort, can't get revision from %s" % spec, file=sys.stderr)
         sys.exit(exc.returncode)
     return [r for r in out.split() if r]
 
@@ -221,31 +261,44 @@
         revset = '%s(%s)' % (var, revset)
     return revset
 
-helptext="""This script will run multiple variants of provided revsets using
+
+helptext = """This script will run multiple variants of provided revsets using
 different revisions in your mercurial repository. After the benchmarks are run,
 summary output is provided. Use it to demonstrate speed improvements or pinpoint
 regressions. Revsets to run are specified in a file (or from stdin), one
 revset per line. Lines starting with '#' will be ignored, allowing insertion of
 comments."""
-parser = optparse.OptionParser(usage="usage: %prog [options] <revs>",
-                               description=helptext)
-parser.add_option("-f", "--file",
-                  help="read revset from FILE (stdin if omitted)",
-                  metavar="FILE")
-parser.add_option("-R", "--repo",
-                  help="run benchmark on REPO", metavar="REPO")
+parser = optparse.OptionParser(
+    usage="usage: %prog [options] <revs>", description=helptext
+)
+parser.add_option(
+    "-f",
+    "--file",
+    help="read revset from FILE (stdin if omitted)",
+    metavar="FILE",
+)
+parser.add_option("-R", "--repo", help="run benchmark on REPO", metavar="REPO")
 
-parser.add_option("-v", "--verbose",
-                  action='store_true',
-                  help="display all timing data (not just best total time)")
+parser.add_option(
+    "-v",
+    "--verbose",
+    action='store_true',
+    help="display all timing data (not just best total time)",
+)
 
-parser.add_option("", "--variants",
-                  default=','.join(DEFAULTVARIANTS),
-                  help="comma separated list of variant to test "
-                       "(eg: plain,min,sorted) (plain = no modification)")
-parser.add_option('', '--contexts',
-                  action='store_true',
-                  help='obtain changectx from results instead of integer revs')
+parser.add_option(
+    "",
+    "--variants",
+    default=','.join(DEFAULTVARIANTS),
+    help="comma separated list of variant to test "
+    "(eg: plain,min,sorted) (plain = no modification)",
+)
+parser.add_option(
+    '',
+    '--contexts',
+    action='store_true',
+    help='obtain changectx from results instead of integer revs',
+)
 
 (options, args) = parser.parse_args()
 
@@ -294,17 +347,20 @@
             data = perf(varrset, target=options.repo, contexts=options.contexts)
             varres[var] = data
         res.append(varres)
-        printresult(variants, idx, varres, len(revsets),
-                    verbose=options.verbose)
+        printresult(
+            variants, idx, varres, len(revsets), verbose=options.verbose
+        )
         sys.stdout.flush()
     print("----------------------------")
 
 
-print("""
+print(
+    """
 
 Result by revset
 ================
-""")
+"""
+)
 
 print('Revision:')
 for idx, rev in enumerate(revs):
@@ -321,7 +377,13 @@
     printheader(variants, len(results), verbose=options.verbose, relative=True)
     ref = None
     for idx, data in enumerate(results):
-        printresult(variants, idx, data[ridx], len(results),
-                    verbose=options.verbose, reference=ref)
+        printresult(
+            variants,
+            idx,
+            data[ridx],
+            len(results),
+            verbose=options.verbose,
+            reference=ref,
+        )
         ref = data[ridx]
     print()
--- a/contrib/showstack.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/showstack.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,16 +9,19 @@
 import sys
 import traceback
 
+
 def sigshow(*args):
     sys.stderr.write("\n")
     traceback.print_stack(args[1], limit=10, file=sys.stderr)
     sys.stderr.write("----\n")
 
+
 def sigexit(*args):
     sigshow(*args)
     print('alarm!')
     sys.exit(1)
 
+
 def extsetup(ui):
     signal.signal(signal.SIGQUIT, sigshow)
     signal.signal(signal.SIGALRM, sigexit)
--- a/contrib/synthrepo.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/synthrepo.py	Mon Oct 21 11:09:48 2019 -0400
@@ -58,12 +58,11 @@
     error,
     hg,
     patch,
+    pycompat,
     registrar,
     scmutil,
 )
-from mercurial.utils import (
-    dateutil,
-)
+from mercurial.utils import dateutil
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
@@ -76,14 +75,17 @@
 
 newfile = {'new fi', 'rename', 'copy f', 'copy t'}
 
+
 def zerodict():
     return collections.defaultdict(lambda: 0)
 
+
 def roundto(x, k):
     if x > k * 2:
         return int(round(x / float(k)) * k)
     return int(round(x))
 
+
 def parsegitdiff(lines):
     filename, mar, lineadd, lineremove = None, None, zerodict(), 0
     binary = False
@@ -109,10 +111,16 @@
     if filename:
         yield filename, mar, lineadd, lineremove, binary
 
-@command('analyze',
-         [('o', 'output', '', _('write output to given file'), _('FILE')),
-          ('r', 'rev', [], _('analyze specified revisions'), _('REV'))],
-         _('hg analyze'), optionalrepo=True)
+
+@command(
+    'analyze',
+    [
+        ('o', 'output', '', _('write output to given file'), _('FILE')),
+        ('r', 'rev', [], _('analyze specified revisions'), _('REV')),
+    ],
+    _('hg analyze'),
+    optionalrepo=True,
+)
 def analyze(ui, repo, *revs, **opts):
     '''create a simple model of a repository to use for later synthesis
 
@@ -175,8 +183,9 @@
         revs = scmutil.revrange(repo, revs)
         revs.sort()
 
-        progress = ui.makeprogress(_('analyzing'), unit=_('changesets'),
-                                   total=len(revs))
+        progress = ui.makeprogress(
+            _('analyzing'), unit=_('changesets'), total=len(revs)
+        )
         for i, rev in enumerate(revs):
             progress.update(i)
             ctx = repo[rev]
@@ -197,17 +206,19 @@
                 timedelta = ctx.date()[0] - lastctx.date()[0]
                 interarrival[roundto(timedelta, 300)] += 1
             diffopts = diffutil.diffallopts(ui, {'git': True})
-            diff = sum((d.splitlines()
-                       for d in ctx.diff(pctx, opts=diffopts)), [])
+            diff = sum(
+                (d.splitlines() for d in ctx.diff(pctx, opts=diffopts)), []
+            )
             fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0
             for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff):
                 if isbin:
                     continue
-                added = sum(lineadd.itervalues(), 0)
+                added = sum(pycompat.itervalues(lineadd), 0)
                 if mar == 'm':
                     if added and lineremove:
-                        lineschanged[roundto(added, 5),
-                                     roundto(lineremove, 5)] += 1
+                        lineschanged[
+                            roundto(added, 5), roundto(lineremove, 5)
+                        ] += 1
                         filechanges += 1
                 elif mar == 'a':
                     fileadds += 1
@@ -237,30 +248,38 @@
     def pronk(d):
         return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)
 
-    json.dump({'revs': len(revs),
-               'initdirs': pronk(dirs),
-               'lineschanged': pronk(lineschanged),
-               'children': pronk(invchildren),
-               'fileschanged': pronk(fileschanged),
-               'filesadded': pronk(filesadded),
-               'linesinfilesadded': pronk(linesinfilesadded),
-               'dirsadded': pronk(dirsadded),
-               'filesremoved': pronk(filesremoved),
-               'linelengths': pronk(linelengths),
-               'parents': pronk(parents),
-               'p1distance': pronk(p1distance),
-               'p2distance': pronk(p2distance),
-               'interarrival': pronk(interarrival),
-               'tzoffset': pronk(tzoffset),
-               },
-              fp)
+    json.dump(
+        {
+            'revs': len(revs),
+            'initdirs': pronk(dirs),
+            'lineschanged': pronk(lineschanged),
+            'children': pronk(invchildren),
+            'fileschanged': pronk(fileschanged),
+            'filesadded': pronk(filesadded),
+            'linesinfilesadded': pronk(linesinfilesadded),
+            'dirsadded': pronk(dirsadded),
+            'filesremoved': pronk(filesremoved),
+            'linelengths': pronk(linelengths),
+            'parents': pronk(parents),
+            'p1distance': pronk(p1distance),
+            'p2distance': pronk(p2distance),
+            'interarrival': pronk(interarrival),
+            'tzoffset': pronk(tzoffset),
+        },
+        fp,
+    )
     fp.close()
 
-@command('synthesize',
-         [('c', 'count', 0, _('create given number of commits'), _('COUNT')),
-          ('', 'dict', '', _('path to a dictionary of words'), _('FILE')),
-          ('', 'initfiles', 0, _('initial file count to create'), _('COUNT'))],
-         _('hg synthesize [OPTION].. DESCFILE'))
+
+@command(
+    'synthesize',
+    [
+        ('c', 'count', 0, _('create given number of commits'), _('COUNT')),
+        ('', 'dict', '', _('path to a dictionary of words'), _('FILE')),
+        ('', 'initfiles', 0, _('initial file count to create'), _('COUNT')),
+    ],
+    _('hg synthesize [OPTION].. DESCFILE'),
+)
 def synthesize(ui, repo, descpath, **opts):
     '''synthesize commits based on a model of an existing repository
 
@@ -365,7 +384,7 @@
             return True
 
         progress = ui.makeprogress(_synthesizing, unit=_files, total=initcount)
-        for i in xrange(0, initcount):
+        for i in pycompat.xrange(0, initcount):
             progress.update(i)
 
             path = pickpath()
@@ -383,22 +402,29 @@
 
         progress.complete()
         message = 'synthesized wide repo with %d files' % (len(files),)
-        mc = context.memctx(repo, [pctx.node(), nullid], message,
-                            files, filectxfn, ui.username(),
-                            '%d %d' % dateutil.makedate())
+        mc = context.memctx(
+            repo,
+            [pctx.node(), nullid],
+            message,
+            files,
+            filectxfn,
+            ui.username(),
+            '%d %d' % dateutil.makedate(),
+        )
         initnode = mc.commit()
         if ui.debugflag:
             hexfn = hex
         else:
             hexfn = short
-        ui.status(_('added commit %s with %d files\n')
-                  % (hexfn(initnode), len(files)))
+        ui.status(
+            _('added commit %s with %d files\n') % (hexfn(initnode), len(files))
+        )
 
     # Synthesize incremental revisions to the repository, adding repo depth.
     count = int(opts['count'])
     heads = set(map(repo.changelog.rev, repo.heads()))
     progress = ui.makeprogress(_synthesizing, unit=_changesets, total=count)
-    for i in xrange(count):
+    for i in pycompat.xrange(count):
         progress.update(i)
 
         node = repo.changelog.node
@@ -432,32 +458,35 @@
         mfk = mf.keys()
         changes = {}
         if mfk:
-            for __ in xrange(pick(fileschanged)):
-                for __ in xrange(10):
+            for __ in pycompat.xrange(pick(fileschanged)):
+                for __ in pycompat.xrange(10):
                     fctx = pctx.filectx(random.choice(mfk))
                     path = fctx.path()
-                    if not (path in nevertouch or fctx.isbinary() or
-                            'l' in fctx.flags()):
+                    if not (
+                        path in nevertouch
+                        or fctx.isbinary()
+                        or 'l' in fctx.flags()
+                    ):
                         break
                 lines = fctx.data().splitlines()
                 add, remove = pick(lineschanged)
-                for __ in xrange(remove):
+                for __ in pycompat.xrange(remove):
                     if not lines:
                         break
                     del lines[random.randrange(0, len(lines))]
-                for __ in xrange(add):
+                for __ in pycompat.xrange(add):
                     lines.insert(random.randint(0, len(lines)), makeline())
                 path = fctx.path()
                 changes[path] = '\n'.join(lines) + '\n'
-            for __ in xrange(pick(filesremoved)):
-                for __ in xrange(10):
+            for __ in pycompat.xrange(pick(filesremoved)):
+                for __ in pycompat.xrange(10):
                     path = random.choice(mfk)
                     if path not in changes:
                         break
         if filesadded:
             dirs = list(pctx.dirs())
             dirs.insert(0, '')
-        for __ in xrange(pick(filesadded)):
+        for __ in pycompat.xrange(pick(filesadded)):
             pathstr = ''
             while pathstr in dirs:
                 path = [random.choice(dirs)]
@@ -465,13 +494,20 @@
                     path.append(random.choice(words))
                 path.append(random.choice(words))
                 pathstr = '/'.join(filter(None, path))
-            data = '\n'.join(makeline()
-                             for __ in xrange(pick(linesinfilesadded))) + '\n'
+            data = (
+                '\n'.join(
+                    makeline()
+                    for __ in pycompat.xrange(pick(linesinfilesadded))
+                )
+                + '\n'
+            )
             changes[pathstr] = data
+
         def filectxfn(repo, memctx, path):
             if path not in changes:
                 return None
             return context.memfilectx(repo, memctx, path, changes[path])
+
         if not changes:
             continue
         if revs:
@@ -479,11 +515,17 @@
         else:
             date = time.time() - (86400 * count)
         # dates in mercurial must be positive, fit in 32-bit signed integers.
-        date = min(0x7fffffff, max(0, date))
+        date = min(0x7FFFFFFF, max(0, date))
         user = random.choice(words) + '@' + random.choice(words)
-        mc = context.memctx(repo, pl, makeline(minimum=2),
-                            sorted(changes),
-                            filectxfn, user, '%d %d' % (date, pick(tzoffset)))
+        mc = context.memctx(
+            repo,
+            pl,
+            makeline(minimum=2),
+            sorted(changes),
+            filectxfn,
+            user,
+            '%d %d' % (date, pick(tzoffset)),
+        )
         newnode = mc.commit()
         heads.add(repo.changelog.rev(newnode))
         heads.discard(r1)
@@ -493,10 +535,12 @@
     lock.release()
     wlock.release()
 
+
 def renamedirs(dirs, words):
     '''Randomly rename the directory names in the per-dir file count dict.'''
     wordgen = itertools.cycle(words)
     replacements = {'': ''}
+
     def rename(dirpath):
         '''Recursively rename the directory and all path prefixes.
 
@@ -514,6 +558,7 @@
         renamed = os.path.join(head, next(wordgen))
         replacements[dirpath] = renamed
         return renamed
+
     result = []
     for dirpath, count in dirs.iteritems():
         result.append([rename(dirpath.lstrip(os.sep)), count])
--- a/contrib/testparseutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/testparseutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,11 +14,13 @@
 ####################
 # for Python3 compatibility (almost comes from mercurial/pycompat.py)
 
-ispy3 = (sys.version_info[0] >= 3)
+ispy3 = sys.version_info[0] >= 3
+
 
 def identity(a):
     return a
 
+
 def _rapply(f, xs):
     if xs is None:
         # assume None means non-value of optional data
@@ -29,12 +31,14 @@
         return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
     return f(xs)
 
+
 def rapply(f, xs):
     if f is identity:
         # fast path mainly for py2
         return xs
     return _rapply(f, xs)
 
+
 if ispy3:
     import builtins
 
@@ -45,33 +49,41 @@
     def sysstr(s):
         if isinstance(s, builtins.str):
             return s
-        return s.decode(u'latin-1')
+        return s.decode('latin-1')
 
     def opentext(f):
         return open(f, 'r')
+
+
 else:
     bytestr = str
     sysstr = identity
 
     opentext = open
 
+
 def b2s(x):
     # convert BYTES elements in "x" to SYSSTR recursively
     return rapply(sysstr, x)
 
+
 def writeout(data):
     # write "data" in BYTES into stdout
     sys.stdout.write(data)
 
+
 def writeerr(data):
     # write "data" in BYTES into stderr
     sys.stderr.write(data)
 
+
 ####################
 
+
 class embeddedmatcher(object):
     """Base class to detect embedded code fragments in *.t test script
     """
+
     __metaclass__ = abc.ABCMeta
 
     def __init__(self, desc):
@@ -126,6 +138,7 @@
     def codeinside(self, ctx, line):
         """Return actual code at line inside embedded code"""
 
+
 def embedded(basefile, lines, errors, matchers):
     """pick embedded code fragments up from given lines
 
@@ -168,12 +181,12 @@
 
     """
     matcher = None
-    ctx = filename = code = startline = None # for pyflakes
+    ctx = filename = code = startline = None  # for pyflakes
 
     for lineno, line in enumerate(lines, 1):
         if not line.endswith('\n'):
-            line += '\n' # to normalize EOF line
-        if matcher: # now, inside embedded code
+            line += '\n'  # to normalize EOF line
+        if matcher:  # now, inside embedded code
             if matcher.endsat(ctx, line):
                 codeatend = matcher.codeatend(ctx, line)
                 if codeatend is not None:
@@ -185,8 +198,10 @@
             elif not matcher.isinside(ctx, line):
                 # this is an error of basefile
                 # (if matchers are implemented correctly)
-                errors.append('%s:%d: unexpected line for "%s"'
-                              % (basefile, lineno, matcher.desc))
+                errors.append(
+                    '%s:%d: unexpected line for "%s"'
+                    % (basefile, lineno, matcher.desc)
+                )
                 # stop extracting embedded code by current 'matcher',
                 # because appearance of unexpected line might mean
                 # that expected end-of-embedded-code line might never
@@ -208,10 +223,14 @@
         if matched:
             if len(matched) > 1:
                 # this is an error of matchers, maybe
-                errors.append('%s:%d: ambiguous line for %s' %
-                              (basefile, lineno,
-                               ', '.join(['"%s"' % m.desc
-                                           for m, c in matched])))
+                errors.append(
+                    '%s:%d: ambiguous line for %s'
+                    % (
+                        basefile,
+                        lineno,
+                        ', '.join(['"%s"' % m.desc for m, c in matched]),
+                    )
+                )
                 # omit extracting embedded code, because choosing
                 # arbitrary matcher from matched ones might fail to
                 # detect the end of embedded code as expected.
@@ -238,8 +257,11 @@
         else:
             # this is an error of basefile
             # (if matchers are implemented correctly)
-            errors.append('%s:%d: unexpected end of file for "%s"'
-                          % (basefile, lineno, matcher.desc))
+            errors.append(
+                '%s:%d: unexpected end of file for "%s"'
+                % (basefile, lineno, matcher.desc)
+            )
+
 
 # heredoc limit mark to ignore embedded code at check-code.py or so
 heredocignorelimit = 'NO_CHECK_EOF'
@@ -252,6 +274,7 @@
 # - << 'LIMITMARK'
 heredoclimitpat = r'\s*<<\s*(?P<lquote>["\']?)(?P<limit>\w+)(?P=lquote)'
 
+
 class fileheredocmatcher(embeddedmatcher):
     """Detect "cat > FILE << LIMIT" style embedded code
 
@@ -290,6 +313,7 @@
     >>> matcher.ignores(ctx)
     True
     """
+
     _prefix = '  > '
 
     def __init__(self, desc, namepat):
@@ -302,8 +326,9 @@
         # - > NAMEPAT
         # - > "NAMEPAT"
         # - > 'NAMEPAT'
-        namepat = (r'\s*>>?\s*(?P<nquote>["\']?)(?P<name>%s)(?P=nquote)'
-                   % namepat)
+        namepat = (
+            r'\s*>>?\s*(?P<nquote>["\']?)(?P<name>%s)(?P=nquote)' % namepat
+        )
         self._fileres = [
             # "cat > NAME << LIMIT" case
             re.compile(r'  \$ \s*cat' + namepat + heredoclimitpat),
@@ -316,8 +341,10 @@
         for filere in self._fileres:
             matched = filere.match(line)
             if matched:
-                return (matched.group('name'),
-                        '  > %s\n' % matched.group('limit'))
+                return (
+                    matched.group('name'),
+                    '  > %s\n' % matched.group('limit'),
+                )
 
     def endsat(self, ctx, line):
         return ctx[1] == line
@@ -332,17 +359,19 @@
         return ctx[0]
 
     def codeatstart(self, ctx, line):
-        return None # no embedded code at start line
+        return None  # no embedded code at start line
 
     def codeatend(self, ctx, line):
-        return None # no embedded code at end line
+        return None  # no embedded code at end line
 
     def codeinside(self, ctx, line):
-        return line[len(self._prefix):] # strip prefix
+        return line[len(self._prefix) :]  # strip prefix
+
 
 ####
 # for embedded python script
 
+
 class pydoctestmatcher(embeddedmatcher):
     """Detect ">>> code" style embedded python code
 
@@ -395,6 +424,7 @@
     True
     >>> matcher.codeatend(ctx, end)
     """
+
     _prefix = '  >>> '
     _prefixre = re.compile(r'  (>>>|\.\.\.) ')
 
@@ -419,24 +449,25 @@
         return not (self._prefixre.match(line) or self._outputre.match(line))
 
     def isinside(self, ctx, line):
-        return True # always true, if not yet ended
+        return True  # always true, if not yet ended
 
     def ignores(self, ctx):
-        return False # should be checked always
+        return False  # should be checked always
 
     def filename(self, ctx):
-        return None # no filename
+        return None  # no filename
 
     def codeatstart(self, ctx, line):
-        return line[len(self._prefix):] # strip prefix '  >>> '/'  ... '
+        return line[len(self._prefix) :]  # strip prefix '  >>> '/'  ... '
 
     def codeatend(self, ctx, line):
-        return None # no embedded code at end line
+        return None  # no embedded code at end line
 
     def codeinside(self, ctx, line):
         if self._prefixre.match(line):
-            return line[len(self._prefix):] # strip prefix '  >>> '/'  ... '
-        return '\n' # an expected output line is treated as an empty line
+            return line[len(self._prefix) :]  # strip prefix '  >>> '/'  ... '
+        return '\n'  # an expected output line is treated as an empty line
+
 
 class pyheredocmatcher(embeddedmatcher):
     """Detect "python << LIMIT" style embedded python code
@@ -474,10 +505,12 @@
     >>> matcher.ignores(ctx)
     True
     """
+
     _prefix = '  > '
 
-    _startre = re.compile(r'  \$ (\$PYTHON|"\$PYTHON"|python).*' +
-                          heredoclimitpat)
+    _startre = re.compile(
+        r'  \$ (\$PYTHON|"\$PYTHON"|python).*' + heredoclimitpat
+    )
 
     def __init__(self):
         super(pyheredocmatcher, self).__init__("heredoc python invocation")
@@ -498,16 +531,17 @@
         return '  > %s\n' % heredocignorelimit == ctx
 
     def filename(self, ctx):
-        return None # no filename
+        return None  # no filename
 
     def codeatstart(self, ctx, line):
-        return None # no embedded code at start line
+        return None  # no embedded code at start line
 
     def codeatend(self, ctx, line):
-        return None # no embedded code at end line
+        return None  # no embedded code at end line
 
     def codeinside(self, ctx, line):
-        return line[len(self._prefix):] # strip prefix
+        return line[len(self._prefix) :]  # strip prefix
+
 
 _pymatchers = [
     pydoctestmatcher(),
@@ -517,9 +551,11 @@
     fileheredocmatcher('heredoc .py file', r'[^<]+\.py'),
 ]
 
+
 def pyembedded(basefile, lines, errors):
     return embedded(basefile, lines, errors, _pymatchers)
 
+
 ####
 # for embedded shell script
 
@@ -529,22 +565,27 @@
     fileheredocmatcher('heredoc .sh file', r'[^<]+\.sh'),
 ]
 
+
 def shembedded(basefile, lines, errors):
     return embedded(basefile, lines, errors, _shmatchers)
 
+
 ####
 # for embedded hgrc configuration
 
 _hgrcmatchers = [
     # use '[^<]+' instead of '\S+', in order to match against
     # paths including whitespaces
-    fileheredocmatcher('heredoc hgrc file',
-                       r'(([^/<]+/)+hgrc|\$HGRCPATH|\${HGRCPATH})'),
+    fileheredocmatcher(
+        'heredoc hgrc file', r'(([^/<]+/)+hgrc|\$HGRCPATH|\${HGRCPATH})'
+    ),
 ]
 
+
 def hgrcembedded(basefile, lines, errors):
     return embedded(basefile, lines, errors, _hgrcmatchers)
 
+
 ####
 
 if __name__ == "__main__":
@@ -558,8 +599,7 @@
                 name = '<anonymous>'
             writeout("%s:%d: %s starts\n" % (basefile, starts, name))
             if opts.verbose and code:
-                writeout("  |%s\n" %
-                         "\n  |".join(l for l in code.splitlines()))
+                writeout("  |%s\n" % "\n  |".join(l for l in code.splitlines()))
             writeout("%s:%d: %s ends\n" % (basefile, ends, name))
         for e in errors:
             writeerr("%s\n" % e)
@@ -579,9 +619,11 @@
         return ret
 
     commands = {}
+
     def command(name, desc):
         def wrap(func):
             commands[name] = (desc, func)
+
         return wrap
 
     @command("pyembedded", "detect embedded python script")
@@ -596,21 +638,29 @@
     def hgrcembeddedcmd(args, opts):
         return applyembedded(args, hgrcembedded, opts)
 
-    availablecommands = "\n".join(["  - %s: %s" % (key, value[0])
-                                   for key, value in commands.items()])
+    availablecommands = "\n".join(
+        ["  - %s: %s" % (key, value[0]) for key, value in commands.items()]
+    )
 
-    parser = optparse.OptionParser("""%prog COMMAND [file ...]
+    parser = optparse.OptionParser(
+        """%prog COMMAND [file ...]
 
 Pick up embedded code fragments from given file(s) or stdin, and list
 their start/end lines in standard compiler format
 ("FILENAME:LINENO:").
 
 Available commands are:
-""" + availablecommands + """
-""")
-    parser.add_option("-v", "--verbose",
-                      help="enable additional output (e.g. actual code)",
-                      action="store_true")
+"""
+        + availablecommands
+        + """
+"""
+    )
+    parser.add_option(
+        "-v",
+        "--verbose",
+        help="enable additional output (e.g. actual code)",
+        action="store_true",
+    )
     (opts, args) = parser.parse_args()
 
     if not args or args[0] not in commands:
--- a/contrib/win32/hgwebdir_wsgi.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/contrib/win32/hgwebdir_wsgi.py	Mon Oct 21 11:09:48 2019 -0400
@@ -84,19 +84,20 @@
 hgweb_config = r'c:\your\directory\wsgi.config'
 
 # Global settings for IIS path translation
-path_strip = 0   # Strip this many path elements off (when using url rewrite)
+path_strip = 0  # Strip this many path elements off (when using url rewrite)
 path_prefix = 1  # This many path elements are prefixes (depends on the
-                 # virtual path of the IIS application).
+# virtual path of the IIS application).
 
 import sys
 
 # Adjust python path if this is not a system-wide install
-#sys.path.insert(0, r'C:\your\custom\hg\build\lib.win32-2.7')
+# sys.path.insert(0, r'C:\your\custom\hg\build\lib.win32-2.7')
 
 # Enable tracing. Run 'python -m win32traceutil' to debug
 if getattr(sys, 'isapidllhandle', None) is not None:
     import win32traceutil
-    win32traceutil.SetupForPrint # silence unused import warning
+
+    win32traceutil.SetupForPrint  # silence unused import warning
 
 import isapi_wsgi
 from mercurial.hgweb.hgwebdir_mod import hgwebdir
@@ -104,13 +105,15 @@
 # Example tweak: Replace isapi_wsgi's handler to provide better error message
 # Other stuff could also be done here, like logging errors etc.
 class WsgiHandler(isapi_wsgi.IsapiWsgiHandler):
-    error_status = '500 Internal Server Error' # less silly error message
+    error_status = '500 Internal Server Error'  # less silly error message
+
 
 isapi_wsgi.IsapiWsgiHandler = WsgiHandler
 
 # Only create the hgwebdir instance once
 application = hgwebdir(hgweb_config)
 
+
 def handler(environ, start_response):
 
     # Translate IIS's weird URLs
@@ -125,10 +128,13 @@
 
     return application(environ, start_response)
 
+
 def __ExtensionFactory__():
     return isapi_wsgi.ISAPISimpleHandler(handler)
 
-if __name__=='__main__':
+
+if __name__ == '__main__':
     from isapi.install import ISAPIParameters, HandleCommandLine
+
     params = ISAPIParameters()
     HandleCommandLine(params)
--- a/doc/check-seclevel.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/doc/check-seclevel.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,7 +11,9 @@
 # import from the live mercurial repo
 os.environ['HGMODULEPOLICY'] = 'py'
 sys.path.insert(0, "..")
-from mercurial import demandimport; demandimport.enable()
+from mercurial import demandimport
+
+demandimport.enable()
 from mercurial import (
     commands,
     extensions,
@@ -36,13 +38,16 @@
 initlevel_ext = 1
 initlevel_ext_cmd = 3
 
+
 def showavailables(ui, initlevel):
-    avail = ('    available marks and order of them in this help: %s\n') % (
-        ', '.join(['%r' % (m * 4) for m in level2mark[initlevel + 1:]]))
+    avail = '    available marks and order of them in this help: %s\n' % (
+        ', '.join(['%r' % (m * 4) for m in level2mark[initlevel + 1 :]])
+    )
     ui.warn(avail.encode('utf-8'))
 
+
 def checkseclevel(ui, doc, name, initlevel):
-    ui.note(('checking "%s"\n') % name)
+    ui.notenoi18n('checking "%s"\n' % name)
     if not isinstance(doc, bytes):
         doc = doc.encode('utf-8')
     blocks, pruned = minirst.parse(doc, 0, ['verbose'])
@@ -54,66 +59,83 @@
         mark = block[b'underline']
         title = block[b'lines'][0]
         if (mark not in mark2level) or (mark2level[mark] <= initlevel):
-            ui.warn((('invalid section mark %r for "%s" of %s\n') %
-                     (mark * 4, title, name)).encode('utf-8'))
+            ui.warn(
+                (
+                    'invalid section mark %r for "%s" of %s\n'
+                    % (mark * 4, title, name)
+                ).encode('utf-8')
+            )
             showavailables(ui, initlevel)
             errorcnt += 1
             continue
         nextlevel = mark2level[mark]
         if curlevel < nextlevel and curlevel + 1 != nextlevel:
-            ui.warn(('gap of section level at "%s" of %s\n') %
-                    (title, name))
+            ui.warnnoi18n(
+                'gap of section level at "%s" of %s\n' % (title, name)
+            )
             showavailables(ui, initlevel)
             errorcnt += 1
             continue
-        ui.note(('appropriate section level for "%s %s"\n') %
-                (mark * (nextlevel * 2), title))
+        ui.notenoi18n(
+            'appropriate section level for "%s %s"\n'
+            % (mark * (nextlevel * 2), title)
+        )
         curlevel = nextlevel
 
     return errorcnt
 
+
 def checkcmdtable(ui, cmdtable, namefmt, initlevel):
     errorcnt = 0
     for k, entry in cmdtable.items():
         name = k.split(b"|")[0].lstrip(b"^")
         if not entry[0].__doc__:
-            ui.note(('skip checking %s: no help document\n') %
-                    (namefmt % name))
+            ui.notenoi18n(
+                'skip checking %s: no help document\n' % (namefmt % name)
+            )
             continue
-        errorcnt += checkseclevel(ui, entry[0].__doc__,
-                                  namefmt % name,
-                                  initlevel)
+        errorcnt += checkseclevel(
+            ui, entry[0].__doc__, namefmt % name, initlevel
+        )
     return errorcnt
 
+
 def checkhghelps(ui):
     errorcnt = 0
     for h in helptable:
         names, sec, doc = h[0:3]
         if callable(doc):
             doc = doc(ui)
-        errorcnt += checkseclevel(ui, doc,
-                                  '%s help topic' % names[0],
-                                  initlevel_topic)
+        errorcnt += checkseclevel(
+            ui, doc, '%s help topic' % names[0], initlevel_topic
+        )
 
     errorcnt += checkcmdtable(ui, table, '%s command', initlevel_cmd)
 
-    for name in sorted(list(extensions.enabled()) +
-                       list(extensions.disabled())):
+    for name in sorted(
+        list(extensions.enabled()) + list(extensions.disabled())
+    ):
         mod = extensions.load(ui, name, None)
         if not mod.__doc__:
-            ui.note(('skip checking %s extension: no help document\n') % name)
+            ui.notenoi18n(
+                'skip checking %s extension: no help document\n' % name
+            )
             continue
-        errorcnt += checkseclevel(ui, mod.__doc__,
-                                  '%s extension' % name,
-                                  initlevel_ext)
+        errorcnt += checkseclevel(
+            ui, mod.__doc__, '%s extension' % name, initlevel_ext
+        )
 
         cmdtable = getattr(mod, 'cmdtable', None)
         if cmdtable:
-            errorcnt += checkcmdtable(ui, cmdtable,
-                                      '%%s command of %s extension' % name,
-                                      initlevel_ext_cmd)
+            errorcnt += checkcmdtable(
+                ui,
+                cmdtable,
+                '%%s command of %s extension' % name,
+                initlevel_ext_cmd,
+            )
     return errorcnt
 
+
 def checkfile(ui, filename, initlevel):
     if filename == '-':
         filename = 'stdin'
@@ -122,43 +144,76 @@
         with open(filename) as fp:
             doc = fp.read()
 
-    ui.note(('checking input from %s with initlevel %d\n') %
-            (filename, initlevel))
+    ui.notenoi18n(
+        'checking input from %s with initlevel %d\n' % (filename, initlevel)
+    )
     return checkseclevel(ui, doc, 'input from %s' % filename, initlevel)
 
+
 def main():
-    optparser = optparse.OptionParser("""%prog [options]
+    optparser = optparse.OptionParser(
+        """%prog [options]
 
 This checks all help documents of Mercurial (topics, commands,
 extensions and commands of them), if no file is specified by --file
 option.
-""")
-    optparser.add_option("-v", "--verbose",
-                         help="enable additional output",
-                         action="store_true")
-    optparser.add_option("-d", "--debug",
-                         help="debug mode",
-                         action="store_true")
-    optparser.add_option("-f", "--file",
-                         help="filename to read in (or '-' for stdin)",
-                         action="store", default="")
+"""
+    )
+    optparser.add_option(
+        "-v", "--verbose", help="enable additional output", action="store_true"
+    )
+    optparser.add_option(
+        "-d", "--debug", help="debug mode", action="store_true"
+    )
+    optparser.add_option(
+        "-f",
+        "--file",
+        help="filename to read in (or '-' for stdin)",
+        action="store",
+        default="",
+    )
 
-    optparser.add_option("-t", "--topic",
-                         help="parse file as help topic",
-                         action="store_const", dest="initlevel", const=0)
-    optparser.add_option("-c", "--command",
-                         help="parse file as help of core command",
-                         action="store_const", dest="initlevel", const=1)
-    optparser.add_option("-e", "--extension",
-                         help="parse file as help of extension",
-                         action="store_const", dest="initlevel", const=1)
-    optparser.add_option("-C", "--extension-command",
-                         help="parse file as help of extension command",
-                         action="store_const", dest="initlevel", const=3)
+    optparser.add_option(
+        "-t",
+        "--topic",
+        help="parse file as help topic",
+        action="store_const",
+        dest="initlevel",
+        const=0,
+    )
+    optparser.add_option(
+        "-c",
+        "--command",
+        help="parse file as help of core command",
+        action="store_const",
+        dest="initlevel",
+        const=1,
+    )
+    optparser.add_option(
+        "-e",
+        "--extension",
+        help="parse file as help of extension",
+        action="store_const",
+        dest="initlevel",
+        const=1,
+    )
+    optparser.add_option(
+        "-C",
+        "--extension-command",
+        help="parse file as help of extension command",
+        action="store_const",
+        dest="initlevel",
+        const=3,
+    )
 
-    optparser.add_option("-l", "--initlevel",
-                         help="set initial section level manually",
-                         action="store", type="int", default=0)
+    optparser.add_option(
+        "-l",
+        "--initlevel",
+        help="set initial section level manually",
+        action="store",
+        type="int",
+        default=0,
+    )
 
     (options, args) = optparser.parse_args()
 
@@ -173,5 +228,6 @@
         if checkhghelps(ui):
             sys.exit(1)
 
+
 if __name__ == "__main__":
     main()
--- a/doc/gendoc.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/doc/gendoc.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,6 +12,7 @@
 
 try:
     import msvcrt
+
     msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
     msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
 except ImportError:
@@ -22,10 +23,13 @@
 os.environ[r'HGMODULEPOLICY'] = r'allow'
 # import from the live mercurial repo
 sys.path.insert(0, r"..")
-from mercurial import demandimport; demandimport.enable()
+from mercurial import demandimport
+
+demandimport.enable()
 # Load util so that the locale path is set by i18n.setdatapath() before
 # calling _().
 from mercurial import util
+
 util.datapath
 from mercurial import (
     commands,
@@ -46,6 +50,7 @@
 helptable = help.helptable
 loaddoc = help.loaddoc
 
+
 def get_desc(docstr):
     if not docstr:
         return b"", b""
@@ -56,7 +61,7 @@
 
     i = docstr.find(b"\n")
     if i != -1:
-        desc = docstr[i + 2:]
+        desc = docstr[i + 2 :]
     else:
         desc = shortdesc
 
@@ -64,6 +69,7 @@
 
     return (shortdesc, desc)
 
+
 def get_opts(opts):
     for opt in opts:
         if len(opt) == 5:
@@ -86,6 +92,7 @@
         desc += default and _(b" (default: %s)") % bytes(default) or b""
         yield (b", ".join(allopts), desc)
 
+
 def get_cmd(cmd, cmdtable):
     d = {}
     attr = cmdtable[cmd]
@@ -106,6 +113,7 @@
 
     return d
 
+
 def showdoc(ui):
     # print options
     ui.write(minirst.section(_(b"Options")))
@@ -127,14 +135,22 @@
     helpprinter(ui, helptable, minirst.section, exclude=[b'config'])
 
     ui.write(minirst.section(_(b"Extensions")))
-    ui.write(_(b"This section contains help for extensions that are "
-               b"distributed together with Mercurial. Help for other "
-               b"extensions is available in the help system."))
-    ui.write((b"\n\n"
-              b".. contents::\n"
-              b"   :class: htmlonly\n"
-              b"   :local:\n"
-              b"   :depth: 1\n\n"))
+    ui.write(
+        _(
+            b"This section contains help for extensions that are "
+            b"distributed together with Mercurial. Help for other "
+            b"extensions is available in the help system."
+        )
+    )
+    ui.write(
+        (
+            b"\n\n"
+            b".. contents::\n"
+            b"   :class: htmlonly\n"
+            b"   :local:\n"
+            b"   :depth: 1\n\n"
+        )
+    )
 
     for extensionname in sorted(allextensionnames()):
         mod = extensions.load(ui, extensionname, None)
@@ -143,24 +159,42 @@
         cmdtable = getattr(mod, 'cmdtable', None)
         if cmdtable:
             ui.write(minirst.subsubsection(_(b'Commands')))
-            commandprinter(ui, cmdtable, minirst.subsubsubsection,
-                    minirst.subsubsubsubsection)
+            commandprinter(
+                ui,
+                cmdtable,
+                minirst.subsubsubsection,
+                minirst.subsubsubsubsection,
+            )
+
 
 def showtopic(ui, topic):
     extrahelptable = [
         ([b"common"], b'', loaddoc(b'common'), help.TOPIC_CATEGORY_MISC),
         ([b"hg.1"], b'', loaddoc(b'hg.1'), help.TOPIC_CATEGORY_CONFIG),
         ([b"hg-ssh.8"], b'', loaddoc(b'hg-ssh.8'), help.TOPIC_CATEGORY_CONFIG),
-        ([b"hgignore.5"], b'', loaddoc(b'hgignore.5'),
-         help.TOPIC_CATEGORY_CONFIG),
+        (
+            [b"hgignore.5"],
+            b'',
+            loaddoc(b'hgignore.5'),
+            help.TOPIC_CATEGORY_CONFIG,
+        ),
         ([b"hgrc.5"], b'', loaddoc(b'hgrc.5'), help.TOPIC_CATEGORY_CONFIG),
-        ([b"hgignore.5.gendoc"], b'', loaddoc(b'hgignore'),
-         help.TOPIC_CATEGORY_CONFIG),
-        ([b"hgrc.5.gendoc"], b'', loaddoc(b'config'),
-         help.TOPIC_CATEGORY_CONFIG),
+        (
+            [b"hgignore.5.gendoc"],
+            b'',
+            loaddoc(b'hgignore'),
+            help.TOPIC_CATEGORY_CONFIG,
+        ),
+        (
+            [b"hgrc.5.gendoc"],
+            b'',
+            loaddoc(b'config'),
+            help.TOPIC_CATEGORY_CONFIG,
+        ),
     ]
     helpprinter(ui, helptable + extrahelptable, None, include=[topic])
 
+
 def helpprinter(ui, helptable, sectionfunc, include=[], exclude=[]):
     for h in helptable:
         names, sec, doc = h[0:3]
@@ -178,6 +212,7 @@
         ui.write(doc)
         ui.write(b"\n")
 
+
 def commandprinter(ui, cmdtable, sectionfunc, subsectionfunc):
     """Render restructuredtext describing a list of commands and their
     documentation, grouped by command category.
@@ -222,7 +257,8 @@
         if helpcategory(cmd) not in cmdsbycategory:
             raise AssertionError(
                 "The following command did not register its (category) in "
-                "help.CATEGORY_ORDER: %s (%s)" % (cmd, helpcategory(cmd)))
+                "help.CATEGORY_ORDER: %s (%s)" % (cmd, helpcategory(cmd))
+            )
         cmdsbycategory[helpcategory(cmd)].append(cmd)
 
     # Print the help for each command. We present the commands grouped by
@@ -270,16 +306,22 @@
                     if optstr.endswith(b"[+]>"):
                         multioccur = True
                 if multioccur:
-                    ui.write(_(b"\n[+] marked option can be specified"
-                               b" multiple times\n"))
+                    ui.write(
+                        _(
+                            b"\n[+] marked option can be specified"
+                            b" multiple times\n"
+                        )
+                    )
                 ui.write(b"\n")
             # aliases
             if d[b'aliases']:
                 ui.write(_(b"    aliases: %s\n\n") % b" ".join(d[b'aliases']))
 
+
 def allextensionnames():
     return set(extensions.enabled().keys()) | set(extensions.disabled().keys())
 
+
 if __name__ == "__main__":
     doc = b'hg.1.gendoc'
     if len(sys.argv) > 1:
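
get_desc() above relies on the conventional docstring layout of a one-line
summary, a blank line, then the body; the ``docstr[i + 2 :]`` slice skips
both newlines of that separator. A standalone illustration (the sample
docstring is invented)::

    docstr = b"summarize working directory state\n\nShow changed files.\n"

    i = docstr.find(b"\n")
    shortdesc = docstr[:i]     # b'summarize working directory state'
    desc = docstr[i + 2 :]     # b'Show changed files.\n'
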
--- a/doc/hgmanpage.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/doc/hgmanpage.py	Mon Oct 21 11:09:48 2019 -0400
@@ -53,6 +53,7 @@
     nodes,
     writers,
 )
+
 try:
     import roman
 except ImportError:
@@ -65,7 +66,7 @@
 
 # Define two macros so man/roff can calculate the
 # indent/unindent margins by itself
-MACRO_DEF = (r""".
+MACRO_DEF = r""".
 .nr rst2man-indent-level 0
 .
 .de1 rstReportMargin
@@ -92,11 +93,12 @@
 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
 .in \\n[rst2man-indent\\n[rst2man-indent-level]]u
 ..
-""")
+"""
+
 
 class Writer(writers.Writer):
 
-    supported = ('manpage')
+    supported = 'manpage'
     """Formats this writer supports."""
 
     output = None
@@ -118,11 +120,14 @@
         self._options = ['center']
         self._tab_char = '\t'
         self._coldefs = []
+
     def new_row(self):
         self._rows.append([])
+
     def append_separator(self, separator):
         """Append the separator for table head."""
         self._rows.append([separator])
+
     def append_cell(self, cell_lines):
         """cell_lines is an array of lines"""
         start = 0
@@ -131,19 +136,21 @@
         self._rows[-1].append(cell_lines[start:])
         if len(self._coldefs) < len(self._rows[-1]):
             self._coldefs.append('l')
+
     def _minimize_cell(self, cell_lines):
         """Remove leading and trailing blank and ``.sp`` lines"""
-        while (cell_lines and cell_lines[0] in ('\n', '.sp\n')):
+        while cell_lines and cell_lines[0] in ('\n', '.sp\n'):
             del cell_lines[0]
-        while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')):
+        while cell_lines and cell_lines[-1] in ('\n', '.sp\n'):
             del cell_lines[-1]
+
     def as_list(self):
         text = ['.TS\n']
         text.append(' '.join(self._options) + ';\n')
         text.append('|%s|.\n' % ('|'.join(self._coldefs)))
         for row in self._rows:
             # row = array of cells. cell = array of lines.
-            text.append('_\n')       # line above
+            text.append('_\n')  # line above
             text.append('T{\n')
             for i in range(len(row)):
                 cell = row[i]
@@ -152,13 +159,14 @@
                 if not text[-1].endswith('\n'):
                     text[-1] += '\n'
                 if i < len(row) - 1:
-                    text.append('T}'+self._tab_char+'T{\n')
+                    text.append('T}' + self._tab_char + 'T{\n')
                 else:
                     text.append('T}\n')
         text.append('_\n')
         text.append('.TE\n')
         return text
 
+
 class Translator(nodes.NodeVisitor):
     """"""
 
@@ -171,8 +179,9 @@
         lcode = settings.language_code
         arglen = len(inspect.getargspec(languages.get_language)[0])
         if arglen == 2:
-            self.language = languages.get_language(lcode,
-                                                   self.document.reporter)
+            self.language = languages.get_language(
+                lcode, self.document.reporter
+            )
         else:
             self.language = languages.get_language(lcode)
         self.head = []
@@ -189,16 +198,18 @@
         # writing the header .TH and .SH NAME is postponed after
         # docinfo.
         self._docinfo = {
-                "title" : "", "title_upper": "",
-                "subtitle" : "",
-                "manual_section" : "", "manual_group" : "",
-                "author" : [],
-                "date" : "",
-                "copyright" : "",
-                "version" : "",
-                    }
-        self._docinfo_keys = []     # a list to keep the sequence as in source.
-        self._docinfo_names = {}    # to get name from text not normalized.
+            "title": "",
+            "title_upper": "",
+            "subtitle": "",
+            "manual_section": "",
+            "manual_group": "",
+            "author": [],
+            "date": "",
+            "copyright": "",
+            "version": "",
+        }
+        self._docinfo_keys = []  # a list to keep the sequence as in source.
+        self._docinfo_names = {}  # to get name from text not normalized.
         self._in_docinfo = None
         self._active_table = None
         self._in_literal = False
@@ -217,25 +228,21 @@
         # ``B`` bold, ``I`` italic, ``R`` roman should be available.
         # Hopefully ``C`` courier too.
         self.defs = {
-                'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
-                'definition_list_item' : ('.TP', ''),
-                'field_name' : ('.TP\n.B ', '\n'),
-                'literal' : ('\\fB', '\\fP'),
-                'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),
-
-                'option_list_item' : ('.TP\n', ''),
-
-                'reference' : (r'\%', r'\:'),
-                'emphasis': ('\\fI', '\\fP'),
-                'strong' : ('\\fB', '\\fP'),
-                'term' : ('\n.B ', '\n'),
-                'title_reference' : ('\\fI', '\\fP'),
-
-                'topic-title' : ('.SS ',),
-                'sidebar-title' : ('.SS ',),
-
-                'problematic' : ('\n.nf\n', '\n.fi\n'),
-                    }
+            'indent': ('.INDENT %.1f\n', '.UNINDENT\n'),
+            'definition_list_item': ('.TP', ''),
+            'field_name': ('.TP\n.B ', '\n'),
+            'literal': ('\\fB', '\\fP'),
+            'literal_block': ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),
+            'option_list_item': ('.TP\n', ''),
+            'reference': (r'\%', r'\:'),
+            'emphasis': ('\\fI', '\\fP'),
+            'strong': ('\\fB', '\\fP'),
+            'term': ('\n.B ', '\n'),
+            'title_reference': ('\\fI', '\\fP'),
+            'topic-title': ('.SS ',),
+            'sidebar-title': ('.SS ',),
+            'problematic': ('\n.nf\n', '\n.fi\n'),
+        }
         # NOTE don't specify the newline before a dot-command, but ensure
         # it is there.
 
@@ -244,13 +251,13 @@
         line/comment."""
         prefix = '.\\" '
         out_text = ''.join(
-            [(prefix + in_line + '\n')
-            for in_line in text.split('\n')])
+            [(prefix + in_line + '\n') for in_line in text.split('\n')]
+        )
         return out_text
 
     def comment(self, text):
         """Return commented version of the passed text."""
-        return self.comment_begin(text)+'.\n'
+        return self.comment_begin(text) + '.\n'
 
     def ensure_eol(self):
         """Ensure the last line in body is terminated by new line."""
@@ -266,16 +273,21 @@
         for i in range(len(self.body) - 1, 0, -1):
             # remove superfluous vertical gaps.
             if self.body[i] == '.sp\n':
-                if self.body[i - 1][:4] in ('.BI ','.IP '):
+                if self.body[i - 1][:4] in ('.BI ', '.IP '):
                     self.body[i] = '.\n'
-                elif (self.body[i - 1][:3] == '.B ' and
-                    self.body[i - 2][:4] == '.TP\n'):
+                elif (
+                    self.body[i - 1][:3] == '.B '
+                    and self.body[i - 2][:4] == '.TP\n'
+                ):
                     self.body[i] = '.\n'
-                elif (self.body[i - 1] == '\n' and
-                    self.body[i - 2][0] != '.' and
-                    (self.body[i - 3][:7] == '.TP\n.B '
-                        or self.body[i - 3][:4] == '\n.B ')
-                     ):
+                elif (
+                    self.body[i - 1] == '\n'
+                    and self.body[i - 2][0] != '.'
+                    and (
+                        self.body[i - 3][:7] == '.TP\n.B '
+                        or self.body[i - 3][:4] == '\n.B '
+                    )
+                ):
                     self.body[i] = '.\n'
         return ''.join(self.head + self.body + self.foot)
 
@@ -286,13 +298,13 @@
 
     def visit_Text(self, node):
         text = node.astext()
-        text = text.replace('\\','\\e')
+        text = text.replace('\\', '\\e')
         replace_pairs = [
             (u'-', u'\\-'),
             (u"'", u'\\(aq'),
             (u'´', u"\\'"),
             (u'`', u'\\(ga'),
-            ]
+        ]
         for (in_char, out_markup) in replace_pairs:
             text = text.replace(in_char, out_markup)
         # unicode
@@ -310,9 +322,9 @@
     def list_start(self, node):
         class enum_char(object):
             enum_style = {
-                    'bullet'     : '\\(bu',
-                    'emdash'     : '\\(em',
-                     }
+                'bullet': '\\(bu',
+                'emdash': '\\(em',
+            }
 
             def __init__(self, style):
                 self._style = style
@@ -358,6 +370,7 @@
 
             def get_width(self):
                 return self._indent
+
             def __repr__(self):
                 return 'enum_style-%s' % list(self._style)
 
@@ -376,10 +389,12 @@
         self._list_char.pop()
 
     def header(self):
-        tmpl = (".TH %(title_upper)s %(manual_section)s"
-                " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
-                ".SH NAME\n"
-                "%(title)s \\- %(subtitle)s\n")
+        tmpl = (
+            ".TH %(title_upper)s %(manual_section)s"
+            " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
+            ".SH NAME\n"
+            "%(title)s \\- %(subtitle)s\n"
+        )
         return tmpl % self._docinfo
 
     def append_header(self):
@@ -400,8 +415,7 @@
 
     def visit_admonition(self, node, name=None):
         if name:
-            self.body.append('.IP %s\n' %
-                        self.language.labels.get(name, name))
+            self.body.append('.IP %s\n' % self.language.labels.get(name, name))
 
     def depart_admonition(self, node):
         self.body.append('.RE\n')
@@ -470,7 +484,7 @@
         pass
 
     def visit_citation_reference(self, node):
-        self.body.append('['+node.astext()+']')
+        self.body.append('[' + node.astext() + ']')
         raise nodes.SkipNode()
 
     def visit_classifier(self, node):
@@ -486,10 +500,9 @@
         pass
 
     def write_colspecs(self):
-        self.body.append("%s.\n" % ('L '*len(self.colspecs)))
+        self.body.append("%s.\n" % ('L ' * len(self.colspecs)))
 
-    def visit_comment(self, node,
-                      sub=re.compile('-(?=-)').sub):
+    def visit_comment(self, node, sub=re.compile('-(?=-)').sub):
         self.body.append(self.comment(node.astext()))
         raise nodes.SkipNode()
 
@@ -569,27 +582,39 @@
 
     def visit_document(self, node):
         # no blank line between comment and header.
-        self.body.append(self.comment(self.document_start).rstrip()+'\n')
+        self.body.append(self.comment(self.document_start).rstrip() + '\n')
         # writing header is postponed
         self.header_written = 0
 
     def depart_document(self, node):
         if self._docinfo['author']:
-            self.body.append('.SH AUTHOR\n%s\n'
-                    % ', '.join(self._docinfo['author']))
-        skip = ('author', 'copyright', 'date',
-                'manual_group', 'manual_section',
-                'subtitle',
-                'title', 'title_upper', 'version')
+            self.body.append(
+                '.SH AUTHOR\n%s\n' % ', '.join(self._docinfo['author'])
+            )
+        skip = (
+            'author',
+            'copyright',
+            'date',
+            'manual_group',
+            'manual_section',
+            'subtitle',
+            'title',
+            'title_upper',
+            'version',
+        )
         for name in self._docinfo_keys:
             if name == 'address':
-                self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % (
-                                    self.language.labels.get(name, name),
-                                    self.defs['indent'][0] % 0,
-                                    self.defs['indent'][0] % BLOCKQOUTE_INDENT,
-                                    self._docinfo[name],
-                                    self.defs['indent'][1],
-                                    self.defs['indent'][1]))
+                self.body.append(
+                    "\n%s:\n%s%s.nf\n%s\n.fi\n%s%s"
+                    % (
+                        self.language.labels.get(name, name),
+                        self.defs['indent'][0] % 0,
+                        self.defs['indent'][0] % BLOCKQOUTE_INDENT,
+                        self._docinfo[name],
+                        self.defs['indent'][1],
+                        self.defs['indent'][1],
+                    )
+                )
             elif name not in skip:
                 if name in self._docinfo_names:
                     label = self._docinfo_names[name]
@@ -597,10 +622,10 @@
                     label = self.language.labels.get(name, name)
                 self.body.append("\n%s: %s\n" % (label, self._docinfo[name]))
         if self._docinfo['copyright']:
-            self.body.append('.SH COPYRIGHT\n%s\n'
-                    % self._docinfo['copyright'])
-        self.body.append(self.comment(
-                        'Generated by docutils manpage writer.\n'))
+            self.body.append('.SH COPYRIGHT\n%s\n' % self._docinfo['copyright'])
+        self.body.append(
+            self.comment('Generated by docutils manpage writer.\n')
+        )
 
     def visit_emphasis(self, node):
         self.body.append(self.defs['emphasis'][0])
@@ -611,11 +636,13 @@
     def visit_entry(self, node):
         # a cell in a table row
         if 'morerows' in node:
-            self.document.reporter.warning('"table row spanning" not supported',
-                    base_node=node)
+            self.document.reporter.warning(
+                '"table row spanning" not supported', base_node=node
+            )
         if 'morecols' in node:
             self.document.reporter.warning(
-                    '"table cell spanning" not supported', base_node=node)
+                '"table cell spanning" not supported', base_node=node
+            )
         self.context.append(len(self.body))
 
     def depart_entry(self, node):
@@ -642,7 +669,7 @@
 
     def visit_field_body(self, node):
         if self._in_docinfo:
-            name_normalized = self._field_name.lower().replace(" ","_")
+            name_normalized = self._field_name.lower().replace(" ", "_")
             self._docinfo_names[name_normalized] = self._field_name
             self.visit_docinfo_item(node, name_normalized)
             raise nodes.SkipNode()
@@ -675,8 +702,7 @@
         self.dedent()
 
     def visit_footer(self, node):
-        self.document.reporter.warning('"footer" not supported',
-                base_node=node)
+        self.document.reporter.warning('"footer" not supported', base_node=node)
 
     def depart_footer(self, node):
         pass
@@ -690,11 +716,12 @@
         pass
 
     def footnote_backrefs(self, node):
-        self.document.reporter.warning('"footnote_backrefs" not supported',
-                base_node=node)
+        self.document.reporter.warning(
+            '"footnote_backrefs" not supported', base_node=node
+        )
 
     def visit_footnote_reference(self, node):
-        self.body.append('['+self.deunicode(node.astext())+']')
+        self.body.append('[' + self.deunicode(node.astext()) + ']')
         raise nodes.SkipNode()
 
     def depart_footnote_reference(self, node):
@@ -736,8 +763,7 @@
         self.body.append('\n')
 
     def visit_image(self, node):
-        self.document.reporter.warning('"image" not supported',
-                base_node=node)
+        self.document.reporter.warning('"image" not supported', base_node=node)
         text = []
         if 'alt' in node.attributes:
             text.append(node.attributes['alt'])
@@ -753,11 +779,11 @@
 
     def visit_label(self, node):
         # footnote and citation
-        if (isinstance(node.parent, nodes.footnote)
-            or isinstance(node.parent, nodes.citation)):
+        if isinstance(node.parent, nodes.footnote) or isinstance(
+            node.parent, nodes.citation
+        ):
             raise nodes.SkipNode()
-        self.document.reporter.warning('"unsupported "label"',
-                base_node=node)
+        self.document.reporter.warning('"unsupported "label"', base_node=node)
         self.body.append('[')
 
     def depart_label(self, node):
@@ -794,9 +820,10 @@
 
     def visit_list_item(self, node):
         # man 7 man argues to use ".IP" instead of ".TP"
-        self.body.append('.IP %s %d\n' % (
-                next(self._list_char[-1]),
-                self._list_char[-1].get_width(),))
+        self.body.append(
+            '.IP %s %d\n'
+            % (next(self._list_char[-1]), self._list_char[-1].get_width(),)
+        )
 
     def depart_list_item(self, node):
         pass
@@ -855,9 +882,9 @@
         # options with parameter bold italic, .BI, -f file
         #
         # we do not know if .B or .BI
-        self.context.append('.B')           # blind guess
-        self.context.append(len(self.body)) # to be able to insert later
-        self.context.append(0)              # option counter
+        self.context.append('.B')  # blind guess
+        self.context.append(len(self.body))  # to be able to insert later
+        self.context.append(0)  # option counter
 
     def depart_option_group(self, node):
         self.context.pop()  # the counter
@@ -885,7 +912,7 @@
         pass
 
     def visit_option_argument(self, node):
-        self.context[-3] = '.BI' # bold/italic alternate
+        self.context[-3] = '.BI'  # bold/italic alternate
         if node['delimiter'] != ' ':
             self.body.append('\\fB%s ' % node['delimiter'])
         elif self.body[len(self.body) - 1].endswith('='):
@@ -968,8 +995,9 @@
         raise nodes.SkipNode()
 
     def visit_substitution_reference(self, node):
-        self.document.reporter.warning('"substitution_reference" not supported',
-                base_node=node)
+        self.document.reporter.warning(
+            '"substitution_reference" not supported', base_node=node
+        )
 
     def visit_subtitle(self, node):
         if isinstance(node.parent, nodes.sidebar):
@@ -981,11 +1009,11 @@
 
     def depart_subtitle(self, node):
         # document subtitle calls SkipNode
-        self.body.append(self.defs['strong'][1]+'\n.PP\n')
+        self.body.append(self.defs['strong'][1] + '\n.PP\n')
 
     def visit_system_message(self, node):
         # TODO add report_level
-        #if node['level'] < self.document.reporter['writer'].report_level:
+        # if node['level'] < self.document.reporter['writer'].report_level:
         #    Level is too low to display:
         #    raise nodes.SkipNode
         attr = {}
@@ -995,8 +1023,10 @@
             line = ', line %s' % node['line']
         else:
             line = ''
-        self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
-                         % (node['type'], node['level'], node['source'], line))
+        self.body.append(
+            '.IP "System Message: %s/%s (%s:%s)"\n'
+            % (node['type'], node['level'], node['source'], line)
+        )
 
     def depart_system_message(self, node):
         pass
@@ -1111,7 +1141,9 @@
     depart_warning = depart_admonition
 
     def unimplemented_visit(self, node):
-        raise NotImplementedError('visiting unimplemented node type: %s'
-                                  % node.__class__.__name__)
+        raise NotImplementedError(
+            'visiting unimplemented node type: %s' % node.__class__.__name__
+        )
+
 
 # vim: set fileencoding=utf-8 et ts=4 ai :
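
A rough way to exercise the writer above (assuming ``doc/`` is on
``sys.path`` so hgmanpage is importable, and that docutils is installed;
the reST sample is invented)::

    from docutils.core import publish_string

    import hgmanpage

    rst = "hg-example\n==========\n\nA paragraph rendered as a man page.\n"

    # publish_string() accepts a writer instance; the output is roff text
    # that starts with the MACRO_DEF block defined above.
    print(publish_string(source=rst, writer=hgmanpage.Writer()))
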
--- a/hg	Wed Oct 02 12:20:36 2019 -0400
+++ b/hg	Mon Oct 21 11:09:48 2019 -0400
@@ -11,13 +11,6 @@
 import os
 import sys
 
-if os.environ.get('HGUNICODEPEDANTRY', False):
-    try:
-        reload(sys)
-        sys.setdefaultencoding("undefined")
-    except NameError:
-        pass
-
 libdir = '@LIBDIR@'
 
 if libdir != '@' 'LIBDIR' '@':
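
One note on the surviving check: ``'@' 'LIBDIR' '@'`` relies on
compile-time concatenation of adjacent string literals, so the comparison
matches the literal placeholder without spelling out a token that the
install step's text substitution would rewrite. Illustration::

    libdir = '@LIBDIR@'
    # adjacent literals concatenate, so this compares against '@LIBDIR@'
    print(libdir != '@' 'LIBDIR' '@')  # False until the placeholder is replaced
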
--- a/hgdemandimport/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgdemandimport/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -30,10 +30,10 @@
     '_imp',
     '_xmlplus',
     'fcntl',
-    'nt', # pathlib2 tests the existence of built-in 'nt' module
+    'nt',  # pathlib2 tests the existence of built-in 'nt' module
     'win32com.gen_py',
-    'win32com.shell', # 'appdirs' tries to import win32com.shell
-    '_winreg', # 2.7 mimetypes needs immediate ImportError
+    'win32com.shell',  # 'appdirs' tries to import win32com.shell
+    '_winreg',  # 2.7 mimetypes needs immediate ImportError
     'pythoncom',
     # imported by tarfile, not available under Windows
     'pwd',
@@ -46,16 +46,16 @@
     # setuptools' pkg_resources.py expects "from __main__ import x" to
     # raise ImportError if x not defined
     '__main__',
-    '_ssl', # conditional imports in the stdlib, issue1964
-    '_sre', # issue4920
+    '_ssl',  # conditional imports in the stdlib, issue1964
+    '_sre',  # issue4920
     'rfc822',
     'mimetools',
-    'sqlalchemy.events', # has import-time side effects (issue5085)
+    'sqlalchemy.events',  # has import-time side effects (issue5085)
     # setuptools 8 expects this module to explode early when not on windows
     'distutils.msvc9compiler',
     '__builtin__',
     'builtins',
-    'urwid.command_map', # for pudb
+    'urwid.command_map',  # for pudb
 }
 
 _pypy = '__pypy__' in sys.builtin_module_names
@@ -71,8 +71,11 @@
 disable = demandimport.disable
 deactivated = demandimport.deactivated
 
+
 def enable():
     # chg pre-imports modules so do not enable demandimport for it
-    if ('CHGINTERNALMARK' not in os.environ
-        and os.environ.get('HGDEMANDIMPORT') != 'disable'):
+    if (
+        'CHGINTERNALMARK' not in os.environ
+        and os.environ.get('HGDEMANDIMPORT') != 'disable'
+    ):
         demandimport.enable()
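
A usage sketch for the API re-exported by this package (the module imported
inside the block is just an example)::

    import hgdemandimport

    # Respects CHGINTERNALMARK and HGDEMANDIMPORT=disable, per enable() above.
    hgdemandimport.enable()

    # Modules with import-time side effects can be loaded eagerly:
    with hgdemandimport.deactivated():
        import sqlite3
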
--- a/hgdemandimport/demandimportpy2.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgdemandimport/demandimportpy2.py	Mon Oct 21 11:09:48 2019 -0400
@@ -38,6 +38,7 @@
 
 nothing = object()
 
+
 def _hgextimport(importfunc, name, globals, *args, **kwargs):
     try:
         return importfunc(name, globals, *args, **kwargs)
@@ -53,6 +54,7 @@
         # retry to import with "hgext_" prefix
         return importfunc(hgextname, globals, *args, **kwargs)
 
+
 class _demandmod(object):
     """module demand-loader and proxy
 
@@ -67,8 +69,9 @@
         else:
             head = name
             after = []
-        object.__setattr__(self, r"_data",
-                           (head, globals, locals, after, level, set()))
+        object.__setattr__(
+            self, r"_data", (head, globals, locals, after, level, set())
+        )
         object.__setattr__(self, r"_module", None)
 
     def _extend(self, name):
@@ -91,7 +94,8 @@
             with tracing.log('demandimport %s', self._data[0]):
                 head, globals, locals, after, level, modrefs = self._data
                 mod = _hgextimport(
-                    _origimport, head, globals, locals, None, level)
+                    _origimport, head, globals, locals, None, level
+                )
                 if mod is self:
                     # In this case, _hgextimport() above should imply
                     # _demandimport(). Otherwise, _hgextimport() never
@@ -115,8 +119,11 @@
                     if '.' in p:
                         h, t = p.split('.', 1)
                     if getattr(mod, h, nothing) is nothing:
-                        setattr(mod, h, _demandmod(
-                            p, mod.__dict__, mod.__dict__, level=1))
+                        setattr(
+                            mod,
+                            h,
+                            _demandmod(p, mod.__dict__, mod.__dict__, level=1),
+                        )
                     elif t:
                         subload(getattr(mod, h), t)
 
@@ -164,15 +171,17 @@
         self._load()
         return self._module.__doc__
 
+
 _pypy = '__pypy__' in sys.builtin_module_names
 
+
 def _demandimport(name, globals=None, locals=None, fromlist=None, level=-1):
     if locals is None or name in ignores or fromlist == ('*',):
         # in these cases we can't really delay
         return _hgextimport(_origimport, name, globals, locals, fromlist, level)
     elif not fromlist:
         # import a [as b]
-        if '.' in name: # a.b
+        if '.' in name:  # a.b
             base, rest = name.split('.', 1)
             # email.__init__ loading email.mime
             if globals and globals.get('__name__', None) == base:
@@ -244,8 +253,9 @@
         if level >= 0:
             if name:
                 # "from a import b" or "from .a import b" style
-                rootmod = _hgextimport(_origimport, name, globals, locals,
-                                       level=level)
+                rootmod = _hgextimport(
+                    _origimport, name, globals, locals, level=level
+                )
                 mod = chainmodules(rootmod, name)
             elif _pypy:
                 # PyPy's __import__ throws an exception if invoked
@@ -260,8 +270,9 @@
                     mn = mn.rsplit('.', level - 1)[0]
                     mod = sys.modules[mn]
             else:
-                mod = _hgextimport(_origimport, name, globals, locals,
-                                   level=level)
+                mod = _hgextimport(
+                    _origimport, name, globals, locals, level=level
+                )
 
             for x in fromlist:
                 processfromitem(mod, x)
@@ -278,23 +289,29 @@
 
         return mod
 
+
 ignores = set()
 
+
 def init(ignoreset):
     global ignores
     ignores = ignoreset
 
+
 def isenabled():
     return builtins.__import__ == _demandimport
 
+
 def enable():
     "enable global demand-loading of modules"
     builtins.__import__ = _demandimport
 
+
 def disable():
     "disable global demand-loading of modules"
     builtins.__import__ = _origimport
 
+
 @contextmanager
 def deactivated():
     "context manager for disabling demandimport in 'with' blocks"
--- a/hgdemandimport/demandimportpy3.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgdemandimport/demandimportpy3.py	Mon Oct 21 11:09:48 2019 -0400
@@ -36,10 +36,12 @@
 
 _deactivated = False
 
+
 class _lazyloaderex(importlib.util.LazyLoader):
     """This is a LazyLoader except it also follows the _deactivated global and
     the ignore list.
     """
+
     def exec_module(self, module):
         """Make the module load lazily."""
         with tracing.log('demandimport %s', module):
@@ -48,14 +50,18 @@
             else:
                 super().exec_module(module)
 
+
 # This is 3.6+ because with Python 3.5 it isn't possible to lazily load
 # extensions. See the discussion in https://bugs.python.org/issue26186 for more.
 _extensions_loader = _lazyloaderex.factory(
-    importlib.machinery.ExtensionFileLoader)
+    importlib.machinery.ExtensionFileLoader
+)
 _bytecode_loader = _lazyloaderex.factory(
-    importlib.machinery.SourcelessFileLoader)
+    importlib.machinery.SourcelessFileLoader
+)
 _source_loader = _lazyloaderex.factory(importlib.machinery.SourceFileLoader)
 
+
 def _makefinder(path):
     return importlib.machinery.FileFinder(
         path,
@@ -65,15 +71,19 @@
         (_bytecode_loader, importlib.machinery.BYTECODE_SUFFIXES),
     )
 
+
 ignores = set()
 
+
 def init(ignoreset):
     global ignores
     ignores = ignoreset
 
+
 def isenabled():
     return _makefinder in sys.path_hooks and not _deactivated
 
+
 def disable():
     try:
         while True:
@@ -81,9 +91,11 @@
     except ValueError:
         pass
 
+
 def enable():
     sys.path_hooks.insert(0, _makefinder)
 
+
 @contextlib.contextmanager
 def deactivated():
     # This implementation is a bit different from Python 2's. Python 3
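
For context, the stdlib building block used above can also be driven
directly; this is the LazyLoader recipe from the importlib documentation,
not Mercurial's path-hook variant::

    import importlib.util
    import sys

    def lazy_import(name):
        spec = importlib.util.find_spec(name)
        loader = importlib.util.LazyLoader(spec.loader)
        spec.loader = loader
        module = importlib.util.module_from_spec(spec)
        sys.modules[name] = module
        loader.exec_module(module)  # defers execution until first attribute use
        return module

    json = lazy_import('json')  # module object exists; body has not run yet
    json.dumps({})              # first attribute access triggers the load
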
--- a/hgdemandimport/tracing.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgdemandimport/tracing.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,6 +14,7 @@
 _checked = False
 _session = 'none'
 
+
 def _isactive():
     global _pipe, _session, _checked
     if _pipe is None:
@@ -26,6 +27,7 @@
         _session = os.environ.get('HGCATAPULTSESSION', 'none')
     return True
 
+
 @contextlib.contextmanager
 def log(whencefmt, *whenceargs):
     if not _isactive():
@@ -48,6 +50,7 @@
         except IOError:
             pass
 
+
 def counter(label, amount, *labelargs):
     if not _isactive():
         return
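
Instrumented code can call these helpers unconditionally, since both are
no-ops when no catapult pipe is configured; a usage sketch (the format
string and label below are invented)::

    from hgdemandimport import tracing

    with tracing.log('demandimport %s', 'some.module'):
        pass  # region is timed only if HGCATAPULTSERVERPIPE is set

    tracing.counter('cache miss for %s', 1, 'manifest')
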
--- a/hgext/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,3 +1,4 @@
 from __future__ import absolute_import
 import pkgutil
+
 __path__ = pkgutil.extend_path(__path__, __name__)
--- a/hgext/absorb.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/absorb.py	Mon Oct 21 11:09:48 2019 -0400
@@ -53,15 +53,13 @@
     scmutil,
     util,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 cmdtable = {}
 command = registrar.command(cmdtable)
@@ -69,20 +67,22 @@
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('absorb', 'add-noise', default=True)
-configitem('absorb', 'amend-flag', default=None)
-configitem('absorb', 'max-stack-size', default=50)
+configitem(b'absorb', b'add-noise', default=True)
+configitem(b'absorb', b'amend-flag', default=None)
+configitem(b'absorb', b'max-stack-size', default=50)
 
 colortable = {
-    'absorb.description': 'yellow',
-    'absorb.node': 'blue bold',
-    'absorb.path': 'bold',
+    b'absorb.description': b'yellow',
+    b'absorb.node': b'blue bold',
+    b'absorb.path': b'bold',
 }
 
 defaultdict = collections.defaultdict
 
+
 class nullui(object):
     """blank ui object doing nothing"""
+
     debugflag = False
     verbose = False
     quiet = True
@@ -90,16 +90,20 @@
     def __getitem__(name):
         def nullfunc(*args, **kwds):
             return
+
         return nullfunc
 
+
 class emptyfilecontext(object):
     """minimal filecontext representing an empty file"""
+
     def data(self):
-        return ''
+        return b''
 
     def node(self):
         return node.nullid
 
+
 def uniq(lst):
     """list -> list. remove duplicated items without changing the order"""
     seen = set()
@@ -110,6 +114,7 @@
             result.append(x)
     return result
 
+
 def getdraftstack(headctx, limit=None):
     """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets.
 
@@ -132,6 +137,7 @@
     result.reverse()
     return result
 
+
 def getfilestack(stack, path, seenfctxs=None):
     """([ctx], str, set) -> [fctx], {ctx: fctx}
 
@@ -179,25 +185,25 @@
     fctxs = []
     fctxmap = {}
 
-    pctx = stack[0].p1() # the public (immutable) ctx we stop at
+    pctx = stack[0].p1()  # the public (immutable) ctx we stop at
     for ctx in reversed(stack):
-        if path not in ctx: # the file is added in the next commit
+        if path not in ctx:  # the file is added in the next commit
             pctx = ctx
             break
         fctx = ctx[path]
         fctxs.append(fctx)
-        if fctx in seenfctxs: # treat fctx as the immutable one
-            pctx = None # do not add another immutable fctx
+        if fctx in seenfctxs:  # treat fctx as the immutable one
+            pctx = None  # do not add another immutable fctx
             break
-        fctxmap[ctx] = fctx # only for mutable fctxs
+        fctxmap[ctx] = fctx  # only for mutable fctxs
         copy = fctx.copysource()
         if copy:
-            path = copy # follow rename
-            if path in ctx: # but do not follow copy
+            path = copy  # follow rename
+            if path in ctx:  # but do not follow copy
                 pctx = ctx.p1()
                 break
 
-    if pctx is not None: # need an extra immutable fctx
+    if pctx is not None:  # need an extra immutable fctx
         if path in pctx:
             fctxs.append(pctx[path])
         else:
@@ -213,10 +219,12 @@
     # remove uniq and find a different way to identify fctxs.
     return uniq(fctxs), fctxmap
 
+
 class overlaystore(patch.filestore):
     """read-only, hybrid store based on a dict and ctx.
     memworkingcopy: {path: content}, overrides file contents.
     """
+
     def __init__(self, basectx, memworkingcopy):
         self.basectx = basectx
         self.memworkingcopy = memworkingcopy
@@ -234,6 +242,7 @@
         copy = fctx.copysource()
         return content, mode, copy
 
+
 def overlaycontext(memworkingcopy, ctx, parents=None, extra=None):
     """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
     memworkingcopy overrides file contents.
@@ -249,9 +258,17 @@
     files = set(ctx.files()).union(memworkingcopy)
     store = overlaystore(ctx, memworkingcopy)
     return context.memctx(
-        repo=ctx.repo(), parents=parents, text=desc,
-        files=files, filectxfn=store, user=user, date=date,
-        branch=None, extra=extra)
+        repo=ctx.repo(),
+        parents=parents,
+        text=desc,
+        files=files,
+        filectxfn=store,
+        user=user,
+        date=date,
+        branch=None,
+        extra=extra,
+    )
+
 
 class filefixupstate(object):
     """state needed to apply fixups to a single file
@@ -294,10 +311,10 @@
             assert self._checkoutlinelog() == self.contents
 
         # following fields will be filled later
-        self.chunkstats = [0, 0] # [adopted, total : int]
-        self.targetlines = [] # [str]
-        self.fixups = [] # [(linelog rev, a1, a2, b1, b2)]
-        self.finalcontents = [] # [str]
+        self.chunkstats = [0, 0]  # [adopted, total : int]
+        self.targetlines = []  # [str]
+        self.fixups = []  # [(linelog rev, a1, a2, b1, b2)]
+        self.finalcontents = []  # [str]
         self.ctxaffected = set()
 
     def diffwith(self, targetfctx, fm=None):
@@ -319,7 +336,7 @@
         self.targetlines = blines
 
         self.linelog.annotate(self.linelog.maxrev)
-        annotated = self.linelog.annotateresult # [(linelog rev, linenum)]
+        annotated = self.linelog.annotateresult  # [(linelog rev, linenum)]
         assert len(annotated) == len(alines)
         # add a dummy end line to make insertion at the end easier
         if annotated:
@@ -329,7 +346,7 @@
         # analyse diff blocks
         for chunk in self._alldiffchunks(a, b, alines, blines):
             newfixups = self._analysediffchunk(chunk, annotated)
-            self.chunkstats[0] += bool(newfixups) # 1 or 0
+            self.chunkstats[0] += bool(newfixups)  # 1 or 0
             self.chunkstats[1] += 1
             self.fixups += newfixups
             if fm is not None:
@@ -346,11 +363,12 @@
             blines = self.targetlines[b1:b2]
             if self.ui.debugflag:
                 idx = (max(rev - 1, 0)) // 2
-                self.ui.write(_('%s: chunk %d:%d -> %d lines\n')
-                              % (node.short(self.fctxs[idx].node()),
-                                 a1, a2, len(blines)))
+                self.ui.write(
+                    _(b'%s: chunk %d:%d -> %d lines\n')
+                    % (node.short(self.fctxs[idx].node()), a1, a2, len(blines))
+                )
             self.linelog.replacelines(rev, a1, a2, b1, b2)
-        if self.opts.get('edit_lines', False):
+        if self.opts.get(b'edit_lines', False):
             self.finalcontents = self._checkoutlinelogwithedits()
         else:
             self.finalcontents = self._checkoutlinelog()
@@ -382,12 +400,13 @@
         a1, a2, b1, b2 = chunk
         # find involved indexes from annotate result
         involved = annotated[a1:a2]
-        if not involved and annotated: # a1 == a2 and a is not empty
+        if not involved and annotated:  # a1 == a2 and a is not empty
             # pure insertion, check nearby lines. ignore lines that belong
             # to the public (first) changeset (i.e. annotated[i][0] == 1)
             nearbylinenums = {a2, max(0, a1 - 1)}
-            involved = [annotated[i]
-                        for i in nearbylinenums if annotated[i][0] != 1]
+            involved = [
+                annotated[i] for i in nearbylinenums if annotated[i][0] != 1
+            ]
         involvedrevs = list(set(r for r, l in involved))
         newfixups = []
         if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
@@ -401,9 +420,9 @@
             for i in pycompat.xrange(a1, a2):
                 rev, linenum = annotated[i]
                 if rev > 1:
-                    if b1 == b2: # deletion, simply remove that single line
+                    if b1 == b2:  # deletion, simply remove that single line
                         nb1 = nb2 = 0
-                    else: # 1:1 line mapping, change the corresponding rev
+                    else:  # 1:1 line mapping, change the corresponding rev
                         nb1 = b1 + i - a1
                         nb2 = nb1 + 1
                     fixuprev = rev + 1
@@ -415,7 +434,7 @@
         """like mdiff.allblocks, but only care about differences"""
         blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
         for chunk, btype in blocks:
-            if btype != '!':
+            if btype != b'!':
                 continue
             yield chunk
 
@@ -424,7 +443,7 @@
         this is similar to running a partial "annotate".
         """
         llog = linelog.linelog()
-        a, alines = '', []
+        a, alines = b'', []
         for i in pycompat.xrange(len(self.contents)):
             b, blines = self.contents[i], self.contentlines[i]
             llrev = i * 2 + 1
@@ -440,7 +459,7 @@
         for i in pycompat.xrange(len(self.contents)):
             rev = (i + 1) * 2
             self.linelog.annotate(rev)
-            content = ''.join(map(self._getline, self.linelog.annotateresult))
+            content = b''.join(map(self._getline, self.linelog.annotateresult))
             contents.append(content)
         return contents
 
@@ -448,49 +467,63 @@
         """() -> [str]. prompt all lines for edit"""
         alllines = self.linelog.getalllines()
         # header
-        editortext = (_('HG: editing %s\nHG: "y" means the line to the right '
-                        'exists in the changeset to the top\nHG:\n')
-                      % self.fctxs[-1].path())
+        editortext = (
+            _(
+                b'HG: editing %s\nHG: "y" means the line to the right '
+                b'exists in the changeset to the top\nHG:\n'
+            )
+            % self.fctxs[-1].path()
+        )
         # [(idx, fctx)]. hide the dummy emptyfilecontext
-        visiblefctxs = [(i, f)
-                        for i, f in enumerate(self.fctxs)
-                        if not isinstance(f, emptyfilecontext)]
+        visiblefctxs = [
+            (i, f)
+            for i, f in enumerate(self.fctxs)
+            if not isinstance(f, emptyfilecontext)
+        ]
         for i, (j, f) in enumerate(visiblefctxs):
-            editortext += (_('HG: %s/%s %s %s\n') %
-                           ('|' * i, '-' * (len(visiblefctxs) - i + 1),
-                            node.short(f.node()),
-                            f.description().split('\n',1)[0]))
-        editortext += _('HG: %s\n') % ('|' * len(visiblefctxs))
+            editortext += _(b'HG: %s/%s %s %s\n') % (
+                b'|' * i,
+                b'-' * (len(visiblefctxs) - i + 1),
+                node.short(f.node()),
+                f.description().split(b'\n', 1)[0],
+            )
+        editortext += _(b'HG: %s\n') % (b'|' * len(visiblefctxs))
         # figure out the lifetime of a line, this is relatively inefficient,
         # but probably fine
-        lineset = defaultdict(lambda: set()) # {(llrev, linenum): {llrev}}
+        lineset = defaultdict(lambda: set())  # {(llrev, linenum): {llrev}}
         for i, f in visiblefctxs:
             self.linelog.annotate((i + 1) * 2)
             for l in self.linelog.annotateresult:
                 lineset[l].add(i)
         # append lines
         for l in alllines:
-            editortext += ('    %s : %s' %
-                           (''.join([('y' if i in lineset[l] else ' ')
-                                     for i, _f in visiblefctxs]),
-                            self._getline(l)))
+            editortext += b'    %s : %s' % (
+                b''.join(
+                    [
+                        (b'y' if i in lineset[l] else b' ')
+                        for i, _f in visiblefctxs
+                    ]
+                ),
+                self._getline(l),
+            )
         # run editor
-        editedtext = self.ui.edit(editortext, '', action='absorb')
+        editedtext = self.ui.edit(editortext, b'', action=b'absorb')
         if not editedtext:
-            raise error.Abort(_('empty editor text'))
+            raise error.Abort(_(b'empty editor text'))
         # parse edited result
-        contents = ['' for i in self.fctxs]
+        contents = [b'' for i in self.fctxs]
         leftpadpos = 4
         colonpos = leftpadpos + len(visiblefctxs) + 1
         for l in mdiff.splitnewlines(editedtext):
-            if l.startswith('HG:'):
+            if l.startswith(b'HG:'):
                 continue
-            if l[colonpos - 1:colonpos + 2] != ' : ':
-                raise error.Abort(_('malformed line: %s') % l)
-            linecontent = l[colonpos + 2:]
+            if l[colonpos - 1 : colonpos + 2] != b' : ':
+                raise error.Abort(_(b'malformed line: %s') % l)
+            linecontent = l[colonpos + 2 :]
             for i, ch in enumerate(
-                    pycompat.bytestr(l[leftpadpos:colonpos - 1])):
-                if ch == 'y':
+                pycompat.bytestr(l[leftpadpos : colonpos - 1])
+            ):
+                if ch == b'y':
                     contents[visiblefctxs[i][0]] += linecontent
         # chunkstats is hard to calculate if anything changes, therefore
         # set them to just a simple value (1, 1).
@@ -501,9 +534,9 @@
     def _getline(self, lineinfo):
         """((rev, linenum)) -> str. convert rev+line number to line content"""
         rev, linenum = lineinfo
-        if rev & 1: # odd: original line taken from fctxs
+        if rev & 1:  # odd: original line taken from fctxs
             return self.contentlines[rev // 2][linenum]
-        else: # even: fixup line from targetfctx
+        else:  # even: fixup line from targetfctx
             return self.targetlines[linenum]
 
     def _iscontinuous(self, a1, a2, closedinterval=False):
@@ -539,8 +572,12 @@
             lastrev = pcurrentchunk[0][0]
             lasta2 = pcurrentchunk[0][2]
             lastb2 = pcurrentchunk[0][4]
-            if (a1 == lasta2 and b1 == lastb2 and rev == lastrev and
-                    self._iscontinuous(max(a1 - 1, 0), a1)):
+            if (
+                a1 == lasta2
+                and b1 == lastb2
+                and rev == lastrev
+                and self._iscontinuous(max(a1 - 1, 0), a1)
+            ):
                 # merge into currentchunk
                 pcurrentchunk[0][2] = a2
                 pcurrentchunk[0][4] = b2
@@ -551,9 +588,8 @@
         return result
 
     def _showchanges(self, fm, alines, blines, chunk, fixups):
-
         def trim(line):
-            if line.endswith('\n'):
+            if line.endswith(b'\n'):
                 line = line[:-1]
             return line
 
@@ -568,30 +604,49 @@
                 bidxs[i - b1] = (max(idx, 1) - 1) // 2
 
         fm.startitem()
-        fm.write('hunk', '        %s\n',
-                 '@@ -%d,%d +%d,%d @@'
-                 % (a1, a2 - a1, b1, b2 - b1), label='diff.hunk')
-        fm.data(path=self.path, linetype='hunk')
+        fm.write(
+            b'hunk',
+            b'        %s\n',
+            b'@@ -%d,%d +%d,%d @@' % (a1, a2 - a1, b1, b2 - b1),
+            label=b'diff.hunk',
+        )
+        fm.data(path=self.path, linetype=b'hunk')
 
         def writeline(idx, diffchar, line, linetype, linelabel):
             fm.startitem()
-            node = ''
+            node = b''
             if idx:
                 ctx = self.fctxs[idx]
                 fm.context(fctx=ctx)
                 node = ctx.hex()
                 self.ctxaffected.add(ctx.changectx())
-            fm.write('node', '%-7.7s ', node, label='absorb.node')
-            fm.write('diffchar ' + linetype, '%s%s\n', diffchar, line,
-                     label=linelabel)
+            fm.write(b'node', b'%-7.7s ', node, label=b'absorb.node')
+            fm.write(
+                b'diffchar ' + linetype,
+                b'%s%s\n',
+                diffchar,
+                line,
+                label=linelabel,
+            )
             fm.data(path=self.path, linetype=linetype)
 
         for i in pycompat.xrange(a1, a2):
-            writeline(aidxs[i - a1], '-', trim(alines[i]), 'deleted',
-                      'diff.deleted')
+            writeline(
+                aidxs[i - a1],
+                b'-',
+                trim(alines[i]),
+                b'deleted',
+                b'diff.deleted',
+            )
         for i in pycompat.xrange(b1, b2):
-            writeline(bidxs[i - b1], '+', trim(blines[i]), 'inserted',
-                      'diff.inserted')
+            writeline(
+                bidxs[i - b1],
+                b'+',
+                trim(blines[i]),
+                b'inserted',
+                b'diff.inserted',
+            )
+
 
 class fixupstate(object):
     """state needed to run absorb
@@ -619,13 +674,13 @@
         self.repo = stack[-1].repo().unfiltered()
 
         # following fields will be filled later
-        self.paths = [] # [str]
-        self.status = None # ctx.status output
-        self.fctxmap = {} # {path: {ctx: fctx}}
-        self.fixupmap = {} # {path: filefixupstate}
-        self.replacemap = {} # {oldnode: newnode or None}
-        self.finalnode = None # head after all fixups
-        self.ctxaffected = set() # ctx that will be absorbed into
+        self.paths = []  # [str]
+        self.status = None  # ctx.status output
+        self.fctxmap = {}  # {path: {ctx: fctx}}
+        self.fixupmap = {}  # {path: filefixupstate}
+        self.replacemap = {}  # {oldnode: newnode or None}
+        self.finalnode = None  # head after all fixups
+        self.ctxaffected = set()  # ctx that will be absorbed into
 
     def diffwith(self, targetctx, match=None, fm=None):
         """diff and prepare fixups. update self.fixupmap, self.paths"""
@@ -634,7 +689,7 @@
         self.paths = []
         # but if --edit-lines is used, the user may want to edit files
         # even if they are not modified
-        editopt = self.opts.get('edit_lines')
+        editopt = self.opts.get(b'edit_lines')
         if not self.status.modified and editopt and match:
             interestingpaths = match.files()
         else:
@@ -644,13 +699,15 @@
         # sorting is necessary to eliminate ambiguity for the "double move"
         # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
         for path in sorted(interestingpaths):
-            self.ui.debug('calculating fixups for %s\n' % path)
+            self.ui.debug(b'calculating fixups for %s\n' % path)
             targetfctx = targetctx[path]
             fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
             # ignore symbolic links or binary, or unchanged files
-            if any(f.islink() or stringutil.binary(f.data())
-                   for f in [targetfctx] + fctxs
-                   if not isinstance(f, emptyfilecontext)):
+            if any(
+                f.islink() or stringutil.binary(f.data())
+                for f in [targetfctx] + fctxs
+                if not isinstance(f, emptyfilecontext)
+            ):
                 continue
             if targetfctx.data() == fctxs[-1].data() and not editopt:
                 continue
@@ -659,9 +716,9 @@
             fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts)
             if fm is not None:
                 fm.startitem()
-                fm.plain('showing changes for ')
-                fm.write('path', '%s\n', path, label='absorb.path')
-                fm.data(linetype='path')
+                fm.plain(b'showing changes for ')
+                fm.write(b'path', b'%s\n', path, label=b'absorb.path')
+                fm.data(linetype=b'path')
             fstate.diffwith(targetfctx, fm)
             self.fixupmap[path] = fstate
             self.paths.append(path)
@@ -669,23 +726,25 @@
 
     def apply(self):
         """apply fixups to individual filefixupstates"""
-        for path, state in self.fixupmap.iteritems():
+        for path, state in pycompat.iteritems(self.fixupmap):
             if self.ui.debugflag:
-                self.ui.write(_('applying fixups to %s\n') % path)
+                self.ui.write(_(b'applying fixups to %s\n') % path)
             state.apply()
 
     @property
     def chunkstats(self):
         """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
-        return dict((path, state.chunkstats)
-                    for path, state in self.fixupmap.iteritems())
+        return dict(
+            (path, state.chunkstats)
+            for path, state in pycompat.iteritems(self.fixupmap)
+        )
 
     def commit(self):
         """commit changes. update self.finalnode, self.replacemap"""
-        with self.repo.transaction('absorb') as tr:
+        with self.repo.transaction(b'absorb') as tr:
             self._commitstack()
             self._movebookmarks(tr)
-            if self.repo['.'].node() in self.replacemap:
+            if self.repo[b'.'].node() in self.replacemap:
                 self._moveworkingdirectoryparent()
             self._cleanupoldcommits()
         return self.finalnode
@@ -696,15 +755,17 @@
         chunkstats = self.chunkstats
         if ui.verbose:
             # chunkstats for each file
-            for path, stat in chunkstats.iteritems():
+            for path, stat in pycompat.iteritems(chunkstats):
                 if stat[0]:
-                    ui.write(_('%s: %d of %d chunk(s) applied\n')
-                             % (path, stat[0], stat[1]))
+                    ui.write(
+                        _(b'%s: %d of %d chunk(s) applied\n')
+                        % (path, stat[0], stat[1])
+                    )
         elif not ui.quiet:
             # a summary for all files
             stats = chunkstats.values()
             applied, total = (sum(s[i] for s in stats) for i in (0, 1))
-            ui.write(_('%d of %d chunk(s) applied\n') % (applied, total))
+            ui.write(_(b'%d of %d chunk(s) applied\n') % (applied, total))
 
     def _commitstack(self):
         """make new commits. update self.finalnode, self.replacemap.
@@ -724,7 +785,7 @@
             if self._willbecomenoop(memworkingcopy, ctx, nextp1):
                 # changeset is no longer necessary
                 self.replacemap[ctx.node()] = None
-                msg = _('became empty and was dropped')
+                msg = _(b'became empty and was dropped')
             else:
                 # changeset needs re-commit
                 nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1)
@@ -732,19 +793,21 @@
                 nextp1 = lastcommitted
                 self.replacemap[ctx.node()] = lastcommitted.node()
                 if memworkingcopy:
-                    msg = _('%d file(s) changed, became %s') % (
-                        len(memworkingcopy), self._ctx2str(lastcommitted))
+                    msg = _(b'%d file(s) changed, became %s') % (
+                        len(memworkingcopy),
+                        self._ctx2str(lastcommitted),
+                    )
                 else:
-                    msg = _('became %s') % self._ctx2str(lastcommitted)
+                    msg = _(b'became %s') % self._ctx2str(lastcommitted)
             if self.ui.verbose and msg:
-                self.ui.write(_('%s: %s\n') % (self._ctx2str(ctx), msg))
+                self.ui.write(_(b'%s: %s\n') % (self._ctx2str(ctx), msg))
         self.finalnode = lastcommitted and lastcommitted.node()
 
     def _ctx2str(self, ctx):
         if self.ui.debugflag:
-            return '%d:%s' % (ctx.rev(), ctx.hex())
+            return b'%d:%s' % (ctx.rev(), ctx.hex())
         else:
-            return '%d:%s' % (ctx.rev(), node.short(ctx.node()))
+            return b'%d:%s' % (ctx.rev(), node.short(ctx.node()))
 
     def _getnewfilecontents(self, ctx):
         """(ctx) -> {path: str}
@@ -754,7 +817,7 @@
         """
         result = {}
         for path in self.paths:
-            ctx2fctx = self.fctxmap[path] # {ctx: fctx}
+            ctx2fctx = self.fctxmap[path]  # {ctx: fctx}
             if ctx not in ctx2fctx:
                 continue
             fctx = ctx2fctx[ctx]
@@ -766,26 +829,29 @@
 
     def _movebookmarks(self, tr):
         repo = self.repo
-        needupdate = [(name, self.replacemap[hsh])
-                      for name, hsh in repo._bookmarks.iteritems()
-                      if hsh in self.replacemap]
+        needupdate = [
+            (name, self.replacemap[hsh])
+            for name, hsh in pycompat.iteritems(repo._bookmarks)
+            if hsh in self.replacemap
+        ]
         changes = []
         for name, hsh in needupdate:
             if hsh:
                 changes.append((name, hsh))
                 if self.ui.verbose:
-                    self.ui.write(_('moving bookmark %s to %s\n')
-                                  % (name, node.hex(hsh)))
+                    self.ui.write(
+                        _(b'moving bookmark %s to %s\n') % (name, node.hex(hsh))
+                    )
             else:
                 changes.append((name, None))
                 if self.ui.verbose:
-                    self.ui.write(_('deleting bookmark %s\n') % name)
+                    self.ui.write(_(b'deleting bookmark %s\n') % name)
         repo._bookmarks.applychanges(repo, tr, changes)
 
     def _moveworkingdirectoryparent(self):
         if not self.finalnode:
             # Find the latest not-{obsoleted,stripped} parent.
-            revs = self.repo.revs('max(::. - %ln)', self.replacemap.keys())
+            revs = self.repo.revs(b'max(::. - %ln)', self.replacemap.keys())
             ctx = self.repo[revs.first()]
             self.finalnode = ctx.node()
         else:
@@ -798,8 +864,10 @@
         restore = noop
         if util.safehasattr(dirstate, '_fsmonitorstate'):
             bak = dirstate._fsmonitorstate.invalidate
+
             def restore():
                 dirstate._fsmonitorstate.invalidate = bak
+
             dirstate._fsmonitorstate.invalidate = noop
         try:
             with dirstate.parentchange():
@@ -822,7 +890,7 @@
         # ctx changes more files (not a subset of memworkingcopy)
         if not set(ctx.files()).issubset(set(memworkingcopy)):
             return False
-        for path, content in memworkingcopy.iteritems():
+        for path, content in pycompat.iteritems(memworkingcopy):
             if path not in pctx or path not in ctx:
                 return False
             fctx = ctx[path]
@@ -841,8 +909,8 @@
         """
         parents = p1 and (p1, node.nullid)
         extra = ctx.extra()
-        if self._useobsolete and self.ui.configbool('absorb', 'add-noise'):
-            extra['absorb_source'] = ctx.hex()
+        if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
+            extra[b'absorb_source'] = ctx.hex()
         mctx = overlaycontext(memworkingcopy, ctx, parents, extra=extra)
         return mctx.commit()
 
@@ -852,11 +920,15 @@
         return obsolete.isenabled(self.repo, obsolete.createmarkersopt)
 
     def _cleanupoldcommits(self):
-        replacements = {k: ([v] if v is not None else [])
-                        for k, v in self.replacemap.iteritems()}
+        replacements = {
+            k: ([v] if v is not None else [])
+            for k, v in pycompat.iteritems(self.replacemap)
+        }
         if replacements:
-            scmutil.cleanupnodes(self.repo, replacements, operation='absorb',
-                                 fixphase=True)
+            scmutil.cleanupnodes(
+                self.repo, replacements, operation=b'absorb', fixphase=True
+            )
+
 
 def _parsechunk(hunk):
     """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))"""
@@ -871,9 +943,10 @@
     patchlines = mdiff.splitnewlines(buf.getvalue())
     # hunk.prettystr() will update hunk.removed
     a2 = a1 + hunk.removed
-    blines = [l[1:] for l in patchlines[1:] if not l.startswith('-')]
+    blines = [l[1:] for l in patchlines[1:] if not l.startswith(b'-')]
     return path, (a1, a2, blines)
 
+
 def overlaydiffcontext(ctx, chunks):
     """(ctx, [crecord.uihunk]) -> memctx
 
@@ -889,22 +962,23 @@
     # as we only care about applying changes to modified files, no mode
     # change, no binary diff, and no renames, it's probably okay to
     # re-invent the logic using much simpler code here.
-    memworkingcopy = {} # {path: content}
-    patchmap = defaultdict(lambda: []) # {path: [(a1, a2, [bline])]}
+    memworkingcopy = {}  # {path: content}
+    patchmap = defaultdict(lambda: [])  # {path: [(a1, a2, [bline])]}
     for path, info in map(_parsechunk, chunks):
         if not path or not info:
             continue
         patchmap[path].append(info)
-    for path, patches in patchmap.iteritems():
+    for path, patches in pycompat.iteritems(patchmap):
         if path not in ctx or not patches:
             continue
         patches.sort(reverse=True)
         lines = mdiff.splitnewlines(ctx[path].data())
         for a1, a2, blines in patches:
             lines[a1:a2] = blines
-        memworkingcopy[path] = ''.join(lines)
+        memworkingcopy[path] = b''.join(lines)
     return overlaycontext(memworkingcopy, ctx)
 
+
 def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
     """pick fixup chunks from targetctx, apply them to stack.
 
@@ -913,18 +987,22 @@
     return fixupstate.
     """
     if stack is None:
-        limit = ui.configint('absorb', 'max-stack-size')
-        headctx = repo['.']
+        limit = ui.configint(b'absorb', b'max-stack-size')
+        headctx = repo[b'.']
         if len(headctx.parents()) > 1:
-            raise error.Abort(_('cannot absorb into a merge'))
+            raise error.Abort(_(b'cannot absorb into a merge'))
         stack = getdraftstack(headctx, limit)
         if limit and len(stack) >= limit:
-            ui.warn(_('absorb: only the recent %d changesets will '
-                      'be analysed\n')
-                    % limit)
+            ui.warn(
+                _(
+                    b'absorb: only the recent %d changesets will '
+                    b'be analysed\n'
+                )
+                % limit
+            )
     if not stack:
-        raise error.Abort(_('no mutable changeset to change'))
-    if targetctx is None: # default to working copy
+        raise error.Abort(_(b'no mutable changeset to change'))
+    if targetctx is None:  # default to working copy
         targetctx = repo[None]
     if pats is None:
         pats = ()
@@ -932,57 +1010,92 @@
         opts = {}
     state = fixupstate(stack, ui=ui, opts=opts)
     matcher = scmutil.match(targetctx, pats, opts)
-    if opts.get('interactive'):
+    if opts.get(b'interactive'):
         diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
         origchunks = patch.parsepatch(diff)
         chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0]
         targetctx = overlaydiffcontext(stack[-1], chunks)
     fm = None
-    if opts.get('print_changes') or not opts.get('apply_changes'):
-        fm = ui.formatter('absorb', opts)
+    if opts.get(b'print_changes') or not opts.get(b'apply_changes'):
+        fm = ui.formatter(b'absorb', opts)
     state.diffwith(targetctx, matcher, fm)
     if fm is not None:
         fm.startitem()
-        fm.write("count", "\n%d changesets affected\n", len(state.ctxaffected))
-        fm.data(linetype='summary')
+        fm.write(
+            b"count", b"\n%d changesets affected\n", len(state.ctxaffected)
+        )
+        fm.data(linetype=b'summary')
         for ctx in reversed(stack):
             if ctx not in state.ctxaffected:
                 continue
             fm.startitem()
             fm.context(ctx=ctx)
-            fm.data(linetype='changeset')
-            fm.write('node', '%-7.7s ', ctx.hex(), label='absorb.node')
+            fm.data(linetype=b'changeset')
+            fm.write(b'node', b'%-7.7s ', ctx.hex(), label=b'absorb.node')
             descfirstline = ctx.description().splitlines()[0]
-            fm.write('descfirstline', '%s\n', descfirstline,
-                     label='absorb.description')
+            fm.write(
+                b'descfirstline',
+                b'%s\n',
+                descfirstline,
+                label=b'absorb.description',
+            )
         fm.end()
-    if not opts.get('dry_run'):
-        if (not opts.get('apply_changes') and
-            state.ctxaffected and
-            ui.promptchoice("apply changes (yn)? $$ &Yes $$ &No", default=1)):
-            raise error.Abort(_('absorb cancelled\n'))
+    if not opts.get(b'dry_run'):
+        if (
+            not opts.get(b'apply_changes')
+            and state.ctxaffected
+            and ui.promptchoice(
+                b"apply changes (yn)? $$ &Yes $$ &No", default=1
+            )
+        ):
+            raise error.Abort(_(b'absorb cancelled\n'))
 
         state.apply()
         if state.commit():
             state.printchunkstats()
         elif not ui.quiet:
-            ui.write(_('nothing applied\n'))
+            ui.write(_(b'nothing applied\n'))
     return state
 
-@command('absorb',
-         [('a', 'apply-changes', None,
-           _('apply changes without prompting for confirmation')),
-          ('p', 'print-changes', None,
-           _('always print which changesets are modified by which changes')),
-          ('i', 'interactive', None,
-           _('interactively select which chunks to apply (EXPERIMENTAL)')),
-          ('e', 'edit-lines', None,
-           _('edit what lines belong to which changesets before commit '
-             '(EXPERIMENTAL)')),
-         ] + commands.dryrunopts + commands.templateopts + commands.walkopts,
-         _('hg absorb [OPTION] [FILE]...'),
-         helpcategory=command.CATEGORY_COMMITTING,
-         helpbasic=True)
+
+@command(
+    b'absorb',
+    [
+        (
+            b'a',
+            b'apply-changes',
+            None,
+            _(b'apply changes without prompting for confirmation'),
+        ),
+        (
+            b'p',
+            b'print-changes',
+            None,
+            _(b'always print which changesets are modified by which changes'),
+        ),
+        (
+            b'i',
+            b'interactive',
+            None,
+            _(b'interactively select which chunks to apply (EXPERIMENTAL)'),
+        ),
+        (
+            b'e',
+            b'edit-lines',
+            None,
+            _(
+                b'edit what lines belong to which changesets before commit '
+                b'(EXPERIMENTAL)'
+            ),
+        ),
+    ]
+    + commands.dryrunopts
+    + commands.templateopts
+    + commands.walkopts,
+    _(b'hg absorb [OPTION] [FILE]...'),
+    helpcategory=command.CATEGORY_COMMITTING,
+    helpbasic=True,
+)
 def absorbcmd(ui, repo, *pats, **opts):
     """incorporate corrections into the stack of draft changesets
 
@@ -1010,7 +1123,7 @@
     opts = pycompat.byteskwargs(opts)
 
     with repo.wlock(), repo.lock():
-        if not opts['dry_run']:
+        if not opts[b'dry_run']:
             cmdutil.checkunfinished(repo)
 
         state = absorb(ui, repo, pats=pats, opts=opts)
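
The absorb hunks above apply two mechanical conversions: every
dict.iteritems() call becomes pycompat.iteritems(dict), and string
literals handed to Mercurial APIs gain a b'' prefix. A minimal sketch of
what such an iteritems() shim has to do, assuming only the standard
library (the real helper lives in mercurial/pycompat.py and covers many
more names):

    import sys

    if sys.version_info[0] >= 3:
        def iteritems(d):
            # Python 3 removed dict.iteritems(); items() is already a
            # lazy view, so wrapping it in iter() is enough
            return iter(d.items())
    else:
        def iteritems(d):
            return d.iteritems()

    assert sorted(iteritems({b'a': 1, b'b': 2})) == [(b'a', 1), (b'b', 2)]

Funneling every call site through one helper keeps the Python 2 to 3
transition a purely textual rewrite, which is what makes tree-wide hunks
like the ones above and below tractable.
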
--- a/hgext/acl.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/acl.py	Mon Oct 21 11:09:48 2019 -0400
@@ -224,9 +224,7 @@
     registrar,
     util,
 )
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
 
 urlreq = util.urlreq
 
@@ -234,104 +232,110 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
 # deprecated config: acl.config
-configitem('acl', 'config',
-    default=None,
+configitem(
+    b'acl', b'config', default=None,
 )
-configitem('acl.groups', '.*',
-    default=None,
-    generic=True,
+configitem(
+    b'acl.groups', b'.*', default=None, generic=True,
 )
-configitem('acl.deny.branches', '.*',
-    default=None,
-    generic=True,
+configitem(
+    b'acl.deny.branches', b'.*', default=None, generic=True,
 )
-configitem('acl.allow.branches', '.*',
-    default=None,
-    generic=True,
+configitem(
+    b'acl.allow.branches', b'.*', default=None, generic=True,
 )
-configitem('acl.deny', '.*',
-    default=None,
-    generic=True,
+configitem(
+    b'acl.deny', b'.*', default=None, generic=True,
 )
-configitem('acl.allow', '.*',
-    default=None,
-    generic=True,
+configitem(
+    b'acl.allow', b'.*', default=None, generic=True,
 )
-configitem('acl', 'sources',
-    default=lambda: ['serve'],
+configitem(
+    b'acl', b'sources', default=lambda: [b'serve'],
 )
 
+
 def _getusers(ui, group):
 
     # First, try to use group definition from section [acl.groups]
-    hgrcusers = ui.configlist('acl.groups', group)
+    hgrcusers = ui.configlist(b'acl.groups', group)
     if hgrcusers:
         return hgrcusers
 
-    ui.debug('acl: "%s" not defined in [acl.groups]\n' % group)
+    ui.debug(b'acl: "%s" not defined in [acl.groups]\n' % group)
     # If no users found in group definition, get users from OS-level group
     try:
         return util.groupmembers(group)
     except KeyError:
-        raise error.Abort(_("group '%s' is undefined") % group)
+        raise error.Abort(_(b"group '%s' is undefined") % group)
+
 
 def _usermatch(ui, user, usersorgroups):
 
-    if usersorgroups == '*':
+    if usersorgroups == b'*':
         return True
 
-    for ug in usersorgroups.replace(',', ' ').split():
+    for ug in usersorgroups.replace(b',', b' ').split():
 
-        if ug.startswith('!'):
+        if ug.startswith(b'!'):
             # Test for excluded user or group. Format:
             # if ug is a user  name: !username
             # if ug is a group name: !@groupname
             ug = ug[1:]
-            if (not ug.startswith('@') and user != ug
-                or ug.startswith('@') and user not in _getusers(ui, ug[1:])):
+            if (
+                not ug.startswith(b'@')
+                and user != ug
+                or ug.startswith(b'@')
+                and user not in _getusers(ui, ug[1:])
+            ):
                 return True
 
         # Test for user or group. Format:
         # if ug is a user  name: username
         # if ug is a group name: @groupname
-        elif (user == ug
-              or ug.startswith('@') and user in _getusers(ui, ug[1:])):
+        elif (
+            user == ug or ug.startswith(b'@') and user in _getusers(ui, ug[1:])
+        ):
             return True
 
     return False
 
+
 def buildmatch(ui, repo, user, key):
     '''return tuple of (match function, list enabled).'''
     if not ui.has_section(key):
-        ui.debug('acl: %s not enabled\n' % key)
+        ui.debug(b'acl: %s not enabled\n' % key)
         return None
 
-    pats = [pat for pat, users in ui.configitems(key)
-            if _usermatch(ui, user, users)]
-    ui.debug('acl: %s enabled, %d entries for user %s\n' %
-             (key, len(pats), user))
+    pats = [
+        pat for pat, users in ui.configitems(key) if _usermatch(ui, user, users)
+    ]
+    ui.debug(
+        b'acl: %s enabled, %d entries for user %s\n' % (key, len(pats), user)
+    )
 
     # Branch-based ACL
     if not repo:
         if pats:
             # If there's an asterisk (meaning "any branch"), always return True;
             # Otherwise, test if b is in pats
-            if '*' in pats:
+            if b'*' in pats:
                 return util.always
             return lambda b: b in pats
         return util.never
 
     # Path-based ACL
     if pats:
-        return match.match(repo.root, '', pats)
+        return match.match(repo.root, b'', pats)
     return util.never
 
+
 def ensureenabled(ui):
     """make sure the extension is enabled when used as hook
 
@@ -340,89 +344,128 @@
     never loaded. This function ensures the extension is enabled when running
     hooks.
     """
-    if 'acl' in ui._knownconfig:
+    if b'acl' in ui._knownconfig:
         return
-    ui.setconfig('extensions', 'acl', '', source='internal')
-    extensions.loadall(ui, ['acl'])
+    ui.setconfig(b'extensions', b'acl', b'', source=b'internal')
+    extensions.loadall(ui, [b'acl'])
+
 
 def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
 
     ensureenabled(ui)
 
-    if hooktype not in ['pretxnchangegroup', 'pretxncommit', 'prepushkey']:
+    if hooktype not in [b'pretxnchangegroup', b'pretxncommit', b'prepushkey']:
         raise error.Abort(
-            _('config error - hook type "%s" cannot stop '
-              'incoming changesets, commits, nor bookmarks') % hooktype)
-    if (hooktype == 'pretxnchangegroup' and
-        source not in ui.configlist('acl', 'sources')):
-        ui.debug('acl: changes have source "%s" - skipping\n' % source)
+            _(
+                b'config error - hook type "%s" cannot stop '
+                b'incoming changesets, commits, nor bookmarks'
+            )
+            % hooktype
+        )
+    if hooktype == b'pretxnchangegroup' and source not in ui.configlist(
+        b'acl', b'sources'
+    ):
+        ui.debug(b'acl: changes have source "%s" - skipping\n' % source)
         return
 
     user = None
-    if source == 'serve' and r'url' in kwargs:
-        url = kwargs[r'url'].split(':')
-        if url[0] == 'remote' and url[1].startswith('http'):
+    if source == b'serve' and r'url' in kwargs:
+        url = kwargs[r'url'].split(b':')
+        if url[0] == b'remote' and url[1].startswith(b'http'):
             user = urlreq.unquote(url[3])
 
     if user is None:
         user = procutil.getuser()
 
-    ui.debug('acl: checking access for user "%s"\n' % user)
+    ui.debug(b'acl: checking access for user "%s"\n' % user)
 
-    if hooktype == 'prepushkey':
+    if hooktype == b'prepushkey':
         _pkhook(ui, repo, hooktype, node, source, user, **kwargs)
     else:
         _txnhook(ui, repo, hooktype, node, source, user, **kwargs)
 
+
 def _pkhook(ui, repo, hooktype, node, source, user, **kwargs):
-    if kwargs[r'namespace'] == 'bookmarks':
+    if kwargs[r'namespace'] == b'bookmarks':
         bookmark = kwargs[r'key']
         ctx = kwargs[r'new']
-        allowbookmarks = buildmatch(ui, None, user, 'acl.allow.bookmarks')
-        denybookmarks = buildmatch(ui, None, user, 'acl.deny.bookmarks')
+        allowbookmarks = buildmatch(ui, None, user, b'acl.allow.bookmarks')
+        denybookmarks = buildmatch(ui, None, user, b'acl.deny.bookmarks')
 
         if denybookmarks and denybookmarks(bookmark):
-            raise error.Abort(_('acl: user "%s" denied on bookmark "%s"'
-                               ' (changeset "%s")')
-                               % (user, bookmark, ctx))
+            raise error.Abort(
+                _(
+                    b'acl: user "%s" denied on bookmark "%s"'
+                    b' (changeset "%s")'
+                )
+                % (user, bookmark, ctx)
+            )
         if allowbookmarks and not allowbookmarks(bookmark):
-            raise error.Abort(_('acl: user "%s" not allowed on bookmark "%s"'
-                               ' (changeset "%s")')
-                               % (user, bookmark, ctx))
-        ui.debug('acl: bookmark access granted: "%s" on bookmark "%s"\n'
-                 % (ctx, bookmark))
+            raise error.Abort(
+                _(
+                    b'acl: user "%s" not allowed on bookmark "%s"'
+                    b' (changeset "%s")'
+                )
+                % (user, bookmark, ctx)
+            )
+        ui.debug(
+            b'acl: bookmark access granted: "%s" on bookmark "%s"\n'
+            % (ctx, bookmark)
+        )
+
 
 def _txnhook(ui, repo, hooktype, node, source, user, **kwargs):
     # deprecated config: acl.config
-    cfg = ui.config('acl', 'config')
+    cfg = ui.config(b'acl', b'config')
     if cfg:
-        ui.readconfig(cfg, sections=['acl.groups', 'acl.allow.branches',
-            'acl.deny.branches', 'acl.allow', 'acl.deny'])
+        ui.readconfig(
+            cfg,
+            sections=[
+                b'acl.groups',
+                b'acl.allow.branches',
+                b'acl.deny.branches',
+                b'acl.allow',
+                b'acl.deny',
+            ],
+        )
 
-    allowbranches = buildmatch(ui, None, user, 'acl.allow.branches')
-    denybranches = buildmatch(ui, None, user, 'acl.deny.branches')
-    allow = buildmatch(ui, repo, user, 'acl.allow')
-    deny = buildmatch(ui, repo, user, 'acl.deny')
+    allowbranches = buildmatch(ui, None, user, b'acl.allow.branches')
+    denybranches = buildmatch(ui, None, user, b'acl.deny.branches')
+    allow = buildmatch(ui, repo, user, b'acl.allow')
+    deny = buildmatch(ui, repo, user, b'acl.deny')
 
     for rev in pycompat.xrange(repo[node].rev(), len(repo)):
         ctx = repo[rev]
         branch = ctx.branch()
         if denybranches and denybranches(branch):
-            raise error.Abort(_('acl: user "%s" denied on branch "%s"'
-                               ' (changeset "%s")')
-                               % (user, branch, ctx))
+            raise error.Abort(
+                _(b'acl: user "%s" denied on branch "%s" (changeset "%s")')
+                % (user, branch, ctx)
+            )
         if allowbranches and not allowbranches(branch):
-            raise error.Abort(_('acl: user "%s" not allowed on branch "%s"'
-                               ' (changeset "%s")')
-                               % (user, branch, ctx))
-        ui.debug('acl: branch access granted: "%s" on branch "%s"\n'
-        % (ctx, branch))
+            raise error.Abort(
+                _(
+                    b'acl: user "%s" not allowed on branch "%s"'
+                    b' (changeset "%s")'
+                )
+                % (user, branch, ctx)
+            )
+        ui.debug(
+            b'acl: branch access granted: "%s" on branch "%s"\n' % (ctx, branch)
+        )
 
         for f in ctx.files():
             if deny and deny(f):
-                raise error.Abort(_('acl: user "%s" denied on "%s"'
-                ' (changeset "%s")') % (user, f, ctx))
+                raise error.Abort(
+                    _(b'acl: user "%s" denied on "%s" (changeset "%s")')
+                    % (user, f, ctx)
+                )
             if allow and not allow(f):
-                raise error.Abort(_('acl: user "%s" not allowed on "%s"'
-                ' (changeset "%s")') % (user, f, ctx))
-        ui.debug('acl: path access granted: "%s"\n' % ctx)
+                raise error.Abort(
+                    _(
+                        b'acl: user "%s" not allowed on "%s"'
+                        b' (changeset "%s")'
+                    )
+                    % (user, f, ctx)
+                )
+        ui.debug(b'acl: path access granted: "%s"\n' % ctx)
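
The _usermatch() hunks above encode the whole ACL user syntax: '*'
matches everyone, a leading '!' negates an entry, and '@' marks a group
name. A standalone model of that predicate, where getusers stands in for
the [acl.groups]/OS group lookup the extension performs:

    def usermatch(user, spec, getusers):
        if spec == b'*':
            return True
        # the spec is a comma- or space-separated list of entries
        for ug in spec.replace(b',', b' ').split():
            if ug.startswith(b'!'):
                # negated user (!name) or negated group (!@name)
                ug = ug[1:]
                if (not ug.startswith(b'@') and user != ug
                        or ug.startswith(b'@')
                        and user not in getusers(ug[1:])):
                    return True
            elif (user == ug
                  or ug.startswith(b'@') and user in getusers(ug[1:])):
                return True
        return False

    groups = {b'devs': [b'alice', b'bob']}
    assert usermatch(b'alice', b'@devs', lambda g: groups[g])
    assert not usermatch(b'carol', b'alice, @devs', lambda g: groups[g])
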
--- a/hgext/amend.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/amend.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,7 +16,6 @@
 from mercurial import (
     cmdutil,
     commands,
-    error,
     pycompat,
     registrar,
 )
@@ -25,23 +24,40 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 cmdtable = {}
 command = registrar.command(cmdtable)
 
-@command('amend',
-    [('A', 'addremove', None,
-      _('mark new/missing files as added/removed before committing')),
-     ('e', 'edit', None, _('invoke editor on commit messages')),
-     ('i', 'interactive', None, _('use interactive mode')),
-     ('n', 'note', '', _('store a note on the amend')),
-     ('D', 'currentdate', None,
-      _('record the current date as commit date')),
-    ] + cmdutil.walkopts + cmdutil.commitopts + cmdutil.commitopts2,
-    _('[OPTION]... [FILE]...'),
+
+@command(
+    b'amend',
+    [
+        (
+            b'A',
+            b'addremove',
+            None,
+            _(b'mark new/missing files as added/removed before committing'),
+        ),
+        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
+        (b'i', b'interactive', None, _(b'use interactive mode')),
+        (
+            b'',
+            b'close-branch',
+            None,
+            _(b'mark a branch as closed, hiding it from the branch list'),
+        ),
+        (b's', b'secret', None, _(b'use the secret phase for committing')),
+        (b'n', b'note', b'', _(b'store a note on the amend')),
+    ]
+    + cmdutil.walkopts
+    + cmdutil.commitopts
+    + cmdutil.commitopts2
+    + cmdutil.commitopts3,
+    _(b'[OPTION]... [FILE]...'),
     helpcategory=command.CATEGORY_COMMITTING,
-    inferrepo=True)
+    inferrepo=True,
+)
 def amend(ui, repo, *pats, **opts):
     """amend the working copy parent with all or specified outstanding changes
 
@@ -51,10 +67,10 @@
     See :hg:`help commit` for more details.
     """
     opts = pycompat.byteskwargs(opts)
-    if len(opts['note']) > 255:
-        raise error.Abort(_("cannot store a note of more than 255 bytes"))
+    cmdutil.checknotesize(ui, opts)
+
     with repo.wlock(), repo.lock():
-        if not opts.get('logfile'):
-            opts['message'] = opts.get('message') or repo['.'].description()
-        opts['amend'] = True
+        if not opts.get(b'logfile'):
+            opts[b'message'] = opts.get(b'message') or repo[b'.'].description()
+        opts[b'amend'] = True
         return commands._docommit(ui, repo, *pats, **pycompat.strkwargs(opts))
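
The amend hunk above drops the inline 255-byte check in favour of the
shared cmdutil.checknotesize(ui, opts) helper. A sketch of what that
helper presumably enforces: the byte limit the deleted lines checked,
plus a no-newline rule (an assumption here). The authoritative body
lives in mercurial/cmdutil.py:

    from mercurial import error
    from mercurial.i18n import _

    def checknotesize(ui, opts):
        # reject notes that cannot be stored as obsmarker metadata
        note = opts.get(b'note')
        if not note:
            return
        if len(note) > 255:
            raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
        if b'\n' in note:
            raise error.Abort(_(b"note cannot contain a newline"))
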
--- a/hgext/automv.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/automv.py	Mon Oct 21 11:09:48 2019 -0400
@@ -35,44 +35,47 @@
     pycompat,
     registrar,
     scmutil,
-    similar
+    similar,
 )
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('automv', 'similarity',
-    default=95,
+configitem(
+    b'automv', b'similarity', default=95,
 )
 
+
 def extsetup(ui):
-    entry = extensions.wrapcommand(
-        commands.table, 'commit', mvcheck)
+    entry = extensions.wrapcommand(commands.table, b'commit', mvcheck)
     entry[1].append(
-        ('', 'no-automv', None,
-         _('disable automatic file move detection')))
+        (b'', b'no-automv', None, _(b'disable automatic file move detection'))
+    )
+
 
 def mvcheck(orig, ui, repo, *pats, **opts):
     """Hook to check for moves at commit time"""
     opts = pycompat.byteskwargs(opts)
     renames = None
-    disabled = opts.pop('no_automv', False)
+    disabled = opts.pop(b'no_automv', False)
     if not disabled:
-        threshold = ui.configint('automv', 'similarity')
+        threshold = ui.configint(b'automv', b'similarity')
         if not 0 <= threshold <= 100:
-            raise error.Abort(_('automv.similarity must be between 0 and 100'))
+            raise error.Abort(_(b'automv.similarity must be between 0 and 100'))
         if threshold > 0:
             match = scmutil.match(repo[None], pats, opts)
             added, removed = _interestingfiles(repo, match)
             uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
-            renames = _findrenames(repo, uipathfn, added, removed,
-                                   threshold / 100.0)
+            renames = _findrenames(
+                repo, uipathfn, added, removed, threshold / 100.0
+            )
 
     with repo.wlock():
         if renames is not None:
             scmutil._markchanges(repo, (), (), renames)
         return orig(ui, repo, *pats, **pycompat.strkwargs(opts))
 
+
 def _interestingfiles(repo, matcher):
     """Find what files were added or removed in this commit.
 
@@ -84,12 +87,13 @@
     added = stat.added
     removed = stat.removed
 
-    copy = copies.pathcopies(repo['.'], repo[None], matcher)
+    copy = copies.pathcopies(repo[b'.'], repo[None], matcher)
     # remove the copy files for which we already have copy info
     added = [f for f in added if f not in copy]
 
     return added, removed
 
+
 def _findrenames(repo, uipathfn, added, removed, similarity):
     """Find what files in added are really moved files.
 
@@ -100,12 +104,14 @@
     renames = {}
     if similarity > 0:
         for src, dst, score in similar.findrenames(
-                repo, added, removed, similarity):
+            repo, added, removed, similarity
+        ):
             if repo.ui.verbose:
                 repo.ui.status(
-                    _('detected move of %s as %s (%d%% similar)\n') % (
-                        uipathfn(src), uipathfn(dst), score * 100))
+                    _(b'detected move of %s as %s (%d%% similar)\n')
+                    % (uipathfn(src), uipathfn(dst), score * 100)
+                )
             renames[dst] = src
     if renames:
-        repo.ui.status(_('detected move of %d files\n') % len(renames))
+        repo.ui.status(_(b'detected move of %d files\n') % len(renames))
     return renames
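
automv's extsetup() above shows the standard recipe for bolting a flag
onto an existing command: extensions.wrapcommand() returns the command
table entry, whose second element is the option list, and the wrapper
receives the original command as its first argument. A condensed sketch
of the same recipe, where option tuples are (shortflag, name, default,
help):

    from mercurial import commands, extensions, pycompat
    from mercurial.i18n import _

    def extsetup(ui):
        entry = extensions.wrapcommand(commands.table, b'commit', _wrapped)
        # append our flag to the wrapped command's option table
        entry[1].append(
            (b'', b'no-automv', None,
             _(b'disable automatic file move detection'))
        )

    def _wrapped(orig, ui, repo, *pats, **opts):
        # kwargs arrive str-keyed on Python 3: normalize to bytes,
        # consume our flag, then delegate to the original command
        bopts = pycompat.byteskwargs(opts)
        bopts.pop(b'no_automv', False)
        return orig(ui, repo, *pats, **pycompat.strkwargs(bopts))
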
--- a/hgext/beautifygraph.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/beautifygraph.py	Mon Oct 21 11:09:48 2019 -0400
@@ -26,69 +26,82 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
 def prettyedge(before, edge, after):
-    if edge == '~':
-        return '\xE2\x95\xA7' # U+2567 ╧
-    if edge == '/':
-        return '\xE2\x95\xB1' # U+2571 ╱
-    if edge == '-':
-        return '\xE2\x94\x80' # U+2500 ─
-    if edge == '|':
-        return '\xE2\x94\x82' # U+2502 │
-    if edge == ':':
-        return '\xE2\x94\x86' # U+2506 ┆
-    if edge == '\\':
-        return '\xE2\x95\xB2' # U+2572 ╲
-    if edge == '+':
-        if before == ' ' and not after  == ' ':
-            return '\xE2\x94\x9C' # U+251C ├
-        if after  == ' ' and not before == ' ':
-            return '\xE2\x94\xA4' # U+2524 ┤
-        return '\xE2\x94\xBC' # U+253C ┼
+    if edge == b'~':
+        return b'\xE2\x95\xA7'  # U+2567 ╧
+    if edge == b'/':
+        return b'\xE2\x95\xB1'  # U+2571 ╱
+    if edge == b'-':
+        return b'\xE2\x94\x80'  # U+2500 ─
+    if edge == b'|':
+        return b'\xE2\x94\x82'  # U+2502 │
+    if edge == b':':
+        return b'\xE2\x94\x86'  # U+2506 ┆
+    if edge == b'\\':
+        return b'\xE2\x95\xB2'  # U+2572 ╲
+    if edge == b'+':
+        if before == b' ' and not after == b' ':
+            return b'\xE2\x94\x9C'  # U+251C ├
+        if after == b' ' and not before == b' ':
+            return b'\xE2\x94\xA4'  # U+2524 ┤
+        return b'\xE2\x94\xBC'  # U+253C ┼
     return edge
 
+
 def convertedges(line):
-    line = ' %s ' % line
+    line = b' %s ' % line
     pretty = []
     for idx in pycompat.xrange(len(line) - 2):
-        pretty.append(prettyedge(line[idx:idx + 1],
-                                 line[idx + 1:idx + 2],
-                                 line[idx + 2:idx + 3]))
-    return ''.join(pretty)
+        pretty.append(
+            prettyedge(
+                line[idx : idx + 1],
+                line[idx + 1 : idx + 2],
+                line[idx + 2 : idx + 3],
+            )
+        )
+    return b''.join(pretty)
+
 
 def getprettygraphnode(orig, *args, **kwargs):
     node = orig(*args, **kwargs)
-    if node == 'o':
-        return '\xE2\x97\x8B' # U+25CB ○
-    if node == '@':
-        return '\xE2\x97\x8D' # U+25CD ◍
-    if node == '*':
-        return '\xE2\x88\x97' # U+2217 ∗
-    if node == 'x':
-        return '\xE2\x97\x8C' # U+25CC ◌
-    if node == '_':
-        return '\xE2\x95\xA4' # U+2564 ╤
+    if node == b'o':
+        return b'\xE2\x97\x8B'  # U+25CB ○
+    if node == b'@':
+        return b'\xE2\x97\x8D'  # U+25CD ◍
+    if node == b'*':
+        return b'\xE2\x88\x97'  # U+2217 ∗
+    if node == b'x':
+        return b'\xE2\x97\x8C'  # U+25CC ◌
+    if node == b'_':
+        return b'\xE2\x95\xA4'  # U+2564 ╤
     return node
 
+
 def outputprettygraph(orig, ui, graph, *args, **kwargs):
     (edges, text) = zip(*graph)
     graph = zip([convertedges(e) for e in edges], text)
     return orig(ui, graph, *args, **kwargs)
 
+
 def extsetup(ui):
-    if ui.plain('graph'):
+    if ui.plain(b'graph'):
         return
 
-    if encoding.encoding != 'UTF-8':
-        ui.warn(_('beautifygraph: unsupported encoding, UTF-8 required\n'))
+    if encoding.encoding != b'UTF-8':
+        ui.warn(_(b'beautifygraph: unsupported encoding, UTF-8 required\n'))
         return
 
     if r'A' in encoding._wide:
-        ui.warn(_('beautifygraph: unsupported terminal settings, '
-                  'monospace narrow text required\n'))
+        ui.warn(
+            _(
+                b'beautifygraph: unsupported terminal settings, '
+                b'monospace narrow text required\n'
+            )
+        )
         return
 
-    extensions.wrapfunction(graphmod, 'outputgraph', outputprettygraph)
-    extensions.wrapfunction(templatekw, 'getgraphnode', getprettygraphnode)
+    extensions.wrapfunction(graphmod, b'outputgraph', outputprettygraph)
+    extensions.wrapfunction(templatekw, b'getgraphnode', getprettygraphnode)
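
beautifygraph keeps its box-drawing glyphs as explicit UTF-8 byte
escapes rather than unicode literals, which is why every replacement
edge or node above is a three-byte string. A quick standalone check that
the escapes and the U+ comments in the hunks agree:

    assert b'\xE2\x94\x82'.decode('utf-8') == u'\u2502'  # U+2502 │
    assert b'\xE2\x95\xB1'.decode('utf-8') == u'\u2571'  # U+2571 ╱
    assert b'\xE2\x97\x8B'.decode('utf-8') == u'\u25CB'  # U+25CB ○
    assert b'\xE2\x97\x8D'.decode('utf-8') == u'\u25CD'  # U+25CD ◍
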
--- a/hgext/blackbox.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/blackbox.py	Mon Oct 21 11:09:48 2019 -0400
@@ -63,7 +63,7 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 cmdtable = {}
 command = registrar.command(cmdtable)
@@ -71,43 +71,46 @@
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('blackbox', 'dirty',
-    default=False,
+configitem(
+    b'blackbox', b'dirty', default=False,
 )
-configitem('blackbox', 'maxsize',
-    default='1 MB',
+configitem(
+    b'blackbox', b'maxsize', default=b'1 MB',
 )
-configitem('blackbox', 'logsource',
-    default=False,
+configitem(
+    b'blackbox', b'logsource', default=False,
 )
-configitem('blackbox', 'maxfiles',
-    default=7,
+configitem(
+    b'blackbox', b'maxfiles', default=7,
 )
-configitem('blackbox', 'track',
-    default=lambda: ['*'],
+configitem(
+    b'blackbox', b'track', default=lambda: [b'*'],
 )
-configitem('blackbox', 'ignore',
-    default=lambda: ['chgserver', 'cmdserver', 'extension'],
+configitem(
+    b'blackbox',
+    b'ignore',
+    default=lambda: [b'chgserver', b'cmdserver', b'extension'],
 )
-configitem('blackbox', 'date-format',
-    default='%Y/%m/%d %H:%M:%S',
+configitem(
+    b'blackbox', b'date-format', default=b'%Y/%m/%d %H:%M:%S',
 )
 
 _lastlogger = loggingutil.proxylogger()
 
+
 class blackboxlogger(object):
     def __init__(self, ui, repo):
         self._repo = repo
-        self._trackedevents = set(ui.configlist('blackbox', 'track'))
-        self._ignoredevents = set(ui.configlist('blackbox', 'ignore'))
-        self._maxfiles = ui.configint('blackbox', 'maxfiles')
-        self._maxsize = ui.configbytes('blackbox', 'maxsize')
+        self._trackedevents = set(ui.configlist(b'blackbox', b'track'))
+        self._ignoredevents = set(ui.configlist(b'blackbox', b'ignore'))
+        self._maxfiles = ui.configint(b'blackbox', b'maxfiles')
+        self._maxsize = ui.configbytes(b'blackbox', b'maxsize')
         self._inlog = False
 
     def tracked(self, event):
-        return ((b'*' in self._trackedevents
-                 and event not in self._ignoredevents)
-                or event in self._trackedevents)
+        return (
+            b'*' in self._trackedevents and event not in self._ignoredevents
+        ) or event in self._trackedevents
 
     def log(self, ui, event, msg, opts):
         # self._log() -> ctx.dirty() may create new subrepo instance, which
@@ -122,39 +125,48 @@
             self._inlog = False
 
     def _log(self, ui, event, msg, opts):
-        default = ui.configdate('devel', 'default-date')
-        date = dateutil.datestr(default, ui.config('blackbox', 'date-format'))
+        default = ui.configdate(b'devel', b'default-date')
+        date = dateutil.datestr(default, ui.config(b'blackbox', b'date-format'))
         user = procutil.getuser()
-        pid = '%d' % procutil.getpid()
-        changed = ''
+        pid = b'%d' % procutil.getpid()
+        changed = b''
         ctx = self._repo[None]
         parents = ctx.parents()
-        rev = ('+'.join([hex(p.node()) for p in parents]))
-        if (ui.configbool('blackbox', 'dirty') and
-            ctx.dirty(missing=True, merge=False, branch=False)):
-            changed = '+'
-        if ui.configbool('blackbox', 'logsource'):
-            src = ' [%s]' % event
+        rev = b'+'.join([hex(p.node()) for p in parents])
+        if ui.configbool(b'blackbox', b'dirty') and ctx.dirty(
+            missing=True, merge=False, branch=False
+        ):
+            changed = b'+'
+        if ui.configbool(b'blackbox', b'logsource'):
+            src = b' [%s]' % event
         else:
-            src = ''
+            src = b''
         try:
-            fmt = '%s %s @%s%s (%s)%s> %s'
+            fmt = b'%s %s @%s%s (%s)%s> %s'
             args = (date, user, rev, changed, pid, src, msg)
             with loggingutil.openlogfile(
-                    ui, self._repo.vfs, name='blackbox.log',
-                    maxfiles=self._maxfiles, maxsize=self._maxsize) as fp:
+                ui,
+                self._repo.vfs,
+                name=b'blackbox.log',
+                maxfiles=self._maxfiles,
+                maxsize=self._maxsize,
+            ) as fp:
                 fp.write(fmt % args)
         except (IOError, OSError) as err:
             # deactivate this to avoid failed logging again
             self._trackedevents.clear()
-            ui.debug('warning: cannot write to blackbox.log: %s\n' %
-                     encoding.strtolocal(err.strerror))
+            ui.debug(
+                b'warning: cannot write to blackbox.log: %s\n'
+                % encoding.strtolocal(err.strerror)
+            )
             return
         _lastlogger.logger = self
 
+
 def uipopulate(ui):
     ui.setlogger(b'blackbox', _lastlogger)
 
+
 def reposetup(ui, repo):
     # During 'hg pull' a httppeer repo is created to represent the remote repo.
     # It doesn't have a .hg directory to put a blackbox in, so we don't do
@@ -172,24 +184,26 @@
     if _lastlogger.logger is None:
         _lastlogger.logger = logger
 
-    repo._wlockfreeprefix.add('blackbox.log')
+    repo._wlockfreeprefix.add(b'blackbox.log')
+
 
-@command('blackbox',
-    [('l', 'limit', 10, _('the number of events to show')),
-    ],
-    _('hg blackbox [OPTION]...'),
+@command(
+    b'blackbox',
+    [(b'l', b'limit', 10, _(b'the number of events to show')),],
+    _(b'hg blackbox [OPTION]...'),
     helpcategory=command.CATEGORY_MAINTENANCE,
-    helpbasic=True)
+    helpbasic=True,
+)
 def blackbox(ui, repo, *revs, **opts):
     '''view the recent repository events
     '''
 
-    if not repo.vfs.exists('blackbox.log'):
+    if not repo.vfs.exists(b'blackbox.log'):
         return
 
     limit = opts.get(r'limit')
-    fp = repo.vfs('blackbox.log', 'r')
-    lines = fp.read().split('\n')
+    fp = repo.vfs(b'blackbox.log', b'r')
+    lines = fp.read().split(b'\n')
 
     count = 0
     output = []
@@ -202,4 +216,4 @@
             count += 1
         output.append(line)
 
-    ui.status('\n'.join(reversed(output)))
+    ui.status(b'\n'.join(reversed(output)))
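
blackbox's _log() above assembles one log line per tracked event from
seven fields. A standalone rendering of that format string with made-up
values (in the extension the date, rev, and pid come from dateutil, the
working directory parents, and procutil respectively):

    fmt = b'%s %s @%s%s (%s)%s> %s'
    args = (b'2019/10/21 11:09:48', b'alice', b'd782cce137fd', b'+',
            b'12345', b' [command]', b'log -l 5\n')
    assert fmt % args == (b'2019/10/21 11:09:48 alice @d782cce137fd+'
                          b' (12345) [command]> log -l 5\n')
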
--- a/hgext/bookflow.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/bookflow.py	Mon Oct 21 11:09:48 2019 -0400
@@ -24,32 +24,38 @@
     registrar,
 )
 
-MY_NAME = 'bookflow'
+MY_NAME = b'bookflow'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem(MY_NAME, 'protect', ['@'])
-configitem(MY_NAME, 'require-bookmark', True)
-configitem(MY_NAME, 'enable-branches', False)
+configitem(MY_NAME, b'protect', [b'@'])
+configitem(MY_NAME, b'require-bookmark', True)
+configitem(MY_NAME, b'enable-branches', False)
 
 cmdtable = {}
 command = registrar.command(cmdtable)
 
+
 def commit_hook(ui, repo, **kwargs):
     active = repo._bookmarks.active
     if active:
-        if active in ui.configlist(MY_NAME, 'protect'):
+        if active in ui.configlist(MY_NAME, b'protect'):
             raise error.Abort(
-                _('cannot commit, bookmark %s is protected') % active)
+                _(b'cannot commit, bookmark %s is protected') % active
+            )
         if not cwd_at_bookmark(repo, active):
             raise error.Abort(
-       _('cannot commit, working directory out of sync with active bookmark'),
-                hint=_("run 'hg up %s'") % active)
-    elif ui.configbool(MY_NAME, 'require-bookmark', True):
-        raise error.Abort(_('cannot commit without an active bookmark'))
+                _(
+                    b'cannot commit, working directory out of sync with active bookmark'
+                ),
+                hint=_(b"run 'hg up %s'") % active,
+            )
+    elif ui.configbool(MY_NAME, b'require-bookmark', True):
+        raise error.Abort(_(b'cannot commit without an active bookmark'))
     return 0
 
+
 def bookmarks_update(orig, repo, parents, node):
     if len(parents) == 2:
         # called during commit
@@ -58,47 +64,63 @@
         # called during update
         return False
 
+
 def bookmarks_addbookmarks(
-        orig, repo, tr, names, rev=None, force=False, inactive=False):
+    orig, repo, tr, names, rev=None, force=False, inactive=False
+):
     if not rev:
         marks = repo._bookmarks
         for name in names:
             if name in marks:
-                raise error.Abort(_(
-                    "bookmark %s already exists, to move use the --rev option"
-                    ) % name)
+                raise error.Abort(
+                    _(
+                        b"bookmark %s already exists, to move use the --rev option"
+                    )
+                    % name
+                )
     return orig(repo, tr, names, rev, force, inactive)
 
+
 def commands_commit(orig, ui, repo, *args, **opts):
     commit_hook(ui, repo)
     return orig(ui, repo, *args, **opts)
 
+
 def commands_pull(orig, ui, repo, *args, **opts):
     rc = orig(ui, repo, *args, **opts)
     active = repo._bookmarks.active
     if active and not cwd_at_bookmark(repo, active):
-        ui.warn(_(
-            "working directory out of sync with active bookmark, run "
-            "'hg up %s'"
-        ) % active)
+        ui.warn(
+            _(
+                b"working directory out of sync with active bookmark, run "
+                b"'hg up %s'"
+            )
+            % active
+        )
     return rc
 
+
 def commands_branch(orig, ui, repo, label=None, **opts):
     if label and not opts.get(r'clean') and not opts.get(r'rev'):
         raise error.Abort(
-         _("creating named branches is disabled and you should use bookmarks"),
-            hint="see 'hg help bookflow'")
+            _(
+                b"creating named branches is disabled and you should use bookmarks"
+            ),
+            hint=b"see 'hg help bookflow'",
+        )
     return orig(ui, repo, label, **opts)
 
+
 def cwd_at_bookmark(repo, mark):
     mark_id = repo._bookmarks[mark]
-    cur_id = repo.lookup('.')
+    cur_id = repo.lookup(b'.')
     return cur_id == mark_id
 
+
 def uisetup(ui):
-    extensions.wrapfunction(bookmarks, 'update', bookmarks_update)
-    extensions.wrapfunction(bookmarks, 'addbookmarks', bookmarks_addbookmarks)
-    extensions.wrapcommand(commands.table, 'commit', commands_commit)
-    extensions.wrapcommand(commands.table, 'pull', commands_pull)
-    if not ui.configbool(MY_NAME, 'enable-branches'):
-        extensions.wrapcommand(commands.table, 'branch', commands_branch)
+    extensions.wrapfunction(bookmarks, b'update', bookmarks_update)
+    extensions.wrapfunction(bookmarks, b'addbookmarks', bookmarks_addbookmarks)
+    extensions.wrapcommand(commands.table, b'commit', commands_commit)
+    extensions.wrapcommand(commands.table, b'pull', commands_pull)
+    if not ui.configbool(MY_NAME, b'enable-branches'):
+        extensions.wrapcommand(commands.table, b'branch', commands_branch)
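
bookflow, like beautifygraph above, leans on extensions.wrapfunction()
handing the wrapper the original callable as its first argument, so
bookmarks_update() can either veto or delegate. A minimal standalone
model of that calling convention (the real implementation in
mercurial/extensions.py does considerably more bookkeeping):

    import types

    def wrapfunction(container, name, wrapper):
        # swap container.<name> for a closure that prepends the
        # original function to the wrapper's arguments
        orig = getattr(container, name)
        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)
        setattr(container, name, wrapped)
        return orig

    bookmarks = types.SimpleNamespace(update=lambda repo, parents, node: True)

    def bookmarks_update(orig, repo, parents, node):
        # mirrors bookflow: two parents means "called during commit"
        if len(parents) == 2:
            return orig(repo, parents, node)
        return False

    wrapfunction(bookmarks, 'update', bookmarks_update)
    assert bookmarks.update(None, [None], None) is False
    assert bookmarks.update(None, [None, None], None) is True
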
--- a/hgext/bugzilla.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/bugzilla.py	Mon Oct 21 11:09:48 2019 -0400
@@ -319,89 +319,98 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('bugzilla', 'apikey',
-    default='',
+configitem(
+    b'bugzilla', b'apikey', default=b'',
 )
-configitem('bugzilla', 'bzdir',
-    default='/var/www/html/bugzilla',
+configitem(
+    b'bugzilla', b'bzdir', default=b'/var/www/html/bugzilla',
+)
+configitem(
+    b'bugzilla', b'bzemail', default=None,
 )
-configitem('bugzilla', 'bzemail',
-    default=None,
+configitem(
+    b'bugzilla', b'bzurl', default=b'http://localhost/bugzilla/',
 )
-configitem('bugzilla', 'bzurl',
-    default='http://localhost/bugzilla/',
+configitem(
+    b'bugzilla', b'bzuser', default=None,
 )
-configitem('bugzilla', 'bzuser',
-    default=None,
+configitem(
+    b'bugzilla', b'db', default=b'bugs',
 )
-configitem('bugzilla', 'db',
-    default='bugs',
-)
-configitem('bugzilla', 'fixregexp',
-    default=(br'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
-             br'(?:nos?\.?|num(?:ber)?s?)?\s*'
-             br'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
-             br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
+configitem(
+    b'bugzilla',
+    b'fixregexp',
+    default=(
+        br'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
+        br'(?:nos?\.?|num(?:ber)?s?)?\s*'
+        br'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
+        br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?'
+    ),
 )
-configitem('bugzilla', 'fixresolution',
-    default='FIXED',
+configitem(
+    b'bugzilla', b'fixresolution', default=b'FIXED',
 )
-configitem('bugzilla', 'fixstatus',
-    default='RESOLVED',
+configitem(
+    b'bugzilla', b'fixstatus', default=b'RESOLVED',
 )
-configitem('bugzilla', 'host',
-    default='localhost',
+configitem(
+    b'bugzilla', b'host', default=b'localhost',
 )
-configitem('bugzilla', 'notify',
-    default=configitem.dynamicdefault,
+configitem(
+    b'bugzilla', b'notify', default=configitem.dynamicdefault,
 )
-configitem('bugzilla', 'password',
-    default=None,
+configitem(
+    b'bugzilla', b'password', default=None,
 )
-configitem('bugzilla', 'regexp',
-    default=(br'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
-             br'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
-             br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
+configitem(
+    b'bugzilla',
+    b'regexp',
+    default=(
+        br'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
+        br'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
+        br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?'
+    ),
 )
-configitem('bugzilla', 'strip',
-    default=0,
+configitem(
+    b'bugzilla', b'strip', default=0,
 )
-configitem('bugzilla', 'style',
-    default=None,
+configitem(
+    b'bugzilla', b'style', default=None,
 )
-configitem('bugzilla', 'template',
-    default=None,
+configitem(
+    b'bugzilla', b'template', default=None,
 )
-configitem('bugzilla', 'timeout',
-    default=5,
+configitem(
+    b'bugzilla', b'timeout', default=5,
 )
-configitem('bugzilla', 'user',
-    default='bugs',
+configitem(
+    b'bugzilla', b'user', default=b'bugs',
 )
-configitem('bugzilla', 'usermap',
-    default=None,
+configitem(
+    b'bugzilla', b'usermap', default=None,
 )
-configitem('bugzilla', 'version',
-    default=None,
+configitem(
+    b'bugzilla', b'version', default=None,
 )
 
+
 class bzaccess(object):
     '''Base class for access to Bugzilla.'''
 
     def __init__(self, ui):
         self.ui = ui
-        usermap = self.ui.config('bugzilla', 'usermap')
+        usermap = self.ui.config(b'bugzilla', b'usermap')
         if usermap:
-            self.ui.readconfig(usermap, sections=['usermap'])
+            self.ui.readconfig(usermap, sections=[b'usermap'])
 
     def map_committer(self, user):
         '''map name of committer to Bugzilla user name.'''
-        for committer, bzuser in self.ui.configitems('usermap'):
+        for committer, bzuser in self.ui.configitems(b'usermap'):
             if committer.lower() == user.lower():
                 return bzuser
         return user
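
The fixregexp default registered above is a raw bytes pattern that pulls
bug ids, and an optional hours figure, out of a commit message. A
standalone demonstration, with the pattern copied verbatim from the hunk
and a made-up message:

    import re

    fixre = re.compile(
        br'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
        br'(?:nos?\.?|num(?:ber)?s?)?\s*'
        br'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
        br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?'
    )

    m = fixre.search(b'fixes bugs 123 and 456 h 1.5')
    assert b'123' in m.group('ids') and b'456' in m.group('ids')
    assert m.group('hours') == b'1.5'
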
@@ -434,6 +443,7 @@
         emails automatically.
         '''
 
+
 # Bugzilla via direct access to MySQL database.
 class bzmysql(bzaccess):
     '''Support for direct MySQL access to Bugzilla.
@@ -447,96 +457,107 @@
     @staticmethod
     def sql_buglist(ids):
         '''return SQL-friendly list of bug ids'''
-        return '(' + ','.join(map(str, ids)) + ')'
+        return b'(' + b','.join(map(str, ids)) + b')'
 
     _MySQLdb = None
 
     def __init__(self, ui):
         try:
             import MySQLdb as mysql
+
             bzmysql._MySQLdb = mysql
         except ImportError as err:
-            raise error.Abort(_('python mysql support not available: %s') % err)
+            raise error.Abort(
+                _(b'python mysql support not available: %s') % err
+            )
 
         bzaccess.__init__(self, ui)
 
-        host = self.ui.config('bugzilla', 'host')
-        user = self.ui.config('bugzilla', 'user')
-        passwd = self.ui.config('bugzilla', 'password')
-        db = self.ui.config('bugzilla', 'db')
-        timeout = int(self.ui.config('bugzilla', 'timeout'))
-        self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
-                     (host, db, user, '*' * len(passwd)))
-        self.conn = bzmysql._MySQLdb.connect(host=host,
-                                                   user=user, passwd=passwd,
-                                                   db=db,
-                                                   connect_timeout=timeout)
+        host = self.ui.config(b'bugzilla', b'host')
+        user = self.ui.config(b'bugzilla', b'user')
+        passwd = self.ui.config(b'bugzilla', b'password')
+        db = self.ui.config(b'bugzilla', b'db')
+        timeout = int(self.ui.config(b'bugzilla', b'timeout'))
+        self.ui.note(
+            _(b'connecting to %s:%s as %s, password %s\n')
+            % (host, db, user, b'*' * len(passwd))
+        )
+        self.conn = bzmysql._MySQLdb.connect(
+            host=host, user=user, passwd=passwd, db=db, connect_timeout=timeout
+        )
         self.cursor = self.conn.cursor()
         self.longdesc_id = self.get_longdesc_id()
         self.user_ids = {}
-        self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
+        self.default_notify = b"cd %(bzdir)s && ./processmail %(id)s %(user)s"
 
     def run(self, *args, **kwargs):
         '''run a query.'''
-        self.ui.note(_('query: %s %s\n') % (args, kwargs))
+        self.ui.note(_(b'query: %s %s\n') % (args, kwargs))
         try:
             self.cursor.execute(*args, **kwargs)
         except bzmysql._MySQLdb.MySQLError:
-            self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
+            self.ui.note(_(b'failed query: %s %s\n') % (args, kwargs))
             raise
 
     def get_longdesc_id(self):
         '''get identity of longdesc field'''
-        self.run('select fieldid from fielddefs where name = "longdesc"')
+        self.run(b'select fieldid from fielddefs where name = "longdesc"')
         ids = self.cursor.fetchall()
         if len(ids) != 1:
-            raise error.Abort(_('unknown database schema'))
+            raise error.Abort(_(b'unknown database schema'))
         return ids[0][0]
 
     def filter_real_bug_ids(self, bugs):
         '''filter not-existing bugs from set.'''
-        self.run('select bug_id from bugs where bug_id in %s' %
-                 bzmysql.sql_buglist(bugs.keys()))
+        self.run(
+            b'select bug_id from bugs where bug_id in %s'
+            % bzmysql.sql_buglist(bugs.keys())
+        )
         existing = [id for (id,) in self.cursor.fetchall()]
         for id in bugs.keys():
             if id not in existing:
-                self.ui.status(_('bug %d does not exist\n') % id)
+                self.ui.status(_(b'bug %d does not exist\n') % id)
                 del bugs[id]
 
     def filter_cset_known_bug_ids(self, node, bugs):
         '''filter bug ids that already refer to this changeset from set.'''
-        self.run('''select bug_id from longdescs where
-                    bug_id in %s and thetext like "%%%s%%"''' %
-                 (bzmysql.sql_buglist(bugs.keys()), short(node)))
+        self.run(
+            '''select bug_id from longdescs where
+                    bug_id in %s and thetext like "%%%s%%"'''
+            % (bzmysql.sql_buglist(bugs.keys()), short(node))
+        )
         for (id,) in self.cursor.fetchall():
-            self.ui.status(_('bug %d already knows about changeset %s\n') %
-                           (id, short(node)))
+            self.ui.status(
+                _(b'bug %d already knows about changeset %s\n')
+                % (id, short(node))
+            )
             del bugs[id]
 
     def notify(self, bugs, committer):
         '''tell bugzilla to send mail.'''
-        self.ui.status(_('telling bugzilla to send mail:\n'))
+        self.ui.status(_(b'telling bugzilla to send mail:\n'))
         (user, userid) = self.get_bugzilla_user(committer)
         for id in bugs.keys():
-            self.ui.status(_('  bug %s\n') % id)
-            cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
-            bzdir = self.ui.config('bugzilla', 'bzdir')
+            self.ui.status(_(b'  bug %s\n') % id)
+            cmdfmt = self.ui.config(b'bugzilla', b'notify', self.default_notify)
+            bzdir = self.ui.config(b'bugzilla', b'bzdir')
             try:
                 # Backwards-compatible with old notify string, which
                 # took one string. This will throw with a new format
                 # string.
                 cmd = cmdfmt % id
             except TypeError:
-                cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
-            self.ui.note(_('running notify command %s\n') % cmd)
-            fp = procutil.popen('(%s) 2>&1' % cmd, 'rb')
+                cmd = cmdfmt % {b'bzdir': bzdir, b'id': id, b'user': user}
+            self.ui.note(_(b'running notify command %s\n') % cmd)
+            fp = procutil.popen(b'(%s) 2>&1' % cmd, b'rb')
             out = util.fromnativeeol(fp.read())
             ret = fp.close()
             if ret:
                 self.ui.warn(out)
-                raise error.Abort(_('bugzilla notify command %s') %
-                                  procutil.explainexit(ret))
-        self.ui.status(_('done\n'))
+                raise error.Abort(
+                    _(b'bugzilla notify command %s') % procutil.explainexit(ret)
+                )
+        self.ui.status(_(b'done\n'))
 
     def get_user_id(self, user):
         '''look up numeric bugzilla user id.'''
@@ -546,9 +567,12 @@
             try:
                 userid = int(user)
             except ValueError:
-                self.ui.note(_('looking up user %s\n') % user)
-                self.run('''select userid from profiles
-                            where login_name like %s''', user)
+                self.ui.note(_(b'looking up user %s\n') % user)
+                self.run(
+                    '''select userid from profiles
+                            where login_name like %s''',
+                    user,
+                )
                 all = self.cursor.fetchall()
                 if len(all) != 1:
                     raise KeyError(user)
@@ -565,15 +589,18 @@
             userid = self.get_user_id(user)
         except KeyError:
             try:
-                defaultuser = self.ui.config('bugzilla', 'bzuser')
+                defaultuser = self.ui.config(b'bugzilla', b'bzuser')
                 if not defaultuser:
-                    raise error.Abort(_('cannot find bugzilla user id for %s') %
-                                     user)
+                    raise error.Abort(
+                        _(b'cannot find bugzilla user id for %s') % user
+                    )
                 userid = self.get_user_id(defaultuser)
                 user = defaultuser
             except KeyError:
-                raise error.Abort(_('cannot find bugzilla user id for %s or %s')
-                                 % (user, defaultuser))
+                raise error.Abort(
+                    _(b'cannot find bugzilla user id for %s or %s')
+                    % (user, defaultuser)
+                )
         return (user, userid)
 
     def updatebug(self, bugid, newstate, text, committer):
@@ -582,26 +609,33 @@
         Try adding comment as committer of changeset, otherwise as
         default bugzilla user.'''
         if len(newstate) > 0:
-            self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))
+            self.ui.warn(_(b"Bugzilla/MySQL cannot update bug state\n"))
 
         (user, userid) = self.get_bugzilla_user(committer)
         now = time.strftime(r'%Y-%m-%d %H:%M:%S')
-        self.run('''insert into longdescs
+        self.run(
+            b'''insert into longdescs
                     (bug_id, who, bug_when, thetext)
                     values (%s, %s, %s, %s)''',
-                 (bugid, userid, now, text))
-        self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
+            (bugid, userid, now, text),
+        )
+        self.run(
+            b'''insert into bugs_activity (bug_id, who, bug_when, fieldid)
                     values (%s, %s, %s, %s)''',
-                 (bugid, userid, now, self.longdesc_id))
+            (bugid, userid, now, self.longdesc_id),
+        )
         self.conn.commit()
 
+
 class bzmysql_2_18(bzmysql):
     '''support for bugzilla 2.18 series.'''
 
     def __init__(self, ui):
         bzmysql.__init__(self, ui)
         self.default_notify = (
-            "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s")
+            b"cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
+        )
+
 
 class bzmysql_3_0(bzmysql_2_18):
     '''support for bugzilla 3.0 series.'''
@@ -611,14 +645,16 @@
 
     def get_longdesc_id(self):
         '''get identity of longdesc field'''
-        self.run('select id from fielddefs where name = "longdesc"')
+        self.run(b'select id from fielddefs where name = "longdesc"')
         ids = self.cursor.fetchall()
         if len(ids) != 1:
-            raise error.Abort(_('unknown database schema'))
+            raise error.Abort(_(b'unknown database schema'))
         return ids[0][0]
 
+
 # Bugzilla via XMLRPC interface.
 
+
 class cookietransportrequest(object):
     """A Transport request method that retains cookies over its lifetime.
 
@@ -636,10 +672,11 @@
     # http://www.itkovian.net/base/transport-class-for-pythons-xml-rpc-lib/
 
     cookies = []
+
     def send_cookies(self, connection):
         if self.cookies:
             for cookie in self.cookies:
-                connection.putheader("Cookie", cookie)
+                connection.putheader(b"Cookie", cookie)
 
     def request(self, host, handler, request_body, verbose=0):
         self.verbose = verbose
@@ -667,14 +704,18 @@
             response = h._conn.getresponse()
 
         # Add any cookie definitions to our list.
-        for header in response.msg.getallmatchingheaders("Set-Cookie"):
-            val = header.split(": ", 1)[1]
-            cookie = val.split(";", 1)[0]
+        for header in response.msg.getallmatchingheaders(b"Set-Cookie"):
+            val = header.split(b": ", 1)[1]
+            cookie = val.split(b";", 1)[0]
             self.cookies.append(cookie)
 
         if response.status != 200:
-            raise xmlrpclib.ProtocolError(host + handler, response.status,
-                                          response.reason, response.msg.headers)
+            raise xmlrpclib.ProtocolError(
+                host + handler,
+                response.status,
+                response.reason,
+                response.msg.headers,
+            )
 
         payload = response.read()
         parser, unmarshaller = self.getparser()
@@ -683,6 +724,7 @@
 
         return unmarshaller.close()
 
+
 # The explicit calls to the underlying xmlrpclib __init__() methods are
 # necessary. The xmlrpclib.Transport classes are old-style classes, and
 # it turns out their __init__() doesn't get called when doing multiple
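
The cookie-retaining transports are consumed by xmlrpclib directly:
ServerProxy funnels every method call through transport.request(), so cookies
harvested from Set-Cookie replies (the Bugzilla login session) persist for the
proxy's lifetime. A hedged sketch, with a hypothetical URL::

    # transport() below picks the class by scheme; roughly:
    proxy = xmlrpclib.ServerProxy(
        'https://bugzilla.example.com/xmlrpc.cgi',
        cookiesafetransport(),  # cookietransport() for plain http
    )
    proxy.User.login({'login': user, 'password': passwd})
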
@@ -692,11 +734,13 @@
         if util.safehasattr(xmlrpclib.Transport, "__init__"):
             xmlrpclib.Transport.__init__(self, use_datetime)
 
+
 class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
     def __init__(self, use_datetime=0):
         if util.safehasattr(xmlrpclib.Transport, "__init__"):
             xmlrpclib.SafeTransport.__init__(self, use_datetime)
 
+
 class bzxmlrpc(bzaccess):
     """Support for access to Bugzilla via the Bugzilla XMLRPC API.
 
@@ -706,75 +750,88 @@
     def __init__(self, ui):
         bzaccess.__init__(self, ui)
 
-        bzweb = self.ui.config('bugzilla', 'bzurl')
-        bzweb = bzweb.rstrip("/") + "/xmlrpc.cgi"
+        bzweb = self.ui.config(b'bugzilla', b'bzurl')
+        bzweb = bzweb.rstrip(b"/") + b"/xmlrpc.cgi"
 
-        user = self.ui.config('bugzilla', 'user')
-        passwd = self.ui.config('bugzilla', 'password')
+        user = self.ui.config(b'bugzilla', b'user')
+        passwd = self.ui.config(b'bugzilla', b'password')
 
-        self.fixstatus = self.ui.config('bugzilla', 'fixstatus')
-        self.fixresolution = self.ui.config('bugzilla', 'fixresolution')
+        self.fixstatus = self.ui.config(b'bugzilla', b'fixstatus')
+        self.fixresolution = self.ui.config(b'bugzilla', b'fixresolution')
 
         self.bzproxy = xmlrpclib.ServerProxy(bzweb, self.transport(bzweb))
-        ver = self.bzproxy.Bugzilla.version()['version'].split('.')
+        ver = self.bzproxy.Bugzilla.version()[b'version'].split(b'.')
         self.bzvermajor = int(ver[0])
         self.bzverminor = int(ver[1])
-        login = self.bzproxy.User.login({'login': user, 'password': passwd,
-                                         'restrict_login': True})
-        self.bztoken = login.get('token', '')
+        login = self.bzproxy.User.login(
+            {b'login': user, b'password': passwd, b'restrict_login': True}
+        )
+        self.bztoken = login.get(b'token', b'')
 
     def transport(self, uri):
-        if util.urlreq.urlparse(uri, "http")[0] == "https":
+        if util.urlreq.urlparse(uri, b"http")[0] == b"https":
             return cookiesafetransport()
         else:
             return cookietransport()
 
     def get_bug_comments(self, id):
         """Return a string with all comment text for a bug."""
-        c = self.bzproxy.Bug.comments({'ids': [id],
-                                       'include_fields': ['text'],
-                                       'token': self.bztoken})
-        return ''.join([t['text'] for t in c['bugs']['%d' % id]['comments']])
+        c = self.bzproxy.Bug.comments(
+            {b'ids': [id], b'include_fields': [b'text'], b'token': self.bztoken}
+        )
+        return b''.join(
+            [t[b'text'] for t in c[b'bugs'][b'%d' % id][b'comments']]
+        )
 
     def filter_real_bug_ids(self, bugs):
-        probe = self.bzproxy.Bug.get({'ids': sorted(bugs.keys()),
-                                      'include_fields': [],
-                                      'permissive': True,
-                                      'token': self.bztoken,
-                                      })
-        for badbug in probe['faults']:
-            id = badbug['id']
-            self.ui.status(_('bug %d does not exist\n') % id)
+        probe = self.bzproxy.Bug.get(
+            {
+                b'ids': sorted(bugs.keys()),
+                b'include_fields': [],
+                b'permissive': True,
+                b'token': self.bztoken,
+            }
+        )
+        for badbug in probe[b'faults']:
+            id = badbug[b'id']
+            self.ui.status(_(b'bug %d does not exist\n') % id)
             del bugs[id]
 
     def filter_cset_known_bug_ids(self, node, bugs):
         for id in sorted(bugs.keys()):
             if self.get_bug_comments(id).find(short(node)) != -1:
-                self.ui.status(_('bug %d already knows about changeset %s\n') %
-                               (id, short(node)))
+                self.ui.status(
+                    _(b'bug %d already knows about changeset %s\n')
+                    % (id, short(node))
+                )
                 del bugs[id]
 
     def updatebug(self, bugid, newstate, text, committer):
         args = {}
-        if 'hours' in newstate:
-            args['work_time'] = newstate['hours']
+        if b'hours' in newstate:
+            args[b'work_time'] = newstate[b'hours']
 
         if self.bzvermajor >= 4:
-            args['ids'] = [bugid]
-            args['comment'] = {'body' : text}
-            if 'fix' in newstate:
-                args['status'] = self.fixstatus
-                args['resolution'] = self.fixresolution
-            args['token'] = self.bztoken
+            args[b'ids'] = [bugid]
+            args[b'comment'] = {b'body': text}
+            if b'fix' in newstate:
+                args[b'status'] = self.fixstatus
+                args[b'resolution'] = self.fixresolution
+            args[b'token'] = self.bztoken
             self.bzproxy.Bug.update(args)
         else:
-            if 'fix' in newstate:
-                self.ui.warn(_("Bugzilla/XMLRPC needs Bugzilla 4.0 or later "
-                               "to mark bugs fixed\n"))
-            args['id'] = bugid
-            args['comment'] = text
+            if b'fix' in newstate:
+                self.ui.warn(
+                    _(
+                        b"Bugzilla/XMLRPC needs Bugzilla 4.0 or later "
+                        b"to mark bugs fixed\n"
+                    )
+                )
+            args[b'id'] = bugid
+            args[b'comment'] = text
             self.bzproxy.Bug.add_comment(args)
 
+
 class bzxmlrpcemail(bzxmlrpc):
     """Read data from Bugzilla via XMLRPC, send updates via email.
 
@@ -798,18 +855,18 @@
     def __init__(self, ui):
         bzxmlrpc.__init__(self, ui)
 
-        self.bzemail = self.ui.config('bugzilla', 'bzemail')
+        self.bzemail = self.ui.config(b'bugzilla', b'bzemail')
         if not self.bzemail:
-            raise error.Abort(_("configuration 'bzemail' missing"))
+            raise error.Abort(_(b"configuration 'bzemail' missing"))
         mail.validateconfig(self.ui)
 
     def makecommandline(self, fieldname, value):
         if self.bzvermajor >= 4:
-            return "@%s %s" % (fieldname, pycompat.bytestr(value))
+            return b"@%s %s" % (fieldname, pycompat.bytestr(value))
         else:
-            if fieldname == "id":
-                fieldname = "bug_id"
-            return "@%s = %s" % (fieldname, pycompat.bytestr(value))
+            if fieldname == b"id":
+                fieldname = b"bug_id"
+            return b"@%s = %s" % (fieldname, pycompat.bytestr(value))
 
     def send_bug_modify_email(self, bugid, commands, comment, committer):
         '''send modification message to Bugzilla bug via email.
@@ -823,68 +880,76 @@
         than the subject line, and leave a blank line after it.
         '''
         user = self.map_committer(committer)
-        matches = self.bzproxy.User.get({'match': [user],
-                                         'token': self.bztoken})
-        if not matches['users']:
-            user = self.ui.config('bugzilla', 'user')
-            matches = self.bzproxy.User.get({'match': [user],
-                                             'token': self.bztoken})
-            if not matches['users']:
-                raise error.Abort(_("default bugzilla user %s email not found")
-                                  % user)
-        user = matches['users'][0]['email']
-        commands.append(self.makecommandline("id", bugid))
+        matches = self.bzproxy.User.get(
+            {b'match': [user], b'token': self.bztoken}
+        )
+        if not matches[b'users']:
+            user = self.ui.config(b'bugzilla', b'user')
+            matches = self.bzproxy.User.get(
+                {b'match': [user], b'token': self.bztoken}
+            )
+            if not matches[b'users']:
+                raise error.Abort(
+                    _(b"default bugzilla user %s email not found") % user
+                )
+        user = matches[b'users'][0][b'email']
+        commands.append(self.makecommandline(b"id", bugid))
 
-        text = "\n".join(commands) + "\n\n" + comment
+        text = b"\n".join(commands) + b"\n\n" + comment
 
         _charsets = mail._charsets(self.ui)
         user = mail.addressencode(self.ui, user, _charsets)
         bzemail = mail.addressencode(self.ui, self.bzemail, _charsets)
         msg = mail.mimeencode(self.ui, text, _charsets)
-        msg['From'] = user
-        msg['To'] = bzemail
-        msg['Subject'] = mail.headencode(self.ui, "Bug modification", _charsets)
+        msg[b'From'] = user
+        msg[b'To'] = bzemail
+        msg[b'Subject'] = mail.headencode(
+            self.ui, b"Bug modification", _charsets
+        )
         sendmail = mail.connect(self.ui)
         sendmail(user, bzemail, msg.as_string())
 
     def updatebug(self, bugid, newstate, text, committer):
         cmds = []
-        if 'hours' in newstate:
-            cmds.append(self.makecommandline("work_time", newstate['hours']))
-        if 'fix' in newstate:
-            cmds.append(self.makecommandline("bug_status", self.fixstatus))
-            cmds.append(self.makecommandline("resolution", self.fixresolution))
+        if b'hours' in newstate:
+            cmds.append(self.makecommandline(b"work_time", newstate[b'hours']))
+        if b'fix' in newstate:
+            cmds.append(self.makecommandline(b"bug_status", self.fixstatus))
+            cmds.append(self.makecommandline(b"resolution", self.fixresolution))
         self.send_bug_modify_email(bugid, cmds, text, committer)
 
+
 class NotFound(LookupError):
     pass
 
+
 class bzrestapi(bzaccess):
     """Read and write bugzilla data using the REST API available since
     Bugzilla 5.0.
     """
+
     def __init__(self, ui):
         bzaccess.__init__(self, ui)
-        bz = self.ui.config('bugzilla', 'bzurl')
-        self.bzroot = '/'.join([bz, 'rest'])
-        self.apikey = self.ui.config('bugzilla', 'apikey')
-        self.user = self.ui.config('bugzilla', 'user')
-        self.passwd = self.ui.config('bugzilla', 'password')
-        self.fixstatus = self.ui.config('bugzilla', 'fixstatus')
-        self.fixresolution = self.ui.config('bugzilla', 'fixresolution')
+        bz = self.ui.config(b'bugzilla', b'bzurl')
+        self.bzroot = b'/'.join([bz, b'rest'])
+        self.apikey = self.ui.config(b'bugzilla', b'apikey')
+        self.user = self.ui.config(b'bugzilla', b'user')
+        self.passwd = self.ui.config(b'bugzilla', b'password')
+        self.fixstatus = self.ui.config(b'bugzilla', b'fixstatus')
+        self.fixresolution = self.ui.config(b'bugzilla', b'fixresolution')
 
     def apiurl(self, targets, include_fields=None):
-        url = '/'.join([self.bzroot] + [pycompat.bytestr(t) for t in targets])
+        url = b'/'.join([self.bzroot] + [pycompat.bytestr(t) for t in targets])
         qv = {}
         if self.apikey:
-            qv['api_key'] = self.apikey
+            qv[b'api_key'] = self.apikey
         elif self.user and self.passwd:
-            qv['login'] = self.user
-            qv['password'] = self.passwd
+            qv[b'login'] = self.user
+            qv[b'password'] = self.passwd
         if include_fields:
-            qv['include_fields'] = include_fields
+            qv[b'include_fields'] = include_fields
         if qv:
-            url = '%s?%s' % (url, util.urlreq.urlencode(qv))
+            url = b'%s?%s' % (url, util.urlreq.urlencode(qv))
         return url
 
     def _fetch(self, burl):
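
apiurl() just joins path segments onto <bzurl>/rest and appends the
credentials as a query string. For illustration only (host and key
hypothetical; urlencode order over a dict is not guaranteed)::

    self.apiurl((b'bug', 1234))
    # -> b'https://bugzilla.example.com/rest/bug/1234?api_key=SECRET'
    self.apiurl((b'bug', 1234, b'comment'), include_fields=b'text')
    # -> b'.../rest/bug/1234/comment?api_key=SECRET&include_fields=text'
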
@@ -893,29 +958,30 @@
             return json.loads(resp.read())
         except util.urlerr.httperror as inst:
             if inst.code == 401:
-                raise error.Abort(_('authorization failed'))
+                raise error.Abort(_(b'authorization failed'))
             if inst.code == 404:
                 raise NotFound()
             else:
                 raise
 
-    def _submit(self, burl, data, method='POST'):
+    def _submit(self, burl, data, method=b'POST'):
         data = json.dumps(data)
-        if method == 'PUT':
+        if method == b'PUT':
+
             class putrequest(util.urlreq.request):
                 def get_method(self):
-                    return 'PUT'
+                    return b'PUT'
+
             request_type = putrequest
         else:
             request_type = util.urlreq.request
-        req = request_type(burl, data,
-                           {'Content-Type': 'application/json'})
+        req = request_type(burl, data, {b'Content-Type': b'application/json'})
         try:
             resp = url.opener(self.ui).open(req)
             return json.loads(resp.read())
         except util.urlerr.httperror as inst:
             if inst.code == 401:
-                raise error.Abort(_('authorization failed'))
+                raise error.Abort(_(b'authorization failed'))
             if inst.code == 404:
                 raise NotFound()
             else:
@@ -925,7 +991,7 @@
         '''remove bug IDs that do not exist in Bugzilla from bugs.'''
         badbugs = set()
         for bugid in bugs:
-            burl = self.apiurl(('bug', bugid), include_fields='status')
+            burl = self.apiurl((b'bug', bugid), include_fields=b'status')
             try:
                 self._fetch(burl)
             except NotFound:
@@ -937,12 +1003,16 @@
         '''remove bug IDs where node occurs in comment text from bugs.'''
         sn = short(node)
         for bugid in bugs.keys():
-            burl = self.apiurl(('bug', bugid, 'comment'), include_fields='text')
+            burl = self.apiurl(
+                (b'bug', bugid, b'comment'), include_fields=b'text'
+            )
             result = self._fetch(burl)
-            comments = result['bugs'][pycompat.bytestr(bugid)]['comments']
-            if any(sn in c['text'] for c in comments):
-                self.ui.status(_('bug %d already knows about changeset %s\n') %
-                               (bugid, sn))
+            comments = result[b'bugs'][pycompat.bytestr(bugid)][b'comments']
+            if any(sn in c[b'text'] for c in comments):
+                self.ui.status(
+                    _(b'bug %d already knows about changeset %s\n')
+                    % (bugid, sn)
+                )
                 del bugs[bugid]
 
     def updatebug(self, bugid, newstate, text, committer):
@@ -952,29 +1022,32 @@
         the changeset. Otherwise use the default Bugzilla user.
         '''
         bugmod = {}
-        if 'hours' in newstate:
-            bugmod['work_time'] = newstate['hours']
-        if 'fix' in newstate:
-            bugmod['status'] = self.fixstatus
-            bugmod['resolution'] = self.fixresolution
+        if b'hours' in newstate:
+            bugmod[b'work_time'] = newstate[b'hours']
+        if b'fix' in newstate:
+            bugmod[b'status'] = self.fixstatus
+            bugmod[b'resolution'] = self.fixresolution
         if bugmod:
             # if we have to change the bugs state do it here
-            bugmod['comment'] = {
-                'comment': text,
-                'is_private': False,
-                'is_markdown': False,
+            bugmod[b'comment'] = {
+                b'comment': text,
+                b'is_private': False,
+                b'is_markdown': False,
             }
-            burl = self.apiurl(('bug', bugid))
-            self._submit(burl, bugmod, method='PUT')
-            self.ui.debug('updated bug %s\n' % bugid)
+            burl = self.apiurl((b'bug', bugid))
+            self._submit(burl, bugmod, method=b'PUT')
+            self.ui.debug(b'updated bug %s\n' % bugid)
         else:
-            burl = self.apiurl(('bug', bugid, 'comment'))
-            self._submit(burl, {
-                'comment': text,
-                'is_private': False,
-                'is_markdown': False,
-            })
-            self.ui.debug('added comment to bug %s\n' % bugid)
+            burl = self.apiurl((b'bug', bugid, b'comment'))
+            self._submit(
+                burl,
+                {
+                    b'comment': text,
+                    b'is_private': False,
+                    b'is_markdown': False,
+                },
+            )
+            self.ui.debug(b'added comment to bug %s\n' % bugid)
 
     def notify(self, bugs, committer):
         '''Force sending of Bugzilla notification emails.
@@ -984,34 +1057,38 @@
         '''
         pass
 
+
 class bugzilla(object):
     # supported versions of bugzilla. different versions have
     # different schemas.
     _versions = {
-        '2.16': bzmysql,
-        '2.18': bzmysql_2_18,
-        '3.0':  bzmysql_3_0,
-        'xmlrpc': bzxmlrpc,
-        'xmlrpc+email': bzxmlrpcemail,
-        'restapi': bzrestapi,
-        }
+        b'2.16': bzmysql,
+        b'2.18': bzmysql_2_18,
+        b'3.0': bzmysql_3_0,
+        b'xmlrpc': bzxmlrpc,
+        b'xmlrpc+email': bzxmlrpcemail,
+        b'restapi': bzrestapi,
+    }
 
     def __init__(self, ui, repo):
         self.ui = ui
         self.repo = repo
 
-        bzversion = self.ui.config('bugzilla', 'version')
+        bzversion = self.ui.config(b'bugzilla', b'version')
         try:
             bzclass = bugzilla._versions[bzversion]
         except KeyError:
-            raise error.Abort(_('bugzilla version %s not supported') %
-                             bzversion)
+            raise error.Abort(
+                _(b'bugzilla version %s not supported') % bzversion
+            )
         self.bzdriver = bzclass(self.ui)
 
         self.bug_re = re.compile(
-            self.ui.config('bugzilla', 'regexp'), re.IGNORECASE)
+            self.ui.config(b'bugzilla', b'regexp'), re.IGNORECASE
+        )
         self.fix_re = re.compile(
-            self.ui.config('bugzilla', 'fixregexp'), re.IGNORECASE)
+            self.ui.config(b'bugzilla', b'fixregexp'), re.IGNORECASE
+        )
         self.split_re = re.compile(br'\D+')
 
     def find_bugs(self, ctx):
@@ -1042,25 +1119,25 @@
             start = m.end()
             if m is bugmatch:
                 bugmatch = self.bug_re.search(ctx.description(), start)
-                if 'fix' in bugattribs:
-                    del bugattribs['fix']
+                if b'fix' in bugattribs:
+                    del bugattribs[b'fix']
             else:
                 fixmatch = self.fix_re.search(ctx.description(), start)
-                bugattribs['fix'] = None
+                bugattribs[b'fix'] = None
 
             try:
-                ids = m.group('ids')
+                ids = m.group(b'ids')
             except IndexError:
                 ids = m.group(1)
             try:
-                hours = float(m.group('hours'))
-                bugattribs['hours'] = hours
+                hours = float(m.group(b'hours'))
+                bugattribs[b'hours'] = hours
             except IndexError:
                 pass
             except TypeError:
                 pass
             except ValueError:
-                self.ui.status(_("%s: invalid hours\n") % m.group('hours'))
+                self.ui.status(_(b"%s: invalid hours\n") % m.group(b'hours'))
 
             for id in self.split_re.split(ids):
                 if not id:
@@ -1078,46 +1155,54 @@
         def webroot(root):
             '''strip leading prefix of repo root and turn into
             url-safe path.'''
-            count = int(self.ui.config('bugzilla', 'strip'))
+            count = int(self.ui.config(b'bugzilla', b'strip'))
             root = util.pconvert(root)
             while count > 0:
-                c = root.find('/')
+                c = root.find(b'/')
                 if c == -1:
                     break
-                root = root[c + 1:]
+                root = root[c + 1 :]
                 count -= 1
             return root
 
         mapfile = None
-        tmpl = self.ui.config('bugzilla', 'template')
+        tmpl = self.ui.config(b'bugzilla', b'template')
         if not tmpl:
-            mapfile = self.ui.config('bugzilla', 'style')
+            mapfile = self.ui.config(b'bugzilla', b'style')
         if not mapfile and not tmpl:
-            tmpl = _('changeset {node|short} in repo {root} refers '
-                     'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
+            tmpl = _(
+                b'changeset {node|short} in repo {root} refers '
+                b'to bug {bug}.\ndetails:\n\t{desc|tabindent}'
+            )
         spec = logcmdutil.templatespec(tmpl, mapfile)
         t = logcmdutil.changesettemplater(self.ui, self.repo, spec)
         self.ui.pushbuffer()
-        t.show(ctx, changes=ctx.changeset(),
-               bug=pycompat.bytestr(bugid),
-               hgweb=self.ui.config('web', 'baseurl'),
-               root=self.repo.root,
-               webroot=webroot(self.repo.root))
+        t.show(
+            ctx,
+            changes=ctx.changeset(),
+            bug=pycompat.bytestr(bugid),
+            hgweb=self.ui.config(b'web', b'baseurl'),
+            root=self.repo.root,
+            webroot=webroot(self.repo.root),
+        )
         data = self.ui.popbuffer()
-        self.bzdriver.updatebug(bugid, newstate, data,
-                                stringutil.email(ctx.user()))
+        self.bzdriver.updatebug(
+            bugid, newstate, data, stringutil.email(ctx.user())
+        )
 
     def notify(self, bugs, committer):
         '''ensure Bugzilla users are notified of bug change.'''
         self.bzdriver.notify(bugs, committer)
 
+
 def hook(ui, repo, hooktype, node=None, **kwargs):
     '''add comment to bugzilla for each changeset that refers to a
     bugzilla bug id. only add a comment once per bug, so same change
     seen multiple times does not fill bug with duplicate data.'''
     if node is None:
-        raise error.Abort(_('hook type %s does not pass a changeset id') %
-                         hooktype)
+        raise error.Abort(
+            _(b'hook type %s does not pass a changeset id') % hooktype
+        )
     try:
         bz = bugzilla(ui, repo)
         ctx = repo[node]
@@ -1127,4 +1212,4 @@
                 bz.update(bug, bugs[bug], ctx)
             bz.notify(bugs, stringutil.email(ctx.user()))
     except Exception as e:
-        raise error.Abort(_('Bugzilla error: %s') % e)
+        raise error.Abort(_(b'Bugzilla error: %s') % e)
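
The hook() entry point above is what users wire into their configuration; the
extension's documentation suggests the repository-side form::

    [extensions]
    bugzilla =

    [hooks]
    # run the Bugzilla hook on every changeset pulled or pushed in here
    incoming.bugzilla = python:hgext.bugzilla.hook
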
--- a/hgext/censor.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/censor.py	Mon Oct 21 11:09:48 2019 -0400
@@ -42,43 +42,55 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
-@command('censor',
-    [('r', 'rev', '', _('censor file from specified revision'), _('REV')),
-     ('t', 'tombstone', '', _('replacement tombstone data'), _('TEXT'))],
-    _('-r REV [-t TEXT] [FILE]'),
-    helpcategory=command.CATEGORY_MAINTENANCE)
-def censor(ui, repo, path, rev='', tombstone='', **opts):
+@command(
+    b'censor',
+    [
+        (
+            b'r',
+            b'rev',
+            b'',
+            _(b'censor file from specified revision'),
+            _(b'REV'),
+        ),
+        (b't', b'tombstone', b'', _(b'replacement tombstone data'), _(b'TEXT')),
+    ],
+    _(b'-r REV [-t TEXT] [FILE]'),
+    helpcategory=command.CATEGORY_MAINTENANCE,
+)
+def censor(ui, repo, path, rev=b'', tombstone=b'', **opts):
     with repo.wlock(), repo.lock():
         return _docensor(ui, repo, path, rev, tombstone, **opts)
 
-def _docensor(ui, repo, path, rev='', tombstone='', **opts):
+
+def _docensor(ui, repo, path, rev=b'', tombstone=b'', **opts):
     if not path:
-        raise error.Abort(_('must specify file path to censor'))
+        raise error.Abort(_(b'must specify file path to censor'))
     if not rev:
-        raise error.Abort(_('must specify revision to censor'))
+        raise error.Abort(_(b'must specify revision to censor'))
 
     wctx = repo[None]
 
     m = scmutil.match(wctx, (path,))
     if m.anypats() or len(m.files()) != 1:
-        raise error.Abort(_('can only specify an explicit filename'))
+        raise error.Abort(_(b'can only specify an explicit filename'))
     path = m.files()[0]
     flog = repo.file(path)
     if not len(flog):
-        raise error.Abort(_('cannot censor file with no history'))
+        raise error.Abort(_(b'cannot censor file with no history'))
 
     rev = scmutil.revsingle(repo, rev, rev).rev()
     try:
         ctx = repo[rev]
     except KeyError:
-        raise error.Abort(_('invalid revision identifier %s') % rev)
+        raise error.Abort(_(b'invalid revision identifier %s') % rev)
 
     try:
         fctx = ctx.filectx(path)
     except error.LookupError:
-        raise error.Abort(_('file does not exist at revision %s') % rev)
+        raise error.Abort(_(b'file does not exist at revision %s') % rev)
 
     fnode = fctx.filenode()
     heads = []
@@ -87,14 +99,18 @@
         if path in hc and hc.filenode(path) == fnode:
             heads.append(hc)
     if heads:
-        headlist = ', '.join([short(c.node()) for c in heads])
-        raise error.Abort(_('cannot censor file in heads (%s)') % headlist,
-            hint=_('clean/delete and commit first'))
+        headlist = b', '.join([short(c.node()) for c in heads])
+        raise error.Abort(
+            _(b'cannot censor file in heads (%s)') % headlist,
+            hint=_(b'clean/delete and commit first'),
+        )
 
     wp = wctx.parents()
     if ctx.node() in [p.node() for p in wp]:
-        raise error.Abort(_('cannot censor working directory'),
-            hint=_('clean/delete/update first'))
+        raise error.Abort(
+            _(b'cannot censor working directory'),
+            hint=_(b'clean/delete/update first'),
+        )
 
     with repo.transaction(b'censor') as tr:
         flog.censorrevision(tr, fnode, tombstone=tombstone)
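
With the option table above, a censor run looks like this (revision,
tombstone, and path hypothetical)::

    $ hg censor -r 1e0b3ad5b9f1 -t "secret key scrubbed" src/deploy.key

Per the checks in _docensor, the targeted file revision may not be reachable
from any head or from the working directory parent, so a clean-up commit
usually has to land first.
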
--- a/hgext/children.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/children.py	Mon Oct 21 11:09:48 2019 -0400
@@ -33,15 +33,25 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
-@command('children',
-    [('r', 'rev', '.',
-     _('show children of the specified revision'), _('REV')),
-    ] + templateopts,
-    _('hg children [-r REV] [FILE]'),
+@command(
+    b'children',
+    [
+        (
+            b'r',
+            b'rev',
+            b'.',
+            _(b'show children of the specified revision'),
+            _(b'REV'),
+        ),
+    ]
+    + templateopts,
+    _(b'hg children [-r REV] [FILE]'),
     helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
-    inferrepo=True)
+    inferrepo=True,
+)
 def children(ui, repo, file_=None, **opts):
     """show the children of the given or working directory revision
 
@@ -60,7 +70,7 @@
 
     """
     opts = pycompat.byteskwargs(opts)
-    rev = opts.get('rev')
+    rev = opts.get(b'rev')
     ctx = scmutil.revsingle(repo, rev)
     if file_:
         fctx = repo.filectx(file_, changeid=ctx.rev())
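
Typical invocations, unchanged by the reformatting (revision and file
hypothetical)::

    $ hg children -r 4f2e
    $ hg children src/module.c
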
--- a/hgext/churn.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/churn.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,6 +15,7 @@
 import time
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     cmdutil,
     encoding,
@@ -32,57 +33,65 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
 def changedlines(ui, repo, ctx1, ctx2, fns):
     added, removed = 0, 0
     fmatch = scmutil.matchfiles(repo, fns)
-    diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
-    for l in diff.split('\n'):
-        if l.startswith("+") and not l.startswith("+++ "):
+    diff = b''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
+    for l in diff.split(b'\n'):
+        if l.startswith(b"+") and not l.startswith(b"+++ "):
             added += 1
-        elif l.startswith("-") and not l.startswith("--- "):
+        elif l.startswith(b"-") and not l.startswith(b"--- "):
             removed += 1
     return (added, removed)
 
+
 def countrate(ui, repo, amap, *pats, **opts):
     """Calculate stats"""
     opts = pycompat.byteskwargs(opts)
-    if opts.get('dateformat'):
+    if opts.get(b'dateformat'):
+
         def getkey(ctx):
             t, tz = ctx.date()
             date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
             return encoding.strtolocal(
-                date.strftime(encoding.strfromlocal(opts['dateformat'])))
+                date.strftime(encoding.strfromlocal(opts[b'dateformat']))
+            )
+
     else:
-        tmpl = opts.get('oldtemplate') or opts.get('template')
+        tmpl = opts.get(b'oldtemplate') or opts.get(b'template')
         tmpl = logcmdutil.maketemplater(ui, repo, tmpl)
+
         def getkey(ctx):
             ui.pushbuffer()
             tmpl.show(ctx)
             return ui.popbuffer()
 
-    progress = ui.makeprogress(_('analyzing'), unit=_('revisions'),
-                               total=len(repo))
+    progress = ui.makeprogress(
+        _(b'analyzing'), unit=_(b'revisions'), total=len(repo)
+    )
     rate = {}
     df = False
-    if opts.get('date'):
-        df = dateutil.matchdate(opts['date'])
+    if opts.get(b'date'):
+        df = dateutil.matchdate(opts[b'date'])
 
     m = scmutil.match(repo[None], pats, opts)
+
     def prep(ctx, fns):
         rev = ctx.rev()
-        if df and not df(ctx.date()[0]): # doesn't match date format
+        if df and not df(ctx.date()[0]):  # doesn't match date format
             return
 
         key = getkey(ctx).strip()
-        key = amap.get(key, key) # alias remap
-        if opts.get('changesets'):
+        key = amap.get(key, key)  # alias remap
+        if opts.get(b'changesets'):
             rate[key] = (rate.get(key, (0,))[0] + 1, 0)
         else:
             parents = ctx.parents()
             if len(parents) > 1:
-                ui.note(_('revision %d is a merge, ignoring...\n') % (rev,))
+                ui.note(_(b'revision %d is a merge, ignoring...\n') % (rev,))
                 return
 
             ctx1 = parents[0]
@@ -99,25 +108,54 @@
     return rate
 
 
-@command('churn',
-    [('r', 'rev', [],
-     _('count rate for the specified revision or revset'), _('REV')),
-    ('d', 'date', '',
-     _('count rate for revisions matching date spec'), _('DATE')),
-    ('t', 'oldtemplate', '',
-     _('template to group changesets (DEPRECATED)'), _('TEMPLATE')),
-    ('T', 'template', '{author|email}',
-     _('template to group changesets'), _('TEMPLATE')),
-    ('f', 'dateformat', '',
-     _('strftime-compatible format for grouping by date'), _('FORMAT')),
-    ('c', 'changesets', False, _('count rate by number of changesets')),
-    ('s', 'sort', False, _('sort by key (default: sort by count)')),
-    ('', 'diffstat', False, _('display added/removed lines separately')),
-    ('', 'aliases', '', _('file with email aliases'), _('FILE')),
-    ] + cmdutil.walkopts,
-    _("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]"),
+@command(
+    b'churn',
+    [
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'count rate for the specified revision or revset'),
+            _(b'REV'),
+        ),
+        (
+            b'd',
+            b'date',
+            b'',
+            _(b'count rate for revisions matching date spec'),
+            _(b'DATE'),
+        ),
+        (
+            b't',
+            b'oldtemplate',
+            b'',
+            _(b'template to group changesets (DEPRECATED)'),
+            _(b'TEMPLATE'),
+        ),
+        (
+            b'T',
+            b'template',
+            b'{author|email}',
+            _(b'template to group changesets'),
+            _(b'TEMPLATE'),
+        ),
+        (
+            b'f',
+            b'dateformat',
+            b'',
+            _(b'strftime-compatible format for grouping by date'),
+            _(b'FORMAT'),
+        ),
+        (b'c', b'changesets', False, _(b'count rate by number of changesets')),
+        (b's', b'sort', False, _(b'sort by key (default: sort by count)')),
+        (b'', b'diffstat', False, _(b'display added/removed lines separately')),
+        (b'', b'aliases', b'', _(b'file with email aliases'), _(b'FILE')),
+    ]
+    + cmdutil.walkopts,
+    _(b"hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]"),
     helpcategory=command.CATEGORY_MAINTENANCE,
-    inferrepo=True)
+    inferrepo=True,
+)
 def churn(ui, repo, *pats, **opts):
     '''histogram of changes to the repository
 
@@ -154,22 +192,23 @@
     a .hgchurn file will be looked for in the working directory root.
     Aliases will be split from the rightmost "=".
     '''
+
     def pad(s, l):
-        return s + " " * (l - encoding.colwidth(s))
+        return s + b" " * (l - encoding.colwidth(s))
 
     amap = {}
     aliases = opts.get(r'aliases')
-    if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
-        aliases = repo.wjoin('.hgchurn')
+    if not aliases and os.path.exists(repo.wjoin(b'.hgchurn')):
+        aliases = repo.wjoin(b'.hgchurn')
     if aliases:
-        for l in open(aliases, "rb"):
+        for l in open(aliases, b"rb"):
             try:
-                alias, actual = l.rsplit('=' in l and '=' or None, 1)
+                alias, actual = l.rsplit(b'=' in l and b'=' or None, 1)
                 amap[alias.strip()] = actual.strip()
             except ValueError:
                 l = l.strip()
                 if l:
-                    ui.warn(_("skipping malformed alias: %s\n") % l)
+                    ui.warn(_(b"skipping malformed alias: %s\n") % l)
                 continue
 
     rate = list(countrate(ui, repo, amap, *pats, **opts).items())
@@ -186,24 +225,30 @@
     maxname = max(len(k) for k, v in rate)
 
     ttywidth = ui.termwidth()
-    ui.debug("assuming %i character terminal\n" % ttywidth)
+    ui.debug(b"assuming %i character terminal\n" % ttywidth)
     width = ttywidth - maxname - 2 - 2 - 2
 
     if opts.get(r'diffstat'):
         width -= 15
+
         def format(name, diffstat):
             added, removed = diffstat
-            return "%s %15s %s%s\n" % (pad(name, maxname),
-                                       '+%d/-%d' % (added, removed),
-                                       ui.label('+' * charnum(added),
-                                                'diffstat.inserted'),
-                                       ui.label('-' * charnum(removed),
-                                                'diffstat.deleted'))
+            return b"%s %15s %s%s\n" % (
+                pad(name, maxname),
+                b'+%d/-%d' % (added, removed),
+                ui.label(b'+' * charnum(added), b'diffstat.inserted'),
+                ui.label(b'-' * charnum(removed), b'diffstat.deleted'),
+            )
+
     else:
         width -= 6
+
         def format(name, count):
-            return "%s %6d %s\n" % (pad(name, maxname), sum(count),
-                                    '*' * charnum(sum(count)))
+            return b"%s %6d %s\n" % (
+                pad(name, maxname),
+                sum(count),
+                b'*' * charnum(sum(count)),
+            )
 
     def charnum(count):
         return int(count * width // maxcount)
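
The alias map fed to countrate() is a plain text file split from the
rightmost "=", as the docstring notes, so keys may themselves contain "=".
A hypothetical .hgchurn::

    alice@laptop.example.com = alice
    alice@work.example.com = alice
    build=bot@example.com = infrastructure

Grouping by month then charts per-author activity, e.g.
hg churn -f "%Y-%m" -s -c.
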
--- a/hgext/clonebundles.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/clonebundles.py	Mon Oct 21 11:09:48 2019 -0400
@@ -201,7 +201,8 @@
     wireprotov1server,
 )
 
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
 def capabilities(orig, repo, proto):
     caps = orig(repo, proto)
@@ -209,10 +210,11 @@
     # Only advertise if a manifest exists. This does add some I/O to requests.
     # But this should be cheaper than a wasted network round trip due to
     # missing file.
-    if repo.vfs.exists('clonebundles.manifest'):
-        caps.append('clonebundles')
+    if repo.vfs.exists(b'clonebundles.manifest'):
+        caps.append(b'clonebundles')
 
     return caps
 
+
 def extsetup(ui):
-    extensions.wrapfunction(wireprotov1server, '_capabilities', capabilities)
+    extensions.wrapfunction(wireprotov1server, '_capabilities', capabilities)
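
The wrapped _capabilities only advertises clonebundles when
clonebundles.manifest exists in the repository's .hg directory, so enabling
the feature amounts to dropping a manifest there; a hypothetical one-entry
example (URL invented)::

    $ cat .hg/clonebundles.manifest
    https://hg.example.com/bundles/repo-full.hg.gz BUNDLESPEC=gzip-v2
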
--- a/hgext/closehead.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/closehead.py	Mon Oct 21 11:09:48 2019 -0400
@@ -24,17 +24,20 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 commitopts = cmdutil.commitopts
 commitopts2 = cmdutil.commitopts2
-commitopts3 = [('r', 'rev', [],
-               _('revision to check'), _('REV'))]
+commitopts3 = [(b'r', b'rev', [], _(b'revision to check'), _(b'REV'))]
+
 
-@command('close-head|close-heads', commitopts + commitopts2 + commitopts3,
-    _('[OPTION]... [REV]...'),
+@command(
+    b'close-head|close-heads',
+    commitopts + commitopts2 + commitopts3,
+    _(b'[OPTION]... [REV]...'),
     helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
-    inferrepo=True)
+    inferrepo=True,
+)
 def close_branch(ui, repo, *revs, **opts):
     """close the given head revisions
 
@@ -44,11 +47,19 @@
 
     The commit message must be specified with -l or -m.
     """
+
     def docommit(rev):
-        cctx = context.memctx(repo, parents=[rev, None], text=message,
-                              files=[], filectxfn=None, user=opts.get('user'),
-                              date=opts.get('date'), extra=extra)
-        tr = repo.transaction('commit')
+        cctx = context.memctx(
+            repo,
+            parents=[rev, None],
+            text=message,
+            files=[],
+            filectxfn=None,
+            user=opts.get(b'user'),
+            date=opts.get(b'date'),
+            extra=extra,
+        )
+        tr = repo.transaction(b'commit')
         ret = repo.commitctx(cctx, True)
         bookmarks.update(repo, [rev, None], ret)
         cctx.markcommitted(ret)
@@ -56,11 +67,11 @@
 
     opts = pycompat.byteskwargs(opts)
 
-    revs += tuple(opts.get('rev', []))
+    revs += tuple(opts.get(b'rev', []))
     revs = scmutil.revrange(repo, revs)
 
     if not revs:
-        raise error.Abort(_('no revisions specified'))
+        raise error.Abort(_(b'no revisions specified'))
 
     heads = []
     for branch in repo.branchmap():
@@ -68,17 +79,17 @@
     heads = set(repo[h].rev() for h in heads)
     for rev in revs:
         if rev not in heads:
-            raise error.Abort(_('revision is not an open head: %d') % rev)
+            raise error.Abort(_(b'revision is not an open head: %d') % rev)
 
     message = cmdutil.logmessage(ui, opts)
     if not message:
-        raise error.Abort(_("no commit message specified with -l or -m"))
-    extra = { 'close': '1' }
+        raise error.Abort(_(b"no commit message specified with -l or -m"))
+    extra = {b'close': b'1'}
 
     with repo.wlock(), repo.lock():
         for rev in revs:
             r = repo[rev]
             branch = r.branch()
-            extra['branch'] = branch
+            extra[b'branch'] = branch
             docommit(r)
     return 0
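
End to end, close_branch() produces one empty close commit per listed head
(revision and message hypothetical)::

    $ hg close-head -m "retire abandoned feature head" 7f1a9c

Each memctx carries extra={'close': '1', 'branch': <head's branch>}, the same
marker that hg commit --close-branch records.
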
--- a/hgext/commitextras.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/commitextras.py	Mon Oct 21 11:09:48 2019 -0400
@@ -22,51 +22,68 @@
 
 cmdtable = {}
 command = registrar.command(cmdtable)
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 usedinternally = {
-    'amend_source',
-    'branch',
-    'close',
-    'histedit_source',
-    'topic',
-    'rebase_source',
-    'intermediate-source',
-    '__touch-noise__',
-    'source',
-    'transplant_source',
+    b'amend_source',
+    b'branch',
+    b'close',
+    b'histedit_source',
+    b'topic',
+    b'rebase_source',
+    b'intermediate-source',
+    b'__touch-noise__',
+    b'source',
+    b'transplant_source',
 }
 
+
 def extsetup(ui):
-    entry = extensions.wrapcommand(commands.table, 'commit', _commit)
+    entry = extensions.wrapcommand(commands.table, b'commit', _commit)
     options = entry[1]
-    options.append(('', 'extra', [],
-        _('set a changeset\'s extra values'), _("KEY=VALUE")))
+    options.append(
+        (
+            b'',
+            b'extra',
+            [],
+            _(b'set a changeset\'s extra values'),
+            _(b"KEY=VALUE"),
+        )
+    )
+
 
 def _commit(orig, ui, repo, *pats, **opts):
     if util.safehasattr(repo, 'unfiltered'):
         repo = repo.unfiltered()
+
     class repoextra(repo.__class__):
         def commit(self, *innerpats, **inneropts):
             extras = opts.get(r'extra')
             for raw in extras:
-                if '=' not in raw:
-                    msg = _("unable to parse '%s', should follow "
-                            "KEY=VALUE format")
+                if b'=' not in raw:
+                    msg = _(
+                        b"unable to parse '%s', should follow "
+                        b"KEY=VALUE format"
+                    )
                     raise error.Abort(msg % raw)
-                k, v = raw.split('=', 1)
+                k, v = raw.split(b'=', 1)
                 if not k:
-                    msg = _("unable to parse '%s', keys can't be empty")
+                    msg = _(b"unable to parse '%s', keys can't be empty")
                     raise error.Abort(msg % raw)
                 if re.search(br'[^\w-]', k):
-                    msg = _("keys can only contain ascii letters, digits,"
-                            " '_' and '-'")
+                    msg = _(
+                        b"keys can only contain ascii letters, digits,"
+                        b" '_' and '-'"
+                    )
                     raise error.Abort(msg)
                 if k in usedinternally:
-                    msg = _("key '%s' is used internally, can't be set "
-                            "manually")
+                    msg = _(
+                        b"key '%s' is used internally, can't be set "
+                        b"manually"
+                    )
                     raise error.Abort(msg % k)
                 inneropts[r'extra'][k] = v
             return super(repoextra, self).commit(*innerpats, **inneropts)
+
     repo.__class__ = repoextra
     return orig(ui, repo, *pats, **opts)
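
With the wrapper installed, commit accepts repeatable --extra KEY=VALUE
options; keys must match [\w-]+ and avoid the reserved names listed above
(key and value hypothetical)::

    $ hg commit -m "vendor import" --extra sync-source=vendor-2019.10
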
--- a/hgext/convert/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,9 +10,7 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
-from mercurial import (
-    registrar,
-)
+from mercurial import registrar
 
 from . import (
     convcmd,
@@ -26,32 +24,74 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 # Commands definition was moved elsewhere to ease demandload job.
 
-@command('convert',
-    [('', 'authors', '',
-      _('username mapping filename (DEPRECATED) (use --authormap instead)'),
-      _('FILE')),
-    ('s', 'source-type', '', _('source repository type'), _('TYPE')),
-    ('d', 'dest-type', '', _('destination repository type'), _('TYPE')),
-    ('r', 'rev', [], _('import up to source revision REV'), _('REV')),
-    ('A', 'authormap', '', _('remap usernames using this file'), _('FILE')),
-    ('', 'filemap', '', _('remap file names using contents of file'),
-     _('FILE')),
-    ('', 'full', None,
-     _('apply filemap changes by converting all files again')),
-    ('', 'splicemap', '', _('splice synthesized history into place'),
-     _('FILE')),
-    ('', 'branchmap', '', _('change branch names while converting'),
-     _('FILE')),
-    ('', 'branchsort', None, _('try to sort changesets by branches')),
-    ('', 'datesort', None, _('try to sort changesets by date')),
-    ('', 'sourcesort', None, _('preserve source changesets order')),
-    ('', 'closesort', None, _('try to reorder closed revisions'))],
-   _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]'),
-   norepo=True)
+
+@command(
+    b'convert',
+    [
+        (
+            b'',
+            b'authors',
+            b'',
+            _(
+                b'username mapping filename (DEPRECATED) (use --authormap instead)'
+            ),
+            _(b'FILE'),
+        ),
+        (b's', b'source-type', b'', _(b'source repository type'), _(b'TYPE')),
+        (
+            b'd',
+            b'dest-type',
+            b'',
+            _(b'destination repository type'),
+            _(b'TYPE'),
+        ),
+        (b'r', b'rev', [], _(b'import up to source revision REV'), _(b'REV')),
+        (
+            b'A',
+            b'authormap',
+            b'',
+            _(b'remap usernames using this file'),
+            _(b'FILE'),
+        ),
+        (
+            b'',
+            b'filemap',
+            b'',
+            _(b'remap file names using contents of file'),
+            _(b'FILE'),
+        ),
+        (
+            b'',
+            b'full',
+            None,
+            _(b'apply filemap changes by converting all files again'),
+        ),
+        (
+            b'',
+            b'splicemap',
+            b'',
+            _(b'splice synthesized history into place'),
+            _(b'FILE'),
+        ),
+        (
+            b'',
+            b'branchmap',
+            b'',
+            _(b'change branch names while converting'),
+            _(b'FILE'),
+        ),
+        (b'', b'branchsort', None, _(b'try to sort changesets by branches')),
+        (b'', b'datesort', None, _(b'try to sort changesets by date')),
+        (b'', b'sourcesort', None, _(b'preserve source changesets order')),
+        (b'', b'closesort', None, _(b'try to reorder closed revisions')),
+    ],
+    _(b'hg convert [OPTION]... SOURCE [DEST [REVMAP]]'),
+    norepo=True,
+)
 def convert(ui, src, dest=None, revmapfile=None, **opts):
     """convert a foreign SCM repository to a Mercurial one.
 
@@ -454,29 +494,47 @@
     """
     return convcmd.convert(ui, src, dest, revmapfile, **opts)
 
-@command('debugsvnlog', [], 'hg debugsvnlog', norepo=True)
+
+@command(b'debugsvnlog', [], b'hg debugsvnlog', norepo=True)
 def debugsvnlog(ui, **opts):
     return subversion.debugsvnlog(ui, **opts)
 
-@command('debugcvsps',
+
+@command(
+    b'debugcvsps',
     [
-    # Main options shared with cvsps-2.1
-    ('b', 'branches', [], _('only return changes on specified branches')),
-    ('p', 'prefix', '', _('prefix to remove from file names')),
-    ('r', 'revisions', [],
-     _('only return changes after or between specified tags')),
-    ('u', 'update-cache', None, _("update cvs log cache")),
-    ('x', 'new-cache', None, _("create new cvs log cache")),
-    ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
-    ('', 'root', '', _('specify cvsroot')),
-    # Options specific to builtin cvsps
-    ('', 'parents', '', _('show parent changesets')),
-    ('', 'ancestors', '', _('show current changeset in ancestor branches')),
-    # Options that are ignored for compatibility with cvsps-2.1
-    ('A', 'cvs-direct', None, _('ignored for compatibility')),
+        # Main options shared with cvsps-2.1
+        (
+            b'b',
+            b'branches',
+            [],
+            _(b'only return changes on specified branches'),
+        ),
+        (b'p', b'prefix', b'', _(b'prefix to remove from file names')),
+        (
+            b'r',
+            b'revisions',
+            [],
+            _(b'only return changes after or between specified tags'),
+        ),
+        (b'u', b'update-cache', None, _(b"update cvs log cache")),
+        (b'x', b'new-cache', None, _(b"create new cvs log cache")),
+        (b'z', b'fuzz', 60, _(b'set commit time fuzz in seconds')),
+        (b'', b'root', b'', _(b'specify cvsroot')),
+        # Options specific to builtin cvsps
+        (b'', b'parents', b'', _(b'show parent changesets')),
+        (
+            b'',
+            b'ancestors',
+            b'',
+            _(b'show current changeset in ancestor branches'),
+        ),
+        # Options that are ignored for compatibility with cvsps-2.1
+        (b'A', b'cvs-direct', None, _(b'ignored for compatibility')),
     ],
-    _('hg debugcvsps [OPTION]... [PATH]...'),
-    norepo=True)
+    _(b'hg debugcvsps [OPTION]... [PATH]...'),
+    norepo=True,
+)
 def debugcvsps(ui, *args, **opts):
     '''create changeset information from CVS
 
@@ -490,34 +548,40 @@
     dates.'''
     return cvsps.debugcvsps(ui, *args, **opts)
 
+
 def kwconverted(context, mapping, name):
-    ctx = context.resource(mapping, 'ctx')
-    rev = ctx.extra().get('convert_revision', '')
-    if rev.startswith('svn:'):
-        if name == 'svnrev':
-            return (b"%d" % subversion.revsplit(rev)[2])
-        elif name == 'svnpath':
+    ctx = context.resource(mapping, b'ctx')
+    rev = ctx.extra().get(b'convert_revision', b'')
+    if rev.startswith(b'svn:'):
+        if name == b'svnrev':
+            return b"%d" % subversion.revsplit(rev)[2]
+        elif name == b'svnpath':
             return subversion.revsplit(rev)[1]
-        elif name == 'svnuuid':
+        elif name == b'svnuuid':
             return subversion.revsplit(rev)[0]
     return rev
 
+
 templatekeyword = registrar.templatekeyword()
 
-@templatekeyword('svnrev', requires={'ctx'})
+
+@templatekeyword(b'svnrev', requires={b'ctx'})
 def kwsvnrev(context, mapping):
     """String. Converted subversion revision number."""
-    return kwconverted(context, mapping, 'svnrev')
+    return kwconverted(context, mapping, b'svnrev')
 
-@templatekeyword('svnpath', requires={'ctx'})
+
+@templatekeyword(b'svnpath', requires={b'ctx'})
 def kwsvnpath(context, mapping):
     """String. Converted subversion revision project path."""
-    return kwconverted(context, mapping, 'svnpath')
+    return kwconverted(context, mapping, b'svnpath')
 
-@templatekeyword('svnuuid', requires={'ctx'})
+
+@templatekeyword(b'svnuuid', requires={b'ctx'})
 def kwsvnuuid(context, mapping):
     """String. Converted subversion revision repository identifier."""
-    return kwconverted(context, mapping, 'svnuuid')
+    return kwconverted(context, mapping, b'svnuuid')
+
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = [kwsvnrev, kwsvnpath, kwsvnuuid]
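
Both the command and the template keywords register at import time, so usage
looks like (paths and repository names hypothetical)::

    $ hg convert --source-type bzr ../project.bzr project-hg
    $ hg log -R converted-from-svn -r tip -T '{svnrev} {svnpath} {svnuuid}\n'

kwconverted() reads convert_revision from the changeset extras, splitting it
for svn: revisions and returning the raw value for other sources.
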
--- a/hgext/convert/bzr.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/bzr.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,16 +14,15 @@
 from mercurial.i18n import _
 from mercurial import (
     demandimport,
-    error
+    error,
+    pycompat,
 )
 from . import common
 
 # these do not work with demandimport, blacklist
-demandimport.IGNORES.update([
-        'bzrlib.transactions',
-        'bzrlib.urlutils',
-        'ElementPath',
-    ])
+demandimport.IGNORES.update(
+    [b'bzrlib.transactions', b'bzrlib.urlutils', b'ElementPath',]
+)
 
 try:
     # bazaar imports
@@ -31,6 +30,7 @@
     import bzrlib.errors
     import bzrlib.revision
     import bzrlib.revisionspec
+
     bzrdir = bzrlib.bzrdir
     errors = bzrlib.errors
     revision = bzrlib.revision
@@ -39,7 +39,8 @@
 except ImportError:
     pass
 
-supportedkinds = ('file', 'symlink')
+supportedkinds = (b'file', b'symlink')
+
 
 class bzr_source(common.converter_source):
     """Reads Bazaar repositories by using the Bazaar Python libraries"""
@@ -47,25 +48,27 @@
     def __init__(self, ui, repotype, path, revs=None):
         super(bzr_source, self).__init__(ui, repotype, path, revs=revs)
 
-        if not os.path.exists(os.path.join(path, '.bzr')):
-            raise common.NoRepo(_('%s does not look like a Bazaar repository')
-                              % path)
+        if not os.path.exists(os.path.join(path, b'.bzr')):
+            raise common.NoRepo(
+                _(b'%s does not look like a Bazaar repository') % path
+            )
 
         try:
             # access bzrlib stuff
             bzrdir
         except NameError:
-            raise common.NoRepo(_('Bazaar modules could not be loaded'))
+            raise common.NoRepo(_(b'Bazaar modules could not be loaded'))
 
         path = os.path.abspath(path)
         self._checkrepotype(path)
         try:
             self.sourcerepo = bzrdir.BzrDir.open(path).open_repository()
         except errors.NoRepositoryPresent:
-            raise common.NoRepo(_('%s does not look like a Bazaar repository')
-                              % path)
+            raise common.NoRepo(
+                _(b'%s does not look like a Bazaar repository') % path
+            )
         self._parentids = {}
-        self._saverev = ui.configbool('convert', 'bzr.saverev')
+        self._saverev = ui.configbool(b'convert', b'bzr.saverev')
 
     def _checkrepotype(self, path):
         # Lightweight checkouts detection is informational but probably
@@ -78,13 +81,20 @@
             except (errors.NoWorkingTree, errors.NotLocalUrl):
                 tree = None
                 branch = dir.open_branch()
-            if (tree is not None and tree.bzrdir.root_transport.base !=
-                branch.bzrdir.root_transport.base):
-                self.ui.warn(_('warning: lightweight checkouts may cause '
-                               'conversion failures, try with a regular '
-                               'branch instead.\n'))
+            if (
+                tree is not None
+                and tree.bzrdir.root_transport.base
+                != branch.bzrdir.root_transport.base
+            ):
+                self.ui.warn(
+                    _(
+                        b'warning: lightweight checkouts may cause '
+                        b'conversion failures, try with a regular '
+                        b'branch instead.\n'
+                    )
+                )
         except Exception:
-            self.ui.note(_('bzr source type could not be determined\n'))
+            self.ui.note(_(b'bzr source type could not be determined\n'))
 
     def before(self):
         """Before the conversion begins, acquire a read lock
@@ -119,16 +129,17 @@
                     pass
                 revid = info.rev_id
             if revid is None:
-                raise error.Abort(_('%s is not a valid revision')
-                                  % self.revs[0])
+                raise error.Abort(
+                    _(b'%s is not a valid revision') % self.revs[0]
+                )
             heads = [revid]
         # Empty repositories return 'null:', which cannot be retrieved
-        heads = [h for h in heads if h != 'null:']
+        heads = [h for h in heads if h != b'null:']
         return heads
 
     def getfile(self, name, rev):
         revtree = self.sourcerepo.revision_tree(rev)
-        fileid = revtree.path2id(name.decode(self.encoding or 'utf-8'))
+        fileid = revtree.path2id(
+            name.decode(pycompat.sysstr(self.encoding or b'utf-8'))
+        )
         kind = None
         if fileid is not None:
             kind = revtree.kind(fileid)
@@ -136,11 +147,12 @@
             # the file is not available anymore - was deleted
             return None, None
         mode = self._modecache[(name, rev)]
-        if kind == 'symlink':
+        if kind == b'symlink':
             target = revtree.get_symlink_target(fileid)
             if target is None:
-                raise error.Abort(_('%s.%s symlink has no target')
-                                 % (name, rev))
+                raise error.Abort(
+                    _(b'%s.%s symlink has no target') % (name, rev)
+                )
             return target, mode
         else:
             sio = revtree.get_file(fileid)
@@ -148,7 +160,7 @@
 
     def getchanges(self, version, full):
         if full:
-            raise error.Abort(_("convert from cvs does not support --full"))
+            raise error.Abort(_(b"convert from cvs does not support --full"))
         self._modecache = {}
         self._revtree = self.sourcerepo.revision_tree(version)
         # get the parentids from the cache
@@ -168,16 +180,18 @@
             parents = self._filterghosts(rev.parent_ids)
             self._parentids[version] = parents
 
-        branch = self.recode(rev.properties.get('branch-nick', u'default'))
-        if branch == 'trunk':
-            branch = 'default'
-        return common.commit(parents=parents,
-                date='%d %d' % (rev.timestamp, -rev.timezone),
-                author=self.recode(rev.committer),
-                desc=self.recode(rev.message),
-                branch=branch,
-                rev=version,
-                saverev=self._saverev)
+        branch = self.recode(rev.properties.get(b'branch-nick', u'default'))
+        if branch == b'trunk':
+            branch = b'default'
+        return common.commit(
+            parents=parents,
+            date=b'%d %d' % (rev.timestamp, -rev.timezone),
+            author=self.recode(rev.committer),
+            desc=self.recode(rev.message),
+            branch=branch,
+            rev=version,
+            saverev=self._saverev,
+        )
 
     def gettags(self):
         bytetags = {}
@@ -185,7 +199,7 @@
             if not branch.supports_tags():
                 return {}
             tagdict = branch.tags.get_tag_dict()
-            for name, rev in tagdict.iteritems():
+            for name, rev in pycompat.iteritems(tagdict):
                 bytetags[self.recode(name)] = rev
         return bytetags
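
The ``pycompat.iteritems`` shim above recurs throughout this change;
a hedged sketch of what such a helper looks like (Mercurial's actual
implementation may differ in detail)::

   import sys

   if sys.version_info[0] >= 3:
       def iteritems(d):
           # dict.iteritems() is gone on Python 3
           return iter(d.items())
   else:
       def iteritems(d):
           return d.iteritems()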
 
@@ -216,11 +230,21 @@
 
         # Process the entries by reverse lexicographic name order to
         # handle nested renames correctly, most specific first.
-        curchanges = sorted(current.iter_changes(origin),
-                            key=lambda c: c[1][0] or c[1][1],
-                            reverse=True)
-        for (fileid, paths, changed_content, versioned, parent, name,
-            kind, executable) in curchanges:
+        curchanges = sorted(
+            current.iter_changes(origin),
+            key=lambda c: c[1][0] or c[1][1],
+            reverse=True,
+        )
+        for (
+            fileid,
+            paths,
+            changed_content,
+            versioned,
+            parent,
+            name,
+            kind,
+            executable,
+        ) in curchanges:
 
             if paths[0] == u'' or paths[1] == u'':
                 # ignore changes to tree root
@@ -228,13 +252,13 @@
 
             # bazaar tracks directories, mercurial does not, so
             # we have to rename the directory contents
-            if kind[1] == 'directory':
-                if kind[0] not in (None, 'directory'):
+            if kind[1] == b'directory':
+                if kind[0] not in (None, b'directory'):
                     # Replacing 'something' with a directory, record it
                     # so it can be removed.
                     changes.append((self.recode(paths[0]), revid))
 
-                if kind[0] == 'directory' and None not in paths:
+                if kind[0] == b'directory' and None not in paths:
                     renaming = paths[0] != paths[1]
                     # neither an add nor a delete - a move
                     # rename all directory contents manually
@@ -242,9 +266,9 @@
                     # get all child-entries of the directory
                     for name, entry in inventory.iter_entries(subdir):
                         # hg does not track directory renames
-                        if entry.kind == 'directory':
+                        if entry.kind == b'directory':
                             continue
-                        frompath = self.recode(paths[0] + '/' + name)
+                        frompath = self.recode(paths[0] + b'/' + name)
                         if frompath in seen:
                             # Already handled by a more specific change entry
                             # This is important when you have:
@@ -255,14 +279,16 @@
                         seen.add(frompath)
                         if not renaming:
                             continue
-                        topath = self.recode(paths[1] + '/' + name)
+                        topath = self.recode(paths[1] + b'/' + name)
                         # register the files as changed
                         changes.append((frompath, revid))
                         changes.append((topath, revid))
                         # add to mode cache
-                        mode = ((entry.executable and 'x')
-                                or (entry.kind == 'symlink' and 's')
-                                or '')
+                        mode = (
+                            (entry.executable and b'x')
+                            or (entry.kind == b'symlink' and b's')
+                            or b''
+                        )
                         self._modecache[(topath, revid)] = mode
                         # register the change as move
                         renames[topath] = frompath
@@ -290,8 +316,7 @@
 
             # populate the mode cache
             kind, executable = [e[1] for e in (kind, executable)]
-            mode = ((executable and 'x') or (kind == 'symlink' and 'l')
-                    or '')
+            mode = (executable and b'x') or (kind == b'symlink' and b'l') or b''
             self._modecache[(topath, revid)] = mode
             changes.append((topath, revid))
 
--- a/hgext/convert/common.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/common.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,6 +15,7 @@
 import subprocess
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     encoding,
     error,
@@ -22,20 +23,19 @@
     pycompat,
     util,
 )
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
 
 pickle = util.pickle
 propertycache = util.propertycache
 
+
 def _encodeornone(d):
     if d is None:
         return
     return d.encode('latin1')
 
+
 class _shlexpy3proxy(object):
-
     def __init__(self, l):
         self._l = l
 
@@ -47,22 +47,24 @@
 
     @property
     def infile(self):
-        return self._l.infile or '<unknown>'
+        return self._l.infile or b'<unknown>'
 
     @property
     def lineno(self):
         return self._l.lineno
 
+
 def shlexer(data=None, filepath=None, wordchars=None, whitespace=None):
     if data is None:
         if pycompat.ispy3:
-            data = open(filepath, 'r', encoding=r'latin1')
+            data = open(filepath, b'r', encoding=r'latin1')
         else:
-            data = open(filepath, 'r')
+            data = open(filepath, b'r')
     else:
         if filepath is not None:
             raise error.ProgrammingError(
-                'shlexer only accepts data or filepath, not both')
+                b'shlexer only accepts data or filepath, not both'
+            )
         if pycompat.ispy3:
             data = data.decode('latin1')
     l = shlex.shlex(data, infile=filepath, posix=True)
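
The latin-1 dance above exists because ``shlex`` only handles str on
Python 3; latin-1 maps every byte to a code point losslessly, so the
bytes are decoded before lexing and each token re-encoded. A
self-contained sketch of the same trick::

   import shlex

   data = b'child parent1,parent2'
   l = shlex.shlex(data.decode('latin1'), posix=True)
   l.whitespace += ','
   l.whitespace_split = True
   tokens = [t.encode('latin1') for t in l]
   assert tokens == [b'child', b'parent1', b'parent2']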
@@ -81,22 +83,26 @@
         return _shlexpy3proxy(l)
     return l
 
+
 def encodeargs(args):
     def encodearg(s):
         lines = base64.encodestring(s)
         lines = [l.splitlines()[0] for l in lines]
-        return ''.join(lines)
+        return b''.join(lines)
 
     s = pickle.dumps(args)
     return encodearg(s)
 
+
 def decodeargs(s):
     s = base64.decodestring(s)
     return pickle.loads(s)
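
``encodeargs``/``decodeargs`` shuttle a pickled argument list through
a single environment-safe line. A hedged sketch of the round trip,
using ``base64.encodebytes``, the Python 3 spelling of the
``encodestring`` alias used above::

   import base64
   import pickle

   def encodeargs(args):
       # strip the newlines encodebytes inserts every 76 characters
       return b''.join(base64.encodebytes(pickle.dumps(args)).splitlines())

   def decodeargs(s):
       return pickle.loads(base64.decodebytes(s))

   assert decodeargs(encodeargs([b'convert', 1])) == [b'convert', 1]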
 
+
 class MissingTool(Exception):
     pass
 
+
 def checktool(exe, name=None, abort=True):
     name = name or exe
     if not procutil.findexe(exe):
@@ -104,29 +110,45 @@
             exc = error.Abort
         else:
             exc = MissingTool
-        raise exc(_('cannot find required "%s" tool') % name)
+        raise exc(_(b'cannot find required "%s" tool') % name)
+
 
 class NoRepo(Exception):
     pass
 
-SKIPREV = 'SKIP'
+
+SKIPREV = b'SKIP'
+
 
 class commit(object):
-    def __init__(self, author, date, desc, parents, branch=None, rev=None,
-                 extra=None, sortkey=None, saverev=True, phase=phases.draft,
-                 optparents=None, ctx=None):
-        self.author = author or 'unknown'
-        self.date = date or '0 0'
+    def __init__(
+        self,
+        author,
+        date,
+        desc,
+        parents,
+        branch=None,
+        rev=None,
+        extra=None,
+        sortkey=None,
+        saverev=True,
+        phase=phases.draft,
+        optparents=None,
+        ctx=None,
+    ):
+        self.author = author or b'unknown'
+        self.date = date or b'0 0'
         self.desc = desc
-        self.parents = parents # will be converted and used as parents
-        self.optparents = optparents or [] # will be used if already converted
+        self.parents = parents  # will be converted and used as parents
+        self.optparents = optparents or []  # will be used if already converted
         self.branch = branch
         self.rev = rev
         self.extra = extra or {}
         self.sortkey = sortkey
         self.saverev = saverev
         self.phase = phase
-        self.ctx = ctx # for hg to hg conversions
+        self.ctx = ctx  # for hg to hg conversions
+
 
 class converter_source(object):
     """Conversion source interface"""
@@ -139,15 +161,17 @@
         self.revs = revs
         self.repotype = repotype
 
-        self.encoding = 'utf-8'
+        self.encoding = b'utf-8'
 
-    def checkhexformat(self, revstr, mapname='splicemap'):
+    def checkhexformat(self, revstr, mapname=b'splicemap'):
         """ fails if revstr is not a 40 byte hex. mercurial and git both uses
             such format for their revision numbering
         """
         if not re.match(br'[0-9a-fA-F]{40,40}$', revstr):
-            raise error.Abort(_('%s entry %s is not a valid revision'
-                               ' identifier') % (mapname, revstr))
+            raise error.Abort(
+                _(b'%s entry %s is not a valid revision identifier')
+                % (mapname, revstr)
+            )
 
     def before(self):
         pass
@@ -213,7 +237,7 @@
 
     def recode(self, s, encoding=None):
         if not encoding:
-            encoding = self.encoding or 'utf-8'
+            encoding = self.encoding or b'utf-8'
 
         if isinstance(s, pycompat.unicode):
             return s.encode("utf-8")
@@ -223,8 +247,9 @@
             try:
                 return s.decode("latin-1").encode("utf-8")
             except UnicodeError:
-                return s.decode(pycompat.sysstr(encoding),
-                                "replace").encode("utf-8")
+                return s.decode(pycompat.sysstr(encoding), "replace").encode(
+                    "utf-8"
+                )
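
``recode`` normalizes any source string to UTF-8 bytes by trying
UTF-8 first and falling back to latin-1. A condensed sketch of that
cascade (simplified; the real method also honors a caller-supplied
encoding)::

   def to_utf8(s):
       try:
           return s.decode('utf-8').encode('utf-8')
       except UnicodeError:
           return s.decode('latin-1').encode('utf-8')

   assert to_utf8(b'caf\xe9') == b'caf\xc3\xa9'  # latin-1 input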
 
     def getchangedfiles(self, rev, i):
         """Return the files changed by rev compared to parent[i].
@@ -268,13 +293,14 @@
         """
         return {}
 
-    def checkrevformat(self, revstr, mapname='splicemap'):
+    def checkrevformat(self, revstr, mapname=b'splicemap'):
         """revstr is a string that describes a revision in the given
            source control system.  Return true if revstr has correct
            format.
         """
         return True
 
+
 class converter_sink(object):
     """Conversion sink (target) interface"""
 
@@ -301,8 +327,9 @@
         mapping equivalent authors identifiers for each system."""
         return None
 
-    def putcommit(self, files, copies, parents, commit, source, revmap, full,
-                  cleanp2):
+    def putcommit(
+        self, files, copies, parents, commit, source, revmap, full, cleanp2
+    ):
         """Create a revision with all changed files listed in 'files'
         and having listed parents. 'commit' is a commit object
         containing at a minimum the author, date, and message for this
@@ -369,6 +396,7 @@
         special cases."""
         raise NotImplementedError
 
+
 class commandline(object):
     def __init__(self, ui, command):
         self.ui = ui
@@ -383,31 +411,35 @@
     def _cmdline(self, cmd, *args, **kwargs):
         kwargs = pycompat.byteskwargs(kwargs)
         cmdline = [self.command, cmd] + list(args)
-        for k, v in kwargs.iteritems():
+        for k, v in pycompat.iteritems(kwargs):
             if len(k) == 1:
-                cmdline.append('-' + k)
+                cmdline.append(b'-' + k)
             else:
-                cmdline.append('--' + k.replace('_', '-'))
+                cmdline.append(b'--' + k.replace(b'_', b'-'))
             try:
                 if len(k) == 1:
-                    cmdline.append('' + v)
+                    cmdline.append(b'' + v)
                 else:
-                    cmdline[-1] += '=' + v
+                    cmdline[-1] += b'=' + v
             except TypeError:
                 pass
         cmdline = [procutil.shellquote(arg) for arg in cmdline]
         if not self.ui.debugflag:
-            cmdline += ['2>', pycompat.bytestr(os.devnull)]
-        cmdline = ' '.join(cmdline)
+            cmdline += [b'2>', pycompat.bytestr(os.devnull)]
+        cmdline = b' '.join(cmdline)
         return cmdline
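
``_cmdline`` turns keyword arguments into command-line flags:
one-letter keys become short options with a separate value, longer
keys become ``--long-option=value``. A simplified, hedged sketch
(ignoring the flag-only/TypeError case handled above)::

   def buildflags(**kwargs):
       out = []
       for k, v in kwargs.items():
           k = k.encode('ascii')
           if len(k) == 1:
               out += [b'-' + k, v]
           else:
               out.append(b'--' + k.replace(b'_', b'-') + b'=' + v)
       return out

   assert buildflags(r=b'tip') == [b'-r', b'tip']
   assert buildflags(date_format=b'iso') == [b'--date-format=iso']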
 
     def _run(self, cmd, *args, **kwargs):
         def popen(cmdline):
-            p = subprocess.Popen(procutil.tonativestr(cmdline),
-                                 shell=True, bufsize=-1,
-                                 close_fds=procutil.closefds,
-                                 stdout=subprocess.PIPE)
+            p = subprocess.Popen(
+                procutil.tonativestr(cmdline),
+                shell=True,
+                bufsize=-1,
+                close_fds=procutil.closefds,
+                stdout=subprocess.PIPE,
+            )
             return p
+
         return self._dorun(popen, cmd, *args, **kwargs)
 
     def _run2(self, cmd, *args, **kwargs):
@@ -416,9 +448,9 @@
     def _run3(self, cmd, *args, **kwargs):
         return self._dorun(procutil.popen3, cmd, *args, **kwargs)
 
-    def _dorun(self, openfunc, cmd,  *args, **kwargs):
+    def _dorun(self, openfunc, cmd, *args, **kwargs):
         cmdline = self._cmdline(cmd, *args, **kwargs)
-        self.ui.debug('running: %s\n' % (cmdline,))
+        self.ui.debug(b'running: %s\n' % (cmdline,))
         self.prerun()
         try:
             return openfunc(cmdline)
@@ -435,16 +467,16 @@
         p = self._run(cmd, *args, **kwargs)
         output = p.stdout.readlines()
         p.wait()
-        self.ui.debug(''.join(output))
+        self.ui.debug(b''.join(output))
         return output, p.returncode
 
-    def checkexit(self, status, output=''):
+    def checkexit(self, status, output=b''):
         if status:
             if output:
-                self.ui.warn(_('%s error:\n') % self.command)
+                self.ui.warn(_(b'%s error:\n') % self.command)
                 self.ui.warn(output)
             msg = procutil.explainexit(status)
-            raise error.Abort('%s %s' % (self.command, msg))
+            raise error.Abort(b'%s %s' % (self.command, msg))
 
     def run0(self, cmd, *args, **kwargs):
         output, status = self.run(cmd, *args, **kwargs)
@@ -453,7 +485,7 @@
 
     def runlines0(self, cmd, *args, **kwargs):
         output, status = self.runlines(cmd, *args, **kwargs)
-        self.checkexit(status, ''.join(output))
+        self.checkexit(status, b''.join(output))
         return output
 
     @propertycache
@@ -495,6 +527,7 @@
         for l in self._limit_arglist(arglist, cmd, *args, **kwargs):
             self.run0(cmd, *(list(args) + l), **kwargs)
 
+
 class mapfile(dict):
     def __init__(self, ui, path):
         super(mapfile, self).__init__()
@@ -508,7 +541,7 @@
         if not self.path:
             return
         try:
-            fp = open(self.path, 'rb')
+            fp = open(self.path, b'rb')
         except IOError as err:
             if err.errno != errno.ENOENT:
                 raise
@@ -519,11 +552,12 @@
                 # Ignore blank lines
                 continue
             try:
-                key, value = line.rsplit(' ', 1)
+                key, value = line.rsplit(b' ', 1)
             except ValueError:
                 raise error.Abort(
-                    _('syntax error in %s(%d): key/value pair expected')
-                    % (self.path, i + 1))
+                    _(b'syntax error in %s(%d): key/value pair expected')
+                    % (self.path, i + 1)
+                )
             if key not in self:
                 self.order.append(key)
             super(mapfile, self).__setitem__(key, value)
@@ -532,12 +566,13 @@
     def __setitem__(self, key, value):
         if self.fp is None:
             try:
-                self.fp = open(self.path, 'ab')
+                self.fp = open(self.path, b'ab')
             except IOError as err:
                 raise error.Abort(
-                    _('could not open map file %r: %s') %
-                    (self.path, encoding.strtolocal(err.strerror)))
-        self.fp.write(util.tonativeeol('%s %s\n' % (key, value)))
+                    _(b'could not open map file %r: %s')
+                    % (self.path, encoding.strtolocal(err.strerror))
+                )
+        self.fp.write(util.tonativeeol(b'%s %s\n' % (key, value)))
         self.fp.flush()
         super(mapfile, self).__setitem__(key, value)
 
@@ -546,9 +581,11 @@
             self.fp.close()
             self.fp = None
 
+
 def makedatetimestamp(t):
     """Like dateutil.makedate() but for time t instead of current time"""
-    delta = (datetime.datetime.utcfromtimestamp(t) -
-             datetime.datetime.fromtimestamp(t))
+    delta = datetime.datetime.utcfromtimestamp(
+        t
+    ) - datetime.datetime.fromtimestamp(t)
     tz = delta.days * 86400 + delta.seconds
     return t, tz
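
The ``makedatetimestamp`` math above derives the local UTC offset for
an arbitrary time ``t`` from the difference between its naive UTC and
local renderings. A hedged worked example (the timestamp is arbitrary
and the result depends on the local zone)::

   import datetime

   t = 1571670588
   delta = (datetime.datetime.utcfromtimestamp(t)
            - datetime.datetime.fromtimestamp(t))
   tz = delta.days * 86400 + delta.seconds
   # e.g. 14400 on a UTC-4 host; Mercurial offsets count seconds
   # west of UTC, matching this sign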
--- a/hgext/convert/convcmd.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/convcmd.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,6 +11,7 @@
 import shutil
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     encoding,
     error,
@@ -52,14 +53,17 @@
 svn_sink = subversion.svn_sink
 svn_source = subversion.svn_source
 
-orig_encoding = 'ascii'
+orig_encoding = b'ascii'
+
 
 def recode(s):
     if isinstance(s, pycompat.unicode):
         return s.encode(pycompat.sysstr(orig_encoding), 'replace')
     else:
         return s.decode('utf-8').encode(
-            pycompat.sysstr(orig_encoding), 'replace')
+            pycompat.sysstr(orig_encoding), 'replace'
+        )
+
 
 def mapbranch(branch, branchmap):
     '''
@@ -87,34 +91,36 @@
     # destination repository. For such commits, using a literal "default"
     # in branchmap below allows the user to map "default" to an alternate
     # default branch in the destination repository.
-    branch = branchmap.get(branch or 'default', branch)
+    branch = branchmap.get(branch or b'default', branch)
     # At some point we used "None" literal to denote the default branch,
     # attempt to use that for backward compatibility.
-    if (not branch):
-        branch = branchmap.get('None', branch)
+    if not branch:
+        branch = branchmap.get(b'None', branch)
     return branch
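
A worked illustration of the ``mapbranch`` semantics above: an empty
branch name is looked up under ``default``, and the literal ``None``
key is still honored for backward compatibility (mapping values are
invented)::

   branchmap = {b'default': b'main'}
   branch = None
   branch = branchmap.get(branch or b'default', branch)
   assert branch == b'main'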
 
+
 source_converters = [
-    ('cvs', convert_cvs, 'branchsort'),
-    ('git', convert_git, 'branchsort'),
-    ('svn', svn_source, 'branchsort'),
-    ('hg', mercurial_source, 'sourcesort'),
-    ('darcs', darcs_source, 'branchsort'),
-    ('mtn', monotone_source, 'branchsort'),
-    ('gnuarch', gnuarch_source, 'branchsort'),
-    ('bzr', bzr_source, 'branchsort'),
-    ('p4', p4_source, 'branchsort'),
-    ]
+    (b'cvs', convert_cvs, b'branchsort'),
+    (b'git', convert_git, b'branchsort'),
+    (b'svn', svn_source, b'branchsort'),
+    (b'hg', mercurial_source, b'sourcesort'),
+    (b'darcs', darcs_source, b'branchsort'),
+    (b'mtn', monotone_source, b'branchsort'),
+    (b'gnuarch', gnuarch_source, b'branchsort'),
+    (b'bzr', bzr_source, b'branchsort'),
+    (b'p4', p4_source, b'branchsort'),
+]
 
 sink_converters = [
-    ('hg', mercurial_sink),
-    ('svn', svn_sink),
-    ]
+    (b'hg', mercurial_sink),
+    (b'svn', svn_sink),
+]
+
 
 def convertsource(ui, path, type, revs):
     exceptions = []
     if type and type not in [s[0] for s in source_converters]:
-        raise error.Abort(_('%s: invalid source repository type') % type)
+        raise error.Abort(_(b'%s: invalid source repository type') % type)
     for name, source, sortmode in source_converters:
         try:
             if not type or name == type:
@@ -123,28 +129,31 @@
             exceptions.append(inst)
     if not ui.quiet:
         for inst in exceptions:
-            ui.write("%s\n" % pycompat.bytestr(inst.args[0]))
-    raise error.Abort(_('%s: missing or unsupported repository') % path)
+            ui.write(b"%s\n" % pycompat.bytestr(inst.args[0]))
+    raise error.Abort(_(b'%s: missing or unsupported repository') % path)
+
 
 def convertsink(ui, path, type):
     if type and type not in [s[0] for s in sink_converters]:
-        raise error.Abort(_('%s: invalid destination repository type') % type)
+        raise error.Abort(_(b'%s: invalid destination repository type') % type)
     for name, sink in sink_converters:
         try:
             if not type or name == type:
                 return sink(ui, name, path)
         except NoRepo as inst:
-            ui.note(_("convert: %s\n") % inst)
+            ui.note(_(b"convert: %s\n") % inst)
         except MissingTool as inst:
-            raise error.Abort('%s\n' % inst)
-    raise error.Abort(_('%s: unknown repository type') % path)
+            raise error.Abort(b'%s\n' % inst)
+    raise error.Abort(_(b'%s: unknown repository type') % path)
+
 
 class progresssource(object):
     def __init__(self, ui, source, filecount):
         self.ui = ui
         self.source = source
-        self.progress = ui.makeprogress(_('getting files'), unit=_('files'),
-                                        total=filecount)
+        self.progress = ui.makeprogress(
+            _(b'getting files'), unit=_(b'files'), total=filecount
+        )
 
     def getfile(self, file, rev):
         self.progress.increment(item=file)
@@ -159,6 +168,7 @@
     def close(self):
         self.progress.complete()
 
+
 class converter(object):
     def __init__(self, ui, source, dest, revmapfile, opts):
 
@@ -180,12 +190,12 @@
         if authorfile and os.path.exists(authorfile):
             self.readauthormap(authorfile)
         # Extend/Override with new author map if necessary
-        if opts.get('authormap'):
-            self.readauthormap(opts.get('authormap'))
+        if opts.get(b'authormap'):
+            self.readauthormap(opts.get(b'authormap'))
             self.authorfile = self.dest.authorfile()
 
-        self.splicemap = self.parsesplicemap(opts.get('splicemap'))
-        self.branchmap = mapfile(ui, opts.get('branchmap'))
+        self.splicemap = self.parsesplicemap(opts.get(b'splicemap'))
+        self.branchmap = mapfile(ui, opts.get(b'branchmap'))
 
     def parsesplicemap(self, path):
         """ check and validate the splicemap format and
@@ -202,19 +212,24 @@
             return {}
         m = {}
         try:
-            fp = open(path, 'rb')
+            fp = open(path, b'rb')
             for i, line in enumerate(util.iterfile(fp)):
                 line = line.splitlines()[0].rstrip()
                 if not line:
                     # Ignore blank lines
                     continue
                 # split line
-                lex = common.shlexer(data=line, whitespace=',')
+                lex = common.shlexer(data=line, whitespace=b',')
                 line = list(lex)
                 # check number of parents
                 if not (2 <= len(line) <= 3):
-                    raise error.Abort(_('syntax error in %s(%d): child parent1'
-                                       '[,parent2] expected') % (path, i + 1))
+                    raise error.Abort(
+                        _(
+                            b'syntax error in %s(%d): child parent1'
+                            b'[,parent2] expected'
+                        )
+                        % (path, i + 1)
+                    )
                 for part in line:
                     self.source.checkrevformat(part)
                 child, p1, p2 = line[0], line[1:2], line[2:]
@@ -222,13 +237,13 @@
                     m[child] = p1
                 else:
                     m[child] = p1 + p2
-         # if file does not exist or error reading, exit
+        # if the file does not exist or cannot be read, abort
         except IOError:
-            raise error.Abort(_('splicemap file not found or error reading %s:')
-                               % path)
+            raise error.Abort(
+                _(b'splicemap file not found or error reading %s:') % path
+            )
         return m
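
Per the parser above, each splicemap line is
``child parent1[,parent2]``, whitespace- or comma-separated, and each
id must satisfy the source's ``checkrevformat`` (a full 40-hex node
for hg or git sources). A hedged one-line sample (node ids are
synthetic)::

   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,cccccccccccccccccccccccccccccccccccccccc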
 
-
     def walktree(self, heads):
         '''Return a mapping that identifies the uncommitted parents of every
         uncommitted changeset.'''
@@ -236,8 +251,9 @@
         known = set()
         parents = {}
         numcommits = self.source.numcommits()
-        progress = self.ui.makeprogress(_('scanning'), unit=_('revisions'),
-                                        total=numcommits)
+        progress = self.ui.makeprogress(
+            _(b'scanning'), unit=_(b'revisions'), total=numcommits
+        )
         while visit:
             n = visit.pop(0)
             if n in known:
@@ -266,8 +282,13 @@
             if c not in parents:
                 if not self.dest.hascommitforsplicemap(self.map.get(c, c)):
                     # Could be in source but not converted during this run
-                    self.ui.warn(_('splice map revision %s is not being '
-                                   'converted, ignoring\n') % c)
+                    self.ui.warn(
+                        _(
+                            b'splice map revision %s is not being '
+                            b'converted, ignoring\n'
+                        )
+                        % c
+                    )
                 continue
             pc = []
             for p in splicemap[c]:
@@ -276,7 +297,7 @@
                     continue
                 # Parent is not in dest and not being converted, not good
                 if p not in parents:
-                    raise error.Abort(_('unknown splice map parent: %s') % p)
+                    raise error.Abort(_(b'unknown splice map parent: %s') % p)
                 pc.append(p)
             parents[c] = pc
 
@@ -325,6 +346,7 @@
             compression.
             """
             prev = [None]
+
             def picknext(nodes):
                 next = nodes[0]
                 for n in nodes:
@@ -333,26 +355,34 @@
                         break
                 prev[0] = next
                 return next
+
             return picknext
 
         def makesourcesorter():
             """Source specific sort."""
             keyfn = lambda n: self.commitcache[n].sortkey
+
             def picknext(nodes):
                 return sorted(nodes, key=keyfn)[0]
+
             return picknext
 
         def makeclosesorter():
             """Close order sort."""
-            keyfn = lambda n: ('close' not in self.commitcache[n].extra,
-                               self.commitcache[n].sortkey)
+            keyfn = lambda n: (
+                b'close' not in self.commitcache[n].extra,
+                self.commitcache[n].sortkey,
+            )
+
             def picknext(nodes):
                 return sorted(nodes, key=keyfn)[0]
+
             return picknext
 
         def makedatesorter():
             """Sort revisions by date."""
             dates = {}
+
             def getdate(n):
                 if n not in dates:
                     dates[n] = dateutil.parsedate(self.commitcache[n].date)
@@ -363,16 +393,16 @@
 
             return picknext
 
-        if sortmode == 'branchsort':
+        if sortmode == b'branchsort':
             picknext = makebranchsorter()
-        elif sortmode == 'datesort':
+        elif sortmode == b'datesort':
             picknext = makedatesorter()
-        elif sortmode == 'sourcesort':
+        elif sortmode == b'sourcesort':
             picknext = makesourcesorter()
-        elif sortmode == 'closesort':
+        elif sortmode == b'closesort':
             picknext = makeclosesorter()
         else:
-            raise error.Abort(_('unknown sort mode: %s') % sortmode)
+            raise error.Abort(_(b'unknown sort mode: %s') % sortmode)
 
         children, actives = mapchildren(parents)
 
@@ -390,52 +420,57 @@
                 try:
                     pendings[c].remove(n)
                 except ValueError:
-                    raise error.Abort(_('cycle detected between %s and %s')
-                                       % (recode(c), recode(n)))
+                    raise error.Abort(
+                        _(b'cycle detected between %s and %s')
+                        % (recode(c), recode(n))
+                    )
                 if not pendings[c]:
                     # Parents are converted, node is eligible
                     actives.insert(0, c)
                     pendings[c] = None
 
         if len(s) != len(parents):
-            raise error.Abort(_("not all revisions were sorted"))
+            raise error.Abort(_(b"not all revisions were sorted"))
 
         return s
 
     def writeauthormap(self):
         authorfile = self.authorfile
         if authorfile:
-            self.ui.status(_('writing author map file %s\n') % authorfile)
-            ofile = open(authorfile, 'wb+')
+            self.ui.status(_(b'writing author map file %s\n') % authorfile)
+            ofile = open(authorfile, b'wb+')
             for author in self.authors:
-                ofile.write(util.tonativeeol("%s=%s\n"
-                                             % (author, self.authors[author])))
+                ofile.write(
+                    util.tonativeeol(
+                        b"%s=%s\n" % (author, self.authors[author])
+                    )
+                )
             ofile.close()
 
     def readauthormap(self, authorfile):
-        afile = open(authorfile, 'rb')
+        afile = open(authorfile, b'rb')
         for line in afile:
 
             line = line.strip()
-            if not line or line.startswith('#'):
+            if not line or line.startswith(b'#'):
                 continue
 
             try:
-                srcauthor, dstauthor = line.split('=', 1)
+                srcauthor, dstauthor = line.split(b'=', 1)
             except ValueError:
-                msg = _('ignoring bad line in author map file %s: %s\n')
+                msg = _(b'ignoring bad line in author map file %s: %s\n')
                 self.ui.warn(msg % (authorfile, line.rstrip()))
                 continue
 
             srcauthor = srcauthor.strip()
             dstauthor = dstauthor.strip()
             if self.authors.get(srcauthor) in (None, dstauthor):
-                msg = _('mapping author %s to %s\n')
+                msg = _(b'mapping author %s to %s\n')
                 self.ui.debug(msg % (srcauthor, dstauthor))
                 self.authors[srcauthor] = dstauthor
                 continue
 
-            m = _('overriding mapping for author %s, was %s, will be %s\n')
+            m = _(b'overriding mapping for author %s, was %s, will be %s\n')
             self.ui.status(m % (srcauthor, self.authors[srcauthor], dstauthor))
 
         afile.close()
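
``readauthormap`` expects one ``srcauthor=dstauthor`` mapping per
line, skipping blanks and ``#`` comments, and reports when a source
author is remapped. A hedged sample file (names invented)::

   # srcauthor=dstauthor
   jdoe=John Doe <jdoe@example.com>
   buildbot=Build Bot <build@example.com>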
@@ -449,7 +484,7 @@
 
     def copy(self, rev):
         commit = self.commitcache[rev]
-        full = self.opts.get('full')
+        full = self.opts.get(b'full')
         changes = self.source.getchanges(rev, full)
         if isinstance(changes, bytes):
             if changes == SKIPREV:
@@ -464,19 +499,22 @@
             for prev in commit.parents:
                 if prev not in self.commitcache:
                     self.cachecommit(prev)
-                pbranches.append((self.map[prev],
-                                  self.commitcache[prev].branch))
+                pbranches.append(
+                    (self.map[prev], self.commitcache[prev].branch)
+                )
         self.dest.setbranch(commit.branch, pbranches)
         try:
             parents = self.splicemap[rev]
-            self.ui.status(_('spliced in %s as parents of %s\n') %
-                           (_(' and ').join(parents), rev))
+            self.ui.status(
+                _(b'spliced in %s as parents of %s\n')
+                % (_(b' and ').join(parents), rev)
+            )
             parents = [self.map.get(p, p) for p in parents]
         except KeyError:
             parents = [b[0] for b in pbranches]
-            parents.extend(self.map[x]
-                           for x in commit.optparents
-                           if x in self.map)
+            parents.extend(
+                self.map[x] for x in commit.optparents if x in self.map
+            )
         if len(pbranches) != 2:
             cleanp2 = set()
         if len(parents) < 3:
@@ -486,10 +524,12 @@
             # changed files N-1 times. This tweak to the number of
             # files makes it so the progress bar doesn't overflow
             # itself.
-            source = progresssource(self.ui, self.source,
-                                    len(files) * (len(parents) - 1))
-        newnode = self.dest.putcommit(files, copies, parents, commit,
-                                      source, self.map, full, cleanp2)
+            source = progresssource(
+                self.ui, self.source, len(files) * (len(parents) - 1)
+            )
+        newnode = self.dest.putcommit(
+            files, copies, parents, commit, source, self.map, full, cleanp2
+        )
         source.close()
         self.source.converted(rev, newnode)
         self.map[rev] = newnode
@@ -499,33 +539,34 @@
             self.source.before()
             self.dest.before()
             self.source.setrevmap(self.map)
-            self.ui.status(_("scanning source...\n"))
+            self.ui.status(_(b"scanning source...\n"))
             heads = self.source.getheads()
             parents = self.walktree(heads)
             self.mergesplicemap(parents, self.splicemap)
-            self.ui.status(_("sorting...\n"))
+            self.ui.status(_(b"sorting...\n"))
             t = self.toposort(parents, sortmode)
             num = len(t)
             c = None
 
-            self.ui.status(_("converting...\n"))
-            progress = self.ui.makeprogress(_('converting'),
-                                            unit=_('revisions'), total=len(t))
+            self.ui.status(_(b"converting...\n"))
+            progress = self.ui.makeprogress(
+                _(b'converting'), unit=_(b'revisions'), total=len(t)
+            )
             for i, c in enumerate(t):
                 num -= 1
                 desc = self.commitcache[c].desc
-                if "\n" in desc:
+                if b"\n" in desc:
                     desc = desc.splitlines()[0]
                 # convert log message to local encoding without using
                 # tolocal() because the encoding.encoding convert()
                 # uses is 'utf-8'
-                self.ui.status("%d %s\n" % (num, recode(desc)))
-                self.ui.note(_("source: %s\n") % recode(c))
+                self.ui.status(b"%d %s\n" % (num, recode(desc)))
+                self.ui.note(_(b"source: %s\n") % recode(c))
                 progress.update(i)
                 self.copy(c)
             progress.complete()
 
-            if not self.ui.configbool('convert', 'skiptags'):
+            if not self.ui.configbool(b'convert', b'skiptags'):
                 tags = self.source.gettags()
                 ctags = {}
                 for k in tags:
@@ -538,8 +579,11 @@
                     if nrev and tagsparent:
                         # write another hash correspondence to override the
                         # previous one so we don't end up with extra tag heads
-                        tagsparents = [e for e in self.map.iteritems()
-                                       if e[1] == tagsparent]
+                        tagsparents = [
+                            e
+                            for e in pycompat.iteritems(self.map)
+                            if e[1] == tagsparent
+                        ]
                         if tagsparents:
                             self.map[tagsparents[0][0]] = nrev
 
@@ -564,47 +608,52 @@
             self.source.after()
         self.map.close()
 
+
 def convert(ui, src, dest=None, revmapfile=None, **opts):
     opts = pycompat.byteskwargs(opts)
     global orig_encoding
     orig_encoding = encoding.encoding
-    encoding.encoding = 'UTF-8'
+    encoding.encoding = b'UTF-8'
 
     # support --authors as an alias for --authormap
-    if not opts.get('authormap'):
-        opts['authormap'] = opts.get('authors')
+    if not opts.get(b'authormap'):
+        opts[b'authormap'] = opts.get(b'authors')
 
     if not dest:
-        dest = hg.defaultdest(src) + "-hg"
-        ui.status(_("assuming destination %s\n") % dest)
+        dest = hg.defaultdest(src) + b"-hg"
+        ui.status(_(b"assuming destination %s\n") % dest)
 
-    destc = convertsink(ui, dest, opts.get('dest_type'))
+    destc = convertsink(ui, dest, opts.get(b'dest_type'))
     destc = scmutil.wrapconvertsink(destc)
 
     try:
-        srcc, defaultsort = convertsource(ui, src, opts.get('source_type'),
-                                          opts.get('rev'))
+        srcc, defaultsort = convertsource(
+            ui, src, opts.get(b'source_type'), opts.get(b'rev')
+        )
     except Exception:
         for path in destc.created:
             shutil.rmtree(path, True)
         raise
 
-    sortmodes = ('branchsort', 'datesort', 'sourcesort', 'closesort')
+    sortmodes = (b'branchsort', b'datesort', b'sourcesort', b'closesort')
     sortmode = [m for m in sortmodes if opts.get(m)]
     if len(sortmode) > 1:
-        raise error.Abort(_('more than one sort mode specified'))
+        raise error.Abort(_(b'more than one sort mode specified'))
     if sortmode:
         sortmode = sortmode[0]
     else:
         sortmode = defaultsort
 
-    if sortmode == 'sourcesort' and not srcc.hasnativeorder():
-        raise error.Abort(_('--sourcesort is not supported by this data source')
-                         )
-    if sortmode == 'closesort' and not srcc.hasnativeclose():
-        raise error.Abort(_('--closesort is not supported by this data source'))
+    if sortmode == b'sourcesort' and not srcc.hasnativeorder():
+        raise error.Abort(
+            _(b'--sourcesort is not supported by this data source')
+        )
+    if sortmode == b'closesort' and not srcc.hasnativeclose():
+        raise error.Abort(
+            _(b'--closesort is not supported by this data source')
+        )
 
-    fmap = opts.get('filemap')
+    fmap = opts.get(b'filemap')
     if fmap:
         srcc = filemap.filemap_source(ui, srcc, fmap)
         destc.setfilemapmode(True)
--- a/hgext/convert/cvs.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/cvs.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,9 +12,14 @@
 import socket
 
 from mercurial.i18n import _
+from mercurial.pycompat import (
+    getattr,
+    open,
+)
 from mercurial import (
     encoding,
     error,
+    pycompat,
     util,
 )
 from mercurial.utils import (
@@ -34,23 +39,24 @@
 makedatetimestamp = common.makedatetimestamp
 NoRepo = common.NoRepo
 
+
 class convert_cvs(converter_source):
     def __init__(self, ui, repotype, path, revs=None):
         super(convert_cvs, self).__init__(ui, repotype, path, revs=revs)
 
-        cvs = os.path.join(path, "CVS")
+        cvs = os.path.join(path, b"CVS")
         if not os.path.exists(cvs):
-            raise NoRepo(_("%s does not look like a CVS checkout") % path)
+            raise NoRepo(_(b"%s does not look like a CVS checkout") % path)
 
-        checktool('cvs')
+        checktool(b'cvs')
 
         self.changeset = None
         self.files = {}
         self.tags = {}
         self.lastbranch = {}
         self.socket = None
-        self.cvsroot = open(os.path.join(cvs, "Root"), 'rb').read()[:-1]
-        self.cvsrepo = open(os.path.join(cvs, "Repository"), 'rb').read()[:-1]
+        self.cvsroot = open(os.path.join(cvs, b"Root"), b'rb').read()[:-1]
+        self.cvsrepo = open(os.path.join(cvs, b"Repository"), b'rb').read()[:-1]
         self.encoding = encoding.encoding
 
         self._connect()
@@ -63,51 +69,64 @@
         maxrev = 0
         if self.revs:
             if len(self.revs) > 1:
-                raise error.Abort(_('cvs source does not support specifying '
-                                   'multiple revs'))
+                raise error.Abort(
+                    _(
+                        b'cvs source does not support specifying '
+                        b'multiple revs'
+                    )
+                )
             # TODO: handle tags
             try:
                 # patchset number?
                 maxrev = int(self.revs[0])
             except ValueError:
-                raise error.Abort(_('revision %s is not a patchset number')
-                                 % self.revs[0])
+                raise error.Abort(
+                    _(b'revision %s is not a patchset number') % self.revs[0]
+                )
 
         d = encoding.getcwd()
         try:
             os.chdir(self.path)
 
-            cache = 'update'
-            if not self.ui.configbool('convert', 'cvsps.cache'):
+            cache = b'update'
+            if not self.ui.configbool(b'convert', b'cvsps.cache'):
                 cache = None
             db = cvsps.createlog(self.ui, cache=cache)
-            db = cvsps.createchangeset(self.ui, db,
-                fuzz=int(self.ui.config('convert', 'cvsps.fuzz')),
-                mergeto=self.ui.config('convert', 'cvsps.mergeto'),
-                mergefrom=self.ui.config('convert', 'cvsps.mergefrom'))
+            db = cvsps.createchangeset(
+                self.ui,
+                db,
+                fuzz=int(self.ui.config(b'convert', b'cvsps.fuzz')),
+                mergeto=self.ui.config(b'convert', b'cvsps.mergeto'),
+                mergefrom=self.ui.config(b'convert', b'cvsps.mergefrom'),
+            )
 
             for cs in db:
                 if maxrev and cs.id > maxrev:
                     break
-                id = (b"%d" % cs.id)
+                id = b"%d" % cs.id
                 cs.author = self.recode(cs.author)
                 self.lastbranch[cs.branch] = id
                 cs.comment = self.recode(cs.comment)
-                if self.ui.configbool('convert', 'localtimezone'):
+                if self.ui.configbool(b'convert', b'localtimezone'):
                     cs.date = makedatetimestamp(cs.date[0])
-                date = dateutil.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2')
+                date = dateutil.datestr(cs.date, b'%Y-%m-%d %H:%M:%S %1%2')
                 self.tags.update(dict.fromkeys(cs.tags, id))
 
                 files = {}
                 for f in cs.entries:
-                    files[f.file] = "%s%s" % ('.'.join([(b"%d" % x)
-                                                        for x in f.revision]),
-                                              ['', '(DEAD)'][f.dead])
+                    files[f.file] = b"%s%s" % (
+                        b'.'.join([(b"%d" % x) for x in f.revision]),
+                        [b'', b'(DEAD)'][f.dead],
+                    )
 
                 # add current commit to set
-                c = commit(author=cs.author, date=date,
-                           parents=[(b"%d" % p.id) for p in cs.parents],
-                           desc=cs.comment, branch=cs.branch or '')
+                c = commit(
+                    author=cs.author,
+                    date=date,
+                    parents=[(b"%d" % p.id) for p in cs.parents],
+                    desc=cs.comment,
+                    branch=cs.branch or b'',
+                )
                 self.changeset[id] = c
                 self.files[id] = files
 
@@ -119,37 +138,38 @@
         root = self.cvsroot
         conntype = None
         user, host = None, None
-        cmd = ['cvs', 'server']
+        cmd = [b'cvs', b'server']
 
-        self.ui.status(_("connecting to %s\n") % root)
+        self.ui.status(_(b"connecting to %s\n") % root)
 
-        if root.startswith(":pserver:"):
+        if root.startswith(b":pserver:"):
             root = root[9:]
-            m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
-                         root)
+            m = re.match(
+                r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)', root
+            )
             if m:
-                conntype = "pserver"
+                conntype = b"pserver"
                 user, passw, serv, port, root = m.groups()
                 if not user:
-                    user = "anonymous"
+                    user = b"anonymous"
                 if not port:
                     port = 2401
                 else:
                     port = int(port)
-                format0 = ":pserver:%s@%s:%s" % (user, serv, root)
-                format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
+                format0 = b":pserver:%s@%s:%s" % (user, serv, root)
+                format1 = b":pserver:%s@%s:%d%s" % (user, serv, port, root)
 
                 if not passw:
-                    passw = "A"
-                    cvspass = os.path.expanduser("~/.cvspass")
+                    passw = b"A"
+                    cvspass = os.path.expanduser(b"~/.cvspass")
                     try:
-                        pf = open(cvspass, 'rb')
+                        pf = open(cvspass, b'rb')
                         for line in pf.read().splitlines():
-                            part1, part2 = line.split(' ', 1)
+                            part1, part2 = line.split(b' ', 1)
                             # /1 :pserver:user@example.com:2401/cvsroot/foo
                             # Ah<Z
-                            if part1 == '/1':
-                                part1, part2 = part2.split(' ', 1)
+                            if part1 == b'/1':
+                                part1, part2 = part2.split(b' ', 1)
                                 format = format1
                             # :pserver:user@example.com:/cvsroot/foo Ah<Z
                             else:
@@ -166,57 +186,73 @@
 
                 sck = socket.socket()
                 sck.connect((serv, port))
-                sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
-                                    "END AUTH REQUEST", ""]))
-                if sck.recv(128) != "I LOVE YOU\n":
-                    raise error.Abort(_("CVS pserver authentication failed"))
+                sck.send(
+                    b"\n".join(
+                        [
+                            b"BEGIN AUTH REQUEST",
+                            root,
+                            user,
+                            passw,
+                            b"END AUTH REQUEST",
+                            b"",
+                        ]
+                    )
+                )
+                if sck.recv(128) != b"I LOVE YOU\n":
+                    raise error.Abort(_(b"CVS pserver authentication failed"))
 
-                self.writep = self.readp = sck.makefile('r+')
+                self.writep = self.readp = sck.makefile(b'r+')
 
-        if not conntype and root.startswith(":local:"):
-            conntype = "local"
+        if not conntype and root.startswith(b":local:"):
+            conntype = b"local"
             root = root[7:]
 
         if not conntype:
             # :ext:user@host/home/user/path/to/cvsroot
-            if root.startswith(":ext:"):
+            if root.startswith(b":ext:"):
                 root = root[5:]
             m = re.match(br'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
             # Do not take a Windows path like "c:\foo\bar" for a connection string
             if os.path.isdir(root) or not m:
-                conntype = "local"
+                conntype = b"local"
             else:
-                conntype = "rsh"
+                conntype = b"rsh"
                 user, host, root = m.group(1), m.group(2), m.group(3)
 
-        if conntype != "pserver":
-            if conntype == "rsh":
-                rsh = encoding.environ.get("CVS_RSH") or "ssh"
+        if conntype != b"pserver":
+            if conntype == b"rsh":
+                rsh = encoding.environ.get(b"CVS_RSH") or b"ssh"
                 if user:
-                    cmd = [rsh, '-l', user, host] + cmd
+                    cmd = [rsh, b'-l', user, host] + cmd
                 else:
                     cmd = [rsh, host] + cmd
 
             # popen2 does not support argument lists under Windows
             cmd = [procutil.shellquote(arg) for arg in cmd]
-            cmd = procutil.quotecommand(' '.join(cmd))
+            cmd = procutil.quotecommand(b' '.join(cmd))
             self.writep, self.readp = procutil.popen2(cmd)
 
         self.realroot = root
 
-        self.writep.write("Root %s\n" % root)
-        self.writep.write("Valid-responses ok error Valid-requests Mode"
-                          " M Mbinary E Checked-in Created Updated"
-                          " Merged Removed\n")
-        self.writep.write("valid-requests\n")
+        self.writep.write(b"Root %s\n" % root)
+        self.writep.write(
+            b"Valid-responses ok error Valid-requests Mode"
+            b" M Mbinary E Checked-in Created Updated"
+            b" Merged Removed\n"
+        )
+        self.writep.write(b"valid-requests\n")
         self.writep.flush()
         r = self.readp.readline()
-        if not r.startswith("Valid-requests"):
-            raise error.Abort(_('unexpected response from CVS server '
-                               '(expected "Valid-requests", but got %r)')
-                             % r)
-        if "UseUnchanged" in r:
-            self.writep.write("UseUnchanged\n")
+        if not r.startswith(b"Valid-requests"):
+            raise error.Abort(
+                _(
+                    b'unexpected response from CVS server '
+                    b'(expected "Valid-requests", but got %r)'
+                )
+                % r
+            )
+        if b"UseUnchanged" in r:
+            self.writep.write(b"UseUnchanged\n")
             self.writep.flush()
             self.readp.readline()
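
The pserver handshake above is line-oriented; a hedged transcript of
a successful anonymous login (root and host invented), where ``A`` is
the scrambled-password placeholder used above when no ``.cvspass``
entry matches::

   BEGIN AUTH REQUEST
   /cvsroot
   anonymous
   A
   END AUTH REQUEST
                           <- I LOVE YOU

Anything other than ``I LOVE YOU`` is treated as an authentication
failure, as checked above.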
 
@@ -225,7 +261,6 @@
         return self.heads
 
     def getfile(self, name, rev):
-
         def chunkedread(fp, count):
             # file-objects returned by socket.makefile() do not handle
             # large read() requests very well.
@@ -234,57 +269,58 @@
             while count > 0:
                 data = fp.read(min(count, chunksize))
                 if not data:
-                    raise error.Abort(_("%d bytes missing from remote file")
-                                     % count)
+                    raise error.Abort(
+                        _(b"%d bytes missing from remote file") % count
+                    )
                 count -= len(data)
                 output.write(data)
             return output.getvalue()
 
         self._parse()
-        if rev.endswith("(DEAD)"):
+        if rev.endswith(b"(DEAD)"):
             return None, None
 
-        args = ("-N -P -kk -r %s --" % rev).split()
-        args.append(self.cvsrepo + '/' + name)
+        args = (b"-N -P -kk -r %s --" % rev).split()
+        args.append(self.cvsrepo + b'/' + name)
         for x in args:
-            self.writep.write("Argument %s\n" % x)
-        self.writep.write("Directory .\n%s\nco\n" % self.realroot)
+            self.writep.write(b"Argument %s\n" % x)
+        self.writep.write(b"Directory .\n%s\nco\n" % self.realroot)
         self.writep.flush()
 
-        data = ""
+        data = b""
         mode = None
         while True:
             line = self.readp.readline()
-            if line.startswith("Created ") or line.startswith("Updated "):
-                self.readp.readline() # path
-                self.readp.readline() # entries
+            if line.startswith(b"Created ") or line.startswith(b"Updated "):
+                self.readp.readline()  # path
+                self.readp.readline()  # entries
                 mode = self.readp.readline()[:-1]
                 count = int(self.readp.readline()[:-1])
                 data = chunkedread(self.readp, count)
-            elif line.startswith(" "):
+            elif line.startswith(b" "):
                 data += line[1:]
-            elif line.startswith("M "):
+            elif line.startswith(b"M "):
                 pass
-            elif line.startswith("Mbinary "):
+            elif line.startswith(b"Mbinary "):
                 count = int(self.readp.readline()[:-1])
                 data = chunkedread(self.readp, count)
             else:
-                if line == "ok\n":
+                if line == b"ok\n":
                     if mode is None:
-                        raise error.Abort(_('malformed response from CVS'))
-                    return (data, "x" in mode and "x" or "")
-                elif line.startswith("E "):
-                    self.ui.warn(_("cvs server: %s\n") % line[2:])
-                elif line.startswith("Remove"):
+                        raise error.Abort(_(b'malformed response from CVS'))
+                    return (data, b"x" in mode and b"x" or b"")
+                elif line.startswith(b"E "):
+                    self.ui.warn(_(b"cvs server: %s\n") % line[2:])
+                elif line.startswith(b"Remove"):
                     self.readp.readline()
                 else:
-                    raise error.Abort(_("unknown CVS response: %s") % line)
+                    raise error.Abort(_(b"unknown CVS response: %s") % line)
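 
For reference, the loop above interprets the cvsclient response stream; a condensed summary of the framing it relies on (as implemented here, not a full protocol description):

    # Responses consumed while checking out one file over the CVS wire:
    #   b"Created ..." / b"Updated ..." -> path line, entries line, mode
    #                                      line, byte-count line, payload
    #   b"Mbinary ..."                  -> byte-count line, then payload
    #   b" <text>"                      -> continuation, appended to data
    #   b"E <text>"                     -> diagnostic, echoed via ui.warn
    #   b"Remove..."                    -> one extra line to consume
    #   b"ok"                           -> successful end of the command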
 
     def getchanges(self, rev, full):
         if full:
-            raise error.Abort(_("convert from cvs does not support --full"))
+            raise error.Abort(_(b"convert from cvs does not support --full"))
         self._parse()
-        return sorted(self.files[rev].iteritems()), {}, set()
+        return sorted(pycompat.iteritems(self.files[rev])), {}, set()
 
     def getcommit(self, rev):
         self._parse()
--- a/hgext/convert/cvsps.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/cvsps.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,6 +11,7 @@
 import re
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     encoding,
     error,
@@ -26,6 +27,7 @@
 
 pickle = util.pickle
 
+
 class logentry(object):
     '''Class logentry has the following attributes:
         .author    - author name as CVS knows it
@@ -46,17 +48,22 @@
                       rlog output) or None
         .branchpoints - the branches that start at the current entry or empty
     '''
+
     def __init__(self, **entries):
         self.synthetic = False
         self.__dict__.update(entries)
 
     def __repr__(self):
-        items = (r"%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
-        return r"%s(%s)"%(type(self).__name__, r", ".join(items))
+        items = (
+            r"%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)
+        )
+        return r"%s(%s)" % (type(self).__name__, r", ".join(items))
+
 
 class logerror(Exception):
     pass
 
+
 def getrepopath(cvspath):
     """Return the repository path from a CVS path.
 
@@ -86,83 +93,91 @@
     # of the '/' char after the '@' is located. The solution is the rest of the
     # string after that '/' sign including it
 
-    parts = cvspath.split(':')
-    atposition = parts[-1].find('@')
+    parts = cvspath.split(b':')
+    atposition = parts[-1].find(b'@')
     start = 0
 
     if atposition != -1:
         start = atposition
 
-    repopath = parts[-1][parts[-1].find('/', start):]
+    repopath = parts[-1][parts[-1].find(b'/', start) :]
     return repopath
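 
Two behaviour examples for getrepopath, following the parsing rules described above (the CVSROOTs are hypothetical):

    # getrepopath(b':pserver:user@server:/path/to/repository')
    #     -> b'/path/to/repository'
    # getrepopath(b':pserver:user@server:80/path/to/repository')
    #     -> b'/path/to/repository'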
 
-def createlog(ui, directory=None, root="", rlog=True, cache=None):
+
+def createlog(ui, directory=None, root=b"", rlog=True, cache=None):
     '''Collect the CVS rlog'''
 
     # Because we store many duplicate commit log messages, reusing strings
     # saves a lot of memory and pickle storage space.
     _scache = {}
+
     def scache(s):
-        "return a shared version of a string"
+        b"return a shared version of a string"
         return _scache.setdefault(s, s)
 
-    ui.status(_('collecting CVS rlog\n'))
+    ui.status(_(b'collecting CVS rlog\n'))
 
-    log = []      # list of logentry objects containing the CVS state
+    log = []  # list of logentry objects containing the CVS state
 
     # patterns to match in CVS (r)log output, by state of use
     re_00 = re.compile(b'RCS file: (.+)$')
     re_01 = re.compile(b'cvs \\[r?log aborted\\]: (.+)$')
     re_02 = re.compile(b'cvs (r?log|server): (.+)\n$')
-    re_03 = re.compile(b"(Cannot access.+CVSROOT)|"
-                       b"(can't create temporary directory.+)$")
+    re_03 = re.compile(
+        b"(Cannot access.+CVSROOT)|(can't create temporary directory.+)$"
+    )
     re_10 = re.compile(b'Working file: (.+)$')
     re_20 = re.compile(b'symbolic names:')
     re_30 = re.compile(b'\t(.+): ([\\d.]+)$')
     re_31 = re.compile(b'----------------------------$')
-    re_32 = re.compile(b'======================================='
-                       b'======================================$')
+    re_32 = re.compile(
+        b'======================================='
+        b'======================================$'
+    )
     re_50 = re.compile(br'revision ([\d.]+)(\s+locked by:\s+.+;)?$')
-    re_60 = re.compile(br'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
-                       br'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
-                       br'(\s+commitid:\s+([^;]+);)?'
-                       br'(.*mergepoint:\s+([^;]+);)?')
+    re_60 = re.compile(
+        br'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
+        br'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
+        br'(\s+commitid:\s+([^;]+);)?'
+        br'(.*mergepoint:\s+([^;]+);)?'
+    )
     re_70 = re.compile(b'branches: (.+);$')
 
     file_added_re = re.compile(br'file [^/]+ was (initially )?added on branch')
 
-    prefix = ''   # leading path to strip of what we get from CVS
+    prefix = b''  # leading path to strip of what we get from CVS
 
     if directory is None:
         # Current working directory
 
         # Get the real directory in the repository
         try:
-            prefix = open(os.path.join('CVS','Repository'), 'rb').read().strip()
+            with open(os.path.join(b'CVS', b'Repository'), b'rb') as f:
+                prefix = f.read().strip()
             directory = prefix
-            if prefix == ".":
-                prefix = ""
+            if prefix == b".":
+                prefix = b""
         except IOError:
-            raise logerror(_('not a CVS sandbox'))
+            raise logerror(_(b'not a CVS sandbox'))
 
         if prefix and not prefix.endswith(pycompat.ossep):
             prefix += pycompat.ossep
 
         # Use the Root file in the sandbox, if it exists
         try:
-            root = open(os.path.join('CVS','Root'), 'rb').read().strip()
+            with open(os.path.join(b'CVS', b'Root'), b'rb') as f:
+                root = f.read().strip()
         except IOError:
             pass
 
     if not root:
-        root = encoding.environ.get('CVSROOT', '')
+        root = encoding.environ.get(b'CVSROOT', b'')
 
     # read log cache if one exists
     oldlog = []
     date = None
 
     if cache:
-        cachedir = os.path.expanduser('~/.hg.cvsps')
+        cachedir = os.path.expanduser(b'~/.hg.cvsps')
         if not os.path.exists(cachedir):
             os.mkdir(cachedir)
 
@@ -175,70 +190,73 @@
         # and
         #    /pserver/user/server/path
         # are mapped to different cache file names.
-        cachefile = root.split(":") + [directory, "cache"]
-        cachefile = ['-'.join(re.findall(br'\w+', s)) for s in cachefile if s]
-        cachefile = os.path.join(cachedir,
-                                 '.'.join([s for s in cachefile if s]))
+        cachefile = root.split(b":") + [directory, b"cache"]
+        cachefile = [b'-'.join(re.findall(br'\w+', s)) for s in cachefile if s]
+        cachefile = os.path.join(
+            cachedir, b'.'.join([s for s in cachefile if s])
+        )
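 
As an illustration with hypothetical values: for root = b':pserver:user@server:/path/to/repository' and directory = b'mod', the non-empty components are reduced by the \w+ filter and joined, yielding:

    # cachefile == os.path.join(
    #     cachedir, b'pserver.user-server.path-to-repository.mod.cache')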
 
-    if cache == 'update':
+    if cache == b'update':
         try:
-            ui.note(_('reading cvs log cache %s\n') % cachefile)
-            oldlog = pickle.load(open(cachefile, 'rb'))
+            ui.note(_(b'reading cvs log cache %s\n') % cachefile)
+            with open(cachefile, b'rb') as f:
+                oldlog = pickle.load(f)
             for e in oldlog:
-                if not (util.safehasattr(e, 'branchpoints') and
-                        util.safehasattr(e, 'commitid') and
-                        util.safehasattr(e, 'mergepoint')):
-                    ui.status(_('ignoring old cache\n'))
+                if not (
+                    util.safehasattr(e, b'branchpoints')
+                    and util.safehasattr(e, b'commitid')
+                    and util.safehasattr(e, b'mergepoint')
+                ):
+                    ui.status(_(b'ignoring old cache\n'))
                     oldlog = []
                     break
 
-            ui.note(_('cache has %d log entries\n') % len(oldlog))
+            ui.note(_(b'cache has %d log entries\n') % len(oldlog))
         except Exception as e:
-            ui.note(_('error reading cache: %r\n') % e)
+            ui.note(_(b'error reading cache: %r\n') % e)
 
         if oldlog:
-            date = oldlog[-1].date    # last commit date as a (time,tz) tuple
-            date = dateutil.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
+            date = oldlog[-1].date  # last commit date as a (time,tz) tuple
+            date = dateutil.datestr(date, b'%Y/%m/%d %H:%M:%S %1%2')
 
     # build the CVS commandline
-    cmd = ['cvs', '-q']
+    cmd = [b'cvs', b'-q']
     if root:
-        cmd.append('-d%s' % root)
+        cmd.append(b'-d%s' % root)
         p = util.normpath(getrepopath(root))
-        if not p.endswith('/'):
-            p += '/'
+        if not p.endswith(b'/'):
+            p += b'/'
         if prefix:
             # looks like normpath replaces "" by "."
             prefix = p + util.normpath(prefix)
         else:
             prefix = p
-    cmd.append(['log', 'rlog'][rlog])
+    cmd.append([b'log', b'rlog'][rlog])
     if date:
         # no space between option and date string
-        cmd.append('-d>%s' % date)
+        cmd.append(b'-d>%s' % date)
     cmd.append(directory)
 
     # state machine begins here
-    tags = {}     # dictionary of revisions on current file with their tags
-    branchmap = {} # mapping between branch names and revision numbers
+    tags = {}  # dictionary of revisions on current file with their tags
+    branchmap = {}  # mapping between branch names and revision numbers
     rcsmap = {}
     state = 0
-    store = False # set when a new record can be appended
+    store = False  # set when a new record can be appended
 
     cmd = [procutil.shellquote(arg) for arg in cmd]
-    ui.note(_("running %s\n") % (' '.join(cmd)))
-    ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
+    ui.note(_(b"running %s\n") % (b' '.join(cmd)))
+    ui.debug(b"prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
 
-    pfp = procutil.popen(' '.join(cmd), 'rb')
+    pfp = procutil.popen(b' '.join(cmd), b'rb')
     peek = util.fromnativeeol(pfp.readline())
     while True:
         line = peek
-        if line == '':
+        if line == b'':
             break
         peek = util.fromnativeeol(pfp.readline())
-        if line.endswith('\n'):
+        if line.endswith(b'\n'):
             line = line[:-1]
-        #ui.debug('state=%d line=%r\n' % (state, line))
+        # ui.debug('state=%d line=%r\n' % (state, line))
 
         if state == 0:
             # initial state, consume input until we see 'RCS file'
@@ -249,13 +267,13 @@
                 if rlog:
                     filename = util.normpath(rcs[:-2])
                     if filename.startswith(prefix):
-                        filename = filename[len(prefix):]
-                    if filename.startswith('/'):
+                        filename = filename[len(prefix) :]
+                    if filename.startswith(b'/'):
                         filename = filename[1:]
-                    if filename.startswith('Attic/'):
+                    if filename.startswith(b'Attic/'):
                         filename = filename[6:]
                     else:
-                        filename = filename.replace('/Attic/', '/')
+                        filename = filename.replace(b'/Attic/', b'/')
                     state = 2
                     continue
                 state = 1
@@ -272,7 +290,7 @@
         elif state == 1:
             # expect 'Working file' (only when using log instead of rlog)
             match = re_10.match(line)
-            assert match, _('RCS file must be followed by working file')
+            assert match, _(b'RCS file must be followed by working file')
             filename = util.normpath(match.group(1))
             state = 2
 
@@ -286,7 +304,7 @@
             # read the symbolic names and store as tags
             match = re_30.match(line)
             if match:
-                rev = [int(x) for x in match.group(2).split('.')]
+                rev = [int(x) for x in match.group(2).split(b'.')]
 
                 # Convert magic branch number to an odd-numbered one
                 revn = len(rev)
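 
For background: CVS records a branch tag for branch 1.2.4 under the "magic" number 1.2.0.4 in the symbolic-names block. A sketch of the usual normalization, assuming the standard magic-branch encoding (illustrative, not the exact code of this hunk):

    rev = [1, 2, 0, 4]                # magic branch number from the tag
    if len(rev) > 3 and len(rev) % 2 == 0 and rev[-2] == 0:
        rev = rev[:-2] + rev[-1:]     # -> [1, 2, 4], the real branch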
@@ -309,44 +327,51 @@
             if re_31.match(line):
                 state = 5
             else:
-                assert not re_32.match(line), _('must have at least '
-                                                'some revisions')
+                assert not re_32.match(line), _(
+                    b'must have at least some revisions'
+                )
 
         elif state == 5:
             # expecting revision number and possibly (ignored) lock indication
             # we create the logentry here from values stored in states 0 to 4,
             # as this state is re-entered for subsequent revisions of a file.
             match = re_50.match(line)
-            assert match, _('expected revision number')
-            e = logentry(rcs=scache(rcs),
-                         file=scache(filename),
-                         revision=tuple([int(x) for x in
-                                         match.group(1).split('.')]),
-                         branches=[],
-                         parent=None,
-                         commitid=None,
-                         mergepoint=None,
-                         branchpoints=set())
+            assert match, _(b'expected revision number')
+            e = logentry(
+                rcs=scache(rcs),
+                file=scache(filename),
+                revision=tuple([int(x) for x in match.group(1).split(b'.')]),
+                branches=[],
+                parent=None,
+                commitid=None,
+                mergepoint=None,
+                branchpoints=set(),
+            )
 
             state = 6
 
         elif state == 6:
             # expecting date, author, state, lines changed
             match = re_60.match(line)
-            assert match, _('revision must be followed by date line')
+            assert match, _(b'revision must be followed by date line')
             d = match.group(1)
-            if d[2] == '/':
+            if d[2:3] == b'/':  # slice: indexing bytes yields an int on py3
                 # Y2K
-                d = '19' + d
+                d = b'19' + d
 
             if len(d.split()) != 3:
                 # cvs log dates always in GMT
-                d = d + ' UTC'
-            e.date = dateutil.parsedate(d, ['%y/%m/%d %H:%M:%S',
-                                        '%Y/%m/%d %H:%M:%S',
-                                        '%Y-%m-%d %H:%M:%S'])
+                d = d + b' UTC'
+            e.date = dateutil.parsedate(
+                d,
+                [
+                    b'%y/%m/%d %H:%M:%S',
+                    b'%Y/%m/%d %H:%M:%S',
+                    b'%Y-%m-%d %H:%M:%S',
+                ],
+            )
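 
For example (hypothetical rlog line): a two-digit-year date is expanded and, lacking a timezone field, suffixed before parsing:

    # b'99/10/05 12:34:56' -> b'1999/10/05 12:34:56 UTC'
    #   then matched by the b'%Y/%m/%d %H:%M:%S' format above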
             e.author = scache(match.group(2))
-            e.dead = match.group(3).lower() == 'dead'
+            e.dead = match.group(3).lower() == b'dead'
 
             if match.group(5):
                 if match.group(6):
@@ -358,18 +383,19 @@
             else:
                 e.lines = None
 
-            if match.group(7): # cvs 1.12 commitid
+            if match.group(7):  # cvs 1.12 commitid
                 e.commitid = match.group(8)
 
-            if match.group(9): # cvsnt mergepoint
-                myrev = match.group(10).split('.')
-                if len(myrev) == 2: # head
-                    e.mergepoint = 'HEAD'
+            if match.group(9):  # cvsnt mergepoint
+                myrev = match.group(10).split(b'.')
+                if len(myrev) == 2:  # head
+                    e.mergepoint = b'HEAD'
                 else:
-                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
+                    myrev = b'.'.join(myrev[:-2] + [b'0', myrev[-2]])
                     branches = [b for b in branchmap if branchmap[b] == myrev]
-                    assert len(branches) == 1, ('unknown branch: %s'
-                                                % e.mergepoint)
+                    # e.mergepoint is not set yet at this point; report the
+                    # revision that failed to map instead
+                    assert len(branches) == 1, b'unknown branch: %s' % myrev
                     e.mergepoint = branches[0]
 
             e.comment = []
@@ -380,8 +406,10 @@
             # or store the commit log message otherwise
             m = re_70.match(line)
             if m:
-                e.branches = [tuple([int(y) for y in x.strip().split('.')])
-                                for x in m.group(1).split(';')]
+                e.branches = [
+                    tuple([int(y) for y in x.strip().split(b'.')])
+                    for x in m.group(1).split(b';')
+                ]
                 state = 8
             elif re_31.match(line) and re_50.match(peek):
                 state = 5
@@ -396,7 +424,7 @@
             # store commit log message
             if re_31.match(line):
                 cpeek = peek
-                if cpeek.endswith('\n'):
+                if cpeek.endswith(b'\n'):
                     cpeek = cpeek[:-1]
                 if re_50.match(cpeek):
                     state = 5
@@ -416,20 +444,23 @@
         # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
         # these revisions now, but mark them synthetic so
         # createchangeset() can take care of them.
-        if (store and
-              e.dead and
-              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
-              len(e.comment) == 1 and
-              file_added_re.match(e.comment[0])):
-            ui.debug('found synthetic revision in %s: %r\n'
-                     % (e.rcs, e.comment[0]))
+        if (
+            store
+            and e.dead
+            and e.revision[-1] == 1  # 1.1 or 1.1.x.1
+            and len(e.comment) == 1
+            and file_added_re.match(e.comment[0])
+        ):
+            ui.debug(
+                b'found synthetic revision in %s: %r\n' % (e.rcs, e.comment[0])
+            )
             e.synthetic = True
 
         if store:
             # clean up the results and save in the log.
             store = False
             e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
-            e.comment = scache('\n'.join(e.comment))
+            e.comment = scache(b'\n'.join(e.comment))
 
             revn = len(e.revision)
             if revn > 3 and (revn % 2) == 0:
@@ -439,33 +470,35 @@
 
             # find the branches starting from this revision
             branchpoints = set()
-            for branch, revision in branchmap.iteritems():
-                revparts = tuple([int(i) for i in revision.split('.')])
-                if len(revparts) < 2: # bad tags
+            for branch, revision in pycompat.iteritems(branchmap):
+                revparts = tuple([int(i) for i in revision.split(b'.')])
+                if len(revparts) < 2:  # bad tags
                     continue
                 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
                     # normal branch
                     if revparts[:-2] == e.revision:
                         branchpoints.add(branch)
-                elif revparts == (1, 1, 1): # vendor branch
+                elif revparts == (1, 1, 1):  # vendor branch
                     if revparts in e.branches:
                         branchpoints.add(branch)
             e.branchpoints = branchpoints
 
             log.append(e)
 
-            rcsmap[e.rcs.replace('/Attic/', '/')] = e.rcs
+            rcsmap[e.rcs.replace(b'/Attic/', b'/')] = e.rcs
 
             if len(log) % 100 == 0:
-                ui.status(stringutil.ellipsis('%d %s' % (len(log), e.file), 80)
-                          + '\n')
+                ui.status(
+                    stringutil.ellipsis(b'%d %s' % (len(log), e.file), 80)
+                    + b'\n'
+                )
 
     log.sort(key=lambda x: (x.rcs, x.revision))
 
     # find parent revisions of individual files
     versions = {}
     for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
-        rcs = e.rcs.replace('/Attic/', '/')
+        rcs = e.rcs.replace(b'/Attic/', b'/')
         if rcs in rcsmap:
             e.rcs = rcsmap[rcs]
         branch = e.revision[:-1]
@@ -486,50 +519,64 @@
             log.sort(key=lambda x: x.date)
 
             if oldlog and oldlog[-1].date >= log[0].date:
-                raise logerror(_('log cache overlaps with new log entries,'
-                                 ' re-run without cache.'))
+                raise logerror(
+                    _(
+                        b'log cache overlaps with new log entries,'
+                        b' re-run without cache.'
+                    )
+                )
 
             log = oldlog + log
 
             # write the new cachefile
-            ui.note(_('writing cvs log cache %s\n') % cachefile)
-            pickle.dump(log, open(cachefile, 'wb'))
+            ui.note(_(b'writing cvs log cache %s\n') % cachefile)
+            with open(cachefile, b'wb') as f:
+                pickle.dump(log, f)
         else:
             log = oldlog
 
-    ui.status(_('%d log entries\n') % len(log))
+    ui.status(_(b'%d log entries\n') % len(log))
 
-    encodings = ui.configlist('convert', 'cvsps.logencoding')
+    encodings = ui.configlist(b'convert', b'cvsps.logencoding')
     if encodings:
+
         def revstr(r):
             # this is needed, because logentry.revision is a tuple of "int"
             # (e.g. (1, 2) for "1.2")
-            return '.'.join(pycompat.maplist(pycompat.bytestr, r))
+            return b'.'.join(pycompat.maplist(pycompat.bytestr, r))
 
         for entry in log:
             comment = entry.comment
             for e in encodings:
                 try:
-                    entry.comment = comment.decode(
-                        pycompat.sysstr(e)).encode('utf-8')
+                    entry.comment = comment.decode(pycompat.sysstr(e)).encode(
+                        'utf-8'
+                    )
                     if ui.debugflag:
-                        ui.debug("transcoding by %s: %s of %s\n" %
-                                 (e, revstr(entry.revision), entry.file))
+                        ui.debug(
+                            b"transcoding by %s: %s of %s\n"
+                            % (e, revstr(entry.revision), entry.file)
+                        )
                     break
                 except UnicodeDecodeError:
-                    pass # try next encoding
-                except LookupError as inst: # unknown encoding, maybe
-                    raise error.Abort(inst,
-                                      hint=_('check convert.cvsps.logencoding'
-                                             ' configuration'))
+                    pass  # try next encoding
+                except LookupError as inst:  # unknown encoding, maybe
+                    raise error.Abort(
+                        inst,
+                        hint=_(
+                            b'check convert.cvsps.logencoding configuration'
+                        ),
+                    )
             else:
-                raise error.Abort(_("no encoding can transcode"
-                                    " CVS log message for %s of %s")
-                                  % (revstr(entry.revision), entry.file),
-                                  hint=_('check convert.cvsps.logencoding'
-                                         ' configuration'))
+                raise error.Abort(
+                    _(
+                        b"no encoding can transcode"
+                        b" CVS log message for %s of %s"
+                    )
+                    % (revstr(entry.revision), entry.file),
+                    hint=_(b'check convert.cvsps.logencoding configuration'),
+                )
 
-    hook.hook(ui, None, "cvslog", True, log=log)
+    hook.hook(ui, None, b"cvslog", True, log=log)
 
     return log
 
@@ -549,19 +596,23 @@
         .mergepoint- the branch that has been merged from or None
         .branchpoints- the branches that start at the current entry or empty
     '''
+
     def __init__(self, **entries):
         self.id = None
         self.synthetic = False
         self.__dict__.update(entries)
 
     def __repr__(self):
-        items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
-        return "%s(%s)"%(type(self).__name__, ", ".join(items))
+        # __repr__ must return a native str; mirror logentry.__repr__ above
+        items = (
+            r"%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)
+        )
+        return r"%s(%s)" % (type(self).__name__, r", ".join(items))
+
 
 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
     '''Convert log into changesets.'''
 
-    ui.status(_('creating changesets\n'))
+    ui.status(_(b'creating changesets\n'))
 
     # try to order commitids by date
     mindate = {}
@@ -573,9 +624,17 @@
                 mindate[e.commitid] = min(e.date, mindate[e.commitid])
 
     # Merge changesets
-    log.sort(key=lambda x: (mindate.get(x.commitid, (-1, 0)),
-                            x.commitid or '', x.comment,
-                            x.author, x.branch or '', x.date, x.branchpoints))
+    log.sort(
+        key=lambda x: (
+            mindate.get(x.commitid, (-1, 0)),
+            x.commitid or b'',
+            x.comment,
+            x.author,
+            x.branch or b'',
+            x.date,
+            x.branchpoints,
+        )
+    )
 
     changesets = []
     files = set()
@@ -598,32 +657,45 @@
         # first changeset and bar the next and MYBRANCH and MYBRANCH2
         # should both start off of the bar changeset. No provisions are
         # made to ensure that this is, in fact, what happens.
-        if not (c and e.branchpoints == c.branchpoints and
-                (# cvs commitids
-                 (e.commitid is not None and e.commitid == c.commitid) or
-                 (# no commitids, use fuzzy commit detection
-                  (e.commitid is None or c.commitid is None) and
-                   e.comment == c.comment and
-                   e.author == c.author and
-                   e.branch == c.branch and
-                   ((c.date[0] + c.date[1]) <=
-                    (e.date[0] + e.date[1]) <=
-                    (c.date[0] + c.date[1]) + fuzz) and
-                   e.file not in files))):
-            c = changeset(comment=e.comment, author=e.author,
-                          branch=e.branch, date=e.date,
-                          entries=[], mergepoint=e.mergepoint,
-                          branchpoints=e.branchpoints, commitid=e.commitid)
+        if not (
+            c
+            and e.branchpoints == c.branchpoints
+            and (  # cvs commitids
+                (e.commitid is not None and e.commitid == c.commitid)
+                or (  # no commitids, use fuzzy commit detection
+                    (e.commitid is None or c.commitid is None)
+                    and e.comment == c.comment
+                    and e.author == c.author
+                    and e.branch == c.branch
+                    and (
+                        (c.date[0] + c.date[1])
+                        <= (e.date[0] + e.date[1])
+                        <= (c.date[0] + c.date[1]) + fuzz
+                    )
+                    and e.file not in files
+                )
+            )
+        ):
+            c = changeset(
+                comment=e.comment,
+                author=e.author,
+                branch=e.branch,
+                date=e.date,
+                entries=[],
+                mergepoint=e.mergepoint,
+                branchpoints=e.branchpoints,
+                commitid=e.commitid,
+            )
             changesets.append(c)
 
             files = set()
             if len(changesets) % 100 == 0:
-                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
-                ui.status(stringutil.ellipsis(t, 80) + '\n')
+                t = b'%d %s' % (len(changesets), repr(e.comment)[1:-1])
+                ui.status(stringutil.ellipsis(t, 80) + b'\n')
 
         c.entries.append(e)
         files.add(e.file)
-        c.date = e.date       # changeset date is date of latest commit in it
+        c.date = e.date  # changeset date is date of latest commit in it
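 
The condition above reads: keep accumulating into the current changeset c only while branchpoints match and either the commitids agree or, absent commitids, comment, author, and branch agree, the date lands in the fuzz window, and the file is not yet in the changeset. A reduced sketch of the fuzzy branch (illustrative; dates are (unixtime, tzoffset) pairs as elsewhere in this module):

    def fuzzymatch(c, e, files, fuzz=60):
        cdate = c.date[0] + c.date[1]
        edate = e.date[0] + e.date[1]
        return (e.comment == c.comment
                and e.author == c.author
                and e.branch == c.branch
                and cdate <= edate <= cdate + fuzz
                and e.file not in files)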
 
     # Mark synthetic changesets
 
@@ -641,9 +713,9 @@
     # Sort files in each changeset
 
     def entitycompare(l, r):
-        'Mimic cvsps sorting order'
-        l = l.file.split('/')
-        r = r.file.split('/')
+        'Mimic cvsps sorting order'
+        l = l.file.split(b'/')
+        r = r.file.split(b'/')
         nl = len(l)
         nr = len(r)
         n = min(nl, nr)
@@ -664,6 +736,7 @@
     # Sort changesets by date
 
     odd = set()
+
     def cscmp(l, r):
         d = sum(l.date) - sum(r.date)
         if d:
@@ -744,8 +817,8 @@
     if mergefrom:
         mergefrom = re.compile(mergefrom)
 
-    versions = {}    # changeset index where we saw any particular file version
-    branches = {}    # changeset index where we saw a branch
+    versions = {}  # changeset index where we saw any particular file version
+    branches = {}  # changeset index where we saw a branch
     n = len(changesets)
     i = 0
     while i < n:
@@ -776,8 +849,9 @@
 
             # Ensure no changeset has a synthetic changeset as a parent.
             while p.synthetic:
-                assert len(p.parents) <= 1, (
-                       _('synthetic changeset cannot have multiple parents'))
+                assert len(p.parents) <= 1, _(
+                    b'synthetic changeset cannot have multiple parents'
+                )
                 if p.parents:
                     p = p.parents[0]
                 else:
@@ -788,7 +862,7 @@
                 c.parents.append(p)
 
         if c.mergepoint:
-            if c.mergepoint == 'HEAD':
+            if c.mergepoint == b'HEAD':
                 c.mergepoint = None
             c.parents.append(changesets[branches[c.mergepoint]])
 
@@ -796,14 +870,18 @@
             m = mergefrom.search(c.comment)
             if m:
                 m = m.group(1)
-                if m == 'HEAD':
+                if m == b'HEAD':
                     m = None
                 try:
                     candidate = changesets[branches[m]]
                 except KeyError:
-                    ui.warn(_("warning: CVS commit message references "
-                              "non-existent branch %r:\n%s\n")
-                            % (pycompat.bytestr(m), c.comment))
+                    ui.warn(
+                        _(
+                            b"warning: CVS commit message references "
+                            b"non-existent branch %r:\n%s\n"
+                        )
+                        % (pycompat.bytestr(m), c.comment)
+                    )
                 if m in branches and c.branch != m and not candidate.synthetic:
                     c.parents.append(candidate)
 
@@ -812,18 +890,22 @@
             if m:
                 if m.groups():
                     m = m.group(1)
-                    if m == 'HEAD':
+                    if m == b'HEAD':
                         m = None
                 else:
-                    m = None   # if no group found then merge to HEAD
+                    m = None  # if no group found then merge to HEAD
                 if m in branches and c.branch != m:
                     # insert empty changeset for merge
                     cc = changeset(
-                        author=c.author, branch=m, date=c.date,
-                        comment='convert-repo: CVS merge from branch %s'
+                        author=c.author,
+                        branch=m,
+                        date=c.date,
+                        comment=b'convert-repo: CVS merge from branch %s'
                         % c.branch,
-                        entries=[], tags=[],
-                        parents=[changesets[branches[m]], c])
+                        entries=[],
+                        tags=[],
+                        parents=[changesets[branches[m]], c],
+                    )
                     changesets.insert(i + 1, cc)
                     branches[m] = i + 1
 
@@ -852,12 +934,14 @@
     if odd:
         for l, r in odd:
             if l.id is not None and r.id is not None:
-                ui.warn(_('changeset %d is both before and after %d\n')
-                        % (l.id, r.id))
+                ui.warn(
+                    _(b'changeset %d is both before and after %d\n')
+                    % (l.id, r.id)
+                )
 
-    ui.status(_('%d changeset entries\n') % len(changesets))
+    ui.status(_(b'%d changeset entries\n') % len(changesets))
 
-    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
+    hook.hook(ui, None, b"cvschangesets", True, changesets=changesets)
 
     return changesets
 
@@ -868,98 +952,121 @@
     commit log entries and dates.
     '''
     opts = pycompat.byteskwargs(opts)
-    if opts["new_cache"]:
-        cache = "write"
-    elif opts["update_cache"]:
-        cache = "update"
+    if opts[b"new_cache"]:
+        cache = b"write"
+    elif opts[b"update_cache"]:
+        cache = b"update"
     else:
         cache = None
 
-    revisions = opts["revisions"]
+    revisions = opts[b"revisions"]
 
     try:
         if args:
             log = []
             for d in args:
-                log += createlog(ui, d, root=opts["root"], cache=cache)
+                log += createlog(ui, d, root=opts[b"root"], cache=cache)
         else:
-            log = createlog(ui, root=opts["root"], cache=cache)
+            log = createlog(ui, root=opts[b"root"], cache=cache)
     except logerror as e:
-        ui.write("%r\n"%e)
+        ui.write(b"%r\n" % e)
         return
 
-    changesets = createchangeset(ui, log, opts["fuzz"])
+    changesets = createchangeset(ui, log, opts[b"fuzz"])
     del log
 
     # Print changesets (optionally filtered)
 
     off = len(revisions)
-    branches = {}    # latest version number in each branch
-    ancestors = {}   # parent branch
+    branches = {}  # latest version number in each branch
+    ancestors = {}  # parent branch
     for cs in changesets:
 
-        if opts["ancestors"]:
+        if opts[b"ancestors"]:
             if cs.branch not in branches and cs.parents and cs.parents[0].id:
-                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
-                                        cs.parents[0].id)
+                ancestors[cs.branch] = (
+                    changesets[cs.parents[0].id - 1].branch,
+                    cs.parents[0].id,
+                )
             branches[cs.branch] = cs.id
 
         # limit by branches
-        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
+        if (
+            opts[b"branches"]
+            and (cs.branch or b'HEAD') not in opts[b"branches"]
+        ):
             continue
 
         if not off:
             # Note: trailing spaces on several lines here are needed to have
             #       bug-for-bug compatibility with cvsps.
-            ui.write('---------------------\n')
-            ui.write(('PatchSet %d \n' % cs.id))
-            ui.write(('Date: %s\n' % dateutil.datestr(cs.date,
-                                                 '%Y/%m/%d %H:%M:%S %1%2')))
-            ui.write(('Author: %s\n' % cs.author))
-            ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
-            ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
-                                  ','.join(cs.tags) or '(none)')))
+            ui.write(b'---------------------\n')
+            ui.write((b'PatchSet %d \n' % cs.id))
+            ui.write(
+                (
+                    b'Date: %s\n'
+                    % dateutil.datestr(cs.date, b'%Y/%m/%d %H:%M:%S %1%2')
+                )
+            )
+            ui.write((b'Author: %s\n' % cs.author))
+            ui.write((b'Branch: %s\n' % (cs.branch or b'HEAD')))
+            ui.write(
+                (
+                    b'Tag%s: %s \n'
+                    % (
+                        [b'', b's'][len(cs.tags) > 1],
+                        b','.join(cs.tags) or b'(none)',
+                    )
+                )
+            )
             if cs.branchpoints:
-                ui.write(('Branchpoints: %s \n') %
-                         ', '.join(sorted(cs.branchpoints)))
-            if opts["parents"] and cs.parents:
+                ui.writenoi18n(
+                    b'Branchpoints: %s \n' % b', '.join(sorted(cs.branchpoints))
+                )
+            if opts[b"parents"] and cs.parents:
                 if len(cs.parents) > 1:
-                    ui.write(('Parents: %s\n' %
-                             (','.join([(b"%d" % p.id) for p in cs.parents]))))
+                    ui.write(
+                        (
+                            b'Parents: %s\n'
+                            % (b','.join([(b"%d" % p.id) for p in cs.parents]))
+                        )
+                    )
                 else:
-                    ui.write(('Parent: %d\n' % cs.parents[0].id))
+                    ui.write((b'Parent: %d\n' % cs.parents[0].id))
 
-            if opts["ancestors"]:
+            if opts[b"ancestors"]:
                 b = cs.branch
                 r = []
                 while b:
                     b, c = ancestors[b]
-                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
+                    r.append(b'%s:%d:%d' % (b or b"HEAD", c, branches[b]))
                 if r:
-                    ui.write(('Ancestors: %s\n' % (','.join(r))))
+                    ui.write((b'Ancestors: %s\n' % (b','.join(r))))
 
-            ui.write(('Log:\n'))
-            ui.write('%s\n\n' % cs.comment)
-            ui.write(('Members: \n'))
+            ui.writenoi18n(b'Log:\n')
+            ui.write(b'%s\n\n' % cs.comment)
+            ui.writenoi18n(b'Members: \n')
             for f in cs.entries:
                 fn = f.file
-                if fn.startswith(opts["prefix"]):
-                    fn = fn[len(opts["prefix"]):]
-                ui.write('\t%s:%s->%s%s \n' % (
+                if fn.startswith(opts[b"prefix"]):
+                    fn = fn[len(opts[b"prefix"]) :]
+                ui.write(
+                    b'\t%s:%s->%s%s \n'
+                    % (
                         fn,
-                        '.'.join([b"%d" % x for x in f.parent]) or 'INITIAL',
-                        '.'.join([(b"%d" % x) for x in f.revision]),
-                        ['', '(DEAD)'][f.dead]))
-            ui.write('\n')
+                        b'.'.join([b"%d" % x for x in f.parent]) or b'INITIAL',
+                        b'.'.join([(b"%d" % x) for x in f.revision]),
+                        [b'', b'(DEAD)'][f.dead],
+                    )
+                )
+            ui.write(b'\n')
 
         # have we seen the start tag?
         if revisions and off:
-            if (revisions[0] == (b"%d" % cs.id) or
-                revisions[0] in cs.tags):
+            if revisions[0] == (b"%d" % cs.id) or revisions[0] in cs.tags:
                 off = False
 
         # see if we reached the end tag
         if len(revisions) > 1 and not off:
-            if (revisions[1] == (b"%d" % cs.id) or
-                revisions[1] in cs.tags):
+            if revisions[1] == (b"%d" % cs.id) or revisions[1] in cs.tags:
                 break
--- a/hgext/convert/darcs.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/darcs.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,6 +19,7 @@
 )
 from mercurial.utils import dateutil
 from . import common
+
 NoRepo = common.NoRepo
 
 # The naming drift of ElementTree is fun!
@@ -37,28 +38,30 @@
         except ImportError:
             try:
                 import elementtree.ElementTree.ElementTree as ElementTree
-                import elementtree.ElementTree.XMLParser  as XMLParser
+                import elementtree.ElementTree.XMLParser as XMLParser
             except ImportError:
                 pass
 
+
 class darcs_source(common.converter_source, common.commandline):
     def __init__(self, ui, repotype, path, revs=None):
         common.converter_source.__init__(self, ui, repotype, path, revs=revs)
-        common.commandline.__init__(self, ui, 'darcs')
+        common.commandline.__init__(self, ui, b'darcs')
 
         # check for _darcs, ElementTree so that we can easily skip
         # test-convert-darcs if ElementTree is not around
-        if not os.path.exists(os.path.join(path, '_darcs')):
-            raise NoRepo(_("%s does not look like a darcs repository") % path)
+        if not os.path.exists(os.path.join(path, b'_darcs')):
+            raise NoRepo(_(b"%s does not look like a darcs repository") % path)
 
-        common.checktool('darcs')
-        version = self.run0('--version').splitlines()[0].strip()
-        if version < '2.1':
-            raise error.Abort(_('darcs version 2.1 or newer needed (found %r)')
-                              % version)
+        common.checktool(b'darcs')
+        version = self.run0(b'--version').splitlines()[0].strip()
+        if version < b'2.1':
+            raise error.Abort(
+                _(b'darcs version 2.1 or newer needed (found %r)') % version
+            )
 
-        if "ElementTree" not in globals():
-            raise error.Abort(_("Python ElementTree module is not available"))
+        if b"ElementTree" not in globals():
+            raise error.Abort(_(b"Python ElementTree module is not available"))
 
         self.path = os.path.realpath(path)
 
@@ -70,26 +73,33 @@
         # Check darcs repository format
         format = self.format()
         if format:
-            if format in ('darcs-1.0', 'hashed'):
-                raise NoRepo(_("%s repository format is unsupported, "
-                               "please upgrade") % format)
+            if format in (b'darcs-1.0', b'hashed'):
+                raise NoRepo(
+                    _(
+                        b"%s repository format is unsupported, "
+                        b"please upgrade"
+                    )
+                    % format
+                )
         else:
-            self.ui.warn(_('failed to detect repository format!'))
+            self.ui.warn(_(b'failed to detect repository format!'))
 
     def before(self):
         self.tmppath = pycompat.mkdtemp(
-            prefix='convert-' + os.path.basename(self.path) + '-')
-        output, status = self.run('init', repodir=self.tmppath)
+            prefix=b'convert-' + os.path.basename(self.path) + b'-'
+        )
+        output, status = self.run(b'init', repodir=self.tmppath)
         self.checkexit(status)
 
-        tree = self.xml('changes', xml_output=True, summary=True,
-                        repodir=self.path)
+        tree = self.xml(
+            b'changes', xml_output=True, summary=True, repodir=self.path
+        )
         tagname = None
         child = None
-        for elt in tree.findall('patch'):
-            node = elt.get('hash')
-            name = elt.findtext('name', '')
-            if name.startswith('TAG '):
+        for elt in tree.findall(b'patch'):
+            node = elt.get(b'hash')
+            name = elt.findtext(b'name', b'')
+            if name.startswith(b'TAG '):
                 tagname = name[4:].strip()
             elif tagname is not None:
                 self.tags[tagname] = node
@@ -100,7 +110,7 @@
         self.parents[child] = []
 
     def after(self):
-        self.ui.debug('cleaning up %s\n' % self.tmppath)
+        self.ui.debug(b'cleaning up %s\n' % self.tmppath)
         shutil.rmtree(self.tmppath, ignore_errors=True)
 
     def recode(self, s, encoding=None):
@@ -118,7 +128,7 @@
         # While we are decoding the XML as latin-1 to be as liberal as
         # possible, etree will still raise an exception if any
         # non-printable characters are in the XML changelog.
-        parser = XMLParser(encoding='latin-1')
+        # etree's XMLParser expects a native str encoding name
+        parser = XMLParser(encoding='latin-1')
         p = self._run(cmd, **kwargs)
         etree.parse(p.stdout, parser=parser)
         p.wait()
@@ -126,19 +136,20 @@
         return etree.getroot()
 
     def format(self):
-        output, status = self.run('show', 'repo', repodir=self.path)
+        output, status = self.run(b'show', b'repo', repodir=self.path)
         self.checkexit(status)
         m = re.search(r'^\s*Format:\s*(.*)$', output, re.MULTILINE)
         if not m:
             return None
-        return ','.join(sorted(f.strip() for f in m.group(1).split(',')))
+        return b','.join(sorted(f.strip() for f in m.group(1).split(b',')))
 
     def manifest(self):
         man = []
-        output, status = self.run('show', 'files', no_directories=True,
-                                  repodir=self.tmppath)
+        output, status = self.run(
+            b'show', b'files', no_directories=True, repodir=self.tmppath
+        )
         self.checkexit(status)
-        for line in output.split('\n'):
+        for line in output.split(b'\n'):
             path = line[2:]
             if path:
                 man.append(path)
@@ -149,42 +160,49 @@
 
     def getcommit(self, rev):
         elt = self.changes[rev]
-        dateformat = '%a %b %d %H:%M:%S %Z %Y'
-        date = dateutil.strdate(elt.get('local_date'), dateformat)
-        desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
+        dateformat = b'%a %b %d %H:%M:%S %Z %Y'
+        date = dateutil.strdate(elt.get(b'local_date'), dateformat)
+        desc = elt.findtext(b'name') + b'\n' + elt.findtext(b'comment', b'')
         # etree can return unicode objects for name, comment, and author,
         # so recode() is used to ensure str objects are emitted.
-        newdateformat = '%Y-%m-%d %H:%M:%S %1%2'
-        return common.commit(author=self.recode(elt.get('author')),
-                             date=dateutil.datestr(date, newdateformat),
-                             desc=self.recode(desc).strip(),
-                             parents=self.parents[rev])
+        newdateformat = b'%Y-%m-%d %H:%M:%S %1%2'
+        return common.commit(
+            author=self.recode(elt.get(b'author')),
+            date=dateutil.datestr(date, newdateformat),
+            desc=self.recode(desc).strip(),
+            parents=self.parents[rev],
+        )
 
     def pull(self, rev):
-        output, status = self.run('pull', self.path, all=True,
-                                  match='hash %s' % rev,
-                                  no_test=True, no_posthook=True,
-                                  external_merge='/bin/false',
-                                  repodir=self.tmppath)
+        output, status = self.run(
+            b'pull',
+            self.path,
+            all=True,
+            match=b'hash %s' % rev,
+            no_test=True,
+            no_posthook=True,
+            external_merge=b'/bin/false',
+            repodir=self.tmppath,
+        )
         if status:
-            if output.find('We have conflicts in') == -1:
+            if output.find(b'We have conflicts in') == -1:
                 self.checkexit(status, output)
-            output, status = self.run('revert', all=True, repodir=self.tmppath)
+            output, status = self.run(b'revert', all=True, repodir=self.tmppath)
             self.checkexit(status, output)
 
     def getchanges(self, rev, full):
         if full:
-            raise error.Abort(_("convert from darcs does not support --full"))
+            raise error.Abort(_(b"convert from darcs does not support --full"))
         copies = {}
         changes = []
         man = None
-        for elt in self.changes[rev].find('summary').getchildren():
-            if elt.tag in ('add_directory', 'remove_directory'):
+        for elt in self.changes[rev].find(b'summary').getchildren():
+            if elt.tag in (b'add_directory', b'remove_directory'):
                 continue
-            if elt.tag == 'move':
+            if elt.tag == b'move':
                 if man is None:
                     man = self.manifest()
-                source, dest = elt.get('from'), elt.get('to')
+                source, dest = elt.get(b'from'), elt.get(b'to')
                 if source in man:
                     # File move
                     changes.append((source, rev))
@@ -192,11 +210,11 @@
                     copies[dest] = source
                 else:
                     # Directory move, deduce file moves from manifest
-                    source = source + '/'
+                    source = source + b'/'
                     for f in man:
                         if not f.startswith(source):
                             continue
-                        fdest = dest + '/' + f[len(source):]
+                        fdest = dest + b'/' + f[len(source) :]
                         changes.append((f, rev))
                         changes.append((fdest, rev))
                         copies[fdest] = f
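 
Illustration of the directory-move deduction (hypothetical manifest): moving b'a' to b'b' when the manifest holds b'a/x' and b'a/y' records both old and new paths as changed and remembers the copies:

    # changes: (b'a/x', rev), (b'b/x', rev), (b'a/y', rev), (b'b/y', rev)
    # copies:  {b'b/x': b'a/x', b'b/y': b'a/y'}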
@@ -208,7 +226,7 @@
 
     def getfile(self, name, rev):
         if rev != self.lastrev:
-            raise error.Abort(_('internal calling inconsistency'))
+            raise error.Abort(_(b'internal calling inconsistency'))
         path = os.path.join(self.tmppath, name)
         try:
             data = util.readfile(path)
@@ -217,7 +235,7 @@
             if inst.errno == errno.ENOENT:
                 return None, None
             raise
-        mode = (mode & 0o111) and 'x' or ''
+        mode = (mode & 0o111) and b'x' or b''
         return data, mode
 
     def gettags(self):
--- a/hgext/convert/filemap.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/filemap.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,8 +14,10 @@
     pycompat,
 )
 from . import common
+
 SKIPREV = common.SKIPREV
 
+
 def rpairs(path):
     '''Yield tuples with path split at '/', starting with the full path.
     No leading, trailing or double '/', please.
@@ -27,9 +29,10 @@
     '''
     i = len(path)
     while i != -1:
-        yield path[:i], path[i + 1:]
-        i = path.rfind('/', 0, i)
-    yield '.', path
+        yield path[:i], path[i + 1 :]
+        i = path.rfind(b'/', 0, i)
+    yield b'.', path
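 
Example of the traversal order produced by the generator above:

    # list(rpairs(b'foo/bar/baz')) ==
    #     [(b'foo/bar/baz', b''),
    #      (b'foo/bar', b'baz'),
    #      (b'foo', b'bar/baz'),
    #      (b'.', b'foo/bar/baz')]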
+
 
 def normalize(path):
     ''' We use posixpath.normpath to support cross-platform path format.
@@ -38,6 +41,7 @@
         return None
     return posixpath.normpath(path)
 
+
 class filemapper(object):
     '''Map and filter filenames when importing.
     A name can be mapped to itself, a new name, or None (omit from new
@@ -51,50 +55,58 @@
         self.targetprefixes = None
         if path:
             if self.parse(path):
-                raise error.Abort(_('errors in filemap'))
+                raise error.Abort(_(b'errors in filemap'))
 
     def parse(self, path):
         errs = 0
+
         def check(name, mapping, listname):
             if not name:
-                self.ui.warn(_('%s:%d: path to %s is missing\n') %
-                             (lex.infile, lex.lineno, listname))
+                self.ui.warn(
+                    _(b'%s:%d: path to %s is missing\n')
+                    % (lex.infile, lex.lineno, listname)
+                )
                 return 1
             if name in mapping:
-                self.ui.warn(_('%s:%d: %r already in %s list\n') %
-                             (lex.infile, lex.lineno, name, listname))
+                self.ui.warn(
+                    _(b'%s:%d: %r already in %s list\n')
+                    % (lex.infile, lex.lineno, name, listname)
+                )
                 return 1
-            if (name.startswith('/') or
-                name.endswith('/') or
-                '//' in name):
-                self.ui.warn(_('%s:%d: superfluous / in %s %r\n') %
-                             (lex.infile, lex.lineno, listname,
-                              pycompat.bytestr(name)))
+            if name.startswith(b'/') or name.endswith(b'/') or b'//' in name:
+                self.ui.warn(
+                    _(b'%s:%d: superfluous / in %s %r\n')
+                    % (lex.infile, lex.lineno, listname, pycompat.bytestr(name))
+                )
                 return 1
             return 0
+
         lex = common.shlexer(
-            filepath=path, wordchars='!@#$%^&*()-=+[]{}|;:,./<>?')
+            filepath=path, wordchars=b'!@#$%^&*()-=+[]{}|;:,./<>?'
+        )
         cmd = lex.get_token()
         while cmd:
-            if cmd == 'include':
+            if cmd == b'include':
                 name = normalize(lex.get_token())
-                errs += check(name, self.exclude, 'exclude')
+                errs += check(name, self.exclude, b'exclude')
                 self.include[name] = name
-            elif cmd == 'exclude':
+            elif cmd == b'exclude':
                 name = normalize(lex.get_token())
-                errs += check(name, self.include, 'include')
-                errs += check(name, self.rename, 'rename')
+                errs += check(name, self.include, b'include')
+                errs += check(name, self.rename, b'rename')
                 self.exclude[name] = name
-            elif cmd == 'rename':
+            elif cmd == b'rename':
                 src = normalize(lex.get_token())
                 dest = normalize(lex.get_token())
-                errs += check(src, self.exclude, 'exclude')
+                errs += check(src, self.exclude, b'exclude')
                 self.rename[src] = dest
-            elif cmd == 'source':
+            elif cmd == b'source':
                 errs += self.parse(normalize(lex.get_token()))
             else:
-                self.ui.warn(_('%s:%d: unknown directive %r\n') %
-                             (lex.infile, lex.lineno, pycompat.bytestr(cmd)))
+                self.ui.warn(
+                    _(b'%s:%d: unknown directive %r\n')
+                    % (lex.infile, lex.lineno, pycompat.bytestr(cmd))
+                )
                 errs += 1
             cmd = lex.get_token()
         return errs
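 
A small filemap exercising each directive handled above (hypothetical paths; syntax as documented in `hg help convert`):

    include subdir
    exclude subdir/tests
    rename subdir .
    source other.filemap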
@@ -106,7 +118,7 @@
                 return mapping[pre], pre, suf
             except KeyError:
                 pass
-        return '', name, ''
+        return b'', name, b''
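 
This implements a longest-prefix lookup by walking rpairs() from the most specific prefix down; for example (hypothetical mapping):

    # with self.rename == {b'src': b'dst'}:
    #   lookup(b'src/main.c', self.rename)  -> (b'dst', b'src', b'main.c')
    #   lookup(b'unrelated.c', self.rename) -> (b'', b'unrelated.c', b'')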
 
     def istargetfile(self, filename):
         """Return true if the given target filename is covered as a destination
@@ -114,12 +126,12 @@
         repo belong to the source repo and what parts don't."""
         if self.targetprefixes is None:
             self.targetprefixes = set()
-            for before, after in self.rename.iteritems():
+            for before, after in pycompat.iteritems(self.rename):
                 self.targetprefixes.add(after)
 
         # If "." is a target, then all target files are considered from the
         # source.
-        if not self.targetprefixes or '.' in self.targetprefixes:
+        if not self.targetprefixes or b'.' in self.targetprefixes:
             return True
 
         filename = normalize(filename)
@@ -140,23 +152,24 @@
         if self.exclude:
             exc = self.lookup(name, self.exclude)[0]
         else:
-            exc = ''
+            exc = b''
         if (not self.include and exc) or (len(inc) <= len(exc)):
             return None
         newpre, pre, suf = self.lookup(name, self.rename)
         if newpre:
-            if newpre == '.':
+            if newpre == b'.':
                 return suf
             if suf:
-                if newpre.endswith('/'):
+                if newpre.endswith(b'/'):
                     return newpre + suf
-                return newpre + '/' + suf
+                return newpre + b'/' + suf
             return newpre
         return name
 
     def active(self):
         return bool(self.include or self.exclude or self.rename)
 
+
 # This class does two additional things compared to a regular source:
 #
 # - Filter and rename files.  This is mostly wrapped by the filemapper
@@ -171,6 +184,7 @@
 #   touch files we're interested in, but also merges that merge two
 #   or more interesting revisions.
 
+
 class filemap_source(common.converter_source):
     def __init__(self, ui, baseconverter, filemap):
         super(filemap_source, self).__init__(ui, baseconverter.repotype)
@@ -189,8 +203,9 @@
         self.children = {}
         self.seenchildren = {}
         # experimental config: convert.ignoreancestorcheck
-        self.ignoreancestorcheck = self.ui.configbool('convert',
-                                                      'ignoreancestorcheck')
+        self.ignoreancestorcheck = self.ui.configbool(
+            b'convert', b'ignoreancestorcheck'
+        )
 
     def before(self):
         self.base.before()
@@ -241,7 +256,7 @@
                 try:
                     self.origparents[rev] = self.getcommit(rev).parents
                 except error.RepoLookupError:
-                    self.ui.debug("unknown revmap source: %s\n" % rev)
+                    self.ui.debug(b"unknown revmap source: %s\n" % rev)
                     continue
             if arg is not None:
                 self.children[arg] = self.children.get(arg, 0) + 1
@@ -250,7 +265,7 @@
             try:
                 parents = self.origparents[rev]
             except KeyError:
-                continue # unknown revmap source
+                continue  # unknown revmap source
             if wanted:
                 self.mark_wanted(rev, parents)
             else:
@@ -301,7 +316,7 @@
         try:
             files = self.base.getchangedfiles(rev, i)
         except NotImplementedError:
-            raise error.Abort(_("source repository doesn't support --filemap"))
+            raise error.Abort(_(b"source repository doesn't support --filemap"))
         for f in files:
             if self.filemapper(f):
                 return True
@@ -316,7 +331,7 @@
         # close marker is significant (i.e. all of the branch ancestors weren't
         # eliminated).  Therefore if there *is* a close marker, getchanges()
         # doesn't consider it significant, and this revision should be dropped.
-        return not files and 'close' not in self.commits[rev].extra
+        return not files and b'close' not in self.commits[rev].extra
 
     def mark_not_wanted(self, rev, p):
         # Mark rev as not interesting and update data structures.
@@ -348,8 +363,9 @@
             if p in self.wantedancestors:
                 wrev.update(self.wantedancestors[p])
             else:
-                self.ui.warn(_('warning: %s parent %s is missing\n') %
-                             (rev, p))
+                self.ui.warn(
+                    _(b'warning: %s parent %s is missing\n') % (rev, p)
+                )
         wrev.add(rev)
         self.wantedancestors[rev] = wrev
 
@@ -382,10 +398,13 @@
             if mp1 == SKIPREV or mp1 in knownparents:
                 continue
 
-            isancestor = (not self.ignoreancestorcheck and
-                          any(p2 for p2 in parents
-                              if p1 != p2 and mp1 != self.parentmap[p2]
-                                 and mp1 in self.wantedancestors[p2]))
+            isancestor = not self.ignoreancestorcheck and any(
+                p2
+                for p2 in parents
+                if p1 != p2
+                and mp1 != self.parentmap[p2]
+                and mp1 in self.wantedancestors[p2]
+            )
             if not isancestor and not hasbranchparent and len(parents) > 1:
                 # This could be expensive, avoid unnecessary calls.
                 if self._cachedcommit(p1).branch == branch:
@@ -406,7 +425,7 @@
         self.origparents[rev] = parents
 
         closed = False
-        if 'close' in self.commits[rev].extra:
+        if b'close' in self.commits[rev].extra:
             # A branch closing revision is only useful if one of its
             # parents belongs to the branch being closed
             pbranches = [self._cachedcommit(p).branch for p in mparents]
--- a/hgext/convert/git.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/git.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,9 +16,8 @@
     pycompat,
 )
 
-from . import (
-    common,
-)
+from . import common
+
 
 class submodule(object):
     def __init__(self, path, node, url):
@@ -27,30 +26,32 @@
         self.url = url
 
     def hgsub(self):
-        return "%s = [git]%s" % (self.path, self.url)
+        return b"%s = [git]%s" % (self.path, self.url)
 
     def hgsubstate(self):
-        return "%s %s" % (self.node, self.path)
+        return b"%s %s" % (self.node, self.path)
+
 
 # Keys in extra fields that should not be copied even if the user
 # requests them.
 bannedextrakeys = {
     # Git commit object built-ins.
-    'tree',
-    'parent',
-    'author',
-    'committer',
+    b'tree',
+    b'parent',
+    b'author',
+    b'committer',
     # Mercurial built-ins.
-    'branch',
-    'close',
+    b'branch',
+    b'close',
 }
 
+
 class convert_git(common.converter_source, common.commandline):
     # Windows does not support the GIT_DIR= construct while other systems
     # cannot remove an environment variable. Just assume none have
     # both issues.
 
     def _gitcmd(self, cmd, *args, **kwargs):
-        return cmd('--git-dir=%s' % self.path, *args, **kwargs)
+        return cmd(b'--git-dir=%s' % self.path, *args, **kwargs)
 
     def gitrun0(self, *args, **kwargs):
         return self._gitcmd(self.run0, *args, **kwargs)
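
In plain subprocess terms, the ``_gitcmd`` wrapper above amounts to roughly
the following (a sketch only; Mercurial's ``commandline`` base class
additionally handles quoting, encoding and exit-status reporting, and
``git_output`` is not a real helper)::

    import subprocess

    def git_output(gitdir, *args):
        """Run `git --git-dir=<gitdir> <args...>` and return stdout bytes."""
        return subprocess.check_output(
            [b'git', b'--git-dir=' + gitdir] + list(args)
        )

    # e.g. git_output(b'/path/to/repo/.git', b'rev-parse', b'--branches')
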
@@ -69,85 +70,104 @@
 
     def __init__(self, ui, repotype, path, revs=None):
         super(convert_git, self).__init__(ui, repotype, path, revs=revs)
-        common.commandline.__init__(self, ui, 'git')
+        common.commandline.__init__(self, ui, b'git')
 
         # Pass an absolute path to git to prevent it from ever being
         # interpreted as a URL
         path = os.path.abspath(path)
 
-        if os.path.isdir(path + "/.git"):
-            path += "/.git"
-        if not os.path.exists(path + "/objects"):
-            raise common.NoRepo(_("%s does not look like a Git repository") %
-                                path)
+        if os.path.isdir(path + b"/.git"):
+            path += b"/.git"
+        if not os.path.exists(path + b"/objects"):
+            raise common.NoRepo(
+                _(b"%s does not look like a Git repository") % path
+            )
 
         # The default value (50) is based on the default for 'git diff'.
-        similarity = ui.configint('convert', 'git.similarity')
+        similarity = ui.configint(b'convert', b'git.similarity')
         if similarity < 0 or similarity > 100:
-            raise error.Abort(_('similarity must be between 0 and 100'))
+            raise error.Abort(_(b'similarity must be between 0 and 100'))
         if similarity > 0:
-            self.simopt = ['-C%d%%' % similarity]
-            findcopiesharder = ui.configbool('convert', 'git.findcopiesharder')
+            self.simopt = [b'-C%d%%' % similarity]
+            findcopiesharder = ui.configbool(
+                b'convert', b'git.findcopiesharder'
+            )
             if findcopiesharder:
-                self.simopt.append('--find-copies-harder')
+                self.simopt.append(b'--find-copies-harder')
 
-            renamelimit = ui.configint('convert', 'git.renamelimit')
-            self.simopt.append('-l%d' % renamelimit)
+            renamelimit = ui.configint(b'convert', b'git.renamelimit')
+            self.simopt.append(b'-l%d' % renamelimit)
         else:
             self.simopt = []
 
-        common.checktool('git', 'git')
+        common.checktool(b'git', b'git')
 
         self.path = path
         self.submodules = []
 
-        self.catfilepipe = self.gitpipe('cat-file', '--batch')
+        self.catfilepipe = self.gitpipe(b'cat-file', b'--batch')
 
-        self.copyextrakeys = self.ui.configlist('convert', 'git.extrakeys')
+        self.copyextrakeys = self.ui.configlist(b'convert', b'git.extrakeys')
         banned = set(self.copyextrakeys) & bannedextrakeys
         if banned:
-            raise error.Abort(_('copying of extra key is forbidden: %s') %
-                              _(', ').join(sorted(banned)))
+            raise error.Abort(
+                _(b'copying of extra key is forbidden: %s')
+                % _(b', ').join(sorted(banned))
+            )
 
-        committeractions = self.ui.configlist('convert', 'git.committeractions')
+        committeractions = self.ui.configlist(
+            b'convert', b'git.committeractions'
+        )
 
         messagedifferent = None
         messagealways = None
         for a in committeractions:
-            if a.startswith(('messagedifferent', 'messagealways')):
+            if a.startswith((b'messagedifferent', b'messagealways')):
                 k = a
                 v = None
-                if '=' in a:
-                    k, v = a.split('=', 1)
+                if b'=' in a:
+                    k, v = a.split(b'=', 1)
 
-                if k == 'messagedifferent':
-                    messagedifferent = v or 'committer:'
-                elif k == 'messagealways':
-                    messagealways = v or 'committer:'
+                if k == b'messagedifferent':
+                    messagedifferent = v or b'committer:'
+                elif k == b'messagealways':
+                    messagealways = v or b'committer:'
 
         if messagedifferent and messagealways:
-            raise error.Abort(_('committeractions cannot define both '
-                                'messagedifferent and messagealways'))
+            raise error.Abort(
+                _(
+                    b'committeractions cannot define both '
+                    b'messagedifferent and messagealways'
+                )
+            )
 
-        dropcommitter = 'dropcommitter' in committeractions
-        replaceauthor = 'replaceauthor' in committeractions
+        dropcommitter = b'dropcommitter' in committeractions
+        replaceauthor = b'replaceauthor' in committeractions
 
         if dropcommitter and replaceauthor:
-            raise error.Abort(_('committeractions cannot define both '
-                                'dropcommitter and replaceauthor'))
+            raise error.Abort(
+                _(
+                    b'committeractions cannot define both '
+                    b'dropcommitter and replaceauthor'
+                )
+            )
 
         if dropcommitter and messagealways:
-            raise error.Abort(_('committeractions cannot define both '
-                                'dropcommitter and messagealways'))
+            raise error.Abort(
+                _(
+                    b'committeractions cannot define both '
+                    b'dropcommitter and messagealways'
+                )
+            )
 
         if not messagedifferent and not messagealways:
-            messagedifferent = 'committer:'
+            messagedifferent = b'committer:'
 
         self.committeractions = {
-            'dropcommitter': dropcommitter,
-            'replaceauthor': replaceauthor,
-            'messagedifferent': messagedifferent,
-            'messagealways': messagealways,
+            b'dropcommitter': dropcommitter,
+            b'replaceauthor': replaceauthor,
+            b'messagedifferent': messagedifferent,
+            b'messagealways': messagealways,
         }
 
     def after(self):
@@ -156,33 +176,39 @@
 
     def getheads(self):
         if not self.revs:
-            output, status = self.gitrun('rev-parse', '--branches', '--remotes')
+            output, status = self.gitrun(
+                b'rev-parse', b'--branches', b'--remotes'
+            )
             heads = output.splitlines()
             if status:
-                raise error.Abort(_('cannot retrieve git heads'))
+                raise error.Abort(_(b'cannot retrieve git heads'))
         else:
             heads = []
             for rev in self.revs:
-                rawhead, ret = self.gitrun('rev-parse', '--verify', rev)
+                rawhead, ret = self.gitrun(b'rev-parse', b'--verify', rev)
                 heads.append(rawhead[:-1])
                 if ret:
-                    raise error.Abort(_('cannot retrieve git head "%s"') % rev)
+                    raise error.Abort(_(b'cannot retrieve git head "%s"') % rev)
         return heads
 
     def catfile(self, rev, ftype):
         if rev == nodemod.nullhex:
             raise IOError
-        self.catfilepipe[0].write(rev+'\n')
+        self.catfilepipe[0].write(rev + b'\n')
         self.catfilepipe[0].flush()
         info = self.catfilepipe[1].readline().split()
         if info[1] != ftype:
-            raise error.Abort(_('cannot read %r object at %s') % (
-                pycompat.bytestr(ftype), rev))
+            raise error.Abort(
+                _(b'cannot read %r object at %s')
+                % (pycompat.bytestr(ftype), rev)
+            )
         size = int(info[2])
         data = self.catfilepipe[1].read(size)
         if len(data) < size:
-            raise error.Abort(_('cannot read %r object at %s: unexpected size')
-                              % (ftype, rev))
+            raise error.Abort(
+                _(b'cannot read %r object at %s: unexpected size')
+                % (ftype, rev)
+            )
         # read the trailing newline
         self.catfilepipe[1].read(1)
         return data
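
``catfile()`` above speaks the ``git cat-file --batch`` protocol: write one
object name per line, read back a ``<sha> <type> <size>`` header, then
``size`` bytes of body plus a trailing newline. A self-contained sketch of
the same exchange (``open_catfile`` is hypothetical; the extension keeps the
pipe in ``self.catfilepipe`` instead)::

    import subprocess

    def open_catfile(gitdir):
        proc = subprocess.Popen(
            ['git', '--git-dir=%s' % gitdir, 'cat-file', '--batch'],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )

        def catfile(rev, ftype):
            proc.stdin.write(rev + b'\n')  # one request per line
            proc.stdin.flush()
            # a missing object would answer b'<name> missing' instead
            sha, objtype, size = proc.stdout.readline().split()
            if objtype != ftype:
                raise IOError('unexpected object type %r' % objtype)
            data = proc.stdout.read(int(size))
            proc.stdout.read(1)  # consume the trailing newline
            return data

        return catfile
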
@@ -190,14 +216,14 @@
     def getfile(self, name, rev):
         if rev == nodemod.nullhex:
             return None, None
-        if name == '.hgsub':
-            data = '\n'.join([m.hgsub() for m in self.submoditer()])
-            mode = ''
-        elif name == '.hgsubstate':
-            data = '\n'.join([m.hgsubstate() for m in self.submoditer()])
-            mode = ''
+        if name == b'.hgsub':
+            data = b'\n'.join([m.hgsub() for m in self.submoditer()])
+            mode = b''
+        elif name == b'.hgsubstate':
+            data = b'\n'.join([m.hgsubstate() for m in self.submoditer()])
+            mode = b''
         else:
-            data = self.catfile(rev, "blob")
+            data = self.catfile(rev, b"blob")
             mode = self.modecache[(name, rev)]
         return data, mode
 
@@ -216,90 +242,102 @@
         self.submodules = []
         c = config.config()
         # Each item in .gitmodules starts with whitespace that can't be parsed
-        c.parse('.gitmodules', '\n'.join(line.strip() for line in
-                               content.split('\n')))
+        c.parse(
+            b'.gitmodules',
+            b'\n'.join(line.strip() for line in content.split(b'\n')),
+        )
         for sec in c.sections():
             s = c[sec]
-            if 'url' in s and 'path' in s:
-                self.submodules.append(submodule(s['path'], '', s['url']))
+            if b'url' in s and b'path' in s:
+                self.submodules.append(submodule(s[b'path'], b'', s[b'url']))
 
     def retrievegitmodules(self, version):
-        modules, ret = self.gitrun('show', '%s:%s' % (version, '.gitmodules'))
+        modules, ret = self.gitrun(
+            b'show', b'%s:%s' % (version, b'.gitmodules')
+        )
         if ret:
             # This can happen if a file is in the repo that has permissions
             # 160000, but there is no .gitmodules file.
-            self.ui.warn(_("warning: cannot read submodules config file in "
-                           "%s\n") % version)
+            self.ui.warn(
+                _(b"warning: cannot read submodules config file in %s\n")
+                % version
+            )
             return
 
         try:
             self.parsegitmodules(modules)
         except error.ParseError:
-            self.ui.warn(_("warning: unable to parse .gitmodules in %s\n")
-                         % version)
+            self.ui.warn(
+                _(b"warning: unable to parse .gitmodules in %s\n") % version
+            )
             return
 
         for m in self.submodules:
-            node, ret = self.gitrun('rev-parse', '%s:%s' % (version, m.path))
+            node, ret = self.gitrun(b'rev-parse', b'%s:%s' % (version, m.path))
             if ret:
                 continue
             m.node = node.strip()
 
     def getchanges(self, version, full):
         if full:
-            raise error.Abort(_("convert from git does not support --full"))
+            raise error.Abort(_(b"convert from git does not support --full"))
         self.modecache = {}
-        cmd = ['diff-tree','-z', '--root', '-m', '-r'] + self.simopt + [version]
+        cmd = (
+            [b'diff-tree', b'-z', b'--root', b'-m', b'-r']
+            + self.simopt
+            + [version]
+        )
         output, status = self.gitrun(*cmd)
         if status:
-            raise error.Abort(_('cannot read changes in %s') % version)
+            raise error.Abort(_(b'cannot read changes in %s') % version)
         changes = []
         copies = {}
         seen = set()
         entry = None
         subexists = [False]
         subdeleted = [False]
-        difftree = output.split('\x00')
+        difftree = output.split(b'\x00')
         lcount = len(difftree)
         i = 0
 
-        skipsubmodules = self.ui.configbool('convert', 'git.skipsubmodules')
+        skipsubmodules = self.ui.configbool(b'convert', b'git.skipsubmodules')
+
         def add(entry, f, isdest):
             seen.add(f)
             h = entry[3]
-            p = (entry[1] == "100755")
-            s = (entry[1] == "120000")
-            renamesource = (not isdest and entry[4][0] == 'R')
+            p = entry[1] == b"100755"
+            s = entry[1] == b"120000"
+            renamesource = not isdest and entry[4][0] == b'R'
 
-            if f == '.gitmodules':
+            if f == b'.gitmodules':
                 if skipsubmodules:
                     return
 
                 subexists[0] = True
-                if entry[4] == 'D' or renamesource:
+                if entry[4] == b'D' or renamesource:
                     subdeleted[0] = True
-                    changes.append(('.hgsub', nodemod.nullhex))
+                    changes.append((b'.hgsub', nodemod.nullhex))
                 else:
-                    changes.append(('.hgsub', ''))
-            elif entry[1] == '160000' or entry[0] == ':160000':
+                    changes.append((b'.hgsub', b''))
+            elif entry[1] == b'160000' or entry[0] == b':160000':
                 if not skipsubmodules:
                     subexists[0] = True
             else:
                 if renamesource:
                     h = nodemod.nullhex
-                self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
+                self.modecache[(f, h)] = (p and b"x") or (s and b"l") or b""
                 changes.append((f, h))
 
         while i < lcount:
             l = difftree[i]
             i += 1
             if not entry:
-                if not l.startswith(':'):
+                if not l.startswith(b':'):
                     continue
                 entry = tuple(pycompat.bytestr(p) for p in l.split())
                 continue
             f = l
-            if entry[4][0] == 'C':
+            if entry[4][0] == b'C':
                 copysrc = f
                 copydest = difftree[i]
                 i += 1
@@ -309,7 +347,7 @@
                 add(entry, f, False)
             # A file can be copied multiple times, or modified and copied
             # simultaneously. So f can be repeated even if fdest isn't.
-            if entry[4][0] == 'R':
+            if entry[4][0] == b'R':
                 # rename: next line is the destination
                 fdest = difftree[i]
                 i += 1
@@ -317,105 +355,110 @@
                     add(entry, fdest, True)
                     # .gitmodules isn't imported at all, so it being copied to
                     # and fro doesn't really make sense
-                    if f != '.gitmodules' and fdest != '.gitmodules':
+                    if f != b'.gitmodules' and fdest != b'.gitmodules':
                         copies[fdest] = f
             entry = None
 
         if subexists[0]:
             if subdeleted[0]:
-                changes.append(('.hgsubstate', nodemod.nullhex))
+                changes.append((b'.hgsubstate', nodemod.nullhex))
             else:
                 self.retrievegitmodules(version)
-                changes.append(('.hgsubstate', ''))
+                changes.append((b'.hgsubstate', b''))
         return (changes, copies, set())
 
     def getcommit(self, version):
-        c = self.catfile(version, "commit") # read the commit hash
-        end = c.find("\n\n")
-        message = c[end + 2:]
+        c = self.catfile(version, b"commit")  # read the commit object
+        end = c.find(b"\n\n")
+        message = c[end + 2 :]
         message = self.recode(message)
         l = c[:end].splitlines()
         parents = []
         author = committer = None
         extra = {}
         for e in l[1:]:
-            n, v = e.split(" ", 1)
-            if n == "author":
+            n, v = e.split(b" ", 1)
+            if n == b"author":
                 p = v.split()
                 tm, tz = p[-2:]
-                author = " ".join(p[:-2])
-                if author[0] == "<":
+                author = b" ".join(p[:-2])
+                if author[0:1] == b"<":
                     author = author[1:-1]
                 author = self.recode(author)
-            if n == "committer":
+            if n == b"committer":
                 p = v.split()
                 tm, tz = p[-2:]
-                committer = " ".join(p[:-2])
-                if committer[0] == "<":
+                committer = b" ".join(p[:-2])
+                if committer[0:1] == b"<":
                     committer = committer[1:-1]
                 committer = self.recode(committer)
-            if n == "parent":
+            if n == b"parent":
                 parents.append(v)
             if n in self.copyextrakeys:
                 extra[n] = v
 
-        if self.committeractions['dropcommitter']:
+        if self.committeractions[b'dropcommitter']:
             committer = None
-        elif self.committeractions['replaceauthor']:
+        elif self.committeractions[b'replaceauthor']:
             author = committer
 
         if committer:
-            messagealways = self.committeractions['messagealways']
-            messagedifferent = self.committeractions['messagedifferent']
+            messagealways = self.committeractions[b'messagealways']
+            messagedifferent = self.committeractions[b'messagedifferent']
             if messagealways:
-                message += '\n%s %s\n' % (messagealways, committer)
+                message += b'\n%s %s\n' % (messagealways, committer)
             elif messagedifferent and author != committer:
-                message += '\n%s %s\n' % (messagedifferent, committer)
+                message += b'\n%s %s\n' % (messagedifferent, committer)
 
-        tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
+        tzs, tzh, tzm = tz[-5:-4] + b"1", tz[-4:-2], tz[-2:]
         tz = -int(tzs) * (int(tzh) * 3600 + int(tzm) * 60)
-        date = tm + " " + (b"%d" % tz)
-        saverev = self.ui.configbool('convert', 'git.saverev')
+        date = tm + b" " + (b"%d" % tz)
+        saverev = self.ui.configbool(b'convert', b'git.saverev')
 
-        c = common.commit(parents=parents, date=date, author=author,
-                          desc=message,
-                          rev=version,
-                          extra=extra,
-                          saverev=saverev)
+        c = common.commit(
+            parents=parents,
+            date=date,
+            author=author,
+            desc=message,
+            rev=version,
+            extra=extra,
+            saverev=saverev,
+        )
         return c
 
     def numcommits(self):
-        output, ret = self.gitrunlines('rev-list', '--all')
+        output, ret = self.gitrunlines(b'rev-list', b'--all')
         if ret:
-            raise error.Abort(_('cannot retrieve number of commits in %s')
-                              % self.path)
+            raise error.Abort(
+                _(b'cannot retrieve number of commits in %s') % self.path
+            )
         return len(output)
 
     def gettags(self):
         tags = {}
         alltags = {}
-        output, status = self.gitrunlines('ls-remote', '--tags', self.path)
+        output, status = self.gitrunlines(b'ls-remote', b'--tags', self.path)
 
         if status:
-            raise error.Abort(_('cannot read tags from %s') % self.path)
-        prefix = 'refs/tags/'
+            raise error.Abort(_(b'cannot read tags from %s') % self.path)
+        prefix = b'refs/tags/'
 
         # Build complete list of tags, both annotated and bare ones
         for line in output:
             line = line.strip()
-            if line.startswith("error:") or line.startswith("fatal:"):
-                raise error.Abort(_('cannot read tags from %s') % self.path)
+            if line.startswith(b"error:") or line.startswith(b"fatal:"):
+                raise error.Abort(_(b'cannot read tags from %s') % self.path)
             node, tag = line.split(None, 1)
             if not tag.startswith(prefix):
                 continue
-            alltags[tag[len(prefix):]] = node
+            alltags[tag[len(prefix) :]] = node
 
         # Filter out tag objects for annotated tag refs
         for tag in alltags:
-            if tag.endswith('^{}'):
+            if tag.endswith(b'^{}'):
                 tags[tag[:-3]] = alltags[tag]
             else:
-                if tag + '^{}' in alltags:
+                if tag + b'^{}' in alltags:
                     continue
                 else:
                     tags[tag] = alltags[tag]
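
The filtering above peels annotated tags: ``ls-remote --tags`` lists both
``refs/tags/v1.0`` (the tag object) and ``refs/tags/v1.0^{}`` (the commit it
points to), and the peeled entry wins when present. A worked example with
made-up nodes::

    alltags = {
        b'v1.0': b'1111...',     # annotated tag object
        b'v1.0^{}': b'2222...',  # peeled: the tagged commit
        b'v1.1': b'3333...',     # lightweight tag
    }
    # after the loop above:
    #   tags == {b'v1.0': b'2222...', b'v1.1': b'3333...'}
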
@@ -425,22 +468,29 @@
     def getchangedfiles(self, version, i):
         changes = []
         if i is None:
-            output, status = self.gitrunlines('diff-tree', '--root', '-m',
-                                              '-r', version)
+            output, status = self.gitrunlines(
+                b'diff-tree', b'--root', b'-m', b'-r', version
+            )
             if status:
-                raise error.Abort(_('cannot read changes in %s') % version)
+                raise error.Abort(_(b'cannot read changes in %s') % version)
             for l in output:
-                if "\t" not in l:
+                if b"\t" not in l:
                     continue
-                m, f = l[:-1].split("\t")
+                m, f = l[:-1].split(b"\t")
                 changes.append(f)
         else:
-            output, status = self.gitrunlines('diff-tree', '--name-only',
-                                              '--root', '-r', version,
-                                              '%s^%d' % (version, i + 1), '--')
+            output, status = self.gitrunlines(
+                b'diff-tree',
+                b'--name-only',
+                b'--root',
+                b'-r',
+                version,
+                b'%s^%d' % (version, i + 1),
+                b'--',
+            )
             if status:
-                raise error.Abort(_('cannot read changes in %s') % version)
-            changes = [f.rstrip('\n') for f in output]
+                raise error.Abort(_(b'cannot read changes in %s') % version)
+            changes = [f.rstrip(b'\n') for f in output]
 
         return changes
 
@@ -448,19 +498,19 @@
         bookmarks = {}
 
         # Handle local and remote branches
-        remoteprefix = self.ui.config('convert', 'git.remoteprefix')
+        remoteprefix = self.ui.config(b'convert', b'git.remoteprefix')
         reftypes = [
             # (git prefix, hg prefix)
-            ('refs/remotes/origin/', remoteprefix + '/'),
-            ('refs/heads/', '')
+            (b'refs/remotes/origin/', remoteprefix + b'/'),
+            (b'refs/heads/', b''),
         ]
 
         exclude = {
-            'refs/remotes/origin/HEAD',
+            b'refs/remotes/origin/HEAD',
         }
 
         try:
-            output, status = self.gitrunlines('show-ref')
+            output, status = self.gitrunlines(b'show-ref')
             for line in output:
                 line = line.strip()
                 rev, name = line.split(None, 1)
@@ -468,13 +518,13 @@
                 for gitprefix, hgprefix in reftypes:
                     if not name.startswith(gitprefix) or name in exclude:
                         continue
-                    name = '%s%s' % (hgprefix, name[len(gitprefix):])
+                    name = b'%s%s' % (hgprefix, name[len(gitprefix) :])
                     bookmarks[name] = rev
         except Exception:
             pass
 
         return bookmarks
 
-    def checkrevformat(self, revstr, mapname='splicemap'):
+    def checkrevformat(self, revstr, mapname=b'splicemap'):
         """ git revision string is a 40 byte hex """
         self.checkhexformat(revstr, mapname)
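
The timezone arithmetic in ``getcommit()`` above is easy to misread, so here
it is as a worked example (``gitoffset`` is a hypothetical helper; git
records offsets like ``+0530``, and Mercurial dates store seconds west of
UTC)::

    def gitoffset(tz):
        sign = tz[-5:-4] + b'1'  # b'+1' or b'-1'
        hours, minutes = int(tz[-4:-2]), int(tz[-2:])
        return -int(sign) * (hours * 3600 + minutes * 60)

    assert gitoffset(b'+0530') == -19800
    assert gitoffset(b'-0700') == 25200
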
--- a/hgext/convert/gnuarch.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/gnuarch.py	Mon Oct 21 11:09:48 2019 -0400
@@ -26,14 +26,14 @@
 )
 from . import common
 
+
 class gnuarch_source(common.converter_source, common.commandline):
-
     class gnuarch_rev(object):
         def __init__(self, rev):
             self.rev = rev
-            self.summary = ''
+            self.summary = b''
             self.date = None
-            self.author = ''
+            self.author = b''
             self.continuationof = None
             self.add_files = []
             self.mod_files = []
@@ -44,19 +44,20 @@
     def __init__(self, ui, repotype, path, revs=None):
         super(gnuarch_source, self).__init__(ui, repotype, path, revs=revs)
 
-        if not os.path.exists(os.path.join(path, '{arch}')):
-            raise common.NoRepo(_("%s does not look like a GNU Arch repository")
-                         % path)
+        if not os.path.exists(os.path.join(path, b'{arch}')):
+            raise common.NoRepo(
+                _(b"%s does not look like a GNU Arch repository") % path
+            )
 
         # Could use checktool, but we want to check for baz or tla.
         self.execmd = None
-        if procutil.findexe('baz'):
-            self.execmd = 'baz'
+        if procutil.findexe(b'baz'):
+            self.execmd = b'baz'
         else:
-            if procutil.findexe('tla'):
-                self.execmd = 'tla'
+            if procutil.findexe(b'tla'):
+                self.execmd = b'tla'
             else:
-                raise error.Abort(_('cannot find a GNU Arch tool'))
+                raise error.Abort(_(b'cannot find a GNU Arch tool'))
 
         common.commandline.__init__(self, ui, self.execmd)
 
@@ -74,37 +75,47 @@
 
     def before(self):
         # Get registered archives
-        self.archives = [i.rstrip('\n')
-                         for i in self.runlines0('archives', '-n')]
+        self.archives = [
+            i.rstrip(b'\n') for i in self.runlines0(b'archives', b'-n')
+        ]
 
-        if self.execmd == 'tla':
-            output = self.run0('tree-version', self.path)
+        if self.execmd == b'tla':
+            output = self.run0(b'tree-version', self.path)
         else:
-            output = self.run0('tree-version', '-d', self.path)
+            output = self.run0(b'tree-version', b'-d', self.path)
         self.treeversion = output.strip()
 
         # Get name of temporary directory
-        version = self.treeversion.split('/')
-        self.tmppath = os.path.join(tempfile.gettempdir(),
-                                    'hg-%s' % version[1])
+        version = self.treeversion.split(b'/')
+        self.tmppath = os.path.join(
+            pycompat.fsencode(tempfile.gettempdir()), b'hg-%s' % version[1]
+        )
 
         # Generate parents dictionary
         self.parents[None] = []
         treeversion = self.treeversion
         child = None
         while treeversion:
-            self.ui.status(_('analyzing tree version %s...\n') % treeversion)
+            self.ui.status(_(b'analyzing tree version %s...\n') % treeversion)
 
-            archive = treeversion.split('/')[0]
+            archive = treeversion.split(b'/')[0]
             if archive not in self.archives:
-                self.ui.status(_('tree analysis stopped because it points to '
-                                 'an unregistered archive %s...\n') % archive)
+                self.ui.status(
+                    _(
+                        b'tree analysis stopped because it points to '
+                        b'an unregistered archive %s...\n'
+                    )
+                    % archive
+                )
                 break
 
             # Get the complete list of revisions for that tree version
-            output, status = self.runlines('revisions', '-r', '-f', treeversion)
-            self.checkexit(status, 'failed retrieving revisions for %s'
-                           % treeversion)
+            output, status = self.runlines(
+                b'revisions', b'-r', b'-f', treeversion
+            )
+            self.checkexit(
+                status, b'failed retrieving revisions for %s' % treeversion
+            )
 
             # No new iteration unless a revision has a continuation-of header
             treeversion = None
@@ -115,9 +126,9 @@
                 self.parents[rev] = []
 
                 # Read author, date and summary
-                catlog, status = self.run('cat-log', '-d', self.path, rev)
+                catlog, status = self.run(b'cat-log', b'-d', self.path, rev)
                 if status:
-                    catlog  = self.run0('cat-archive-log', rev)
+                    catlog = self.run0(b'cat-archive-log', rev)
                 self._parsecatlog(catlog, rev)
 
                 # Populate the parents map
@@ -131,17 +142,18 @@
                 # or if we have to 'jump' to a different treeversion given
                 # by the continuation-of header.
                 if self.changes[rev].continuationof:
-                    treeversion = '--'.join(
-                        self.changes[rev].continuationof.split('--')[:-1])
+                    treeversion = b'--'.join(
+                        self.changes[rev].continuationof.split(b'--')[:-1]
+                    )
                     break
 
                 # If we reached a base-0 revision w/o any continuation-of
                 # header, it means the tree history ends here.
-                if rev[-6:] == 'base-0':
+                if rev[-6:] == b'base-0':
                     break
 
     def after(self):
-        self.ui.debug('cleaning up %s\n' % self.tmppath)
+        self.ui.debug(b'cleaning up %s\n' % self.tmppath)
         shutil.rmtree(self.tmppath, ignore_errors=True)
 
     def getheads(self):
@@ -149,7 +161,7 @@
 
     def getfile(self, name, rev):
         if rev != self.lastrev:
-            raise error.Abort(_('internal calling inconsistency'))
+            raise error.Abort(_(b'internal calling inconsistency'))
 
         if not os.path.lexists(os.path.join(self.tmppath, name)):
             return None, None
@@ -158,7 +170,7 @@
 
     def getchanges(self, rev, full):
         if full:
-            raise error.Abort(_("convert from arch does not support --full"))
+            raise error.Abort(_(b"convert from arch does not support --full"))
         self._update(rev)
         changes = []
         copies = {}
@@ -189,9 +201,13 @@
 
     def getcommit(self, rev):
         changes = self.changes[rev]
-        return common.commit(author=changes.author, date=changes.date,
-                             desc=changes.summary, parents=self.parents[rev],
-                             rev=rev)
+        return common.commit(
+            author=changes.author,
+            date=changes.date,
+            desc=changes.summary,
+            parents=self.parents[rev],
+            rev=rev,
+        )
 
     def gettags(self):
         return self.tags
@@ -200,15 +216,15 @@
         cmdline = [self.execmd, cmd]
         cmdline += args
         cmdline = [procutil.shellquote(arg) for arg in cmdline]
-        cmdline += ['>', os.devnull, '2>', os.devnull]
-        cmdline = procutil.quotecommand(' '.join(cmdline))
-        self.ui.debug(cmdline, '\n')
+        bdevnull = pycompat.bytestr(os.devnull)
+        cmdline += [b'>', bdevnull, b'2>', bdevnull]
+        cmdline = procutil.quotecommand(b' '.join(cmdline))
+        self.ui.debug(cmdline, b'\n')
         return os.system(pycompat.rapply(procutil.tonativestr, cmdline))
 
     def _update(self, rev):
-        self.ui.debug('applying revision %s...\n' % rev)
-        changeset, status = self.runlines('replay', '-d', self.tmppath,
-                                              rev)
+        self.ui.debug(b'applying revision %s...\n' % rev)
+        changeset, status = self.runlines(b'replay', b'-d', self.tmppath, rev)
         if status:
             # Something went wrong while merging (baz or tla
             # issue?), get latest revision and try from there
@@ -216,8 +232,9 @@
             self._obtainrevision(rev)
         else:
             old_rev = self.parents[rev][0]
-            self.ui.debug('computing changeset between %s and %s...\n'
-                          % (old_rev, rev))
+            self.ui.debug(
+                b'computing changeset between %s and %s...\n' % (old_rev, rev)
+            )
             self._parsechangeset(changeset, rev)
 
     def _getfile(self, name, rev):
@@ -225,16 +242,16 @@
         if stat.S_ISLNK(mode):
             data = util.readlink(os.path.join(self.tmppath, name))
             if mode:
-                mode = 'l'
+                mode = b'l'
             else:
-                mode = ''
+                mode = b''
         else:
             data = util.readfile(os.path.join(self.tmppath, name))
-            mode = (mode & 0o111) and 'x' or ''
+            mode = (mode & 0o111) and b'x' or b''
         return data, mode
 
     def _exclude(self, name):
-        exclude = ['{arch}', '.arch-ids', '.arch-inventory']
+        exclude = [b'{arch}', b'.arch-ids', b'.arch-inventory']
         for exc in exclude:
             if name.find(exc) != -1:
                 return True
@@ -268,15 +285,15 @@
         return changes, copies
 
     def _obtainrevision(self, rev):
-        self.ui.debug('obtaining revision %s...\n' % rev)
-        output = self._execute('get', rev, self.tmppath)
+        self.ui.debug(b'obtaining revision %s...\n' % rev)
+        output = self._execute(b'get', rev, self.tmppath)
         self.checkexit(output)
-        self.ui.debug('analyzing revision %s...\n' % rev)
+        self.ui.debug(b'analyzing revision %s...\n' % rev)
         files = self._readcontents(self.tmppath)
         self.changes[rev].add_files += files
 
     def _stripbasepath(self, path):
-        if path.startswith('./'):
+        if path.startswith(b'./'):
             return path[2:]
         return path
 
@@ -286,71 +303,73 @@
 
             # Commit date
             self.changes[rev].date = dateutil.datestr(
-                dateutil.strdate(catlog['Standard-date'],
-                             '%Y-%m-%d %H:%M:%S'))
+                dateutil.strdate(catlog[b'Standard-date'], b'%Y-%m-%d %H:%M:%S')
+            )
 
             # Commit author
-            self.changes[rev].author = self.recode(catlog['Creator'])
+            self.changes[rev].author = self.recode(catlog[b'Creator'])
 
             # Commit description
-            self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
-                                                    catlog.get_payload()))
+            self.changes[rev].summary = b'\n\n'.join(
+                (catlog[b'Summary'], catlog.get_payload())
+            )
             self.changes[rev].summary = self.recode(self.changes[rev].summary)
 
             # Commit revision origin when dealing with a branch or tag
-            if 'Continuation-of' in catlog:
+            if b'Continuation-of' in catlog:
                 self.changes[rev].continuationof = self.recode(
-                    catlog['Continuation-of'])
+                    catlog[b'Continuation-of']
+                )
         except Exception:
-            raise error.Abort(_('could not parse cat-log of %s') % rev)
+            raise error.Abort(_(b'could not parse cat-log of %s') % rev)
 
     def _parsechangeset(self, data, rev):
         for l in data:
             l = l.strip()
             # Added file (ignore added directory)
-            if l.startswith('A') and not l.startswith('A/'):
+            if l.startswith(b'A') and not l.startswith(b'A/'):
                 file = self._stripbasepath(l[1:].strip())
                 if not self._exclude(file):
                     self.changes[rev].add_files.append(file)
             # Deleted file (ignore deleted directory)
-            elif l.startswith('D') and not l.startswith('D/'):
+            elif l.startswith(b'D') and not l.startswith(b'D/'):
                 file = self._stripbasepath(l[1:].strip())
                 if not self._exclude(file):
                     self.changes[rev].del_files.append(file)
             # Modified binary file
-            elif l.startswith('Mb'):
+            elif l.startswith(b'Mb'):
                 file = self._stripbasepath(l[2:].strip())
                 if not self._exclude(file):
                     self.changes[rev].mod_files.append(file)
             # Modified link
-            elif l.startswith('M->'):
+            elif l.startswith(b'M->'):
                 file = self._stripbasepath(l[3:].strip())
                 if not self._exclude(file):
                     self.changes[rev].mod_files.append(file)
             # Modified file
-            elif l.startswith('M'):
+            elif l.startswith(b'M'):
                 file = self._stripbasepath(l[1:].strip())
                 if not self._exclude(file):
                     self.changes[rev].mod_files.append(file)
             # Renamed file (or link)
-            elif l.startswith('=>'):
-                files = l[2:].strip().split(' ')
+            elif l.startswith(b'=>'):
+                files = l[2:].strip().split(b' ')
                 if len(files) == 1:
-                    files = l[2:].strip().split('\t')
+                    files = l[2:].strip().split(b'\t')
                 src = self._stripbasepath(files[0])
                 dst = self._stripbasepath(files[1])
                 if not self._exclude(src) and not self._exclude(dst):
                     self.changes[rev].ren_files[src] = dst
             # Conversion from file to link or from link to file (modified)
-            elif l.startswith('ch'):
+            elif l.startswith(b'ch'):
                 file = self._stripbasepath(l[2:].strip())
                 if not self._exclude(file):
                     self.changes[rev].mod_files.append(file)
             # Renamed directory
-            elif l.startswith('/>'):
-                dirs = l[2:].strip().split(' ')
+            elif l.startswith(b'/>'):
+                dirs = l[2:].strip().split(b' ')
                 if len(dirs) == 1:
-                    dirs = l[2:].strip().split('\t')
+                    dirs = l[2:].strip().split(b'\t')
                 src = self._stripbasepath(dirs[0])
                 dst = self._stripbasepath(dirs[1])
                 if not self._exclude(src) and not self._exclude(dst):
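
``_parsechangeset()`` above dispatches on the line prefixes emitted by
``baz``/``tla``: ``A`` add, ``D`` delete, ``M``/``Mb``/``M->``/``ch``
modify, ``=>`` rename, ``/>`` directory rename. A condensed sketch of that
dispatch (``classify_line`` is hypothetical and omits the ``ch``, ``/>``
and ``_exclude`` handling)::

    def classify_line(l):
        l = l.strip()
        if l.startswith(b'A') and not l.startswith(b'A/'):
            return b'add', l[1:].strip()
        if l.startswith(b'D') and not l.startswith(b'D/'):
            return b'delete', l[1:].strip()
        if l.startswith(b'Mb'):
            return b'modify', l[2:].strip()  # binary file
        if l.startswith(b'M->'):
            return b'modify', l[3:].strip()  # link
        if l.startswith(b'M'):
            return b'modify', l[1:].strip()
        if l.startswith(b'=>'):
            return b'rename', l[2:].strip()  # b'src dst' or tab-separated
        return b'other', l
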
--- a/hgext/convert/hg.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/hg.py	Mon Oct 21 11:09:48 2019 -0400
@@ -23,6 +23,7 @@
 import time
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     bookmarks,
     context,
@@ -33,78 +34,85 @@
     merge as mergemod,
     node as nodemod,
     phases,
+    pycompat,
     scmutil,
     util,
 )
 from mercurial.utils import dateutil
+
 stringio = util.stringio
 
 from . import common
+
 mapfile = common.mapfile
 NoRepo = common.NoRepo
 
 sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')
 
+
 class mercurial_sink(common.converter_sink):
     def __init__(self, ui, repotype, path):
         common.converter_sink.__init__(self, ui, repotype, path)
-        self.branchnames = ui.configbool('convert', 'hg.usebranchnames')
-        self.clonebranches = ui.configbool('convert', 'hg.clonebranches')
-        self.tagsbranch = ui.config('convert', 'hg.tagsbranch')
+        self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames')
+        self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches')
+        self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch')
         self.lastbranch = None
         if os.path.isdir(path) and len(os.listdir(path)) > 0:
             try:
                 self.repo = hg.repository(self.ui, path)
                 if not self.repo.local():
-                    raise NoRepo(_('%s is not a local Mercurial repository')
-                                 % path)
+                    raise NoRepo(
+                        _(b'%s is not a local Mercurial repository') % path
+                    )
             except error.RepoError as err:
                 ui.traceback()
                 raise NoRepo(err.args[0])
         else:
             try:
-                ui.status(_('initializing destination %s repository\n') % path)
+                ui.status(_(b'initializing destination %s repository\n') % path)
                 self.repo = hg.repository(self.ui, path, create=True)
                 if not self.repo.local():
-                    raise NoRepo(_('%s is not a local Mercurial repository')
-                                 % path)
+                    raise NoRepo(
+                        _(b'%s is not a local Mercurial repository') % path
+                    )
                 self.created.append(path)
             except error.RepoError:
                 ui.traceback()
-                raise NoRepo(_("could not create hg repository %s as sink")
-                             % path)
+                raise NoRepo(
+                    _(b"could not create hg repository %s as sink") % path
+                )
         self.lock = None
         self.wlock = None
         self.filemapmode = False
         self.subrevmaps = {}
 
     def before(self):
-        self.ui.debug('run hg sink pre-conversion action\n')
+        self.ui.debug(b'run hg sink pre-conversion action\n')
         self.wlock = self.repo.wlock()
         self.lock = self.repo.lock()
 
     def after(self):
-        self.ui.debug('run hg sink post-conversion action\n')
+        self.ui.debug(b'run hg sink post-conversion action\n')
         if self.lock:
             self.lock.release()
         if self.wlock:
             self.wlock.release()
 
     def revmapfile(self):
-        return self.repo.vfs.join("shamap")
+        return self.repo.vfs.join(b"shamap")
 
     def authorfile(self):
-        return self.repo.vfs.join("authormap")
+        return self.repo.vfs.join(b"authormap")
 
     def setbranch(self, branch, pbranches):
         if not self.clonebranches:
             return
 
-        setbranch = (branch != self.lastbranch)
+        setbranch = branch != self.lastbranch
         self.lastbranch = branch
         if not branch:
-            branch = 'default'
-        pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
+            branch = b'default'
+        pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches]
 
         branchpath = os.path.join(self.path, branch)
         if setbranch:
@@ -126,21 +134,24 @@
 
         if missings:
             self.after()
-            for pbranch, heads in sorted(missings.iteritems()):
+            for pbranch, heads in sorted(pycompat.iteritems(missings)):
                 pbranchpath = os.path.join(self.path, pbranch)
                 prepo = hg.peer(self.ui, {}, pbranchpath)
-                self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
-                exchange.pull(self.repo, prepo,
-                              [prepo.lookup(h) for h in heads])
+                self.ui.note(
+                    _(b'pulling from %s into %s\n') % (pbranch, branch)
+                )
+                exchange.pull(
+                    self.repo, prepo, [prepo.lookup(h) for h in heads]
+                )
             self.before()
 
     def _rewritetags(self, source, revmap, data):
         fp = stringio()
         for line in data.splitlines():
-            s = line.split(' ', 1)
+            s = line.split(b' ', 1)
             if len(s) != 2:
-                self.ui.warn(_('invalid tag entry: "%s"\n') % line)
-                fp.write('%s\n' % line)  # Bogus, but keep for hash stability
+                self.ui.warn(_(b'invalid tag entry: "%s"\n') % line)
+                fp.write(b'%s\n' % line)  # Bogus, but keep for hash stability
                 continue
             revid = revmap.get(source.lookuprev(s[0]))
             if not revid:
@@ -148,16 +159,16 @@
                     revid = s[0]
                 else:
                     # missing, but keep for hash stability
-                    self.ui.warn(_('missing tag entry: "%s"\n') % line)
-                    fp.write('%s\n' % line)
+                    self.ui.warn(_(b'missing tag entry: "%s"\n') % line)
+                    fp.write(b'%s\n' % line)
                     continue
-            fp.write('%s %s\n' % (revid, s[1]))
+            fp.write(b'%s %s\n' % (revid, s[1]))
         return fp.getvalue()
 
     def _rewritesubstate(self, source, data):
         fp = stringio()
         for line in data.splitlines():
-            s = line.split(' ', 1)
+            s = line.split(b' ', 1)
             if len(s) != 2:
                 continue
 
@@ -166,17 +177,18 @@
             if revid != nodemod.nullhex:
                 revmap = self.subrevmaps.get(subpath)
                 if revmap is None:
-                    revmap = mapfile(self.ui,
-                                     self.repo.wjoin(subpath, '.hg/shamap'))
+                    revmap = mapfile(
+                        self.ui, self.repo.wjoin(subpath, b'.hg/shamap')
+                    )
                     self.subrevmaps[subpath] = revmap
 
                     # It is reasonable that one or more of the subrepos don't
                     # need to be converted, in which case they can be cloned
                     # into place instead of converted.  Therefore, only warn
                     # once.
-                    msg = _('no ".hgsubstate" updates will be made for "%s"\n')
+                    msg = _(b'no ".hgsubstate" updates will be made for "%s"\n')
                     if len(revmap) == 0:
-                        sub = self.repo.wvfs.reljoin(subpath, '.hg')
+                        sub = self.repo.wvfs.reljoin(subpath, b'.hg')
 
                         if self.repo.wvfs.exists(sub):
                             self.ui.warn(msg % subpath)
@@ -184,12 +196,14 @@
                 newid = revmap.get(revid)
                 if not newid:
                     if len(revmap) > 0:
-                        self.ui.warn(_("%s is missing from %s/.hg/shamap\n") %
-                                     (revid, subpath))
+                        self.ui.warn(
+                            _(b"%s is missing from %s/.hg/shamap\n")
+                            % (revid, subpath)
+                        )
                 else:
                     revid = newid
 
-            fp.write('%s %s\n' % (revid, subpath))
+            fp.write(b'%s %s\n' % (revid, subpath))
 
         return fp.getvalue()
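
``_rewritetags()`` and ``_rewritesubstate()`` above rewrite revision ids
line by line, preserving unparseable lines for hash stability. A simplified
sketch of the ``.hgtags`` case (``revmap`` here is a plain
old-node-to-new-node dict; the real code first resolves each id through
``source.lookuprev()``)::

    def rewritetags(data, revmap):
        out = []
        for line in data.splitlines():
            try:
                node, name = line.split(b' ', 1)
            except ValueError:
                out.append(line)  # bogus, but keep for hash stability
                continue
            out.append(b'%s %s' % (revmap.get(node, node), name))
        return b''.join(l + b'\n' for l in out)
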
 
@@ -204,14 +218,17 @@
         anc = [p1ctx.ancestor(p2ctx)]
         # Calculate what files are coming from p2
         actions, diverge, rename = mergemod.calculateupdates(
-            self.repo, p1ctx, p2ctx, anc,
-            True,  # branchmerge
-            True,  # force
-            False, # acceptremote
-            False, # followcopies
+            self.repo,
+            p1ctx,
+            p2ctx,
+            anc,
+            branchmerge=True,
+            force=True,
+            acceptremote=False,
+            followcopies=False,
         )
 
-        for file, (action, info, msg) in actions.iteritems():
+        for file, (action, info, msg) in pycompat.iteritems(actions):
             if source.targetfilebelongstosource(file):
                 # If the file belongs to the source repo, ignore the p2
                 # since it will be covered by the existing fileset.
@@ -219,25 +236,30 @@
 
             # If the file requires actual merging, abort. We don't have enough
             # context to resolve merges correctly.
-            if action in ['m', 'dm', 'cd', 'dc']:
-                raise error.Abort(_("unable to convert merge commit "
-                    "since target parents do not merge cleanly (file "
-                    "%s, parents %s and %s)") % (file, p1ctx,
-                                                 p2ctx))
-            elif action == 'k':
+            if action in [b'm', b'dm', b'cd', b'dc']:
+                raise error.Abort(
+                    _(
+                        b"unable to convert merge commit "
+                        b"since target parents do not merge cleanly (file "
+                        b"%s, parents %s and %s)"
+                    )
+                    % (file, p1ctx, p2ctx)
+                )
+            elif action == b'k':
                 # 'keep' means nothing changed from p1
                 continue
             else:
                 # Any other change means we want to take the p2 version
                 yield file
 
-    def putcommit(self, files, copies, parents, commit, source, revmap, full,
-                  cleanp2):
+    def putcommit(
+        self, files, copies, parents, commit, source, revmap, full, cleanp2
+    ):
         files = dict(files)
 
         def getfilectx(repo, memctx, f):
             if p2ctx and f in p2files and f not in copies:
-                self.ui.debug('reusing %s from p2\n' % f)
+                self.ui.debug(b'reusing %s from p2\n' % f)
                 try:
                     return p2ctx[f]
                 except error.ManifestLookupError:
@@ -251,12 +273,19 @@
             data, mode = source.getfile(f, v)
             if data is None:
                 return None
-            if f == '.hgtags':
+            if f == b'.hgtags':
                 data = self._rewritetags(source, revmap, data)
-            if f == '.hgsubstate':
+            if f == b'.hgsubstate':
                 data = self._rewritesubstate(source, data)
-            return context.memfilectx(self.repo, memctx, f, data, 'l' in mode,
-                                      'x' in mode, copies.get(f))
+            return context.memfilectx(
+                self.repo,
+                memctx,
+                f,
+                data,
+                b'l' in mode,
+                b'x' in mode,
+                copies.get(f),
+            )
 
         pl = []
         for p in parents:
@@ -281,36 +310,40 @@
             oldrev = source.lookuprev(sha1)
             newrev = revmap.get(oldrev)
             if newrev is not None:
-                text = text.replace(sha1, newrev[:len(sha1)])
+                text = text.replace(sha1, newrev[: len(sha1)])
 
         extra = commit.extra.copy()
 
-        sourcename = self.repo.ui.config('convert', 'hg.sourcename')
+        sourcename = self.repo.ui.config(b'convert', b'hg.sourcename')
         if sourcename:
-            extra['convert_source'] = sourcename
+            extra[b'convert_source'] = sourcename
 
-        for label in ('source', 'transplant_source', 'rebase_source',
-                      'intermediate-source'):
+        for label in (
+            b'source',
+            b'transplant_source',
+            b'rebase_source',
+            b'intermediate-source',
+        ):
             node = extra.get(label)
 
             if node is None:
                 continue
 
             # Only transplant stores its reference in binary
-            if label == 'transplant_source':
+            if label == b'transplant_source':
                 node = nodemod.hex(node)
 
             newrev = revmap.get(node)
             if newrev is not None:
-                if label == 'transplant_source':
+                if label == b'transplant_source':
                     newrev = nodemod.bin(newrev)
 
                 extra[label] = newrev
 
         if self.branchnames and commit.branch:
-            extra['branch'] = commit.branch
+            extra[b'branch'] = commit.branch
         if commit.rev and commit.saverev:
-            extra['convert_revision'] = commit.rev
+            extra[b'convert_revision'] = commit.rev
 
         while parents:
             p1 = p2
@@ -330,16 +363,28 @@
                     p2files.add(file)
                     fileset.add(file)
 
-            ctx = context.memctx(self.repo, (p1, p2), text, fileset,
-                                 getfilectx, commit.author, commit.date, extra)
+            ctx = context.memctx(
+                self.repo,
+                (p1, p2),
+                text,
+                fileset,
+                getfilectx,
+                commit.author,
+                commit.date,
+                extra,
+            )
 
             # We won't know if the conversion changes the node until after the
             # commit, so copy the source's phase for now.
-            self.repo.ui.setconfig('phases', 'new-commit',
-                                   phases.phasenames[commit.phase], 'convert')
+            self.repo.ui.setconfig(
+                b'phases',
+                b'new-commit',
+                phases.phasenames[commit.phase],
+                b'convert',
+            )
 
-            with self.repo.transaction("convert") as tr:
-                if self.repo.ui.config('convert', 'hg.preserve-hash'):
+            with self.repo.transaction(b"convert") as tr:
+                if self.repo.ui.config(b'convert', b'hg.preserve-hash'):
                     origctx = commit.ctx
                 else:
                     origctx = None
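
The hunk above reformats the phase-copying logic: the converted node isn't known until after ``commitctx``, so the source commit's phase is injected beforehand through the ``phases.new-commit`` config. A minimal sketch of that pattern, assuming a ``repo`` and an in-memory ``memctx`` as in the convert extension (``commit_with_source_phase`` is a hypothetical helper, not part of the source)::

    from mercurial import phases

    def commit_with_source_phase(repo, memctx, phase):
        # New commits take their phase from 'phases.new-commit'; override
        # it (config source 'convert') so the result matches the source.
        repo.ui.setconfig(
            b'phases', b'new-commit', phases.phasenames[phase], b'convert'
        )
        with repo.transaction(b"convert"):
            return repo.commitctx(memctx)
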
@@ -351,18 +396,19 @@
                 if commit.rev != node:
                     ctx = self.repo[node]
                     if ctx.phase() < phases.draft:
-                        phases.registernew(self.repo, tr, phases.draft,
-                                           [ctx.node()])
+                        phases.registernew(
+                            self.repo, tr, phases.draft, [ctx.node()]
+                        )
 
-            text = "(octopus merge fixup)\n"
+            text = b"(octopus merge fixup)\n"
             p2 = node
 
         if self.filemapmode and nparents == 1:
             man = self.repo.manifestlog.getstorage(b'')
             mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
-            closed = 'close' in commit.extra
+            closed = b'close' in commit.extra
             if not closed and not man.cmp(m1node, man.revision(mnode)):
-                self.ui.status(_("filtering out empty revision\n"))
+                self.ui.status(_(b"filtering out empty revision\n"))
                 self.repo.rollback(force=True)
                 return parent
         return p2
@@ -372,14 +418,15 @@
         tagparent = tagparent or nodemod.nullid
 
         oldlines = set()
-        for branch, heads in self.repo.branchmap().iteritems():
+        for branch, heads in pycompat.iteritems(self.repo.branchmap()):
             for h in heads:
-                if '.hgtags' in self.repo[h]:
+                if b'.hgtags' in self.repo[h]:
                     oldlines.update(
-                        set(self.repo[h]['.hgtags'].data().splitlines(True)))
+                        set(self.repo[h][b'.hgtags'].data().splitlines(True))
+                    )
         oldlines = sorted(list(oldlines))
 
-        newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
+        newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags])
         if newlines == oldlines:
             return None, None
 
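
For reference, the ``.hgtags`` content compared above is one ``<hex node> <tag>`` pair per line; sorting both line sets makes old and new content comparable as plain lists. A small illustration with made-up nodes::

    tags = {b'v1.0': b'a' * 40, b'v1.1': b'b' * 40}  # tag -> hex node (fake)
    newlines = sorted(b"%s %s\n" % (tags[tag], tag) for tag in tags)
    data = b"".join(newlines)
    # data == b'aaa...a v1.0\nbbb...b v1.1\n'
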
@@ -387,12 +434,12 @@
         oldtags = set()
         newtags = set()
         for line in oldlines:
-            s = line.strip().split(' ', 1)
+            s = line.strip().split(b' ', 1)
             if len(s) != 2:
                 continue
             oldtags.add(s[1])
         for line in newlines:
-            s = line.strip().split(' ', 1)
+            s = line.strip().split(b' ', 1)
             if len(s) != 2:
                 continue
             if s[1] not in oldtags:
@@ -401,16 +448,24 @@
         if not newtags:
             return None, None
 
-        data = "".join(newlines)
+        data = b"".join(newlines)
+
         def getfilectx(repo, memctx, f):
             return context.memfilectx(repo, memctx, f, data, False, False, None)
 
-        self.ui.status(_("updating tags\n"))
-        date = "%d 0" % int(time.mktime(time.gmtime()))
-        extra = {'branch': self.tagsbranch}
-        ctx = context.memctx(self.repo, (tagparent, None), "update tags",
-                             [".hgtags"], getfilectx, "convert-repo", date,
-                             extra)
+        self.ui.status(_(b"updating tags\n"))
+        date = b"%d 0" % int(time.mktime(time.gmtime()))
+        extra = {b'branch': self.tagsbranch}
+        ctx = context.memctx(
+            self.repo,
+            (tagparent, None),
+            b"update tags",
+            [b".hgtags"],
+            getfilectx,
+            b"convert-repo",
+            date,
+            extra,
+        )
         node = self.repo.commitctx(ctx)
         return nodemod.hex(node), nodemod.hex(tagparent)
 
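
The reformatted call above is the usual in-memory commit pattern: ``memctx`` describes the changeset while a ``getfilectx`` callback supplies each file's content on demand. A self-contained sketch under assumed inputs (``write_fake_hgtags`` and its content are illustrative only)::

    from mercurial import context

    def write_fake_hgtags(repo, tagparent):
        data = b"%s %s\n" % (b'a' * 40, b'v1.0')  # fabricated .hgtags line

        def getfilectx(repo, memctx, f):
            return context.memfilectx(repo, memctx, f, data, False, False, None)

        ctx = context.memctx(
            repo,
            (tagparent, None),
            b"update tags",
            [b".hgtags"],
            getfilectx,
            b"convert-repo",
            b"0 0",  # epoch; the real code uses the current time
            {b'branch': b'default'},
        )
        return repo.commitctx(ctx)
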
@@ -424,11 +479,13 @@
         try:
             wlock = self.repo.wlock()
             lock = self.repo.lock()
-            tr = self.repo.transaction('bookmark')
-            self.ui.status(_("updating bookmarks\n"))
+            tr = self.repo.transaction(b'bookmark')
+            self.ui.status(_(b"updating bookmarks\n"))
             destmarks = self.repo._bookmarks
-            changes = [(bookmark, nodemod.bin(updatedbookmark[bookmark]))
-                       for bookmark in updatedbookmark]
+            changes = [
+                (bookmark, nodemod.bin(updatedbookmark[bookmark]))
+                for bookmark in updatedbookmark
+            ]
             destmarks.applychanges(self.repo, tr, changes)
             tr.close()
         finally:
@@ -440,17 +497,23 @@
 
     def hascommitforsplicemap(self, rev):
         if rev not in self.repo and self.clonebranches:
-            raise error.Abort(_('revision %s not found in destination '
-                               'repository (lookups with clonebranches=true '
-                               'are not implemented)') % rev)
+            raise error.Abort(
+                _(
+                    b'revision %s not found in destination '
+                    b'repository (lookups with clonebranches=true '
+                    b'are not implemented)'
+                )
+                % rev
+            )
         return rev in self.repo
 
+
 class mercurial_source(common.converter_source):
     def __init__(self, ui, repotype, path, revs=None):
         common.converter_source.__init__(self, ui, repotype, path, revs)
-        self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors')
+        self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors')
         self.ignored = set()
-        self.saverev = ui.configbool('convert', 'hg.saverev')
+        self.saverev = ui.configbool(b'convert', b'hg.saverev')
         try:
             self.repo = hg.repository(self.ui, path)
             # try to provoke an exception if this isn't really a hg
@@ -459,21 +522,22 @@
                 raise error.RepoError
         except error.RepoError:
             ui.traceback()
-            raise NoRepo(_("%s is not a local Mercurial repository") % path)
+            raise NoRepo(_(b"%s is not a local Mercurial repository") % path)
         self.lastrev = None
         self.lastctx = None
         self._changescache = None, None
         self.convertfp = None
         # Restrict converted revisions to startrev descendants
-        startnode = ui.config('convert', 'hg.startrev')
-        hgrevs = ui.config('convert', 'hg.revs')
+        startnode = ui.config(b'convert', b'hg.startrev')
+        hgrevs = ui.config(b'convert', b'hg.revs')
         if hgrevs is None:
             if startnode is not None:
                 try:
                     startnode = self.repo.lookup(startnode)
                 except error.RepoError:
-                    raise error.Abort(_('%s is not a valid start revision')
-                                     % startnode)
+                    raise error.Abort(
+                        _(b'%s is not a valid start revision') % startnode
+                    )
                 startrev = self.repo.changelog.rev(startnode)
                 children = {startnode: 1}
                 for r in self.repo.changelog.descendants([startrev]):
@@ -487,8 +551,12 @@
                 self._heads = self.repo.heads()
         else:
             if revs or startnode is not None:
-                raise error.Abort(_('hg.revs cannot be combined with '
-                                   'hg.startrev or --rev'))
+                raise error.Abort(
+                    _(
+                        b'hg.revs cannot be combined with '
+                        b'hg.startrev or --rev'
+                    )
+                )
             nodes = set()
             parents = set()
             for r in scmutil.revrange(self.repo, [hgrevs]):
@@ -522,7 +590,7 @@
         maappend = ma.append
         rappend = r.append
         d = ctx1.manifest().diff(ctx2.manifest())
-        for f, ((node1, flag1), (node2, flag2)) in d.iteritems():
+        for f, ((node1, flag1), (node2, flag2)) in pycompat.iteritems(d):
             if node2 is None:
                 rappend(f)
             else:
@@ -548,7 +616,7 @@
         cleanp2 = set()
         if len(parents) == 2:
             d = parents[1].manifest().diff(ctx.manifest(), clean=True)
-            for f, value in d.iteritems():
+            for f, value in pycompat.iteritems(d):
                 if value is None:
                     cleanp2.add(f)
         changes = [(f, rev) for f in files if f not in self.ignored]
@@ -574,7 +642,7 @@
                 if not self.ignoreerrors:
                     raise
                 self.ignored.add(name)
-                self.ui.warn(_('ignoring: %s\n') % e)
+                self.ui.warn(_(b'ignoring: %s\n') % e)
         return copies
 
     def getcommit(self, rev):
@@ -584,29 +652,38 @@
         optparents = [p.hex() for p in ctx.parents() if p and p not in _parents]
         crev = rev
 
-        return common.commit(author=ctx.user(),
-                             date=dateutil.datestr(ctx.date(),
-                                               '%Y-%m-%d %H:%M:%S %1%2'),
-                             desc=ctx.description(),
-                             rev=crev,
-                             parents=parents,
-                             optparents=optparents,
-                             branch=ctx.branch(),
-                             extra=ctx.extra(),
-                             sortkey=ctx.rev(),
-                             saverev=self.saverev,
-                             phase=ctx.phase(),
-                             ctx=ctx)
+        return common.commit(
+            author=ctx.user(),
+            date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'),
+            desc=ctx.description(),
+            rev=crev,
+            parents=parents,
+            optparents=optparents,
+            branch=ctx.branch(),
+            extra=ctx.extra(),
+            sortkey=ctx.rev(),
+            saverev=self.saverev,
+            phase=ctx.phase(),
+            ctx=ctx,
+        )
 
     def numcommits(self):
         return len(self.repo)
 
     def gettags(self):
         # This will get written to .hgtags, filter non global tags out.
-        tags = [t for t in self.repo.tagslist()
-                if self.repo.tagtype(t[0]) == 'global']
-        return dict([(name, nodemod.hex(node)) for name, node in tags
-                     if self.keep(node)])
+        tags = [
+            t
+            for t in self.repo.tagslist()
+            if self.repo.tagtype(t[0]) == b'global'
+        ]
+        return dict(
+            [
+                (name, nodemod.hex(node))
+                for name, node in tags
+                if self.keep(node)
+            ]
+        )
 
     def getchangedfiles(self, rev, i):
         ctx = self._changectx(rev)
@@ -626,15 +703,15 @@
 
     def converted(self, rev, destrev):
         if self.convertfp is None:
-            self.convertfp = open(self.repo.vfs.join('shamap'), 'ab')
-        self.convertfp.write(util.tonativeeol('%s %s\n' % (destrev, rev)))
+            self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab')
+        self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev)))
         self.convertfp.flush()
 
     def before(self):
-        self.ui.debug('run hg source pre-conversion action\n')
+        self.ui.debug(b'run hg source pre-conversion action\n')
 
     def after(self):
-        self.ui.debug('run hg source post-conversion action\n')
+        self.ui.debug(b'run hg source post-conversion action\n')
 
     def hasnativeorder(self):
         return True
@@ -651,6 +728,6 @@
     def getbookmarks(self):
         return bookmarks.listbookmarks(self.repo)
 
-    def checkrevformat(self, revstr, mapname='splicemap'):
+    def checkrevformat(self, revstr, mapname=b'splicemap'):
         """ Mercurial, revision string is a 40 byte hex """
         self.checkhexformat(revstr, mapname)
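
``checkrevformat`` above delegates to ``checkhexformat``, which validates a 40-character hexadecimal node. Approximately (this regex is a paraphrase, not a quote of the source)::

    import re

    def looks_like_hg_node(revstr):
        # e.g. b'd782cce137fd' padded out to the full 40 hex digits
        return re.match(br'[0-9a-fA-F]{40}$', revstr) is not None
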
--- a/hgext/convert/monotone.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/monotone.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,6 +11,7 @@
 import re
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     error,
     pycompat,
@@ -19,64 +20,74 @@
 
 from . import common
 
+
 class monotone_source(common.converter_source, common.commandline):
     def __init__(self, ui, repotype, path=None, revs=None):
         common.converter_source.__init__(self, ui, repotype, path, revs)
         if revs and len(revs) > 1:
-            raise error.Abort(_('monotone source does not support specifying '
-                               'multiple revs'))
-        common.commandline.__init__(self, ui, 'mtn')
+            raise error.Abort(
+                _(
+                    b'monotone source does not support specifying '
+                    b'multiple revs'
+                )
+            )
+        common.commandline.__init__(self, ui, b'mtn')
 
         self.ui = ui
         self.path = path
         self.automatestdio = False
         self.revs = revs
 
-        norepo = common.NoRepo(_("%s does not look like a monotone repository")
-                             % path)
-        if not os.path.exists(os.path.join(path, '_MTN')):
+        norepo = common.NoRepo(
+            _(b"%s does not look like a monotone repository") % path
+        )
+        if not os.path.exists(os.path.join(path, b'_MTN')):
             # Could be a monotone repository (SQLite db file)
             try:
-                f = open(path, 'rb')
+                f = open(path, b'rb')
                 header = f.read(16)
                 f.close()
             except IOError:
-                header = ''
-            if header != 'SQLite format 3\x00':
+                header = b''
+            if header != b'SQLite format 3\x00':
                 raise norepo
 
         # regular expressions for parsing monotone output
-        space    = br'\s*'
-        name     = br'\s+"((?:\\"|[^"])*)"\s*'
-        value    = name
+        space = br'\s*'
+        name = br'\s+"((?:\\"|[^"])*)"\s*'
+        value = name
         revision = br'\s+\[(\w+)\]\s*'
-        lines    = br'(?:.|\n)+'
+        lines = br'(?:.|\n)+'
 
-        self.dir_re      = re.compile(space + "dir" + name)
-        self.file_re     = re.compile(space + "file" + name +
-                                      "content" + revision)
-        self.add_file_re = re.compile(space + "add_file" + name +
-                                      "content" + revision)
-        self.patch_re    = re.compile(space + "patch" + name +
-                                      "from" + revision + "to" + revision)
-        self.rename_re   = re.compile(space + "rename" + name + "to" + name)
-        self.delete_re   = re.compile(space + "delete" + name)
-        self.tag_re      = re.compile(space + "tag" + name + "revision" +
-                                      revision)
-        self.cert_re     = re.compile(lines + space + "name" + name +
-                                      "value" + value)
+        self.dir_re = re.compile(space + b"dir" + name)
+        self.file_re = re.compile(
+            space + b"file" + name + b"content" + revision
+        )
+        self.add_file_re = re.compile(
+            space + b"add_file" + name + b"content" + revision
+        )
+        self.patch_re = re.compile(
+            space + b"patch" + name + b"from" + revision + b"to" + revision
+        )
+        self.rename_re = re.compile(space + b"rename" + name + b"to" + name)
+        self.delete_re = re.compile(space + b"delete" + name)
+        self.tag_re = re.compile(space + b"tag" + name + b"revision" + revision)
+        self.cert_re = re.compile(
+            lines + space + b"name" + name + b"value" + value
+        )
 
-        attr = space + "file" + lines + space + "attr" + space
-        self.attr_execute_re = re.compile(attr  + '"mtn:execute"' +
-                                          space + '"true"')
+        attr = space + b"file" + lines + space + b"attr" + space
+        self.attr_execute_re = re.compile(
+            attr + b'"mtn:execute"' + space + b'"true"'
+        )
 
         # cached data
         self.manifest_rev = None
         self.manifest = None
         self.files = None
-        self.dirs  = None
+        self.dirs = None
 
-        common.checktool('mtn', abort=False)
+        common.checktool(b'mtn', abort=False)
 
     def mtnrun(self, *args, **kwargs):
         if self.automatestdio:
@@ -86,27 +97,27 @@
 
     def mtnrunsingle(self, *args, **kwargs):
         kwargs[r'd'] = self.path
-        return self.run0('automate', *args, **kwargs)
+        return self.run0(b'automate', *args, **kwargs)
 
     def mtnrunstdio(self, *args, **kwargs):
         # Prepare the command in automate stdio format
         kwargs = pycompat.byteskwargs(kwargs)
         command = []
-        for k, v in kwargs.iteritems():
-            command.append("%d:%s" % (len(k), k))
+        for k, v in pycompat.iteritems(kwargs):
+            command.append(b"%d:%s" % (len(k), k))
             if v:
-                command.append("%d:%s" % (len(v), v))
+                command.append(b"%d:%s" % (len(v), v))
         if command:
-            command.insert(0, 'o')
-            command.append('e')
+            command.insert(0, b'o')
+            command.append(b'e')
 
-        command.append('l')
+        command.append(b'l')
         for arg in args:
-            command.append("%d:%s" % (len(arg), arg))
-        command.append('e')
-        command = ''.join(command)
+            command.append(b"%d:%s" % (len(arg), arg))
+        command.append(b'e')
+        command = b''.join(command)
 
-        self.ui.debug("mtn: sending '%s'\n" % command)
+        self.ui.debug(b"mtn: sending '%s'\n" % command)
         self.mtnwritefp.write(command)
         self.mtnwritefp.flush()
 
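
``mtnrunstdio`` above frames commands for monotone's ``automate stdio`` protocol as length-prefixed tokens: options between ``o`` ... ``e``, then arguments between ``l`` ... ``e``. A standalone encoder sketch of that framing (``encode_mtn_command`` is a hypothetical name)::

    def encode_mtn_command(args, opts):
        out = []
        if opts:
            out.append(b'o')
            for k, v in opts.items():
                out.append(b"%d:%s" % (len(k), k))
                if v:
                    out.append(b"%d:%s" % (len(v), v))
            out.append(b'e')
        out.append(b'l')
        for arg in args:
            out.append(b"%d:%s" % (len(arg), arg))
        out.append(b'e')
        return b''.join(out)

    # encode_mtn_command([b'leaves'], {}) == b'l6:leavese'
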
@@ -114,39 +125,47 @@
 
     def mtnstdioreadpacket(self):
         read = None
-        commandnbr = ''
-        while read != ':':
+        commandnbr = b''
+        while read != b':':
             read = self.mtnreadfp.read(1)
             if not read:
-                raise error.Abort(_('bad mtn packet - no end of commandnbr'))
+                raise error.Abort(_(b'bad mtn packet - no end of commandnbr'))
             commandnbr += read
         commandnbr = commandnbr[:-1]
 
         stream = self.mtnreadfp.read(1)
-        if stream not in 'mewptl':
-            raise error.Abort(_('bad mtn packet - bad stream type %s') % stream)
+        if stream not in b'mewptl':
+            raise error.Abort(
+                _(b'bad mtn packet - bad stream type %s') % stream
+            )
 
         read = self.mtnreadfp.read(1)
-        if read != ':':
-            raise error.Abort(_('bad mtn packet - no divider before size'))
+        if read != b':':
+            raise error.Abort(_(b'bad mtn packet - no divider before size'))
 
         read = None
-        lengthstr = ''
-        while read != ':':
+        lengthstr = b''
+        while read != b':':
             read = self.mtnreadfp.read(1)
             if not read:
-                raise error.Abort(_('bad mtn packet - no end of packet size'))
+                raise error.Abort(_(b'bad mtn packet - no end of packet size'))
             lengthstr += read
         try:
             length = pycompat.long(lengthstr[:-1])
         except TypeError:
-            raise error.Abort(_('bad mtn packet - bad packet size %s')
-                % lengthstr)
+            raise error.Abort(
+                _(b'bad mtn packet - bad packet size %s') % lengthstr
+            )
 
         read = self.mtnreadfp.read(length)
         if len(read) != length:
-            raise error.Abort(_("bad mtn packet - unable to read full packet "
-                "read %s of %s") % (len(read), length))
+            raise error.Abort(
+                _(
+                    b"bad mtn packet - unable to read full packet "
+                    b"read %s of %s"
+                )
+                % (len(read), length)
+            )
 
         return (commandnbr, stream, length, read)
 
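
The read side, ``mtnstdioreadpacket`` above, consumes replies framed as ``<commandnbr>:<stream>:<length>:<payload>``. A compact sketch without the error handling (``read_packet`` is a hypothetical helper)::

    import io

    def read_packet(fp):
        def until_colon():
            buf, ch = b'', fp.read(1)
            while ch and ch != b':':
                buf += ch
                ch = fp.read(1)
            return buf

        commandnbr = until_colon()
        stream = fp.read(1)  # one of b'm', b'e', b'w', b'p', b't', b'l'
        fp.read(1)           # skip the b':' divider after the stream byte
        length = int(until_colon())
        return commandnbr, stream, length, fp.read(length)

    # read_packet(io.BytesIO(b'0:m:5:hello')) == (b'0', b'm', 5, b'hello')
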
@@ -154,32 +173,34 @@
         retval = []
         while True:
             commandnbr, stream, length, output = self.mtnstdioreadpacket()
-            self.ui.debug('mtn: read packet %s:%s:%d\n' %
-                (commandnbr, stream, length))
+            self.ui.debug(
+                b'mtn: read packet %s:%s:%d\n' % (commandnbr, stream, length)
+            )
 
-            if stream == 'l':
+            if stream == b'l':
                 # End of command
-                if output != '0':
-                    raise error.Abort(_("mtn command '%s' returned %s") %
-                        (command, output))
+                if output != b'0':
+                    raise error.Abort(
+                        _(b"mtn command '%s' returned %s") % (command, output)
+                    )
                 break
-            elif stream in 'ew':
+            elif stream in b'ew':
                 # Error, warning output
-                self.ui.warn(_('%s error:\n') % self.command)
+                self.ui.warn(_(b'%s error:\n') % self.command)
                 self.ui.warn(output)
-            elif stream == 'p':
+            elif stream == b'p':
                 # Progress messages
-                self.ui.debug('mtn: ' + output)
-            elif stream == 'm':
+                self.ui.debug(b'mtn: ' + output)
+            elif stream == b'm':
                 # Main stream - command output
                 retval.append(output)
 
-        return ''.join(retval)
+        return b''.join(retval)
 
     def mtnloadmanifest(self, rev):
         if self.manifest_rev == rev:
             return
-        self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n")
+        self.manifest = self.mtnrun(b"get_manifest_of", rev).split(b"\n\n")
         self.manifest_rev = rev
         self.files = {}
         self.dirs = {}
@@ -187,11 +208,11 @@
         for e in self.manifest:
             m = self.file_re.match(e)
             if m:
-                attr = ""
+                attr = b""
                 name = m.group(1)
                 node = m.group(2)
                 if self.attr_execute_re.match(e):
-                    attr += "x"
+                    attr += b"x"
                 self.files[name] = (node, attr)
             m = self.dir_re.match(e)
             if m:
@@ -207,9 +228,13 @@
         return name in self.dirs
 
     def mtngetcerts(self, rev):
-        certs = {"author":"<missing>", "date":"<missing>",
-            "changelog":"<missing>", "branch":"<missing>"}
-        certlist = self.mtnrun("certs", rev)
+        certs = {
+            b"author": b"<missing>",
+            b"date": b"<missing>",
+            b"changelog": b"<missing>",
+            b"branch": b"<missing>",
+        }
+        certlist = self.mtnrun(b"certs", rev)
         # mtn < 0.45:
         #   key "test@selenic.com"
         # mtn >= 0.45:
@@ -219,27 +244,28 @@
             m = self.cert_re.match(e)
             if m:
                 name, value = m.groups()
-                value = value.replace(br'\"', '"')
-                value = value.replace(br'\\', '\\')
+                value = value.replace(br'\"', b'"')
+                value = value.replace(br'\\', b'\\')
                 certs[name] = value
         # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
         # and all times are stored in UTC
-        certs["date"] = certs["date"].split('.')[0] + " UTC"
+        certs[b"date"] = certs[b"date"].split(b'.')[0] + b" UTC"
         return certs
 
     # implement the converter_source interface:
 
     def getheads(self):
         if not self.revs:
-            return self.mtnrun("leaves").splitlines()
+            return self.mtnrun(b"leaves").splitlines()
         else:
             return self.revs
 
     def getchanges(self, rev, full):
         if full:
-            raise error.Abort(_("convert from monotone does not support "
-                              "--full"))
-        revision = self.mtnrun("get_revision", rev).split("\n\n")
+            raise error.Abort(
+                _(b"convert from monotone does not support --full")
+            )
+        revision = self.mtnrun(b"get_revision", rev).split(b"\n\n")
         files = {}
         ignoremove = {}
         renameddirs = []
@@ -277,16 +303,18 @@
             for tofile in self.files:
                 if tofile in ignoremove:
                     continue
-                if tofile.startswith(todir + '/'):
-                    renamed[tofile] = fromdir + tofile[len(todir):]
+                if tofile.startswith(todir + b'/'):
+                    renamed[tofile] = fromdir + tofile[len(todir) :]
                     # Avoid chained moves like:
                     # d1(/a) => d3/d1(/a)
                     # d2 => d3
                     ignoremove[tofile] = 1
             for tofile, fromfile in renamed.items():
                 self.ui.debug(
-                    "copying file in renamed directory from '%s' to '%s'"
-                    % (fromfile, tofile), '\n')
+                    b"copying file in renamed directory from '%s' to '%s'"
+                    % (fromfile, tofile),
+                    b'\n',
+                )
                 files[tofile] = rev
                 copies[tofile] = fromfile
             for fromfile in renamed.values():
@@ -298,31 +326,32 @@
         if not self.mtnisfile(name, rev):
             return None, None
         try:
-            data = self.mtnrun("get_file_of", name, r=rev)
+            data = self.mtnrun(b"get_file_of", name, r=rev)
         except Exception:
             return None, None
         self.mtnloadmanifest(rev)
-        node, attr = self.files.get(name, (None, ""))
+        node, attr = self.files.get(name, (None, b""))
         return data, attr
 
     def getcommit(self, rev):
         extra = {}
         certs = self.mtngetcerts(rev)
-        if certs.get('suspend') == certs["branch"]:
-            extra['close'] = 1
-        dateformat = "%Y-%m-%dT%H:%M:%S"
+        if certs.get(b'suspend') == certs[b"branch"]:
+            extra[b'close'] = 1
+        dateformat = b"%Y-%m-%dT%H:%M:%S"
         return common.commit(
-            author=certs["author"],
-            date=dateutil.datestr(dateutil.strdate(certs["date"], dateformat)),
-            desc=certs["changelog"],
+            author=certs[b"author"],
+            date=dateutil.datestr(dateutil.strdate(certs[b"date"], dateformat)),
+            desc=certs[b"changelog"],
             rev=rev,
-            parents=self.mtnrun("parents", rev).splitlines(),
-            branch=certs["branch"],
-            extra=extra)
+            parents=self.mtnrun(b"parents", rev).splitlines(),
+            branch=certs[b"branch"],
+            extra=extra,
+        )
 
     def gettags(self):
         tags = {}
-        for e in self.mtnrun("tags").split("\n\n"):
+        for e in self.mtnrun(b"tags").split(b"\n\n"):
             m = self.tag_re.match(e)
             if m:
                 tags[m.group(1)] = m.group(2)
@@ -336,33 +365,43 @@
     def before(self):
         # Check if we have a new enough version to use automate stdio
         try:
-            versionstr = self.mtnrunsingle("interface_version")
+            versionstr = self.mtnrunsingle(b"interface_version")
             version = float(versionstr)
         except Exception:
-            raise error.Abort(_("unable to determine mtn automate interface "
-                "version"))
+            raise error.Abort(
+                _(b"unable to determine mtn automate interface version")
+            )
 
         if version >= 12.0:
             self.automatestdio = True
-            self.ui.debug("mtn automate version %f - using automate stdio\n" %
-                version)
+            self.ui.debug(
+                b"mtn automate version %f - using automate stdio\n" % version
+            )
 
             # launch the long-running automate stdio process
-            self.mtnwritefp, self.mtnreadfp = self._run2('automate', 'stdio',
-                '-d', self.path)
+            self.mtnwritefp, self.mtnreadfp = self._run2(
+                b'automate', b'stdio', b'-d', self.path
+            )
             # read the headers
             read = self.mtnreadfp.readline()
-            if read != 'format-version: 2\n':
-                raise error.Abort(_('mtn automate stdio header unexpected: %s')
-                    % read)
-            while read != '\n':
+            if read != b'format-version: 2\n':
+                raise error.Abort(
+                    _(b'mtn automate stdio header unexpected: %s') % read
+                )
+            while read != b'\n':
                 read = self.mtnreadfp.readline()
                 if not read:
-                    raise error.Abort(_("failed to reach end of mtn automate "
-                        "stdio headers"))
+                    raise error.Abort(
+                        _(
+                            b"failed to reach end of mtn automate "
+                            b"stdio headers"
+                        )
+                    )
         else:
-            self.ui.debug("mtn automate version %s - not using automate stdio "
-                "(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version)
+            self.ui.debug(
+                b"mtn automate version %s - not using automate stdio "
+                b"(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version
+            )
 
     def after(self):
         if self.automatestdio:
--- a/hgext/convert/p4.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/p4.py	Mon Oct 21 11:09:48 2019 -0400
@@ -22,8 +22,9 @@
 
 from . import common
 
+
 def loaditer(f):
-    "Yield the dictionary objects generated by p4"
+    b"Yield the dictionary objects generated by p4"
     try:
         while True:
             d = marshal.load(f)
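
``p4 -G`` emits a stream of marshalled Python dictionaries, which is why ``loaditer`` loops on ``marshal.load`` until ``EOFError``. A usage sketch (the command and depot path are illustrative)::

    import marshal

    from mercurial.utils import procutil

    stdout = procutil.popen(b'p4 -G changes -m 5 //depot/...', mode=b'rb')
    try:
        while True:
            d = marshal.load(stdout)  # one dict per changelist
            print(d.get(b'change'))
    except EOFError:
        pass
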
@@ -33,6 +34,7 @@
     except EOFError:
         pass
 
+
 def decodefilename(filename):
     """Perforce escapes special characters @, #, *, or %
     with %40, %23, %2A, or %25 respectively
@@ -42,11 +44,17 @@
     >>> decodefilename(b'//Depot/Directory/%2525/%2523/%23%40.%2A')
     '//Depot/Directory/%25/%23/#@.*'
     """
-    replacements = [('%2A', '*'), ('%23', '#'), ('%40', '@'), ('%25', '%')]
+    replacements = [
+        (b'%2A', b'*'),
+        (b'%23', b'#'),
+        (b'%40', b'@'),
+        (b'%25', b'%'),
+    ]
     for k, v in replacements:
         filename = filename.replace(k, v)
     return filename
 
+
 class p4_source(common.converter_source):
     def __init__(self, ui, repotype, path, revs=None):
         # avoid import cycle
@@ -54,26 +62,34 @@
 
         super(p4_source, self).__init__(ui, repotype, path, revs=revs)
 
-        if "/" in path and not path.startswith('//'):
-            raise common.NoRepo(_('%s does not look like a P4 repository') %
-                                path)
+        if b"/" in path and not path.startswith(b'//'):
+            raise common.NoRepo(
+                _(b'%s does not look like a P4 repository') % path
+            )
 
-        common.checktool('p4', abort=False)
+        common.checktool(b'p4', abort=False)
 
         self.revmap = {}
-        self.encoding = self.ui.config('convert', 'p4.encoding',
-                                       convcmd.orig_encoding)
+        self.encoding = self.ui.config(
+            b'convert', b'p4.encoding', convcmd.orig_encoding
+        )
         self.re_type = re.compile(
             br"([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
-            br"(\+\w+)?$")
+            br"(\+\w+)?$"
+        )
         self.re_keywords = re.compile(
             br"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
-            br":[^$\n]*\$")
+            br":[^$\n]*\$"
+        )
         self.re_keywords_old = re.compile(br"\$(Id|Header):[^$\n]*\$")
 
         if revs and len(revs) > 1:
-            raise error.Abort(_("p4 source does not support specifying "
-                               "multiple revisions"))
+            raise error.Abort(
+                _(
+                    b"p4 source does not support specifying "
+                    b"multiple revisions"
+                )
+            )
 
     def setrevmap(self, revmap):
         """Sets the parsed revmap dictionary.
@@ -89,18 +105,18 @@
         self.revmap = revmap
 
     def _parse_view(self, path):
-        "Read changes affecting the path"
-        cmd = 'p4 -G changes -s submitted %s' % procutil.shellquote(path)
-        stdout = procutil.popen(cmd, mode='rb')
+        b"Read changes affecting the path"
+        cmd = b'p4 -G changes -s submitted %s' % procutil.shellquote(path)
+        stdout = procutil.popen(cmd, mode=b'rb')
         p4changes = {}
         for d in loaditer(stdout):
-            c = d.get("change", None)
+            c = d.get(b"change", None)
             if c:
                 p4changes[c] = True
         return p4changes
 
     def _parse(self, ui, path):
-        "Prepare list of P4 filenames and revisions to import"
+        b"Prepare list of P4 filenames and revisions to import"
         p4changes = {}
         changeset = {}
         files_map = {}
@@ -109,29 +125,29 @@
         depotname = {}
         heads = []
 
-        ui.status(_('reading p4 views\n'))
+        ui.status(_(b'reading p4 views\n'))
 
         # read client spec or view
-        if "/" in path:
+        if b"/" in path:
             p4changes.update(self._parse_view(path))
-            if path.startswith("//") and path.endswith("/..."):
-                views = {path[:-3]:""}
+            if path.startswith(b"//") and path.endswith(b"/..."):
+                views = {path[:-3]: b""}
             else:
-                views = {"//": ""}
+                views = {b"//": b""}
         else:
-            cmd = 'p4 -G client -o %s' % procutil.shellquote(path)
-            clientspec = marshal.load(procutil.popen(cmd, mode='rb'))
+            cmd = b'p4 -G client -o %s' % procutil.shellquote(path)
+            clientspec = marshal.load(procutil.popen(cmd, mode=b'rb'))
 
             views = {}
             for client in clientspec:
-                if client.startswith("View"):
+                if client.startswith(b"View"):
                     sview, cview = clientspec[client].split()
                     p4changes.update(self._parse_view(sview))
-                    if sview.endswith("...") and cview.endswith("..."):
+                    if sview.endswith(b"...") and cview.endswith(b"..."):
                         sview = sview[:-3]
                         cview = cview[:-3]
                     cview = cview[2:]
-                    cview = cview[cview.find("/") + 1:]
+                    cview = cview[cview.find(b"/") + 1 :]
                     views[sview] = cview
 
         # list of changes that affect our source files
@@ -143,10 +159,10 @@
         vieworder.sort(key=len, reverse=True)
 
         # handle revision limiting
-        startrev = self.ui.config('convert', 'p4.startrev')
+        startrev = self.ui.config(b'convert', b'p4.startrev')
 
         # now read the full changelists to get the list of file revisions
-        ui.status(_('collecting p4 changelists\n'))
+        ui.status(_(b'collecting p4 changelists\n'))
         lastid = None
         for change in p4changes:
             if startrev and int(change) < int(startrev):
@@ -168,28 +184,28 @@
 
             descarr = c.desc.splitlines(True)
             if len(descarr) > 0:
-                shortdesc = descarr[0].rstrip('\r\n')
+                shortdesc = descarr[0].rstrip(b'\r\n')
             else:
-                shortdesc = '**empty changelist description**'
+                shortdesc = b'**empty changelist description**'
 
-            t = '%s %s' % (c.rev, repr(shortdesc)[1:-1])
-            ui.status(stringutil.ellipsis(t, 80) + '\n')
+            t = b'%s %s' % (c.rev, repr(shortdesc)[1:-1])
+            ui.status(stringutil.ellipsis(t, 80) + b'\n')
 
             files = []
             copies = {}
             copiedfiles = []
             i = 0
-            while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
-                oldname = d["depotFile%d" % i]
+            while (b"depotFile%d" % i) in d and (b"rev%d" % i) in d:
+                oldname = d[b"depotFile%d" % i]
                 filename = None
                 for v in vieworder:
                     if oldname.lower().startswith(v.lower()):
-                        filename = decodefilename(views[v] + oldname[len(v):])
+                        filename = decodefilename(views[v] + oldname[len(v) :])
                         break
                 if filename:
-                    files.append((filename, d["rev%d" % i]))
+                    files.append((filename, d[b"rev%d" % i]))
                     depotname[filename] = oldname
-                    if (d.get("action%d" % i) == "move/add"):
+                    if d.get(b"action%d" % i) == b"move/add":
                         copiedfiles.append(filename)
                     localname[oldname] = filename
                 i += 1
@@ -198,22 +214,23 @@
             for filename in copiedfiles:
                 oldname = depotname[filename]
 
-                flcmd = ('p4 -G filelog %s'
-                         % procutil.shellquote(oldname))
-                flstdout = procutil.popen(flcmd, mode='rb')
+                flcmd = b'p4 -G filelog %s' % procutil.shellquote(oldname)
+                flstdout = procutil.popen(flcmd, mode=b'rb')
 
                 copiedfilename = None
                 for d in loaditer(flstdout):
                     copiedoldname = None
 
                     i = 0
-                    while ("change%d" % i) in d:
-                        if (d["change%d" % i] == change and
-                            d["action%d" % i] == "move/add"):
+                    while (b"change%d" % i) in d:
+                        if (
+                            d[b"change%d" % i] == change
+                            and d[b"action%d" % i] == b"move/add"
+                        ):
                             j = 0
-                            while ("file%d,%d" % (i, j)) in d:
-                                if d["how%d,%d" % (i, j)] == "moved from":
-                                    copiedoldname = d["file%d,%d" % (i, j)]
+                            while (b"file%d,%d" % (i, j)) in d:
+                                if d[b"how%d,%d" % (i, j)] == b"moved from":
+                                    copiedoldname = d[b"file%d,%d" % (i, j)]
                                     break
                                 j += 1
                         i += 1
@@ -225,8 +242,10 @@
                 if copiedfilename:
                     copies[filename] = copiedfilename
                 else:
-                    ui.warn(_("cannot find source for copied file: %s@%s\n")
-                            % (filename, change))
+                    ui.warn(
+                        _(b"cannot find source for copied file: %s@%s\n")
+                        % (filename, change)
+                    )
 
             changeset[change] = c
             files_map[change] = files
@@ -237,11 +256,11 @@
             heads = [lastid]
 
         return {
-            'changeset': changeset,
-            'files': files_map,
-            'copies': copies_map,
-            'heads': heads,
-            'depotname': depotname,
+            b'changeset': changeset,
+            b'files': files_map,
+            b'copies': copies_map,
+            b'heads': heads,
+            b'depotname': depotname,
         }
 
     @util.propertycache
@@ -250,72 +269,74 @@
 
     @util.propertycache
     def copies(self):
-        return self._parse_once['copies']
+        return self._parse_once[b'copies']
 
     @util.propertycache
     def files(self):
-        return self._parse_once['files']
+        return self._parse_once[b'files']
 
     @util.propertycache
     def changeset(self):
-        return self._parse_once['changeset']
+        return self._parse_once[b'changeset']
 
     @util.propertycache
     def heads(self):
-        return self._parse_once['heads']
+        return self._parse_once[b'heads']
 
     @util.propertycache
     def depotname(self):
-        return self._parse_once['depotname']
+        return self._parse_once[b'depotname']
 
     def getheads(self):
         return self.heads
 
     def getfile(self, name, rev):
-        cmd = ('p4 -G print %s'
-               % procutil.shellquote("%s#%s" % (self.depotname[name], rev)))
+        cmd = b'p4 -G print %s' % procutil.shellquote(
+            b"%s#%s" % (self.depotname[name], rev)
+        )
 
         lasterror = None
         while True:
-            stdout = procutil.popen(cmd, mode='rb')
+            stdout = procutil.popen(cmd, mode=b'rb')
 
             mode = None
             contents = []
             keywords = None
 
             for d in loaditer(stdout):
-                code = d["code"]
-                data = d.get("data")
+                code = d[b"code"]
+                data = d.get(b"data")
 
-                if code == "error":
+                if code == b"error":
                     # if this is the first time error happened
                     # re-attempt getting the file
                     if not lasterror:
-                        lasterror = IOError(d["generic"], data)
+                        lasterror = IOError(d[b"generic"], data)
                         # this will exit inner-most for-loop
                         break
                     else:
                         raise lasterror
 
-                elif code == "stat":
-                    action = d.get("action")
-                    if action in ["purge", "delete", "move/delete"]:
+                elif code == b"stat":
+                    action = d.get(b"action")
+                    if action in [b"purge", b"delete", b"move/delete"]:
                         return None, None
-                    p4type = self.re_type.match(d["type"])
+                    p4type = self.re_type.match(d[b"type"])
                     if p4type:
-                        mode = ""
-                        flags = ((p4type.group(1) or "")
-                               + (p4type.group(3) or ""))
-                        if "x" in flags:
-                            mode = "x"
-                        if p4type.group(2) == "symlink":
-                            mode = "l"
-                        if "ko" in flags:
+                        mode = b""
+                        flags = (p4type.group(1) or b"") + (
+                            p4type.group(3) or b""
+                        )
+                        if b"x" in flags:
+                            mode = b"x"
+                        if p4type.group(2) == b"symlink":
+                            mode = b"l"
+                        if b"ko" in flags:
                             keywords = self.re_keywords_old
-                        elif "k" in flags:
+                        elif b"k" in flags:
                             keywords = self.re_keywords
 
-                elif code == "text" or code == "binary":
+                elif code == b"text" or code == b"binary":
                     contents.append(data)
 
                 lasterror = None
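
The block of cached properties reformatted above is a parse-once pattern: the expensive ``_parse`` runs a single time and each result is then served from ``util.propertycache``. Reduced to a runnable shape (class and values are made up)::

    from mercurial import util

    class parsed(object):
        def _parse(self):
            # stand-in for the expensive `p4 ...` parsing pass
            return {b'heads': [b'9'], b'changeset': {}}

        @util.propertycache
        def _parse_once(self):
            return self._parse()

        @util.propertycache
        def heads(self):
            return self._parse_once[b'heads']

    # parsed().heads == [b'9']; _parse ran exactly once
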
@@ -326,18 +347,18 @@
         if mode is None:
             return None, None
 
-        contents = ''.join(contents)
+        contents = b''.join(contents)
 
         if keywords:
-            contents = keywords.sub("$\\1$", contents)
-        if mode == "l" and contents.endswith("\n"):
+            contents = keywords.sub(b"$\\1$", contents)
+        if mode == b"l" and contents.endswith(b"\n"):
             contents = contents[:-1]
 
         return contents, mode
 
     def getchanges(self, rev, full):
         if full:
-            raise error.Abort(_("convert from p4 does not support --full"))
+            raise error.Abort(_(b"convert from p4 does not support --full"))
         return self.files[rev], self.copies[rev], set()
 
     def _construct_commit(self, obj, parents=None):
@@ -345,21 +366,26 @@
         Constructs a common.commit object from an unmarshalled
         `p4 describe` output
         """
-        desc = self.recode(obj.get("desc", ""))
-        date = (int(obj["time"]), 0)     # timezone not set
+        desc = self.recode(obj.get(b"desc", b""))
+        date = (int(obj[b"time"]), 0)  # timezone not set
         if parents is None:
             parents = []
 
-        return common.commit(author=self.recode(obj["user"]),
-            date=dateutil.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
-            parents=parents, desc=desc, branch=None, rev=obj['change'],
-            extra={"p4": obj['change'], "convert_revision": obj['change']})
+        return common.commit(
+            author=self.recode(obj[b"user"]),
+            date=dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2'),
+            parents=parents,
+            desc=desc,
+            branch=None,
+            rev=obj[b'change'],
+            extra={b"p4": obj[b'change'], b"convert_revision": obj[b'change']},
+        )
 
     def _fetch_revision(self, rev):
         """Return an output of `p4 describe` including author, commit date as
         a dictionary."""
-        cmd = "p4 -G describe -s %s" % rev
-        stdout = procutil.popen(cmd, mode='rb')
+        cmd = b"p4 -G describe -s %s" % rev
+        stdout = procutil.popen(cmd, mode=b'rb')
         return marshal.load(stdout)
 
     def getcommit(self, rev):
@@ -369,7 +395,8 @@
             d = self._fetch_revision(rev)
             return self._construct_commit(d, parents=None)
         raise error.Abort(
-            _("cannot find %s in the revmap or parsed changesets") % rev)
+            _(b"cannot find %s in the revmap or parsed changesets") % rev
+        )
 
     def gettags(self):
         return {}
--- a/hgext/convert/subversion.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/subversion.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,6 +8,7 @@
 import xml.dom.minidom
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     encoding,
     error,
@@ -52,17 +53,20 @@
     import svn.delta
     from . import transport
     import warnings
-    warnings.filterwarnings('ignore',
-            module='svn.core',
-            category=DeprecationWarning)
-    svn.core.SubversionException # trigger import to catch error
+
+    warnings.filterwarnings(
+        'ignore', module='svn.core', category=DeprecationWarning
+    )
+    svn.core.SubversionException  # trigger import to catch error
 
 except ImportError:
     svn = None
 
+
 class SvnPathNotFound(Exception):
     pass
 
+
 def revsplit(rev):
     """Parse a revision string and return (uuid, path, revnum).
     >>> revsplit(b'svn:a2147622-4a9f-4db4-a8d3-13562ff547b2'
@@ -77,18 +81,19 @@
     >>> revsplit(b'bad')
     ('', '', 0)
     """
-    parts = rev.rsplit('@', 1)
+    parts = rev.rsplit(b'@', 1)
     revnum = 0
     if len(parts) > 1:
         revnum = int(parts[1])
-    parts = parts[0].split('/', 1)
-    uuid = ''
-    mod = ''
-    if len(parts) > 1 and parts[0].startswith('svn:'):
+    parts = parts[0].split(b'/', 1)
+    uuid = b''
+    mod = b''
+    if len(parts) > 1 and parts[0].startswith(b'svn:'):
         uuid = parts[0][4:]
-        mod = '/' + parts[1]
+        mod = b'/' + parts[1]
     return uuid, mod, revnum
 
+
 def quote(s):
     # As of svn 1.7, many svn calls expect "canonical" paths. In
     # theory, we should call svn.core.*canonicalize() on all paths
@@ -97,7 +102,8 @@
     # so we can extend it safely with new components. The "safe"
     # characters were taken from the "svn_uri__char_validity" table in
     # libsvn_subr/path.c.
-    return urlreq.quote(s, "!$&'()*+,-./:=@_~")
+    return urlreq.quote(s, b"!$&'()*+,-./:=@_~")
+
 
 def geturl(path):
     try:
@@ -108,44 +114,61 @@
     if os.path.isdir(path):
         path = os.path.normpath(os.path.abspath(path))
         if pycompat.iswindows:
-            path = '/' + util.normpath(path)
+            path = b'/' + util.normpath(path)
         # Module URL is later compared with the repository URL returned
         # by svn API, which is UTF-8.
         path = encoding.tolocal(path)
-        path = 'file://%s' % quote(path)
+        path = b'file://%s' % quote(path)
     return svn.core.svn_path_canonicalize(path)
 
+
 def optrev(number):
     optrev = svn.core.svn_opt_revision_t()
     optrev.kind = svn.core.svn_opt_revision_number
     optrev.value.number = number
     return optrev
 
+
 class changedpath(object):
     def __init__(self, p):
         self.copyfrom_path = p.copyfrom_path
         self.copyfrom_rev = p.copyfrom_rev
         self.action = p.action
 
-def get_log_child(fp, url, paths, start, end, limit=0,
-                  discover_changed_paths=True, strict_node_history=False):
+
+def get_log_child(
+    fp,
+    url,
+    paths,
+    start,
+    end,
+    limit=0,
+    discover_changed_paths=True,
+    strict_node_history=False,
+):
     protocol = -1
+
     def receiver(orig_paths, revnum, author, date, message, pool):
         paths = {}
         if orig_paths is not None:
-            for k, v in orig_paths.iteritems():
+            for k, v in pycompat.iteritems(orig_paths):
                 paths[k] = changedpath(v)
-        pickle.dump((paths, revnum, author, date, message),
-                    fp, protocol)
+        pickle.dump((paths, revnum, author, date, message), fp, protocol)
 
     try:
         # Use an ra of our own so that our parent can consume
         # our results without confusing the server.
         t = transport.SvnRaTransport(url=url)
-        svn.ra.get_log(t.ra, paths, start, end, limit,
-                       discover_changed_paths,
-                       strict_node_history,
-                       receiver)
+        svn.ra.get_log(
+            t.ra,
+            paths,
+            start,
+            end,
+            limit,
+            discover_changed_paths,
+            strict_node_history,
+            receiver,
+        )
     except IOError:
         # Caller may interrupt the iteration
         pickle.dump(None, fp, protocol)
@@ -159,19 +182,23 @@
     # there is no need for clean termination.
     os._exit(0)
 
+
 def debugsvnlog(ui, **opts):
     """Fetch SVN log in a subprocess and channel them back to parent to
     avoid memory collection issues.
     """
     if svn is None:
-        raise error.Abort(_('debugsvnlog could not load Subversion python '
-                           'bindings'))
+        raise error.Abort(
+            _(b'debugsvnlog could not load Subversion python bindings')
+        )
 
     args = decodeargs(ui.fin.read())
     get_log_child(ui.fout, *args)
 
+
 class logstream(object):
     """Interruptible revision log iterator."""
+
     def __init__(self, stdout):
         self._stdout = stdout
 
@@ -180,14 +207,18 @@
             try:
                 entry = pickle.load(self._stdout)
             except EOFError:
-                raise error.Abort(_('Mercurial failed to run itself, check'
-                                   ' hg executable is in PATH'))
+                raise error.Abort(
+                    _(
+                        b'Mercurial failed to run itself, check'
+                        b' hg executable is in PATH'
+                    )
+                )
             try:
                 orig_paths, revnum, author, date, message = entry
             except (TypeError, ValueError):
                 if entry is None:
                     break
-                raise error.Abort(_("log stream exception '%s'") % entry)
+                raise error.Abort(_(b"log stream exception '%s'") % entry)
             yield entry
 
     def close(self):
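
``debugsvnlog`` and ``logstream`` above move log entries between processes as a pickle stream terminated by a ``None`` sentinel. The round trip over an in-memory buffer, for illustration::

    import io
    import pickle

    buf = io.BytesIO()
    for entry in [({}, 1, b'author', b'date', b'message'), None]:
        pickle.dump(entry, buf, -1)  # protocol -1, as in get_log_child

    buf.seek(0)
    while True:
        entry = pickle.load(buf)
        if entry is None:  # sentinel: the child is done
            break
        paths, revnum, author, date, message = entry
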
@@ -195,85 +226,115 @@
             self._stdout.close()
             self._stdout = None
 
+
 class directlogstream(list):
     """Direct revision log iterator.
     This can be used for debugging and development but it will probably leak
     memory and is not suitable for real conversions."""
-    def __init__(self, url, paths, start, end, limit=0,
-                  discover_changed_paths=True, strict_node_history=False):
 
+    def __init__(
+        self,
+        url,
+        paths,
+        start,
+        end,
+        limit=0,
+        discover_changed_paths=True,
+        strict_node_history=False,
+    ):
         def receiver(orig_paths, revnum, author, date, message, pool):
             paths = {}
             if orig_paths is not None:
-                for k, v in orig_paths.iteritems():
+                for k, v in pycompat.iteritems(orig_paths):
                     paths[k] = changedpath(v)
             self.append((paths, revnum, author, date, message))
 
         # Use an ra of our own so that our parent can consume
         # our results without confusing the server.
         t = transport.SvnRaTransport(url=url)
-        svn.ra.get_log(t.ra, paths, start, end, limit,
-                       discover_changed_paths,
-                       strict_node_history,
-                       receiver)
+        svn.ra.get_log(
+            t.ra,
+            paths,
+            start,
+            end,
+            limit,
+            discover_changed_paths,
+            strict_node_history,
+            receiver,
+        )
 
     def close(self):
         pass
 
+
 # Check to see if the given path is a local Subversion repo. Verify this by
 # looking for several svn-specific files and directories in the given
 # directory.
 def filecheck(ui, path, proto):
-    for x in ('locks', 'hooks', 'format', 'db'):
+    for x in (b'locks', b'hooks', b'format', b'db'):
         if not os.path.exists(os.path.join(path, x)):
             return False
     return True
 
+
 # Check to see if a given path is the root of an svn repo over http. We verify
 # this by requesting a version-controlled URL we know can't exist and looking
 # for the svn-specific "not found" XML.
 def httpcheck(ui, path, proto):
     try:
         opener = urlreq.buildopener()
-        rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path), 'rb')
+        rsp = opener.open(b'%s://%s/!svn/ver/0/.svn' % (proto, path), b'rb')
         data = rsp.read()
     except urlerr.httperror as inst:
         if inst.code != 404:
             # Except for 404 we cannot know for sure this is not an svn repo
-            ui.warn(_('svn: cannot probe remote repository, assume it could '
-                      'be a subversion repository. Use --source-type if you '
-                      'know better.\n'))
+            ui.warn(
+                _(
+                    b'svn: cannot probe remote repository, assume it could '
+                    b'be a subversion repository. Use --source-type if you '
+                    b'know better.\n'
+                )
+            )
             return True
         data = inst.fp.read()
     except Exception:
         # Could be urlerr.urlerror if the URL is invalid or anything else.
         return False
-    return '<m:human-readable errcode="160013">' in data
+    return b'<m:human-readable errcode="160013">' in data
+
 
-protomap = {'http': httpcheck,
-            'https': httpcheck,
-            'file': filecheck,
-            }
+protomap = {
+    b'http': httpcheck,
+    b'https': httpcheck,
+    b'file': filecheck,
+}
+
+
 def issvnurl(ui, url):
     try:
-        proto, path = url.split('://', 1)
-        if proto == 'file':
-            if (pycompat.iswindows and path[:1] == '/'
-                  and path[1:2].isalpha() and path[2:6].lower() == '%3a/'):
-                path = path[:2] + ':/' + path[6:]
+        proto, path = url.split(b'://', 1)
+        if proto == b'file':
+            if (
+                pycompat.iswindows
+                and path[:1] == b'/'
+                and path[1:2].isalpha()
+                and path[2:6].lower() == b'%3a/'
+            ):
+                path = path[:2] + b':/' + path[6:]
             path = urlreq.url2pathname(path)
     except ValueError:
-        proto = 'file'
+        proto = b'file'
         path = os.path.abspath(url)
-    if proto == 'file':
+    if proto == b'file':
         path = util.pconvert(path)
     check = protomap.get(proto, lambda *args: False)
-    while '/' in path:
+    while b'/' in path:
         if check(ui, path, proto):
             return True
-        path = path.rsplit('/', 1)[0]
+        path = path.rsplit(b'/', 1)[0]
     return False
 
+
 # SVN conversion code stolen from bzr-svn and tailor
 #
 # Subversion looks like a versioned filesystem, branches structures
@@ -292,23 +353,38 @@
     def __init__(self, ui, repotype, url, revs=None):
         super(svn_source, self).__init__(ui, repotype, url, revs=revs)
 
-        if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
-                (os.path.exists(url) and
-                 os.path.exists(os.path.join(url, '.svn'))) or
-                issvnurl(ui, url)):
-            raise NoRepo(_("%s does not look like a Subversion repository")
-                         % url)
+        if not (
+            url.startswith(b'svn://')
+            or url.startswith(b'svn+ssh://')
+            or (
+                os.path.exists(url)
+                and os.path.exists(os.path.join(url, b'.svn'))
+            )
+            or issvnurl(ui, url)
+        ):
+            raise NoRepo(
+                _(b"%s does not look like a Subversion repository") % url
+            )
         if svn is None:
-            raise MissingTool(_('could not load Subversion python bindings'))
+            raise MissingTool(_(b'could not load Subversion python bindings'))
 
         try:
             version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
             if version < (1, 4):
-                raise MissingTool(_('Subversion python bindings %d.%d found, '
-                                    '1.4 or later required') % version)
+                raise MissingTool(
+                    _(
+                        b'Subversion python bindings %d.%d found, '
+                        b'1.4 or later required'
+                    )
+                    % version
+                )
         except AttributeError:
-            raise MissingTool(_('Subversion python bindings are too old, 1.4 '
-                                'or later required'))
+            raise MissingTool(
+                _(
+                    b'Subversion python bindings are too old, 1.4 '
+                    b'or later required'
+                )
+            )
 
         self.lastrevs = {}
 
@@ -316,14 +392,14 @@
         try:
             # Support file://path@rev syntax. Useful e.g. to convert
             # deleted branches.
-            at = url.rfind('@')
+            at = url.rfind(b'@')
             if at >= 0:
-                latest = int(url[at + 1:])
+                latest = int(url[at + 1 :])
                 url = url[:at]
         except ValueError:
             pass
         self.url = geturl(url)
-        self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
+        self.encoding = b'UTF-8'  # Subversion is always nominally UTF-8
         try:
             self.transport = transport.SvnRaTransport(url=self.url)
             self.ra = self.transport.ra
@@ -331,7 +407,7 @@
             self.baseurl = svn.ra.get_repos_root(self.ra)
             # Module is either empty or a repository path starting with
             # a slash and not ending with a slash.
-            self.module = urlreq.unquote(self.url[len(self.baseurl):])
+            self.module = urlreq.unquote(self.url[len(self.baseurl) :])
             self.prevmodule = None
             self.rootmodule = self.module
             self.commits = {}
@@ -339,48 +415,61 @@
             self.uuid = svn.ra.get_uuid(self.ra)
         except svn.core.SubversionException:
             ui.traceback()
-            svnversion = '%d.%d.%d' % (svn.core.SVN_VER_MAJOR,
-                                       svn.core.SVN_VER_MINOR,
-                                       svn.core.SVN_VER_MICRO)
-            raise NoRepo(_("%s does not look like a Subversion repository "
-                           "to libsvn version %s")
-                         % (self.url, svnversion))
+            svnversion = b'%d.%d.%d' % (
+                svn.core.SVN_VER_MAJOR,
+                svn.core.SVN_VER_MINOR,
+                svn.core.SVN_VER_MICRO,
+            )
+            raise NoRepo(
+                _(
+                    b"%s does not look like a Subversion repository "
+                    b"to libsvn version %s"
+                )
+                % (self.url, svnversion)
+            )
 
         if revs:
             if len(revs) > 1:
-                raise error.Abort(_('subversion source does not support '
-                                   'specifying multiple revisions'))
+                raise error.Abort(
+                    _(
+                        b'subversion source does not support '
+                        b'specifying multiple revisions'
+                    )
+                )
             try:
                 latest = int(revs[0])
             except ValueError:
-                raise error.Abort(_('svn: revision %s is not an integer') %
-                                 revs[0])
+                raise error.Abort(
+                    _(b'svn: revision %s is not an integer') % revs[0]
+                )
 
-        trunkcfg = self.ui.config('convert', 'svn.trunk')
+        trunkcfg = self.ui.config(b'convert', b'svn.trunk')
         if trunkcfg is None:
-            trunkcfg = 'trunk'
-        self.trunkname = trunkcfg.strip('/')
-        self.startrev = self.ui.config('convert', 'svn.startrev')
+            trunkcfg = b'trunk'
+        self.trunkname = trunkcfg.strip(b'/')
+        self.startrev = self.ui.config(b'convert', b'svn.startrev')
         try:
             self.startrev = int(self.startrev)
             if self.startrev < 0:
                 self.startrev = 0
         except ValueError:
-            raise error.Abort(_('svn: start revision %s is not an integer')
-                             % self.startrev)
+            raise error.Abort(
+                _(b'svn: start revision %s is not an integer') % self.startrev
+            )
 
         try:
             self.head = self.latest(self.module, latest)
         except SvnPathNotFound:
             self.head = None
         if not self.head:
-            raise error.Abort(_('no revision found in module %s')
-                             % self.module)
+            raise error.Abort(
+                _(b'no revision found in module %s') % self.module
+            )
         self.last_changed = self.revnum(self.head)
 
         self._changescache = (None, None)
 
-        if os.path.exists(os.path.join(url, '.svn/entries')):
+        if os.path.exists(os.path.join(url, b'.svn/entries')):
             self.wc = url
         else:
             self.wc = None
@@ -397,81 +486,95 @@
 
     def exists(self, path, optrev):
         try:
-            svn.client.ls(self.url.rstrip('/') + '/' + quote(path),
-                                 optrev, False, self.ctx)
+            svn.client.ls(
+                self.url.rstrip(b'/') + b'/' + quote(path),
+                optrev,
+                False,
+                self.ctx,
+            )
             return True
         except svn.core.SubversionException:
             return False
 
     def getheads(self):
-
         def isdir(path, revnum):
             kind = self._checkpath(path, revnum)
             return kind == svn.core.svn_node_dir
 
         def getcfgpath(name, rev):
-            cfgpath = self.ui.config('convert', 'svn.' + name)
-            if cfgpath is not None and cfgpath.strip() == '':
+            cfgpath = self.ui.config(b'convert', b'svn.' + name)
+            if cfgpath is not None and cfgpath.strip() == b'':
                 return None
-            path = (cfgpath or name).strip('/')
+            path = (cfgpath or name).strip(b'/')
             if not self.exists(path, rev):
-                if self.module.endswith(path) and name == 'trunk':
+                if self.module.endswith(path) and name == b'trunk':
                     # we are converting from inside this directory
                     return None
                 if cfgpath:
-                    raise error.Abort(_('expected %s to be at %r, but not found'
-                                       ) % (name, path))
+                    raise error.Abort(
+                        _(b'expected %s to be at %r, but not found')
+                        % (name, path)
+                    )
                 return None
-            self.ui.note(_('found %s at %r\n') % (name, path))
+            self.ui.note(_(b'found %s at %r\n') % (name, path))
             return path
 
         rev = optrev(self.last_changed)
-        oldmodule = ''
-        trunk = getcfgpath('trunk', rev)
-        self.tags = getcfgpath('tags', rev)
-        branches = getcfgpath('branches', rev)
+        oldmodule = b''
+        trunk = getcfgpath(b'trunk', rev)
+        self.tags = getcfgpath(b'tags', rev)
+        branches = getcfgpath(b'branches', rev)
 
         # If the project has a trunk or branches, we will extract heads
         # from them. We keep the project root otherwise.
         if trunk:
-            oldmodule = self.module or ''
-            self.module += '/' + trunk
+            oldmodule = self.module or b''
+            self.module += b'/' + trunk
             self.head = self.latest(self.module, self.last_changed)
             if not self.head:
-                raise error.Abort(_('no revision found in module %s')
-                                 % self.module)
+                raise error.Abort(
+                    _(b'no revision found in module %s') % self.module
+                )
 
         # First head in the list is the module's head
         self.heads = [self.head]
         if self.tags is not None:
-            self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))
+            self.tags = b'%s/%s' % (oldmodule, (self.tags or b'tags'))
 
         # Check if branches bring a few more heads to the list
         if branches:
-            rpath = self.url.strip('/')
-            branchnames = svn.client.ls(rpath + '/' + quote(branches),
-                                        rev, False, self.ctx)
+            rpath = self.url.strip(b'/')
+            branchnames = svn.client.ls(
+                rpath + b'/' + quote(branches), rev, False, self.ctx
+            )
             for branch in sorted(branchnames):
-                module = '%s/%s/%s' % (oldmodule, branches, branch)
+                module = b'%s/%s/%s' % (oldmodule, branches, branch)
                 if not isdir(module, self.last_changed):
                     continue
                 brevid = self.latest(module, self.last_changed)
                 if not brevid:
-                    self.ui.note(_('ignoring empty branch %s\n') % branch)
+                    self.ui.note(_(b'ignoring empty branch %s\n') % branch)
                     continue
-                self.ui.note(_('found branch %s at %d\n') %
-                             (branch, self.revnum(brevid)))
+                self.ui.note(
+                    _(b'found branch %s at %d\n')
+                    % (branch, self.revnum(brevid))
+                )
                 self.heads.append(brevid)
 
         if self.startrev and self.heads:
             if len(self.heads) > 1:
-                raise error.Abort(_('svn: start revision is not supported '
-                                   'with more than one branch'))
+                raise error.Abort(
+                    _(
+                        b'svn: start revision is not supported '
+                        b'with more than one branch'
+                    )
+                )
             revnum = self.revnum(self.heads[0])
             if revnum < self.startrev:
                 raise error.Abort(
-                    _('svn: no revision found after start revision %d')
-                                 % self.startrev)
+                    _(b'svn: no revision found after start revision %d')
+                    % self.startrev
+                )
 
         return self.heads
 
@@ -483,10 +586,14 @@
         if full or not parents:
             # Perform a full checkout on roots
             uuid, module, revnum = revsplit(rev)
-            entries = svn.client.ls(self.baseurl + quote(module),
-                                    optrev(revnum), True, self.ctx)
-            files = [n for n, e in entries.iteritems()
-                     if e.kind == svn.core.svn_node_file]
+            entries = svn.client.ls(
+                self.baseurl + quote(module), optrev(revnum), True, self.ctx
+            )
+            files = [
+                n
+                for n, e in pycompat.iteritems(entries)
+                if e.kind == svn.core.svn_node_file
+            ]
             self.removed = set()
 
         files.sort()
@@ -525,22 +632,27 @@
                 stop = revnum + 1
             self._fetch_revisions(revnum, stop)
             if rev not in self.commits:
-                raise error.Abort(_('svn: revision %s not found') % revnum)
+                raise error.Abort(_(b'svn: revision %s not found') % revnum)
         revcommit = self.commits[rev]
         # caller caches the result, so free it here to release memory
         del self.commits[rev]
         return revcommit
 
-    def checkrevformat(self, revstr, mapname='splicemap'):
+    def checkrevformat(self, revstr, mapname=b'splicemap'):
         """ fails if revision format does not match the correct format"""
-        if not re.match(r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
-                              r'[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
-                              r'{12,12}(.*)\@[0-9]+$',revstr):
-            raise error.Abort(_('%s entry %s is not a valid revision'
-                               ' identifier') % (mapname, revstr))
+        if not re.match(
+            r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
+            r'[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
+            r'{12,12}(.*)\@[0-9]+$',
+            revstr,
+        ):
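+            # i.e. revstr does not have the b'svn:<uuid><module>@<revnum>'
+            # shape produced by revid()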
+            raise error.Abort(
+                _(b'%s entry %s is not a valid revision identifier')
+                % (mapname, revstr)
+            )
 
     def numcommits(self):
-        return int(self.head.rsplit('@', 1)[1]) - self.startrev
+        return int(self.head.rsplit(b'@', 1)[1]) - self.startrev
 
     def gettags(self):
         tags = {}
@@ -567,8 +679,11 @@
                 origpaths, revnum, author, date, message = entry
                 if not origpaths:
                     origpaths = []
-                copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
-                          in origpaths.iteritems() if e.copyfrom_path]
+                copies = [
+                    (e.copyfrom_path, e.copyfrom_rev, p)
+                    for p, e in pycompat.iteritems(origpaths)
+                    if e.copyfrom_path
+                ]
                 # Apply moves/copies from more specific to general
                 copies.sort(reverse=True)
 
@@ -578,11 +693,11 @@
                     srctagspath = copies.pop()[0]
 
                 for source, sourcerev, dest in copies:
-                    if not dest.startswith(tagspath + '/'):
+                    if not dest.startswith(tagspath + b'/'):
                         continue
                     for tag in pendings:
                         if tag[0].startswith(dest):
-                            tagpath = source + tag[0][len(dest):]
+                            tagpath = source + tag[0][len(dest) :]
                             tag[:2] = [tagpath, sourcerev]
                             break
                     else:
@@ -595,26 +710,33 @@
                 # Here/tags/tag.1 discarded as well as its children.
                 # It happens with tools like cvs2svn. Such tags cannot
                 # be represented in mercurial.
-                addeds = dict((p, e.copyfrom_path) for p, e
-                              in origpaths.iteritems()
-                              if e.action == 'A' and e.copyfrom_path)
+                addeds = dict(
+                    (p, e.copyfrom_path)
+                    for p, e in pycompat.iteritems(origpaths)
+                    if e.action == b'A' and e.copyfrom_path
+                )
                 badroots = set()
                 for destroot in addeds:
                     for source, sourcerev, dest in pendings:
-                        if (not dest.startswith(destroot + '/')
-                            or source.startswith(addeds[destroot] + '/')):
+                        if not dest.startswith(
+                            destroot + b'/'
+                        ) or source.startswith(addeds[destroot] + b'/'):
                             continue
                         badroots.add(destroot)
                         break
 
                 for badroot in badroots:
-                    pendings = [p for p in pendings if p[2] != badroot
-                                and not p[2].startswith(badroot + '/')]
+                    pendings = [
+                        p
+                        for p in pendings
+                        if p[2] != badroot
+                        and not p[2].startswith(badroot + b'/')
+                    ]
 
                 # Tell tag renamings from tag creations
                 renamings = []
                 for source, sourcerev, dest in pendings:
-                    tagname = dest.split('/')[-1]
+                    tagname = dest.split(b'/')[-1]
                     if source.startswith(srctagspath):
                         renamings.append([source, sourcerev, tagname])
                         continue
@@ -642,17 +764,19 @@
         if not self.wc:
             return
         if self.convertfp is None:
-            self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
-                                  'ab')
-        self.convertfp.write(util.tonativeeol('%s %d\n'
-                                              % (destrev, self.revnum(rev))))
+            self.convertfp = open(
+                os.path.join(self.wc, b'.svn', b'hg-shamap'), b'ab'
+            )
+        self.convertfp.write(
+            util.tonativeeol(b'%s %d\n' % (destrev, self.revnum(rev)))
+        )
         self.convertfp.flush()
 
     def revid(self, revnum, module=None):
-        return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
+        return b'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
 
     def revnum(self, rev):
-        return int(rev.split('@')[-1])
+        return int(rev.split(b'@')[-1])
 
     def latest(self, path, stop=None):
         """Find the latest revid affecting path, up to stop revision
@@ -662,6 +786,7 @@
         reported. Return None if computed module does not belong to
         rootmodule subtree.
         """
+
         def findchanges(path, start, stop=None):
             stream = self._getlog([path], start, stop or 1)
             try:
@@ -675,12 +800,13 @@
                         break
 
                     for p in paths:
-                        if (not path.startswith(p) or
-                            not paths[p].copyfrom_path):
+                        if not path.startswith(p) or not paths[p].copyfrom_path:
                             continue
-                        newpath = paths[p].copyfrom_path + path[len(p):]
-                        self.ui.debug("branch renamed from %s to %s at %d\n" %
-                                      (path, newpath, revnum))
+                        newpath = paths[p].copyfrom_path + path[len(p) :]
+                        self.ui.debug(
+                            b"branch renamed from %s to %s at %d\n"
+                            % (path, newpath, revnum)
+                        )
                         path = newpath
                         break
                 if not paths:
@@ -691,20 +817,21 @@
 
         if not path.startswith(self.rootmodule):
             # Requests on foreign branches may be forbidden at server level
-            self.ui.debug('ignoring foreign branch %r\n' % path)
+            self.ui.debug(b'ignoring foreign branch %r\n' % path)
             return None
 
         if stop is None:
             stop = svn.ra.get_latest_revnum(self.ra)
         try:
-            prevmodule = self.reparent('')
-            dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
+            prevmodule = self.reparent(b'')
+            dirent = svn.ra.stat(self.ra, path.strip(b'/'), stop)
             self.reparent(prevmodule)
         except svn.core.SubversionException:
             dirent = None
         if not dirent:
-            raise SvnPathNotFound(_('%s not found up to revision %d')
-                                  % (path, stop))
+            raise SvnPathNotFound(
+                _(b'%s not found up to revision %d') % (path, stop)
+            )
 
         # stat() gives us the previous revision on this line of
         # development, but it might be in *another module*. Fetch the
@@ -720,11 +847,11 @@
             # the whole history.
             revnum, realpath = findchanges(path, stop)
             if revnum is None:
-                self.ui.debug('ignoring empty branch %r\n' % realpath)
+                self.ui.debug(b'ignoring empty branch %r\n' % realpath)
                 return None
 
         if not realpath.startswith(self.rootmodule):
-            self.ui.debug('ignoring foreign branch %r\n' % realpath)
+            self.ui.debug(b'ignoring foreign branch %r\n' % realpath)
             return None
         return self.revid(revnum, realpath)
 
@@ -735,8 +862,8 @@
         svnurl = self.baseurl + quote(module)
         prevmodule = self.prevmodule
         if prevmodule is None:
-            prevmodule = ''
-        self.ui.debug("reparent to %s\n" % svnurl)
+            prevmodule = b''
+        self.ui.debug(b"reparent to %s\n" % svnurl)
         svn.ra.reparent(self.ra, svnurl)
         self.prevmodule = module
         return prevmodule
@@ -750,8 +877,9 @@
             self.module = new_module
             self.reparent(self.module)
 
-        progress = self.ui.makeprogress(_('scanning paths'), unit=_('paths'),
-                                        total=len(paths))
+        progress = self.ui.makeprogress(
+            _(b'scanning paths'), unit=_(b'paths'), total=len(paths)
+        )
         for i, (path, ent) in enumerate(paths):
             progress.update(i, item=path)
             entrypath = self.getrelpath(path)
@@ -769,35 +897,38 @@
                 copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
                 if not copyfrom_path:
                     continue
-                self.ui.debug("copied to %s from %s@%s\n" %
-                              (entrypath, copyfrom_path, ent.copyfrom_rev))
+                self.ui.debug(
+                    b"copied to %s from %s@%s\n"
+                    % (entrypath, copyfrom_path, ent.copyfrom_rev)
+                )
                 copies[self.recode(entrypath)] = self.recode(copyfrom_path)
-            elif kind == 0: # gone, but had better be a deleted *file*
-                self.ui.debug("gone from %s\n" % ent.copyfrom_rev)
+            elif kind == 0:  # gone, but had better be a deleted *file*
+                self.ui.debug(b"gone from %s\n" % ent.copyfrom_rev)
                 pmodule, prevnum = revsplit(parents[0])[1:]
-                parentpath = pmodule + "/" + entrypath
+                parentpath = pmodule + b"/" + entrypath
                 fromkind = self._checkpath(entrypath, prevnum, pmodule)
 
                 if fromkind == svn.core.svn_node_file:
                     removed.add(self.recode(entrypath))
                 elif fromkind == svn.core.svn_node_dir:
-                    oroot = parentpath.strip('/')
-                    nroot = path.strip('/')
+                    oroot = parentpath.strip(b'/')
+                    nroot = path.strip(b'/')
                     children = self._iterfiles(oroot, prevnum)
                     for childpath in children:
                         childpath = childpath.replace(oroot, nroot)
-                        childpath = self.getrelpath("/" + childpath, pmodule)
+                        childpath = self.getrelpath(b"/" + childpath, pmodule)
                         if childpath:
                             removed.add(self.recode(childpath))
                 else:
-                    self.ui.debug('unknown path in revision %d: %s\n' %
-                                  (revnum, path))
+                    self.ui.debug(
+                        b'unknown path in revision %d: %s\n' % (revnum, path)
+                    )
             elif kind == svn.core.svn_node_dir:
-                if ent.action == 'M':
+                if ent.action == b'M':
                     # If the directory just had a prop change,
                     # then we shouldn't need to look for its children.
                     continue
-                if ent.action == 'R' and parents:
+                if ent.action == b'R' and parents:
                     # If a directory is replacing a file, mark the previous
                     # file as deleted
                     pmodule, prevnum = revsplit(parents[0])[1:]
@@ -808,12 +939,12 @@
                         # We do not know what files were kept or removed,
                         # mark them all as changed.
                         for childpath in self._iterfiles(pmodule, prevnum):
-                            childpath = self.getrelpath("/" + childpath)
+                            childpath = self.getrelpath(b"/" + childpath)
                             if childpath:
                                 changed.add(self.recode(childpath))
 
                 for childpath in self._iterfiles(path, revnum):
-                    childpath = self.getrelpath("/" + childpath)
+                    childpath = self.getrelpath(b"/" + childpath)
                     if childpath:
                         changed.add(self.recode(childpath))
 
@@ -828,14 +959,16 @@
                 copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
                 if not copyfrompath:
                     continue
-                self.ui.debug("mark %s came from %s:%d\n"
-                              % (path, copyfrompath, ent.copyfrom_rev))
+                self.ui.debug(
+                    b"mark %s came from %s:%d\n"
+                    % (path, copyfrompath, ent.copyfrom_rev)
+                )
                 children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
                 for childpath in children:
-                    childpath = self.getrelpath("/" + childpath, pmodule)
+                    childpath = self.getrelpath(b"/" + childpath, pmodule)
                     if not childpath:
                         continue
-                    copytopath = path + childpath[len(copyfrompath):]
+                    copytopath = path + childpath[len(copyfrompath) :]
                     copytopath = self.getrelpath(copytopath)
                     copies[self.recode(copytopath)] = self.recode(childpath)
 
@@ -853,8 +986,10 @@
             """Return the parsed commit object or None, and True if
             the revision is a branch root.
             """
-            self.ui.debug("parsing revision %d (%d changes)\n" %
-                          (revnum, len(orig_paths)))
+            self.ui.debug(
+                b"parsing revision %d (%d changes)\n"
+                % (revnum, len(orig_paths))
+            )
 
             branched = False
             rev = self.revid(revnum)
@@ -866,14 +1001,15 @@
             parents = []
             # check whether this revision is the start of a branch or part
             # of a branch renaming
-            orig_paths = sorted(orig_paths.iteritems())
-            root_paths = [(p, e) for p, e in orig_paths
-                          if self.module.startswith(p)]
+            orig_paths = sorted(pycompat.iteritems(orig_paths))
+            root_paths = [
+                (p, e) for p, e in orig_paths if self.module.startswith(p)
+            ]
             if root_paths:
                 path, ent = root_paths[-1]
                 if ent.copyfrom_path:
                     branched = True
-                    newpath = ent.copyfrom_path + self.module[len(path):]
+                    newpath = ent.copyfrom_path + self.module[len(path) :]
                     # ent.copyfrom_rev may not be the actual last revision
                     previd = self.latest(newpath, ent.copyfrom_rev)
                     if previd is not None:
@@ -881,10 +1017,11 @@
                         if prevnum >= self.startrev:
                             parents = [previd]
                             self.ui.note(
-                                _('found parent of branch %s at %d: %s\n') %
-                                (self.module, prevnum, prevmodule))
+                                _(b'found parent of branch %s at %d: %s\n')
+                                % (self.module, prevnum, prevmodule)
+                            )
                 else:
-                    self.ui.debug("no copyfrom path, don't know what to do.\n")
+                    self.ui.debug(b"no copyfrom path, don't know what to do.\n")
 
             paths = []
             # filter out unrelated paths
@@ -896,33 +1033,37 @@
             # Example SVN datetime. Includes microseconds.
             # ISO-8601 conformant
             # '2007-01-04T17:35:00.902377Z'
-            date = dateutil.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
-            if self.ui.configbool('convert', 'localtimezone'):
+            date = dateutil.parsedate(
+                date[:19] + b" UTC", [b"%Y-%m-%dT%H:%M:%S"]
+            )
+            if self.ui.configbool(b'convert', b'localtimezone'):
                 date = makedatetimestamp(date[0])
 
             if message:
                 log = self.recode(message)
             else:
-                log = ''
+                log = b''
 
             if author:
                 author = self.recode(author)
             else:
-                author = ''
+                author = b''
 
             try:
-                branch = self.module.split("/")[-1]
+                branch = self.module.split(b"/")[-1]
                 if branch == self.trunkname:
                     branch = None
             except IndexError:
                 branch = None
 
-            cset = commit(author=author,
-                          date=dateutil.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
-                          desc=log,
-                          parents=parents,
-                          branch=branch,
-                          rev=rev)
+            cset = commit(
+                author=author,
+                date=dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2'),
+                desc=log,
+                parents=parents,
+                branch=branch,
+                rev=rev,
+            )
 
             self.commits[rev] = cset
             # The parents list is *shared* among self.paths and the
@@ -933,8 +1074,10 @@
             self.child_cset = cset
             return cset, branched
 
-        self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
-                     (self.module, from_revnum, to_revnum))
+        self.ui.note(
+            _(b'fetching revision log for "%s" from %d to %d\n')
+            % (self.module, from_revnum, to_revnum)
+        )
 
         try:
             firstcset = None
@@ -947,13 +1090,14 @@
                         lastonbranch = True
                         break
                     if not paths:
-                        self.ui.debug('revision %d has no entries\n' % revnum)
+                        self.ui.debug(b'revision %d has no entries\n' % revnum)
                         # If we ever leave the loop on an empty
                         # revision, do not try to get a parent branch
                         lastonbranch = lastonbranch or revnum == 0
                         continue
-                    cset, lastonbranch = parselogentry(paths, revnum, author,
-                                                       date, message)
+                    cset, lastonbranch = parselogentry(
+                        paths, revnum, author, date, message
+                    )
                     if cset:
                         firstcset = cset
                     if lastonbranch:
@@ -976,8 +1120,9 @@
         except svn.core.SubversionException as xxx_todo_changeme:
             (inst, num) = xxx_todo_changeme.args
             if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
-                raise error.Abort(_('svn: branch has no revision %s')
-                                 % to_revnum)
+                raise error.Abort(
+                    _(b'svn: branch has no revision %s') % to_revnum
+                )
             raise
 
     def getfile(self, file, rev):
@@ -997,30 +1142,35 @@
             io.close()
             if isinstance(info, list):
                 info = info[-1]
-            mode = ("svn:executable" in info) and 'x' or ''
-            mode = ("svn:special" in info) and 'l' or mode
+            mode = (b"svn:executable" in info) and b'x' or b''
+            mode = (b"svn:special" in info) and b'l' or mode
         except svn.core.SubversionException as e:
-            notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
-                svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
-            if e.apr_err in notfound: # File not found
+            notfound = (
+                svn.core.SVN_ERR_FS_NOT_FOUND,
+                svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND,
+            )
+            if e.apr_err in notfound:  # File not found
                 return None, None
             raise
-        if mode == 'l':
-            link_prefix = "link "
+        if mode == b'l':
+            link_prefix = b"link "
             if data.startswith(link_prefix):
-                data = data[len(link_prefix):]
+                data = data[len(link_prefix) :]
         return data, mode
 
     def _iterfiles(self, path, revnum):
         """Enumerate all files in path at revnum, recursively."""
-        path = path.strip('/')
+        path = path.strip(b'/')
         pool = svn.core.Pool()
-        rpath = '/'.join([self.baseurl, quote(path)]).strip('/')
+        rpath = b'/'.join([self.baseurl, quote(path)]).strip(b'/')
         entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
         if path:
-            path += '/'
-        return ((path + p) for p, e in entries.iteritems()
-                if e.kind == svn.core.svn_node_file)
+            path += b'/'
+        return (
+            (path + p)
+            for p, e in pycompat.iteritems(entries)
+            if e.kind == svn.core.svn_node_file
+        )
 
     def getrelpath(self, path, module=None):
         if module is None:
@@ -1032,54 +1182,73 @@
         #   "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
         # that is to say "tests/PloneTestCase.py"
         if path.startswith(module):
-            relative = path.rstrip('/')[len(module):]
-            if relative.startswith('/'):
+            relative = path.rstrip(b'/')[len(module) :]
+            if relative.startswith(b'/'):
                 return relative[1:]
-            elif relative == '':
+            elif relative == b'':
                 return relative
 
         # The path is outside our tracked tree...
-        self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
+        self.ui.debug(b'%r is not under %r, ignoring\n' % (path, module))
         return None
 
     def _checkpath(self, path, revnum, module=None):
         if module is not None:
-            prevmodule = self.reparent('')
-            path = module + '/' + path
+            prevmodule = self.reparent(b'')
+            path = module + b'/' + path
         try:
             # ra.check_path does not like leading slashes very much; it leads
             # to PROPFIND subversion errors
-            return svn.ra.check_path(self.ra, path.strip('/'), revnum)
+            return svn.ra.check_path(self.ra, path.strip(b'/'), revnum)
         finally:
             if module is not None:
                 self.reparent(prevmodule)
 
-    def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
-                strict_node_history=False):
+    def _getlog(
+        self,
+        paths,
+        start,
+        end,
+        limit=0,
+        discover_changed_paths=True,
+        strict_node_history=False,
+    ):
         # Normalize path names; svn >= 1.5 only wants paths relative to
         # the supplied URL
         relpaths = []
         for p in paths:
-            if not p.startswith('/'):
-                p = self.module + '/' + p
-            relpaths.append(p.strip('/'))
-        args = [self.baseurl, relpaths, start, end, limit,
-                discover_changed_paths, strict_node_history]
+            if not p.startswith(b'/'):
+                p = self.module + b'/' + p
+            relpaths.append(p.strip(b'/'))
+        args = [
+            self.baseurl,
+            relpaths,
+            start,
+            end,
+            limit,
+            discover_changed_paths,
+            strict_node_history,
+        ]
         # developer config: convert.svn.debugsvnlog
-        if not self.ui.configbool('convert', 'svn.debugsvnlog'):
+        if not self.ui.configbool(b'convert', b'svn.debugsvnlog'):
             return directlogstream(*args)
         arg = encodeargs(args)
         hgexe = procutil.hgexecutable()
-        cmd = '%s debugsvnlog' % procutil.shellquote(hgexe)
+        cmd = b'%s debugsvnlog' % procutil.shellquote(hgexe)
         stdin, stdout = procutil.popen2(procutil.quotecommand(cmd))
         stdin.write(arg)
         try:
             stdin.close()
         except IOError:
-            raise error.Abort(_('Mercurial failed to run itself, check'
-                               ' hg executable is in PATH'))
+            raise error.Abort(
+                _(
+                    b'Mercurial failed to run itself, check'
+                    b' hg executable is in PATH'
+                )
+            )
         return logstream(stdout)
 
+
 pre_revprop_change = b'''#!/bin/sh
 
 REPOS="$1"
@@ -1096,6 +1265,7 @@
 exit 1
 '''
 
+
 class svn_sink(converter_sink, commandline):
     commit_re = re.compile(br'Committed revision (\d+).', re.M)
     uuid_re = re.compile(br'Repository UUID:\s*(\S+)', re.M)
@@ -1109,18 +1279,18 @@
             os.chdir(self.cwd)
 
     def join(self, name):
-        return os.path.join(self.wc, '.svn', name)
+        return os.path.join(self.wc, b'.svn', name)
 
     def revmapfile(self):
-        return self.join('hg-shamap')
+        return self.join(b'hg-shamap')
 
     def authorfile(self):
-        return self.join('hg-authormap')
+        return self.join(b'hg-authormap')
 
     def __init__(self, ui, repotype, path):
 
         converter_sink.__init__(self, ui, repotype, path)
-        commandline.__init__(self, ui, 'svn')
+        commandline.__init__(self, ui, b'svn')
         self.delete = []
         self.setexec = []
         self.delexec = []
@@ -1129,46 +1299,53 @@
         self.cwd = encoding.getcwd()
 
         created = False
-        if os.path.isfile(os.path.join(path, '.svn', 'entries')):
+        if os.path.isfile(os.path.join(path, b'.svn', b'entries')):
             self.wc = os.path.realpath(path)
-            self.run0('update')
+            self.run0(b'update')
         else:
             if not re.search(br'^(file|http|https|svn|svn\+ssh)\://', path):
                 path = os.path.realpath(path)
                 if os.path.isdir(os.path.dirname(path)):
-                    if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
-                        ui.status(_("initializing svn repository '%s'\n") %
-                                  os.path.basename(path))
-                        commandline(ui, 'svnadmin').run0('create', path)
+                    if not os.path.exists(
+                        os.path.join(path, b'db', b'fs-type')
+                    ):
+                        ui.status(
+                            _(b"initializing svn repository '%s'\n")
+                            % os.path.basename(path)
+                        )
+                        commandline(ui, b'svnadmin').run0(b'create', path)
                         created = path
                     path = util.normpath(path)
-                    if not path.startswith('/'):
-                        path = '/' + path
-                    path = 'file://' + path
+                    if not path.startswith(b'/'):
+                        path = b'/' + path
+                    path = b'file://' + path
 
-            wcpath = os.path.join(encoding.getcwd(), os.path.basename(path) +
-                                '-wc')
-            ui.status(_("initializing svn working copy '%s'\n")
-                      % os.path.basename(wcpath))
-            self.run0('checkout', path, wcpath)
+            wcpath = os.path.join(
+                encoding.getcwd(), os.path.basename(path) + b'-wc'
+            )
+            ui.status(
+                _(b"initializing svn working copy '%s'\n")
+                % os.path.basename(wcpath)
+            )
+            self.run0(b'checkout', path, wcpath)
 
             self.wc = wcpath
         self.opener = vfsmod.vfs(self.wc)
         self.wopener = vfsmod.vfs(self.wc)
-        self.childmap = mapfile(ui, self.join('hg-childmap'))
+        self.childmap = mapfile(ui, self.join(b'hg-childmap'))
         if util.checkexec(self.wc):
             self.is_exec = util.isexec
         else:
             self.is_exec = None
 
         if created:
-            hook = os.path.join(created, 'hooks', 'pre-revprop-change')
-            fp = open(hook, 'wb')
+            hook = os.path.join(created, b'hooks', b'pre-revprop-change')
+            fp = open(hook, b'wb')
             fp.write(pre_revprop_change)
             fp.close()
             util.setflags(hook, False, True)
 
-        output = self.run0('info')
+        output = self.run0(b'info')
         self.uuid = self.uuid_re.search(output).group(1).strip()
 
     def wjoin(self, *names):
@@ -1180,14 +1357,15 @@
         # already tracked entries, so we have to track and filter them
         # ourselves.
         m = set()
-        output = self.run0('ls', recursive=True, xml=True)
+        output = self.run0(b'ls', recursive=True, xml=True)
         doc = xml.dom.minidom.parseString(output)
         for e in doc.getElementsByTagName(r'entry'):
             for n in e.childNodes:
                 if n.nodeType != n.ELEMENT_NODE or n.tagName != r'name':
                     continue
-                name = r''.join(c.data for c in n.childNodes
-                                if c.nodeType == c.TEXT_NODE)
+                name = r''.join(
+                    c.data for c in n.childNodes if c.nodeType == c.TEXT_NODE
+                )
                 # Entries are compared with names coming from
                 # mercurial, so bytes with undefined encoding. Our
                 # best bet is to assume they are in local
@@ -1198,7 +1376,7 @@
         return m
 
     def putfile(self, filename, flags, data):
-        if 'l' in flags:
+        if b'l' in flags:
             self.wopener.symlink(data, filename)
         else:
             try:
@@ -1218,12 +1396,12 @@
 
             if self.is_exec:
                 if wasexec:
-                    if 'x' not in flags:
+                    if b'x' not in flags:
                         self.delexec.append(filename)
                 else:
-                    if 'x' in flags:
+                    if b'x' in flags:
                         self.setexec.append(filename)
-                util.setflags(self.wjoin(filename), False, 'x' in flags)
+                util.setflags(self.wjoin(filename), False, b'x' in flags)
 
     def _copyfile(self, source, dest):
         # SVN's copy command pukes if the destination file exists, but
@@ -1233,12 +1411,13 @@
         exists = os.path.lexists(wdest)
         if exists:
             fd, tempname = pycompat.mkstemp(
-                prefix='hg-copy-', dir=os.path.dirname(wdest))
+                prefix=b'hg-copy-', dir=os.path.dirname(wdest)
+            )
             os.close(fd)
             os.unlink(tempname)
             os.rename(wdest, tempname)
         try:
-            self.run0('copy', source, dest)
+            self.run0(b'copy', source, dest)
         finally:
             self.manifest.add(dest)
             if exists:
@@ -1254,33 +1433,35 @@
             if os.path.isdir(self.wjoin(f)):
                 dirs.add(f)
             i = len(f)
-            for i in iter(lambda: f.rfind('/', 0, i), -1):
+            for i in iter(lambda: f.rfind(b'/', 0, i), -1):
                 dirs.add(f[:i])
         return dirs
 
     def add_dirs(self, files):
-        add_dirs = [d for d in sorted(self.dirs_of(files))
-                    if d not in self.manifest]
+        add_dirs = [
+            d for d in sorted(self.dirs_of(files)) if d not in self.manifest
+        ]
         if add_dirs:
             self.manifest.update(add_dirs)
-            self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
+            self.xargs(add_dirs, b'add', non_recursive=True, quiet=True)
         return add_dirs
 
     def add_files(self, files):
         files = [f for f in files if f not in self.manifest]
         if files:
             self.manifest.update(files)
-            self.xargs(files, 'add', quiet=True)
+            self.xargs(files, b'add', quiet=True)
         return files
 
     def addchild(self, parent, child):
         self.childmap[parent] = child
 
     def revid(self, rev):
-        return "svn:%s@%s" % (self.uuid, rev)
+        return b"svn:%s@%s" % (self.uuid, rev)
 
-    def putcommit(self, files, copies, parents, commit, source, revmap, full,
-                  cleanp2):
+    def putcommit(
+        self, files, copies, parents, commit, source, revmap, full, cleanp2
+    ):
         for parent in parents:
             try:
                 return self.revid(self.childmap[parent])
@@ -1308,41 +1489,53 @@
                 self._copyfile(s, d)
             self.copies = []
         if self.delete:
-            self.xargs(self.delete, 'delete')
+            self.xargs(self.delete, b'delete')
             for f in self.delete:
                 self.manifest.remove(f)
             self.delete = []
         entries.update(self.add_files(files.difference(entries)))
         if self.delexec:
-            self.xargs(self.delexec, 'propdel', 'svn:executable')
+            self.xargs(self.delexec, b'propdel', b'svn:executable')
             self.delexec = []
         if self.setexec:
-            self.xargs(self.setexec, 'propset', 'svn:executable', '*')
+            self.xargs(self.setexec, b'propset', b'svn:executable', b'*')
             self.setexec = []
 
-        fd, messagefile = pycompat.mkstemp(prefix='hg-convert-')
+        fd, messagefile = pycompat.mkstemp(prefix=b'hg-convert-')
         fp = os.fdopen(fd, r'wb')
         fp.write(util.tonativeeol(commit.desc))
         fp.close()
         try:
-            output = self.run0('commit',
-                               username=stringutil.shortuser(commit.author),
-                               file=messagefile,
-                               encoding='utf-8')
+            output = self.run0(
+                b'commit',
+                username=stringutil.shortuser(commit.author),
+                file=messagefile,
+                encoding=b'utf-8',
+            )
             try:
                 rev = self.commit_re.search(output).group(1)
             except AttributeError:
                 if not files:
-                    return parents[0] if parents else 'None'
-                self.ui.warn(_('unexpected svn output:\n'))
+                    return parents[0] if parents else b'None'
+                self.ui.warn(_(b'unexpected svn output:\n'))
                 self.ui.warn(output)
-                raise error.Abort(_('unable to cope with svn output'))
+                raise error.Abort(_(b'unable to cope with svn output'))
             if commit.rev:
-                self.run('propset', 'hg:convert-rev', commit.rev,
-                         revprop=True, revision=rev)
-            if commit.branch and commit.branch != 'default':
-                self.run('propset', 'hg:convert-branch', commit.branch,
-                         revprop=True, revision=rev)
+                self.run(
+                    b'propset',
+                    b'hg:convert-rev',
+                    commit.rev,
+                    revprop=True,
+                    revision=rev,
+                )
+            if commit.branch and commit.branch != b'default':
+                self.run(
+                    b'propset',
+                    b'hg:convert-branch',
+                    commit.branch,
+                    revprop=True,
+                    revision=rev,
+                )
             for parent in parents:
                 self.addchild(parent, rev)
             return self.revid(rev)
@@ -1350,7 +1543,7 @@
             os.unlink(messagefile)
 
     def puttags(self, tags):
-        self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
+        self.ui.warn(_(b'writing Subversion tags is not yet implemented\n'))
         return None, None
 
     def hascommitfrommap(self, rev):
@@ -1363,6 +1556,10 @@
         # repository and childmap would not list all revisions. Too bad.
         if rev in self.childmap:
             return True
-        raise error.Abort(_('splice map revision %s not found in subversion '
-                           'child map (revision lookups are not implemented)')
-                         % rev)
+        raise error.Abort(
+            _(
+                b'splice map revision %s not found in subversion '
+                b'child map (revision lookups are not implemented)'
+            )
+            % rev
+        )
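
The subversion.py hunks above apply two mechanical Python 3 porting
patterns: string literals that flow through the convert machinery gain a
b'' prefix, and every dict.iteritems() call is routed through
pycompat.iteritems(), since dict.iteritems() no longer exists on Python 3.
A minimal sketch of such a shim, only approximating what
mercurial/pycompat.py actually provides:

    import sys

    if sys.version_info[0] >= 3:
        def iteritems(mapping):
            # Python 3: items() already returns a lazy view object.
            return mapping.items()
    else:
        def iteritems(mapping):
            # Python 2: iteritems() iterates without building a list.
            return mapping.iteritems()

    for path, entry in iteritems({b'trunk/a': 1, b'branches/b': 2}):
        print('%s -> %d' % (path, entry))

Call sites such as pycompat.iteritems(orig_paths) above then run unchanged
under both interpreters.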
--- a/hgext/convert/transport.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/convert/transport.py	Mon Oct 21 11:09:48 2019 -0400
@@ -25,9 +25,8 @@
 Pool = svn.core.Pool
 SubversionException = svn.core.SubversionException
 
-from mercurial import (
-    util,
-)
+from mercurial.pycompat import getattr
+from mercurial import util
 
 # Some older versions of the Python bindings need to be
 # explicitly initialized. But what we want to do probably
@@ -36,9 +35,11 @@
 
 svn_config = None
 
+
 def _create_auth_baton(pool):
     """Create a Subversion authentication baton. """
     import svn.client
+
     # Give the client context baton a suite of authentication
     # providers.
     providers = [
@@ -47,38 +48,42 @@
         svn.client.get_ssl_client_cert_file_provider(pool),
         svn.client.get_ssl_client_cert_pw_file_provider(pool),
         svn.client.get_ssl_server_trust_file_provider(pool),
-        ]
+    ]
     # Platform-dependent authentication methods
-    getprovider = getattr(svn.core, 'svn_auth_get_platform_specific_provider',
-                          None)
+    getprovider = getattr(
+        svn.core, 'svn_auth_get_platform_specific_provider', None
+    )
     if getprovider:
         # Available in svn >= 1.6
-        for name in ('gnome_keyring', 'keychain', 'kwallet', 'windows'):
-            for type in ('simple', 'ssl_client_cert_pw', 'ssl_server_trust'):
+        for name in (b'gnome_keyring', b'keychain', b'kwallet', b'windows'):
+            for type in (b'simple', b'ssl_client_cert_pw', b'ssl_server_trust'):
                 p = getprovider(name, type, pool)
                 if p:
                     providers.append(p)
     else:
-        if util.safehasattr(svn.client, 'get_windows_simple_provider'):
+        if util.safehasattr(svn.client, b'get_windows_simple_provider'):
             providers.append(svn.client.get_windows_simple_provider(pool))
 
     return svn.core.svn_auth_open(providers, pool)
 
+
 class NotBranchError(SubversionException):
     pass
 
+
 class SvnRaTransport(object):
     """
     Open an ra connection to a Subversion repository.
     """
-    def __init__(self, url="", ra=None):
+
+    def __init__(self, url=b"", ra=None):
         self.pool = Pool()
         self.svn_url = url
-        self.username = ''
-        self.password = ''
+        self.username = b''
+        self.password = b''
 
         # Only Subversion 1.4 and newer have reparent()
-        if ra is None or not util.safehasattr(svn.ra, 'reparent'):
+        if ra is None or not util.safehasattr(svn.ra, b'reparent'):
             self.client = svn.client.create_context(self.pool)
             ab = _create_auth_baton(self.pool)
             self.client.auth_baton = ab
@@ -88,13 +93,15 @@
             self.client.config = svn_config
             try:
                 self.ra = svn.client.open_ra_session(
-                    self.svn_url,
-                    self.client, self.pool)
+                    self.svn_url, self.client, self.pool
+                )
             except SubversionException as xxx_todo_changeme:
                 (inst, num) = xxx_todo_changeme.args
-                if num in (svn.core.SVN_ERR_RA_ILLEGAL_URL,
-                           svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
-                           svn.core.SVN_ERR_BAD_URL):
+                if num in (
+                    svn.core.SVN_ERR_RA_ILLEGAL_URL,
+                    svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
+                    svn.core.SVN_ERR_BAD_URL,
+                ):
                     raise NotBranchError(url)
                 raise
         else:
@@ -106,27 +113,46 @@
             self._reporter, self._baton = reporter_data
 
         def set_path(self, path, revnum, start_empty, lock_token, pool=None):
-            svn.ra.reporter2_invoke_set_path(self._reporter, self._baton,
-                        path, revnum, start_empty, lock_token, pool)
+            svn.ra.reporter2_invoke_set_path(
+                self._reporter,
+                self._baton,
+                path,
+                revnum,
+                start_empty,
+                lock_token,
+                pool,
+            )
 
         def delete_path(self, path, pool=None):
-            svn.ra.reporter2_invoke_delete_path(self._reporter, self._baton,
-                    path, pool)
+            svn.ra.reporter2_invoke_delete_path(
+                self._reporter, self._baton, path, pool
+            )
 
-        def link_path(self, path, url, revision, start_empty, lock_token,
-                      pool=None):
-            svn.ra.reporter2_invoke_link_path(self._reporter, self._baton,
-                    path, url, revision, start_empty, lock_token,
-                    pool)
+        def link_path(
+            self, path, url, revision, start_empty, lock_token, pool=None
+        ):
+            svn.ra.reporter2_invoke_link_path(
+                self._reporter,
+                self._baton,
+                path,
+                url,
+                revision,
+                start_empty,
+                lock_token,
+                pool,
+            )
 
         def finish_report(self, pool=None):
-            svn.ra.reporter2_invoke_finish_report(self._reporter,
-                    self._baton, pool)
+            svn.ra.reporter2_invoke_finish_report(
+                self._reporter, self._baton, pool
+            )
 
         def abort_report(self, pool=None):
-            svn.ra.reporter2_invoke_abort_report(self._reporter,
-                    self._baton, pool)
+            svn.ra.reporter2_invoke_abort_report(
+                self._reporter, self._baton, pool
+            )
 
     def do_update(self, revnum, path, *args, **kwargs):
-        return self.Reporter(svn.ra.do_update(self.ra, revnum, path,
-                                              *args, **kwargs))
+        return self.Reporter(
+            svn.ra.do_update(self.ra, revnum, path, *args, **kwargs)
+        )
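
transport.py keeps its existing feature-detection idiom while reflowing it:
optional entry points in the Subversion bindings are probed with getattr()
and a None default rather than an explicit version comparison. A small
self-contained sketch of that pattern; the class and attribute names below
are stand-ins for illustration, not the real svn modules:

    class _FakeBindings(object):
        # Stand-in for svn.core; older releases simply lack the optional
        # provider factory probed for below.
        pass

    bindings = _FakeBindings()
    getprovider = getattr(bindings, 'get_platform_provider', None)
    if getprovider is not None:
        providers = [getprovider()]
    else:
        # Fall back to the platform-independent providers only.
        providers = []
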
--- a/hgext/eol.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/eol.py	Mon Oct 21 11:09:48 2019 -0400
@@ -106,117 +106,143 @@
     scmutil,
     util,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('eol', 'fix-trailing-newline',
-    default=False,
+configitem(
+    b'eol', b'fix-trailing-newline', default=False,
 )
-configitem('eol', 'native',
-    default=pycompat.oslinesep,
+configitem(
+    b'eol', b'native', default=pycompat.oslinesep,
 )
-configitem('eol', 'only-consistent',
-    default=True,
+configitem(
+    b'eol', b'only-consistent', default=True,
 )
 
 # Matches a lone LF, i.e., one that is not part of CRLF.
-singlelf = re.compile('(^|[^\r])\n')
+singlelf = re.compile(b'(^|[^\r])\n')
+
 
 def inconsistenteol(data):
-    return '\r\n' in data and singlelf.search(data)
+    return b'\r\n' in data and singlelf.search(data)
+
 
 def tolf(s, params, ui, **kwargs):
     """Filter to convert to LF EOLs."""
     if stringutil.binary(s):
         return s
-    if ui.configbool('eol', 'only-consistent') and inconsistenteol(s):
+    if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
         return s
-    if (ui.configbool('eol', 'fix-trailing-newline')
-        and s and not s.endswith('\n')):
-        s = s + '\n'
+    if (
+        ui.configbool(b'eol', b'fix-trailing-newline')
+        and s
+        and not s.endswith(b'\n')
+    ):
+        s = s + b'\n'
     return util.tolf(s)
 
+
 def tocrlf(s, params, ui, **kwargs):
     """Filter to convert to CRLF EOLs."""
     if stringutil.binary(s):
         return s
-    if ui.configbool('eol', 'only-consistent') and inconsistenteol(s):
+    if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
         return s
-    if (ui.configbool('eol', 'fix-trailing-newline')
-        and s and not s.endswith('\n')):
-        s = s + '\n'
+    if (
+        ui.configbool(b'eol', b'fix-trailing-newline')
+        and s
+        and not s.endswith(b'\n')
+    ):
+        s = s + b'\n'
     return util.tocrlf(s)
 
-def isbinary(s, params):
+
+def isbinary(s, params, ui, **kwargs):
     """Filter to do nothing with the file."""
     return s
 
+
 filters = {
-    'to-lf': tolf,
-    'to-crlf': tocrlf,
-    'is-binary': isbinary,
+    b'to-lf': tolf,
+    b'to-crlf': tocrlf,
+    b'is-binary': isbinary,
     # The following provide backwards compatibility with win32text
-    'cleverencode:': tolf,
-    'cleverdecode:': tocrlf
+    b'cleverencode:': tolf,
+    b'cleverdecode:': tocrlf,
 }
 
+
 class eolfile(object):
     def __init__(self, ui, root, data):
-        self._decode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
-        self._encode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
+        self._decode = {
+            b'LF': b'to-lf',
+            b'CRLF': b'to-crlf',
+            b'BIN': b'is-binary',
+        }
+        self._encode = {
+            b'LF': b'to-lf',
+            b'CRLF': b'to-crlf',
+            b'BIN': b'is-binary',
+        }
 
         self.cfg = config.config()
         # Our files should not be touched. The pattern must be
         # inserted first to override a '** = native' pattern.
-        self.cfg.set('patterns', '.hg*', 'BIN', 'eol')
+        self.cfg.set(b'patterns', b'.hg*', b'BIN', b'eol')
         # We can then parse the user's patterns.
-        self.cfg.parse('.hgeol', data)
+        self.cfg.parse(b'.hgeol', data)
 
-        isrepolf = self.cfg.get('repository', 'native') != 'CRLF'
-        self._encode['NATIVE'] = isrepolf and 'to-lf' or 'to-crlf'
-        iswdlf = ui.config('eol', 'native') in ('LF', '\n')
-        self._decode['NATIVE'] = iswdlf and 'to-lf' or 'to-crlf'
+        isrepolf = self.cfg.get(b'repository', b'native') != b'CRLF'
+        self._encode[b'NATIVE'] = isrepolf and b'to-lf' or b'to-crlf'
+        iswdlf = ui.config(b'eol', b'native') in (b'LF', b'\n')
+        self._decode[b'NATIVE'] = iswdlf and b'to-lf' or b'to-crlf'
 
         include = []
         exclude = []
         self.patterns = []
-        for pattern, style in self.cfg.items('patterns'):
+        for pattern, style in self.cfg.items(b'patterns'):
             key = style.upper()
-            if key == 'BIN':
+            if key == b'BIN':
                 exclude.append(pattern)
             else:
                 include.append(pattern)
-            m = match.match(root, '', [pattern])
+            m = match.match(root, b'', [pattern])
             self.patterns.append((pattern, key, m))
         # This will match the files whose newlines we need to
         # check for consistency.
-        self.match = match.match(root, '', [], include, exclude)
+        self.match = match.match(root, b'', [], include, exclude)
 
     def copytoui(self, ui):
+        newpatterns = set(pattern for pattern, key, m in self.patterns)
+        for section in (b'decode', b'encode'):
+            for oldpattern, _filter in ui.configitems(section):
+                if oldpattern not in newpatterns:
+                    if ui.configsource(section, oldpattern) == b'eol':
+                        ui.setconfig(section, oldpattern, b'!', b'eol')
         for pattern, key, m in self.patterns:
             try:
-                ui.setconfig('decode', pattern, self._decode[key], 'eol')
-                ui.setconfig('encode', pattern, self._encode[key], 'eol')
+                ui.setconfig(b'decode', pattern, self._decode[key], b'eol')
+                ui.setconfig(b'encode', pattern, self._encode[key], b'eol')
             except KeyError:
-                ui.warn(_("ignoring unknown EOL style '%s' from %s\n")
-                        % (key, self.cfg.source('patterns', pattern)))
+                ui.warn(
+                    _(b"ignoring unknown EOL style '%s' from %s\n")
+                    % (key, self.cfg.source(b'patterns', pattern))
+                )
         # eol.only-consistent can be specified in ~/.hgrc or .hgeol
-        for k, v in self.cfg.items('eol'):
-            ui.setconfig('eol', k, v, 'eol')
+        for k, v in self.cfg.items(b'eol'):
+            ui.setconfig(b'eol', k, v, b'eol')
 
     def checkrev(self, repo, ctx, files):
         failed = []
-        for f in (files or ctx.files()):
+        for f in files or ctx.files():
             if f not in ctx:
                 continue
             for pattern, key, m in self.patterns:
@@ -224,12 +250,17 @@
                     continue
                 target = self._encode[key]
                 data = ctx[f].data()
-                if (target == "to-lf" and "\r\n" in data
-                    or target == "to-crlf" and singlelf.search(data)):
+                if (
+                    target == b"to-lf"
+                    and b"\r\n" in data
+                    or target == b"to-crlf"
+                    and singlelf.search(data)
+                ):
                     failed.append((f, target, bytes(ctx)))
                 break
         return failed
 
+
 def parseeol(ui, repo, nodes):
     try:
         for node in nodes:
@@ -237,17 +268,23 @@
                 if node is None:
                     # Cannot use workingctx.data() since it would load
                     # and cache the filters before we configure them.
-                    data = repo.wvfs('.hgeol').read()
+                    data = repo.wvfs(b'.hgeol').read()
                 else:
-                    data = repo[node]['.hgeol'].data()
+                    data = repo[node][b'.hgeol'].data()
                 return eolfile(ui, repo.root, data)
             except (IOError, LookupError):
                 pass
     except errormod.ParseError as inst:
-        ui.warn(_("warning: ignoring .hgeol file due to parse error "
-                  "at %s: %s\n") % (inst.args[1], inst.args[0]))
+        ui.warn(
+            _(
+                b"warning: ignoring .hgeol file due to parse error "
+                b"at %s: %s\n"
+            )
+            % (inst.args[1], inst.args[0])
+        )
     return None
 
+
 def ensureenabled(ui):
     """make sure the extension is enabled when used as hook
 
@@ -256,10 +293,11 @@
     never loaded. This function ensures the extension is enabled when running
     hooks.
     """
-    if 'eol' in ui._knownconfig:
+    if b'eol' in ui._knownconfig:
         return
-    ui.setconfig('extensions', 'eol', '', source='internal')
-    extensions.loadall(ui, ['eol'])
+    ui.setconfig(b'extensions', b'eol', b'', source=b'internal')
+    extensions.loadall(ui, [b'eol'])
+
 
 def _checkhook(ui, repo, node, headsonly):
     # Get revisions to check and touched files at the same time
@@ -281,37 +319,51 @@
             failed.extend(eol.checkrev(repo, ctx, files))
 
     if failed:
-        eols = {'to-lf': 'CRLF', 'to-crlf': 'LF'}
+        eols = {b'to-lf': b'CRLF', b'to-crlf': b'LF'}
         msgs = []
         for f, target, node in sorted(failed):
-            msgs.append(_("  %s in %s should not have %s line endings") %
-                        (f, node, eols[target]))
-        raise errormod.Abort(_("end-of-line check failed:\n") + "\n".join(msgs))
+            msgs.append(
+                _(b"  %s in %s should not have %s line endings")
+                % (f, node, eols[target])
+            )
+        raise errormod.Abort(
+            _(b"end-of-line check failed:\n") + b"\n".join(msgs)
+        )
+
 
 def checkallhook(ui, repo, node, hooktype, **kwargs):
     """verify that files have expected EOLs"""
     _checkhook(ui, repo, node, False)
 
+
 def checkheadshook(ui, repo, node, hooktype, **kwargs):
     """verify that files have expected EOLs"""
     _checkhook(ui, repo, node, True)
 
+
 # "checkheadshook" used to be called "hook"
 hook = checkheadshook
 
+
 def preupdate(ui, repo, hooktype, parent1, parent2):
     p1node = scmutil.resolvehexnodeidprefix(repo, parent1)
     repo.loadeol([p1node])
     return False
 
+
 def uisetup(ui):
-    ui.setconfig('hooks', 'preupdate.eol', preupdate, 'eol')
+    ui.setconfig(b'hooks', b'preupdate.eol', preupdate, b'eol')
+
 
 def extsetup(ui):
     try:
-        extensions.find('win32text')
-        ui.warn(_("the eol extension is incompatible with the "
-                  "win32text extension\n"))
+        extensions.find(b'win32text')
+        ui.warn(
+            _(
+                b"the eol extension is incompatible with the "
+                b"win32text extension\n"
+            )
+        )
     except KeyError:
         pass
 
@@ -321,13 +373,12 @@
 
     if not repo.local():
         return
-    for name, fn in filters.iteritems():
+    for name, fn in pycompat.iteritems(filters):
         repo.adddatafilter(name, fn)
 
-    ui.setconfig('patch', 'eol', 'auto', 'eol')
+    ui.setconfig(b'patch', b'eol', b'auto', b'eol')
 
     class eolrepo(repo.__class__):
-
         def loadeol(self, nodes):
             eol = parseeol(self.ui, self, nodes)
             if eol is None:
@@ -336,37 +387,37 @@
             return eol.match
 
         def _hgcleardirstate(self):
-            self._eolmatch = self.loadeol([None, 'tip'])
+            self._eolmatch = self.loadeol([None])
             if not self._eolmatch:
                 self._eolmatch = util.never
                 return
 
             oldeol = None
             try:
-                cachemtime = os.path.getmtime(self.vfs.join("eol.cache"))
+                cachemtime = os.path.getmtime(self.vfs.join(b"eol.cache"))
             except OSError:
                 cachemtime = 0
             else:
-                olddata = self.vfs.read("eol.cache")
+                olddata = self.vfs.read(b"eol.cache")
                 if olddata:
                     oldeol = eolfile(self.ui, self.root, olddata)
 
             try:
-                eolmtime = os.path.getmtime(self.wjoin(".hgeol"))
+                eolmtime = os.path.getmtime(self.wjoin(b".hgeol"))
             except OSError:
                 eolmtime = 0
 
-            if eolmtime > cachemtime:
-                self.ui.debug("eol: detected change in .hgeol\n")
+            if eolmtime >= cachemtime and eolmtime > 0:
+                self.ui.debug(b"eol: detected change in .hgeol\n")
 
-                hgeoldata = self.wvfs.read('.hgeol')
+                hgeoldata = self.wvfs.read(b'.hgeol')
                 neweol = eolfile(self.ui, self.root, hgeoldata)
 
                 wlock = None
                 try:
                     wlock = self.wlock()
                     for f in self.dirstate:
-                        if self.dirstate[f] != 'n':
+                        if self.dirstate[f] != b'n':
                             continue
                         if oldeol is not None:
                             if not oldeol.match(f) and not neweol.match(f):
@@ -387,7 +438,7 @@
                         # the new .hgeol file specify a different filter
                         self.dirstate.normallookup(f)
                     # Write the cache to update mtime and cache .hgeol
-                    with self.vfs("eol.cache", "w") as f:
+                    with self.vfs(b"eol.cache", b"w") as f:
                         f.write(hgeoldata)
                 except errormod.LockUnavailable:
                     # If we cannot lock the repository and clear the
@@ -414,8 +465,10 @@
                     # have all non-binary files taken care of.
                     continue
                 if inconsistenteol(data):
-                    raise errormod.Abort(_("inconsistent newline style "
-                                           "in %s\n") % f)
+                    raise errormod.Abort(
+                        _(b"inconsistent newline style in %s\n") % f
+                    )
             return super(eolrepo, self).commitctx(ctx, error, origctx)
+
     repo.__class__ = eolrepo
     repo._hgcleardirstate()
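
Beyond the b'' byteification and black reflow, two behavior changes hide in the
eol.py hunks above: copytoui() now unsets stale decode/encode entries that the
extension itself installed for patterns no longer named by .hgeol, and the cache
staleness test became 'eolmtime >= cachemtime and eolmtime > 0'. A rough sketch of
the stale-pattern cleanup, modelling the ui configuration as a plain dict of
pattern -> (filter, source) pairs (none of this is the real hg API):

    def sync_filters(current, newfilters):
        """Install filters from a freshly parsed .hgeol, unsetting
        entries a previous .hgeol installed that are now gone."""
        wanted = set(newfilters)
        for pattern, (_fltr, source) in list(current.items()):
            # only touch entries recorded as coming from this extension
            if pattern not in wanted and source == 'eol':
                current[pattern] = ('!', 'eol')  # '!' marks it unset
        for pattern, fltr in newfilters.items():
            current[pattern] = (fltr, 'eol')
        return current

    cfg = {'**.txt': ('to-lf', 'eol'), '**.py': ('to-lf', 'user')}
    sync_filters(cfg, {'**.c': 'to-lf'})
    # '**.txt' is unset (the extension installed it); '**.py', which the
    # user configured directly, is left alone.

Checking the recorded source before unsetting is what keeps user-supplied
decode/encode entries untouched.
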
--- a/hgext/extdiff.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/extdiff.py	Mon Oct 21 11:09:48 2019 -0400
@@ -117,60 +117,68 @@
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('extdiff', br'opts\..*',
-    default='',
-    generic=True,
+configitem(
+    b'extdiff', br'opts\..*', default=b'', generic=True,
 )
 
-configitem('extdiff', br'gui\..*',
-    generic=True,
+configitem(
+    b'extdiff', br'gui\..*', generic=True,
 )
 
-configitem('diff-tools', br'.*\.diffargs$',
-    default=None,
-    generic=True,
+configitem(
+    b'diff-tools', br'.*\.diffargs$', default=None, generic=True,
 )
 
-configitem('diff-tools', br'.*\.gui$',
-    generic=True,
+configitem(
+    b'diff-tools', br'.*\.gui$', generic=True,
 )
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
     '''snapshot files as of some revision
     if not using snapshot, -I/-X do not work and recursive diffs
     in tools like kdiff3 and meld display too many files.'''
     dirname = os.path.basename(repo.root)
-    if dirname == "":
-        dirname = "root"
+    if dirname == b"":
+        dirname = b"root"
     if node is not None:
-        dirname = '%s.%s' % (dirname, short(node))
+        dirname = b'%s.%s' % (dirname, short(node))
     base = os.path.join(tmproot, dirname)
     os.mkdir(base)
     fnsandstat = []
 
     if node is not None:
-        ui.note(_('making snapshot of %d files from rev %s\n') %
-                (len(files), short(node)))
+        ui.note(
+            _(b'making snapshot of %d files from rev %s\n')
+            % (len(files), short(node))
+        )
     else:
-        ui.note(_('making snapshot of %d files from working directory\n') %
-            (len(files)))
+        ui.note(
+            _(b'making snapshot of %d files from working directory\n')
+            % (len(files))
+        )
 
     if files:
-        repo.ui.setconfig("ui", "archivemeta", False)
+        repo.ui.setconfig(b"ui", b"archivemeta", False)
 
-        archival.archive(repo, base, node, 'files',
-                         match=scmutil.matchfiles(repo, files),
-                         subrepos=listsubrepos)
+        archival.archive(
+            repo,
+            base,
+            node,
+            b'files',
+            match=scmutil.matchfiles(repo, files),
+            subrepos=listsubrepos,
+        )
 
         for fn in sorted(files):
             wfn = util.pconvert(fn)
-            ui.note('  %s\n' % wfn)
+            ui.note(b'  %s\n' % wfn)
 
             if node is None:
                 dest = os.path.join(base, wfn)
@@ -178,45 +186,82 @@
                 fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
     return dirname, fnsandstat
 
-def formatcmdline(cmdline, repo_root, do3way,
-                  parent1, plabel1, parent2, plabel2, child, clabel):
+
+def formatcmdline(
+    cmdline,
+    repo_root,
+    do3way,
+    parent1,
+    plabel1,
+    parent2,
+    plabel2,
+    child,
+    clabel,
+):
     # Function to quote file/dir names in the argument string.
     # When not operating in 3-way mode, an empty string is
     # returned for parent2.
-    replace = {'parent': parent1, 'parent1': parent1, 'parent2': parent2,
-               'plabel1': plabel1, 'plabel2': plabel2,
-               'child': child, 'clabel': clabel,
-               'root': repo_root}
+    replace = {
+        b'parent': parent1,
+        b'parent1': parent1,
+        b'parent2': parent2,
+        b'plabel1': plabel1,
+        b'plabel2': plabel2,
+        b'child': child,
+        b'clabel': clabel,
+        b'root': repo_root,
+    }
+
     def quote(match):
         pre = match.group(2)
         key = match.group(3)
-        if not do3way and key == 'parent2':
+        if not do3way and key == b'parent2':
             return pre
         return pre + procutil.shellquote(replace[key])
 
     # Match parent2 first, so 'parent1?' will match both parent1 and parent
-    regex = (br'''(['"]?)([^\s'"$]*)'''
-             br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
+    regex = (
+        br'''(['"]?)([^\s'"$]*)'''
+        br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1'
+    )
     if not do3way and not re.search(regex, cmdline):
-        cmdline += ' $parent1 $child'
+        cmdline += b' $parent1 $child'
     return re.sub(regex, quote, cmdline)
 
+
 def _systembackground(cmd, environ=None, cwd=None):
     ''' like 'procutil.system', but returns the Popen object directly
         so we don't have to wait on it.
     '''
     cmd = procutil.quotecommand(cmd)
     env = procutil.shellenviron(environ)
-    proc = subprocess.Popen(procutil.tonativestr(cmd),
-                            shell=True, close_fds=procutil.closefds,
-                            env=procutil.tonativeenv(env),
-                            cwd=pycompat.rapply(procutil.tonativestr, cwd))
+    proc = subprocess.Popen(
+        procutil.tonativestr(cmd),
+        shell=True,
+        close_fds=procutil.closefds,
+        env=procutil.tonativeenv(env),
+        cwd=pycompat.rapply(procutil.tonativestr, cwd),
+    )
     return proc
 
-def _runperfilediff(cmdline, repo_root, ui, guitool, do3way, confirm,
-                    commonfiles, tmproot, dir1a, dir1b,
-                    dir2root, dir2,
-                    rev1a, rev1b, rev2):
+
+def _runperfilediff(
+    cmdline,
+    repo_root,
+    ui,
+    guitool,
+    do3way,
+    confirm,
+    commonfiles,
+    tmproot,
+    dir1a,
+    dir1b,
+    dir2root,
+    dir2,
+    rev1a,
+    rev1b,
+    rev2,
+):
     # Note that we need to sort the list of files because it was
     # built in an "unstable" way and it's annoying to get files in a
     # random order, especially when "confirm" mode is enabled.
@@ -228,8 +273,8 @@
         if not os.path.isfile(path1a):
             path1a = os.devnull
 
-        path1b = ''
-        label1b = ''
+        path1b = b''
+        label1b = b''
         if do3way:
             path1b = os.path.join(tmproot, dir1b, commonfile)
             label1b = commonfile + rev1b
@@ -241,31 +286,42 @@
 
         if confirm:
             # Prompt before showing this diff
-            difffiles = _('diff %s (%d of %d)') % (commonfile, idx + 1,
-                                                   totalfiles)
-            responses = _('[Yns?]'
-                          '$$ &Yes, show diff'
-                          '$$ &No, skip this diff'
-                          '$$ &Skip remaining diffs'
-                          '$$ &? (display help)')
-            r = ui.promptchoice('%s %s' % (difffiles, responses))
-            if r == 3: # ?
+            difffiles = _(b'diff %s (%d of %d)') % (
+                commonfile,
+                idx + 1,
+                totalfiles,
+            )
+            responses = _(
+                b'[Yns?]'
+                b'$$ &Yes, show diff'
+                b'$$ &No, skip this diff'
+                b'$$ &Skip remaining diffs'
+                b'$$ &? (display help)'
+            )
+            r = ui.promptchoice(b'%s %s' % (difffiles, responses))
+            if r == 3:  # ?
                 while r == 3:
                     for c, t in ui.extractchoices(responses)[1]:
-                        ui.write('%s - %s\n' % (c, encoding.lower(t)))
-                    r = ui.promptchoice('%s %s' % (difffiles, responses))
-            if r == 0: # yes
+                        ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
+                    r = ui.promptchoice(b'%s %s' % (difffiles, responses))
+            if r == 0:  # yes
                 pass
-            elif r == 1: # no
+            elif r == 1:  # no
                 continue
-            elif r == 2: # skip
+            elif r == 2:  # skip
                 break
 
         curcmdline = formatcmdline(
-            cmdline, repo_root, do3way=do3way,
-            parent1=path1a, plabel1=label1a,
-            parent2=path1b, plabel2=label1b,
-            child=path2, clabel=label2)
+            cmdline,
+            repo_root,
+            do3way=do3way,
+            parent1=path1a,
+            plabel1=label1a,
+            parent2=path1b,
+            plabel2=label1b,
+            child=path2,
+            clabel=label2,
+        )
 
         if confirm or not guitool:
             # Run the comparison program and wait for it to exit
@@ -274,23 +330,27 @@
             # from the user between each invocation, or because, as far
             # as we know, the tool doesn't have a GUI, in which case
             # we can't run multiple CLI programs at the same time.
-            ui.debug('running %r in %s\n' %
-                     (pycompat.bytestr(curcmdline), tmproot))
-            ui.system(curcmdline, cwd=tmproot, blockedtag='extdiff')
+            ui.debug(
+                b'running %r in %s\n' % (pycompat.bytestr(curcmdline), tmproot)
+            )
+            ui.system(curcmdline, cwd=tmproot, blockedtag=b'extdiff')
         else:
             # Run the comparison program but don't wait, as we're
             # going to rapid-fire each file diff and then wait on
             # the whole group.
-            ui.debug('running %r in %s (backgrounded)\n' %
-                     (pycompat.bytestr(curcmdline), tmproot))
+            ui.debug(
+                b'running %r in %s (backgrounded)\n'
+                % (pycompat.bytestr(curcmdline), tmproot)
+            )
             proc = _systembackground(curcmdline, cwd=tmproot)
             waitprocs.append(proc)
 
     if waitprocs:
-        with ui.timeblockedsection('extdiff'):
+        with ui.timeblockedsection(b'extdiff'):
             for proc in waitprocs:
                 proc.wait()
 
+
 def dodiff(ui, repo, cmdline, pats, opts, guitool=False):
     '''Do the actual diff:
 
@@ -300,12 +360,12 @@
     - just invoke the diff for a single file in the working dir
     '''
 
-    revs = opts.get('rev')
-    change = opts.get('change')
-    do3way = '$parent2' in cmdline
+    revs = opts.get(b'rev')
+    change = opts.get(b'change')
+    do3way = b'$parent2' in cmdline
 
     if revs and change:
-        msg = _('cannot specify --rev and --change at the same time')
+        msg = _(b'cannot specify --rev and --change at the same time')
         raise error.Abort(msg)
     elif change:
         ctx2 = scmutil.revsingle(repo, change, None)
@@ -317,8 +377,8 @@
         else:
             ctx1b = repo[nullid]
 
-    perfile = opts.get('per_file')
-    confirm = opts.get('confirm')
+    perfile = opts.get(b'per_file')
+    confirm = opts.get(b'confirm')
 
     node1a = ctx1a.node()
     node1b = ctx1b.node()
@@ -329,24 +389,26 @@
         if node1b == nullid:
             do3way = False
 
-    subrepos=opts.get('subrepos')
+    subrepos = opts.get(b'subrepos')
 
     matcher = scmutil.match(repo[node2], pats, opts)
 
-    if opts.get('patch'):
+    if opts.get(b'patch'):
         if subrepos:
-            raise error.Abort(_('--patch cannot be used with --subrepos'))
+            raise error.Abort(_(b'--patch cannot be used with --subrepos'))
         if perfile:
-            raise error.Abort(_('--patch cannot be used with --per-file'))
+            raise error.Abort(_(b'--patch cannot be used with --per-file'))
         if node2 is None:
-            raise error.Abort(_('--patch requires two revisions'))
+            raise error.Abort(_(b'--patch requires two revisions'))
     else:
-        mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
-                                                   listsubrepos=subrepos)[:3])
+        mod_a, add_a, rem_a = map(
+            set, repo.status(node1a, node2, matcher, listsubrepos=subrepos)[:3]
+        )
         if do3way:
-            mod_b, add_b, rem_b = map(set,
-                                      repo.status(node1b, node2, matcher,
-                                                  listsubrepos=subrepos)[:3])
+            mod_b, add_b, rem_b = map(
+                set,
+                repo.status(node1b, node2, matcher, listsubrepos=subrepos)[:3],
+            )
         else:
             mod_b, add_b, rem_b = set(), set(), set()
         modadd = mod_a | add_a | mod_b | add_b
@@ -354,41 +416,44 @@
         if not common:
             return 0
 
-    tmproot = pycompat.mkdtemp(prefix='extdiff.')
+    tmproot = pycompat.mkdtemp(prefix=b'extdiff.')
     try:
-        if not opts.get('patch'):
+        if not opts.get(b'patch'):
             # Always make a copy of node1a (and node1b, if applicable)
             dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
-            dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
-                             subrepos)[0]
-            rev1a = '@%d' % repo[node1a].rev()
+            dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot, subrepos)[
+                0
+            ]
+            rev1a = b'@%d' % repo[node1a].rev()
             if do3way:
                 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
-                dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
-                                 subrepos)[0]
-                rev1b = '@%d' % repo[node1b].rev()
+                dir1b = snapshot(
+                    ui, repo, dir1b_files, node1b, tmproot, subrepos
+                )[0]
+                rev1b = b'@%d' % repo[node1b].rev()
             else:
                 dir1b = None
-                rev1b = ''
+                rev1b = b''
 
             fnsandstat = []
 
             # If node2 is not the wc or there is >1 change, copy it
-            dir2root = ''
-            rev2 = ''
+            dir2root = b''
+            rev2 = b''
             if node2:
                 dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
-                rev2 = '@%d' % repo[node2].rev()
+                rev2 = b'@%d' % repo[node2].rev()
             elif len(common) > 1:
-                #we only actually need to get the files to copy back to
-                #the working dir in this case (because the other cases
-                #are: diffing 2 revisions or single file -- in which case
-                #the file is already directly passed to the diff tool).
-                dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot,
-                                            subrepos)
+                # we only actually need to get the files to copy back to
+                # the working dir in this case (because the other cases
+                # are: diffing 2 revisions or single file -- in which case
+                # the file is already directly passed to the diff tool).
+                dir2, fnsandstat = snapshot(
+                    ui, repo, modadd, None, tmproot, subrepos
+                )
             else:
                 # This lets the diff tool open the changed file directly
-                dir2 = ''
+                dir2 = b''
                 dir2root = repo.root
 
             label1a = rev1a
@@ -411,12 +476,15 @@
                 dir2 = os.path.join(dir2root, dir2, common_file)
                 label2 = common_file + rev2
         else:
-            template = 'hg-%h.patch'
-            with formatter.nullformatter(ui, 'extdiff', {}) as fm:
-                cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
-                               fm,
-                               fntemplate=repo.vfs.reljoin(tmproot, template),
-                               match=matcher)
+            template = b'hg-%h.patch'
+            with formatter.nullformatter(ui, b'extdiff', {}) as fm:
+                cmdutil.export(
+                    repo,
+                    [repo[node1a].rev(), repo[node2].rev()],
+                    fm,
+                    fntemplate=repo.vfs.reljoin(tmproot, template),
+                    match=matcher,
+                )
             label1a = cmdutil.makefilename(repo[node1a], template)
             label2 = cmdutil.makefilename(repo[node2], template)
             dir1a = repo.vfs.reljoin(tmproot, label1a)
@@ -428,21 +496,39 @@
         if not perfile:
             # Run the external tool on the 2 temp directories or the patches
             cmdline = formatcmdline(
-                cmdline, repo.root, do3way=do3way,
-                parent1=dir1a, plabel1=label1a,
-                parent2=dir1b, plabel2=label1b,
-                child=dir2, clabel=label2)
-            ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline),
-                                             tmproot))
-            ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
+                cmdline,
+                repo.root,
+                do3way=do3way,
+                parent1=dir1a,
+                plabel1=label1a,
+                parent2=dir1b,
+                plabel2=label1b,
+                child=dir2,
+                clabel=label2,
+            )
+            ui.debug(
+                b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot)
+            )
+            ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
         else:
             # Run the external tool once for each pair of files
             _runperfilediff(
-                cmdline, repo.root, ui, guitool=guitool,
-                do3way=do3way, confirm=confirm,
-                commonfiles=common, tmproot=tmproot, dir1a=dir1a, dir1b=dir1b,
-                dir2root=dir2root, dir2=dir2,
-                rev1a=rev1a, rev1b=rev1b, rev2=rev2)
+                cmdline,
+                repo.root,
+                ui,
+                guitool=guitool,
+                do3way=do3way,
+                confirm=confirm,
+                commonfiles=common,
+                tmproot=tmproot,
+                dir1a=dir1a,
+                dir1b=dir1b,
+                dir2root=dir2root,
+                dir2=dir2,
+                rev1a=rev1a,
+                rev1b=rev1b,
+                rev2=rev2,
+            )
 
         for copy_fn, working_fn, st in fnsandstat:
             cpstat = os.lstat(copy_fn)
@@ -453,36 +539,61 @@
             # copyfile() carries over the permission, so the mode check could
             # be in an 'elif' branch, but it is kept separate for the case
             # where the file has changed without affecting mtime or size.
-            if (cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
+            if (
+                cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
                 or cpstat.st_size != st.st_size
-                or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)):
-                ui.debug('file changed while diffing. '
-                         'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
+                or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)
+            ):
+                ui.debug(
+                    b'file changed while diffing. '
+                    b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn)
+                )
                 util.copyfile(copy_fn, working_fn)
 
         return 1
     finally:
-        ui.note(_('cleaning up temp directory\n'))
+        ui.note(_(b'cleaning up temp directory\n'))
         shutil.rmtree(tmproot)
 
-extdiffopts = [
-    ('o', 'option', [],
-     _('pass option to comparison program'), _('OPT')),
-    ('r', 'rev', [], _('revision'), _('REV')),
-    ('c', 'change', '', _('change made by revision'), _('REV')),
-    ('', 'per-file', False,
-     _('compare each file instead of revision snapshots')),
-    ('', 'confirm', False,
-     _('prompt user before each external program invocation')),
-    ('', 'patch', None, _('compare patches for two revisions'))
-    ] + cmdutil.walkopts + cmdutil.subrepoopts
 
-@command('extdiff',
-    [('p', 'program', '', _('comparison program to run'), _('CMD')),
-     ] + extdiffopts,
-    _('hg extdiff [OPT]... [FILE]...'),
+extdiffopts = (
+    [
+        (
+            b'o',
+            b'option',
+            [],
+            _(b'pass option to comparison program'),
+            _(b'OPT'),
+        ),
+        (b'r', b'rev', [], _(b'revision'), _(b'REV')),
+        (b'c', b'change', b'', _(b'change made by revision'), _(b'REV')),
+        (
+            b'',
+            b'per-file',
+            False,
+            _(b'compare each file instead of revision snapshots'),
+        ),
+        (
+            b'',
+            b'confirm',
+            False,
+            _(b'prompt user before each external program invocation'),
+        ),
+        (b'', b'patch', None, _(b'compare patches for two revisions')),
+    ]
+    + cmdutil.walkopts
+    + cmdutil.subrepoopts
+)
+
+
+@command(
+    b'extdiff',
+    [(b'p', b'program', b'', _(b'comparison program to run'), _(b'CMD')),]
+    + extdiffopts,
+    _(b'hg extdiff [OPT]... [FILE]...'),
     helpcategory=command.CATEGORY_FILE_CONTENTS,
-    inferrepo=True)
+    inferrepo=True,
+)
 def extdiff(ui, repo, *pats, **opts):
     '''use external program to diff repository (or selected files)
 
@@ -515,14 +626,15 @@
     the external program. It is ignored if --per-file isn't specified.
     '''
     opts = pycompat.byteskwargs(opts)
-    program = opts.get('program')
-    option = opts.get('option')
+    program = opts.get(b'program')
+    option = opts.get(b'option')
     if not program:
-        program = 'diff'
-        option = option or ['-Npru']
-    cmdline = ' '.join(map(procutil.shellquote, [program] + option))
+        program = b'diff'
+        option = option or [b'-Npru']
+    cmdline = b' '.join(map(procutil.shellquote, [program] + option))
     return dodiff(ui, repo, cmdline, pats, opts)
 
+
 class savedcmd(object):
     """use external program to diff repository (or selected files)
 
@@ -549,27 +661,29 @@
 
     def __call__(self, ui, repo, *pats, **opts):
         opts = pycompat.byteskwargs(opts)
-        options = ' '.join(map(procutil.shellquote, opts['option']))
+        options = b' '.join(map(procutil.shellquote, opts[b'option']))
         if options:
-            options = ' ' + options
-        return dodiff(ui, repo, self._cmdline + options, pats, opts,
-                      guitool=self._isgui)
+            options = b' ' + options
+        return dodiff(
+            ui, repo, self._cmdline + options, pats, opts, guitool=self._isgui
+        )
+
 
 def uisetup(ui):
-    for cmd, path in ui.configitems('extdiff'):
+    for cmd, path in ui.configitems(b'extdiff'):
         path = util.expandpath(path)
-        if cmd.startswith('cmd.'):
+        if cmd.startswith(b'cmd.'):
             cmd = cmd[4:]
             if not path:
                 path = procutil.findexe(cmd)
                 if path is None:
                     path = filemerge.findexternaltool(ui, cmd) or cmd
-            diffopts = ui.config('extdiff', 'opts.' + cmd)
+            diffopts = ui.config(b'extdiff', b'opts.' + cmd)
             cmdline = procutil.shellquote(path)
             if diffopts:
-                cmdline += ' ' + diffopts
-            isgui = ui.configbool('extdiff', 'gui.' + cmd)
-        elif cmd.startswith('opts.') or cmd.startswith('gui.'):
+                cmdline += b' ' + diffopts
+            isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
+        elif cmd.startswith(b'opts.') or cmd.startswith(b'gui.'):
             continue
         else:
             if path:
@@ -583,20 +697,25 @@
                     path = filemerge.findexternaltool(ui, cmd) or cmd
                 cmdline = procutil.shellquote(path)
                 diffopts = False
-            isgui = ui.configbool('extdiff', 'gui.' + cmd)
+            isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
         # look for diff arguments in [diff-tools] then [merge-tools]
         if not diffopts:
-            key = cmd + '.diffargs'
-            for section in ('diff-tools', 'merge-tools'):
+            key = cmd + b'.diffargs'
+            for section in (b'diff-tools', b'merge-tools'):
                 args = ui.config(section, key)
                 if args:
-                    cmdline += ' ' + args
+                    cmdline += b' ' + args
                     if isgui is None:
-                        isgui = ui.configbool(section, cmd + '.gui') or False
+                        isgui = ui.configbool(section, cmd + b'.gui') or False
                     break
-        command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
-                helpcategory=command.CATEGORY_FILE_CONTENTS,
-                inferrepo=True)(savedcmd(path, cmdline, isgui))
+        command(
+            cmd,
+            extdiffopts[:],
+            _(b'hg %s [OPTION]... [FILE]...') % cmd,
+            helpcategory=command.CATEGORY_FILE_CONTENTS,
+            inferrepo=True,
+        )(savedcmd(path, cmdline, isgui))
+
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = [savedcmd]
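
The heart of extdiff is formatcmdline() above: $parent1, $parent2, $child and
friends are replaced with shell-quoted paths, $parent2 collapses to nothing
outside 3-way mode, and ' $parent1 $child' is appended when the command line
names no placeholder at all. A self-contained sketch of that substitution, with
shlex.quote standing in for procutil.shellquote and str in place of bytes:

    import re
    import shlex

    def format_cmdline(cmdline, replace, do3way=False):
        def quote(match):
            pre = match.group(2)
            key = match.group(3)
            if not do3way and key == 'parent2':
                return pre  # drop the second parent in 2-way mode
            return pre + shlex.quote(replace[key])

        # group 1 captures an optional surrounding quote so that a
        # user-written "$child" swallows its own quotes when replaced
        regex = (r'''(['"]?)([^\s'"$]*)'''
                 r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
        if not do3way and not re.search(regex, cmdline):
            cmdline += ' $parent1 $child'
        return re.sub(regex, quote, cmdline)

    print(format_cmdline('kdiff3', {'parent1': '/tmp/a dir', 'child': '/tmp/b'}))
    # -> kdiff3 '/tmp/a dir' /tmp/b
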
--- a/hgext/factotum.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/factotum.py	Mon Oct 21 11:09:48 2019 -0400
@@ -49,9 +49,8 @@
 
 import os
 from mercurial.i18n import _
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.pycompat import setattr
+from mercurial.utils import procutil
 from mercurial import (
     error,
     httpconnection,
@@ -70,53 +69,58 @@
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('factotum', 'executable',
-    default='/bin/auth/factotum',
+configitem(
+    b'factotum', b'executable', default=b'/bin/auth/factotum',
 )
-configitem('factotum', 'mountpoint',
-    default='/mnt/factotum',
+configitem(
+    b'factotum', b'mountpoint', default=b'/mnt/factotum',
 )
-configitem('factotum', 'service',
-    default='hg',
+configitem(
+    b'factotum', b'service', default=b'hg',
 )
 
+
 def auth_getkey(self, params):
     if not self.ui.interactive():
-        raise error.Abort(_('factotum not interactive'))
-    if 'user=' not in params:
-        params = '%s user?' % params
-    params = '%s !password?' % params
-    os.system(procutil.tonativestr("%s -g '%s'" % (_executable, params)))
+        raise error.Abort(_(b'factotum not interactive'))
+    if b'user=' not in params:
+        params = b'%s user?' % params
+    params = b'%s !password?' % params
+    os.system(procutil.tonativestr(b"%s -g '%s'" % (_executable, params)))
+
 
 def auth_getuserpasswd(self, getkey, params):
-    params = 'proto=pass %s' % params
+    params = b'proto=pass %s' % params
     while True:
-        fd = os.open('%s/rpc' % _mountpoint, os.O_RDWR)
+        fd = os.open(b'%s/rpc' % _mountpoint, os.O_RDWR)
         try:
-            os.write(fd, 'start %s' % params)
+            os.write(fd, b'start %s' % params)
             l = os.read(fd, ERRMAX).split()
-            if l[0] == 'ok':
-                os.write(fd, 'read')
+            if l[0] == b'ok':
+                os.write(fd, b'read')
                 status, user, passwd = os.read(fd, ERRMAX).split(None, 2)
-                if status == 'ok':
-                    if passwd.startswith("'"):
-                        if passwd.endswith("'"):
-                            passwd = passwd[1:-1].replace("''", "'")
+                if status == b'ok':
+                    if passwd.startswith(b"'"):
+                        if passwd.endswith(b"'"):
+                            passwd = passwd[1:-1].replace(b"''", b"'")
                         else:
-                            raise error.Abort(_('malformed password string'))
+                            raise error.Abort(_(b'malformed password string'))
                     return (user, passwd)
         except (OSError, IOError):
-            raise error.Abort(_('factotum not responding'))
+            raise error.Abort(_(b'factotum not responding'))
         finally:
             os.close(fd)
         getkey(self, params)
 
+
 def monkeypatch_method(cls):
     def decorator(func):
         setattr(cls, func.__name__, func)
         return func
+
     return decorator
 
+
 @monkeypatch_method(passwordmgr)
 def find_user_password(self, realm, authuri):
     user, passwd = self.passwddb.find_user_password(realm, authuri)
@@ -124,28 +128,29 @@
         self._writedebug(user, passwd)
         return (user, passwd)
 
-    prefix = ''
+    prefix = b''
     res = httpconnection.readauthforuri(self.ui, authuri, user)
     if res:
         _, auth = res
-        prefix = auth.get('prefix')
-        user, passwd = auth.get('username'), auth.get('password')
+        prefix = auth.get(b'prefix')
+        user, passwd = auth.get(b'username'), auth.get(b'password')
     if not user or not passwd:
         if not prefix:
-            prefix = realm.split(' ')[0].lower()
-        params = 'service=%s prefix=%s' % (_service, prefix)
+            prefix = realm.split(b' ')[0].lower()
+        params = b'service=%s prefix=%s' % (_service, prefix)
         if user:
-            params = '%s user=%s' % (params, user)
+            params = b'%s user=%s' % (params, user)
         user, passwd = auth_getuserpasswd(self, auth_getkey, params)
 
     self.add_password(realm, authuri, user, passwd)
     self._writedebug(user, passwd)
     return (user, passwd)
 
+
 def uisetup(ui):
     global _executable
-    _executable = ui.config('factotum', 'executable')
+    _executable = ui.config(b'factotum', b'executable')
     global _mountpoint
-    _mountpoint = ui.config('factotum', 'mountpoint')
+    _mountpoint = ui.config(b'factotum', b'mountpoint')
     global _service
-    _service = ui.config('factotum', 'service')
+    _service = ui.config(b'factotum', b'service')
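
factotum's monkeypatch_method decorator above is a small reusable pattern: it
attaches the decorated function to an existing class (here Mercurial's
passwordmgr) under the function's own name. A standalone illustration, with a
throwaway Greeter class in place of passwordmgr:

    def monkeypatch_method(cls):
        def decorator(func):
            setattr(cls, func.__name__, func)  # install under its own name
            return func
        return decorator

    class Greeter(object):  # illustrative target class
        pass

    @monkeypatch_method(Greeter)
    def greet(self, name):
        return 'hello %s' % name

    print(Greeter().greet('factotum'))  # -> hello factotum
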
--- a/hgext/fastannotate/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fastannotate/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -62,13 +62,6 @@
     # the server. (default: 10)
     clientfetchthreshold = 10
 
-    # use flock instead of the file existence lock
-    # flock may not work well on some network filesystems, but they avoid
-    # creating and deleting files frequently, which is faster when updating
-    # the annotate cache in batch. if you have issues with this option, set it
-    # to False. (default: True if flock is supported, False otherwise)
-    useflock = True
-
     # for "fctx" mode, always follow renames regardless of command line option.
     # this is a BC with the original command but will reduce the space needed
     # for annotate cache, and is useful for client-server setup since the
@@ -100,8 +93,6 @@
 #
 # * rename the config knob for updating the local cache from a remote server
 #
-# * move `flock` based locking to a common area
-#
 # * revise wireprotocol for sharing annotate files
 #
 # * figure out a sensible default for `mainbranch` (with the caveat
@@ -114,7 +105,6 @@
 
 from mercurial.i18n import _
 from mercurial import (
-    configitems,
     error as hgerror,
     localrepo,
     registrar,
@@ -122,7 +112,6 @@
 
 from . import (
     commands,
-    context,
     protocol,
 )
 
@@ -130,64 +119,56 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 cmdtable = commands.cmdtable
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('fastannotate', 'modes', default=['fastannotate'])
-configitem('fastannotate', 'server', default=False)
-configitem('fastannotate', 'useflock', default=configitems.dynamicdefault)
-configitem('fastannotate', 'client', default=False)
-configitem('fastannotate', 'unfilteredrepo', default=True)
-configitem('fastannotate', 'defaultformat', default=['number'])
-configitem('fastannotate', 'perfhack', default=False)
-configitem('fastannotate', 'mainbranch')
-configitem('fastannotate', 'forcetext', default=True)
-configitem('fastannotate', 'forcefollow', default=True)
-configitem('fastannotate', 'clientfetchthreshold', default=10)
-configitem('fastannotate', 'serverbuildondemand', default=True)
-configitem('fastannotate', 'remotepath', default='default')
+configitem(b'fastannotate', b'modes', default=[b'fastannotate'])
+configitem(b'fastannotate', b'server', default=False)
+configitem(b'fastannotate', b'client', default=False)
+configitem(b'fastannotate', b'unfilteredrepo', default=True)
+configitem(b'fastannotate', b'defaultformat', default=[b'number'])
+configitem(b'fastannotate', b'perfhack', default=False)
+configitem(b'fastannotate', b'mainbranch')
+configitem(b'fastannotate', b'forcetext', default=True)
+configitem(b'fastannotate', b'forcefollow', default=True)
+configitem(b'fastannotate', b'clientfetchthreshold', default=10)
+configitem(b'fastannotate', b'serverbuildondemand', default=True)
+configitem(b'fastannotate', b'remotepath', default=b'default')
 
-def _flockavailable():
-    try:
-        import fcntl
-        fcntl.flock
-    except (AttributeError, ImportError):
-        return False
-    else:
-        return True
 
 def uisetup(ui):
-    modes = set(ui.configlist('fastannotate', 'modes'))
-    if 'fctx' in modes:
-        modes.discard('hgweb')
+    modes = set(ui.configlist(b'fastannotate', b'modes'))
+    if b'fctx' in modes:
+        modes.discard(b'hgweb')
     for name in modes:
-        if name == 'fastannotate':
+        if name == b'fastannotate':
             commands.registercommand()
-        elif name == 'hgweb':
+        elif name == b'hgweb':
             from . import support
+
             support.replacehgwebannotate()
-        elif name == 'fctx':
+        elif name == b'fctx':
             from . import support
+
             support.replacefctxannotate()
             commands.wrapdefault()
         else:
-            raise hgerror.Abort(_('fastannotate: invalid mode: %s') % name)
+            raise hgerror.Abort(_(b'fastannotate: invalid mode: %s') % name)
 
-    if ui.configbool('fastannotate', 'server'):
+    if ui.configbool(b'fastannotate', b'server'):
         protocol.serveruisetup(ui)
 
-    if ui.configbool('fastannotate', 'useflock', _flockavailable()):
-        context.pathhelper.lock = context.pathhelper._lockflock
 
 def extsetup(ui):
     # fastannotate has its own locking, without depending on repo lock
     # TODO: avoid mutating this unless the specific repo has it enabled
-    localrepo.localrepository._wlockfreeprefix.add('fastannotate/')
+    localrepo.localrepository._wlockfreeprefix.add(b'fastannotate/')
+
 
 def reposetup(ui, repo):
-    if ui.configbool('fastannotate', 'client'):
+    if ui.configbool(b'fastannotate', b'client'):
         protocol.clientreposetup(ui, repo)
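
uisetup() above dispatches on the fastannotate.modes config list, with the
'fctx' mode superseding 'hgweb' and unknown modes aborting. Stripped of the hg
machinery, the dispatch has roughly this shape (the enable_* callables stand in
for registercommand() and the support.replace* hooks):

    def setup(modes, enable_command, enable_hgweb, enable_fctx):
        modes = set(modes)
        if 'fctx' in modes:
            modes.discard('hgweb')  # fctx mode already covers hgweb
        for name in modes:
            if name == 'fastannotate':
                enable_command()
            elif name == 'hgweb':
                enable_hgweb()
            elif name == 'fctx':
                enable_fctx()
            else:
                raise ValueError('fastannotate: invalid mode: %s' % name)

    setup(['fastannotate', 'fctx', 'hgweb'],
          lambda: print('command registered'),
          lambda: print('hgweb annotate replaced'),
          lambda: print('fctx annotate replaced'))
    # the hgweb hook is never called: 'fctx' discarded it
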
--- a/hgext/fastannotate/commands.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fastannotate/commands.py	Mon Oct 21 11:09:48 2019 -0400
@@ -31,9 +31,10 @@
 cmdtable = {}
 command = registrar.command(cmdtable)
 
+
 def _matchpaths(repo, rev, pats, opts, aopts=facontext.defaultopts):
     """generate paths matching given patterns"""
-    perfhack = repo.ui.configbool('fastannotate', 'perfhack')
+    perfhack = repo.ui.configbool(b'fastannotate', b'perfhack')
 
     # disable perfhack if:
     # a) any walkopt is used
@@ -43,18 +44,25 @@
         # cwd relative to reporoot
         reporoot = os.path.dirname(repo.path)
         reldir = os.path.relpath(encoding.getcwd(), reporoot)
-        if reldir == '.':
-            reldir = ''
-        if any(opts.get(o[1]) for o in commands.walkopts): # a)
+        if reldir == b'.':
+            reldir = b''
+        if any(opts.get(o[1]) for o in commands.walkopts):  # a)
             perfhack = False
-        else: # b)
-            relpats = [os.path.relpath(p, reporoot) if os.path.isabs(p) else p
-                       for p in pats]
+        else:  # b)
+            relpats = [
+                os.path.relpath(p, reporoot) if os.path.isabs(p) else p
+                for p in pats
+            ]
             # disable perfhack on '..' since it allows escaping from the repo
-            if any(('..' in f or
-                    not os.path.isfile(
-                        facontext.pathhelper(repo, f, aopts).linelogpath))
-                   for f in relpats):
+            if any(
+                (
+                    b'..' in f
+                    or not os.path.isfile(
+                        facontext.pathhelper(repo, f, aopts).linelogpath
+                    )
+                )
+                for f in relpats
+            ):
                 perfhack = False
 
     # perfhack: emit paths directly without checking with the manifest
@@ -63,36 +71,73 @@
         for p in relpats:
             yield os.path.join(reldir, p)
     else:
+
         def bad(x, y):
-            raise error.Abort("%s: %s" % (x, y))
+            raise error.Abort(b"%s: %s" % (x, y))
+
         ctx = scmutil.revsingle(repo, rev)
         m = scmutil.match(ctx, pats, opts, badfn=bad)
         for p in ctx.walk(m):
             yield p
 
+
 fastannotatecommandargs = {
     r'options': [
-        ('r', 'rev', '.', _('annotate the specified revision'), _('REV')),
-        ('u', 'user', None, _('list the author (long with -v)')),
-        ('f', 'file', None, _('list the filename')),
-        ('d', 'date', None, _('list the date (short with -q)')),
-        ('n', 'number', None, _('list the revision number (default)')),
-        ('c', 'changeset', None, _('list the changeset')),
-        ('l', 'line-number', None, _('show line number at the first '
-                                     'appearance')),
-        ('e', 'deleted', None, _('show deleted lines (slow) (EXPERIMENTAL)')),
-        ('', 'no-content', None, _('do not show file content (EXPERIMENTAL)')),
-        ('', 'no-follow', None, _("don't follow copies and renames")),
-        ('', 'linear', None, _('enforce linear history, ignore second parent '
-                               'of merges (EXPERIMENTAL)')),
-        ('', 'long-hash', None, _('show long changeset hash (EXPERIMENTAL)')),
-        ('', 'rebuild', None, _('rebuild cache even if it exists '
-                                '(EXPERIMENTAL)')),
-    ] + commands.diffwsopts + commands.walkopts + commands.formatteropts,
-    r'synopsis': _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
+        (b'r', b'rev', b'.', _(b'annotate the specified revision'), _(b'REV')),
+        (b'u', b'user', None, _(b'list the author (long with -v)')),
+        (b'f', b'file', None, _(b'list the filename')),
+        (b'd', b'date', None, _(b'list the date (short with -q)')),
+        (b'n', b'number', None, _(b'list the revision number (default)')),
+        (b'c', b'changeset', None, _(b'list the changeset')),
+        (
+            b'l',
+            b'line-number',
+            None,
+            _(b'show line number at the first appearance'),
+        ),
+        (
+            b'e',
+            b'deleted',
+            None,
+            _(b'show deleted lines (slow) (EXPERIMENTAL)'),
+        ),
+        (
+            b'',
+            b'no-content',
+            None,
+            _(b'do not show file content (EXPERIMENTAL)'),
+        ),
+        (b'', b'no-follow', None, _(b"don't follow copies and renames")),
+        (
+            b'',
+            b'linear',
+            None,
+            _(
+                b'enforce linear history, ignore second parent '
+                b'of merges (EXPERIMENTAL)'
+            ),
+        ),
+        (
+            b'',
+            b'long-hash',
+            None,
+            _(b'show long changeset hash (EXPERIMENTAL)'),
+        ),
+        (
+            b'',
+            b'rebuild',
+            None,
+            _(b'rebuild cache even if it exists (EXPERIMENTAL)'),
+        ),
+    ]
+    + commands.diffwsopts
+    + commands.walkopts
+    + commands.formatteropts,
+    r'synopsis': _(b'[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
     r'inferrepo': True,
 }
 
+
 def fastannotate(ui, repo, *pats, **opts):
     """show changeset information by line for each file
 
@@ -125,43 +170,49 @@
         affecting results are used.
     """
     if not pats:
-        raise error.Abort(_('at least one filename or pattern is required'))
+        raise error.Abort(_(b'at least one filename or pattern is required'))
 
     # performance hack: filtered repo can be slow. unfilter by default.
-    if ui.configbool('fastannotate', 'unfilteredrepo'):
+    if ui.configbool(b'fastannotate', b'unfilteredrepo'):
         repo = repo.unfiltered()
 
     opts = pycompat.byteskwargs(opts)
 
-    rev = opts.get('rev', '.')
-    rebuild = opts.get('rebuild', False)
+    rev = opts.get(b'rev', b'.')
+    rebuild = opts.get(b'rebuild', False)
 
-    diffopts = patch.difffeatureopts(ui, opts, section='annotate',
-                                     whitespace=True)
+    diffopts = patch.difffeatureopts(
+        ui, opts, section=b'annotate', whitespace=True
+    )
     aopts = facontext.annotateopts(
         diffopts=diffopts,
-        followmerge=not opts.get('linear', False),
-        followrename=not opts.get('no_follow', False))
+        followmerge=not opts.get(b'linear', False),
+        followrename=not opts.get(b'no_follow', False),
+    )
 
-    if not any(opts.get(s)
-               for s in ['user', 'date', 'file', 'number', 'changeset']):
+    if not any(
+        opts.get(s)
+        for s in [b'user', b'date', b'file', b'number', b'changeset']
+    ):
         # default 'number' for compatibility. but fastannotate is more
         # efficient with "changeset", "line-number" and "no-content".
-        for name in ui.configlist('fastannotate', 'defaultformat', ['number']):
+        for name in ui.configlist(
+            b'fastannotate', b'defaultformat', [b'number']
+        ):
             opts[name] = True
 
-    ui.pager('fastannotate')
-    template = opts.get('template')
-    if template == 'json':
+    ui.pager(b'fastannotate')
+    template = opts.get(b'template')
+    if template == b'json':
         formatter = faformatter.jsonformatter(ui, repo, opts)
     else:
         formatter = faformatter.defaultformatter(ui, repo, opts)
-    showdeleted = opts.get('deleted', False)
-    showlines = not bool(opts.get('no_content'))
-    showpath = opts.get('file', False)
+    showdeleted = opts.get(b'deleted', False)
+    showlines = not bool(opts.get(b'no_content'))
+    showpath = opts.get(b'file', False)
 
     # find the head of the main (master) branch
-    master = ui.config('fastannotate', 'mainbranch') or rev
+    master = ui.config(b'fastannotate', b'mainbranch') or rev
 
     # paths will be used for prefetching and the real annotating
     paths = list(_matchpaths(repo, rev, pats, opts, aopts))
@@ -175,20 +226,24 @@
         while True:
             try:
                 with facontext.annotatecontext(repo, path, aopts, rebuild) as a:
-                    result = a.annotate(rev, master=master, showpath=showpath,
-                                        showlines=(showlines and
-                                                   not showdeleted))
+                    result = a.annotate(
+                        rev,
+                        master=master,
+                        showpath=showpath,
+                        showlines=(showlines and not showdeleted),
+                    )
                     if showdeleted:
                         existinglines = set((l[0], l[1]) for l in result)
                         result = a.annotatealllines(
-                            rev, showpath=showpath, showlines=showlines)
+                            rev, showpath=showpath, showlines=showlines
+                        )
                 break
             except (faerror.CannotReuseError, faerror.CorruptedFileError):
                 # happens if master moves backwards, or the file was deleted
                 # and readded, or renamed to an existing name, or corrupted.
-                if rebuild: # give up since we have tried rebuild already
+                if rebuild:  # give up since we have tried rebuild already
                     raise
-                else: # try a second time rebuilding the cache (slow)
+                else:  # try a second time rebuilding the cache (slow)
                     rebuild = True
                     continue
 
@@ -198,18 +253,22 @@
         formatter.write(result, lines, existinglines=existinglines)
     formatter.end()
 
+
 _newopts = set()
-_knownopts = {opt[1].replace('-', '_') for opt in
-              (fastannotatecommandargs[r'options'] + commands.globalopts)}
+_knownopts = {
+    opt[1].replace(b'-', b'_')
+    for opt in (fastannotatecommandargs[r'options'] + commands.globalopts)
+}
+
 
 def _annotatewrapper(orig, ui, repo, *pats, **opts):
     """used by wrapdefault"""
     # we need this hack until the obsstore has 0.0 seconds perf impact
-    if ui.configbool('fastannotate', 'unfilteredrepo'):
+    if ui.configbool(b'fastannotate', b'unfilteredrepo'):
         repo = repo.unfiltered()
 
     # treat the file as text (skip the isbinary check)
-    if ui.configbool('fastannotate', 'forcetext'):
+    if ui.configbool(b'fastannotate', b'forcetext'):
         opts[r'text'] = True
 
     # check if we need to do prefetch (client-side)
@@ -220,19 +279,24 @@
 
     return orig(ui, repo, *pats, **opts)
 
+
 def registercommand():
     """register the fastannotate command"""
-    name = 'fastannotate|fastblame|fa'
+    name = b'fastannotate|fastblame|fa'
     command(name, helpbasic=True, **fastannotatecommandargs)(fastannotate)
 
+
 def wrapdefault():
     """wrap the default annotate command, to be aware of the protocol"""
-    extensions.wrapcommand(commands.table, 'annotate', _annotatewrapper)
+    extensions.wrapcommand(commands.table, b'annotate', _annotatewrapper)
+
 
-@command('debugbuildannotatecache',
-         [('r', 'rev', '', _('build up to the specific revision'), _('REV'))
-         ] + commands.walkopts,
-         _('[-r REV] FILE...'))
+@command(
+    b'debugbuildannotatecache',
+    [(b'r', b'rev', b'', _(b'build up to the specific revision'), _(b'REV'))]
+    + commands.walkopts,
+    _(b'[-r REV] FILE...'),
+)
 def debugbuildannotatecache(ui, repo, *pats, **opts):
     """incrementally build fastannotate cache up to REV for specified files
 
@@ -245,23 +309,25 @@
     options and lives in '.hg/fastannotate/default'.
     """
     opts = pycompat.byteskwargs(opts)
-    rev = opts.get('REV') or ui.config('fastannotate', 'mainbranch')
+    rev = opts.get(b'rev') or ui.config(b'fastannotate', b'mainbranch')
     if not rev:
-        raise error.Abort(_('you need to provide a revision'),
-                          hint=_('set fastannotate.mainbranch or use --rev'))
-    if ui.configbool('fastannotate', 'unfilteredrepo'):
+        raise error.Abort(
+            _(b'you need to provide a revision'),
+            hint=_(b'set fastannotate.mainbranch or use --rev'),
+        )
+    if ui.configbool(b'fastannotate', b'unfilteredrepo'):
         repo = repo.unfiltered()
     ctx = scmutil.revsingle(repo, rev)
     m = scmutil.match(ctx, pats, opts)
     paths = list(ctx.walk(m))
     if util.safehasattr(repo, 'prefetchfastannotate'):
         # client
-        if opts.get('REV'):
-            raise error.Abort(_('--rev cannot be used for client'))
+        if opts.get(b'rev'):
+            raise error.Abort(_(b'--rev cannot be used for client'))
         repo.prefetchfastannotate(paths)
     else:
         # server, or full repo
-        progress = ui.makeprogress(_('building'), total=len(paths))
+        progress = ui.makeprogress(_(b'building'), total=len(paths))
         for i, path in enumerate(paths):
             progress.update(i)
             with facontext.annotatecontext(repo, path) as actx:
@@ -272,14 +338,20 @@
                 except (faerror.CannotReuseError, faerror.CorruptedFileError):
                     # the cache is broken (could happen with renaming so the
                     # file history gets invalidated). rebuild and try again.
-                    ui.debug('fastannotate: %s: rebuilding broken cache\n'
-                             % path)
+                    ui.debug(
+                        b'fastannotate: %s: rebuilding broken cache\n' % path
+                    )
                     actx.rebuild()
                     try:
                         actx.annotate(rev, rev)
                     except Exception as ex:
                         # possibly a bug, but should not stop us from building
                         # cache for other files.
-                        ui.warn(_('fastannotate: %s: failed to '
-                                  'build cache: %r\n') % (path, ex))
+                        ui.warn(
+                            _(
+                                b'fastannotate: %s: failed to '
+                                b'build cache: %r\n'
+                            )
+                            % (path, ex)
+                        )
         progress.complete()
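
Both fastannotate and debugbuildannotatecache above share the same recovery
shape: try an incremental annotate, and on CannotReuseError or
CorruptedFileError rebuild the cache once before giving up. A minimal
standalone sketch of that retry-once pattern (annotate_once and
rebuild_cache are hypothetical stand-ins for the real annotatecontext
calls, not extension APIs)::

    class CacheError(Exception):
        """stand-in for faerror.CannotReuseError/CorruptedFileError"""

    def annotate_with_retry(annotate_once, rebuild_cache):
        rebuild = False
        while True:
            try:
                return annotate_once(rebuild)
            except CacheError:
                if rebuild:  # we already rebuilt once; give up
                    raise
                rebuild_cache()  # slow path: throw the cache away
                rebuild = True  # and try exactly one more time
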
--- a/hgext/fastannotate/context.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fastannotate/context.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,6 +13,11 @@
 import os
 
 from mercurial.i18n import _
+from mercurial.pycompat import (
+    getattr,
+    open,
+    setattr,
+)
 from mercurial import (
     error,
     linelog as linelogmod,
@@ -23,9 +28,7 @@
     scmutil,
     util,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 from . import (
     error as faerror,
@@ -37,6 +40,7 @@
 def _getflog(repo, path):
     return repo.file(path)
 
+
 # extracted from mercurial.context.basefilectx.annotate
 def _parents(f, follow=True):
     # Cut _descendantrev here to mitigate the penalty of lazy linkrev
@@ -53,20 +57,22 @@
     # renamed filectx won't have a filelog yet, so set it
     # from the cache to save time
     for p in pl:
-        if not '_filelog' in p.__dict__:
+        if not b'_filelog' in p.__dict__:
             p._filelog = _getflog(f._repo, p.path())
 
     return pl
 
+
 # extracted from mercurial.context.basefilectx.annotate. slightly modified
 # so it takes a fctx instead of a pair of text and fctx.
 def _decorate(fctx):
     text = fctx.data()
-    linecount = text.count('\n')
-    if text and not text.endswith('\n'):
+    linecount = text.count(b'\n')
+    if text and not text.endswith(b'\n'):
         linecount += 1
     return ([(fctx, i) for i in pycompat.xrange(linecount)], text)
 
+
 # extracted from mercurial.context.basefilectx.annotate. slightly modified
 # so it takes an extra "blocks" parameter calculated elsewhere, instead of
 # calculating diff here.
@@ -74,14 +80,16 @@
     for (a1, a2, b1, b2), t in blocks:
         # Changed blocks ('!') or blocks made only of blank lines ('~')
         # belong to the child.
-        if t == '=':
+        if t == b'=':
             child[0][b1:b2] = parent[0][a1:a2]
     return child
 
+
 # like scmutil.revsingle, but with lru cache, so their states (like manifests)
 # could be reused
 _revsingle = util.lrucachefunc(scmutil.revsingle)
 
+
 def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
     """(repo, str, str) -> fctx
 
@@ -116,7 +124,7 @@
             fctx = repo.filectx(path, changeid=ctx.rev())
     else:
         fctx = ctx[path]
-        if adjustctx == 'linkrev':
+        if adjustctx == b'linkrev':
             introrev = fctx.linkrev()
         else:
             introrev = fctx.introrev()
@@ -125,23 +133,27 @@
             fctx._changectx = repo[introrev]
     return fctx
 
+
 # like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
 def encodedir(path):
-    return (path
-            .replace('.hg/', '.hg.hg/')
-            .replace('.l/', '.l.hg/')
-            .replace('.m/', '.m.hg/')
-            .replace('.lock/', '.lock.hg/'))
+    return (
+        path.replace(b'.hg/', b'.hg.hg/')
+        .replace(b'.l/', b'.l.hg/')
+        .replace(b'.m/', b'.m.hg/')
+        .replace(b'.lock/', b'.lock.hg/')
+    )
+
 
 def hashdiffopts(diffopts):
-    diffoptstr = stringutil.pprint(sorted(
-        (k, getattr(diffopts, k))
-        for k in mdiff.diffopts.defaults
-    ))
+    diffoptstr = stringutil.pprint(
+        sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults)
+    )
     return node.hex(hashlib.sha1(diffoptstr).digest())[:6]
 
+
 _defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
 
+
 class annotateopts(object):
     """like mercurial.mdiff.diffopts, but is for annotate
 
@@ -150,33 +162,35 @@
     """
 
     defaults = {
-        'diffopts': None,
-        'followrename': True,
-        'followmerge': True,
+        b'diffopts': None,
+        b'followrename': True,
+        b'followmerge': True,
     }
 
     def __init__(self, **opts):
         opts = pycompat.byteskwargs(opts)
-        for k, v in self.defaults.iteritems():
+        for k, v in pycompat.iteritems(self.defaults):
             setattr(self, k, opts.get(k, v))
 
     @util.propertycache
     def shortstr(self):
         """represent opts in a short string, suitable for a directory name"""
-        result = ''
+        result = b''
         if not self.followrename:
-            result += 'r0'
+            result += b'r0'
         if not self.followmerge:
-            result += 'm0'
+            result += b'm0'
         if self.diffopts is not None:
             assert isinstance(self.diffopts, mdiff.diffopts)
             diffopthash = hashdiffopts(self.diffopts)
             if diffopthash != _defaultdiffopthash:
-                result += 'i' + diffopthash
-        return result or 'default'
+                result += b'i' + diffopthash
+        return result or b'default'
+
 
 defaultopts = annotateopts()
 
+
 class _annotatecontext(object):
     """do not use this class directly as it does not use lock to protect
     writes. use "with annotatecontext(...)" instead.
@@ -191,13 +205,13 @@
         self.revmappath = revmappath
         self._linelog = None
         self._revmap = None
-        self._node2path = {} # {str: str}
+        self._node2path = {}  # {str: str}
 
     @property
     def linelog(self):
         if self._linelog is None:
             if os.path.exists(self.linelogpath):
-                with open(self.linelogpath, 'rb') as f:
+                with open(self.linelogpath, b'rb') as f:
                     try:
                         self._linelog = linelogmod.linelog.fromdata(f.read())
                     except linelogmod.LineLogError:
@@ -217,7 +231,7 @@
             self._revmap.flush()
             self._revmap = None
         if self._linelog is not None:
-            with open(self.linelogpath, 'wb') as f:
+            with open(self.linelogpath, b'wb') as f:
                 f.write(self._linelog.encode())
             self._linelog = None
 
@@ -298,23 +312,27 @@
         directly, revfctx = self.canannotatedirectly(rev)
         if directly:
             if self.ui.debugflag:
-                self.ui.debug('fastannotate: %s: using fast path '
-                              '(resolved fctx: %s)\n'
-                              % (self.path,
-                                 stringutil.pprint(util.safehasattr(revfctx,
-                                                                    'node'))))
+                self.ui.debug(
+                    b'fastannotate: %s: using fast path '
+                    b'(resolved fctx: %s)\n'
+                    % (
+                        self.path,
+                        stringutil.pprint(util.safehasattr(revfctx, b'node')),
+                    )
+                )
             return self.annotatedirectly(revfctx, showpath, showlines)
 
         # resolve master
         masterfctx = None
         if master:
             try:
-                masterfctx = self._resolvefctx(master, resolverev=True,
-                                               adjustctx=True)
-            except LookupError: # master does not have the file
+                masterfctx = self._resolvefctx(
+                    master, resolverev=True, adjustctx=True
+                )
+            except LookupError:  # master does not have the file
                 pass
             else:
-                if masterfctx in self.revmap: # no need to update linelog
+                if masterfctx in self.revmap:  # no need to update linelog
                     masterfctx = None
 
         #                  ... - @ <- rev (can be an arbitrary changeset,
@@ -342,18 +360,20 @@
         initvisit = [revfctx]
         if masterfctx:
             if masterfctx.rev() is None:
-                raise error.Abort(_('cannot update linelog to wdir()'),
-                                  hint=_('set fastannotate.mainbranch'))
+                raise error.Abort(
+                    _(b'cannot update linelog to wdir()'),
+                    hint=_(b'set fastannotate.mainbranch'),
+                )
             initvisit.append(masterfctx)
         visit = initvisit[:]
         pcache = {}
         needed = {revfctx: 1}
-        hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
+        hist = {}  # {fctx: ([(llrev or fctx, linenum)], text)}
         while visit:
             f = visit.pop()
             if f in pcache or f in hist:
                 continue
-            if f in self.revmap: # in the old main branch, it's a joint
+            if f in self.revmap:  # in the old main branch, it's a joint
                 llrev = self.revmap.hsh2rev(f.node())
                 self.linelog.annotate(llrev)
                 result = self.linelog.annotateresult
@@ -387,19 +407,24 @@
 
         if self.ui.debugflag:
             if newmainbranch:
-                self.ui.debug('fastannotate: %s: %d new changesets in the main'
-                              ' branch\n' % (self.path, len(newmainbranch)))
-            elif not hist: # no joints, no updates
-                self.ui.debug('fastannotate: %s: linelog cannot help in '
-                              'annotating this revision\n' % self.path)
+                self.ui.debug(
+                    b'fastannotate: %s: %d new changesets in the main'
+                    b' branch\n' % (self.path, len(newmainbranch))
+                )
+            elif not hist:  # no joints, no updates
+                self.ui.debug(
+                    b'fastannotate: %s: linelog cannot help in '
+                    b'annotating this revision\n' % self.path
+                )
 
         # prepare annotateresult so we can update linelog incrementally
         self.linelog.annotate(self.linelog.maxrev)
 
         # 3rd DFS does the actual annotate
         visit = initvisit[:]
-        progress = self.ui.makeprogress(('building cache'),
-                                        total=len(newmainbranch))
+        progress = self.ui.makeprogress(
+            b'building cache', total=len(newmainbranch)
+        )
         while visit:
             f = visit[-1]
             if f in hist:
@@ -416,8 +441,8 @@
                 continue
 
             visit.pop()
-            blocks = None # mdiff blocks, used for appending linelog
-            ismainbranch = (f in newmainbranch)
+            blocks = None  # mdiff blocks, used for appending linelog
+            ismainbranch = f in newmainbranch
             # curr is the same as the traditional annotate algorithm,
             # if we only care about linear history (do not follow merge),
             # then curr is not actually used.
@@ -437,22 +462,23 @@
             hist[f] = curr
             del pcache[f]
 
-            if ismainbranch: # need to write to linelog
+            if ismainbranch:  # need to write to linelog
                 progress.increment()
                 bannotated = None
-                if len(pl) == 2 and self.opts.followmerge: # merge
+                if len(pl) == 2 and self.opts.followmerge:  # merge
                     bannotated = curr[0]
-                if blocks is None: # no parents, add an empty one
-                    blocks = list(self._diffblocks('', curr[1]))
+                if blocks is None:  # no parents, add an empty one
+                    blocks = list(self._diffblocks(b'', curr[1]))
                 self._appendrev(f, blocks, bannotated)
-            elif showpath: # not append linelog, but we need to record path
+            elif showpath:  # not append linelog, but we need to record path
                 self._node2path[f.node()] = f.path()
 
         progress.complete()
 
         result = [
             ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
-            for fr, l in hist[revfctx][0]] # [(node, linenumber)]
+            for fr, l in hist[revfctx][0]
+        ]  # [(node, linenumber)]
         return self._refineannotateresult(result, revfctx, showpath, showlines)
 
     def canannotatedirectly(self, rev):
@@ -469,7 +495,7 @@
             if hsh is not None and (hsh, self.path) in self.revmap:
                 f = hsh
         if f is None:
-            adjustctx = 'linkrev' if self._perfhack else True
+            adjustctx = b'linkrev' if self._perfhack else True
             f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
             result = f in self.revmap
             if not result and self._perfhack:
@@ -490,7 +516,7 @@
         # find a chain from rev to anything in the mainbranch
         if revfctx not in self.revmap:
             chain = [revfctx]
-            a = ''
+            a = b''
             while True:
                 f = chain[-1]
                 pl = self._parentfunc(f)
@@ -552,24 +578,26 @@
             # find an unresolved line and its linelog rev to annotate
             hsh = None
             try:
-                for (rev, _linenum), idxs in key2idxs.iteritems():
+                for (rev, _linenum), idxs in pycompat.iteritems(key2idxs):
                     if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
                         continue
                     hsh = annotateresult[idxs[0]][0]
                     break
-            except StopIteration: # no more unresolved lines
+            except StopIteration:  # no more unresolved lines
                 return result
             if hsh is None:
                 # the remaining key2idxs are not in main branch, resolving them
                 # using the hard way...
                 revlines = {}
-                for (rev, linenum), idxs in key2idxs.iteritems():
+                for (rev, linenum), idxs in pycompat.iteritems(key2idxs):
                     if rev not in revlines:
                         hsh = annotateresult[idxs[0]][0]
                         if self.ui.debugflag:
-                            self.ui.debug('fastannotate: reading %s line #%d '
-                                          'to resolve lines %r\n'
-                                          % (node.short(hsh), linenum, idxs))
+                            self.ui.debug(
+                                b'fastannotate: reading %s line #%d '
+                                b'to resolve lines %r\n'
+                                % (node.short(hsh), linenum, idxs)
+                            )
                         fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
                         lines = mdiff.splitnewlines(fctx.data())
                         revlines[rev] = lines
@@ -579,14 +607,16 @@
                 return result
 
             # run the annotate and the lines should match to the file content
-            self.ui.debug('fastannotate: annotate %s to resolve lines\n'
-                          % node.short(hsh))
+            self.ui.debug(
+                b'fastannotate: annotate %s to resolve lines\n'
+                % node.short(hsh)
+            )
             linelog.annotate(rev)
             fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
             annotated = linelog.annotateresult
             lines = mdiff.splitnewlines(fctx.data())
             if len(lines) != len(annotated):
-                raise faerror.CorruptedFileError('unexpected annotated lines')
+                raise faerror.CorruptedFileError(b'unexpected annotated lines')
             # resolve lines from the annotate result
             for i, line in enumerate(lines):
                 k = annotated[i]
@@ -608,14 +638,17 @@
             hsh = f.node()
         llrev = self.revmap.hsh2rev(hsh)
         if not llrev:
-            raise faerror.CorruptedFileError('%s is not in revmap'
-                                             % node.hex(hsh))
+            raise faerror.CorruptedFileError(
+                b'%s is not in revmap' % node.hex(hsh)
+            )
         if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
-            raise faerror.CorruptedFileError('%s is not in revmap mainbranch'
-                                             % node.hex(hsh))
+            raise faerror.CorruptedFileError(
+                b'%s is not in revmap mainbranch' % node.hex(hsh)
+            )
         self.linelog.annotate(llrev)
-        result = [(self.revmap.rev2hsh(r), l)
-                  for r, l in self.linelog.annotateresult]
+        result = [
+            (self.revmap.rev2hsh(r), l) for r, l in self.linelog.annotateresult
+        ]
         return self._refineannotateresult(result, f, showpath, showlines)
 
     def _refineannotateresult(self, result, f, showpath, showlines):
@@ -625,13 +658,13 @@
         if showpath:
             result = self._addpathtoresult(result)
         if showlines:
-            if isinstance(f, bytes): # f: node or fctx
+            if isinstance(f, bytes):  # f: node or fctx
                 llrev = self.revmap.hsh2rev(f)
                 fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
             else:
                 fctx = f
             lines = mdiff.splitnewlines(fctx.data())
-            if len(lines) != len(result): # linelog is probably corrupted
+            if len(lines) != len(result):  # linelog is probably corrupted
                 raise faerror.CorruptedFileError()
             result = (result, lines)
         return result
@@ -650,7 +683,7 @@
             """(fctx) -> int"""
             # f should not be a linelog revision
             if isinstance(f, int):
-                raise error.ProgrammingError('f should not be an int')
+                raise error.ProgrammingError(b'f should not be an int')
             # f is a fctx, allocate linelog rev on demand
             hsh = f.node()
             rev = revmap.hsh2rev(hsh)
@@ -660,15 +693,18 @@
 
         # append sidebranch revisions to revmap
         siderevs = []
-        siderevmap = {} # node: int
+        siderevmap = {}  # node: int
         if bannotated is not None:
             for (a1, a2, b1, b2), op in blocks:
-                if op != '=':
+                if op != b'=':
                     # f could be either linelog rev, or fctx.
-                    siderevs += [f for f, l in bannotated[b1:b2]
-                                 if not isinstance(f, int)]
+                    siderevs += [
+                        f
+                        for f, l in bannotated[b1:b2]
+                        if not isinstance(f, int)
+                    ]
         siderevs = set(siderevs)
-        if fctx in siderevs: # mainnode must be appended seperately
+        if fctx in siderevs:  # mainnode must be appended separately
             siderevs.remove(fctx)
         for f in siderevs:
             siderevmap[f] = getllrev(f)
@@ -678,13 +714,15 @@
         siderevmap[fctx] = llrev
 
         for (a1, a2, b1, b2), op in reversed(blocks):
-            if op == '=':
+            if op == b'=':
                 continue
             if bannotated is None:
                 linelog.replacelines(llrev, a1, a2, b1, b2)
             else:
-                blines = [((r if isinstance(r, int) else siderevmap[r]), l)
-                          for r, l in bannotated[b1:b2]]
+                blines = [
+                    ((r if isinstance(r, int) else siderevmap[r]), l)
+                    for r, l in bannotated[b1:b2]
+                ]
                 linelog.replacelines_vec(llrev, a1, a2, blines)
 
     def _addpathtoresult(self, annotateresult, revmap=None):
@@ -717,20 +755,23 @@
         """-> (fctx) -> [fctx]"""
         followrename = self.opts.followrename
         followmerge = self.opts.followmerge
+
         def parents(f):
             pl = _parents(f, follow=followrename)
             if not followmerge:
                 pl = pl[:1]
             return pl
+
         return parents
 
     @util.propertycache
     def _perfhack(self):
-        return self.ui.configbool('fastannotate', 'perfhack')
+        return self.ui.configbool(b'fastannotate', b'perfhack')
 
     def _resolvefctx(self, rev, path=None, **kwds):
         return resolvefctx(self.repo, rev, (path or self.path), **kwds)
 
+
 def _unlinkpaths(paths):
     """silent, best-effort unlink"""
     for path in paths:
@@ -739,13 +780,15 @@
         except OSError:
             pass
 
+
 class pathhelper(object):
     """helper for getting paths for lockfile, linelog and revmap"""
 
     def __init__(self, repo, path, opts=defaultopts):
         # different options use different directories
-        self._vfspath = os.path.join('fastannotate',
-                                     opts.shortstr, encodedir(path))
+        self._vfspath = os.path.join(
+            b'fastannotate', opts.shortstr, encodedir(path)
+        )
         self._repo = repo
 
     @property
@@ -754,29 +797,15 @@
 
     @property
     def linelogpath(self):
-        return self._repo.vfs.join(self._vfspath + '.l')
+        return self._repo.vfs.join(self._vfspath + b'.l')
 
     def lock(self):
-        return lockmod.lock(self._repo.vfs, self._vfspath + '.lock')
-
-    @contextlib.contextmanager
-    def _lockflock(self):
-        """the same as 'lock' but use flock instead of lockmod.lock, to avoid
-        creating temporary symlinks."""
-        import fcntl
-        lockpath = self.linelogpath
-        util.makedirs(os.path.dirname(lockpath))
-        lockfd = os.open(lockpath, os.O_RDONLY | os.O_CREAT, 0o664)
-        fcntl.flock(lockfd, fcntl.LOCK_EX)
-        try:
-            yield
-        finally:
-            fcntl.flock(lockfd, fcntl.LOCK_UN)
-            os.close(lockfd)
+        return lockmod.lock(self._repo.vfs, self._vfspath + b'.lock')
 
     @property
     def revmappath(self):
-        return self._repo.vfs.join(self._vfspath + '.m')
+        return self._repo.vfs.join(self._vfspath + b'.m')
+
 
 @contextlib.contextmanager
 def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
@@ -808,19 +837,20 @@
     except Exception:
         if actx is not None:
             actx.rebuild()
-        repo.ui.debug('fastannotate: %s: cache broken and deleted\n' % path)
+        repo.ui.debug(b'fastannotate: %s: cache broken and deleted\n' % path)
         raise
     finally:
         if actx is not None:
             actx.close()
 
+
 def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
     """like annotatecontext but get the context from a fctx. convenient when
     used in fctx.annotate
     """
     repo = fctx._repo
     path = fctx._path
-    if repo.ui.configbool('fastannotate', 'forcefollow', True):
+    if repo.ui.configbool(b'fastannotate', b'forcefollow', True):
         follow = True
     aopts = annotateopts(diffopts=diffopts, followrename=follow)
     return annotatecontext(repo, path, aopts, rebuild)
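
The cache layout encoded by annotateopts.shortstr and encodedir above maps
each option combination to its own directory under .hg/fastannotate/ (r0
for no rename-following, m0 for no merge-following, i<hash> for custom
diffopts), with the reserved .l/.m/.lock suffixes escaped in the path. A
standalone sketch of that naming scheme, reusing the logic from the hunks
above (illustrative values only, not taken from a live repository)::

    def shortstr(followrename=True, followmerge=True, diffopthash=None):
        # mirrors annotateopts.shortstr: empty string means defaults
        result = b''
        if not followrename:
            result += b'r0'
        if not followmerge:
            result += b'm0'
        if diffopthash:  # only set when diffopts differ from the defaults
            result += b'i' + diffopthash
        return result or b'default'

    def encodedir(path):
        # .l, .m and .lock suffixes are reserved for linelog/revmap/lock
        return (
            path.replace(b'.hg/', b'.hg.hg/')
            .replace(b'.l/', b'.l.hg/')
            .replace(b'.m/', b'.m.hg/')
            .replace(b'.lock/', b'.lock.hg/')
        )

    assert shortstr() == b'default'
    assert shortstr(followmerge=False) == b'm0'
    assert encodedir(b'a.l/b') == b'a.l.hg/b'
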
--- a/hgext/fastannotate/error.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fastannotate/error.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,8 +6,10 @@
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 
+
 class CorruptedFileError(Exception):
     pass
 
+
 class CannotReuseError(Exception):
     """cannot reuse or update the cache incrementally"""
--- a/hgext/fastannotate/formatter.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fastannotate/formatter.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,9 +13,7 @@
     templatefilters,
     util,
 )
-from mercurial.utils import (
-        dateutil,
-)
+from mercurial.utils import dateutil
 
 # imitating mercurial.commands.annotate, not using the vanilla formatter since
 # the data structures are a bit different, and we have some fast paths.
@@ -35,82 +33,89 @@
         hexfunc = self._hexfunc
 
         # special handling working copy "changeset" and "rev" functions
-        if self.opts.get('rev') == 'wdir()':
+        if self.opts.get(b'rev') == b'wdir()':
             orig = hexfunc
             hexfunc = lambda x: None if x is None else orig(x)
-            wnode = hexfunc(repo['.'].node()) + '+'
-            wrev = '%d' % repo['.'].rev()
-            wrevpad = ''
-            if not opts.get('changeset'): # only show + if changeset is hidden
-                wrev += '+'
-                wrevpad = ' '
-            revenc = lambda x: wrev if x is None else ('%d' % x) + wrevpad
+            wnode = hexfunc(repo[b'.'].node()) + b'+'
+            wrev = b'%d' % repo[b'.'].rev()
+            wrevpad = b''
+            if not opts.get(b'changeset'):  # only show + if changeset is hidden
+                wrev += b'+'
+                wrevpad = b' '
+            revenc = lambda x: wrev if x is None else (b'%d' % x) + wrevpad
+
             def csetenc(x):
                 if x is None:
                     return wnode
-                return pycompat.bytestr(x) + ' '
+                return pycompat.bytestr(x) + b' '
+
         else:
             revenc = csetenc = pycompat.bytestr
 
         # opt name, separator, raw value (for json/plain), encoder (for plain)
-        opmap = [('user', ' ', lambda x: getctx(x).user(), ui.shortuser),
-                 ('number', ' ', lambda x: getctx(x).rev(), revenc),
-                 ('changeset', ' ', lambda x: hexfunc(x[0]), csetenc),
-                 ('date', ' ', lambda x: getctx(x).date(), datefunc),
-                 ('file', ' ', lambda x: x[2], pycompat.bytestr),
-                 ('line_number', ':', lambda x: x[1] + 1, pycompat.bytestr)]
-        fieldnamemap = {'number': 'rev', 'changeset': 'node'}
-        funcmap = [(get, sep, fieldnamemap.get(op, op), enc)
-                   for op, sep, get, enc in opmap
-                   if opts.get(op)]
+        opmap = [
+            (b'user', b' ', lambda x: getctx(x).user(), ui.shortuser),
+            (b'number', b' ', lambda x: getctx(x).rev(), revenc),
+            (b'changeset', b' ', lambda x: hexfunc(x[0]), csetenc),
+            (b'date', b' ', lambda x: getctx(x).date(), datefunc),
+            (b'file', b' ', lambda x: x[2], pycompat.bytestr),
+            (b'line_number', b':', lambda x: x[1] + 1, pycompat.bytestr),
+        ]
+        fieldnamemap = {b'number': b'rev', b'changeset': b'node'}
+        funcmap = [
+            (get, sep, fieldnamemap.get(op, op), enc)
+            for op, sep, get, enc in opmap
+            if opts.get(op)
+        ]
         # no separator for first column
         funcmap[0] = list(funcmap[0])
-        funcmap[0][1] = ''
+        funcmap[0][1] = b''
         self.funcmap = funcmap
 
     def write(self, annotatedresult, lines=None, existinglines=None):
         """(annotateresult, [str], set([rev, linenum])) -> None. write output.
         annotateresult can be [(node, linenum, path)], or [(node, linenum)]
         """
-        pieces = [] # [[str]]
-        maxwidths = [] # [int]
+        pieces = []  # [[str]]
+        maxwidths = []  # [int]
 
         # calculate padding
         for f, sep, name, enc in self.funcmap:
             l = [enc(f(x)) for x in annotatedresult]
             pieces.append(l)
-            if name in ['node', 'date']: # node and date has fixed size
+            if name in [b'node', b'date']:  # node and date has fixed size
                 l = l[:1]
             widths = pycompat.maplist(encoding.colwidth, set(l))
-            maxwidth = (max(widths) if widths else 0)
+            maxwidth = max(widths) if widths else 0
             maxwidths.append(maxwidth)
 
         # buffered output
-        result = ''
+        result = b''
         for i in pycompat.xrange(len(annotatedresult)):
             for j, p in enumerate(pieces):
                 sep = self.funcmap[j][1]
-                padding = ' ' * (maxwidths[j] - len(p[i]))
+                padding = b' ' * (maxwidths[j] - len(p[i]))
                 result += sep + padding + p[i]
             if lines:
                 if existinglines is None:
-                    result += ': ' + lines[i]
-                else: # extra formatting showing whether a line exists
+                    result += b': ' + lines[i]
+                else:  # extra formatting showing whether a line exists
                     key = (annotatedresult[i][0], annotatedresult[i][1])
                     if key in existinglines:
-                        result += ':  ' + lines[i]
+                        result += b':  ' + lines[i]
                     else:
-                        result += ': ' + self.ui.label('-' + lines[i],
-                                                       'diff.deleted')
+                        result += b': ' + self.ui.label(
+                            b'-' + lines[i], b'diff.deleted'
+                        )
 
-            if result[-1:] != '\n':
-                result += '\n'
+            if result[-1:] != b'\n':
+                result += b'\n'
 
         self.ui.write(result)
 
     @util.propertycache
     def _hexfunc(self):
-        if self.ui.debugflag or self.opts.get('long_hash'):
+        if self.ui.debugflag or self.opts.get(b'long_hash'):
             return node.hex
         else:
             return node.short
@@ -118,34 +123,39 @@
     def end(self):
         pass
 
+
 class jsonformatter(defaultformatter):
     def __init__(self, ui, repo, opts):
         super(jsonformatter, self).__init__(ui, repo, opts)
-        self.ui.write('[')
+        self.ui.write(b'[')
         self.needcomma = False
 
     def write(self, annotatedresult, lines=None, existinglines=None):
         if annotatedresult:
             self._writecomma()
 
-        pieces = [(name, pycompat.maplist(f, annotatedresult))
-                  for f, sep, name, enc in self.funcmap]
+        pieces = [
+            (name, pycompat.maplist(f, annotatedresult))
+            for f, sep, name, enc in self.funcmap
+        ]
         if lines is not None:
-            pieces.append(('line', lines))
+            pieces.append((b'line', lines))
         pieces.sort()
 
-        seps = [','] * len(pieces[:-1]) + ['']
+        seps = [b','] * len(pieces[:-1]) + [b'']
 
-        result = ''
+        result = b''
         lasti = len(annotatedresult) - 1
         for i in pycompat.xrange(len(annotatedresult)):
-            result += '\n {\n'
+            result += b'\n {\n'
             for j, p in enumerate(pieces):
                 k, vs = p
-                result += ('  "%s": %s%s\n'
-                           % (k, templatefilters.json(vs[i], paranoid=False),
-                              seps[j]))
-            result += ' }%s' % ('' if i == lasti else ',')
+                result += b'  "%s": %s%s\n' % (
+                    k,
+                    templatefilters.json(vs[i], paranoid=False),
+                    seps[j],
+                )
+            result += b' }%s' % (b'' if i == lasti else b',')
         if lasti >= 0:
             self.needcomma = True
 
@@ -153,7 +163,7 @@
 
     def _writecomma(self):
         if self.needcomma:
-            self.ui.write(',')
+            self.ui.write(b',')
             self.needcomma = False
 
     @util.propertycache
@@ -161,4 +171,4 @@
         return node.hex
 
     def end(self):
-        self.ui.write('\n]\n')
+        self.ui.write(b'\n]\n')
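
jsonformatter above streams a JSON array instead of buffering it: '[' is
written in the constructor, a comma is inserted only before the second and
later non-empty batches, and end() closes the array. A minimal sketch of
that streaming pattern using plain strings (write defaults to stdout here;
the real code goes through ui.write with bytes)::

    import sys

    class streamingarray(object):
        def __init__(self, write=sys.stdout.write):
            self._write = write
            self._write('[')  # open the array up front
            self.needcomma = False

        def writebatch(self, items):
            if not items:
                return
            if self.needcomma:  # separate this batch from the previous one
                self._write(',')
            self._write(','.join('\n %s' % i for i in items))
            self.needcomma = True

        def end(self):
            self._write('\n]\n')

    s = streamingarray()
    s.writebatch(['1', '2'])
    s.writebatch(['3'])
    s.end()  # emits the JSON array [1, 2, 3] across several writes
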
--- a/hgext/fastannotate/protocol.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fastannotate/protocol.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,10 +10,12 @@
 import os
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     error,
     extensions,
     hg,
+    pycompat,
     util,
     wireprotov1peer,
     wireprotov1server,
@@ -22,28 +24,37 @@
 
 # common
 
+
 def _getmaster(ui):
     """get the mainbranch, and enforce it is set"""
-    master = ui.config('fastannotate', 'mainbranch')
+    master = ui.config(b'fastannotate', b'mainbranch')
     if not master:
-        raise error.Abort(_('fastannotate.mainbranch is required '
-                            'for both the client and the server'))
+        raise error.Abort(
+            _(
+                b'fastannotate.mainbranch is required '
+                b'for both the client and the server'
+            )
+        )
     return master
 
+
 # server-side
 
+
 def _capabilities(orig, repo, proto):
     result = orig(repo, proto)
-    result.append('getannotate')
+    result.append(b'getannotate')
     return result
 
+
 def _getannotate(repo, proto, path, lastnode):
     # output:
     #   FILE := vfspath + '\0' + str(size) + '\0' + content
     #   OUTPUT := '' | FILE + OUTPUT
-    result = ''
-    buildondemand = repo.ui.configbool('fastannotate', 'serverbuildondemand',
-                                       True)
+    result = b''
+    buildondemand = repo.ui.configbool(
+        b'fastannotate', b'serverbuildondemand', True
+    )
     with context.annotatecontext(repo, path) as actx:
         if buildondemand:
             # update before responding to the client
@@ -57,7 +68,7 @@
                 try:
                     actx.annotate(master, master)
                 except Exception:
-                    actx.rebuild() # delete files
+                    actx.rebuild()  # delete files
             finally:
                 # although the "with" context will also do a close/flush, we
                 # need to do it early so we can send the correct response to
@@ -71,39 +82,44 @@
             for p in [actx.revmappath, actx.linelogpath]:
                 if not os.path.exists(p):
                     continue
-                with open(p, 'rb') as f:
+                with open(p, b'rb') as f:
                     content = f.read()
-                vfsbaselen = len(repo.vfs.base + '/')
+                vfsbaselen = len(repo.vfs.base + b'/')
                 relpath = p[vfsbaselen:]
-                result += '%s\0%d\0%s' % (relpath, len(content), content)
+                result += b'%s\0%d\0%s' % (relpath, len(content), content)
     return result
 
+
 def _registerwireprotocommand():
-    if 'getannotate' in wireprotov1server.commands:
+    if b'getannotate' in wireprotov1server.commands:
         return
-    wireprotov1server.wireprotocommand(
-        'getannotate', 'path lastnode')(_getannotate)
+    wireprotov1server.wireprotocommand(b'getannotate', b'path lastnode')(
+        _getannotate
+    )
+
 
 def serveruisetup(ui):
     _registerwireprotocommand()
-    extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)
+    extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)
+
 
 # client-side
 
+
 def _parseresponse(payload):
     result = {}
     i = 0
     l = len(payload) - 1
-    state = 0 # 0: vfspath, 1: size
-    vfspath = size = ''
+    state = 0  # 0: vfspath, 1: size
+    vfspath = size = b''
     while i < l:
-        ch = payload[i:i + 1]
-        if ch == '\0':
+        ch = payload[i : i + 1]
+        if ch == b'\0':
             if state == 1:
-                result[vfspath] = payload[i + 1:i + 1 + int(size)]
+                result[vfspath] = payload[i + 1 : i + 1 + int(size)]
                 i += int(size)
                 state = 0
-                vfspath = size = ''
+                vfspath = size = b''
             elif state == 0:
                 state = 1
         else:
@@ -114,26 +130,30 @@
         i += 1
     return result
 
+
 def peersetup(ui, peer):
     class fastannotatepeer(peer.__class__):
         @wireprotov1peer.batchable
         def getannotate(self, path, lastnode=None):
-            if not self.capable('getannotate'):
-                ui.warn(_('remote peer cannot provide annotate cache\n'))
+            if not self.capable(b'getannotate'):
+                ui.warn(_(b'remote peer cannot provide annotate cache\n'))
                 yield None, None
             else:
-                args = {'path': path, 'lastnode': lastnode or ''}
+                args = {b'path': path, b'lastnode': lastnode or b''}
                 f = wireprotov1peer.future()
                 yield args, f
                 yield _parseresponse(f.value)
+
     peer.__class__ = fastannotatepeer
 
+
 @contextlib.contextmanager
 def annotatepeer(repo):
     ui = repo.ui
 
     remotepath = ui.expandpath(
-        ui.config('fastannotate', 'remotepath', 'default'))
+        ui.config(b'fastannotate', b'remotepath', b'default')
+    )
     peer = hg.peer(ui, {}, remotepath)
 
     try:
@@ -141,6 +161,7 @@
     finally:
         peer.close()
 
+
 def clientfetch(repo, paths, lastnodemap=None, peer=None):
     """download annotate cache from the server for paths"""
     if not paths:
@@ -156,35 +177,44 @@
     ui = repo.ui
     results = []
     with peer.commandexecutor() as batcher:
-        ui.debug('fastannotate: requesting %d files\n' % len(paths))
+        ui.debug(b'fastannotate: requesting %d files\n' % len(paths))
         for p in paths:
-            results.append(batcher.callcommand(
-                'getannotate',
-                {'path': p, 'lastnode':lastnodemap.get(p)}))
+            results.append(
+                batcher.callcommand(
+                    b'getannotate',
+                    {b'path': p, b'lastnode': lastnodemap.get(p)},
+                )
+            )
 
         for result in results:
             r = result.result()
             # TODO: pconvert these paths on the server?
-            r = {util.pconvert(p): v for p, v in r.iteritems()}
+            r = {util.pconvert(p): v for p, v in pycompat.iteritems(r)}
             for path in sorted(r):
                 # ignore malicious paths
-                if (not path.startswith('fastannotate/')
-                    or '/../' in (path + '/')):
-                    ui.debug('fastannotate: ignored malicious path %s\n' % path)
+                if not path.startswith(b'fastannotate/') or b'/../' in (
+                    path + b'/'
+                ):
+                    ui.debug(
+                        b'fastannotate: ignored malicious path %s\n' % path
+                    )
                     continue
                 content = r[path]
                 if ui.debugflag:
-                    ui.debug('fastannotate: writing %d bytes to %s\n'
-                             % (len(content), path))
+                    ui.debug(
+                        b'fastannotate: writing %d bytes to %s\n'
+                        % (len(content), path)
+                    )
                 repo.vfs.makedirs(os.path.dirname(path))
-                with repo.vfs(path, 'wb') as f:
+                with repo.vfs(path, b'wb') as f:
                     f.write(content)
 
+
 def _filterfetchpaths(repo, paths):
     """return a subset of paths whose history is long and need to fetch linelog
     from the server. works with remotefilelog and non-remotefilelog repos.
     """
-    threshold = repo.ui.configint('fastannotate', 'clientfetchthreshold', 10)
+    threshold = repo.ui.configint(b'fastannotate', b'clientfetchthreshold', 10)
     if threshold <= 0:
         return paths
 
@@ -193,11 +223,12 @@
         try:
             if len(repo.file(path)) >= threshold:
                 result.append(path)
-        except Exception: # file not found etc.
+        except Exception:  # file not found etc.
             result.append(path)
 
     return result
 
+
 def localreposetup(ui, repo):
     class fastannotaterepo(repo.__class__):
         def prefetchfastannotate(self, paths, peer=None):
@@ -214,9 +245,11 @@
                     clientfetch(self, needupdatepaths, lastnodemap, peer)
             except Exception as ex:
                 # could be directory not writable or so, not fatal
-                self.ui.debug('fastannotate: prefetch failed: %r\n' % ex)
+                self.ui.debug(b'fastannotate: prefetch failed: %r\n' % ex)
+
     repo.__class__ = fastannotaterepo
 
+
 def clientreposetup(ui, repo):
     _registerwireprotocommand()
     if repo.local():
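
The getannotate payload format is spelled out in _getannotate above: zero
or more records of vfspath + '\0' + decimal size + '\0' + raw content,
with the explicit size allowing NUL bytes inside the content itself. A
hedged round-trip sketch of that framing (standalone helpers, not the
extension's actual encoder/parser)::

    def encodefiles(files):
        """{vfspath(bytes): content(bytes)} -> payload bytes"""
        payload = b''
        for path, content in sorted(files.items()):
            payload += b'%s\0%d\0%s' % (path, len(content), content)
        return payload

    def decodefiles(payload):
        files = {}
        i = 0
        while i < len(payload):
            j = payload.index(b'\0', i)  # end of vfspath
            k = payload.index(b'\0', j + 1)  # end of decimal size
            size = int(payload[j + 1:k])
            files[payload[i:j]] = payload[k + 1:k + 1 + size]
            i = k + 1 + size  # jump over the raw content
        return files

    sample = {b'fastannotate/default/a.l': b'line\x00log', b'a.m': b''}
    assert decodefiles(encodefiles(sample)) == sample
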
--- a/hgext/fastannotate/revmap.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fastannotate/revmap.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,6 +13,7 @@
 import struct
 
 from mercurial.node import hex
+from mercurial.pycompat import open
 from mercurial import (
     error as hgerror,
     pycompat,
@@ -47,6 +48,7 @@
 # len(mercurial.node.nullid)
 _hshlen = 20
 
+
 class revmap(object):
     """trivial hg bin hash - linelog rev bidirectional map
 
@@ -69,7 +71,7 @@
         # since rename does not happen frequently, do not store path for every
         # revision. self._renamerevs can be used for bisecting.
         self._renamerevs = [0]
-        self._renamepaths = ['']
+        self._renamepaths = [b'']
         self._lastmaxrev = -1
         if path:
             if os.path.exists(path):
@@ -97,9 +99,13 @@
         if flush is True, incrementally update the file.
         """
         if hsh in self._hsh2rev:
-            raise error.CorruptedFileError('%r is in revmap already' % hex(hsh))
+            raise error.CorruptedFileError(
+                b'%r is in revmap already' % hex(hsh)
+            )
         if len(hsh) != _hshlen:
-            raise hgerror.ProgrammingError('hsh must be %d-char long' % _hshlen)
+            raise hgerror.ProgrammingError(
+                b'hsh must be %d-char long' % _hshlen
+            )
         idx = len(self._rev2hsh)
         flag = 0
         if sidebranch:
@@ -148,7 +154,7 @@
         self._rev2hsh = [None]
         self._rev2flag = [None]
         self._hsh2rev = {}
-        self._rev2path = ['']
+        self._rev2path = [b'']
         self._lastmaxrev = -1
         if flush:
             self.flush()
@@ -157,15 +163,16 @@
         """write the state down to the file"""
         if not self.path:
             return
-        if self._lastmaxrev == -1: # write the entire file
-            with open(self.path, 'wb') as f:
+        if self._lastmaxrev == -1:  # write the entire file
+            with open(self.path, b'wb') as f:
                 f.write(self.HEADER)
                 for i in pycompat.xrange(1, len(self._rev2hsh)):
                     self._writerev(i, f)
-        else: # append incrementally
-            with open(self.path, 'ab') as f:
-                for i in pycompat.xrange(self._lastmaxrev + 1,
-                                         len(self._rev2hsh)):
+        else:  # append incrementally
+            with open(self.path, b'ab') as f:
+                for i in pycompat.xrange(
+                    self._lastmaxrev + 1, len(self._rev2hsh)
+                ):
                     self._writerev(i, f)
         self._lastmaxrev = self.maxrev
 
@@ -177,7 +184,7 @@
         # which is faster than both LOAD_CONST and LOAD_GLOBAL.
         flaglen = 1
         hshlen = _hshlen
-        with open(self.path, 'rb') as f:
+        with open(self.path, b'rb') as f:
             if f.read(len(self.HEADER)) != self.HEADER:
                 raise error.CorruptedFileError()
             self.clear(flush=False)
@@ -203,23 +210,23 @@
         """append a revision data to file"""
         flag = self._rev2flag[rev]
         hsh = self._rev2hsh[rev]
-        f.write(struct.pack('B', flag))
+        f.write(struct.pack(b'B', flag))
         if flag & renameflag:
             path = self.rev2path(rev)
             if path is None:
-                raise error.CorruptedFileError('cannot find path for %s' % rev)
+                raise error.CorruptedFileError(b'cannot find path for %d' % rev)
             f.write(path + b'\0')
         f.write(hsh)
 
     @staticmethod
     def _readcstr(f):
         """read a C-language-like '\0'-terminated string"""
-        buf = ''
+        buf = b''
         while True:
             ch = f.read(1)
-            if not ch: # unexpected eof
+            if not ch:  # unexpected eof
                 raise error.CorruptedFileError()
-            if ch == '\0':
+            if ch == b'\0':
                 break
             buf += ch
         return buf
@@ -229,9 +236,9 @@
         test if (node, path) is in the map, and is not in a side branch.
         f can be either a tuple of (node, path), or a fctx.
         """
-        if isinstance(f, tuple): # f: (node, path)
+        if isinstance(f, tuple):  # f: (node, path)
             hsh, path = f
-        else: # f: fctx
+        else:  # f: fctx
             hsh, path = f.node(), f.path()
         rev = self.hsh2rev(hsh)
         if rev is None:
@@ -240,13 +247,14 @@
             return False
         return (self.rev2flag(rev) & sidebranchflag) == 0
 
+
 def getlastnode(path):
     """return the last hash in a revmap, without loading its full content.
     this is equivalent to `m = revmap(path); m.rev2hsh(m.maxrev)`, but faster.
     """
     hsh = None
     try:
-        with open(path, 'rb') as f:
+        with open(path, b'rb') as f:
             f.seek(-_hshlen, io.SEEK_END)
             if f.tell() > len(revmap.HEADER):
                 hsh = f.read(_hshlen)
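
_writerev and _readcstr above imply the on-disk layout of a revmap record:
one flag byte (struct 'B'), then a NUL-terminated path only when the
rename flag is set, then the 20-byte binary node hash (which is why
getlastnode can simply seek -20 from the end of the file). A sketch of one
record round-trip; the flag value below is an assumption for illustration,
the real constants are defined elsewhere in revmap.py::

    import io
    import struct

    RENAMEFLAG = 2  # assumed value; see revmap.py for the real constant

    def writerecord(f, flag, hsh, path=None):
        f.write(struct.pack('B', flag))  # 1-byte flag
        if flag & RENAMEFLAG:
            f.write(path + b'\0')  # C-style string, only on renames
        assert len(hsh) == 20  # len(mercurial.node.nullid)
        f.write(hsh)

    def readrecord(f):
        flag = struct.unpack('B', f.read(1))[0]
        path = None
        if flag & RENAMEFLAG:
            path = b''
            while True:
                ch = f.read(1)
                if not ch:  # unexpected eof
                    raise EOFError('truncated revmap record')
                if ch == b'\0':  # terminator reached
                    break
                path += ch
        return flag, f.read(20), path

    buf = io.BytesIO()
    writerecord(buf, RENAMEFLAG, b'\x11' * 20, b'old/name')
    buf.seek(0)
    assert readrecord(buf) == (RENAMEFLAG, b'\x11' * 20, b'old/name')
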
--- a/hgext/fastannotate/support.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fastannotate/support.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,6 +7,7 @@
 
 from __future__ import absolute_import
 
+from mercurial.pycompat import getattr
 from mercurial import (
     context as hgcontext,
     dagop,
@@ -21,6 +22,7 @@
     revmap,
 )
 
+
 class _lazyfctx(object):
     """delegates to fctx but do not construct fctx when unnecessary"""
 
@@ -42,6 +44,7 @@
     def __getattr__(self, name):
         return getattr(self._fctx, name)
 
+
 def _convertoutputs(repo, annotated, contents):
     """convert fastannotate outputs to vanilla annotate format"""
     # fastannotate returns: [(nodeid, linenum, path)], [linecontent]
@@ -59,9 +62,11 @@
         results.append(annotateline(fctx=fctx, lineno=lineno, text=line))
     return results
 
+
 def _getmaster(fctx):
     """(fctx) -> str"""
-    return fctx._repo.ui.config('fastannotate', 'mainbranch') or 'default'
+    return fctx._repo.ui.config(b'fastannotate', b'mainbranch') or b'default'
+
 
 def _doannotate(fctx, follow=True, diffopts=None):
     """like the vanilla fctx.annotate, but do it via fastannotate, and make
@@ -73,49 +78,61 @@
 
     with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
         try:
-            annotated, contents = ac.annotate(fctx.rev(), master=master,
-                                              showpath=True, showlines=True)
+            annotated, contents = ac.annotate(
+                fctx.rev(), master=master, showpath=True, showlines=True
+            )
         except Exception:
-            ac.rebuild() # try rebuild once
-            fctx._repo.ui.debug('fastannotate: %s: rebuilding broken cache\n'
-                                % fctx._path)
+            ac.rebuild()  # try rebuild once
+            fctx._repo.ui.debug(
+                b'fastannotate: %s: rebuilding broken cache\n' % fctx._path
+            )
             try:
-                annotated, contents = ac.annotate(fctx.rev(), master=master,
-                                                  showpath=True, showlines=True)
+                annotated, contents = ac.annotate(
+                    fctx.rev(), master=master, showpath=True, showlines=True
+                )
             except Exception:
                 raise
 
     assert annotated and contents
     return _convertoutputs(fctx._repo, annotated, contents)
 
+
 def _hgwebannotate(orig, fctx, ui):
-    diffopts = patch.difffeatureopts(ui, untrusted=True,
-                                     section='annotate', whitespace=True)
+    diffopts = patch.difffeatureopts(
+        ui, untrusted=True, section=b'annotate', whitespace=True
+    )
     return _doannotate(fctx, diffopts=diffopts)
 
-def _fctxannotate(orig, self, follow=False, linenumber=False, skiprevs=None,
-                  diffopts=None):
+
+def _fctxannotate(
+    orig, self, follow=False, linenumber=False, skiprevs=None, diffopts=None
+):
     if skiprevs:
         # skiprevs is not supported yet
-        return orig(self, follow, linenumber, skiprevs=skiprevs,
-                    diffopts=diffopts)
+        return orig(
+            self, follow, linenumber, skiprevs=skiprevs, diffopts=diffopts
+        )
     try:
         return _doannotate(self, follow, diffopts)
     except Exception as ex:
-        self._repo.ui.debug('fastannotate: falling back to the vanilla '
-                            'annotate: %r\n' % ex)
-        return orig(self, follow=follow, skiprevs=skiprevs,
-                    diffopts=diffopts)
+        self._repo.ui.debug(
+            b'fastannotate: falling back to the vanilla annotate: %r\n' % ex
+        )
+        return orig(self, follow=follow, skiprevs=skiprevs, diffopts=diffopts)
+
 
 def _remotefctxannotate(orig, self, follow=False, skiprevs=None, diffopts=None):
     # skipset: a set-like used to test if a fctx needs to be downloaded
     with context.fctxannotatecontext(self, follow, diffopts) as ac:
         skipset = revmap.revmap(ac.revmappath)
-    return orig(self, follow, skiprevs=skiprevs, diffopts=diffopts,
-                prefetchskip=skipset)
+    return orig(
+        self, follow, skiprevs=skiprevs, diffopts=diffopts, prefetchskip=skipset
+    )
+
 
 def replacehgwebannotate():
-    extensions.wrapfunction(hgweb.webutil, 'annotate', _hgwebannotate)
+    extensions.wrapfunction(hgweb.webutil, b'annotate', _hgwebannotate)
+
 
 def replacefctxannotate():
-    extensions.wrapfunction(hgcontext.basefilectx, 'annotate', _fctxannotate)
+    extensions.wrapfunction(hgcontext.basefilectx, b'annotate', _fctxannotate)
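
_fctxannotate above is the fallback half of the extension: the wrapped
annotate tries the fastannotate path first and, on any exception, logs a
debug message and delegates to the original implementation. The generic
shape of that wrapper, sketched with plain callables (the real code
installs it through extensions.wrapfunction, and fastpath/debuglog here
are hypothetical stand-ins)::

    def makefallbackwrapper(orig, fastpath, debuglog):
        def wrapper(*args, **kwargs):
            try:
                return fastpath(*args, **kwargs)
            except Exception as ex:
                # mirror the debug message used above, then fall back
                debuglog('falling back to the vanilla annotate: %r\n' % ex)
                return orig(*args, **kwargs)
        return wrapper
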
--- a/hgext/fetch.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fetch.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,9 +10,7 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
-from mercurial.node import (
-    short,
-)
+from mercurial.node import short
 from mercurial import (
     cmdutil,
     error,
@@ -32,18 +30,30 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
-@command('fetch',
-    [('r', 'rev', [],
-     _('a specific revision you would like to pull'), _('REV')),
-    ('', 'edit', None, _('invoke editor on commit messages')),
-    ('', 'force-editor', None, _('edit commit message (DEPRECATED)')),
-    ('', 'switch-parent', None, _('switch parents when merging')),
-    ] + cmdutil.commitopts + cmdutil.commitopts2 + cmdutil.remoteopts,
-    _('hg fetch [SOURCE]'),
-    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT)
-def fetch(ui, repo, source='default', **opts):
+@command(
+    b'fetch',
+    [
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'a specific revision you would like to pull'),
+            _(b'REV'),
+        ),
+        (b'', b'edit', None, _(b'invoke editor on commit messages')),
+        (b'', b'force-editor', None, _(b'edit commit message (DEPRECATED)')),
+        (b'', b'switch-parent', None, _(b'switch parents when merging')),
+    ]
+    + cmdutil.commitopts
+    + cmdutil.commitopts2
+    + cmdutil.remoteopts,
+    _(b'hg fetch [SOURCE]'),
+    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
+)
+def fetch(ui, repo, source=b'default', **opts):
     '''pull changes from a remote repository, merge new changes if needed.
 
     This finds all changes from the repository at the specified path
@@ -64,9 +74,9 @@
     '''
 
     opts = pycompat.byteskwargs(opts)
-    date = opts.get('date')
+    date = opts.get(b'date')
     if date:
-        opts['date'] = dateutil.parsedate(date)
+        opts[b'date'] = dateutil.parsedate(date)
 
     parent = repo.dirstate.p1()
     branch = repo.dirstate.branch()
@@ -75,8 +85,10 @@
     except error.RepoLookupError:
         branchnode = None
     if parent != branchnode:
-        raise error.Abort(_('working directory not at branch tip'),
-                         hint=_("use 'hg update' to check out branch tip"))
+        raise error.Abort(
+            _(b'working directory not at branch tip'),
+            hint=_(b"use 'hg update' to check out branch tip"),
+        )
 
     wlock = lock = None
     try:
@@ -88,19 +100,26 @@
         bheads = repo.branchheads(branch)
         bheads = [head for head in bheads if len(repo[head].children()) == 0]
         if len(bheads) > 1:
-            raise error.Abort(_('multiple heads in this branch '
-                               '(use "hg heads ." and "hg merge" to merge)'))
+            raise error.Abort(
+                _(
+                    b'multiple heads in this branch '
+                    b'(use "hg heads ." and "hg merge" to merge)'
+                )
+            )
 
         other = hg.peer(repo, opts, ui.expandpath(source))
-        ui.status(_('pulling from %s\n') %
-                  util.hidepassword(ui.expandpath(source)))
+        ui.status(
+            _(b'pulling from %s\n') % util.hidepassword(ui.expandpath(source))
+        )
         revs = None
-        if opts['rev']:
+        if opts[b'rev']:
             try:
-                revs = [other.lookup(rev) for rev in opts['rev']]
+                revs = [other.lookup(rev) for rev in opts[b'rev']]
             except error.CapabilityError:
-                err = _("other repository doesn't support revision lookup, "
-                        "so a rev cannot be specified.")
+                err = _(
+                    b"other repository doesn't support revision lookup, "
+                    b"so a rev cannot be specified."
+                )
                 raise error.Abort(err)
 
         # Are there any changes at all?
@@ -125,9 +144,13 @@
             hg.clean(repo, newparent)
         newheads = [n for n in newheads if n != newparent]
         if len(newheads) > 1:
-            ui.status(_('not merging with %d other new branch heads '
-                        '(use "hg heads ." and "hg merge" to merge them)\n') %
-                      (len(newheads) - 1))
+            ui.status(
+                _(
+                    b'not merging with %d other new branch heads '
+                    b'(use "hg heads ." and "hg merge" to merge them)\n'
+                )
+                % (len(newheads) - 1)
+            )
             return 1
 
         if not newheads:
@@ -139,29 +162,35 @@
             # By default, we consider the repository we're pulling
             # *from* as authoritative, so we merge our changes into
             # theirs.
-            if opts['switch_parent']:
+            if opts[b'switch_parent']:
                 firstparent, secondparent = newparent, newheads[0]
             else:
                 firstparent, secondparent = newheads[0], newparent
-                ui.status(_('updating to %d:%s\n') %
-                          (repo.changelog.rev(firstparent),
-                           short(firstparent)))
+                ui.status(
+                    _(b'updating to %d:%s\n')
+                    % (repo.changelog.rev(firstparent), short(firstparent))
+                )
             hg.clean(repo, firstparent)
-            ui.status(_('merging with %d:%s\n') %
-                      (repo.changelog.rev(secondparent), short(secondparent)))
+            ui.status(
+                _(b'merging with %d:%s\n')
+                % (repo.changelog.rev(secondparent), short(secondparent))
+            )
             err = hg.merge(repo, secondparent, remind=False)
 
         if not err:
             # we don't translate commit messages
-            message = (cmdutil.logmessage(ui, opts) or
-                       ('Automated merge with %s' %
-                        util.removeauth(other.url())))
-            editopt = opts.get('edit') or opts.get('force_editor')
-            editor = cmdutil.getcommiteditor(edit=editopt, editform='fetch')
-            n = repo.commit(message, opts['user'], opts['date'], editor=editor)
-            ui.status(_('new changeset %d:%s merges remote changes '
-                        'with local\n') % (repo.changelog.rev(n),
-                                           short(n)))
+            message = cmdutil.logmessage(ui, opts) or (
+                b'Automated merge with %s' % util.removeauth(other.url())
+            )
+            editopt = opts.get(b'edit') or opts.get(b'force_editor')
+            editor = cmdutil.getcommiteditor(edit=editopt, editform=b'fetch')
+            n = repo.commit(
+                message, opts[b'user'], opts[b'date'], editor=editor
+            )
+            ui.status(
+                _(b'new changeset %d:%s merges remote changes with local\n')
+                % (repo.changelog.rev(n), short(n))
+            )
 
         return err
 
--- a/hgext/fix.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fix.py	Mon Oct 21 11:09:48 2019 -0400
@@ -36,9 +36,20 @@
   {first}   The 1-based line number of the first line in the modified range
   {last}    The 1-based line number of the last line in the modified range
 
+Deleted sections of a file will be ignored by :linerange, because there is no
+corresponding line range in the version being fixed.
+
+By default, tools that set :linerange will only be executed if there is at least
+one changed line range. This is meant to prevent accidents like running a code
+formatter in such a way that it unexpectedly reformats the whole file. If such a
+tool needs to operate on unchanged files, it should set the :skipclean suboption
+to false.
+
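+For example, a line-aware formatter could be configured as follows (the tool
+name and flags here are illustrative, not a shipped default)::
+
+  [fix]
+  myformatter:command = myformatter --stdin
+  myformatter:linerange = --lines={first}:{last}
+  myformatter:skipclean = false
+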
 The :pattern suboption determines which files will be passed through each
-configured tool. See :hg:`help patterns` for possible values. If there are file
-arguments to :hg:`fix`, the intersection of these patterns is used.
+configured tool. See :hg:`help patterns` for possible values. However, all
+patterns are relative to the repo root, even where that help text says they
+are relative to the current working directory. If there are file arguments
+to :hg:`fix`, the intersection of these patterns is used.
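+
+For example, a tool configured with ``mytool:pattern = glob:src/**.py`` (an
+illustrative value) is applied to files under ``src/`` at the repository
+root, regardless of the directory from which :hg:`fix` is invoked.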
 
 There is also a configurable limit for the maximum size of file that will be
 processed by :hg:`fix`::
@@ -102,6 +113,13 @@
     mapping fixer tool names to lists of metadata values returned from
     executions that modified a file. This aggregates the same metadata
     previously passed to the "postfixfile" hook.
+
+Fixer tools are run in the repository's root directory. This allows them to read
+configuration files from the working copy, or even write to the working copy.
+The working copy is not updated to match the revision being fixed. In fact,
+several revisions may be fixed in parallel. Writes to the working copy are not
+amended into the revision being fixed; fixer tools should always write fixed
+file content back to stdout as documented above.
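+
+For example, a simple whitespace fixer might be declared like this (the tool
+name is illustrative; any command that reads a file from stdin and writes the
+fixed content to stdout fits this contract)::
+
+  [fix]
+  whitespace:command = sed -e 's/[ \t]*$//'
+  whitespace:pattern = glob:**.py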
 """
 
 from __future__ import absolute_import
@@ -117,15 +135,14 @@
 from mercurial.node import nullrev
 from mercurial.node import wdirrev
 
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
 
 from mercurial import (
     cmdutil,
     context,
     copies,
     error,
+    match as matchmod,
     mdiff,
     merge,
     obsolete,
@@ -140,7 +157,7 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 cmdtable = {}
 command = registrar.command(cmdtable)
@@ -150,47 +167,65 @@
 
 # Register the suboptions allowed for each configured fixer, and default values.
 FIXER_ATTRS = {
-    'command': None,
-    'linerange': None,
-    'fileset': None,
-    'pattern': None,
-    'priority': 0,
-    'metadata': False,
+    b'command': None,
+    b'linerange': None,
+    b'pattern': None,
+    b'priority': 0,
+    b'metadata': False,
+    b'skipclean': True,
+    b'enabled': True,
 }
 
 for key, default in FIXER_ATTRS.items():
-    configitem('fix', '.*(:%s)?' % key, default=default, generic=True)
+    configitem(b'fix', b'.*:%s$' % key, default=default, generic=True)
 
 # A good default size allows most source code files to be fixed, but avoids
 # letting fixer tools choke on huge inputs, which could be surprising to the
 # user.
-configitem('fix', 'maxfilesize', default='2MB')
+configitem(b'fix', b'maxfilesize', default=b'2MB')
 
 # Allow fix commands to exit non-zero if an executed fixer tool exits non-zero.
 # This helps users write shell scripts that stop when a fixer tool signals a
 # problem.
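+# For example (illustrative invocation), a script can run
+#   hg fix --working-dir --config fix.failure=abort
+# and rely on the abort (and non-zero exit) when any fixer tool fails.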
-configitem('fix', 'failure', default='continue')
+configitem(b'fix', b'failure', default=b'continue')
+
 
 def checktoolfailureaction(ui, message, hint=None):
     """Abort with 'message' if fix.failure=abort"""
-    action = ui.config('fix', 'failure')
-    if action not in ('continue', 'abort'):
-        raise error.Abort(_('unknown fix.failure action: %s') % (action,),
-                          hint=_('use "continue" or "abort"'))
-    if action == 'abort':
+    action = ui.config(b'fix', b'failure')
+    if action not in (b'continue', b'abort'):
+        raise error.Abort(
+            _(b'unknown fix.failure action: %s') % (action,),
+            hint=_(b'use "continue" or "abort"'),
+        )
+    if action == b'abort':
         raise error.Abort(message, hint=hint)
 
-allopt = ('', 'all', False, _('fix all non-public non-obsolete revisions'))
-baseopt = ('', 'base', [], _('revisions to diff against (overrides automatic '
-                             'selection, and applies to every revision being '
-                             'fixed)'), _('REV'))
-revopt = ('r', 'rev', [], _('revisions to fix'), _('REV'))
-wdiropt = ('w', 'working-dir', False, _('fix the working directory'))
-wholeopt = ('', 'whole', False, _('always fix every line of a file'))
-usage = _('[OPTION]... [FILE]...')
 
-@command('fix', [allopt, baseopt, revopt, wdiropt, wholeopt], usage,
-        helpcategory=command.CATEGORY_FILE_CONTENTS)
+allopt = (b'', b'all', False, _(b'fix all non-public non-obsolete revisions'))
+baseopt = (
+    b'',
+    b'base',
+    [],
+    _(
+        b'revisions to diff against (overrides automatic '
+        b'selection, and applies to every revision being '
+        b'fixed)'
+    ),
+    _(b'REV'),
+)
+revopt = (b'r', b'rev', [], _(b'revisions to fix'), _(b'REV'))
+wdiropt = (b'w', b'working-dir', False, _(b'fix the working directory'))
+wholeopt = (b'', b'whole', False, _(b'always fix every line of a file'))
+usage = _(b'[OPTION]... [FILE]...')
+
+
+@command(
+    b'fix',
+    [allopt, baseopt, revopt, wdiropt, wholeopt],
+    usage,
+    helpcategory=command.CATEGORY_FILE_CONTENTS,
+)
 def fix(ui, repo, *pats, **opts):
     """rewrite file content in changesets or working directory
 
@@ -215,16 +250,17 @@
     override this default behavior, though it is not usually desirable to do so.
     """
     opts = pycompat.byteskwargs(opts)
-    if opts['all']:
-        if opts['rev']:
-            raise error.Abort(_('cannot specify both "--rev" and "--all"'))
-        opts['rev'] = ['not public() and not obsolete()']
-        opts['working_dir'] = True
-    with repo.wlock(), repo.lock(), repo.transaction('fix'):
+    if opts[b'all']:
+        if opts[b'rev']:
+            raise error.Abort(_(b'cannot specify both "--rev" and "--all"'))
+        opts[b'rev'] = [b'not public() and not obsolete()']
+        opts[b'working_dir'] = True
+    with repo.wlock(), repo.lock(), repo.transaction(b'fix'):
         revstofix = getrevstofix(ui, repo, opts)
         basectxs = getbasectxs(repo, opts, revstofix)
-        workqueue, numitems = getworkqueue(ui, repo, pats, opts, revstofix,
-                                           basectxs)
+        workqueue, numitems = getworkqueue(
+            ui, repo, pats, opts, revstofix, basectxs
+        )
         fixers = getfixers(ui)
 
         # There are no data dependencies between the workers fixing each file
@@ -233,14 +269,21 @@
             for rev, path in items:
                 ctx = repo[rev]
                 olddata = ctx[path].data()
-                metadata, newdata = fixfile(ui, opts, fixers, ctx, path,
-                                            basectxs[rev])
+                metadata, newdata = fixfile(
+                    ui, repo, opts, fixers, ctx, path, basectxs[rev]
+                )
                 # Don't waste memory/time passing unchanged content back, but
                 # produce one result per item either way.
-                yield (rev, path, metadata,
-                       newdata if newdata != olddata else None)
-        results = worker.worker(ui, 1.0, getfixes, tuple(), workqueue,
-                                threadsafe=False)
+                yield (
+                    rev,
+                    path,
+                    metadata,
+                    newdata if newdata != olddata else None,
+                )
+
+        results = worker.worker(
+            ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False
+        )
 
         # We have to hold on to the data for each successor revision in memory
         # until all its parents are committed. We ensure this by committing and
@@ -253,8 +296,9 @@
         replacements = {}
         wdirwritten = False
         commitorder = sorted(revstofix, reverse=True)
-        with ui.makeprogress(topic=_('fixing'), unit=_('files'),
-                             total=sum(numitems.values())) as progress:
+        with ui.makeprogress(
+            topic=_(b'fixing'), unit=_(b'files'), total=sum(numitems.values())
+        ) as progress:
             for rev, path, filerevmetadata, newdata in results:
                 progress.increment(item=path)
                 for fixername, fixermetadata in filerevmetadata.items():
@@ -262,12 +306,15 @@
                 if newdata is not None:
                     filedata[rev][path] = newdata
                     hookargs = {
-                      'rev': rev,
-                      'path': path,
-                      'metadata': filerevmetadata,
+                        b'rev': rev,
+                        b'path': path,
+                        b'metadata': filerevmetadata,
                     }
-                    repo.hook('postfixfile', throw=False,
-                              **pycompat.strkwargs(hookargs))
+                    repo.hook(
+                        b'postfixfile',
+                        throw=False,
+                        **pycompat.strkwargs(hookargs)
+                    )
                 numitems[rev] -= 1
                 # Apply the fixes for this and any other revisions that are
                 # ready and sitting at the front of the queue. Using a loop here
@@ -285,11 +332,12 @@
 
         cleanup(repo, replacements, wdirwritten)
         hookargs = {
-            'replacements': replacements,
-            'wdirwritten': wdirwritten,
-            'metadata': aggregatemetadata,
+            b'replacements': replacements,
+            b'wdirwritten': wdirwritten,
+            b'metadata': aggregatemetadata,
         }
-        repo.hook('postfix', throw=True, **pycompat.strkwargs(hookargs))
+        repo.hook(b'postfix', throw=True, **pycompat.strkwargs(hookargs))
+
 
 def cleanup(repo, replacements, wdirwritten):
     """Calls scmutil.cleanupnodes() with the given replacements.
@@ -304,8 +352,11 @@
     Useful as a hook point for extending "hg fix" with output summarizing the
     effects of the command, though we choose not to output anything here.
     """
-    replacements = {prec: [succ] for prec, succ in replacements.iteritems()}
-    scmutil.cleanupnodes(repo, replacements, 'fix', fixphase=True)
+    replacements = {
+        prec: [succ] for prec, succ in pycompat.iteritems(replacements)
+    }
+    scmutil.cleanupnodes(repo, replacements, b'fix', fixphase=True)
+
 
 def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
     """"Constructs the list of files to be fixed at specific revisions
@@ -326,57 +377,72 @@
     """
     workqueue = []
     numitems = collections.defaultdict(int)
-    maxfilesize = ui.configbytes('fix', 'maxfilesize')
+    maxfilesize = ui.configbytes(b'fix', b'maxfilesize')
     for rev in sorted(revstofix):
         fixctx = repo[rev]
         match = scmutil.match(fixctx, pats, opts)
-        for path in sorted(pathstofix(
-                        ui, repo, pats, opts, match, basectxs[rev], fixctx)):
+        for path in sorted(
+            pathstofix(ui, repo, pats, opts, match, basectxs[rev], fixctx)
+        ):
             fctx = fixctx[path]
             if fctx.islink():
                 continue
             if fctx.size() > maxfilesize:
-                ui.warn(_('ignoring file larger than %s: %s\n') %
-                        (util.bytecount(maxfilesize), path))
+                ui.warn(
+                    _(b'ignoring file larger than %s: %s\n')
+                    % (util.bytecount(maxfilesize), path)
+                )
                 continue
             workqueue.append((rev, path))
             numitems[rev] += 1
     return workqueue, numitems
 
+
 def getrevstofix(ui, repo, opts):
     """Returns the set of revision numbers that should be fixed"""
-    revs = set(scmutil.revrange(repo, opts['rev']))
+    revs = set(scmutil.revrange(repo, opts[b'rev']))
     for rev in revs:
         checkfixablectx(ui, repo, repo[rev])
     if revs:
         cmdutil.checkunfinished(repo)
         checknodescendants(repo, revs)
-    if opts.get('working_dir'):
+    if opts.get(b'working_dir'):
         revs.add(wdirrev)
         if list(merge.mergestate.read(repo).unresolved()):
-            raise error.Abort('unresolved conflicts', hint="use 'hg resolve'")
+            raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'")
     if not revs:
         raise error.Abort(
-            'no changesets specified', hint='use --rev or --working-dir')
+            b'no changesets specified', hint=b'use --rev or --working-dir'
+        )
     return revs
 
+
 def checknodescendants(repo, revs):
-    if (not obsolete.isenabled(repo, obsolete.allowunstableopt) and
-        repo.revs('(%ld::) - (%ld)', revs, revs)):
-        raise error.Abort(_('can only fix a changeset together '
-                            'with all its descendants'))
+    if not obsolete.isenabled(repo, obsolete.allowunstableopt) and repo.revs(
+        b'(%ld::) - (%ld)', revs, revs
+    ):
+        raise error.Abort(
+            _(b'can only fix a changeset together with all its descendants')
+        )
+
 
 def checkfixablectx(ui, repo, ctx):
     """Aborts if the revision shouldn't be replaced with a fixed one."""
     if not ctx.mutable():
-        raise error.Abort('can\'t fix immutable changeset %s' %
-                          (scmutil.formatchangeid(ctx),))
+        raise error.Abort(
+            b'can\'t fix immutable changeset %s'
+            % (scmutil.formatchangeid(ctx),)
+        )
     if ctx.obsolete():
         # It would be better to actually check if the revision has a successor.
-        allowdivergence = ui.configbool('experimental',
-                                        'evolution.allowdivergence')
+        allowdivergence = ui.configbool(
+            b'experimental', b'evolution.allowdivergence'
+        )
         if not allowdivergence:
-            raise error.Abort('fixing obsolete revision could cause divergence')
+            raise error.Abort(
+                b'fixing obsolete revision could cause divergence'
+            )
+
 
 def pathstofix(ui, repo, pats, opts, match, basectxs, fixctx):
     """Returns the set of files that should be fixed in a context
@@ -387,13 +453,19 @@
     """
     files = set()
     for basectx in basectxs:
-        stat = basectx.status(fixctx, match=match, listclean=bool(pats),
-                              listunknown=bool(pats))
+        stat = basectx.status(
+            fixctx, match=match, listclean=bool(pats), listunknown=bool(pats)
+        )
         files.update(
-            set(itertools.chain(stat.added, stat.modified, stat.clean,
-                                stat.unknown)))
+            set(
+                itertools.chain(
+                    stat.added, stat.modified, stat.clean, stat.unknown
+                )
+            )
+        )
     return files
 
+
 def lineranges(opts, path, basectxs, fixctx, content2):
     """Returns the set of line ranges that should be fixed in a file
 
@@ -406,10 +478,10 @@
     Another way to understand this is that we exclude line ranges that are
     common to the file in all base contexts.
     """
-    if opts.get('whole'):
+    if opts.get(b'whole'):
         # Return a range containing all lines. Rely on the diff implementation's
         # idea of how many lines are in the file, instead of reimplementing it.
-        return difflineranges('', content2)
+        return difflineranges(b'', content2)
 
     rangeslist = []
     for basectx in basectxs:
@@ -417,10 +489,11 @@
         if basepath in basectx:
             content1 = basectx[basepath].data()
         else:
-            content1 = ''
+            content1 = b''
         rangeslist.extend(difflineranges(content1, content2))
     return unionranges(rangeslist)
 
+
 def unionranges(rangeslist):
     """Return the union of some closed intervals
 
@@ -455,6 +528,7 @@
             unioned[-1] = (c, max(b, d))
     return unioned
 
+
 def difflineranges(content1, content2):
     """Return list of line number ranges in content2 that differ from content1.
 
@@ -497,10 +571,11 @@
     ranges = []
     for lines, kind in mdiff.allblocks(content1, content2):
         firstline, lastline = lines[2:4]
-        if kind == '!' and firstline != lastline:
+        if kind == b'!' and firstline != lastline:
             ranges.append((firstline + 1, lastline))
     return ranges
 
+
 def getbasectxs(repo, opts, revstofix):
     """Returns a map of the base contexts for each revision
 
@@ -511,8 +586,8 @@
     """
     # The --base flag overrides the usual logic, and we give every revision
     # exactly the set of baserevs that the user specified.
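+    # For example, ``hg fix --working-dir --base default`` (an illustrative
+    # invocation) computes changed line ranges relative to the default
+    # branch head for everything being fixed.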
-    if opts.get('base'):
-        baserevs = set(scmutil.revrange(repo, opts.get('base')))
+    if opts.get(b'base'):
+        baserevs = set(scmutil.revrange(repo, opts.get(b'base')))
         if not baserevs:
             baserevs = {nullrev}
         basectxs = {repo[rev] for rev in baserevs}
@@ -530,7 +605,8 @@
                 basectxs[rev].add(pctx)
     return basectxs
 
-def fixfile(ui, opts, fixers, fixctx, path, basectxs):
+
+def fixfile(ui, repo, opts, fixers, fixctx, path, basectxs):
     """Run any configured fixers that should affect the file in this context
 
     Returns the file content that results from applying the fixers in some order
@@ -539,35 +615,39 @@
     (i.e. they will only avoid lines that are common to all basectxs).
 
     A fixer tool's stdout will become the file's new content if and only if it
-    exits with code zero.
+    exits with code zero. The fixer tool's working directory is the repository's
+    root.
     """
     metadata = {}
     newdata = fixctx[path].data()
-    for fixername, fixer in fixers.iteritems():
+    for fixername, fixer in pycompat.iteritems(fixers):
         if fixer.affects(opts, fixctx, path):
-            rangesfn = lambda: lineranges(opts, path, basectxs, fixctx, newdata)
-            command = fixer.command(ui, path, rangesfn)
+            ranges = lineranges(opts, path, basectxs, fixctx, newdata)
+            command = fixer.command(ui, path, ranges)
             if command is None:
                 continue
-            ui.debug('subprocess: %s\n' % (command,))
+            ui.debug(b'subprocess: %s\n' % (command,))
             proc = subprocess.Popen(
                 procutil.tonativestr(command),
                 shell=True,
-                cwd=procutil.tonativestr(b'/'),
+                cwd=procutil.tonativestr(repo.root),
                 stdin=subprocess.PIPE,
                 stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE)
+                stderr=subprocess.PIPE,
+            )
             stdout, stderr = proc.communicate(newdata)
             if stderr:
                 showstderr(ui, fixctx.rev(), fixername, stderr)
             newerdata = stdout
             if fixer.shouldoutputmetadata():
                 try:
-                    metadatajson, newerdata = stdout.split('\0', 1)
+                    metadatajson, newerdata = stdout.split(b'\0', 1)
                     metadata[fixername] = json.loads(metadatajson)
                 except ValueError:
-                    ui.warn(_('ignored invalid output from fixer tool: %s\n') %
-                            (fixername,))
+                    ui.warn(
+                        _(b'ignored invalid output from fixer tool: %s\n')
+                        % (fixername,)
+                    )
                     continue
             else:
                 metadata[fixername] = None
@@ -575,14 +655,19 @@
                 newdata = newerdata
             else:
                 if not stderr:
-                    message = _('exited with status %d\n') % (proc.returncode,)
+                    message = _(b'exited with status %d\n') % (proc.returncode,)
                     showstderr(ui, fixctx.rev(), fixername, message)
                 checktoolfailureaction(
-                    ui, _('no fixes will be applied'),
-                    hint=_('use --config fix.failure=continue to apply any '
-                           'successful fixes anyway'))
+                    ui,
+                    _(b'no fixes will be applied'),
+                    hint=_(
+                        b'use --config fix.failure=continue to apply any '
+                        b'successful fixes anyway'
+                    ),
+                )
     return metadata, newdata
 
+
 def showstderr(ui, rev, fixername, stderr):
     """Writes the lines of the stderr string as warnings on the ui
 
@@ -591,14 +676,15 @@
     space and would tend to be included in the error message if they were
     relevant.
     """
-    for line in re.split('[\r\n]+', stderr):
+    for line in re.split(b'[\r\n]+', stderr):
         if line:
-            ui.warn(('['))
+            ui.warn(b'[')
             if rev is None:
-                ui.warn(_('wdir'), label='evolve.rev')
+                ui.warn(_(b'wdir'), label=b'evolve.rev')
             else:
-                ui.warn((str(rev)), label='evolve.rev')
-            ui.warn(('] %s: %s\n') % (fixername, line))
+                ui.warn(b'%d' % rev, label=b'evolve.rev')
+            ui.warn(b'] %s: %s\n' % (fixername, line))
+
 
 def writeworkingdir(repo, ctx, filedata, replacements):
     """Write new content to the working copy and check out the new p1 if any
@@ -610,10 +696,10 @@
 
     Directly updates the dirstate for the affected files.
     """
-    for path, data in filedata.iteritems():
+    for path, data in pycompat.iteritems(filedata):
         fctx = ctx[path]
         fctx.write(data, fctx.flags())
-        if repo.dirstate[path] == 'n':
+        if repo.dirstate[path] == b'n':
             repo.dirstate.normallookup(path)
 
     oldparentnodes = repo.dirstate.parents()
@@ -621,6 +707,7 @@
     if newparentnodes != oldparentnodes:
         repo.setparents(*newparentnodes)
 
+
 def replacerev(ui, repo, ctx, filedata, replacements):
     """Commit a new revision like the given one, but with file content changes
 
@@ -652,9 +739,11 @@
     # intervention to evolve. We can't rely on commit() to avoid creating the
     # un-needed revision because the extra field added below produces a new hash
     # regardless of file content changes.
-    if (not filedata and
-        p1ctx.node() not in replacements and
-        p2ctx.node() not in replacements):
+    if (
+        not filedata
+        and p1ctx.node() not in replacements
+        and p2ctx.node() not in replacements
+    ):
         return
 
     def filectxfn(repo, memctx, path):
@@ -669,10 +758,11 @@
             data=filedata.get(path, fctx.data()),
             islink=fctx.islink(),
             isexec=fctx.isexec(),
-            copysource=copysource)
+            copysource=copysource,
+        )
 
     extra = ctx.extra().copy()
-    extra['fix_source'] = ctx.hex()
+    extra[b'fix_source'] = ctx.hex()
 
     memctx = context.memctx(
         repo,
@@ -684,14 +774,16 @@
         date=ctx.date(),
         extra=extra,
         branch=ctx.branch(),
-        editor=None)
+        editor=None,
+    )
     sucnode = memctx.commit()
     prenode = ctx.node()
     if prenode == sucnode:
-        ui.debug('node %s already existed\n' % (ctx.hex()))
+        ui.debug(b'node %s already existed\n' % (ctx.hex()))
     else:
         replacements[ctx.node()] = sucnode
 
+
 def getfixers(ui):
     """Returns a map of configured fixer tools indexed by their names
 
@@ -700,54 +792,92 @@
     """
     fixers = {}
     for name in fixernames(ui):
-        fixers[name] = Fixer()
-        attrs = ui.configsuboptions('fix', name)[1]
-        if 'fileset' in attrs and 'pattern' not in attrs:
-            ui.warn(_('the fix.tool:fileset config name is deprecated; '
-                      'please rename it to fix.tool:pattern\n'))
-            attrs['pattern'] = attrs['fileset']
-        for key, default in FIXER_ATTRS.items():
-            setattr(fixers[name], pycompat.sysstr('_' + key),
-                    attrs.get(key, default))
-        fixers[name]._priority = int(fixers[name]._priority)
+        enabled = ui.configbool(b'fix', name + b':enabled')
+        command = ui.config(b'fix', name + b':command')
+        pattern = ui.config(b'fix', name + b':pattern')
+        linerange = ui.config(b'fix', name + b':linerange')
+        priority = ui.configint(b'fix', name + b':priority')
+        metadata = ui.configbool(b'fix', name + b':metadata')
+        skipclean = ui.configbool(b'fix', name + b':skipclean')
+        # Don't use a fixer if it has no pattern configured. It would be
+        # dangerous to let it affect all files. It would be pointless to let it
+        # affect no files. There is no reasonable subset of files to use as the
+        # default.
+        if command is None:
+            ui.warn(
+                _(b'fixer tool has no command configuration: %s\n') % (name,)
+            )
+        elif pattern is None:
+            ui.warn(
+                _(b'fixer tool has no pattern configuration: %s\n') % (name,)
+            )
+        elif not enabled:
+            ui.debug(b'ignoring disabled fixer tool: %s\n' % (name,))
+        else:
+            fixers[name] = Fixer(
+                command, pattern, linerange, priority, metadata, skipclean
+            )
     return collections.OrderedDict(
-        sorted(fixers.items(), key=lambda item: item[1]._priority,
-               reverse=True))
+        sorted(fixers.items(), key=lambda item: item[1]._priority, reverse=True)
+    )
+
 
 def fixernames(ui):
     """Returns the names of [fix] config options that have suboptions"""
     names = set()
-    for k, v in ui.configitems('fix'):
-        if ':' in k:
-            names.add(k.split(':', 1)[0])
+    for k, v in ui.configitems(b'fix'):
+        if b':' in k:
+            names.add(k.split(b':', 1)[0])
     return names
 
+
 class Fixer(object):
     """Wraps the raw config values for a fixer with methods"""
 
+    def __init__(
+        self, command, pattern, linerange, priority, metadata, skipclean
+    ):
+        self._command = command
+        self._pattern = pattern
+        self._linerange = linerange
+        self._priority = priority
+        self._metadata = metadata
+        self._skipclean = skipclean
+
     def affects(self, opts, fixctx, path):
         """Should this fixer run on the file at the given path and context?"""
-        return scmutil.match(fixctx, [self._pattern], opts)(path)
+        repo = fixctx.repo()
+        matcher = matchmod.match(
+            repo.root, repo.root, [self._pattern], ctx=fixctx
+        )
+        return matcher(path)
 
     def shouldoutputmetadata(self):
         """Should the stdout of this fixer start with JSON and a null byte?"""
         return self._metadata
 
-    def command(self, ui, path, rangesfn):
+    def command(self, ui, path, ranges):
         """A shell command to use to invoke this fixer on the given file/lines
 
         May return None if there is no appropriate command to run for the given
         parameters.
         """
         expand = cmdutil.rendercommandtemplate
-        parts = [expand(ui, self._command,
-                        {'rootpath': path, 'basename': os.path.basename(path)})]
+        parts = [
+            expand(
+                ui,
+                self._command,
+                {b'rootpath': path, b'basename': os.path.basename(path)},
+            )
+        ]
         if self._linerange:
-            ranges = rangesfn()
-            if not ranges:
+            if self._skipclean and not ranges:
                 # No line ranges to fix, so don't run the fixer.
                 return None
             for first, last in ranges:
-                parts.append(expand(ui, self._linerange,
-                                    {'first': first, 'last': last}))
-        return ' '.join(parts)
+                parts.append(
+                    expand(
+                        ui, self._linerange, {b'first': first, b'last': last}
+                    )
+                )
+        return b' '.join(parts)
--- a/hgext/fsmonitor/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fsmonitor/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -112,13 +112,12 @@
 import os
 import stat
 import sys
+import tempfile
 import weakref
 
 from mercurial.i18n import _
-from mercurial.node import (
-    hex,
-)
-
+from mercurial.node import hex
+from mercurial.pycompat import open
 from mercurial import (
     context,
     encoding,
@@ -144,51 +143,80 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('fsmonitor', 'mode',
-    default='on',
+configitem(
+    b'fsmonitor', b'mode', default=b'on',
 )
-configitem('fsmonitor', 'walk_on_invalidate',
-    default=False,
+configitem(
+    b'fsmonitor', b'walk_on_invalidate', default=False,
 )
-configitem('fsmonitor', 'timeout',
-    default='2',
+configitem(
+    b'fsmonitor', b'timeout', default=b'2',
 )
-configitem('fsmonitor', 'blacklistusers',
-    default=list,
+configitem(
+    b'fsmonitor', b'blacklistusers', default=list,
 )
-configitem('fsmonitor', 'watchman_exe',
-    default='watchman',
+configitem(
+    b'fsmonitor', b'watchman_exe', default=b'watchman',
 )
-configitem('fsmonitor', 'verbose',
-    default=True,
+configitem(
+    b'fsmonitor', b'verbose', default=True, experimental=True,
 )
-configitem('experimental', 'fsmonitor.transaction_notify',
-    default=False,
+configitem(
+    b'experimental', b'fsmonitor.transaction_notify', default=False,
 )
 
 # This extension is incompatible with the following blacklisted extensions
 # and will disable itself when encountering one of these:
-_blacklist = ['largefiles', 'eol']
+_blacklist = [b'largefiles', b'eol']
+
+
+def debuginstall(ui, fm):
+    fm.write(
+        b"fsmonitor-watchman",
+        _(b"fsmonitor checking for watchman binary... (%s)\n"),
+        ui.configpath(b"fsmonitor", b"watchman_exe"),
+    )
+    root = tempfile.mkdtemp()
+    c = watchmanclient.client(ui, root)
+    err = None
+    try:
+        v = c.command(b"version")
+        fm.write(
+            b"fsmonitor-watchman-version",
+            _(b" watchman binary version %s\n"),
+            v[b"version"],
+        )
+    except watchmanclient.Unavailable as e:
+        err = str(e)
+    fm.condwrite(
+        err,
+        b"fsmonitor-watchman-error",
+        _(b" watchman binary missing or broken: %s\n"),
+        err,
+    )
+    return 1 if err else 0
+
 
 def _handleunavailable(ui, state, ex):
     """Exception handler for Watchman interaction exceptions"""
     if isinstance(ex, watchmanclient.Unavailable):
         # experimental config: fsmonitor.verbose
-        if ex.warn and ui.configbool('fsmonitor', 'verbose'):
-            if 'illegal_fstypes' not in str(ex):
-                ui.warn(str(ex) + '\n')
+        if ex.warn and ui.configbool(b'fsmonitor', b'verbose'):
+            if b'illegal_fstypes' not in str(ex):
+                ui.warn(str(ex) + b'\n')
         if ex.invalidate:
             state.invalidate()
         # experimental config: fsmonitor.verbose
-        if ui.configbool('fsmonitor', 'verbose'):
-            ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
+        if ui.configbool(b'fsmonitor', b'verbose'):
+            ui.log(b'fsmonitor', b'Watchman unavailable: %s\n', ex.msg)
     else:
-        ui.log('fsmonitor', 'Watchman exception: %s\n', ex)
+        ui.log(b'fsmonitor', b'Watchman exception: %s\n', ex)
+
 
 def _hashignore(ignore):
     """Calculate hash for ignore patterns and filenames
@@ -202,10 +230,12 @@
     sha1.update(repr(ignore))
     return sha1.hexdigest()
 
+
 _watchmanencoding = pywatchman.encoding.get_local_encoding()
 _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
 _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
 
+
 def _watchmantofsencoding(path):
     """Fix path to match watchman and local filesystem encoding
 
@@ -215,7 +245,7 @@
     try:
         decoded = path.decode(_watchmanencoding)
     except UnicodeDecodeError as e:
-        raise error.Abort(str(e), hint='watchman encoding error')
+        raise error.Abort(str(e), hint=b'watchman encoding error')
 
     try:
         encoded = decoded.encode(_fsencoding, 'strict')
@@ -224,41 +254,43 @@
 
     return encoded
 
+
 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
     '''Replacement for dirstate.walk, hooking into Watchman.
 
     Whenever full is False, ignored is False, and the Watchman client is
     available, use Watchman combined with saved state to possibly return only a
     subset of files.'''
+
     def bail(reason):
-        self._ui.debug('fsmonitor: fallback to core status, %s\n' % reason)
+        self._ui.debug(b'fsmonitor: fallback to core status, %s\n' % reason)
         return orig(match, subrepos, unknown, ignored, full=True)
 
     if full:
-        return bail('full rewalk requested')
+        return bail(b'full rewalk requested')
     if ignored:
-        return bail('listing ignored files')
+        return bail(b'listing ignored files')
     if not self._watchmanclient.available():
-        return bail('client unavailable')
+        return bail(b'client unavailable')
     state = self._fsmonitorstate
     clock, ignorehash, notefiles = state.get()
     if not clock:
         if state.walk_on_invalidate:
-            return bail('no clock')
+            return bail(b'no clock')
         # Initial NULL clock value, see
         # https://facebook.github.io/watchman/docs/clockspec.html
-        clock = 'c:0:0'
+        clock = b'c:0:0'
         notefiles = []
 
     ignore = self._ignore
     dirignore = self._dirignore
     if unknown:
-        if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
+        if _hashignore(ignore) != ignorehash and clock != b'c:0:0':
             # ignore list changed -- can't rely on Watchman state any more
             if state.walk_on_invalidate:
-                return bail('ignore rules changed')
+                return bail(b'ignore rules changed')
             notefiles = []
-            clock = 'c:0:0'
+            clock = b'c:0:0'
     else:
         # always ignore
         ignore = util.always
@@ -267,7 +299,7 @@
     matchfn = match.matchfn
     matchalways = match.always()
     dmap = self._map
-    if util.safehasattr(dmap, '_map'):
+    if util.safehasattr(dmap, b'_map'):
         # for better performance, directly access the inner dirstate map if the
         # standard dirstate implementation is in use.
         dmap = dmap._map
@@ -307,7 +339,7 @@
     if not work and (exact or skipstep3):
         for s in subrepos:
             del results[s]
-        del results['.hg']
+        del results[b'.hg']
         return results
 
     # step 2: query Watchman
@@ -316,30 +348,35 @@
         # Add a little slack over the top of the user query to allow for
         # overheads while transferring the data
         self._watchmanclient.settimeout(state.timeout + 0.1)
-        result = self._watchmanclient.command('query', {
-            'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
-            'since': clock,
-            'expression': [
-                'not', [
-                    'anyof', ['dirname', '.hg'],
-                    ['name', '.hg', 'wholename']
-                ]
-            ],
-            'sync_timeout': int(state.timeout * 1000),
-            'empty_on_fresh_instance': state.walk_on_invalidate,
-        })
+        result = self._watchmanclient.command(
+            b'query',
+            {
+                b'fields': [b'mode', b'mtime', b'size', b'exists', b'name'],
+                b'since': clock,
+                b'expression': [
+                    b'not',
+                    [
+                        b'anyof',
+                        [b'dirname', b'.hg'],
+                        [b'name', b'.hg', b'wholename'],
+                    ],
+                ],
+                b'sync_timeout': int(state.timeout * 1000),
+                b'empty_on_fresh_instance': state.walk_on_invalidate,
+            },
+        )
     except Exception as ex:
         _handleunavailable(self._ui, state, ex)
         self._watchmanclient.clearconnection()
-        return bail('exception during run')
+        return bail(b'exception during run')
     else:
         # We need to propagate the last observed clock up so that we
         # can use it for our next query
-        state.setlastclock(result['clock'])
-        if result['is_fresh_instance']:
+        state.setlastclock(result[b'clock'])
+        if result[b'is_fresh_instance']:
             if state.walk_on_invalidate:
                 state.invalidate()
-                return bail('fresh instance')
+                return bail(b'fresh instance')
             fresh_instance = True
             # Ignore any prior notable files from the state info
             notefiles = []
@@ -349,7 +386,7 @@
     if normalize:
         foldmap = dict((normcase(k), k) for k in results)
 
-    switch_slashes = pycompat.ossep == '\\'
+    switch_slashes = pycompat.ossep == b'\\'
     # The order of the results is, strictly speaking, undefined.
     # For case changes on a case insensitive filesystem we may receive
     # two entries, one with exists=True and another with exists=False.
@@ -357,30 +394,33 @@
     # as being happens-after the exists=False entries due to the way that
     # Watchman tracks files.  We use this property to reconcile deletes
     # for name case changes.
-    for entry in result['files']:
-        fname = entry['name']
+    for entry in result[b'files']:
+        fname = entry[b'name']
         if _fixencoding:
             fname = _watchmantofsencoding(fname)
         if switch_slashes:
-            fname = fname.replace('\\', '/')
+            fname = fname.replace(b'\\', b'/')
         if normalize:
             normed = normcase(fname)
             fname = normalize(fname, True, True)
             foldmap[normed] = fname
-        fmode = entry['mode']
-        fexists = entry['exists']
+        fmode = entry[b'mode']
+        fexists = entry[b'exists']
         kind = getkind(fmode)
 
-        if '/.hg/' in fname or fname.endswith('/.hg'):
-            return bail('nested-repo-detected')
+        if b'/.hg/' in fname or fname.endswith(b'/.hg'):
+            return bail(b'nested-repo-detected')
 
         if not fexists:
             # if marked as deleted and we don't already have a change
             # record, mark it as deleted.  If we already have an entry
             # for fname then it was either part of walkexplicit or was
             # an earlier result that was a case change
-            if fname not in results and fname in dmap and (
-                    matchalways or matchfn(fname)):
+            if (
+                fname not in results
+                and fname in dmap
+                and (matchalways or matchfn(fname))
+            ):
                 results[fname] = None
         elif kind == dirkind:
             if fname in dmap and (matchalways or matchfn(fname)):
@@ -399,29 +439,45 @@
     if normalize:
         # any notable files that have changed case will already be handled
         # above, so just check membership in the foldmap
-        notefiles = set((normalize(f, True, True) for f in notefiles
-                         if normcase(f) not in foldmap))
-    visit = set((f for f in notefiles if (f not in results and matchfn(f)
-                                          and (f in dmap or not ignore(f)))))
+        notefiles = set(
+            (
+                normalize(f, True, True)
+                for f in notefiles
+                if normcase(f) not in foldmap
+            )
+        )
+    visit = set(
+        (
+            f
+            for f in notefiles
+            if (
+                f not in results and matchfn(f) and (f in dmap or not ignore(f))
+            )
+        )
+    )
 
     if not fresh_instance:
         if matchalways:
             visit.update(f for f in nonnormalset if f not in results)
             visit.update(f for f in copymap if f not in results)
         else:
-            visit.update(f for f in nonnormalset
-                         if f not in results and matchfn(f))
-            visit.update(f for f in copymap
-                         if f not in results and matchfn(f))
+            visit.update(
+                f for f in nonnormalset if f not in results and matchfn(f)
+            )
+            visit.update(f for f in copymap if f not in results and matchfn(f))
     else:
         if matchalways:
-            visit.update(f for f, st in dmap.iteritems() if f not in results)
+            visit.update(
+                f for f, st in pycompat.iteritems(dmap) if f not in results
+            )
             visit.update(f for f in copymap if f not in results)
         else:
-            visit.update(f for f, st in dmap.iteritems()
-                         if f not in results and matchfn(f))
-            visit.update(f for f in copymap
-                         if f not in results and matchfn(f))
+            visit.update(
+                f
+                for f, st in pycompat.iteritems(dmap)
+                if f not in results and matchfn(f)
+            )
+            visit.update(f for f in copymap if f not in results and matchfn(f))
 
     audit = pathutil.pathauditor(self._root, cached=True).check
     auditpass = [f for f in visit if audit(f)]
@@ -438,34 +494,43 @@
 
     for s in subrepos:
         del results[s]
-    del results['.hg']
+    del results[b'.hg']
     return results
 
+
 def overridestatus(
-        orig, self, node1='.', node2=None, match=None, ignored=False,
-        clean=False, unknown=False, listsubrepos=False):
+    orig,
+    self,
+    node1=b'.',
+    node2=None,
+    match=None,
+    ignored=False,
+    clean=False,
+    unknown=False,
+    listsubrepos=False,
+):
     listignored = ignored
     listclean = clean
     listunknown = unknown
 
     def _cmpsets(l1, l2):
         try:
-            if 'FSMONITOR_LOG_FILE' in encoding.environ:
-                fn = encoding.environ['FSMONITOR_LOG_FILE']
-                f = open(fn, 'wb')
+            if b'FSMONITOR_LOG_FILE' in encoding.environ:
+                fn = encoding.environ[b'FSMONITOR_LOG_FILE']
+                f = open(fn, b'wb')
             else:
-                fn = 'fsmonitorfail.log'
-                f = self.vfs.open(fn, 'wb')
+                fn = b'fsmonitorfail.log'
+                f = self.vfs.open(fn, b'wb')
         except (IOError, OSError):
-            self.ui.warn(_('warning: unable to write to %s\n') % fn)
+            self.ui.warn(_(b'warning: unable to write to %s\n') % fn)
             return
 
         try:
             for i, (s1, s2) in enumerate(zip(l1, l2)):
                 if set(s1) != set(s2):
-                    f.write('sets at position %d are unequal\n' % i)
-                    f.write('watchman returned: %s\n' % s1)
-                    f.write('stat returned: %s\n' % s2)
+                    f.write(b'sets at position %d are unequal\n' % i)
+                    f.write(b'watchman returned: %s\n' % s1)
+                    f.write(b'stat returned: %s\n' % s2)
         finally:
             f.close()
 
@@ -479,7 +544,7 @@
         ctx2 = self[node2]
 
     working = ctx2.rev() is None
-    parentworking = working and ctx1 == self['.']
+    parentworking = working and ctx1 == self[b'.']
     match = match or matchmod.always()
 
     # Maybe we can use this opportunity to update Watchman's state.
@@ -489,10 +554,12 @@
     # HG_PENDING is set in the environment when the dirstate is being updated
     # in the middle of a transaction; we must not update our state in that
     # case, or we risk forgetting about changes in the working copy.
-    updatestate = (parentworking and match.always() and
-                   not isinstance(ctx2, (context.workingcommitctx,
-                                         context.memctx)) and
-                   'HG_PENDING' not in encoding.environ)
+    updatestate = (
+        parentworking
+        and match.always()
+        and not isinstance(ctx2, (context.workingcommitctx, context.memctx))
+        and b'HG_PENDING' not in encoding.environ
+    )
 
     try:
         if self._fsmonitorstate.walk_on_invalidate:
@@ -509,15 +576,21 @@
             # and return the initial clock.  In this mode we assume that
             # the filesystem will be slower than parsing a potentially
             # very large Watchman result set.
-            self._watchmanclient.settimeout(
-                self._fsmonitorstate.timeout + 0.1)
+            self._watchmanclient.settimeout(self._fsmonitorstate.timeout + 0.1)
         startclock = self._watchmanclient.getcurrentclock()
     except Exception as ex:
         self._watchmanclient.clearconnection()
         _handleunavailable(self.ui, self._fsmonitorstate, ex)
         # boo, Watchman failed. bail
-        return orig(node1, node2, match, listignored, listclean,
-                    listunknown, listsubrepos)
+        return orig(
+            node1,
+            node2,
+            match,
+            listignored,
+            listclean,
+            listunknown,
+            listsubrepos,
+        )
 
     if updatestate:
         # We need info about unknown files. This may make things slower the
@@ -530,8 +603,9 @@
         ps = poststatus(startclock)
         self.addpostdsstatus(ps)
 
-    r = orig(node1, node2, match, listignored, listclean, stateunknown,
-             listsubrepos)
+    r = orig(
+        node1, node2, match, listignored, listclean, stateunknown, listsubrepos
+    )
     modified, added, removed, deleted, unknown, ignored, clean = r
 
     if not listunknown:
@@ -539,7 +613,7 @@
 
     # don't do paranoid checks if we're not going to query Watchman anyway
     full = listclean or match.traversedir is not None
-    if self._fsmonitorstate.mode == 'paranoid' and not full:
+    if self._fsmonitorstate.mode == b'paranoid' and not full:
         # run status again and fall back to the old walk this time
         self.dirstate._fsmonitordisable = True
 
@@ -547,12 +621,18 @@
         quiet = self.ui.quiet
         self.ui.quiet = True
         fout, ferr = self.ui.fout, self.ui.ferr
-        self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')
+        self.ui.fout = self.ui.ferr = open(os.devnull, b'wb')
 
         try:
             rv2 = orig(
-                node1, node2, match, listignored, listclean, listunknown,
-                listsubrepos)
+                node1,
+                node2,
+                match,
+                listignored,
+                listclean,
+                listunknown,
+                listsubrepos,
+            )
         finally:
             self.dirstate._fsmonitordisable = False
             self.ui.quiet = quiet
@@ -562,11 +642,14 @@
         with self.wlock():
             _cmpsets(
                 [modified, added, removed, deleted, unknown, ignored, clean],
-                rv2)
+                rv2,
+            )
         modified, added, removed, deleted, unknown, ignored, clean = rv2
 
     return scmutil.status(
-        modified, added, removed, deleted, unknown, ignored, clean)
+        modified, added, removed, deleted, unknown, ignored, clean
+    )
+
 
 class poststatus(object):
     def __init__(self, startclock):
@@ -575,10 +658,16 @@
     def __call__(self, wctx, status):
         clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock
         hashignore = _hashignore(wctx.repo().dirstate._ignore)
-        notefiles = (status.modified + status.added + status.removed +
-                     status.deleted + status.unknown)
+        notefiles = (
+            status.modified
+            + status.added
+            + status.removed
+            + status.deleted
+            + status.unknown
+        )
         wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles)
 
+
 def makedirstate(repo, dirstate):
     class fsmonitordirstate(dirstate.__class__):
         def _fsmonitorinit(self, repo):
@@ -605,21 +694,25 @@
     dirstate.__class__ = fsmonitordirstate
     dirstate._fsmonitorinit(repo)
 
+
 def wrapdirstate(orig, self):
     ds = orig(self)
     # only override the dirstate when Watchman is available for the repo
-    if util.safehasattr(self, '_fsmonitorstate'):
+    if util.safehasattr(self, b'_fsmonitorstate'):
         makedirstate(self, ds)
     return ds
 
+
 def extsetup(ui):
     extensions.wrapfilecache(
-        localrepo.localrepository, 'dirstate', wrapdirstate)
+        localrepo.localrepository, b'dirstate', wrapdirstate
+    )
     if pycompat.isdarwin:
         # An assist for avoiding the dangling-symlink fsevents bug
-        extensions.wrapfunction(os, 'symlink', wrapsymlink)
+        extensions.wrapfunction(os, b'symlink', wrapsymlink)
 
-    extensions.wrapfunction(merge, 'update', wrapupdate)
+    extensions.wrapfunction(merge, b'update', wrapupdate)
+
 
 def wrapsymlink(orig, source, link_name):
     ''' if we create a dangling symlink, also touch the parent dir
@@ -632,6 +725,7 @@
         except OSError:
             pass
 
+
 class state_update(object):
     ''' This context manager is responsible for dispatching the state-enter
         and state-leave signals to the watchman service. The enter and leave
@@ -641,8 +735,15 @@
         leave, respectively. Similarly, if the distance is none, it will be
         calculated based on the oldnode and newnode in the leave method.'''
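+
+    # A minimal usage sketch (illustrative, not part of this change):
+    #
+    #     with state_update(repo, name=b'rebase', partial=False):
+    #         pass  # mutate the working copy here
+    #
+    # Watchman subscribers observe a state-enter before the mutation and a
+    # matching state-leave (status 'ok' or 'failed') afterwards.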
 
-    def __init__(self, repo, name, oldnode=None, newnode=None, distance=None,
-                 partial=False):
+    def __init__(
+        self,
+        repo,
+        name,
+        oldnode=None,
+        newnode=None,
+        distance=None,
+        partial=False,
+    ):
         self.repo = repo.unfiltered()
         self.name = name
         self.oldnode = oldnode
@@ -661,16 +762,14 @@
         # merge.update is going to take the wlock almost immediately. We are
         # effectively extending the lock around several short sanity checks.
         if self.oldnode is None:
-            self.oldnode = self.repo['.'].node()
+            self.oldnode = self.repo[b'.'].node()
 
         if self.repo.currentwlock() is None:
-            if util.safehasattr(self.repo, 'wlocknostateupdate'):
+            if util.safehasattr(self.repo, b'wlocknostateupdate'):
                 self._lock = self.repo.wlocknostateupdate()
             else:
                 self._lock = self.repo.wlock()
-        self.need_leave = self._state(
-            'state-enter',
-            hex(self.oldnode))
+        self.need_leave = self._state(b'state-enter', hex(self.oldnode))
         return self
 
     def __exit__(self, type_, value, tb):
@@ -680,88 +779,125 @@
     def exit(self, abort=False):
         try:
             if self.need_leave:
-                status = 'failed' if abort else 'ok'
+                status = b'failed' if abort else b'ok'
                 if self.newnode is None:
-                    self.newnode = self.repo['.'].node()
+                    self.newnode = self.repo[b'.'].node()
                 if self.distance is None:
                     self.distance = calcdistance(
-                        self.repo, self.oldnode, self.newnode)
-                self._state(
-                    'state-leave',
-                    hex(self.newnode),
-                    status=status)
+                        self.repo, self.oldnode, self.newnode
+                    )
+                self._state(b'state-leave', hex(self.newnode), status=status)
         finally:
             self.need_leave = False
             if self._lock:
                 self._lock.release()
 
-    def _state(self, cmd, commithash, status='ok'):
-        if not util.safehasattr(self.repo, '_watchmanclient'):
+    def _state(self, cmd, commithash, status=b'ok'):
+        if not util.safehasattr(self.repo, b'_watchmanclient'):
             return False
         try:
-            self.repo._watchmanclient.command(cmd, {
-                'name': self.name,
-                'metadata': {
-                    # the target revision
-                    'rev': commithash,
-                    # approximate number of commits between current and target
-                    'distance': self.distance if self.distance else 0,
-                    # success/failure (only really meaningful for state-leave)
-                    'status': status,
-                    # whether the working copy parent is changing
-                    'partial': self.partial,
-            }})
+            self.repo._watchmanclient.command(
+                cmd,
+                {
+                    b'name': self.name,
+                    b'metadata': {
+                        # the target revision
+                        b'rev': commithash,
+                        # approximate number of commits between current and target
+                        b'distance': self.distance if self.distance else 0,
+                        # success/failure (only really meaningful for state-leave)
+                        b'status': status,
+                        # whether the working copy parent is changing
+                        b'partial': self.partial,
+                    },
+                },
+            )
             return True
         except Exception as e:
             # Swallow any errors; fire and forget
             self.repo.ui.log(
-                'watchman', 'Exception %s while running %s\n', e, cmd)
+                b'watchman', b'Exception %s while running %s\n', e, cmd
+            )
             return False
 
+
 # Estimate the distance between two nodes
 def calcdistance(repo, oldnode, newnode):
     anc = repo.changelog.ancestor(oldnode, newnode)
     ancrev = repo[anc].rev()
-    distance = (abs(repo[oldnode].rev() - ancrev)
-        + abs(repo[newnode].rev() - ancrev))
+    distance = abs(repo[oldnode].rev() - ancrev) + abs(
+        repo[newnode].rev() - ancrev
+    )
     return distance
 
+
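
For intuition, calcdistance() just sums the rev-number gaps from each endpoint back to their common ancestor. A worked sketch of the arithmetic, with purely hypothetical rev numbers:

    # hypothetical: repo[oldnode].rev() == 10, repo[newnode].rev() == 25,
    # and the common ancestor sits at rev 8
    distance = abs(10 - 8) + abs(25 - 8)
    assert distance == 19  # approximate commit count between the two nodes
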
 # Bracket working copy updates with calls to the watchman state-enter
 # and state-leave commands.  This allows clients to perform more intelligent
 # settling during bulk file change scenarios
 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
-def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
-               mergeancestor=False, labels=None, matcher=None, **kwargs):
+def wrapupdate(
+    orig,
+    repo,
+    node,
+    branchmerge,
+    force,
+    ancestor=None,
+    mergeancestor=False,
+    labels=None,
+    matcher=None,
+    **kwargs
+):
 
     distance = 0
     partial = True
-    oldnode = repo['.'].node()
+    oldnode = repo[b'.'].node()
     newnode = repo[node].node()
     if matcher is None or matcher.always():
         partial = False
         distance = calcdistance(repo.unfiltered(), oldnode, newnode)
 
-    with state_update(repo, name="hg.update", oldnode=oldnode, newnode=newnode,
-                      distance=distance, partial=partial):
+    with state_update(
+        repo,
+        name=b"hg.update",
+        oldnode=oldnode,
+        newnode=newnode,
+        distance=distance,
+        partial=partial,
+    ):
         return orig(
-            repo, node, branchmerge, force, ancestor, mergeancestor,
-            labels, matcher, **kwargs)
+            repo,
+            node,
+            branchmerge,
+            force,
+            ancestor,
+            mergeancestor,
+            labels,
+            matcher,
+            **kwargs
+        )
+
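
wrapupdate() above is the canonical caller of state_update; a minimal sketch of the same bracketing pattern around some other working-copy mutation (everything except state_update itself is hypothetical):

    # hedged sketch: tell watchman subscribers a state change is in progress
    # so they can settle; assumes `repo` is a local repository object
    with state_update(repo, name=b'hg.example-operation', partial=False):
        mutate_working_copy()  # hypothetical stand-in for the real work
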
 
 def repo_has_depth_one_nested_repo(repo):
     for f in repo.wvfs.listdir():
-        if os.path.isdir(os.path.join(repo.root, f, '.hg')):
-            msg = 'fsmonitor: sub-repository %r detected, fsmonitor disabled\n'
+        if os.path.isdir(os.path.join(repo.root, f, b'.hg')):
+            msg = b'fsmonitor: sub-repository %r detected, fsmonitor disabled\n'
             repo.ui.debug(msg % f)
             return True
     return False
 
+
 def reposetup(ui, repo):
     # We don't work with largefiles or inotify
     exts = extensions.enabled()
     for ext in _blacklist:
         if ext in exts:
-            ui.warn(_('The fsmonitor extension is incompatible with the %s '
-                      'extension and has been disabled.\n') % ext)
+            ui.warn(
+                _(
+                    b'The fsmonitor extension is incompatible with the %s '
+                    b'extension and has been disabled.\n'
+                )
+                % ext
+            )
             return
 
     if repo.local():
@@ -769,18 +905,18 @@
         #
         # querying repo[None].substate can cause a dirstate parse, which is
         # too slow. Instead, look for a file called .hgsubstate,
-        if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
+        if repo.wvfs.exists(b'.hgsubstate') or repo.wvfs.exists(b'.hgsub'):
             return
 
         if repo_has_depth_one_nested_repo(repo):
             return
 
         fsmonitorstate = state.state(repo)
-        if fsmonitorstate.mode == 'off':
+        if fsmonitorstate.mode == b'off':
             return
 
         try:
-            client = watchmanclient.client(repo)
+            client = watchmanclient.client(repo.ui, repo._root)
         except Exception as ex:
             _handleunavailable(ui, fsmonitorstate, ex)
             return
@@ -788,7 +924,7 @@
         repo._fsmonitorstate = fsmonitorstate
         repo._watchmanclient = client
 
-        dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
+        dirstate, cached = localrepo.isfilecached(repo, b'dirstate')
         if cached:
             # at this point since fsmonitorstate wasn't present,
             # repo.dirstate is not a fsmonitordirstate
@@ -805,7 +941,8 @@
             def wlock(self, *args, **kwargs):
                 l = super(fsmonitorrepo, self).wlock(*args, **kwargs)
                 if not ui.configbool(
-                    "experimental", "fsmonitor.transaction_notify"):
+                    b"experimental", b"fsmonitor.transaction_notify"
+                ):
                     return l
                 if l.held != 1:
                     return l
@@ -820,13 +957,14 @@
 
                 try:
                     l.stateupdate = None
-                    l.stateupdate = state_update(self, name="hg.transaction")
+                    l.stateupdate = state_update(self, name=b"hg.transaction")
                     l.stateupdate.enter()
                     l.releasefn = staterelease
                 except Exception as e:
                     # Swallow any errors; fire and forget
                     self.ui.log(
-                        'watchman', 'Exception in state update %s\n', e)
+                        b'watchman', b'Exception in state update %s\n', e
+                    )
                 return l
 
         repo.__class__ = fsmonitorrepo
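
The transaction bracketing above is opt-in: the wlock wrapper bails out unless the experimental knob checked by configbool() is set. A minimal hgrc sketch to enable it (section and option name taken verbatim from the code above):

    [experimental]
    fsmonitor.transaction_notify = true
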
--- a/hgext/fsmonitor/state.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fsmonitor/state.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,7 +19,8 @@
 )
 
 _version = 4
-_versionformat = ">I"
+_versionformat = b">I"
+
 
 class state(object):
     def __init__(self, repo):
@@ -29,14 +30,15 @@
         self._lastclock = None
         self._identity = util.filestat(None)
 
-        self.mode = self._ui.config('fsmonitor', 'mode')
+        self.mode = self._ui.config(b'fsmonitor', b'mode')
         self.walk_on_invalidate = self._ui.configbool(
-            'fsmonitor', 'walk_on_invalidate')
-        self.timeout = float(self._ui.config('fsmonitor', 'timeout'))
+            b'fsmonitor', b'walk_on_invalidate'
+        )
+        self.timeout = float(self._ui.config(b'fsmonitor', b'timeout'))
 
     def get(self):
         try:
-            file = self._vfs('fsmonitor.state', 'rb')
+            file = self._vfs(b'fsmonitor.state', b'rb')
         except IOError as inst:
             self._identity = util.filestat(None)
             if inst.errno != errno.ENOENT:
@@ -48,8 +50,10 @@
         versionbytes = file.read(4)
         if len(versionbytes) < 4:
             self._ui.log(
-                'fsmonitor', 'fsmonitor: state file only has %d bytes, '
-                'nuking state\n' % len(versionbytes))
+                b'fsmonitor',
+                b'fsmonitor: state file only has %d bytes, '
+                b'nuking state\n' % len(versionbytes),
+            )
             self.invalidate()
             return None, None, None
         try:
@@ -57,27 +61,35 @@
             if diskversion != _version:
                 # different version, nuke state and start over
                 self._ui.log(
-                    'fsmonitor', 'fsmonitor: version switch from %d to '
-                    '%d, nuking state\n' % (diskversion, _version))
+                    b'fsmonitor',
+                    b'fsmonitor: version switch from %d to '
+                    b'%d, nuking state\n' % (diskversion, _version),
+                )
                 self.invalidate()
                 return None, None, None
 
-            state = file.read().split('\0')
+            state = file.read().split(b'\0')
             # state = hostname\0clock\0ignorehash\0 + list of files, each
             # followed by a \0
             if len(state) < 3:
                 self._ui.log(
-                    'fsmonitor', 'fsmonitor: state file truncated (expected '
-                    '3 chunks, found %d), nuking state\n', len(state))
+                    b'fsmonitor',
+                    b'fsmonitor: state file truncated (expected '
+                    b'3 chunks, found %d), nuking state\n',
+                    len(state),
+                )
                 self.invalidate()
                 return None, None, None
             diskhostname = state[0]
             hostname = socket.gethostname()
             if diskhostname != hostname:
                 # file got moved to a different host
-                self._ui.log('fsmonitor', 'fsmonitor: stored hostname "%s" '
-                             'different from current "%s", nuking state\n' %
-                             (diskhostname, hostname))
+                self._ui.log(
+                    b'fsmonitor',
+                    b'fsmonitor: stored hostname "%s" '
+                    b'different from current "%s", nuking state\n'
+                    % (diskhostname, hostname),
+                )
                 self.invalidate()
                 return None, None, None
 
@@ -98,30 +110,33 @@
 
         # Read the identity from the file on disk rather than from the open file
         # pointer below, because the latter is actually a brand new file.
-        identity = util.filestat.frompath(self._vfs.join('fsmonitor.state'))
+        identity = util.filestat.frompath(self._vfs.join(b'fsmonitor.state'))
         if identity != self._identity:
-            self._ui.debug('skip updating fsmonitor.state: identity mismatch\n')
+            self._ui.debug(
+                b'skip updating fsmonitor.state: identity mismatch\n'
+            )
             return
 
         try:
-            file = self._vfs('fsmonitor.state', 'wb', atomictemp=True,
-                checkambig=True)
+            file = self._vfs(
+                b'fsmonitor.state', b'wb', atomictemp=True, checkambig=True
+            )
         except (IOError, OSError):
-            self._ui.warn(_("warning: unable to write out fsmonitor state\n"))
+            self._ui.warn(_(b"warning: unable to write out fsmonitor state\n"))
             return
 
         with file:
             file.write(struct.pack(_versionformat, _version))
-            file.write(socket.gethostname() + '\0')
-            file.write(clock + '\0')
-            file.write(ignorehash + '\0')
+            file.write(socket.gethostname() + b'\0')
+            file.write(clock + b'\0')
+            file.write(ignorehash + b'\0')
             if notefiles:
-                file.write('\0'.join(notefiles))
-                file.write('\0')
+                file.write(b'\0'.join(notefiles))
+                file.write(b'\0')
 
     def invalidate(self):
         try:
-            os.unlink(os.path.join(self._rootdir, '.hg', 'fsmonitor.state'))
+            os.unlink(os.path.join(self._rootdir, b'.hg', b'fsmonitor.state'))
         except OSError as inst:
             if inst.errno != errno.ENOENT:
                 raise
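
Reading get() and set() together, the on-disk layout of fsmonitor.state (version 4) is a 4-byte big-endian version, then hostname, clock, and ignore hash, each NUL-terminated, then each notable file NUL-terminated. A hedged sketch of a standalone parser under those assumptions (an illustrative helper, not part of the extension):

    import struct

    def parse_fsmonitor_state(data):
        # data: the raw bytes of .hg/fsmonitor.state
        (version,) = struct.unpack(b'>I', data[:4])
        assert version == 4
        fields = data[4:].split(b'\0')
        hostname, clock, ignorehash = fields[:3]
        notefiles = [f for f in fields[3:] if f]  # drop trailing empty chunk
        return hostname, clock, ignorehash, notefiles
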
--- a/hgext/fsmonitor/watchmanclient.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/fsmonitor/watchmanclient.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,33 +13,36 @@
 
 from . import pywatchman
 
+
 class Unavailable(Exception):
     def __init__(self, msg, warn=True, invalidate=False):
         self.msg = msg
         self.warn = warn
-        if self.msg == 'timed out waiting for response':
+        if self.msg == b'timed out waiting for response':
             self.warn = False
         self.invalidate = invalidate
 
     def __str__(self):
         if self.warn:
-            return 'warning: Watchman unavailable: %s' % self.msg
+            return b'warning: Watchman unavailable: %s' % self.msg
         else:
-            return 'Watchman unavailable: %s' % self.msg
+            return b'Watchman unavailable: %s' % self.msg
+
 
 class WatchmanNoRoot(Unavailable):
     def __init__(self, root, msg):
         self.root = root
         super(WatchmanNoRoot, self).__init__(msg)
 
+
 class client(object):
-    def __init__(self, repo, timeout=1.0):
+    def __init__(self, ui, root, timeout=1.0):
         err = None
         if not self._user:
-            err = "couldn't get user"
+            err = b"couldn't get user"
             warn = True
-        if self._user in repo.ui.configlist('fsmonitor', 'blacklistusers'):
-            err = 'user %s in blacklist' % self._user
+        if self._user in ui.configlist(b'fsmonitor', b'blacklistusers'):
+            err = b'user %s in blacklist' % self._user
             warn = False
 
         if err:
@@ -47,8 +50,8 @@
 
         self._timeout = timeout
         self._watchmanclient = None
-        self._root = repo.root
-        self._ui = repo.ui
+        self._root = root
+        self._ui = ui
         self._firsttime = True
 
     def settimeout(self, timeout):
@@ -57,10 +60,11 @@
             self._watchmanclient.setTimeout(timeout)
 
     def getcurrentclock(self):
-        result = self.command('clock')
+        result = self.command(b'clock')
         if not util.safehasattr(result, 'clock'):
-            raise Unavailable('clock result is missing clock value',
-                              invalidate=True)
+            raise Unavailable(
+                b'clock result is missing clock value', invalidate=True
+            )
         return result.clock
 
     def clearconnection(self):
@@ -82,14 +86,17 @@
         try:
             if self._watchmanclient is None:
                 self._firsttime = False
-                watchman_exe = self._ui.configpath('fsmonitor', 'watchman_exe')
+                watchman_exe = self._ui.configpath(
+                    b'fsmonitor', b'watchman_exe'
+                )
                 self._watchmanclient = pywatchman.client(
                     timeout=self._timeout,
                     useImmutableBser=True,
-                    watchman_exe=watchman_exe)
+                    watchman_exe=watchman_exe,
+                )
             return self._watchmanclient.query(*watchmanargs)
         except pywatchman.CommandError as ex:
-            if 'unable to resolve root' in ex.msg:
+            if b'unable to resolve root' in ex.msg:
                 raise WatchmanNoRoot(self._root, ex.msg)
             raise Unavailable(ex.msg)
         except pywatchman.WatchmanError as ex:
@@ -102,7 +109,7 @@
             except WatchmanNoRoot:
                 # this 'watch' command can also raise a WatchmanNoRoot if
                 # watchman refuses to accept this root
-                self._command('watch')
+                self._command(b'watch')
                 return self._command(*args)
         except Unavailable:
             # this is in an outer scope to catch Unavailable from any of the
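
The visible API change in this file: client() now takes the ui and root explicitly instead of a repo object, matching the updated call site in __init__.py above. Side by side:

    client = watchmanclient.client(repo.ui, repo._root)  # new signature
    # previously: client = watchmanclient.client(repo)
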
--- a/hgext/githelp.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/githelp.py	Mon Oct 21 11:09:48 2019 -0400
@@ -29,52 +29,58 @@
     registrar,
     scmutil,
 )
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 cmdtable = {}
 command = registrar.command(cmdtable)
 
+
 def convert(s):
-    if s.startswith("origin/"):
+    if s.startswith(b"origin/"):
         return s[7:]
-    if 'HEAD' in s:
-        s = s.replace('HEAD', '.')
+    if b'HEAD' in s:
+        s = s.replace(b'HEAD', b'.')
     # HEAD~ in git is .~1 in mercurial
-    s = re.sub('~$', '~1', s)
+    s = re.sub(b'~$', b'~1', s)
     return s
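
A few hedged examples of what convert() yields, traced from the code above (note the early return: an origin/ prefix is stripped before any HEAD or ~ rewriting happens):

    assert convert(b'origin/master') == b'master'
    assert convert(b'HEAD') == b'.'
    assert convert(b'HEAD~') == b'.~1'
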
 
-@command('githelp|git', [
-    ], _('hg githelp'),
-    helpcategory=command.CATEGORY_HELP, helpbasic=True)
+
+@command(
+    b'githelp|git',
+    [],
+    _(b'hg githelp'),
+    helpcategory=command.CATEGORY_HELP,
+    helpbasic=True,
+)
 def githelp(ui, repo, *args, **kwargs):
     '''suggests the Mercurial equivalent of the given git command
 
     Usage: hg githelp -- <git command>
     '''
 
-    if len(args) == 0 or (len(args) == 1 and args[0] =='git'):
-        raise error.Abort(_('missing git command - '
-                            'usage: hg githelp -- <git command>'))
+    if len(args) == 0 or (len(args) == 1 and args[0] == b'git'):
+        raise error.Abort(
+            _(b'missing git command - usage: hg githelp -- <git command>')
+        )
 
-    if args[0] == 'git':
+    if args[0] == b'git':
         args = args[1:]
 
     cmd = args[0]
     if not cmd in gitcommands:
-        raise error.Abort(_("error: unknown git command %s") % (cmd))
+        raise error.Abort(_(b"error: unknown git command %s") % cmd)
 
-    ui.pager('githelp')
+    ui.pager(b'githelp')
     args = args[1:]
     return gitcommands[cmd](ui, repo, *args, **kwargs)
 
+
 def parseoptions(ui, cmdoptions, args):
     cmdoptions = list(cmdoptions)
     opts = {}
@@ -87,28 +93,36 @@
             if r"requires argument" in ex.msg:
                 raise
             if (r'--' + ex.opt) in ex.msg:
-                flag = '--' + pycompat.bytestr(ex.opt)
+                flag = b'--' + pycompat.bytestr(ex.opt)
             elif (r'-' + ex.opt) in ex.msg:
-                flag = '-' + pycompat.bytestr(ex.opt)
+                flag = b'-' + pycompat.bytestr(ex.opt)
             else:
-                raise error.Abort(_("unknown option %s") %
-                                  pycompat.bytestr(ex.opt))
+                raise error.Abort(
+                    _(b"unknown option %s") % pycompat.bytestr(ex.opt)
+                )
             try:
                 args.remove(flag)
             except Exception:
-                msg = _("unknown option '%s' packed with other options")
-                hint = _("please try passing the option as its own flag: -%s")
-                raise error.Abort(msg % pycompat.bytestr(ex.opt),
-                                  hint=hint % pycompat.bytestr(ex.opt))
+                msg = _(b"unknown option '%s' packed with other options")
+                hint = _(b"please try passing the option as its own flag: -%s")
+                raise error.Abort(
+                    msg % pycompat.bytestr(ex.opt),
+                    hint=hint % pycompat.bytestr(ex.opt),
+                )
 
-            ui.warn(_("ignoring unknown option %s\n") % flag)
+            ui.warn(_(b"ignoring unknown option %s\n") % flag)
 
     args = list([convert(x) for x in args])
-    opts = dict([(k, convert(v)) if isinstance(v, str) else (k, v)
-                                 for k, v in opts.iteritems()])
+    opts = dict(
+        [
+            (k, convert(v)) if isinstance(v, str) else (k, v)
+            for k, v in pycompat.iteritems(opts)
+        ]
+    )
 
     return args, opts
 
+
 class Command(object):
     def __init__(self, name):
         self.name = name
@@ -116,22 +130,22 @@
         self.opts = {}
 
     def __bytes__(self):
-        cmd = "hg " + self.name
+        cmd = b"hg " + self.name
         if self.opts:
-            for k, values in sorted(self.opts.iteritems()):
+            for k, values in sorted(pycompat.iteritems(self.opts)):
                 for v in values:
                     if v:
                         if isinstance(v, int):
-                            fmt = ' %s %d'
+                            fmt = b' %s %d'
                         else:
-                            fmt = ' %s %s'
+                            fmt = b' %s %s'
 
                         cmd += fmt % (k, v)
                     else:
-                        cmd += " %s" % (k,)
+                        cmd += b" %s" % (k,)
         if self.args:
-            cmd += " "
-            cmd += " ".join(self.args)
+            cmd += b" "
+            cmd += b" ".join(self.args)
         return cmd
 
     __str__ = encoding.strmethod(__bytes__)
@@ -149,115 +163,129 @@
     def __and__(self, other):
         return AndCommand(self, other)
 
+
 class AndCommand(object):
     def __init__(self, left, right):
         self.left = left
         self.right = right
 
     def __str__(self):
-        return "%s && %s" % (self.left, self.right)
+        return b"%s && %s" % (self.left, self.right)
 
     def __and__(self, other):
         return AndCommand(self, other)
 
+
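
Command is a small builder for the suggested hg invocations: options accumulate via item assignment (the __setitem__/append/extend helpers live in the part of the class elided by the hunk above, but their use is visible throughout this file), __bytes__ renders the final command line, and & chains two suggestions. A hedged usage sketch:

    cmd = Command(b'commit')
    cmd[b'-m'] = b"'fix bug'"      # option with a value
    cmd.append(b'file.txt')        # positional argument
    # bytes(cmd) renders roughly: hg commit -m 'fix bug' file.txt
    both = cmd & Command(b'push')  # AndCommand joining two suggestions
    # rendered roughly: hg commit -m 'fix bug' file.txt && hg push
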
 def add(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('A', 'all', None, ''),
-        ('p', 'patch', None, ''),
+        (b'A', b'all', None, b''),
+        (b'p', b'patch', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    if (opts.get('patch')):
-        ui.status(_("note: Mercurial will commit when complete, "
-                    "as there is no staging area in Mercurial\n\n"))
-        cmd = Command('commit --interactive')
+    if opts.get(b'patch'):
+        ui.status(
+            _(
+                b"note: Mercurial will commit when complete, "
+                b"as there is no staging area in Mercurial\n\n"
+            )
+        )
+        cmd = Command(b'commit --interactive')
     else:
-        cmd = Command("add")
+        cmd = Command(b"add")
 
-        if not opts.get('all'):
+        if not opts.get(b'all'):
             cmd.extend(args)
         else:
-            ui.status(_("note: use hg addremove to remove files that have "
-                        "been deleted\n\n"))
+            ui.status(
+                _(
+                    b"note: use hg addremove to remove files that have "
+                    b"been deleted\n\n"
+                )
+            )
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def am(ui, repo, *args, **kwargs):
-    cmdoptions=[
-    ]
+    cmdoptions = []
     args, opts = parseoptions(ui, cmdoptions, args)
-    cmd = Command('import')
-    ui.status(bytes(cmd), "\n")
+    cmd = Command(b'import')
+    ui.status(bytes(cmd), b"\n")
+
 
 def apply(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('p', 'p', int, ''),
-        ('', 'directory', '', ''),
+        (b'p', b'p', int, b''),
+        (b'', b'directory', b'', b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('import --no-commit')
-    if (opts.get('p')):
-        cmd['-p'] = opts.get('p')
-    if opts.get('directory'):
-        cmd['--prefix'] = opts.get('directory')
+    cmd = Command(b'import --no-commit')
+    if opts.get(b'p'):
+        cmd[b'-p'] = opts.get(b'p')
+    if opts.get(b'directory'):
+        cmd[b'--prefix'] = opts.get(b'directory')
     cmd.extend(args)
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def bisect(ui, repo, *args, **kwargs):
-    ui.status(_("see 'hg help bisect' for how to use bisect\n\n"))
+    ui.status(_(b"see 'hg help bisect' for how to use bisect\n\n"))
+
 
 def blame(ui, repo, *args, **kwargs):
-    cmdoptions = [
-    ]
+    cmdoptions = []
     args, opts = parseoptions(ui, cmdoptions, args)
-    cmd = Command('annotate -udl')
+    cmd = Command(b'annotate -udl')
     cmd.extend([convert(v) for v in args])
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def branch(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'set-upstream', None, ''),
-        ('', 'set-upstream-to', '', ''),
-        ('d', 'delete', None, ''),
-        ('D', 'delete', None, ''),
-        ('m', 'move', None, ''),
-        ('M', 'move', None, ''),
+        (b'', b'set-upstream', None, b''),
+        (b'', b'set-upstream-to', b'', b''),
+        (b'd', b'delete', None, b''),
+        (b'D', b'delete', None, b''),
+        (b'm', b'move', None, b''),
+        (b'M', b'move', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command("bookmark")
+    cmd = Command(b"bookmark")
 
-    if opts.get('set_upstream') or opts.get('set_upstream_to'):
-        ui.status(_("Mercurial has no concept of upstream branches\n"))
+    if opts.get(b'set_upstream') or opts.get(b'set_upstream_to'):
+        ui.status(_(b"Mercurial has no concept of upstream branches\n"))
         return
-    elif opts.get('delete'):
-        cmd = Command("strip")
+    elif opts.get(b'delete'):
+        cmd = Command(b"strip")
         for branch in args:
-            cmd['-B'] = branch
+            cmd[b'-B'] = branch
         else:
-            cmd['-B'] = None
-    elif opts.get('move'):
+            cmd[b'-B'] = None
+    elif opts.get(b'move'):
         if len(args) > 0:
             if len(args) > 1:
                 old = args.pop(0)
             else:
                 # shell command to output the active bookmark for the active
                 # revision
-                old = '`hg log -T"{activebookmark}" -r .`'
+                old = b'`hg log -T"{activebookmark}" -r .`'
         else:
-            raise error.Abort(_('missing newbranch argument'))
+            raise error.Abort(_(b'missing newbranch argument'))
         new = args[0]
-        cmd['-m'] = old
+        cmd[b'-m'] = old
         cmd.append(new)
     else:
         if len(args) > 1:
-            cmd['-r'] = args[1]
+            cmd[b'-r'] = args[1]
             cmd.append(args[0])
         elif len(args) == 1:
             cmd.append(args[0])
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def ispath(repo, string):
     """
@@ -272,32 +300,33 @@
         return False
 
     cwd = repo.getcwd()
-    if cwd == '':
+    if cwd == b'':
         repopath = string
     else:
-        repopath = cwd + '/' + string
+        repopath = cwd + b'/' + string
 
     exists = repo.wvfs.exists(repopath)
     if exists:
         return True
 
-    manifest = repo['.'].manifest()
+    manifest = repo[b'.'].manifest()
 
     didexist = (repopath in manifest) or manifest.hasdir(repopath)
 
     return didexist
 
+
 def checkout(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('b', 'branch', '', ''),
-        ('B', 'branch', '', ''),
-        ('f', 'force', None, ''),
-        ('p', 'patch', None, ''),
+        (b'b', b'branch', b'', b''),
+        (b'B', b'branch', b'', b''),
+        (b'f', b'force', None, b''),
+        (b'p', b'patch', None, b''),
     ]
     paths = []
-    if '--' in args:
-        sepindex = args.index('--')
-        paths.extend(args[sepindex + 1:])
+    if b'--' in args:
+        sepindex = args.index(b'--')
+        paths.extend(args[sepindex + 1 :])
         args = args[:sepindex]
 
     args, opts = parseoptions(ui, cmdoptions, args)
@@ -309,806 +338,924 @@
         rev = args[0]
         paths = args[1:] + paths
 
-    cmd = Command('update')
+    cmd = Command(b'update')
 
-    if opts.get('force'):
+    if opts.get(b'force'):
         if paths or rev:
-            cmd['-C'] = None
+            cmd[b'-C'] = None
 
-    if opts.get('patch'):
-        cmd = Command('revert')
-        cmd['-i'] = None
+    if opts.get(b'patch'):
+        cmd = Command(b'revert')
+        cmd[b'-i'] = None
 
-    if opts.get('branch'):
+    if opts.get(b'branch'):
         if len(args) == 0:
-            cmd = Command('bookmark')
-            cmd.append(opts.get('branch'))
+            cmd = Command(b'bookmark')
+            cmd.append(opts.get(b'branch'))
         else:
             cmd.append(args[0])
-            bookcmd = Command('bookmark')
-            bookcmd.append(opts.get('branch'))
+            bookcmd = Command(b'bookmark')
+            bookcmd.append(opts.get(b'branch'))
             cmd = cmd & bookcmd
     # if there is any path argument supplied, use revert instead of update
     elif len(paths) > 0:
-        ui.status(_("note: use --no-backup to avoid creating .orig files\n\n"))
-        cmd = Command('revert')
-        if opts.get('patch'):
-            cmd['-i'] = None
+        ui.status(_(b"note: use --no-backup to avoid creating .orig files\n\n"))
+        cmd = Command(b'revert')
+        if opts.get(b'patch'):
+            cmd[b'-i'] = None
         if rev:
-            cmd['-r'] = rev
+            cmd[b'-r'] = rev
         cmd.extend(paths)
     elif rev:
-        if opts.get('patch'):
-            cmd['-r'] = rev
+        if opts.get(b'patch'):
+            cmd[b'-r'] = rev
         else:
             cmd.append(rev)
-    elif opts.get('force'):
-        cmd = Command('revert')
-        cmd['--all'] = None
+    elif opts.get(b'force'):
+        cmd = Command(b'revert')
+        cmd[b'--all'] = None
     else:
-        raise error.Abort(_("a commit must be specified"))
+        raise error.Abort(_(b"a commit must be specified"))
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def cherrypick(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'continue', None, ''),
-        ('', 'abort', None, ''),
-        ('e', 'edit', None, ''),
+        (b'', b'continue', None, b''),
+        (b'', b'abort', None, b''),
+        (b'e', b'edit', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('graft')
+    cmd = Command(b'graft')
 
-    if opts.get('edit'):
-        cmd['--edit'] = None
-    if opts.get('continue'):
-        cmd['--continue'] = None
-    elif opts.get('abort'):
-        ui.status(_("note: hg graft does not have --abort\n\n"))
+    if opts.get(b'edit'):
+        cmd[b'--edit'] = None
+    if opts.get(b'continue'):
+        cmd[b'--continue'] = None
+    elif opts.get(b'abort'):
+        ui.status(_(b"note: hg graft does not have --abort\n\n"))
         return
     else:
         cmd.extend(args)
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def clean(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('d', 'd', None, ''),
-        ('f', 'force', None, ''),
-        ('x', 'x', None, ''),
+        (b'd', b'd', None, b''),
+        (b'f', b'force', None, b''),
+        (b'x', b'x', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('purge')
-    if opts.get('x'):
-        cmd['--all'] = None
+    cmd = Command(b'purge')
+    if opts.get(b'x'):
+        cmd[b'--all'] = None
     cmd.extend(args)
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def clone(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'bare', None, ''),
-        ('n', 'no-checkout', None, ''),
-        ('b', 'branch', '', ''),
+        (b'', b'bare', None, b''),
+        (b'n', b'no-checkout', None, b''),
+        (b'b', b'branch', b'', b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
     if len(args) == 0:
-        raise error.Abort(_("a repository to clone must be specified"))
+        raise error.Abort(_(b"a repository to clone must be specified"))
 
-    cmd = Command('clone')
+    cmd = Command(b'clone')
     cmd.append(args[0])
     if len(args) > 1:
         cmd.append(args[1])
 
-    if opts.get('bare'):
-        cmd['-U'] = None
-        ui.status(_("note: Mercurial does not have bare clones. "
-                    "-U will clone the repo without checking out a commit\n\n"))
-    elif opts.get('no_checkout'):
-        cmd['-U'] = None
+    if opts.get(b'bare'):
+        cmd[b'-U'] = None
+        ui.status(
+            _(
+                b"note: Mercurial does not have bare clones. "
+                b"-U will clone the repo without checking out a commit\n\n"
+            )
+        )
+    elif opts.get(b'no_checkout'):
+        cmd[b'-U'] = None
 
-    if opts.get('branch'):
-        cocmd = Command("update")
-        cocmd.append(opts.get('branch'))
+    if opts.get(b'branch'):
+        cocmd = Command(b"update")
+        cocmd.append(opts.get(b'branch'))
         cmd = cmd & cocmd
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def commit(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('a', 'all', None, ''),
-        ('m', 'message', '', ''),
-        ('p', 'patch', None, ''),
-        ('C', 'reuse-message', '', ''),
-        ('F', 'file', '', ''),
-        ('', 'author', '', ''),
-        ('', 'date', '', ''),
-        ('', 'amend', None, ''),
-        ('', 'no-edit', None, ''),
+        (b'a', b'all', None, b''),
+        (b'm', b'message', b'', b''),
+        (b'p', b'patch', None, b''),
+        (b'C', b'reuse-message', b'', b''),
+        (b'F', b'file', b'', b''),
+        (b'', b'author', b'', b''),
+        (b'', b'date', b'', b''),
+        (b'', b'amend', None, b''),
+        (b'', b'no-edit', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('commit')
-    if opts.get('patch'):
-        cmd = Command('commit --interactive')
+    cmd = Command(b'commit')
+    if opts.get(b'patch'):
+        cmd = Command(b'commit --interactive')
 
-    if opts.get('amend'):
-        if opts.get('no_edit'):
-            cmd = Command('amend')
+    if opts.get(b'amend'):
+        if opts.get(b'no_edit'):
+            cmd = Command(b'amend')
         else:
-            cmd['--amend'] = None
+            cmd[b'--amend'] = None
 
-    if opts.get('reuse_message'):
-        cmd['-M'] = opts.get('reuse_message')
+    if opts.get(b'reuse_message'):
+        cmd[b'-M'] = opts.get(b'reuse_message')
+
+    if opts.get(b'message'):
+        cmd[b'-m'] = b"'%s'" % (opts.get(b'message'),)
 
-    if opts.get('message'):
-        cmd['-m'] = "'%s'" % (opts.get('message'),)
-
-    if opts.get('all'):
-        ui.status(_("note: Mercurial doesn't have a staging area, "
-                    "so there is no --all. -A will add and remove files "
-                    "for you though.\n\n"))
+    if opts.get(b'all'):
+        ui.status(
+            _(
+                b"note: Mercurial doesn't have a staging area, "
+                b"so there is no --all. -A will add and remove files "
+                b"for you though.\n\n"
+            )
+        )
 
-    if opts.get('file'):
-        cmd['-l'] = opts.get('file')
+    if opts.get(b'file'):
+        cmd[b'-l'] = opts.get(b'file')
 
-    if opts.get('author'):
-        cmd['-u'] = opts.get('author')
+    if opts.get(b'author'):
+        cmd[b'-u'] = opts.get(b'author')
 
-    if opts.get('date'):
-        cmd['-d'] = opts.get('date')
+    if opts.get(b'date'):
+        cmd[b'-d'] = opts.get(b'date')
 
     cmd.extend(args)
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def deprecated(ui, repo, *args, **kwargs):
-    ui.warn(_('this command has been deprecated in the git project, '
-              'thus isn\'t supported by this tool\n\n'))
+    ui.warn(
+        _(
+            b'this command has been deprecated in the git project, '
+            b'thus isn\'t supported by this tool\n\n'
+        )
+    )
+
 
 def diff(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('a', 'all', None, ''),
-        ('', 'cached', None, ''),
-        ('R', 'reverse', None, ''),
+        (b'a', b'all', None, b''),
+        (b'', b'cached', None, b''),
+        (b'R', b'reverse', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('diff')
+    cmd = Command(b'diff')
 
-    if opts.get('cached'):
-        ui.status(_('note: Mercurial has no concept of a staging area, '
-                    'so --cached does nothing\n\n'))
+    if opts.get(b'cached'):
+        ui.status(
+            _(
+                b'note: Mercurial has no concept of a staging area, '
+                b'so --cached does nothing\n\n'
+            )
+        )
 
-    if opts.get('reverse'):
-        cmd['--reverse'] = None
+    if opts.get(b'reverse'):
+        cmd[b'--reverse'] = None
 
     for a in list(args):
         args.remove(a)
         try:
             repo.revs(a)
-            cmd['-r'] = a
+            cmd[b'-r'] = a
         except Exception:
             cmd.append(a)
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def difftool(ui, repo, *args, **kwargs):
-    ui.status(_('Mercurial does not enable external difftool by default. You '
-        'need to enable the extdiff extension in your .hgrc file by adding\n'
-        'extdiff =\n'
-        'to the [extensions] section and then running\n\n'
-        'hg extdiff -p <program>\n\n'
-        'See \'hg help extdiff\' and \'hg help -e extdiff\' for more '
-        'information.\n'))
+    ui.status(
+        _(
+            b'Mercurial does not enable external difftool by default. You '
+            b'need to enable the extdiff extension in your .hgrc file by adding\n'
+            b'extdiff =\n'
+            b'to the [extensions] section and then running\n\n'
+            b'hg extdiff -p <program>\n\n'
+            b'See \'hg help extdiff\' and \'hg help -e extdiff\' for more '
+            b'information.\n'
+        )
+    )
+
 
 def fetch(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'all', None, ''),
-        ('f', 'force', None, ''),
+        (b'', b'all', None, b''),
+        (b'f', b'force', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('pull')
+    cmd = Command(b'pull')
 
     if len(args) > 0:
         cmd.append(args[0])
         if len(args) > 1:
-            ui.status(_("note: Mercurial doesn't have refspecs. "
-                        "-r can be used to specify which commits you want to "
-                        "pull. -B can be used to specify which bookmark you "
-                        "want to pull.\n\n"))
+            ui.status(
+                _(
+                    b"note: Mercurial doesn't have refspecs. "
+                    b"-r can be used to specify which commits you want to "
+                    b"pull. -B can be used to specify which bookmark you "
+                    b"want to pull.\n\n"
+                )
+            )
             for v in args[1:]:
                 if v in repo._bookmarks:
-                    cmd['-B'] = v
+                    cmd[b'-B'] = v
                 else:
-                    cmd['-r'] = v
+                    cmd[b'-r'] = v
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def grep(ui, repo, *args, **kwargs):
-    cmdoptions = [
-    ]
+    cmdoptions = []
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('grep')
+    cmd = Command(b'grep')
 
     # For basic usage, git grep and hg grep are the same. They both have the
     # pattern first, followed by paths.
     cmd.extend(args)
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def init(ui, repo, *args, **kwargs):
-    cmdoptions = [
-    ]
+    cmdoptions = []
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('init')
+    cmd = Command(b'init')
 
     if len(args) > 0:
         cmd.append(args[0])
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def log(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'follow', None, ''),
-        ('', 'decorate', None, ''),
-        ('n', 'number', '', ''),
-        ('1', '1', None, ''),
-        ('', 'pretty', '', ''),
-        ('', 'format', '', ''),
-        ('', 'oneline', None, ''),
-        ('', 'stat', None, ''),
-        ('', 'graph', None, ''),
-        ('p', 'patch', None, ''),
+        (b'', b'follow', None, b''),
+        (b'', b'decorate', None, b''),
+        (b'n', b'number', b'', b''),
+        (b'1', b'1', None, b''),
+        (b'', b'pretty', b'', b''),
+        (b'', b'format', b'', b''),
+        (b'', b'oneline', None, b''),
+        (b'', b'stat', None, b''),
+        (b'', b'graph', None, b''),
+        (b'p', b'patch', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
-    ui.status(_('note: -v prints the entire commit message like Git does. To '
-                'print just the first line, drop the -v.\n\n'))
-    ui.status(_("note: see hg help revset for information on how to filter "
-                "log output\n\n"))
+    ui.status(
+        _(
+            b'note: -v prints the entire commit message like Git does. To '
+            b'print just the first line, drop the -v.\n\n'
+        )
+    )
+    ui.status(
+        _(
+            b"note: see hg help revset for information on how to filter "
+            b"log output\n\n"
+        )
+    )
 
-    cmd = Command('log')
-    cmd['-v'] = None
+    cmd = Command(b'log')
+    cmd[b'-v'] = None
 
-    if opts.get('number'):
-        cmd['-l'] = opts.get('number')
-    if opts.get('1'):
-        cmd['-l'] = '1'
-    if opts.get('stat'):
-        cmd['--stat'] = None
-    if opts.get('graph'):
-        cmd['-G'] = None
-    if opts.get('patch'):
-        cmd['-p'] = None
+    if opts.get(b'number'):
+        cmd[b'-l'] = opts.get(b'number')
+    if opts.get(b'1'):
+        cmd[b'-l'] = b'1'
+    if opts.get(b'stat'):
+        cmd[b'--stat'] = None
+    if opts.get(b'graph'):
+        cmd[b'-G'] = None
+    if opts.get(b'patch'):
+        cmd[b'-p'] = None
 
-    if opts.get('pretty') or opts.get('format') or opts.get('oneline'):
-        format = opts.get('format', '')
-        if 'format:' in format:
-            ui.status(_("note: --format format:??? equates to Mercurial's "
-                        "--template. See hg help templates for more info.\n\n"))
-            cmd['--template'] = '???'
+    if opts.get(b'pretty') or opts.get(b'format') or opts.get(b'oneline'):
+        format = opts.get(b'format', b'')
+        if b'format:' in format:
+            ui.status(
+                _(
+                    b"note: --format format:??? equates to Mercurial's "
+                    b"--template. See hg help templates for more info.\n\n"
+                )
+            )
+            cmd[b'--template'] = b'???'
         else:
-            ui.status(_("note: --pretty/format/oneline equate to Mercurial's "
-                        "--style or --template. See hg help templates for "
-                        "more info.\n\n"))
-            cmd['--style'] = '???'
+            ui.status(
+                _(
+                    b"note: --pretty/format/oneline equate to Mercurial's "
+                    b"--style or --template. See hg help templates for "
+                    b"more info.\n\n"
+                )
+            )
+            cmd[b'--style'] = b'???'
 
     if len(args) > 0:
-        if '..' in args[0]:
-            since, until = args[0].split('..')
-            cmd['-r'] = "'%s::%s'" % (since, until)
+        if b'..' in args[0]:
+            since, until = args[0].split(b'..')
+            cmd[b'-r'] = b"'%s::%s'" % (since, until)
             del args[0]
         cmd.extend(args)
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def lsfiles(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('c', 'cached', None, ''),
-        ('d', 'deleted', None, ''),
-        ('m', 'modified', None, ''),
-        ('o', 'others', None, ''),
-        ('i', 'ignored', None, ''),
-        ('s', 'stage', None, ''),
-        ('z', '_zero', None, ''),
+        (b'c', b'cached', None, b''),
+        (b'd', b'deleted', None, b''),
+        (b'm', b'modified', None, b''),
+        (b'o', b'others', None, b''),
+        (b'i', b'ignored', None, b''),
+        (b's', b'stage', None, b''),
+        (b'z', b'_zero', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    if (opts.get('modified') or opts.get('deleted')
-        or opts.get('others') or opts.get('ignored')):
-        cmd = Command('status')
-        if opts.get('deleted'):
-            cmd['-d'] = None
-        if opts.get('modified'):
-            cmd['-m'] = None
-        if opts.get('others'):
-            cmd['-o'] = None
-        if opts.get('ignored'):
-            cmd['-i'] = None
+    if (
+        opts.get(b'modified')
+        or opts.get(b'deleted')
+        or opts.get(b'others')
+        or opts.get(b'ignored')
+    ):
+        cmd = Command(b'status')
+        if opts.get(b'deleted'):
+            cmd[b'-d'] = None
+        if opts.get(b'modified'):
+            cmd[b'-m'] = None
+        if opts.get(b'others'):
+            cmd[b'-o'] = None
+        if opts.get(b'ignored'):
+            cmd[b'-i'] = None
     else:
-        cmd = Command('files')
-    if opts.get('stage'):
-        ui.status(_("note: Mercurial doesn't have a staging area, ignoring "
-                  "--stage\n"))
-    if opts.get('_zero'):
-        cmd['-0'] = None
-    cmd.append('.')
+        cmd = Command(b'files')
+    if opts.get(b'stage'):
+        ui.status(
+            _(
+                b"note: Mercurial doesn't have a staging area, ignoring "
+                b"--stage\n"
+            )
+        )
+    if opts.get(b'_zero'):
+        cmd[b'-0'] = None
+    cmd.append(b'.')
     for include in args:
-        cmd['-I'] = procutil.shellquote(include)
+        cmd[b'-I'] = procutil.shellquote(include)
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def merge(ui, repo, *args, **kwargs):
-    cmdoptions = [
-    ]
+    cmdoptions = []
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('merge')
+    cmd = Command(b'merge')
 
     if len(args) > 0:
         cmd.append(args[len(args) - 1])
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def mergebase(ui, repo, *args, **kwargs):
     cmdoptions = []
     args, opts = parseoptions(ui, cmdoptions, args)
 
     if len(args) != 2:
-        args = ['A', 'B']
+        args = [b'A', b'B']
+
+    cmd = Command(
+        b"log -T '{node}\\n' -r 'ancestor(%s,%s)'" % (args[0], args[1])
+    )
 
-    cmd = Command("log -T '{node}\\n' -r 'ancestor(%s,%s)'"
-                  % (args[0], args[1]))
+    ui.status(
+        _(b'note: ancestors() is part of the revset language\n'),
+        _(b"(learn more about revsets with 'hg help revsets')\n\n"),
+    )
+    ui.status((bytes(cmd)), b"\n")
 
-    ui.status(_('note: ancestors() is part of the revset language\n'),
-              _("(learn more about revsets with 'hg help revsets')\n\n"))
-    ui.status((bytes(cmd)), "\n")
 
 def mergetool(ui, repo, *args, **kwargs):
     cmdoptions = []
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command("resolve")
+    cmd = Command(b"resolve")
 
     if len(args) == 0:
-        cmd['--all'] = None
+        cmd[b'--all'] = None
     cmd.extend(args)
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def mv(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('f', 'force', None, ''),
-        ('n', 'dry-run', None, ''),
+        (b'f', b'force', None, b''),
+        (b'n', b'dry-run', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('mv')
+    cmd = Command(b'mv')
     cmd.extend(args)
 
-    if opts.get('force'):
-        cmd['-f'] = None
-    if opts.get('dry_run'):
-        cmd['-n'] = None
+    if opts.get(b'force'):
+        cmd[b'-f'] = None
+    if opts.get(b'dry_run'):
+        cmd[b'-n'] = None
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def pull(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'all', None, ''),
-        ('f', 'force', None, ''),
-        ('r', 'rebase', None, ''),
+        (b'', b'all', None, b''),
+        (b'f', b'force', None, b''),
+        (b'r', b'rebase', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('pull')
-    cmd['--rebase'] = None
+    cmd = Command(b'pull')
+    cmd[b'--rebase'] = None
 
     if len(args) > 0:
         cmd.append(args[0])
         if len(args) > 1:
-            ui.status(_("note: Mercurial doesn't have refspecs. "
-                        "-r can be used to specify which commits you want to "
-                        "pull. -B can be used to specify which bookmark you "
-                        "want to pull.\n\n"))
+            ui.status(
+                _(
+                    b"note: Mercurial doesn't have refspecs. "
+                    b"-r can be used to specify which commits you want to "
+                    b"pull. -B can be used to specify which bookmark you "
+                    b"want to pull.\n\n"
+                )
+            )
             for v in args[1:]:
                 if v in repo._bookmarks:
-                    cmd['-B'] = v
+                    cmd[b'-B'] = v
                 else:
-                    cmd['-r'] = v
+                    cmd[b'-r'] = v
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def push(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'all', None, ''),
-        ('f', 'force', None, ''),
+        (b'', b'all', None, b''),
+        (b'f', b'force', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('push')
+    cmd = Command(b'push')
 
     if len(args) > 0:
         cmd.append(args[0])
         if len(args) > 1:
-            ui.status(_("note: Mercurial doesn't have refspecs. "
-                        "-r can be used to specify which commits you want "
-                        "to push. -B can be used to specify which bookmark "
-                        "you want to push.\n\n"))
+            ui.status(
+                _(
+                    b"note: Mercurial doesn't have refspecs. "
+                    b"-r can be used to specify which commits you want "
+                    b"to push. -B can be used to specify which bookmark "
+                    b"you want to push.\n\n"
+                )
+            )
             for v in args[1:]:
                 if v in repo._bookmarks:
-                    cmd['-B'] = v
+                    cmd[b'-B'] = v
                 else:
-                    cmd['-r'] = v
+                    cmd[b'-r'] = v
 
-    if opts.get('force'):
-        cmd['-f'] = None
+    if opts.get(b'force'):
+        cmd[b'-f'] = None
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def rebase(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'all', None, ''),
-        ('i', 'interactive', None, ''),
-        ('', 'onto', '', ''),
-        ('', 'abort', None, ''),
-        ('', 'continue', None, ''),
-        ('', 'skip', None, ''),
+        (b'', b'all', None, b''),
+        (b'i', b'interactive', None, b''),
+        (b'', b'onto', b'', b''),
+        (b'', b'abort', None, b''),
+        (b'', b'continue', None, b''),
+        (b'', b'skip', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    if opts.get('interactive'):
-        ui.status(_("note: hg histedit does not perform a rebase. "
-                    "It just edits history.\n\n"))
-        cmd = Command('histedit')
+    if opts.get(b'interactive'):
+        ui.status(
+            _(
+                b"note: hg histedit does not perform a rebase. "
+                b"It just edits history.\n\n"
+            )
+        )
+        cmd = Command(b'histedit')
         if len(args) > 0:
-            ui.status(_("also note: 'hg histedit' will automatically detect"
-                      " your stack, so no second argument is necessary\n\n"))
-        ui.status((bytes(cmd)), "\n")
+            ui.status(
+                _(
+                    b"also note: 'hg histedit' will automatically detect"
+                    b" your stack, so no second argument is necessary\n\n"
+                )
+            )
+        ui.status((bytes(cmd)), b"\n")
         return
 
-    if opts.get('skip'):
-        cmd = Command('revert --all -r .')
-        ui.status((bytes(cmd)), "\n")
+    if opts.get(b'skip'):
+        cmd = Command(b'revert --all -r .')
+        ui.status((bytes(cmd)), b"\n")
 
-    cmd = Command('rebase')
+    cmd = Command(b'rebase')
+
+    if opts.get(b'continue') or opts.get(b'skip'):
+        cmd[b'--continue'] = None
+    if opts.get(b'abort'):
+        cmd[b'--abort'] = None
 
-    if opts.get('continue') or opts.get('skip'):
-        cmd['--continue'] = None
-    if opts.get('abort'):
-        cmd['--abort'] = None
-
-    if opts.get('onto'):
-        ui.status(_("note: if you're trying to lift a commit off one branch, "
-                    "try hg rebase -d <destination commit> -s <commit to be "
-                    "lifted>\n\n"))
-        cmd['-d'] = convert(opts.get('onto'))
+    if opts.get(b'onto'):
+        ui.status(
+            _(
+                b"note: if you're trying to lift a commit off one branch, "
+                b"try hg rebase -d <destination commit> -s <commit to be "
+                b"lifted>\n\n"
+            )
+        )
+        cmd[b'-d'] = convert(opts.get(b'onto'))
         if len(args) < 2:
-            raise error.Abort(_("expected format: git rebase --onto X Y Z"))
-        cmd['-s'] = "'::%s - ::%s'" % (convert(args[1]), convert(args[0]))
+            raise error.Abort(_(b"expected format: git rebase --onto X Y Z"))
+        cmd[b'-s'] = b"'::%s - ::%s'" % (convert(args[1]), convert(args[0]))
     else:
         if len(args) == 1:
-            cmd['-d'] = convert(args[0])
+            cmd[b'-d'] = convert(args[0])
         elif len(args) == 2:
-            cmd['-d'] = convert(args[0])
-            cmd['-b'] = convert(args[1])
+            cmd[b'-d'] = convert(args[0])
+            cmd[b'-b'] = convert(args[1])
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def reflog(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'all', None, ''),
+        (b'', b'all', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('journal')
-    if opts.get('all'):
-        cmd['--all'] = None
+    cmd = Command(b'journal')
+    if opts.get(b'all'):
+        cmd[b'--all'] = None
     if len(args) > 0:
         cmd.append(args[0])
 
-    ui.status(bytes(cmd), "\n\n")
-    ui.status(_("note: in hg commits can be deleted from repo but we always"
-              " have backups\n"))
+    ui.status(bytes(cmd), b"\n\n")
+    ui.status(
+        _(
+            b"note: in hg commits can be deleted from repo but we always"
+            b" have backups\n"
+        )
+    )
+
 
 def reset(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'soft', None, ''),
-        ('', 'hard', None, ''),
-        ('', 'mixed', None, ''),
+        (b'', b'soft', None, b''),
+        (b'', b'hard', None, b''),
+        (b'', b'mixed', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    commit = convert(args[0] if len(args) > 0 else '.')
-    hard = opts.get('hard')
+    commit = convert(args[0] if len(args) > 0 else b'.')
+    hard = opts.get(b'hard')
 
-    if opts.get('mixed'):
-        ui.status(_('note: --mixed has no meaning since Mercurial has no '
-                    'staging area\n\n'))
-    if opts.get('soft'):
-        ui.status(_('note: --soft has no meaning since Mercurial has no '
-                    'staging area\n\n'))
+    if opts.get(b'mixed'):
+        ui.status(
+            _(
+                b'note: --mixed has no meaning since Mercurial has no '
+                b'staging area\n\n'
+            )
+        )
+    if opts.get(b'soft'):
+        ui.status(
+            _(
+                b'note: --soft has no meaning since Mercurial has no '
+                b'staging area\n\n'
+            )
+        )
 
-    cmd = Command('update')
+    cmd = Command(b'update')
     if hard:
-        cmd.append('--clean')
+        cmd.append(b'--clean')
 
     cmd.append(commit)
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def revert(ui, repo, *args, **kwargs):
-    cmdoptions = [
-    ]
+    cmdoptions = []
     args, opts = parseoptions(ui, cmdoptions, args)
 
     if len(args) > 1:
-        ui.status(_("note: hg backout doesn't support multiple commits at "
-                    "once\n\n"))
+        ui.status(
+            _(
+                b"note: hg backout doesn't support multiple commits at "
+                b"once\n\n"
+            )
+        )
 
-    cmd = Command('backout')
+    cmd = Command(b'backout')
     if args:
         cmd.append(args[0])
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def revparse(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'show-cdup', None, ''),
-        ('', 'show-toplevel', None, ''),
+        (b'', b'show-cdup', None, b''),
+        (b'', b'show-toplevel', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    if opts.get('show_cdup') or opts.get('show_toplevel'):
-        cmd = Command('root')
-        if opts.get('show_cdup'):
-            ui.status(_("note: hg root prints the root of the repository\n\n"))
-        ui.status((bytes(cmd)), "\n")
+    if opts.get(b'show_cdup') or opts.get(b'show_toplevel'):
+        cmd = Command(b'root')
+        if opts.get(b'show_cdup'):
+            ui.status(_(b"note: hg root prints the root of the repository\n\n"))
+        ui.status((bytes(cmd)), b"\n")
     else:
-        ui.status(_("note: see hg help revset for how to refer to commits\n"))
+        ui.status(_(b"note: see hg help revset for how to refer to commits\n"))
+
 
 def rm(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('f', 'force', None, ''),
-        ('n', 'dry-run', None, ''),
+        (b'f', b'force', None, b''),
+        (b'n', b'dry-run', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('rm')
+    cmd = Command(b'rm')
     cmd.extend(args)
 
-    if opts.get('force'):
-        cmd['-f'] = None
-    if opts.get('dry_run'):
-        cmd['-n'] = None
+    if opts.get(b'force'):
+        cmd[b'-f'] = None
+    if opts.get(b'dry_run'):
+        cmd[b'-n'] = None
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def show(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'name-status', None, ''),
-        ('', 'pretty', '', ''),
-        ('U', 'unified', int, ''),
+        (b'', b'name-status', None, b''),
+        (b'', b'pretty', b'', b''),
+        (b'U', b'unified', int, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    if opts.get('name_status'):
-        if opts.get('pretty') == 'format:':
-            cmd = Command('status')
-            cmd['--change'] = '.'
+    if opts.get(b'name_status'):
+        if opts.get(b'pretty') == b'format:':
+            cmd = Command(b'status')
+            cmd[b'--change'] = b'.'
         else:
-            cmd = Command('log')
-            cmd.append('--style status')
-            cmd.append('-r .')
+            cmd = Command(b'log')
+            cmd.append(b'--style status')
+            cmd.append(b'-r .')
     elif len(args) > 0:
         if ispath(repo, args[0]):
-            cmd = Command('cat')
+            cmd = Command(b'cat')
         else:
-            cmd = Command('export')
+            cmd = Command(b'export')
         cmd.extend(args)
-        if opts.get('unified'):
-            cmd.append('--config diff.unified=%d' % (opts['unified'],))
-    elif opts.get('unified'):
-        cmd = Command('export')
-        cmd.append('--config diff.unified=%d' % (opts['unified'],))
+        if opts.get(b'unified'):
+            cmd.append(b'--config diff.unified=%d' % (opts[b'unified'],))
+    elif opts.get(b'unified'):
+        cmd = Command(b'export')
+        cmd.append(b'--config diff.unified=%d' % (opts[b'unified'],))
     else:
-        cmd = Command('export')
+        cmd = Command(b'export')
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def stash(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('p', 'patch', None, ''),
+        (b'p', b'patch', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('shelve')
+    cmd = Command(b'shelve')
     action = args[0] if len(args) > 0 else None
 
-    if action == 'list':
-        cmd['-l'] = None
-        if opts.get('patch'):
-            cmd['-p'] = None
-    elif action == 'show':
-        if opts.get('patch'):
-            cmd['-p'] = None
+    if action == b'list':
+        cmd[b'-l'] = None
+        if opts.get(b'patch'):
+            cmd[b'-p'] = None
+    elif action == b'show':
+        if opts.get(b'patch'):
+            cmd[b'-p'] = None
         else:
-            cmd['--stat'] = None
+            cmd[b'--stat'] = None
         if len(args) > 1:
             cmd.append(args[1])
-    elif action == 'clear':
-        cmd['--cleanup'] = None
-    elif action == 'drop':
-        cmd['-d'] = None
+    elif action == b'clear':
+        cmd[b'--cleanup'] = None
+    elif action == b'drop':
+        cmd[b'-d'] = None
         if len(args) > 1:
             cmd.append(args[1])
         else:
-            cmd.append('<shelve name>')
-    elif action == 'pop' or action == 'apply':
-        cmd = Command('unshelve')
+            cmd.append(b'<shelve name>')
+    elif action == b'pop' or action == b'apply':
+        cmd = Command(b'unshelve')
         if len(args) > 1:
             cmd.append(args[1])
-        if action == 'apply':
-            cmd['--keep'] = None
-    elif action == 'branch' or action == 'create':
-        ui.status(_("note: Mercurial doesn't have equivalents to the "
-                    "git stash branch or create actions\n\n"))
+        if action == b'apply':
+            cmd[b'--keep'] = None
+    elif action == b'branch' or action == b'create':
+        ui.status(
+            _(
+                b"note: Mercurial doesn't have equivalents to the "
+                b"git stash branch or create actions\n\n"
+            )
+        )
         return
     else:
         if len(args) > 0:
-            if args[0] != 'save':
-                cmd['--name'] = args[0]
+            if args[0] != b'save':
+                cmd[b'--name'] = args[0]
             elif len(args) > 1:
-                cmd['--name'] = args[1]
+                cmd[b'--name'] = args[1]
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def status(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('', 'ignored', None, ''),
+        (b'', b'ignored', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('status')
+    cmd = Command(b'status')
     cmd.extend(args)
 
-    if opts.get('ignored'):
-        cmd['-i'] = None
+    if opts.get(b'ignored'):
+        cmd[b'-i'] = None
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def svn(ui, repo, *args, **kwargs):
     if not args:
-        raise error.Abort(_('missing svn command'))
+        raise error.Abort(_(b'missing svn command'))
     svncmd = args[0]
     if svncmd not in gitsvncommands:
-        raise error.Abort(_('unknown git svn command "%s"') % (svncmd))
+        raise error.Abort(_(b'unknown git svn command "%s"') % svncmd)
 
     args = args[1:]
     return gitsvncommands[svncmd](ui, repo, *args, **kwargs)
 
+
 def svndcommit(ui, repo, *args, **kwargs):
-    cmdoptions = [
-    ]
+    cmdoptions = []
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('push')
+    cmd = Command(b'push')
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def svnfetch(ui, repo, *args, **kwargs):
-    cmdoptions = [
-    ]
+    cmdoptions = []
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    cmd = Command('pull')
-    cmd.append('default-push')
+    cmd = Command(b'pull')
+    cmd.append(b'default-push')
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def svnfindrev(ui, repo, *args, **kwargs):
-    cmdoptions = [
-    ]
+    cmdoptions = []
     args, opts = parseoptions(ui, cmdoptions, args)
 
     if not args:
-        raise error.Abort(_('missing find-rev argument'))
+        raise error.Abort(_(b'missing find-rev argument'))
 
-    cmd = Command('log')
-    cmd['-r'] = args[0]
+    cmd = Command(b'log')
+    cmd[b'-r'] = args[0]
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def svnrebase(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('l', 'local', None, ''),
+        (b'l', b'local', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    pullcmd = Command('pull')
-    pullcmd.append('default-push')
-    rebasecmd = Command('rebase')
-    rebasecmd.append('tip')
+    pullcmd = Command(b'pull')
+    pullcmd.append(b'default-push')
+    rebasecmd = Command(b'rebase')
+    rebasecmd.append(b'tip')
 
     cmd = pullcmd & rebasecmd
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 def tag(ui, repo, *args, **kwargs):
     cmdoptions = [
-        ('f', 'force', None, ''),
-        ('l', 'list', None, ''),
-        ('d', 'delete', None, ''),
+        (b'f', b'force', None, b''),
+        (b'l', b'list', None, b''),
+        (b'd', b'delete', None, b''),
     ]
     args, opts = parseoptions(ui, cmdoptions, args)
 
-    if opts.get('list'):
-        cmd = Command('tags')
+    if opts.get(b'list'):
+        cmd = Command(b'tags')
     else:
-        cmd = Command('tag')
+        cmd = Command(b'tag')
 
         if not args:
-            raise error.Abort(_('missing tag argument'))
+            raise error.Abort(_(b'missing tag argument'))
 
         cmd.append(args[0])
         if len(args) > 1:
-            cmd['-r'] = args[1]
+            cmd[b'-r'] = args[1]
 
-        if opts.get('delete'):
-            cmd['--remove'] = None
+        if opts.get(b'delete'):
+            cmd[b'--remove'] = None
 
-        if opts.get('force'):
-            cmd['-f'] = None
+        if opts.get(b'force'):
+            cmd[b'-f'] = None
 
-    ui.status((bytes(cmd)), "\n")
+    ui.status((bytes(cmd)), b"\n")
+
 
 gitcommands = {
-    'add': add,
-    'am': am,
-    'apply': apply,
-    'bisect': bisect,
-    'blame': blame,
-    'branch': branch,
-    'checkout': checkout,
-    'cherry-pick': cherrypick,
-    'clean': clean,
-    'clone': clone,
-    'commit': commit,
-    'diff': diff,
-    'difftool': difftool,
-    'fetch': fetch,
-    'grep': grep,
-    'init': init,
-    'log': log,
-    'ls-files': lsfiles,
-    'merge': merge,
-    'merge-base': mergebase,
-    'mergetool': mergetool,
-    'mv': mv,
-    'pull': pull,
-    'push': push,
-    'rebase': rebase,
-    'reflog': reflog,
-    'reset': reset,
-    'revert': revert,
-    'rev-parse': revparse,
-    'rm': rm,
-    'show': show,
-    'stash': stash,
-    'status': status,
-    'svn': svn,
-    'tag': tag,
-    'whatchanged': deprecated,
+    b'add': add,
+    b'am': am,
+    b'apply': apply,
+    b'bisect': bisect,
+    b'blame': blame,
+    b'branch': branch,
+    b'checkout': checkout,
+    b'cherry-pick': cherrypick,
+    b'clean': clean,
+    b'clone': clone,
+    b'commit': commit,
+    b'diff': diff,
+    b'difftool': difftool,
+    b'fetch': fetch,
+    b'grep': grep,
+    b'init': init,
+    b'log': log,
+    b'ls-files': lsfiles,
+    b'merge': merge,
+    b'merge-base': mergebase,
+    b'mergetool': mergetool,
+    b'mv': mv,
+    b'pull': pull,
+    b'push': push,
+    b'rebase': rebase,
+    b'reflog': reflog,
+    b'reset': reset,
+    b'revert': revert,
+    b'rev-parse': revparse,
+    b'rm': rm,
+    b'show': show,
+    b'stash': stash,
+    b'status': status,
+    b'svn': svn,
+    b'tag': tag,
+    b'whatchanged': deprecated,
 }
 
 gitsvncommands = {
-    'dcommit': svndcommit,
-    'fetch': svnfetch,
-    'find-rev': svnfindrev,
-    'rebase': svnrebase,
+    b'dcommit': svndcommit,
+    b'fetch': svnfetch,
+    b'find-rev': svnfindrev,
+    b'rebase': svnrebase,
 }
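
The ``gitcommands`` and ``gitsvncommands`` tables above are plain dispatch
maps from byte-string command names to translator functions; githelp looks
the requested git subcommand up and aborts on an unknown name, as the
``svn()`` handler does. A minimal stand-alone sketch of that pattern, with
hypothetical names rather than githelp's real entry point::

    # Dispatch-table sketch (illustrative only). Unknown names abort,
    # mirroring the error.Abort in svn() above.
    def dispatch(table, name, *args):
        try:
            handler = table[name]
        except KeyError:
            raise SystemExit('unknown command "%s"' % name)
        return handler(*args)

    def add(*files):
        return 'hg add ' + ' '.join(files)

    commands = {'add': add}
    print(dispatch(commands, 'add', 'a.txt'))  # -> hg add a.txt
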
--- a/hgext/gpg.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/gpg.py	Mon Oct 21 11:09:48 2019 -0400
@@ -31,37 +31,36 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('gpg', 'cmd',
-    default='gpg',
+configitem(
+    b'gpg', b'cmd', default=b'gpg',
 )
-configitem('gpg', 'key',
-    default=None,
+configitem(
+    b'gpg', b'key', default=None,
 )
-configitem('gpg', '.*',
-    default=None,
-    generic=True,
+configitem(
+    b'gpg', b'.*', default=None, generic=True,
 )
 
 # Custom help category
-_HELP_CATEGORY = 'gpg'
+_HELP_CATEGORY = b'gpg'
 help.CATEGORY_ORDER.insert(
-    help.CATEGORY_ORDER.index(registrar.command.CATEGORY_HELP),
-    _HELP_CATEGORY
+    help.CATEGORY_ORDER.index(registrar.command.CATEGORY_HELP), _HELP_CATEGORY
 )
-help.CATEGORY_NAMES[_HELP_CATEGORY] = 'Signing changes (GPG)'
+help.CATEGORY_NAMES[_HELP_CATEGORY] = b'Signing changes (GPG)'
+
 
 class gpg(object):
     def __init__(self, path, key=None):
         self.path = path
-        self.key = (key and " --local-user \"%s\"" % key) or ""
+        self.key = (key and b" --local-user \"%s\"" % key) or b""
 
     def sign(self, data):
-        gpgcmd = "%s --sign --detach-sign%s" % (self.path, self.key)
+        gpgcmd = b"%s --sign --detach-sign%s" % (self.path, self.key)
         return procutil.filter(data, gpgcmd)
 
     def verify(self, data, sig):
@@ -69,17 +68,20 @@
         sigfile = datafile = None
         try:
             # create temporary files
-            fd, sigfile = pycompat.mkstemp(prefix="hg-gpg-", suffix=".sig")
+            fd, sigfile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".sig")
             fp = os.fdopen(fd, r'wb')
             fp.write(sig)
             fp.close()
-            fd, datafile = pycompat.mkstemp(prefix="hg-gpg-", suffix=".txt")
+            fd, datafile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".txt")
             fp = os.fdopen(fd, r'wb')
             fp.write(data)
             fp.close()
-            gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
-                      "\"%s\" \"%s\"" % (self.path, sigfile, datafile))
-            ret = procutil.filter("", gpgcmd)
+            gpgcmd = b"%s --logger-fd 1 --status-fd 1 --verify \"%s\" \"%s\"" % (
+                self.path,
+                sigfile,
+                datafile,
+            )
+            ret = procutil.filter(b"", gpgcmd)
         finally:
             for f in (sigfile, datafile):
                 try:
@@ -92,68 +94,74 @@
         for l in ret.splitlines():
             # see DETAILS in the gnupg documentation
             # filter the logger output
-            if not l.startswith("[GNUPG:]"):
+            if not l.startswith(b"[GNUPG:]"):
                 continue
             l = l[9:]
-            if l.startswith("VALIDSIG"):
+            if l.startswith(b"VALIDSIG"):
                 # fingerprint of the primary key
                 fingerprint = l.split()[10]
-            elif l.startswith("ERRSIG"):
-                key = l.split(" ", 3)[:2]
-                key.append("")
+            elif l.startswith(b"ERRSIG"):
+                key = l.split(b" ", 3)[:2]
+                key.append(b"")
                 fingerprint = None
-            elif (l.startswith("GOODSIG") or
-                  l.startswith("EXPSIG") or
-                  l.startswith("EXPKEYSIG") or
-                  l.startswith("BADSIG")):
+            elif (
+                l.startswith(b"GOODSIG")
+                or l.startswith(b"EXPSIG")
+                or l.startswith(b"EXPKEYSIG")
+                or l.startswith(b"BADSIG")
+            ):
                 if key is not None:
                     keys.append(key + [fingerprint])
-                key = l.split(" ", 2)
+                key = l.split(b" ", 2)
                 fingerprint = None
         if key is not None:
             keys.append(key + [fingerprint])
         return keys
 
+
 def newgpg(ui, **opts):
     """create a new gpg instance"""
-    gpgpath = ui.config("gpg", "cmd")
+    gpgpath = ui.config(b"gpg", b"cmd")
     gpgkey = opts.get(r'key')
     if not gpgkey:
-        gpgkey = ui.config("gpg", "key")
+        gpgkey = ui.config(b"gpg", b"key")
     return gpg(gpgpath, gpgkey)
 
+
 def sigwalk(repo):
     """
     walk over every sig, yielding pairs
     ((node, version, sig), (filename, linenumber))
     """
+
     def parsefile(fileiter, context):
         ln = 1
         for l in fileiter:
             if not l:
                 continue
-            yield (l.split(" ", 2), (context, ln))
+            yield (l.split(b" ", 2), (context, ln))
             ln += 1
 
     # read the heads
-    fl = repo.file(".hgsigs")
+    fl = repo.file(b".hgsigs")
     for r in reversed(fl.heads()):
-        fn = ".hgsigs|%s" % hgnode.short(r)
+        fn = b".hgsigs|%s" % hgnode.short(r)
         for item in parsefile(fl.read(r).splitlines(), fn):
             yield item
     try:
         # read local signatures
-        fn = "localsigs"
+        fn = b"localsigs"
         for item in parsefile(repo.vfs(fn), fn):
             yield item
     except IOError:
         pass
 
+
 def getkeys(ui, repo, mygpg, sigdata, context):
     """get the keys who signed a data"""
     fn, ln = context
     node, version, sig = sigdata
-    prefix = "%s:%d" % (fn, ln)
+    prefix = b"%s:%d" % (fn, ln)
     node = hgnode.bin(node)
 
     data = node2txt(repo, node, version)
@@ -163,22 +171,27 @@
     validkeys = []
     # warn for expired key and/or sigs
     for key in keys:
-        if key[0] == "ERRSIG":
-            ui.write(_("%s Unknown key ID \"%s\"\n") % (prefix, key[1]))
+        if key[0] == b"ERRSIG":
+            ui.write(_(b"%s Unknown key ID \"%s\"\n") % (prefix, key[1]))
             continue
-        if key[0] == "BADSIG":
-            ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
+        if key[0] == b"BADSIG":
+            ui.write(_(b"%s Bad signature from \"%s\"\n") % (prefix, key[2]))
             continue
-        if key[0] == "EXPSIG":
-            ui.write(_("%s Note: Signature has expired"
-                       " (signed by: \"%s\")\n") % (prefix, key[2]))
-        elif key[0] == "EXPKEYSIG":
-            ui.write(_("%s Note: This key has expired"
-                       " (signed by: \"%s\")\n") % (prefix, key[2]))
+        if key[0] == b"EXPSIG":
+            ui.write(
+                _(b"%s Note: Signature has expired (signed by: \"%s\")\n")
+                % (prefix, key[2])
+            )
+        elif key[0] == b"EXPKEYSIG":
+            ui.write(
+                _(b"%s Note: This key has expired (signed by: \"%s\")\n")
+                % (prefix, key[2])
+            )
         validkeys.append((key[1], key[2], key[3]))
     return validkeys
 
-@command("sigs", [], _('hg sigs'), helpcategory=_HELP_CATEGORY)
+
+@command(b"sigs", [], _(b'hg sigs'), helpcategory=_HELP_CATEGORY)
 def sigs(ui, repo):
     """list signed changesets"""
     mygpg = newgpg(ui)
@@ -190,7 +203,7 @@
         try:
             n = repo.lookup(node)
         except KeyError:
-            ui.warn(_("%s:%d node does not exist\n") % (fn, ln))
+            ui.warn(_(b"%s:%d node does not exist\n") % (fn, ln))
             continue
         r = repo.changelog.rev(n)
         keys = getkeys(ui, repo, mygpg, data, context)
@@ -200,10 +213,11 @@
         revs[r].extend(keys)
     for rev in sorted(revs, reverse=True):
         for k in revs[rev]:
-            r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
-            ui.write("%-30s %s\n" % (keystr(ui, k), r))
+            r = b"%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
+            ui.write(b"%-30s %s\n" % (keystr(ui, k), r))
 
-@command("sigcheck", [], _('hg sigcheck REV'), helpcategory=_HELP_CATEGORY)
+
+@command(b"sigcheck", [], _(b'hg sigcheck REV'), helpcategory=_HELP_CATEGORY)
 def sigcheck(ui, repo, rev):
     """verify all the signatures there may be for a particular revision"""
     mygpg = newgpg(ui)
@@ -219,35 +233,44 @@
                 keys.extend(k)
 
     if not keys:
-        ui.write(_("no valid signature for %s\n") % hgnode.short(rev))
+        ui.write(_(b"no valid signature for %s\n") % hgnode.short(rev))
         return
 
     # print summary
-    ui.write(_("%s is signed by:\n") % hgnode.short(rev))
+    ui.write(_(b"%s is signed by:\n") % hgnode.short(rev))
     for key in keys:
-        ui.write(" %s\n" % keystr(ui, key))
+        ui.write(b" %s\n" % keystr(ui, key))
+
 
 def keystr(ui, key):
     """associate a string to a key (username, comment)"""
     keyid, user, fingerprint = key
-    comment = ui.config("gpg", fingerprint)
+    comment = ui.config(b"gpg", fingerprint)
     if comment:
-        return "%s (%s)" % (user, comment)
+        return b"%s (%s)" % (user, comment)
     else:
         return user
 
-@command("sign",
-         [('l', 'local', None, _('make the signature local')),
-          ('f', 'force', None, _('sign even if the sigfile is modified')),
-          ('', 'no-commit', None, _('do not commit the sigfile after signing')),
-          ('k', 'key', '',
-           _('the key id to sign with'), _('ID')),
-          ('m', 'message', '',
-           _('use text as commit message'), _('TEXT')),
-          ('e', 'edit', False, _('invoke editor on commit messages')),
-         ] + cmdutil.commitopts2,
-         _('hg sign [OPTION]... [REV]...'),
-         helpcategory=_HELP_CATEGORY)
+
+@command(
+    b"sign",
+    [
+        (b'l', b'local', None, _(b'make the signature local')),
+        (b'f', b'force', None, _(b'sign even if the sigfile is modified')),
+        (
+            b'',
+            b'no-commit',
+            None,
+            _(b'do not commit the sigfile after signing'),
+        ),
+        (b'k', b'key', b'', _(b'the key id to sign with'), _(b'ID')),
+        (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
+        (b'e', b'edit', False, _(b'invoke editor on commit messages')),
+    ]
+    + cmdutil.commitopts2,
+    _(b'hg sign [OPTION]... [REV]...'),
+    helpcategory=_HELP_CATEGORY,
+)
 def sign(ui, repo, *revs, **opts):
     """add a signature for the current or given revision
 
@@ -262,85 +285,98 @@
     with repo.wlock():
         return _dosign(ui, repo, *revs, **opts)
 
+
 def _dosign(ui, repo, *revs, **opts):
     mygpg = newgpg(ui, **opts)
     opts = pycompat.byteskwargs(opts)
-    sigver = "0"
-    sigmessage = ""
+    sigver = b"0"
+    sigmessage = b""
 
-    date = opts.get('date')
+    date = opts.get(b'date')
     if date:
-        opts['date'] = dateutil.parsedate(date)
+        opts[b'date'] = dateutil.parsedate(date)
 
     if revs:
         nodes = [repo.lookup(n) for n in revs]
     else:
-        nodes = [node for node in repo.dirstate.parents()
-                 if node != hgnode.nullid]
+        nodes = [
+            node for node in repo.dirstate.parents() if node != hgnode.nullid
+        ]
         if len(nodes) > 1:
-            raise error.Abort(_('uncommitted merge - please provide a '
-                               'specific revision'))
+            raise error.Abort(
+                _(b'uncommitted merge - please provide a specific revision')
+            )
         if not nodes:
             nodes = [repo.changelog.tip()]
 
     for n in nodes:
         hexnode = hgnode.hex(n)
-        ui.write(_("signing %d:%s\n") % (repo.changelog.rev(n),
-                                         hgnode.short(n)))
+        ui.write(
+            _(b"signing %d:%s\n") % (repo.changelog.rev(n), hgnode.short(n))
+        )
         # build data
         data = node2txt(repo, n, sigver)
         sig = mygpg.sign(data)
         if not sig:
-            raise error.Abort(_("error while signing"))
+            raise error.Abort(_(b"error while signing"))
         sig = binascii.b2a_base64(sig)
-        sig = sig.replace("\n", "")
-        sigmessage += "%s %s %s\n" % (hexnode, sigver, sig)
+        sig = sig.replace(b"\n", b"")
+        sigmessage += b"%s %s %s\n" % (hexnode, sigver, sig)
 
     # write it
-    if opts['local']:
-        repo.vfs.append("localsigs", sigmessage)
+    if opts[b'local']:
+        repo.vfs.append(b"localsigs", sigmessage)
         return
 
-    if not opts["force"]:
-        msigs = match.exact(['.hgsigs'])
+    if not opts[b"force"]:
+        msigs = match.exact([b'.hgsigs'])
         if any(repo.status(match=msigs, unknown=True, ignored=True)):
-            raise error.Abort(_("working copy of .hgsigs is changed "),
-                             hint=_("please commit .hgsigs manually"))
+            raise error.Abort(
+                _(b"working copy of .hgsigs is changed "),
+                hint=_(b"please commit .hgsigs manually"),
+            )
 
-    sigsfile = repo.wvfs(".hgsigs", "ab")
+    sigsfile = repo.wvfs(b".hgsigs", b"ab")
     sigsfile.write(sigmessage)
     sigsfile.close()
 
-    if '.hgsigs' not in repo.dirstate:
-        repo[None].add([".hgsigs"])
+    if b'.hgsigs' not in repo.dirstate:
+        repo[None].add([b".hgsigs"])
 
-    if opts["no_commit"]:
+    if opts[b"no_commit"]:
         return
 
-    message = opts['message']
+    message = opts[b'message']
     if not message:
         # we don't translate commit messages
-        message = "\n".join(["Added signature for changeset %s"
-                             % hgnode.short(n)
-                             for n in nodes])
+        message = b"\n".join(
+            [
+                b"Added signature for changeset %s" % hgnode.short(n)
+                for n in nodes
+            ]
+        )
     try:
-        editor = cmdutil.getcommiteditor(editform='gpg.sign',
-                                         **pycompat.strkwargs(opts))
-        repo.commit(message, opts['user'], opts['date'], match=msigs,
-                    editor=editor)
+        editor = cmdutil.getcommiteditor(
+            editform=b'gpg.sign', **pycompat.strkwargs(opts)
+        )
+        repo.commit(
+            message, opts[b'user'], opts[b'date'], match=msigs, editor=editor
+        )
     except ValueError as inst:
         raise error.Abort(pycompat.bytestr(inst))
 
+
 def node2txt(repo, node, ver):
     """map a manifest into some text"""
-    if ver == "0":
-        return "%s\n" % hgnode.hex(node)
+    if ver == b"0":
+        return b"%s\n" % hgnode.hex(node)
     else:
-        raise error.Abort(_("unknown signature version"))
+        raise error.Abort(_(b"unknown signature version"))
+
 
 def extsetup(ui):
     # Add our category before "Repository maintenance".
     help.CATEGORY_ORDER.insert(
-        help.CATEGORY_ORDER.index(command.CATEGORY_MAINTENANCE),
-        _HELP_CATEGORY)
-    help.CATEGORY_NAMES[_HELP_CATEGORY] = 'GPG signing'
+        help.CATEGORY_ORDER.index(command.CATEGORY_MAINTENANCE), _HELP_CATEGORY
+    )
+    help.CATEGORY_NAMES[_HELP_CATEGORY] = b'GPG signing'
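
``sigwalk()`` and ``_dosign()`` above agree on the ``.hgsigs`` record
layout: one line per signature with three space-separated fields,
``<hexnode> <version> <base64 signature>``, split with
``l.split(b" ", 2)`` so the signature field is never split further. The
same parse, reduced to a stand-alone sketch with fabricated sample data::

    # Parse "<hexnode> <version> <sig>" records the way sigwalk() does;
    # the sample line below is made up for illustration.
    def parsesigs(data):
        for ln, line in enumerate(data.splitlines(), 1):
            if not line:
                continue
            node, version, sig = line.split(b" ", 2)
            yield (node, version, sig), ln

    sample = b"0123abcd 0 iQEcBAABAgAGBQJd...\n"
    for (node, version, sig), ln in parsesigs(sample):
        print(ln, node, version)
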
--- a/hgext/graphlog.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/graphlog.py	Mon Oct 21 11:09:48 2019 -0400
@@ -30,32 +30,86 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
-@command('glog',
-    [('f', 'follow', None,
-     _('follow changeset history, or file history across copies and renames')),
-    ('', 'follow-first', None,
-     _('only follow the first parent of merge changesets (DEPRECATED)')),
-    ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
-    ('C', 'copies', None, _('show copied files')),
-    ('k', 'keyword', [],
-     _('do case-insensitive search for a given text'), _('TEXT')),
-    ('r', 'rev', [], _('show the specified revision or revset'), _('REV')),
-    ('', 'removed', None, _('include revisions where files were removed')),
-    ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
-    ('u', 'user', [], _('revisions committed by user'), _('USER')),
-    ('', 'only-branch', [],
-     _('show only changesets within the given named branch (DEPRECATED)'),
-     _('BRANCH')),
-    ('b', 'branch', [],
-     _('show changesets within the given named branch'), _('BRANCH')),
-    ('P', 'prune', [],
-     _('do not display revision or any of its ancestors'), _('REV')),
-    ] + cmdutil.logopts + cmdutil.walkopts,
-    _('[OPTION]... [FILE]'),
+@command(
+    b'glog',
+    [
+        (
+            b'f',
+            b'follow',
+            None,
+            _(
+                b'follow changeset history, or file history across copies and renames'
+            ),
+        ),
+        (
+            b'',
+            b'follow-first',
+            None,
+            _(b'only follow the first parent of merge changesets (DEPRECATED)'),
+        ),
+        (
+            b'd',
+            b'date',
+            b'',
+            _(b'show revisions matching date spec'),
+            _(b'DATE'),
+        ),
+        (b'C', b'copies', None, _(b'show copied files')),
+        (
+            b'k',
+            b'keyword',
+            [],
+            _(b'do case-insensitive search for a given text'),
+            _(b'TEXT'),
+        ),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'show the specified revision or revset'),
+            _(b'REV'),
+        ),
+        (
+            b'',
+            b'removed',
+            None,
+            _(b'include revisions where files were removed'),
+        ),
+        (b'm', b'only-merges', None, _(b'show only merges (DEPRECATED)')),
+        (b'u', b'user', [], _(b'revisions committed by user'), _(b'USER')),
+        (
+            b'',
+            b'only-branch',
+            [],
+            _(
+                b'show only changesets within the given named branch (DEPRECATED)'
+            ),
+            _(b'BRANCH'),
+        ),
+        (
+            b'b',
+            b'branch',
+            [],
+            _(b'show changesets within the given named branch'),
+            _(b'BRANCH'),
+        ),
+        (
+            b'P',
+            b'prune',
+            [],
+            _(b'do not display revision or any of its ancestors'),
+            _(b'REV'),
+        ),
+    ]
+    + cmdutil.logopts
+    + cmdutil.walkopts,
+    _(b'[OPTION]... [FILE]'),
     helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
-    inferrepo=True)
+    inferrepo=True,
+)
 def glog(ui, repo, *pats, **opts):
     """show revision history alongside an ASCII revision graph
 
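
The reflowed ``@command`` option list above is a list of fancyopts-style
tuples, ``(shortname, longname, default, help)`` with an optional fifth
element naming the option's value (``_(b'REV')`` and friends). A small
sketch that walks such a table, using invented example options::

    # Walk fancyopts-style option tuples; both shapes (4- and 5-element)
    # from the list above are represented with hypothetical entries.
    options = [
        ('f', 'follow', None, 'follow history across copies'),
        ('r', 'rev', [], 'show the specified revision or revset', 'REV'),
    ]
    for opt in options:
        short, name, default, helptext = opt[:4]
        metavar = opt[4] if len(opt) > 4 else ''
        flag = '--%s %s' % (name, metavar) if metavar else '--%s' % name
        print('%-14s %s (default: %r)' % (flag, helptext, default))
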
--- a/hgext/hgk.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/hgk.py	Mon Oct 21 11:09:48 2019 -0400
@@ -59,24 +59,29 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('hgk', 'path',
-    default='hgk',
+configitem(
+    b'hgk', b'path', default=b'hgk',
 )
 
-@command('debug-diff-tree',
-    [('p', 'patch', None, _('generate patch')),
-    ('r', 'recursive', None, _('recursive')),
-    ('P', 'pretty', None, _('pretty')),
-    ('s', 'stdin', None, _('stdin')),
-    ('C', 'copy', None, _('detect copies')),
-    ('S', 'search', "", _('search'))],
-    ('[OPTION]... NODE1 NODE2 [FILE]...'),
-    inferrepo=True)
+
+@command(
+    b'debug-diff-tree',
+    [
+        (b'p', b'patch', None, _(b'generate patch')),
+        (b'r', b'recursive', None, _(b'recursive')),
+        (b'P', b'pretty', None, _(b'pretty')),
+        (b's', b'stdin', None, _(b'stdin')),
+        (b'C', b'copy', None, _(b'detect copies')),
+        (b'S', b'search', b"", _(b'search')),
+    ],
+    b'[OPTION]... NODE1 NODE2 [FILE]...',
+    inferrepo=True,
+)
 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
     """diff trees from two commits"""
 
@@ -87,19 +92,26 @@
         mmap = repo[node1].manifest()
         mmap2 = repo[node2].manifest()
         m = scmutil.match(repo[node1], files)
-        modified, added, removed  = repo.status(node1, node2, m)[:3]
+        modified, added, removed = repo.status(node1, node2, m)[:3]
         empty = short(nullid)
 
         for f in modified:
             # TODO get file permissions
-            ui.write((":100664 100664 %s %s M\t%s\t%s\n") %
-                     (short(mmap[f]), short(mmap2[f]), f, f))
+            ui.writenoi18n(
+                b":100664 100664 %s %s M\t%s\t%s\n"
+                % (short(mmap[f]), short(mmap2[f]), f, f)
+            )
         for f in added:
-            ui.write((":000000 100664 %s %s N\t%s\t%s\n") %
-                     (empty, short(mmap2[f]), f, f))
+            ui.writenoi18n(
+                b":000000 100664 %s %s N\t%s\t%s\n"
+                % (empty, short(mmap2[f]), f, f)
+            )
         for f in removed:
-            ui.write((":100664 000000 %s %s D\t%s\t%s\n") %
-                     (short(mmap[f]), empty, f, f))
+            ui.writenoi18n(
+                b":100664 000000 %s %s D\t%s\t%s\n"
+                % (short(mmap[f]), empty, f, f)
+            )
+
     ##
 
     while True:
@@ -121,12 +133,11 @@
             node1 = repo.changelog.parents(node1)[0]
         if opts[r'patch']:
             if opts[r'pretty']:
-                catcommit(ui, repo, node2, "")
+                catcommit(ui, repo, node2, b"")
             m = scmutil.match(repo[node1], files)
             diffopts = patch.difffeatureopts(ui)
             diffopts.git = True
-            chunks = patch.diff(repo, node1, node2, match=m,
-                                opts=diffopts)
+            chunks = patch.diff(repo, node1, node2, match=m, opts=diffopts)
             for chunk in chunks:
                 ui.write(chunk)
         else:
@@ -134,70 +145,76 @@
         if not opts[r'stdin']:
             break
 
+
 def catcommit(ui, repo, n, prefix, ctx=None):
-    nlprefix = '\n' + prefix
+    nlprefix = b'\n' + prefix
     if ctx is None:
         ctx = repo[n]
     # use ctx.node() instead ??
-    ui.write(("tree %s\n" % short(ctx.changeset()[0])))
+    ui.write((b"tree %s\n" % short(ctx.changeset()[0])))
     for p in ctx.parents():
-        ui.write(("parent %s\n" % p))
+        ui.write((b"parent %s\n" % p))
 
     date = ctx.date()
-    description = ctx.description().replace("\0", "")
-    ui.write(("author %s %d %d\n" % (ctx.user(), int(date[0]), date[1])))
+    description = ctx.description().replace(b"\0", b"")
+    ui.write((b"author %s %d %d\n" % (ctx.user(), int(date[0]), date[1])))
 
-    if 'committer' in ctx.extra():
-        ui.write(("committer %s\n" % ctx.extra()['committer']))
+    if b'committer' in ctx.extra():
+        ui.write((b"committer %s\n" % ctx.extra()[b'committer']))
 
-    ui.write(("revision %d\n" % ctx.rev()))
-    ui.write(("branch %s\n" % ctx.branch()))
+    ui.write((b"revision %d\n" % ctx.rev()))
+    ui.write((b"branch %s\n" % ctx.branch()))
     if obsolete.isenabled(repo, obsolete.createmarkersopt):
         if ctx.obsolete():
-            ui.write(("obsolete\n"))
-    ui.write(("phase %s\n\n" % ctx.phasestr()))
+            ui.writenoi18n(b"obsolete\n")
+    ui.write((b"phase %s\n\n" % ctx.phasestr()))
 
-    if prefix != "":
-        ui.write("%s%s\n" % (prefix,
-                             description.replace('\n', nlprefix).strip()))
+    if prefix != b"":
+        ui.write(
+            b"%s%s\n" % (prefix, description.replace(b'\n', nlprefix).strip())
+        )
     else:
-        ui.write(description + "\n")
+        ui.write(description + b"\n")
     if prefix:
-        ui.write('\0')
+        ui.write(b'\0')
 
-@command('debug-merge-base', [], _('REV REV'))
+
+@command(b'debug-merge-base', [], _(b'REV REV'))
 def base(ui, repo, node1, node2):
     """output common ancestor information"""
     node1 = repo.lookup(node1)
     node2 = repo.lookup(node2)
     n = repo.changelog.ancestor(node1, node2)
-    ui.write(short(n) + "\n")
+    ui.write(short(n) + b"\n")
+
 
-@command('debug-cat-file',
-    [('s', 'stdin', None, _('stdin'))],
-    _('[OPTION]... TYPE FILE'),
-    inferrepo=True)
+@command(
+    b'debug-cat-file',
+    [(b's', b'stdin', None, _(b'stdin'))],
+    _(b'[OPTION]... TYPE FILE'),
+    inferrepo=True,
+)
 def catfile(ui, repo, type=None, r=None, **opts):
     """cat a specific revision"""
     # in stdin mode, every line except the commit is prefixed with two
     # spaces.  This way our caller can find the commit without magic
     # strings
     #
-    prefix = ""
+    prefix = b""
     if opts[r'stdin']:
         line = ui.fin.readline()
         if not line:
             return
         (type, r) = line.rstrip(pycompat.oslinesep).split(b' ')
-        prefix = "    "
+        prefix = b"    "
     else:
         if not type or not r:
-            ui.warn(_("cat-file: type or revision not supplied\n"))
-            commands.help_(ui, 'cat-file')
+            ui.warn(_(b"cat-file: type or revision not supplied\n"))
+            commands.help_(ui, b'cat-file')
 
     while r:
-        if type != "commit":
-            ui.warn(_("aborting hg cat-file only understands commits\n"))
+        if type != b"commit":
+            ui.warn(_(b"aborting hg cat-file only understands commits\n"))
             return 1
         n = repo.lookup(r)
         catcommit(ui, repo, n, prefix)
@@ -209,12 +226,13 @@
         else:
             break
 
+
 # git rev-tree is a confusing thing.  You can supply a number of
 # commit sha1s on the command line, and it walks the commit history
 # telling you which commits are reachable from the supplied ones via
 # a bitmask based on arg position.
 # You can specify a commit to stop at by starting the sha1 with ^.
-def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
+def revtree(ui, args, repo, full=b"tree", maxnr=0, parents=False):
     def chlogwalk():
         count = len(repo)
         i = count
@@ -229,12 +247,12 @@
 
             for x in pycompat.xrange(chunk):
                 if i + x >= count:
-                    l[chunk - x:] = [0] * (chunk - x)
+                    l[chunk - x :] = [0] * (chunk - x)
                     break
                 if full is not None:
                     if (i + x) in repo:
                         l[x] = repo[i + x]
-                        l[x].changeset() # force reading
+                        l[x].changeset()  # force reading
                 else:
                     if (i + x) in repo:
                         l[x] = 1
@@ -263,11 +281,11 @@
     # figure out which commits they are asking for and which ones they
     # want us to stop on
     for i, arg in enumerate(args):
-        if arg.startswith('^'):
+        if arg.startswith(b'^'):
             s = repo.lookup(arg[1:])
             stop_sha1.append(s)
             want_sha1.append(s)
-        elif arg != 'HEAD':
+        elif arg != b'HEAD':
             want_sha1.append(repo.lookup(arg))
 
     # calculate the graph for the supplied commits
@@ -294,67 +312,76 @@
         n = repo.changelog.node(i)
         mask = is_reachable(want_sha1, reachable, n)
         if mask:
-            parentstr = ""
+            parentstr = b""
             if parents:
                 pp = repo.changelog.parents(n)
                 if pp[0] != nullid:
-                    parentstr += " " + short(pp[0])
+                    parentstr += b" " + short(pp[0])
                 if pp[1] != nullid:
-                    parentstr += " " + short(pp[1])
+                    parentstr += b" " + short(pp[1])
             if not full:
-                ui.write("%s%s\n" % (short(n), parentstr))
-            elif full == "commit":
-                ui.write("%s%s\n" % (short(n), parentstr))
-                catcommit(ui, repo, n, '    ', ctx)
+                ui.write(b"%s%s\n" % (short(n), parentstr))
+            elif full == b"commit":
+                ui.write(b"%s%s\n" % (short(n), parentstr))
+                catcommit(ui, repo, n, b'    ', ctx)
             else:
                 (p1, p2) = repo.changelog.parents(n)
                 (h, h1, h2) = map(short, (n, p1, p2))
                 (i1, i2) = map(repo.changelog.rev, (p1, p2))
 
                 date = ctx.date()[0]
-                ui.write("%s %s:%s" % (date, h, mask))
+                ui.write(b"%s %s:%s" % (date, h, mask))
                 mask = is_reachable(want_sha1, reachable, p1)
                 if i1 != nullrev and mask > 0:
-                    ui.write("%s:%s " % (h1, mask)),
+                    ui.write(b"%s:%s " % (h1, mask)),
                 mask = is_reachable(want_sha1, reachable, p2)
                 if i2 != nullrev and mask > 0:
-                    ui.write("%s:%s " % (h2, mask))
-                ui.write("\n")
+                    ui.write(b"%s:%s " % (h2, mask))
+                ui.write(b"\n")
             if maxnr and count >= maxnr:
                 break
             count += 1
 
+
 # git rev-list tries to order things by date, and has the ability to stop
 # at a given commit without walking the whole repo.  TODO add the stop
 # parameter
-@command('debug-rev-list',
-    [('H', 'header', None, _('header')),
-    ('t', 'topo-order', None, _('topo-order')),
-    ('p', 'parents', None, _('parents')),
-    ('n', 'max-count', 0, _('max-count'))],
-    ('[OPTION]... REV...'))
+@command(
+    b'debug-rev-list',
+    [
+        (b'H', b'header', None, _(b'header')),
+        (b't', b'topo-order', None, _(b'topo-order')),
+        (b'p', b'parents', None, _(b'parents')),
+        (b'n', b'max-count', 0, _(b'max-count')),
+    ],
+    b'[OPTION]... REV...',
+)
 def revlist(ui, repo, *revs, **opts):
     """print revisions"""
-    if opts['header']:
-        full = "commit"
+    if opts[b'header']:
+        full = b"commit"
     else:
         full = None
     copy = [x for x in revs]
     revtree(ui, copy, repo, full, opts[r'max_count'], opts[r'parents'])
 
-@command('view',
-    [('l', 'limit', '',
-     _('limit number of changes displayed'), _('NUM'))],
-    _('[-l LIMIT] [REVRANGE]'),
-    helpcategory=command.CATEGORY_CHANGE_NAVIGATION)
+
+@command(
+    b'view',
+    [(b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM'))],
+    _(b'[-l LIMIT] [REVRANGE]'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
+)
 def view(ui, repo, *etc, **opts):
-    "start interactive history viewer"
+    b"start interactive history viewer"
     opts = pycompat.byteskwargs(opts)
     os.chdir(repo.root)
-    optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
+    optstr = b' '.join(
+        [b'--%s %s' % (k, v) for k, v in pycompat.iteritems(opts) if v]
+    )
     if repo.filtername is None:
-        optstr += '--hidden'
+        optstr += b'--hidden'
 
-    cmd = ui.config("hgk", "path") + " %s %s" % (optstr, " ".join(etc))
-    ui.debug("running %s\n" % cmd)
-    ui.system(cmd, blockedtag='hgk_view')
+    cmd = ui.config(b"hgk", b"path") + b" %s %s" % (optstr, b" ".join(etc))
+    ui.debug(b"running %s\n" % cmd)
+    ui.system(cmd, blockedtag=b'hgk_view')
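
The ``revtree()`` comment above describes reachability masks: bit ``i``
of a commit's mask is set when the commit is reachable from the ``i``-th
commit named on the command line. An illustrative sketch of that idea on
a toy parent map (hypothetical helpers, not hgk's real code)::

    # Compute per-node reachability bitmasks over a tiny DAG.
    def reachablefrom(parents, start):
        seen, stack = set(), [start]
        while stack:
            n = stack.pop()
            if n in seen:
                continue
            seen.add(n)
            stack.extend(parents.get(n, ()))
        return seen

    def masks(parents, wanted):
        reach = [reachablefrom(parents, w) for w in wanted]
        nodes = set(parents)
        for ps in parents.values():
            nodes.update(ps)
        return {n: sum(1 << i for i, r in enumerate(reach) if n in r)
                for n in sorted(nodes)}

    parents = {'c': ['b'], 'b': ['a'], 'a': []}
    print(masks(parents, ['c', 'b']))  # {'a': 3, 'b': 3, 'c': 1}
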
--- a/hgext/highlight/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/highlight/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -36,27 +36,31 @@
 
 from mercurial import (
     extensions,
+    pycompat,
 )
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
 def pygmentize(web, field, fctx, tmpl):
-    style = web.config('web', 'pygments_style', 'colorful')
-    expr = web.config('web', 'highlightfiles', "size('<5M')")
-    filenameonly = web.configbool('web', 'highlightonlymatchfilename', False)
+    style = web.config(b'web', b'pygments_style', b'colorful')
+    expr = web.config(b'web', b'highlightfiles', b"size('<5M')")
+    filenameonly = web.configbool(b'web', b'highlightonlymatchfilename', False)
 
     ctx = fctx.changectx()
     m = ctx.matchfileset(expr)
     if m(fctx.path()):
-        highlight.pygmentize(field, fctx, style, tmpl,
-                guessfilenameonly=filenameonly)
+        highlight.pygmentize(
+            field, fctx, style, tmpl, guessfilenameonly=filenameonly
+        )
+
 
 def filerevision_highlight(orig, web, fctx):
-    mt = web.res.headers['Content-Type']
+    mt = web.res.headers[b'Content-Type']
     # only pygmentize for mimetype containing 'html' so we both match
     # 'text/html' and possibly 'application/xhtml+xml' in the future
     # so that we don't have to touch the extension when the mimetype
@@ -64,33 +68,42 @@
     # raw file is sent using rawfile() and doesn't call us, so we
     # can't clash with the file's content-type here in case we
     # pygmentize a html file
-    if 'html' in mt:
-        pygmentize(web, 'fileline', fctx, web.tmpl)
+    if b'html' in mt:
+        pygmentize(web, b'fileline', fctx, web.tmpl)
 
     return orig(web, fctx)
 
+
 def annotate_highlight(orig, web):
-    mt = web.res.headers['Content-Type']
-    if 'html' in mt:
+    mt = web.res.headers[b'Content-Type']
+    if b'html' in mt:
         fctx = webutil.filectx(web.repo, web.req)
-        pygmentize(web, 'annotateline', fctx, web.tmpl)
+        pygmentize(web, b'annotateline', fctx, web.tmpl)
 
     return orig(web)
 
+
 def generate_css(web):
-    pg_style = web.config('web', 'pygments_style', 'colorful')
-    fmter = highlight.HtmlFormatter(style=pg_style)
-    web.res.headers['Content-Type'] = 'text/css'
-    web.res.setbodybytes(''.join([
-        '/* pygments_style = %s */\n\n' % pg_style,
-        fmter.get_style_defs(''),
-    ]))
+    pg_style = web.config(b'web', b'pygments_style', b'colorful')
+    fmter = highlight.HtmlFormatter(style=pycompat.sysstr(pg_style))
+    web.res.headers[b'Content-Type'] = b'text/css'
+    style_defs = fmter.get_style_defs(pycompat.sysstr(b''))
+    web.res.setbodybytes(
+        b''.join(
+            [
+                b'/* pygments_style = %s */\n\n' % pg_style,
+                pycompat.bytestr(style_defs),
+            ]
+        )
+    )
     return web.res.sendresponse()
 
+
 def extsetup(ui):
     # monkeypatch in the new version
-    extensions.wrapfunction(webcommands, '_filerevision',
-                            filerevision_highlight)
-    extensions.wrapfunction(webcommands, 'annotate', annotate_highlight)
+    extensions.wrapfunction(
+        webcommands, b'_filerevision', filerevision_highlight
+    )
+    extensions.wrapfunction(webcommands, b'annotate', annotate_highlight)
     webcommands.highlightcss = generate_css
-    webcommands.__all__.append('highlightcss')
+    webcommands.__all__.append(b'highlightcss')
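
``extsetup()`` above monkeypatches hgweb by wrapping existing functions,
so each wrapper receives the original function as its first argument (the
``orig`` parameter of ``filerevision_highlight`` and
``annotate_highlight``). A stripped-down sketch of that wrapping idea;
Mercurial's real ``extensions.wrapfunction`` does more bookkeeping::

    # Replace container.name with a closure that forwards the original
    # function as the wrapper's first argument.
    def wrapfunction(container, name, wrapper):
        orig = getattr(container, name)
        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)
        setattr(container, name, wrapped)
        return orig

    class webcommands(object):  # hypothetical stand-in for the module
        @staticmethod
        def annotate(req):
            return 'annotate(%s)' % req

    def annotate_highlight(orig, req):
        return 'highlighted ' + orig(req)

    wrapfunction(webcommands, 'annotate', annotate_highlight)
    print(webcommands.annotate('f'))  # -> highlighted annotate(f)
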
--- a/hgext/highlight/highlight.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/highlight/highlight.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,15 +11,15 @@
 from __future__ import absolute_import
 
 from mercurial import demandimport
-demandimport.IGNORES.update(['pkgutil', 'pkg_resources', '__main__'])
+
+demandimport.IGNORES.update([b'pkgutil', b'pkg_resources', b'__main__'])
 
 from mercurial import (
     encoding,
+    pycompat,
 )
 
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 with demandimport.deactivated():
     import pygments
@@ -38,35 +38,37 @@
 TextLexer = pygments.lexers.TextLexer
 HtmlFormatter = pygments.formatters.HtmlFormatter
 
-SYNTAX_CSS = ('\n<link rel="stylesheet" href="{url}highlightcss" '
-              'type="text/css" />')
+SYNTAX_CSS = (
+    b'\n<link rel="stylesheet" href="{url}highlightcss" type="text/css" />'
+)
+
 
 def pygmentize(field, fctx, style, tmpl, guessfilenameonly=False):
 
     # append a <link ...> to the syntax highlighting css
-    tmpl.load('header')
-    old_header = tmpl.cache['header']
+    tmpl.load(b'header')
+    old_header = tmpl.cache[b'header']
     if SYNTAX_CSS not in old_header:
         new_header = old_header + SYNTAX_CSS
-        tmpl.cache['header'] = new_header
+        tmpl.cache[b'header'] = new_header
 
     text = fctx.data()
     if stringutil.binary(text):
         return
 
     # str.splitlines() != unicode.splitlines() because "reasons"
-    for c in "\x0c\x1c\x1d\x1e":
+    for c in b"\x0c\x1c\x1d\x1e":
         if c in text:
-            text = text.replace(c, '')
+            text = text.replace(c, b'')
 
     # Pygments is best used with Unicode strings:
     # <http://pygments.org/docs/unicode/>
-    text = text.decode(encoding.encoding, 'replace')
+    text = text.decode(pycompat.sysstr(encoding.encoding), 'replace')
 
     # To get multi-line strings right, we can't format line-by-line
     try:
-        lexer = guess_lexer_for_filename(fctx.path(), text[:1024],
-                                         stripnl=False)
+        path = pycompat.sysstr(fctx.path())
+        lexer = guess_lexer_for_filename(path, text[:1024], stripnl=False)
     except (ClassNotFound, ValueError):
         # guess_lexer will return a lexer if *any* lexer matches. There is
         # no way to specify a minimum match score. This can give a high rate of
@@ -84,14 +86,16 @@
     if isinstance(lexer, TextLexer):
         return
 
-    formatter = HtmlFormatter(nowrap=True, style=style)
+    formatter = HtmlFormatter(nowrap=True, style=pycompat.sysstr(style))
 
     colorized = highlight(text, lexer, formatter)
-    coloriter = (s.encode(encoding.encoding, 'replace')
-                 for s in colorized.splitlines())
+    coloriter = (
+        s.encode(pycompat.sysstr(encoding.encoding), 'replace')
+        for s in colorized.splitlines()
+    )
 
-    tmpl._filters['colorize'] = lambda x: next(coloriter)
+    tmpl._filters[b'colorize'] = lambda x: next(coloriter)
 
     oldl = tmpl.cache[field]
-    newl = oldl.replace('line|escape', 'line|colorize')
+    newl = oldl.replace(b'line|escape', b'line|colorize')
     tmpl.cache[field] = newl
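
``pygmentize()`` above is the standard pygments pipeline with Mercurial's
bytes/str conversions layered on top: guess a lexer from the filename and
a content sample, format with ``HtmlFormatter(nowrap=True, ...)``, and
fall back to plain text when no lexer matches. The same flow as a
stand-alone sketch (requires pygments)::

    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import guess_lexer_for_filename
    from pygments.util import ClassNotFound

    def colorize(path, text, style='colorful'):
        # Guess a lexer as pygmentize() does; plain text on no match.
        try:
            lexer = guess_lexer_for_filename(path, text[:1024],
                                             stripnl=False)
        except (ClassNotFound, ValueError):
            return text
        formatter = HtmlFormatter(nowrap=True, style=style)
        return highlight(text, lexer, formatter)

    print(colorize('example.py', 'print("hi")\n'))
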
--- a/hgext/histedit.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/histedit.py	Mon Oct 21 11:09:48 2019 -0400
@@ -206,6 +206,10 @@
 import struct
 
 from mercurial.i18n import _
+from mercurial.pycompat import (
+    getattr,
+    open,
+)
 from mercurial import (
     bundle2,
     cmdutil,
@@ -240,32 +244,31 @@
 
 configtable = {}
 configitem = registrar.configitem(configtable)
-configitem('experimental', 'histedit.autoverb',
-    default=False,
+configitem(
+    b'experimental', b'histedit.autoverb', default=False,
 )
-configitem('histedit', 'defaultrev',
-    default=None,
+configitem(
+    b'histedit', b'defaultrev', default=None,
 )
-configitem('histedit', 'dropmissing',
-    default=False,
+configitem(
+    b'histedit', b'dropmissing', default=False,
 )
-configitem('histedit', 'linelen',
-    default=80,
-)
-configitem('histedit', 'singletransaction',
-    default=False,
+configitem(
+    b'histedit', b'linelen', default=80,
 )
-configitem('ui', 'interface.histedit',
-    default=None,
+configitem(
+    b'histedit', b'singletransaction', default=False,
 )
-configitem('histedit', 'summary-template',
-           default='{rev} {desc|firstline}')
+configitem(
+    b'ui', b'interface.histedit', default=None,
+)
+configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 actiontable = {}
 primaryactions = set()
@@ -273,6 +276,7 @@
 tertiaryactions = set()
 internalactions = set()
 
+
 def geteditcomment(ui, first, last):
     """ construct the editor comment
     The comment includes::
@@ -284,39 +288,45 @@
 
     Commands are only included once.
     """
-    intro = _("""Edit history between %s and %s
+    intro = _(
+        """Edit history between %s and %s
 
 Commits are listed from least to most recent
 
 You can reorder changesets by reordering the lines
 
 Commands:
-""")
+"""
+    )
     actions = []
+
     def addverb(v):
         a = actiontable[v]
-        lines = a.message.split("\n")
+        lines = a.message.split(b"\n")
         if len(a.verbs):
-            v = ', '.join(sorted(a.verbs, key=lambda v: len(v)))
-        actions.append(" %s = %s" % (v, lines[0]))
-        actions.extend(['  %s' for l in lines[1:]])
+            v = b', '.join(sorted(a.verbs, key=lambda v: len(v)))
+        actions.append(b" %s = %s" % (v, lines[0]))
+        actions.extend([b'  %s' for l in lines[1:]])
 
     for v in (
-         sorted(primaryactions) +
-         sorted(secondaryactions) +
-         sorted(tertiaryactions)
-        ):
+        sorted(primaryactions)
+        + sorted(secondaryactions)
+        + sorted(tertiaryactions)
+    ):
         addverb(v)
-    actions.append('')
+    actions.append(b'')
 
     hints = []
-    if ui.configbool('histedit', 'dropmissing'):
-        hints.append("Deleting a changeset from the list "
-                     "will DISCARD it from the edited history!")
-
-    lines = (intro % (first, last)).split('\n') + actions + hints
-
-    return ''.join(['# %s\n' % l if l else '#\n' for l in lines])
+    if ui.configbool(b'histedit', b'dropmissing'):
+        hints.append(
+            b"Deleting a changeset from the list "
+            b"will DISCARD it from the edited history!"
+        )
+
+    lines = (intro % (first, last)).split(b'\n') + actions + hints
+
+    return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines])
+
 
 class histeditstate(object):
     def __init__(self, repo):
@@ -328,70 +338,84 @@
         self.lock = None
         self.wlock = None
         self.backupfile = None
-        self.stateobj = statemod.cmdstate(repo, 'histedit-state')
+        self.stateobj = statemod.cmdstate(repo, b'histedit-state')
         self.replacements = []
 
     def read(self):
         """Load histedit state from disk and set fields appropriately."""
         if not self.stateobj.exists():
-            cmdutil.wrongtooltocontinue(self.repo, _('histedit'))
+            cmdutil.wrongtooltocontinue(self.repo, _(b'histedit'))
 
         data = self._read()
 
-        self.parentctxnode = data['parentctxnode']
-        actions = parserules(data['rules'], self)
+        self.parentctxnode = data[b'parentctxnode']
+        actions = parserules(data[b'rules'], self)
         self.actions = actions
-        self.keep = data['keep']
-        self.topmost = data['topmost']
-        self.replacements = data['replacements']
-        self.backupfile = data['backupfile']
+        self.keep = data[b'keep']
+        self.topmost = data[b'topmost']
+        self.replacements = data[b'replacements']
+        self.backupfile = data[b'backupfile']
 
     def _read(self):
-        fp = self.repo.vfs.read('histedit-state')
-        if fp.startswith('v1\n'):
+        fp = self.repo.vfs.read(b'histedit-state')
+        if fp.startswith(b'v1\n'):
             data = self._load()
             parentctxnode, rules, keep, topmost, replacements, backupfile = data
         else:
             data = pickle.loads(fp)
             parentctxnode, rules, keep, topmost, replacements = data
             backupfile = None
-        rules = "\n".join(["%s %s" % (verb, rest) for [verb, rest] in rules])
-
-        return {'parentctxnode': parentctxnode, "rules": rules, "keep": keep,
-                "topmost": topmost, "replacements": replacements,
-                "backupfile": backupfile}
+        rules = b"\n".join([b"%s %s" % (verb, rest) for [verb, rest] in rules])
+
+        return {
+            b'parentctxnode': parentctxnode,
+            b"rules": rules,
+            b"keep": keep,
+            b"topmost": topmost,
+            b"replacements": replacements,
+            b"backupfile": backupfile,
+        }
 
     def write(self, tr=None):
         if tr:
-            tr.addfilegenerator('histedit-state', ('histedit-state',),
-                                self._write, location='plain')
+            tr.addfilegenerator(
+                b'histedit-state',
+                (b'histedit-state',),
+                self._write,
+                location=b'plain',
+            )
         else:
-            with self.repo.vfs("histedit-state", "w") as f:
+            with self.repo.vfs(b"histedit-state", b"w") as f:
                 self._write(f)
 
     def _write(self, fp):
-        fp.write('v1\n')
-        fp.write('%s\n' % node.hex(self.parentctxnode))
-        fp.write('%s\n' % node.hex(self.topmost))
-        fp.write('%s\n' % ('True' if self.keep else 'False'))
-        fp.write('%d\n' % len(self.actions))
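+        # v1 on-disk layout, one field per line: the b'v1' marker, the hex
+        # parent node, the hex topmost node, the keep flag, the action count
+        # followed by a two-line (verb, hex node) record per action, the
+        # replacement count followed by one hex-concatenated line each, and
+        # finally the backup file name (an empty line when unset)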
+        fp.write(b'v1\n')
+        fp.write(b'%s\n' % node.hex(self.parentctxnode))
+        fp.write(b'%s\n' % node.hex(self.topmost))
+        fp.write(b'%s\n' % (b'True' if self.keep else b'False'))
+        fp.write(b'%d\n' % len(self.actions))
         for action in self.actions:
-            fp.write('%s\n' % action.tostate())
-        fp.write('%d\n' % len(self.replacements))
+            fp.write(b'%s\n' % action.tostate())
+        fp.write(b'%d\n' % len(self.replacements))
         for replacement in self.replacements:
-            fp.write('%s%s\n' % (node.hex(replacement[0]), ''.join(node.hex(r)
-                for r in replacement[1])))
+            fp.write(
+                b'%s%s\n'
+                % (
+                    node.hex(replacement[0]),
+                    b''.join(node.hex(r) for r in replacement[1]),
+                )
+            )
         backupfile = self.backupfile
         if not backupfile:
-            backupfile = ''
-        fp.write('%s\n' % backupfile)
+            backupfile = b''
+        fp.write(b'%s\n' % backupfile)
 
     def _load(self):
-        fp = self.repo.vfs('histedit-state', 'r')
+        fp = self.repo.vfs(b'histedit-state', b'r')
         lines = [l[:-1] for l in fp.readlines()]
 
         index = 0
-        lines[index] # version number
+        lines[index]  # version number
         index += 1
 
         parentctxnode = node.bin(lines[index])
@@ -400,7 +424,7 @@
         topmost = node.bin(lines[index])
         index += 1
 
-        keep = lines[index] == 'True'
+        keep = lines[index] == b'True'
         index += 1
 
         # Rules
@@ -421,8 +445,10 @@
         for i in pycompat.xrange(replacementlen):
             replacement = lines[index]
             original = node.bin(replacement[:40])
-            succ = [node.bin(replacement[i:i + 40]) for i in
-                    range(40, len(replacement), 40)]
+            succ = [
+                node.bin(replacement[i : i + 40])
+                for i in range(40, len(replacement), 40)
+            ]
             replacements.append((original, succ))
             index += 1
 
@@ -435,10 +461,10 @@
 
     def clear(self):
         if self.inprogress():
-            self.repo.vfs.unlink('histedit-state')
+            self.repo.vfs.unlink(b'histedit-state')
 
     def inprogress(self):
-        return self.repo.vfs.exists('histedit-state')
+        return self.repo.vfs.exists(b'histedit-state')
 
 
 class histeditaction(object):
@@ -451,7 +477,7 @@
     def fromrule(cls, state, rule):
         """Parses the given rule, returning an instance of the histeditaction.
         """
-        ruleid = rule.strip().split(' ', 1)[0]
+        ruleid = rule.strip().split(b' ', 1)[0]
         # ruleid can be anything from rev numbers, hashes, "bookmarks" etc
         # Check for validation of rule ids and get the rulehash
         try:
@@ -462,7 +488,7 @@
                 rulehash = _ctx.hex()
                 rev = node.bin(rulehash)
             except error.RepoLookupError:
-                raise error.ParseError(_("invalid changeset %s") % ruleid)
+                raise error.ParseError(_(b"invalid changeset %s") % ruleid)
         return cls(state, rev)
 
     def verify(self, prev, expected, seen):
@@ -471,19 +497,23 @@
         ha = node.hex(self.node)
         self.node = scmutil.resolvehexnodeidprefix(repo, ha)
         if self.node is None:
-            raise error.ParseError(_('unknown changeset %s listed') % ha[:12])
+            raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12])
         self._verifynodeconstraints(prev, expected, seen)
 
     def _verifynodeconstraints(self, prev, expected, seen):
-        # by default command need a node in the edited list
+        # by default commands need a node in the edited list
         if self.node not in expected:
-            raise error.ParseError(_('%s "%s" changeset was not a candidate')
-                                   % (self.verb, node.short(self.node)),
-                                   hint=_('only use listed changesets'))
+            raise error.ParseError(
+                _(b'%s "%s" changeset was not a candidate')
+                % (self.verb, node.short(self.node)),
+                hint=_(b'only use listed changesets'),
+            )
         # and only one command per node
         if self.node in seen:
-            raise error.ParseError(_('duplicated command for changeset %s') %
-                                   node.short(self.node))
+            raise error.ParseError(
+                _(b'duplicated command for changeset %s')
+                % node.short(self.node)
+            )
 
     def torule(self):
         """build a histedit rule line for an action
@@ -493,21 +523,25 @@
         """
         ctx = self.repo[self.node]
         ui = self.repo.ui
-        summary = cmdutil.rendertemplate(
-            ctx, ui.config('histedit', 'summary-template')) or ''
+        summary = (
+            cmdutil.rendertemplate(
+                ctx, ui.config(b'histedit', b'summary-template')
+            )
+            or b''
+        )
         summary = summary.splitlines()[0]
-        line = '%s %s %s' % (self.verb, ctx, summary)
+        line = b'%s %s %s' % (self.verb, ctx, summary)
         # trim to 75 columns by default so it's not stupidly wide in my editor
         # (the 5 more are left for verb)
-        maxlen = self.repo.ui.configint('histedit', 'linelen')
-        maxlen = max(maxlen, 22) # avoid truncating hash
+        maxlen = self.repo.ui.configint(b'histedit', b'linelen')
+        maxlen = max(maxlen, 22)  # avoid truncating hash
         return stringutil.ellipsis(line, maxlen)
 
     def tostate(self):
         """Print an action in format used by histedit state files
            (the first line is a verb, the remainder is the second)
         """
-        return "%s\n%s" % (self.verb, node.hex(self.node))
+        return b"%s\n%s" % (self.verb, node.hex(self.node))
 
     def run(self):
         """Runs the action. The default behavior is simply apply the action's
@@ -528,9 +562,10 @@
         repo.dirstate.setbranch(rulectx.branch())
         if stats.unresolvedcount:
             raise error.InterventionRequired(
-                _('Fix up the change (%s %s)') %
-                (self.verb, node.short(self.node)),
-                hint=_('hg histedit --continue to resume'))
+                _(b'Fix up the change (%s %s)')
+                % (self.verb, node.short(self.node)),
+                hint=_(b'hg histedit --continue to resume'),
+            )
 
     def continuedirty(self):
         """Continues the action when changes have been applied to the working
@@ -540,12 +575,17 @@
 
         editor = self.commiteditor()
         commit = commitfuncfor(repo, rulectx)
-        if repo.ui.configbool('rewrite', 'update-timestamp'):
+        if repo.ui.configbool(b'rewrite', b'update-timestamp'):
             date = dateutil.makedate()
         else:
             date = rulectx.date()
-        commit(text=rulectx.description(), user=rulectx.user(),
-               date=date, extra=rulectx.extra(), editor=editor)
+        commit(
+            text=rulectx.description(),
+            user=rulectx.user(),
+            date=date,
+            extra=rulectx.extra(),
+            editor=editor,
+        )
 
     def commiteditor(self):
         """The editor to be used to edit the commit message."""
@@ -555,16 +595,19 @@
         """Continues the action when the working copy is clean. The default
         behavior is to accept the current commit as the new version of the
         rulectx."""
-        ctx = self.repo['.']
+        ctx = self.repo[b'.']
         if ctx.node() == self.state.parentctxnode:
-            self.repo.ui.warn(_('%s: skipping changeset (no changes)\n') %
-                              node.short(self.node))
+            self.repo.ui.warn(
+                _(b'%s: skipping changeset (no changes)\n')
+                % node.short(self.node)
+            )
             return ctx, [(self.node, tuple())]
         if ctx.node() == self.node:
             # Nothing changed
             return ctx, []
         return ctx, [(self.node, (ctx.node(),))]
 
+
 def commitfuncfor(repo, src):
     """Build a commit function for the replacement of <src>
 
@@ -576,15 +619,18 @@
     different and not easily factored out of the fold method.
     """
     phasemin = src.phase()
+
     def commitfunc(**kwargs):
-        overrides = {('phases', 'new-commit'): phasemin}
-        with repo.ui.configoverride(overrides, 'histedit'):
+        overrides = {(b'phases', b'new-commit'): phasemin}
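+        # pinning phases.new-commit to the source's phase means the
+        # replacement commit is created in at least that phase, so e.g. a
+        # secret changeset never silently becomes draft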
+        with repo.ui.configoverride(overrides, b'histedit'):
             extra = kwargs.get(r'extra', {}).copy()
-            extra['histedit_source'] = src.hex()
+            extra[b'histedit_source'] = src.hex()
             kwargs[r'extra'] = extra
             return repo.commit(**kwargs)
+
     return commitfunc
 
+
 def applychanges(ui, repo, ctx, opts):
     """Merge changeset from ctx (only) in the current working directory"""
     wcpar = repo.dirstate.p1()
@@ -598,13 +644,15 @@
     else:
         try:
             # ui.forcemerge is an internal variable, do not document
-            repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
-                              'histedit')
-            stats = mergemod.graft(repo, ctx, ctx.p1(), ['local', 'histedit'])
+            repo.ui.setconfig(
+                b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
+            )
+            stats = mergemod.graft(repo, ctx, ctx.p1(), [b'local', b'histedit'])
         finally:
-            repo.ui.setconfig('ui', 'forcemerge', '', 'histedit')
+            repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
     return stats
 
+
 def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False):
     """collapse the set of revisions from first to last as new one.
 
@@ -615,13 +663,14 @@
     Commit message is edited in all cases.
 
     This function works in memory."""
-    ctxs = list(repo.set('%d::%d', firstctx.rev(), lastctx.rev()))
+    ctxs = list(repo.set(b'%d::%d', firstctx.rev(), lastctx.rev()))
     if not ctxs:
         return None
     for c in ctxs:
         if not c.mutable():
             raise error.ParseError(
-                _("cannot fold into public change %s") % node.short(c.node()))
+                _(b"cannot fold into public change %s") % node.short(c.node())
+            )
     base = firstctx.p1()
 
     # commit a new version of the old changeset, including the update
@@ -637,48 +686,62 @@
     files = [f for f in files if not cmdutil.samefile(f, lastctx, base)]
     # commit version of these files as defined by head
     headmf = lastctx.manifest()
+
     def filectxfn(repo, ctx, path):
         if path in headmf:
             fctx = lastctx[path]
             flags = fctx.flags()
-            mctx = context.memfilectx(repo, ctx,
-                                      fctx.path(), fctx.data(),
-                                      islink='l' in flags,
-                                      isexec='x' in flags,
-                                      copysource=copied.get(path))
+            mctx = context.memfilectx(
+                repo,
+                ctx,
+                fctx.path(),
+                fctx.data(),
+                islink=b'l' in flags,
+                isexec=b'x' in flags,
+                copysource=copied.get(path),
+            )
             return mctx
         return None
 
-    if commitopts.get('message'):
-        message = commitopts['message']
+    if commitopts.get(b'message'):
+        message = commitopts[b'message']
     else:
         message = firstctx.description()
-    user = commitopts.get('user')
-    date = commitopts.get('date')
-    extra = commitopts.get('extra')
+    user = commitopts.get(b'user')
+    date = commitopts.get(b'date')
+    extra = commitopts.get(b'extra')
 
     parents = (firstctx.p1().node(), firstctx.p2().node())
     editor = None
     if not skipprompt:
-        editor = cmdutil.getcommiteditor(edit=True, editform='histedit.fold')
-    new = context.memctx(repo,
-                         parents=parents,
-                         text=message,
-                         files=files,
-                         filectxfn=filectxfn,
-                         user=user,
-                         date=date,
-                         extra=extra,
-                         editor=editor)
+        editor = cmdutil.getcommiteditor(edit=True, editform=b'histedit.fold')
+    new = context.memctx(
+        repo,
+        parents=parents,
+        text=message,
+        files=files,
+        filectxfn=filectxfn,
+        user=user,
+        date=date,
+        extra=extra,
+        editor=editor,
+    )
     return repo.commitctx(new)
 
+
 def _isdirtywc(repo):
     return repo[None].dirty(missing=True)
 
+
 def abortdirty():
-    raise error.Abort(_('working copy has pending changes'),
-        hint=_('amend, commit, or revert them and run histedit '
-            '--continue, or abort with histedit --abort'))
+    raise error.Abort(
+        _(b'working copy has pending changes'),
+        hint=_(
+            b'amend, commit, or revert them and run histedit '
+            b'--continue, or abort with histedit --abort'
+        ),
+    )
+
 
 def action(verbs, message, priority=False, internal=False):
     def wrap(cls):
@@ -699,23 +762,22 @@
         for verb in verbs:
             actiontable[verb] = cls
         return cls
+
     return wrap
 
-@action(['pick', 'p'],
-        _('use commit'),
-        priority=True)
+
+@action([b'pick', b'p'], _(b'use commit'), priority=True)
 class pick(histeditaction):
     def run(self):
         rulectx = self.repo[self.node]
         if rulectx.p1().node() == self.state.parentctxnode:
-            self.repo.ui.debug('node %s unchanged\n' % node.short(self.node))
+            self.repo.ui.debug(b'node %s unchanged\n' % node.short(self.node))
             return rulectx, []
 
         return super(pick, self).run()
 
-@action(['edit', 'e'],
-        _('use commit, but stop for amending'),
-        priority=True)
+
+@action([b'edit', b'e'], _(b'use commit, but stop for amending'), priority=True)
 class edit(histeditaction):
     def run(self):
         repo = self.repo
@@ -723,15 +785,16 @@
         hg.update(repo, self.state.parentctxnode, quietempty=True)
         applychanges(repo.ui, repo, rulectx, {})
         raise error.InterventionRequired(
-            _('Editing (%s), you may commit or record as needed now.')
+            _(b'Editing (%s), you may commit or record as needed now.')
             % node.short(self.node),
-            hint=_('hg histedit --continue to resume'))
+            hint=_(b'hg histedit --continue to resume'),
+        )
 
     def commiteditor(self):
-        return cmdutil.getcommiteditor(edit=True, editform='histedit.edit')
-
-@action(['fold', 'f'],
-        _('use commit, but combine it with the one above'))
+        return cmdutil.getcommiteditor(edit=True, editform=b'histedit.edit')
+
+
+@action([b'fold', b'f'], _(b'use commit, but combine it with the one above'))
 class fold(histeditaction):
     def verify(self, prev, expected, seen):
         """ Verifies semantic correctness of the fold rule"""
@@ -739,49 +802,57 @@
         repo = self.repo
         if not prev:
             c = repo[self.node].p1()
-        elif not prev.verb in ('pick', 'base'):
+        elif prev.verb not in (b'pick', b'base'):
             return
         else:
             c = repo[prev.node]
         if not c.mutable():
             raise error.ParseError(
-                _("cannot fold into public change %s") % node.short(c.node()))
-
+                _(b"cannot fold into public change %s") % node.short(c.node())
+            )
 
     def continuedirty(self):
         repo = self.repo
         rulectx = repo[self.node]
 
         commit = commitfuncfor(repo, rulectx)
-        commit(text='fold-temp-revision %s' % node.short(self.node),
-               user=rulectx.user(), date=rulectx.date(),
-               extra=rulectx.extra())
+        commit(
+            text=b'fold-temp-revision %s' % node.short(self.node),
+            user=rulectx.user(),
+            date=rulectx.date(),
+            extra=rulectx.extra(),
+        )
 
     def continueclean(self):
         repo = self.repo
-        ctx = repo['.']
+        ctx = repo[b'.']
         rulectx = repo[self.node]
         parentctxnode = self.state.parentctxnode
         if ctx.node() == parentctxnode:
-            repo.ui.warn(_('%s: empty changeset\n') %
-                              node.short(self.node))
+            repo.ui.warn(_(b'%s: empty changeset\n') % node.short(self.node))
             return ctx, [(self.node, (parentctxnode,))]
 
         parentctx = repo[parentctxnode]
-        newcommits = set(c.node() for c in repo.set('(%d::. - %d)',
-                                                    parentctx.rev(),
-                                                    parentctx.rev()))
+        newcommits = set(
+            c.node()
+            for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev())
+        )
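+        # b'(%d::. - %d)' selects the DAG range from the previous commit to
+        # the working copy parent, minus the previous commit itself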
         if not newcommits:
-            repo.ui.warn(_('%s: cannot fold - working copy is not a '
-                           'descendant of previous commit %s\n') %
-                           (node.short(self.node), node.short(parentctxnode)))
+            repo.ui.warn(
+                _(
+                    b'%s: cannot fold - working copy is not a '
+                    b'descendant of previous commit %s\n'
+                )
+                % (node.short(self.node), node.short(parentctxnode))
+            )
             return ctx, [(self.node, (ctx.node(),))]
 
         middlecommits = newcommits.copy()
         middlecommits.discard(ctx.node())
 
-        return self.finishfold(repo.ui, repo, parentctx, rulectx, ctx.node(),
-                               middlecommits)
+        return self.finishfold(
+            repo.ui, repo, parentctx, rulectx, ctx.node(), middlecommits
+        )
 
     def skipprompt(self):
         """Returns true if the rule should skip the message editor.
@@ -813,53 +884,65 @@
         hg.updaterepo(repo, parent, overwrite=False)
         ### prepare new commit data
         commitopts = {}
-        commitopts['user'] = ctx.user()
+        commitopts[b'user'] = ctx.user()
         # commit message
         if not self.mergedescs():
             newmessage = ctx.description()
         else:
-            newmessage = '\n***\n'.join(
-                [ctx.description()] +
-                [repo[r].description() for r in internalchanges] +
-                [oldctx.description()]) + '\n'
-        commitopts['message'] = newmessage
+            newmessage = (
+                b'\n***\n'.join(
+                    [ctx.description()]
+                    + [repo[r].description() for r in internalchanges]
+                    + [oldctx.description()]
+                )
+                + b'\n'
+            )
+        commitopts[b'message'] = newmessage
         # date
         if self.firstdate():
-            commitopts['date'] = ctx.date()
+            commitopts[b'date'] = ctx.date()
         else:
-            commitopts['date'] = max(ctx.date(), oldctx.date())
+            commitopts[b'date'] = max(ctx.date(), oldctx.date())
         # if date is to be updated to current
-        if ui.configbool('rewrite', 'update-timestamp'):
-            commitopts['date'] = dateutil.makedate()
+        if ui.configbool(b'rewrite', b'update-timestamp'):
+            commitopts[b'date'] = dateutil.makedate()
 
         extra = ctx.extra().copy()
         # histedit_source
-        # note: ctx is likely a temporary commit but that the best we can do
+        # note: ctx is likely a temporary commit but that's the best we can do
         #       here. This is sufficient to solve issue3681 anyway.
-        extra['histedit_source'] = '%s,%s' % (ctx.hex(), oldctx.hex())
-        commitopts['extra'] = extra
+        extra[b'histedit_source'] = b'%s,%s' % (ctx.hex(), oldctx.hex())
+        commitopts[b'extra'] = extra
         phasemin = max(ctx.phase(), oldctx.phase())
-        overrides = {('phases', 'new-commit'): phasemin}
-        with repo.ui.configoverride(overrides, 'histedit'):
-            n = collapse(repo, ctx, repo[newnode], commitopts,
-                         skipprompt=self.skipprompt())
+        overrides = {(b'phases', b'new-commit'): phasemin}
+        with repo.ui.configoverride(overrides, b'histedit'):
+            n = collapse(
+                repo,
+                ctx,
+                repo[newnode],
+                commitopts,
+                skipprompt=self.skipprompt(),
+            )
         if n is None:
             return ctx, []
         hg.updaterepo(repo, n, overwrite=False)
-        replacements = [(oldctx.node(), (newnode,)),
-                        (ctx.node(), (n,)),
-                        (newnode, (n,)),
-                       ]
+        replacements = [
+            (oldctx.node(), (newnode,)),
+            (ctx.node(), (n,)),
+            (newnode, (n,)),
+        ]
         for ich in internalchanges:
             replacements.append((ich, (n,)))
         return repo[n], replacements
 
-@action(['base', 'b'],
-        _('checkout changeset and apply further changesets from there'))
+
+@action(
+    [b'base', b'b'],
+    _(b'checkout changeset and apply further changesets from there'),
+)
 class base(histeditaction):
-
     def run(self):
-        if self.repo['.'].node() != self.node:
+        if self.repo[b'.'].node() != self.node:
             mergemod.update(self.repo, self.node, branchmerge=False, force=True)
         return self.continueclean()
 
@@ -867,34 +950,42 @@
         abortdirty()
 
     def continueclean(self):
-        basectx = self.repo['.']
+        basectx = self.repo[b'.']
         return basectx, []
 
     def _verifynodeconstraints(self, prev, expected, seen):
-        # base can only be use with a node not in the edited set
+        # base can only be used with a node not in the edited set
         if self.node in expected:
-            msg = _('%s "%s" changeset was an edited list candidate')
+            msg = _(b'%s "%s" changeset was an edited list candidate')
             raise error.ParseError(
                 msg % (self.verb, node.short(self.node)),
-                hint=_('base must only use unlisted changesets'))
-
-@action(['_multifold'],
-        _(
-    """fold subclass used for when multiple folds happen in a row
+                hint=_(b'base must only use unlisted changesets'),
+            )
+
+
+@action(
+    [b'_multifold'],
+    _(
+        """fold subclass used for when multiple folds happen in a row
 
     We only want to fire the editor for the folded message once when
     (say) four changes are folded down into a single change. This is
     similar to rollup, but we should preserve both messages so that
     when the last fold operation runs we can show the user all the
     commit messages in their editor.
-    """),
-        internal=True)
+    """
+    ),
+    internal=True,
+)
 class _multifold(fold):
     def skipprompt(self):
         return True
 
-@action(["roll", "r"],
-        _("like fold, but discard this commit's description and date"))
+
+@action(
+    [b"roll", b"r"],
+    _(b"like fold, but discard this commit's description and date"),
+)
 class rollup(fold):
     def mergedescs(self):
         return False
@@ -905,19 +996,23 @@
     def firstdate(self):
         return True
 
-@action(["drop", "d"],
-        _('remove commit from history'))
+
+@action([b"drop", b"d"], _(b'remove commit from history'))
 class drop(histeditaction):
     def run(self):
         parentctx = self.repo[self.state.parentctxnode]
         return parentctx, [(self.node, tuple())]
 
-@action(["mess", "m"],
-        _('edit commit message without changing commit content'),
-        priority=True)
+
+@action(
+    [b"mess", b"m"],
+    _(b'edit commit message without changing commit content'),
+    priority=True,
+)
 class message(histeditaction):
     def commiteditor(self):
-        return cmdutil.getcommiteditor(edit=True, editform='histedit.mess')
+        return cmdutil.getcommiteditor(edit=True, editform=b'histedit.mess')
+
 
 def findoutgoing(ui, repo, remote=None, force=False, opts=None):
     """utility function to find the first outgoing changeset
@@ -925,9 +1020,9 @@
     Used by initialization code"""
     if opts is None:
         opts = {}
-    dest = ui.expandpath(remote or 'default-push', remote or 'default')
+    dest = ui.expandpath(remote or b'default-push', remote or b'default')
     dest, branches = hg.parseurl(dest, None)[:2]
-    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
+    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
 
     revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
     other = hg.peer(repo, opts, dest)
@@ -937,27 +1032,28 @@
 
     outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
     if not outgoing.missing:
-        raise error.Abort(_('no outgoing ancestors'))
-    roots = list(repo.revs("roots(%ln)", outgoing.missing))
+        raise error.Abort(_(b'no outgoing ancestors'))
+    roots = list(repo.revs(b"roots(%ln)", outgoing.missing))
     if len(roots) > 1:
-        msg = _('there are ambiguous outgoing revisions')
-        hint = _("see 'hg help histedit' for more detail")
+        msg = _(b'there are ambiguous outgoing revisions')
+        hint = _(b"see 'hg help histedit' for more detail")
         raise error.Abort(msg, hint=hint)
     return repo[roots[0]].node()
 
+
 # Curses Support
 try:
     import curses
 except ImportError:
     curses = None
 
-KEY_LIST = ['pick', 'edit', 'fold', 'drop', 'mess', 'roll']
+KEY_LIST = [b'pick', b'edit', b'fold', b'drop', b'mess', b'roll']
 ACTION_LABELS = {
-    'fold': '^fold',
-    'roll': '^roll',
+    b'fold': b'^fold',
+    b'roll': b'^roll',
 }
 
-COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT  = 1, 2, 3, 4, 5
+COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5
 COLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8
 
 E_QUIT, E_HISTEDIT = 1, 2
@@ -965,55 +1061,56 @@
 MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3
 
 KEYTABLE = {
-    'global': {
-        'h':         'next-action',
-        'KEY_RIGHT': 'next-action',
-        'l':         'prev-action',
-        'KEY_LEFT':  'prev-action',
-        'q':         'quit',
-        'c':         'histedit',
-        'C':         'histedit',
-        'v':         'showpatch',
-        '?':         'help',
+    b'global': {
+        b'h': b'next-action',
+        b'KEY_RIGHT': b'next-action',
+        b'l': b'prev-action',
+        b'KEY_LEFT': b'prev-action',
+        b'q': b'quit',
+        b'c': b'histedit',
+        b'C': b'histedit',
+        b'v': b'showpatch',
+        b'?': b'help',
     },
     MODE_RULES: {
-        'd':         'action-drop',
-        'e':         'action-edit',
-        'f':         'action-fold',
-        'm':         'action-mess',
-        'p':         'action-pick',
-        'r':         'action-roll',
-        ' ':         'select',
-        'j':         'down',
-        'k':         'up',
-        'KEY_DOWN':  'down',
-        'KEY_UP':    'up',
-        'J':         'move-down',
-        'K':         'move-up',
-        'KEY_NPAGE': 'move-down',
-        'KEY_PPAGE': 'move-up',
-        '0':         'goto',  # Used for 0..9
+        b'd': b'action-drop',
+        b'e': b'action-edit',
+        b'f': b'action-fold',
+        b'm': b'action-mess',
+        b'p': b'action-pick',
+        b'r': b'action-roll',
+        b' ': b'select',
+        b'j': b'down',
+        b'k': b'up',
+        b'KEY_DOWN': b'down',
+        b'KEY_UP': b'up',
+        b'J': b'move-down',
+        b'K': b'move-up',
+        b'KEY_NPAGE': b'move-down',
+        b'KEY_PPAGE': b'move-up',
+        b'0': b'goto',  # Used for 0..9
     },
     MODE_PATCH: {
-        ' ':         'page-down',
-        'KEY_NPAGE': 'page-down',
-        'KEY_PPAGE': 'page-up',
-        'j':         'line-down',
-        'k':         'line-up',
-        'KEY_DOWN':  'line-down',
-        'KEY_UP':    'line-up',
-        'J':         'down',
-        'K':         'up',
+        b' ': b'page-down',
+        b'KEY_NPAGE': b'page-down',
+        b'KEY_PPAGE': b'page-up',
+        b'j': b'line-down',
+        b'k': b'line-up',
+        b'KEY_DOWN': b'line-down',
+        b'KEY_UP': b'line-up',
+        b'J': b'down',
+        b'K': b'up',
     },
-    MODE_HELP: {
-    },
+    MODE_HELP: {},
 }
 
+
 def screen_size():
-    return struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '    '))
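+    # TIOCGWINSZ fills a struct winsize whose first two shorts are
+    # (rows, columns); unpack those from the 4-byte buffer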
+    return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b'    '))
+
 
 class histeditrule(object):
-    def __init__(self, ctx, pos, action='pick'):
+    def __init__(self, ctx, pos, action=b'pick'):
         self.ctx = ctx
         self.action = action
         self.origpos = pos
@@ -1036,10 +1133,11 @@
         h = self.ctx.hex()[0:12]
         r = self.ctx.rev()
         desc = self.ctx.description().splitlines()[0].strip()
-        if self.action == 'roll':
-            desc = ''
-        return "#{0:<2} {1:<6} {2}:{3}   {4}".format(
-                self.origpos, action, r, h, desc)
+        if self.action == b'roll':
+            desc = b''
+        return b"#{0:<2} {1:<6} {2}:{3}   {4}".format(
+            self.origpos, action, r, h, desc
+        )
 
     def checkconflicts(self, other):
         if other.pos > self.pos and other.origpos <= self.origpos:
@@ -1051,40 +1149,44 @@
             self.conflicts.remove(other)
         return self.conflicts
 
+
 # ============ EVENTS ===============
 def movecursor(state, oldpos, newpos):
     '''Change the rule/changeset that the cursor is pointing to, regardless of
     current mode (you can switch between patches from the view patch window).'''
-    state['pos'] = newpos
-
-    mode, _ = state['mode']
+    state[b'pos'] = newpos
+
+    mode, _ = state[b'mode']
     if mode == MODE_RULES:
         # Scroll through the list by updating the view for MODE_RULES, so that
         # even if we are not currently viewing the rules, switching back will
         # result in the cursor's rule being visible.
-        modestate = state['modes'][MODE_RULES]
-        if newpos < modestate['line_offset']:
-            modestate['line_offset'] = newpos
-        elif newpos > modestate['line_offset'] + state['page_height'] - 1:
-            modestate['line_offset'] = newpos - state['page_height'] + 1
+        modestate = state[b'modes'][MODE_RULES]
+        if newpos < modestate[b'line_offset']:
+            modestate[b'line_offset'] = newpos
+        elif newpos > modestate[b'line_offset'] + state[b'page_height'] - 1:
+            modestate[b'line_offset'] = newpos - state[b'page_height'] + 1
 
     # Reset the patch view region to the top of the new patch.
-    state['modes'][MODE_PATCH]['line_offset'] = 0
+    state[b'modes'][MODE_PATCH][b'line_offset'] = 0
+
 
 def changemode(state, mode):
-    curmode, _ = state['mode']
-    state['mode'] = (mode, curmode)
+    curmode, _ = state[b'mode']
+    state[b'mode'] = (mode, curmode)
     if mode == MODE_PATCH:
-        state['modes'][MODE_PATCH]['patchcontents'] = patchcontents(state)
+        state[b'modes'][MODE_PATCH][b'patchcontents'] = patchcontents(state)
+
 
 def makeselection(state, pos):
-    state['selected'] = pos
+    state[b'selected'] = pos
+
 
 def swap(state, oldpos, newpos):
     """Swap two positions and calculate necessary conflicts in
     O(|newpos-oldpos|) time"""
 
-    rules = state['rules']
+    rules = state[b'rules']
     assert 0 <= oldpos < len(rules) and 0 <= newpos < len(rules)
 
     rules[oldpos], rules[newpos] = rules[newpos], rules[oldpos]
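+    # only rules between the two swapped positions can change conflict
+    # status, which is what keeps this O(|newpos - oldpos|)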
@@ -1099,19 +1201,21 @@
         rules[newpos].checkconflicts(rules[r])
         rules[oldpos].checkconflicts(rules[r])
 
-    if state['selected']:
+    if state[b'selected']:
         makeselection(state, newpos)
 
+
 def changeaction(state, pos, action):
     """Change the action state on the given position to the new action"""
-    rules = state['rules']
+    rules = state[b'rules']
     assert 0 <= pos < len(rules)
     rules[pos].action = action
 
+
 def cycleaction(state, pos, next=False):
     """Changes the action state the next or the previous action from
     the action list"""
-    rules = state['rules']
+    rules = state[b'rules']
     assert 0 <= pos < len(rules)
     current = rules[pos].action
 
@@ -1124,20 +1228,22 @@
         index -= 1
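+    # the modulo wraps the cycle around either end of KEY_LIST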
     changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)])
 
+
 def changeview(state, delta, unit):
     '''Change the region of whatever is being viewed (a patch or the list of
     changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'.'''
-    mode, _ = state['mode']
+    mode, _ = state[b'mode']
     if mode != MODE_PATCH:
         return
-    mode_state = state['modes'][mode]
-    num_lines = len(mode_state['patchcontents'])
-    page_height = state['page_height']
-    unit = page_height if unit == 'page' else 1
+    mode_state = state[b'modes'][mode]
+    num_lines = len(mode_state[b'patchcontents'])
+    page_height = state[b'page_height']
+    unit = page_height if unit == b'page' else 1
-    num_pages = 1 + (num_lines - 1) / page_height
+    # floor division: a bare "/" yields a float on Python 3
+    num_pages = 1 + (num_lines - 1) // page_height
     max_offset = (num_pages - 1) * page_height
-    newline = mode_state['line_offset'] + delta * unit
-    mode_state['line_offset'] = max(0, min(max_offset, newline))
+    newline = mode_state[b'line_offset'] + delta * unit
+    mode_state[b'line_offset'] = max(0, min(max_offset, newline))
+
 
 def event(state, ch):
     """Change state based on the current character input
@@ -1145,76 +1251,80 @@
-    This takes the current state and based on the current character input from
-    the user we change the state.
+    This takes the current state and, based on the current character input
+    from the user, mutates the state accordingly.
     """
-    selected = state['selected']
-    oldpos = state['pos']
-    rules = state['rules']
-
-    if ch in (curses.KEY_RESIZE, "KEY_RESIZE"):
+    selected = state[b'selected']
+    oldpos = state[b'pos']
+    rules = state[b'rules']
+
+    if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
         return E_RESIZE
 
     lookup_ch = ch
-    if '0' <= ch <= '9':
-        lookup_ch = '0'
-
-    curmode, prevmode = state['mode']
-    action = KEYTABLE[curmode].get(lookup_ch, KEYTABLE['global'].get(lookup_ch))
+    if ch is not None and b'0' <= ch <= b'9':
+        lookup_ch = b'0'
+
+    curmode, prevmode = state[b'mode']
+    action = KEYTABLE[curmode].get(
+        lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
+    )
     if action is None:
         return
-    if action in ('down', 'move-down'):
+    if action in (b'down', b'move-down'):
         newpos = min(oldpos + 1, len(rules) - 1)
         movecursor(state, oldpos, newpos)
-        if selected is not None or action == 'move-down':
+        if selected is not None or action == b'move-down':
             swap(state, oldpos, newpos)
-    elif action in ('up', 'move-up'):
+    elif action in (b'up', b'move-up'):
         newpos = max(0, oldpos - 1)
         movecursor(state, oldpos, newpos)
-        if selected is not None or action == 'move-up':
+        if selected is not None or action == b'move-up':
             swap(state, oldpos, newpos)
-    elif action == 'next-action':
+    elif action == b'next-action':
         cycleaction(state, oldpos, next=True)
-    elif action == 'prev-action':
+    elif action == b'prev-action':
         cycleaction(state, oldpos, next=False)
-    elif action == 'select':
+    elif action == b'select':
         selected = oldpos if selected is None else None
         makeselection(state, selected)
-    elif action == 'goto' and int(ch) < len(rules) and len(rules) <= 10:
+    elif action == b'goto' and int(ch) < len(rules) and len(rules) <= 10:
         newrule = next((r for r in rules if r.origpos == int(ch)))
         movecursor(state, oldpos, newrule.pos)
         if selected is not None:
             swap(state, oldpos, newrule.pos)
-    elif action.startswith('action-'):
+    elif action.startswith(b'action-'):
         changeaction(state, oldpos, action[7:])
-    elif action == 'showpatch':
+    elif action == b'showpatch':
         changemode(state, MODE_PATCH if curmode != MODE_PATCH else prevmode)
-    elif action == 'help':
+    elif action == b'help':
         changemode(state, MODE_HELP if curmode != MODE_HELP else prevmode)
-    elif action == 'quit':
+    elif action == b'quit':
         return E_QUIT
-    elif action == 'histedit':
+    elif action == b'histedit':
         return E_HISTEDIT
-    elif action == 'page-down':
+    elif action == b'page-down':
         return E_PAGEDOWN
-    elif action == 'page-up':
+    elif action == b'page-up':
         return E_PAGEUP
-    elif action == 'line-down':
+    elif action == b'line-down':
         return E_LINEDOWN
-    elif action == 'line-up':
+    elif action == b'line-up':
         return E_LINEUP
 
+
 def makecommands(rules):
     """Returns a list of commands consumable by histedit --commands based on
     our list of rules"""
     commands = []
-    for rules in rules:
-        commands.append("{0} {1}\n".format(rules.action, rules.ctx))
+    # avoid shadowing the ``rules`` argument; bytes also lack .format()
+    for rule in rules:
+        commands.append(b"%s %s\n" % (rule.action, rule.ctx))
     return commands
 
+
 def addln(win, y, x, line, color=None):
     """Add a line to the given window left padding but 100% filled with
     whitespace characters, so that the color appears on the whole line"""
     maxy, maxx = win.getmaxyx()
     length = maxx - 1 - x
-    line = ("{0:<%d}" % length).format(str(line).strip())[:length]
+    line = (b"{0:<%d}" % length).format(str(line).strip())[:length]
     if y < 0:
         y = maxy + y
     if x < 0:
@@ -1224,27 +1334,32 @@
     else:
         win.addstr(y, x, line)
 
+
 def _trunc_head(line, n):
     if len(line) <= n:
         return line
-    return '> ' + line[-(n - 2):]
+    return b'> ' + line[-(n - 2) :]
+
+
 def _trunc_tail(line, n):
     if len(line) <= n:
         return line
-    return line[:n - 2] + ' >'
+    return line[: n - 2] + b' >'
+
 
 def patchcontents(state):
-    repo = state['repo']
-    rule = state['rules'][state['pos']]
-    displayer = logcmdutil.changesetdisplayer(repo.ui, repo, {
-        "patch": True,  "template": "status"
-    }, buffered=True)
-    overrides = {('ui',  'verbose'): True}
-    with repo.ui.configoverride(overrides, source='histedit'):
+    repo = state[b'repo']
+    rule = state[b'rules'][state[b'pos']]
+    displayer = logcmdutil.changesetdisplayer(
+        repo.ui, repo, {b"patch": True, b"template": b"status"}, buffered=True
+    )
+    overrides = {(b'ui', b'verbose'): True}
+    with repo.ui.configoverride(overrides, source=b'histedit'):
         displayer.show(rule.ctx)
         displayer.close()
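+    # buffered=True makes the displayer accumulate its output in
+    # displayer.hunk, keyed by revision, instead of writing to the ui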
     return displayer.hunk[rule.ctx.rev()].splitlines()
 
+
 def _chisteditmain(repo, rules, stdscr):
     try:
         curses.use_default_colors()
@@ -1270,8 +1385,8 @@
     def rendercommit(win, state):
         """Renders the commit window that shows the log of the current selected
         commit"""
-        pos = state['pos']
-        rules = state['rules']
+        pos = state[b'pos']
+        rules = state[b'rules']
         rule = rules[pos]
 
         ctx = rule.ctx
@@ -1280,20 +1395,20 @@
         maxy, maxx = win.getmaxyx()
         length = maxx - 3
 
-        line = "changeset: {0}:{1:<12}".format(ctx.rev(), ctx)
+        line = b"changeset: {0}:{1:<12}".format(ctx.rev(), ctx)
         win.addstr(1, 1, line[:length])
 
-        line = "user:      {0}".format(ctx.user())
+        line = b"user:      {0}".format(ctx.user())
         win.addstr(2, 1, line[:length])
 
         bms = repo.nodebookmarks(ctx.node())
-        line = "bookmark:  {0}".format(' '.join(bms))
+        line = b"bookmark:  {0}".format(b' '.join(bms))
         win.addstr(3, 1, line[:length])
 
-        line = "summary:   {0}".format(ctx.description().splitlines()[0])
+        line = b"summary:   {0}".format(ctx.description().splitlines()[0])
         win.addstr(4, 1, line[:length])
 
-        line = "files:     "
+        line = b"files:     "
         win.addstr(5, 1, line)
         fnx = 1 + len(line)
         fnmaxx = length - fnx + 1
@@ -1302,7 +1417,7 @@
         files = ctx.files()
         for i, line1 in enumerate(files):
             if len(files) > fnmaxn and i == fnmaxn - 1:
-                win.addstr(y, fnx, _trunc_tail(','.join(files[i:]), fnmaxx))
+                win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx))
                 y = y + 1
                 break
             win.addstr(y, fnx, _trunc_head(line1, fnmaxx))
@@ -1310,22 +1425,22 @@
 
         conflicts = rule.conflicts
         if len(conflicts) > 0:
-            conflictstr = ','.join(map(lambda r: str(r.ctx), conflicts))
-            conflictstr = "changed files overlap with {0}".format(conflictstr)
+            conflictstr = b','.join(
+                pycompat.bytestr(r.ctx) for r in conflicts
+            )
+            conflictstr = b"changed files overlap with %s" % conflictstr
         else:
-            conflictstr = 'no overlap'
+            conflictstr = b'no overlap'
 
         win.addstr(y, 1, conflictstr[:length])
         win.noutrefresh()
 
     def helplines(mode):
         if mode == MODE_PATCH:
-            help = """\
+            help = b"""\
 ?: help, k/up: line up, j/down: line down, v: stop viewing patch
 pgup: prev page, space/pgdn: next page, c: commit, q: abort
 """
         else:
-            help = """\
+            help = b"""\
 ?: help, k/up: move up, j/down: move down, space: select, v: view patch
 d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
 pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
@@ -1334,7 +1449,7 @@
 
     def renderhelp(win, state):
         maxy, maxx = win.getmaxyx()
-        mode, _ = state['mode']
+        mode, _ = state[b'mode']
         for y, line in enumerate(helplines(mode)):
             if y >= maxy:
                 break
@@ -1342,28 +1457,33 @@
         win.noutrefresh()
 
     def renderrules(rulesscr, state):
-        rules = state['rules']
-        pos = state['pos']
-        selected = state['selected']
-        start = state['modes'][MODE_RULES]['line_offset']
+        rules = state[b'rules']
+        pos = state[b'pos']
+        selected = state[b'selected']
+        start = state[b'modes'][MODE_RULES][b'line_offset']
 
         conflicts = [r.ctx for r in rules if r.conflicts]
         if len(conflicts) > 0:
-            line = "potential conflict in %s" % ','.join(map(str, conflicts))
+            line = b"potential conflict in %s" % b','.join(map(str, conflicts))
             addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))
 
         for y, rule in enumerate(rules[start:]):
-            if y >= state['page_height']:
+            if y >= state[b'page_height']:
                 break
             if len(rule.conflicts) > 0:
-                rulesscr.addstr(y, 0, " ", curses.color_pair(COLOR_WARN))
+                rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
             else:
-                rulesscr.addstr(y, 0, " ", curses.COLOR_BLACK)
+                rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)
             if y + start == selected:
                 addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
             elif y + start == pos:
-                addln(rulesscr, y, 2, rule,
-                      curses.color_pair(COLOR_CURRENT) | curses.A_BOLD)
+                addln(
+                    rulesscr,
+                    y,
+                    2,
+                    rule,
+                    curses.color_pair(COLOR_CURRENT) | curses.A_BOLD,
+                )
             else:
                 addln(rulesscr, y, 2, rule)
         rulesscr.noutrefresh()
@@ -1374,15 +1494,16 @@
         for y in range(0, length):
             line = output[y]
             if diffcolors:
-                if line and line[0] == '+':
-                    win.addstr(
-                        y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE))
-                elif line and line[0] == '-':
+                # bytes[0] is an int on Python 3, so match with startswith()
+                if line.startswith(b'+'):
                     win.addstr(
-                        y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE))
-                elif line.startswith('@@ '):
+                        y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE)
+                    )
+                elif line.startswith(b'-'):
                     win.addstr(
-                        y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET))
+                        y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE)
+                    )
+                elif line.startswith(b'@@ '):
+                    win.addstr(y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET))
                 else:
                     win.addstr(y, 0, line)
             else:
@@ -1390,17 +1511,17 @@
         win.noutrefresh()
 
     def renderpatch(win, state):
-        start = state['modes'][MODE_PATCH]['line_offset']
-        content = state['modes'][MODE_PATCH]['patchcontents']
+        start = state[b'modes'][MODE_PATCH][b'line_offset']
+        content = state[b'modes'][MODE_PATCH][b'patchcontents']
         renderstring(win, state, content[start:], diffcolors=True)
 
     def layout(mode):
         maxy, maxx = stdscr.getmaxyx()
         helplen = len(helplines(mode))
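+        # a fixed 12-row commit pane, a help pane sized to its text, and
+        # the main pane (rules or patch) takes whatever height remains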
         return {
-            'commit': (12, maxx),
-            'help': (helplen, maxx),
-            'main': (maxy - helplen - 12, maxx),
+            b'commit': (12, maxx),
+            b'help': (helplen, maxx),
+            b'main': (maxy - helplen - 12, maxx),
         }
 
     def drawvertwin(size, y, x):
@@ -1409,20 +1530,16 @@
         return win, y, x
 
     state = {
-        'pos': 0,
-        'rules': rules,
-        'selected': None,
-        'mode': (MODE_INIT, MODE_INIT),
-        'page_height': None,
-        'modes': {
-            MODE_RULES: {
-                'line_offset': 0,
-            },
-            MODE_PATCH: {
-                'line_offset': 0,
-            }
+        b'pos': 0,
+        b'rules': rules,
+        b'selected': None,
+        b'mode': (MODE_INIT, MODE_INIT),
+        b'page_height': None,
+        b'modes': {
+            MODE_RULES: {b'line_offset': 0,},
+            MODE_PATCH: {b'line_offset': 0,},
         },
-        'repo': repo,
+        b'repo': repo,
     }
 
     # eventloop
@@ -1431,7 +1548,7 @@
     stdscr.refresh()
     while True:
         try:
-            oldmode, _ = state['mode']
+            oldmode, _ = state[b'mode']
             if oldmode == MODE_INIT:
                 changemode(state, MODE_RULES)
             e = event(state, ch)
@@ -1439,36 +1556,36 @@
             if e == E_QUIT:
                 return False
             if e == E_HISTEDIT:
-                return state['rules']
+                return state[b'rules']
             else:
                 if e == E_RESIZE:
                     size = screen_size()
                     if size != stdscr.getmaxyx():
                         curses.resizeterm(*size)
 
-                curmode, _ = state['mode']
+                curmode, _ = state[b'mode']
                 sizes = layout(curmode)
                 if curmode != oldmode:
-                    state['page_height'] = sizes['main'][0]
+                    state[b'page_height'] = sizes[b'main'][0]
                     # Adjust the view to fit the current screen size.
-                    movecursor(state, state['pos'], state['pos'])
+                    movecursor(state, state[b'pos'], state[b'pos'])
 
                 # Pack the windows against the top, each pane spread across the
                 # full width of the screen.
                 y, x = (0, 0)
-                helpwin, y, x = drawvertwin(sizes['help'], y, x)
-                mainwin, y, x = drawvertwin(sizes['main'], y, x)
-                commitwin, y, x = drawvertwin(sizes['commit'], y, x)
+                helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
+                mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
+                commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)
 
                 if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
                     if e == E_PAGEDOWN:
-                        changeview(state, +1, 'page')
+                        changeview(state, +1, b'page')
                     elif e == E_PAGEUP:
-                        changeview(state, -1, 'page')
+                        changeview(state, -1, b'page')
                     elif e == E_LINEDOWN:
-                        changeview(state, +1, 'line')
+                        changeview(state, +1, b'line')
                     elif e == E_LINEUP:
-                        changeview(state, -1, 'line')
+                        changeview(state, -1, b'line')
 
                 # start rendering
                 commitwin.erase()
@@ -1488,6 +1605,7 @@
         except curses.error:
             pass
 
+
 def _chistedit(ui, repo, *freeargs, **opts):
     """interactively edit changeset history via a curses interface
 
@@ -1495,20 +1613,24 @@
     to see an extensive help. Requires python-curses to be installed."""
 
     if curses is None:
-        raise error.Abort(_("Python curses library required"))
+        raise error.Abort(_(b"Python curses library required"))
 
     # disable color
     ui._colormode = None
 
     try:
-        keep = opts.get('keep')
-        revs = opts.get('rev', [])[:]
+        keep = opts.get(b'keep')
+        revs = opts.get(b'rev', [])[:]
         cmdutil.checkunfinished(repo)
         cmdutil.bailifchanged(repo)
 
-        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
-            raise error.Abort(_('history edit already in progress, try '
-                               '--continue or --abort'))
+        if os.path.exists(os.path.join(repo.path, b'histedit-state')):
+            raise error.Abort(
+                _(
+                    b'history edit already in progress, try '
+                    b'--continue or --abort'
+                )
+            )
         revs.extend(freeargs)
         if not revs:
             defaultrev = destutil.desthistedit(ui, repo)
@@ -1516,19 +1638,26 @@
                 revs.append(defaultrev)
         if len(revs) != 1:
             raise error.Abort(
-                _('histedit requires exactly one ancestor revision'))
-
-        rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
+                _(b'histedit requires exactly one ancestor revision')
+            )
+
+        rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
         if len(rr) != 1:
-            raise error.Abort(_('The specified revisions must have '
-                'exactly one common root'))
+            raise error.Abort(
+                _(
+                    b'The specified revisions must have '
+                    b'exactly one common root'
+                )
+            )
         root = rr[0].node()
 
         topmost = repo.dirstate.p1()
         revs = between(repo, root, topmost, keep)
         if not revs:
-            raise error.Abort(_('%s is not an ancestor of working directory') %
-                             node.short(root))
+            raise error.Abort(
+                _(b'%s is not an ancestor of working directory')
+                % node.short(root)
+            )
 
         ctxs = []
         for i, r in enumerate(revs):
@@ -1541,36 +1670,54 @@
         curses.echo()
         curses.endwin()
         if rc is False:
-            ui.write(_("histedit aborted\n"))
+            ui.write(_(b"histedit aborted\n"))
             return 0
         if type(rc) is list:
-            ui.status(_("performing changes\n"))
+            ui.status(_(b"performing changes\n"))
             rules = makecommands(rc)
-            filename = repo.vfs.join('chistedit')
-            with open(filename, 'w+') as fp:
+            filename = repo.vfs.join(b'chistedit')
+            # the mode argument of the built-in open() must be a native str
+            with open(filename, r'w+') as fp:
                 for r in rules:
                     fp.write(r)
-            opts['commands'] = filename
+            opts[b'commands'] = filename
             return _texthistedit(ui, repo, *freeargs, **opts)
     except KeyboardInterrupt:
         pass
     return -1
 
-@command('histedit',
-    [('', 'commands', '',
-      _('read history edits from the specified file'), _('FILE')),
-     ('c', 'continue', False, _('continue an edit already in progress')),
-     ('', 'edit-plan', False, _('edit remaining actions list')),
-     ('k', 'keep', False,
-      _("don't strip old nodes after edit is complete")),
-     ('', 'abort', False, _('abort an edit in progress')),
-     ('o', 'outgoing', False, _('changesets not found in destination')),
-     ('f', 'force', False,
-      _('force outgoing even for unrelated repositories')),
-     ('r', 'rev', [], _('first revision to be edited'), _('REV'))] +
-    cmdutil.formatteropts,
-     _("[OPTIONS] ([ANCESTOR] | --outgoing [URL])"),
-    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
+
+@command(
+    b'histedit',
+    [
+        (
+            b'',
+            b'commands',
+            b'',
+            _(b'read history edits from the specified file'),
+            _(b'FILE'),
+        ),
+        (b'c', b'continue', False, _(b'continue an edit already in progress')),
+        (b'', b'edit-plan', False, _(b'edit remaining actions list')),
+        (
+            b'k',
+            b'keep',
+            False,
+            _(b"don't strip old nodes after edit is complete"),
+        ),
+        (b'', b'abort', False, _(b'abort an edit in progress')),
+        (b'o', b'outgoing', False, _(b'changesets not found in destination')),
+        (
+            b'f',
+            b'force',
+            False,
+            _(b'force outgoing even for unrelated repositories'),
+        ),
+        (b'r', b'rev', [], _(b'first revision to be edited'), _(b'REV')),
+    ]
+    + cmdutil.formatteropts,
+    _(b"[OPTIONS] ([ANCESTOR] | --outgoing [URL])"),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+)
 def histedit(ui, repo, *freeargs, **opts):
     """interactively edit changeset history
 
@@ -1673,11 +1820,14 @@
     # kludge: _chistedit only works for starting an edit, not aborting
     # or continuing, so fall back to regular _texthistedit for those
     # operations.
-    if ui.interface('histedit') == 'curses' and  _getgoal(
-            pycompat.byteskwargs(opts)) == goalnew:
+    if (
+        ui.interface(b'histedit') == b'curses'
+        and _getgoal(pycompat.byteskwargs(opts)) == goalnew
+    ):
         return _chistedit(ui, repo, *freeargs, **opts)
     return _texthistedit(ui, repo, *freeargs, **opts)
 
+
 def _texthistedit(ui, repo, *freeargs, **opts):
     state = histeditstate(repo)
     with repo.wlock() as wlock, repo.lock() as lock:
@@ -1685,10 +1835,12 @@
         state.lock = lock
         _histedit(ui, repo, state, *freeargs, **opts)
 
-goalcontinue = 'continue'
-goalabort = 'abort'
-goaleditplan = 'edit-plan'
-goalnew = 'new'
+
+goalcontinue = b'continue'
+goalabort = b'abort'
+goaleditplan = b'edit-plan'
+goalnew = b'new'
+
 
 def _getgoal(opts):
     if opts.get(b'continue'):
@@ -1699,48 +1851,56 @@
         return goaleditplan
     return goalnew
 
+
 def _readfile(ui, path):
-    if path == '-':
-        with ui.timeblockedsection('histedit'):
+    if path == b'-':
+        with ui.timeblockedsection(b'histedit'):
             return ui.fin.read()
     else:
-        with open(path, 'rb') as f:
+        with open(path, b'rb') as f:
             return f.read()
 
+
 def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs):
     # TODO only abort if we try to histedit mq patches, not just
     # blanket if mq patches are applied somewhere
     mq = getattr(repo, 'mq', None)
     if mq and mq.applied:
-        raise error.Abort(_('source has mq patches applied'))
+        raise error.Abort(_(b'source has mq patches applied'))
 
     # basic argument incompatibility processing
-    outg = opts.get('outgoing')
-    editplan = opts.get('edit_plan')
-    abort = opts.get('abort')
-    force = opts.get('force')
+    outg = opts.get(b'outgoing')
+    editplan = opts.get(b'edit_plan')
+    abort = opts.get(b'abort')
+    force = opts.get(b'force')
     if force and not outg:
-        raise error.Abort(_('--force only allowed with --outgoing'))
-    if goal == 'continue':
+        raise error.Abort(_(b'--force only allowed with --outgoing'))
+    if goal == b'continue':
         if any((outg, abort, revs, freeargs, rules, editplan)):
-            raise error.Abort(_('no arguments allowed with --continue'))
-    elif goal == 'abort':
+            raise error.Abort(_(b'no arguments allowed with --continue'))
+    elif goal == b'abort':
         if any((outg, revs, freeargs, rules, editplan)):
-            raise error.Abort(_('no arguments allowed with --abort'))
-    elif goal == 'edit-plan':
+            raise error.Abort(_(b'no arguments allowed with --abort'))
+    elif goal == b'edit-plan':
         if any((outg, revs, freeargs)):
-            raise error.Abort(_('only --commands argument allowed with '
-                               '--edit-plan'))
+            raise error.Abort(
+                _(b'only --commands argument allowed with --edit-plan')
+            )
     else:
         if state.inprogress():
-            raise error.Abort(_('history edit already in progress, try '
-                               '--continue or --abort'))
+            raise error.Abort(
+                _(
+                    b'history edit already in progress, try '
+                    b'--continue or --abort'
+                )
+            )
         if outg:
             if revs:
-                raise error.Abort(_('no revisions allowed with --outgoing'))
+                raise error.Abort(_(b'no revisions allowed with --outgoing'))
             if len(freeargs) > 1:
                 raise error.Abort(
-                    _('only one repo argument allowed with --outgoing'))
+                    _(b'only one repo argument allowed with --outgoing')
+                )
         else:
             revs.extend(freeargs)
             if len(revs) == 0:
@@ -1750,17 +1910,19 @@
 
             if len(revs) != 1:
                 raise error.Abort(
-                    _('histedit requires exactly one ancestor revision'))
+                    _(b'histedit requires exactly one ancestor revision')
+                )
+
 
 def _histedit(ui, repo, state, *freeargs, **opts):
     opts = pycompat.byteskwargs(opts)
-    fm = ui.formatter('histedit', opts)
+    fm = ui.formatter(b'histedit', opts)
     fm.startitem()
     goal = _getgoal(opts)
-    revs = opts.get('rev', [])
-    nobackup = not ui.configbool('rewrite', 'backup-bundle')
-    rules = opts.get('commands', '')
-    state.keep = opts.get('keep', False)
+    revs = opts.get(b'rev', [])
+    nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
+    rules = opts.get(b'commands', b'')
+    state.keep = opts.get(b'keep', False)
 
     _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs)
 
@@ -1769,15 +1931,19 @@
         revs = scmutil.revrange(repo, revs)
         ctxs = [repo[rev] for rev in revs]
         for ctx in ctxs:
-            tags = [tag for tag in ctx.tags() if tag != 'tip']
+            tags = [tag for tag in ctx.tags() if tag != b'tip']
             if not hastags:
                 hastags = len(tags)
     if hastags:
-        if ui.promptchoice(_('warning: tags associated with the given'
-                             ' changeset will be lost after histedit.\n'
-                             'do you want to continue (yN)? $$ &Yes $$ &No'),
-                           default=1):
-            raise error.Abort(_('histedit cancelled\n'))
+        if ui.promptchoice(
+            _(
+                b'warning: tags associated with the given'
+                b' changeset will be lost after histedit.\n'
+                b'do you want to continue (yN)? $$ &Yes $$ &No'
+            ),
+            default=1,
+        ):
+            raise error.Abort(_(b'histedit cancelled\n'))
     # rebuild state
     if goal == goalcontinue:
         state.read()
@@ -1796,6 +1962,7 @@
     _finishhistedit(ui, repo, state, fm)
     fm.end()
 
+
 def _continuehistedit(ui, repo, state):
     """This function runs after either:
     - bootstrapcontinue (if the goal is 'continue')
@@ -1804,9 +1971,8 @@
     # preprocess rules so that we can hide inner folds from the user
     # and only show one editor
     actions = state.actions[:]
-    for idx, (action, nextact) in enumerate(
-            zip(actions, actions[1:] + [None])):
-        if action.verb == 'fold' and nextact and nextact.verb == 'fold':
+    for idx, (action, nextact) in enumerate(zip(actions, actions[1:] + [None])):
+        if action.verb == b'fold' and nextact and nextact.verb == b'fold':
             state.actions[idx].__class__ = _multifold
 
     # Force an initial state file write, so the user can run --abort/continue
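The loop above pairs every action with its successor so that runs of consecutive ``fold`` actions can be retagged as inner folds. The same pairwise scan in isolation (the verbs are illustrative)::

    actions = [b'pick', b'fold', b'fold', b'pick']
    for idx, (cur, nxt) in enumerate(zip(actions, actions[1:] + [None])):
        if cur == b'fold' and nxt == b'fold':
            # histedit retags this index as a _multifold action
            print('action %d is an inner fold' % idx)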
@@ -1817,20 +1983,22 @@
     # Don't use singletransaction by default since it rolls the entire
     # transaction back if an unexpected exception happens (like a
     # pretxncommit hook throws, or the user aborts the commit msg editor).
-    if ui.configbool("histedit", "singletransaction"):
+    if ui.configbool(b"histedit", b"singletransaction"):
         # Don't use a 'with' for the transaction, since actions may close
         # and reopen a transaction. For example, if the action executes an
         # external process it may choose to commit the transaction first.
-        tr = repo.transaction('histedit')
-    progress = ui.makeprogress(_("editing"), unit=_('changes'),
-                               total=len(state.actions))
+        tr = repo.transaction(b'histedit')
+    progress = ui.makeprogress(
+        _(b"editing"), unit=_(b'changes'), total=len(state.actions)
+    )
     with progress, util.acceptintervention(tr):
         while state.actions:
             state.write(tr=tr)
             actobj = state.actions[0]
             progress.increment(item=actobj.torule())
-            ui.debug('histedit: processing %s %s\n' % (actobj.verb,
-                                                       actobj.torule()))
+            ui.debug(
+                b'histedit: processing %s %s\n' % (actobj.verb, actobj.torule())
+            )
             parentctx, replacement_ = actobj.run()
             state.parentctxnode = parentctx.node()
             state.replacements.extend(replacement_)
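As the comment above notes, the transaction is deliberately not opened with a ``with`` statement; ``util.acceptintervention(tr)`` accepts ``None`` and handles cleanup itself. A toy illustration of that optional-context-manager shape (every name here is a stand-in, not the real hg API; ``nullcontext`` needs Python 3.7+)::

    import contextlib

    class faketransaction(object):
        # stand-in for repo.transaction(b'histedit')
        def __enter__(self):
            return self
        def __exit__(self, *exc):
            print('transaction finished')

    def maybetransaction(single):
        # a real transaction in single-transaction mode, a no-op otherwise
        return faketransaction() if single else contextlib.nullcontext()

    with maybetransaction(single=False):
        print('processing actions one by one')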
@@ -1838,20 +2006,23 @@
 
     state.write()
 
+
 def _finishhistedit(ui, repo, state, fm):
     """This action runs when histedit is finishing its session"""
     hg.updaterepo(repo, state.parentctxnode, overwrite=False)
 
     mapping, tmpnodes, created, ntm = processreplacement(state)
     if mapping:
-        for prec, succs in mapping.iteritems():
+        for prec, succs in pycompat.iteritems(mapping):
             if not succs:
-                ui.debug('histedit: %s is dropped\n' % node.short(prec))
+                ui.debug(b'histedit: %s is dropped\n' % node.short(prec))
             else:
-                ui.debug('histedit: %s is replaced by %s\n' % (
-                    node.short(prec), node.short(succs[0])))
+                ui.debug(
+                    b'histedit: %s is replaced by %s\n'
+                    % (node.short(prec), node.short(succs[0]))
+                )
                 if len(succs) > 1:
-                    m = 'histedit:                            %s'
+                    m = b'histedit:                            %s'
                     for n in succs[1:]:
                         ui.debug(m % node.short(n))
 
@@ -1868,84 +2039,106 @@
 
     # remove entries about unknown nodes
     nodemap = repo.unfiltered().changelog.nodemap
-    mapping = {k: v for k, v in mapping.items()
-               if k in nodemap and all(n in nodemap for n in v)}
-    scmutil.cleanupnodes(repo, mapping, 'histedit')
+    mapping = {
+        k: v
+        for k, v in mapping.items()
+        if k in nodemap and all(n in nodemap for n in v)
+    }
+    scmutil.cleanupnodes(repo, mapping, b'histedit')
     hf = fm.hexfunc
     fl = fm.formatlist
     fd = fm.formatdict
-    nodechanges = fd({hf(oldn): fl([hf(n) for n in newn], name='node')
-                      for oldn, newn in mapping.iteritems()},
-                     key="oldnode", value="newnodes")
+    nodechanges = fd(
+        {
+            hf(oldn): fl([hf(n) for n in newn], name=b'node')
+            for oldn, newn in pycompat.iteritems(mapping)
+        },
+        key=b"oldnode",
+        value=b"newnodes",
+    )
     fm.data(nodechanges=nodechanges)
 
     state.clear()
-    if os.path.exists(repo.sjoin('undo')):
-        os.unlink(repo.sjoin('undo'))
-    if repo.vfs.exists('histedit-last-edit.txt'):
-        repo.vfs.unlink('histedit-last-edit.txt')
+    if os.path.exists(repo.sjoin(b'undo')):
+        os.unlink(repo.sjoin(b'undo'))
+    if repo.vfs.exists(b'histedit-last-edit.txt'):
+        repo.vfs.unlink(b'histedit-last-edit.txt')
+
 
 def _aborthistedit(ui, repo, state, nobackup=False):
     try:
         state.read()
         __, leafs, tmpnodes, __ = processreplacement(state)
-        ui.debug('restore wc to old parent %s\n'
-                % node.short(state.topmost))
+        ui.debug(b'restore wc to old parent %s\n' % node.short(state.topmost))
 
         # Recover our old commits if necessary
         if not state.topmost in repo and state.backupfile:
             backupfile = repo.vfs.join(state.backupfile)
             f = hg.openpath(ui, backupfile)
             gen = exchange.readbundle(ui, f, backupfile)
-            with repo.transaction('histedit.abort') as tr:
-                bundle2.applybundle(repo, gen, tr, source='histedit',
-                                    url='bundle:' + backupfile)
+            with repo.transaction(b'histedit.abort') as tr:
+                bundle2.applybundle(
+                    repo,
+                    gen,
+                    tr,
+                    source=b'histedit',
+                    url=b'bundle:' + backupfile,
+                )
 
             os.remove(backupfile)
 
         # check whether we should update away
-        if repo.unfiltered().revs('parents() and (%n  or %ln::)',
-                                state.parentctxnode, leafs | tmpnodes):
+        if repo.unfiltered().revs(
+            b'parents() and (%n  or %ln::)',
+            state.parentctxnode,
+            leafs | tmpnodes,
+        ):
             hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
         cleanupnode(ui, repo, tmpnodes, nobackup=nobackup)
         cleanupnode(ui, repo, leafs, nobackup=nobackup)
     except Exception:
         if state.inprogress():
-            ui.warn(_('warning: encountered an exception during histedit '
-                '--abort; the repository may not have been completely '
-                'cleaned up\n'))
+            ui.warn(
+                _(
+                    b'warning: encountered an exception during histedit '
+                    b'--abort; the repository may not have been completely '
+                    b'cleaned up\n'
+                )
+            )
         raise
     finally:
-            state.clear()
+        state.clear()
+
 
 def hgaborthistedit(ui, repo):
     state = histeditstate(repo)
-    nobackup = not ui.configbool('rewrite', 'backup-bundle')
+    nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
     with repo.wlock() as wlock, repo.lock() as lock:
         state.wlock = wlock
         state.lock = lock
         _aborthistedit(ui, repo, state, nobackup=nobackup)
 
+
 def _edithisteditplan(ui, repo, state, rules):
     state.read()
     if not rules:
-        comment = geteditcomment(ui,
-                                 node.short(state.parentctxnode),
-                                 node.short(state.topmost))
+        comment = geteditcomment(
+            ui, node.short(state.parentctxnode), node.short(state.topmost)
+        )
         rules = ruleeditor(repo, ui, state.actions, comment)
     else:
         rules = _readfile(ui, rules)
     actions = parserules(rules, state)
-    ctxs = [repo[act.node]
-            for act in state.actions if act.node]
+    ctxs = [repo[act.node] for act in state.actions if act.node]
     warnverifyactions(ui, repo, actions, state, ctxs)
     state.actions = actions
     state.write()
 
+
 def _newhistedit(ui, repo, state, revs, freeargs, opts):
-    outg = opts.get('outgoing')
-    rules = opts.get('commands', '')
-    force = opts.get('force')
+    outg = opts.get(b'outgoing')
+    rules = opts.get(b'commands', b'')
+    force = opts.get(b'force')
 
     cmdutil.checkunfinished(repo)
     cmdutil.bailifchanged(repo)
@@ -1958,18 +2151,55 @@
             remote = None
         root = findoutgoing(ui, repo, remote, force, opts)
     else:
-        rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
+        rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs)))
         if len(rr) != 1:
-            raise error.Abort(_('The specified revisions must have '
-                'exactly one common root'))
+            raise error.Abort(
+                _(
+                    b'The specified revisions must have '
+                    b'exactly one common root'
+                )
+            )
         root = rr[0].node()
 
     revs = between(repo, root, topmost, state.keep)
     if not revs:
-        raise error.Abort(_('%s is not an ancestor of working directory') %
-                         node.short(root))
+        raise error.Abort(
+            _(b'%s is not an ancestor of working directory') % node.short(root)
+        )
 
     ctxs = [repo[r] for r in revs]
+
+    wctx = repo[None]
+    # Please don't ask me why `ancestors` is this value. I figured it
+    # out with print-debugging, not by actually understanding what the
+    # merge code is doing. :(
+    ancs = [repo[b'.']]
+    # Sniff-test to make sure we won't collide with untracked files in
+    # the working directory. If we don't do this, we can get a
+    # collision after we've started histedit and backing out gets ugly
+    # for everyone, especially the user.
+    for c in [ctxs[0].p1()] + ctxs:
+        try:
+            mergemod.calculateupdates(
+                repo,
+                wctx,
+                c,
+                ancs,
+                # These parameters were determined by print-debugging
+                # what happens later on inside histedit.
+                branchmerge=False,
+                force=False,
+                acceptremote=False,
+                followcopies=False,
+            )
+        except error.Abort:
+            raise error.Abort(
+                _(
+                    b"untracked files in working directory conflict with files in %s"
+                )
+                % c
+            )
+
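The added block above dry-runs the merge for each changeset so a clash with untracked files is caught before any history is rewritten. A much-simplified picture of the check (the file sets are hypothetical; the real test goes through ``mergemod.calculateupdates``)::

    untracked_in_wdir = {b'notes.txt', b'scratch.py'}
    files_in_target = {b'notes.txt', b'src/main.py'}
    clashes = untracked_in_wdir & files_in_target
    if clashes:
        print('would overwrite: %s'
              % b', '.join(sorted(clashes)).decode())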
     if not rules:
         comment = geteditcomment(ui, node.short(root), node.short(topmost))
         actions = [pick(state, r) for r in revs]
@@ -1986,24 +2216,31 @@
     state.topmost = topmost
     state.replacements = []
 
-    ui.log("histedit", "%d actions to histedit\n", len(actions),
-           histedit_num_actions=len(actions))
+    ui.log(
+        b"histedit",
+        b"%d actions to histedit\n",
+        len(actions),
+        histedit_num_actions=len(actions),
+    )
 
     # Create a backup so we can always abort completely.
     backupfile = None
     if not obsolete.isenabled(repo, obsolete.createmarkersopt):
-        backupfile = repair.backupbundle(repo, [parentctxnode],
-                                         [topmost], root, 'histedit')
+        backupfile = repair.backupbundle(
+            repo, [parentctxnode], [topmost], root, b'histedit'
+        )
     state.backupfile = backupfile
 
+
 def _getsummary(ctx):
     # a common pattern is to extract the summary but default to the empty
     # string
-    summary = ctx.description() or ''
+    summary = ctx.description() or b''
     if summary:
         summary = summary.splitlines()[0]
     return summary
 
+
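``_getsummary`` above reduces a possibly-empty description to its first line; a standalone equivalent::

    def firstline(description):
        summary = description or b''
        if summary:
            summary = summary.splitlines()[0]
        return summary

    assert firstline(b'fix crash\n\nlong details') == b'fix crash'
    assert firstline(None) == b''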
 def bootstrapcontinue(ui, state, opts):
     repo = state.repo
 
@@ -2025,47 +2262,56 @@
 
     return state
 
+
 def between(repo, old, new, keep):
     """select and validate the set of revision to edit
 
     When keep is false, the specified set can't have children."""
-    revs = repo.revs('%n::%n', old, new)
+    revs = repo.revs(b'%n::%n', old, new)
     if revs and not keep:
-        if (not obsolete.isenabled(repo, obsolete.allowunstableopt) and
-            repo.revs('(%ld::) - (%ld)', revs, revs)):
-            raise error.Abort(_('can only histedit a changeset together '
-                                'with all its descendants'))
-        if repo.revs('(%ld) and merge()', revs):
-            raise error.Abort(_('cannot edit history that contains merges'))
+        if not obsolete.isenabled(
+            repo, obsolete.allowunstableopt
+        ) and repo.revs(b'(%ld::) - (%ld)', revs, revs):
+            raise error.Abort(
+                _(
+                    b'can only histedit a changeset together '
+                    b'with all its descendants'
+                )
+            )
+        if repo.revs(b'(%ld) and merge()', revs):
+            raise error.Abort(_(b'cannot edit history that contains merges'))
         root = repo[revs.first()]  # list is already sorted by repo.revs()
         if not root.mutable():
-            raise error.Abort(_('cannot edit public changeset: %s') % root,
-                             hint=_("see 'hg help phases' for details"))
+            raise error.Abort(
+                _(b'cannot edit public changeset: %s') % root,
+                hint=_(b"see 'hg help phases' for details"),
+            )
     return pycompat.maplist(repo.changelog.node, revs)
 
-def ruleeditor(repo, ui, actions, editcomment=""):
+
+def ruleeditor(repo, ui, actions, editcomment=b""):
     """open an editor to edit rules
 
     rules are in the format [ [act, ctx], ...] like in state.rules
     """
-    if repo.ui.configbool("experimental", "histedit.autoverb"):
+    if repo.ui.configbool(b"experimental", b"histedit.autoverb"):
         newact = util.sortdict()
         for act in actions:
             ctx = repo[act.node]
             summary = _getsummary(ctx)
-            fword = summary.split(' ', 1)[0].lower()
+            fword = summary.split(b' ', 1)[0].lower()
             added = False
 
             # if it doesn't end with the special character '!' just skip this
-            if fword.endswith('!'):
+            if fword.endswith(b'!'):
                 fword = fword[:-1]
                 if fword in primaryactions | secondaryactions | tertiaryactions:
                     act.verb = fword
                     # get the target summary
-                    tsum = summary[len(fword) + 1:].lstrip()
+                    tsum = summary[len(fword) + 1 :].lstrip()
                     # safe but slow: reverse iterate over the actions so we
                     # don't clash on two commits having the same summary
-                    for na, l in reversed(list(newact.iteritems())):
+                    for na, l in reversed(list(pycompat.iteritems(newact))):
                         actx = repo[na.node]
                         asum = _getsummary(actx)
                         if asum == tsum:
@@ -2078,50 +2324,65 @@
 
         # copy over and flatten the new list
         actions = []
-        for na, l in newact.iteritems():
+        for na, l in pycompat.iteritems(newact):
             actions.append(na)
             actions += l
 
-    rules = '\n'.join([act.torule() for act in actions])
-    rules += '\n\n'
+    rules = b'\n'.join([act.torule() for act in actions])
+    rules += b'\n\n'
     rules += editcomment
-    rules = ui.edit(rules, ui.username(), {'prefix': 'histedit'},
-                    repopath=repo.path, action='histedit')
+    rules = ui.edit(
+        rules,
+        ui.username(),
+        {b'prefix': b'histedit'},
+        repopath=repo.path,
+        action=b'histedit',
+    )
 
     # Save edit rules in .hg/histedit-last-edit.txt in case
     # the user needs to ask for help after something
     # surprising happens.
-    with repo.vfs('histedit-last-edit.txt', 'wb') as f:
+    with repo.vfs(b'histedit-last-edit.txt', b'wb') as f:
         f.write(rules)
 
     return rules
 
+
 def parserules(rules, state):
     """Read the histedit rules string and return list of action objects """
-    rules = [l for l in (r.strip() for r in rules.splitlines())
-                if l and not l.startswith('#')]
+    rules = [
+        l
+        for l in (r.strip() for r in rules.splitlines())
+        if l and not l.startswith(b'#')
+    ]
     actions = []
     for r in rules:
-        if ' ' not in r:
-            raise error.ParseError(_('malformed line "%s"') % r)
-        verb, rest = r.split(' ', 1)
+        if b' ' not in r:
+            raise error.ParseError(_(b'malformed line "%s"') % r)
+        verb, rest = r.split(b' ', 1)
 
         if verb not in actiontable:
-            raise error.ParseError(_('unknown action "%s"') % verb)
+            raise error.ParseError(_(b'unknown action "%s"') % verb)
 
         action = actiontable[verb].fromrule(state, rest)
         actions.append(action)
     return actions
 
+
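``parserules`` above drops blank and ``#`` lines, then splits each survivor on the first space into a verb and its argument. A self-contained rendition of that loop (the hashes and summaries are made up)::

    rules = b"""
    # comments and blank lines are ignored
    pick 0123456789ab 1 first change
    fold ba9876543210 2 second change
    """
    actions = []
    for line in (r.strip() for r in rules.splitlines()):
        if not line or line.startswith(b'#'):
            continue
        if b' ' not in line:
            raise ValueError('malformed line %r' % line)
        verb, rest = line.split(b' ', 1)
        actions.append((verb, rest))
    assert [v for v, r in actions] == [b'pick', b'fold']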
 def warnverifyactions(ui, repo, actions, state, ctxs):
     try:
         verifyactions(actions, state, ctxs)
     except error.ParseError:
-        if repo.vfs.exists('histedit-last-edit.txt'):
-            ui.warn(_('warning: histedit rules saved '
-                      'to: .hg/histedit-last-edit.txt\n'))
+        if repo.vfs.exists(b'histedit-last-edit.txt'):
+            ui.warn(
+                _(
+                    b'warning: histedit rules saved '
+                    b'to: .hg/histedit-last-edit.txt\n'
+                )
+            )
         raise
 
+
 def verifyactions(actions, state, ctxs):
     """Verify that there exists exactly one action per given changeset and
     other constraints.
@@ -2133,9 +2394,10 @@
     seen = set()
     prev = None
 
-    if actions and actions[0].verb in ['roll', 'fold']:
-        raise error.ParseError(_('first changeset cannot use verb "%s"') %
-                               actions[0].verb)
+    if actions and actions[0].verb in [b'roll', b'fold']:
+        raise error.ParseError(
+            _(b'first changeset cannot use verb "%s"') % actions[0].verb
+        )
 
     for action in actions:
         action.verify(prev, expected, seen)
@@ -2144,21 +2406,27 @@
             seen.add(action.node)
     missing = sorted(expected - seen)  # sort to stabilize output
 
-    if state.repo.ui.configbool('histedit', 'dropmissing'):
+    if state.repo.ui.configbool(b'histedit', b'dropmissing'):
         if len(actions) == 0:
-            raise error.ParseError(_('no rules provided'),
-                    hint=_('use strip extension to remove commits'))
+            raise error.ParseError(
+                _(b'no rules provided'),
+                hint=_(b'use strip extension to remove commits'),
+            )
 
         drops = [drop(state, n) for n in missing]
         # put them at the beginning so they execute immediately and
         # don't show in the edit-plan in the future
         actions[:0] = drops
     elif missing:
-        raise error.ParseError(_('missing rules for changeset %s') %
-                node.short(missing[0]),
-                hint=_('use "drop %s" to discard, see also: '
-                       "'hg help -e histedit.config'")
-                       % node.short(missing[0]))
+        raise error.ParseError(
+            _(b'missing rules for changeset %s') % node.short(missing[0]),
+            hint=_(
+                b'use "drop %s" to discard, see also: '
+                b"'hg help -e histedit.config'"
+            )
+            % node.short(missing[0]),
+        )
+
 
 def adjustreplacementsfrommarkers(repo, oldreplacements):
     """Adjust replacements from obsolescence markers
@@ -2176,7 +2444,9 @@
     newreplacements = list(oldreplacements)
     oldsuccs = [r[1] for r in oldreplacements]
     # successors that have already been added to succstocheck once
-    seensuccs = set().union(*oldsuccs) # create a set from an iterable of tuples
+    seensuccs = set().union(
+        *oldsuccs
+    )  # create a set from an iterable of tuples
     succstocheck = list(seensuccs)
     while succstocheck:
         n = succstocheck.pop()
@@ -2195,6 +2465,7 @@
 
     return newreplacements
 
+
 def processreplacement(state):
     """process the list of replacements to return
 
@@ -2256,6 +2527,7 @@
 
     return final, tmpnodes, new, newtopmost
 
+
 def movetopmostbookmarks(repo, oldtopmost, newtopmost):
     """Move bookmark from oldtopmost to newly created topmost
 
@@ -2266,13 +2538,14 @@
         return
     oldbmarks = repo.nodebookmarks(oldtopmost)
     if oldbmarks:
-        with repo.lock(), repo.transaction('histedit') as tr:
+        with repo.lock(), repo.transaction(b'histedit') as tr:
             marks = repo._bookmarks
             changes = []
             for name in oldbmarks:
                 changes.append((name, newtopmost))
             marks.applychanges(repo, tr, changes)
 
+
 def cleanupnode(ui, repo, nodes, nobackup=False):
     """strip a group of nodes from the repository
 
@@ -2286,26 +2559,32 @@
         # (we use %lr instead of %ln to silently ignore unknown items)
         nm = repo.changelog.nodemap
         nodes = sorted(n for n in nodes if n in nm)
-        roots = [c.node() for c in repo.set("roots(%ln)", nodes)]
+        roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)]
         if roots:
             backup = not nobackup
             repair.strip(ui, repo, roots, backup=backup)
 
+
 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     state = histeditstate(repo)
     if state.inprogress():
         state.read()
-        histedit_nodes = {action.node for action
-                          in state.actions if action.node}
+        histedit_nodes = {
+            action.node for action in state.actions if action.node
+        }
         common_nodes = histedit_nodes & set(nodelist)
         if common_nodes:
-            raise error.Abort(_("histedit in progress, can't strip %s")
-                             % ', '.join(node.short(x) for x in common_nodes))
+            raise error.Abort(
+                _(b"histedit in progress, can't strip %s")
+                % b', '.join(node.short(x) for x in common_nodes)
+            )
     return orig(ui, repo, nodelist, *args, **kwargs)
 
-extensions.wrapfunction(repair, 'strip', stripwrapper)
+
+extensions.wrapfunction(repair, b'strip', stripwrapper)
+
 
 def summaryhook(ui, repo):
     state = histeditstate(repo)
@@ -2314,11 +2593,21 @@
     state.read()
     if state.actions:
         # i18n: column positioning for "hg summary"
-        ui.write(_('hist:   %s (histedit --continue)\n') %
-                 (ui.label(_('%d remaining'), 'histedit.remaining') %
-                  len(state.actions)))
+        ui.write(
+            _(b'hist:   %s (histedit --continue)\n')
+            % (
+                ui.label(_(b'%d remaining'), b'histedit.remaining')
+                % len(state.actions)
+            )
+        )
+
 
 def extsetup(ui):
-    cmdutil.summaryhooks.add('histedit', summaryhook)
-    statemod.addunfinished('histedit', fname='histedit-state', allowcommit=True,
-                            continueflag=True, abortfunc=hgaborthistedit)
+    cmdutil.summaryhooks.add(b'histedit', summaryhook)
+    statemod.addunfinished(
+        b'histedit',
+        fname=b'histedit-state',
+        allowcommit=True,
+        continueflag=True,
+        abortfunc=hgaborthistedit,
+    )
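``stripwrapper`` and its registration above show the wrapping convention used throughout these extensions: the replacement receives the original callable as its first argument and decides whether to delegate. A simplified, dependency-free sketch of that shape (not the real ``extensions.wrapfunction``, which does considerably more bookkeeping)::

    def wrapfunction(container, name, wrapper):
        orig = getattr(container, name)
        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)
        setattr(container, name, wrapped)

    class repair(object):
        @staticmethod
        def strip(nodes):
            return 'stripped %r' % (nodes,)

    def stripwrapper(orig, nodes):
        print('checking %r before stripping' % (nodes,))
        return orig(nodes)

    wrapfunction(repair, 'strip', stripwrapper)
    print(repair.strip(['abc123']))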
--- a/hgext/infinitepush/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/infinitepush/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,6 +6,11 @@
 # GNU General Public License version 2 or any later version.
 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
 
+IMPORTANT: if you use this extension, please contact
+mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
+be unused; unless we learn of users of this functionality, we will
+delete this code at the end of 2020.
+
     [infinitepush]
     # Server-side and client-side option. Pattern of the infinitepush bookmark
     branchpattern = PATTERN
@@ -103,6 +108,11 @@
 
 from mercurial.i18n import _
 
+from mercurial.pycompat import (
+    getattr,
+    open,
+)
+
 from mercurial.utils import (
     procutil,
     stringutil,
@@ -138,48 +148,48 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('infinitepush', 'server',
-    default=False,
+configitem(
+    b'infinitepush', b'server', default=False,
 )
-configitem('infinitepush', 'storetype',
-    default='',
+configitem(
+    b'infinitepush', b'storetype', default=b'',
 )
-configitem('infinitepush', 'indextype',
-    default='',
+configitem(
+    b'infinitepush', b'indextype', default=b'',
 )
-configitem('infinitepush', 'indexpath',
-    default='',
+configitem(
+    b'infinitepush', b'indexpath', default=b'',
 )
-configitem('infinitepush', 'storeallparts',
-    default=False,
+configitem(
+    b'infinitepush', b'storeallparts', default=False,
 )
-configitem('infinitepush', 'reponame',
-    default='',
+configitem(
+    b'infinitepush', b'reponame', default=b'',
 )
-configitem('scratchbranch', 'storepath',
-    default='',
+configitem(
+    b'scratchbranch', b'storepath', default=b'',
 )
-configitem('infinitepush', 'branchpattern',
-    default='',
+configitem(
+    b'infinitepush', b'branchpattern', default=b'',
 )
-configitem('infinitepush', 'pushtobundlestore',
-    default=False,
+configitem(
+    b'infinitepush', b'pushtobundlestore', default=False,
 )
-configitem('experimental', 'server-bundlestore-bookmark',
-    default='',
+configitem(
+    b'experimental', b'server-bundlestore-bookmark', default=b'',
 )
-configitem('experimental', 'infinitepush-scratchpush',
-    default=False,
+configitem(
+    b'experimental', b'infinitepush-scratchpush', default=False,
 )
 
-experimental = 'experimental'
-configbookmark = 'server-bundlestore-bookmark'
-configscratchpush = 'infinitepush-scratchpush'
+experimental = b'experimental'
+configbookmark = b'server-bundlestore-bookmark'
+configscratchpush = b'infinitepush-scratchpush'
 
 scratchbranchparttype = bundleparts.scratchbranchparttype
 revsetpredicate = registrar.revsetpredicate()
@@ -187,43 +197,57 @@
 _scratchbranchmatcher = lambda x: False
 _maybehash = re.compile(r'^[a-f0-9]+$').search
 
+
 def _buildexternalbundlestore(ui):
-    put_args = ui.configlist('infinitepush', 'put_args', [])
-    put_binary = ui.config('infinitepush', 'put_binary')
+    put_args = ui.configlist(b'infinitepush', b'put_args', [])
+    put_binary = ui.config(b'infinitepush', b'put_binary')
     if not put_binary:
-        raise error.Abort('put binary is not specified')
-    get_args = ui.configlist('infinitepush', 'get_args', [])
-    get_binary = ui.config('infinitepush', 'get_binary')
+        raise error.Abort(b'put binary is not specified')
+    get_args = ui.configlist(b'infinitepush', b'get_args', [])
+    get_binary = ui.config(b'infinitepush', b'get_binary')
     if not get_binary:
-        raise error.Abort('get binary is not specified')
+        raise error.Abort(b'get binary is not specified')
     from . import store
+
     return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
 
+
 def _buildsqlindex(ui):
-    sqlhost = ui.config('infinitepush', 'sqlhost')
+    sqlhost = ui.config(b'infinitepush', b'sqlhost')
     if not sqlhost:
-        raise error.Abort(_('please set infinitepush.sqlhost'))
-    host, port, db, user, password = sqlhost.split(':')
-    reponame = ui.config('infinitepush', 'reponame')
+        raise error.Abort(_(b'please set infinitepush.sqlhost'))
+    host, port, db, user, password = sqlhost.split(b':')
+    reponame = ui.config(b'infinitepush', b'reponame')
     if not reponame:
-        raise error.Abort(_('please set infinitepush.reponame'))
+        raise error.Abort(_(b'please set infinitepush.reponame'))
+
+    logfile = ui.config(b'infinitepush', b'logfile', b'')
+    waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
+    locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
+    from . import sqlindexapi
 
-    logfile = ui.config('infinitepush', 'logfile', '')
-    waittimeout = ui.configint('infinitepush', 'waittimeout', 300)
-    locktimeout = ui.configint('infinitepush', 'locktimeout', 120)
-    from . import sqlindexapi
     return sqlindexapi.sqlindexapi(
-        reponame, host, port, db, user, password,
-        logfile, _getloglevel(ui), waittimeout=waittimeout,
-        locktimeout=locktimeout)
+        reponame,
+        host,
+        port,
+        db,
+        user,
+        password,
+        logfile,
+        _getloglevel(ui),
+        waittimeout=waittimeout,
+        locktimeout=locktimeout,
+    )
+
 
 def _getloglevel(ui):
-    loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
+    loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
     numeric_loglevel = getattr(logging, loglevel.upper(), None)
     if not isinstance(numeric_loglevel, int):
-        raise error.Abort(_('invalid log level %s') % loglevel)
+        raise error.Abort(_(b'invalid log level %s') % loglevel)
     return numeric_loglevel
 
+
 def _tryhoist(ui, remotebookmark):
     '''returns a bookmark with the hoisted part removed
 
@@ -234,41 +258,49 @@
     '''
 
     if common.isremotebooksenabled(ui):
-        hoist = ui.config('remotenames', 'hoistedpeer') + '/'
+        hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
         if remotebookmark.startswith(hoist):
-            return remotebookmark[len(hoist):]
+            return remotebookmark[len(hoist) :]
     return remotebookmark
 
+
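``_tryhoist`` above strips the configured hoist prefix (``<hoistedpeer>/``) from a remote bookmark name. Standalone, with an illustrative peer name::

    def tryhoist(remotebookmark, hoistedpeer=b'default'):
        hoist = hoistedpeer + b'/'
        if remotebookmark.startswith(hoist):
            return remotebookmark[len(hoist):]
        return remotebookmark

    assert tryhoist(b'default/mybook') == b'mybook'
    assert tryhoist(b'other/mybook') == b'other/mybook'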
 class bundlestore(object):
     def __init__(self, repo):
         self._repo = repo
-        storetype = self._repo.ui.config('infinitepush', 'storetype')
-        if storetype == 'disk':
+        storetype = self._repo.ui.config(b'infinitepush', b'storetype')
+        if storetype == b'disk':
             from . import store
+
             self.store = store.filebundlestore(self._repo.ui, self._repo)
-        elif storetype == 'external':
+        elif storetype == b'external':
             self.store = _buildexternalbundlestore(self._repo.ui)
         else:
             raise error.Abort(
-                _('unknown infinitepush store type specified %s') % storetype)
+                _(b'unknown infinitepush store type specified %s') % storetype
+            )
 
-        indextype = self._repo.ui.config('infinitepush', 'indextype')
-        if indextype == 'disk':
+        indextype = self._repo.ui.config(b'infinitepush', b'indextype')
+        if indextype == b'disk':
             from . import fileindexapi
+
             self.index = fileindexapi.fileindexapi(self._repo)
-        elif indextype == 'sql':
+        elif indextype == b'sql':
             self.index = _buildsqlindex(self._repo.ui)
         else:
             raise error.Abort(
-                _('unknown infinitepush index type specified %s') % indextype)
+                _(b'unknown infinitepush index type specified %s') % indextype
+            )
+
 
 def _isserver(ui):
-    return ui.configbool('infinitepush', 'server')
+    return ui.configbool(b'infinitepush', b'server')
+
 
 def reposetup(ui, repo):
     if _isserver(ui) and repo.local():
         repo.bundlestore = bundlestore(repo)
 
+
 def extsetup(ui):
     commonsetup(ui)
     if _isserver(ui):
@@ -276,99 +308,123 @@
     else:
         clientextsetup(ui)
 
+
 def commonsetup(ui):
-    wireprotov1server.commands['listkeyspatterns'] = (
-        wireprotolistkeyspatterns, 'namespace patterns')
-    scratchbranchpat = ui.config('infinitepush', 'branchpattern')
+    wireprotov1server.commands[b'listkeyspatterns'] = (
+        wireprotolistkeyspatterns,
+        b'namespace patterns',
+    )
+    scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
     if scratchbranchpat:
         global _scratchbranchmatcher
-        kind, pat, _scratchbranchmatcher = (
-                stringutil.stringmatcher(scratchbranchpat))
+        kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
+            scratchbranchpat
+        )
+
 
 def serverextsetup(ui):
-    origpushkeyhandler = bundle2.parthandlermapping['pushkey']
+    origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']
 
     def newpushkeyhandler(*args, **kwargs):
         bundle2pushkey(origpushkeyhandler, *args, **kwargs)
+
     newpushkeyhandler.params = origpushkeyhandler.params
-    bundle2.parthandlermapping['pushkey'] = newpushkeyhandler
+    bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler
 
-    orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
+    orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
     newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
-        orighandlephasehandler, *args, **kwargs)
+        orighandlephasehandler, *args, **kwargs
+    )
     newphaseheadshandler.params = orighandlephasehandler.params
-    bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler
+    bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler
 
-    extensions.wrapfunction(localrepo.localrepository, 'listkeys',
-                            localrepolistkeys)
-    wireprotov1server.commands['lookup'] = (
-        _lookupwrap(wireprotov1server.commands['lookup'][0]), 'key')
-    extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)
+    extensions.wrapfunction(
+        localrepo.localrepository, b'listkeys', localrepolistkeys
+    )
+    wireprotov1server.commands[b'lookup'] = (
+        _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
+        b'key',
+    )
+    extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)
 
-    extensions.wrapfunction(bundle2, 'processparts', processparts)
+    extensions.wrapfunction(bundle2, b'processparts', processparts)
+
 
 def clientextsetup(ui):
-    entry = extensions.wrapcommand(commands.table, 'push', _push)
+    entry = extensions.wrapcommand(commands.table, b'push', _push)
 
     entry[1].append(
-        ('', 'bundle-store', None,
-         _('force push to go to bundle store (EXPERIMENTAL)')))
+        (
+            b'',
+            b'bundle-store',
+            None,
+            _(b'force push to go to bundle store (EXPERIMENTAL)'),
+        )
+    )
 
-    extensions.wrapcommand(commands.table, 'pull', _pull)
+    extensions.wrapcommand(commands.table, b'pull', _pull)
 
-    extensions.wrapfunction(discovery, 'checkheads', _checkheads)
+    extensions.wrapfunction(discovery, b'checkheads', _checkheads)
 
     wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns
 
     partorder = exchange.b2partsgenorder
-    index = partorder.index('changeset')
+    index = partorder.index(b'changeset')
     partorder.insert(
-        index, partorder.pop(partorder.index(scratchbranchparttype)))
+        index, partorder.pop(partorder.index(scratchbranchparttype))
+    )
+
 
 def _checkheads(orig, pushop):
     if pushop.ui.configbool(experimental, configscratchpush, False):
         return
     return orig(pushop)
 
+
 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
     patterns = wireprototypes.decodelist(patterns)
-    d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
+    d = pycompat.iteritems(repo.listkeys(encoding.tolocal(namespace), patterns))
     return pushkey.encodekeys(d)
 
+
 def localrepolistkeys(orig, self, namespace, patterns=None):
-    if namespace == 'bookmarks' and patterns:
+    if namespace == b'bookmarks' and patterns:
         index = self.bundlestore.index
         results = {}
         bookmarks = orig(self, namespace)
         for pattern in patterns:
             results.update(index.getbookmarks(pattern))
-            if pattern.endswith('*'):
-                pattern = 're:^' + pattern[:-1] + '.*'
+            if pattern.endswith(b'*'):
+                pattern = b're:^' + pattern[:-1] + b'.*'
             kind, pat, matcher = stringutil.stringmatcher(pattern)
-            for bookmark, node in bookmarks.iteritems():
+            for bookmark, node in pycompat.iteritems(bookmarks):
                 if matcher(bookmark):
                     results[bookmark] = node
         return results
     else:
         return orig(self, namespace)
 
+
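``localrepolistkeys`` above upgrades a trailing-``*`` glob into an anchored regex before handing it to ``stringutil.stringmatcher``. The rewrite in isolation (the bookmark pattern is illustrative)::

    pattern = b'infinitepush/scratch/*'
    if pattern.endswith(b'*'):
        pattern = b're:^' + pattern[:-1] + b'.*'
    assert pattern == b're:^infinitepush/scratch/.*'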
 @wireprotov1peer.batchable
 def listkeyspatterns(self, namespace, patterns):
-    if not self.capable('pushkey'):
+    if not self.capable(b'pushkey'):
         yield {}, None
     f = wireprotov1peer.future()
-    self.ui.debug('preparing listkeys for "%s"\n' % namespace)
+    self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
     yield {
-        'namespace': encoding.fromlocal(namespace),
-        'patterns': wireprototypes.encodelist(patterns)
+        b'namespace': encoding.fromlocal(namespace),
+        b'patterns': wireprototypes.encodelist(patterns),
     }, f
     d = f.value
-    self.ui.debug('received listkey for "%s": %i bytes\n'
-                  % (namespace, len(d)))
+    self.ui.debug(
+        b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
+    )
     yield pushkey.decodekeys(d)
 
+
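``listkeyspatterns`` above follows the two-yield protocol of ``@wireprotov1peer.batchable``: first yield the encoded arguments together with a future, then yield the decoded reply once the batch has executed. A schematic of that shape for a hypothetical command (it mirrors the pattern above and is only meaningful inside a real peer class)::

    from mercurial import encoding, wireprotov1peer

    @wireprotov1peer.batchable
    def examplecommand(self, key):
        f = wireprotov1peer.future()
        # first yield: encoded arguments plus the future to fill in
        yield {b'key': encoding.fromlocal(key)}, f
        # second yield: decode the raw value once the batch has run
        yield encoding.tolocal(f.value)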
 def _readbundlerevs(bundlerepo):
-    return list(bundlerepo.revs('bundle()'))
+    return list(bundlerepo.revs(b'bundle()'))
+
 
 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
     '''Tells remotefilelog to include all changed files in the changegroup
@@ -384,21 +440,22 @@
     if not changedfiles:
         return bundlecaps
 
-    changedfiles = '\0'.join(changedfiles)
+    changedfiles = b'\0'.join(changedfiles)
     newcaps = []
     appended = False
-    for cap in (bundlecaps or []):
-        if cap.startswith('excludepattern='):
-            newcaps.append('\0'.join((cap, changedfiles)))
+    for cap in bundlecaps or []:
+        if cap.startswith(b'excludepattern='):
+            newcaps.append(b'\0'.join((cap, changedfiles)))
             appended = True
         else:
             newcaps.append(cap)
     if not appended:
         # No excludepattern cap was found. Just append it
-        newcaps.append('excludepattern=' + changedfiles)
+        newcaps.append(b'excludepattern=' + changedfiles)
 
     return newcaps
 
+
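``_includefilelogstobundle`` above joins changed file names with NUL bytes and either extends an existing ``excludepattern=`` capability or appends a fresh one. The joining step in isolation (file names are illustrative)::

    bundlecaps = [b'excludepattern=foo']
    changedfiles = b'\0'.join([b'a.txt', b'b/c.txt'])
    newcaps = [
        b'\0'.join((cap, changedfiles))
        if cap.startswith(b'excludepattern=')
        else cap
        for cap in bundlecaps
    ]
    assert newcaps == [b'excludepattern=foo\x00a.txt\x00b/c.txt']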
 def _rebundle(bundlerepo, bundleroots, unknownhead):
     '''
     Bundle may include more revisions than the user requested. For example,
@@ -407,17 +464,19 @@
     '''
     parts = []
 
-    version = '02'
-    outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
-                                  missingheads=[unknownhead])
-    cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
+    version = b'02'
+    outgoing = discovery.outgoing(
+        bundlerepo, commonheads=bundleroots, missingheads=[unknownhead]
+    )
+    cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
     cgstream = util.chunkbuffer(cgstream).read()
-    cgpart = bundle2.bundlepart('changegroup', data=cgstream)
-    cgpart.addparam('version', version)
+    cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
+    cgpart.addparam(b'version', version)
     parts.append(cgpart)
 
     return parts
 
+
 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
     cl = bundlerepo.changelog
     bundleroots = []
@@ -431,10 +490,13 @@
                 bundleroots.append(parent)
     return bundleroots
 
+
 def _needsrebundling(head, bundlerepo):
-    bundleheads = list(bundlerepo.revs('heads(bundle())'))
-    return not (len(bundleheads) == 1 and
-                bundlerepo[bundleheads[0]].node() == head)
+    bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
+    return not (
+        len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
+    )
+
 
 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
     '''generates a bundle that will be sent to the user
@@ -443,35 +505,38 @@
     '''
     parts = []
     if not _needsrebundling(head, bundlerepo):
-        with util.posixfile(bundlefile, "rb") as f:
+        with util.posixfile(bundlefile, b"rb") as f:
             unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
             if isinstance(unbundler, changegroup.cg1unpacker):
-                part = bundle2.bundlepart('changegroup',
-                                          data=unbundler._stream.read())
-                part.addparam('version', '01')
+                part = bundle2.bundlepart(
+                    b'changegroup', data=unbundler._stream.read()
+                )
+                part.addparam(b'version', b'01')
                 parts.append(part)
             elif isinstance(unbundler, bundle2.unbundle20):
                 haschangegroup = False
                 for part in unbundler.iterparts():
-                    if part.type == 'changegroup':
+                    if part.type == b'changegroup':
                         haschangegroup = True
                     newpart = bundle2.bundlepart(part.type, data=part.read())
-                    for key, value in part.params.iteritems():
+                    for key, value in pycompat.iteritems(part.params):
                         newpart.addparam(key, value)
                     parts.append(newpart)
 
                 if not haschangegroup:
                     raise error.Abort(
-                        'unexpected bundle without changegroup part, ' +
-                        'head: %s' % hex(head),
-                        hint='report to administrator')
+                        b'unexpected bundle without changegroup part, '
+                        + b'head: %s' % hex(head),
+                        hint=b'report to administrator',
+                    )
             else:
-                raise error.Abort('unknown bundle type')
+                raise error.Abort(b'unknown bundle type')
     else:
         parts = _rebundle(bundlerepo, bundleroots, head)
 
     return parts
 
+
 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
     heads = heads or []
     # newheads are parents of roots of scratch bundles that were requested
@@ -486,23 +551,28 @@
             if head not in repo.changelog.nodemap:
                 if head not in nodestobundle:
                     newbundlefile = common.downloadbundle(repo, head)
-                    bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
+                    bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
                     bundlerepo = hg.repository(repo.ui, bundlepath)
 
                     allbundlestocleanup.append((bundlerepo, newbundlefile))
                     bundlerevs = set(_readbundlerevs(bundlerepo))
                     bundlecaps = _includefilelogstobundle(
-                        bundlecaps, bundlerepo, bundlerevs, repo.ui)
+                        bundlecaps, bundlerepo, bundlerevs, repo.ui
+                    )
                     cl = bundlerepo.changelog
                     bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                     for rev in bundlerevs:
                         node = cl.node(rev)
                         newphases[hex(node)] = str(phases.draft)
-                        nodestobundle[node] = (bundlerepo, bundleroots,
-                                               newbundlefile)
+                        nodestobundle[node] = (
+                            bundlerepo,
+                            bundleroots,
+                            newbundlefile,
+                        )
 
                 scratchbundles.append(
-                    _generateoutputparts(head, *nodestobundle[head]))
+                    _generateoutputparts(head, *nodestobundle[head])
+                )
                 newheads.extend(bundleroots)
                 scratchheads.append(head)
     finally:
@@ -518,8 +588,9 @@
     pullfrombundlestore = bool(scratchbundles)
     wrappedchangegrouppart = False
     wrappedlistkeys = False
-    oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
+    oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
     try:
+
         def _changegrouppart(bundler, *args, **kwargs):
             # Order is important here. First add non-scratch part
             # and only then add parts with scratch bundles because
@@ -530,32 +601,36 @@
                     bundler.addpart(part)
             return result
 
-        exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
+        exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
         wrappedchangegrouppart = True
 
         def _listkeys(orig, self, namespace):
             origvalues = orig(self, namespace)
-            if namespace == 'phases' and pullfrombundlestore:
-                if origvalues.get('publishing') == 'True':
+            if namespace == b'phases' and pullfrombundlestore:
+                if origvalues.get(b'publishing') == b'True':
                     # Make repo non-publishing to preserve draft phase
-                    del origvalues['publishing']
+                    del origvalues[b'publishing']
                 origvalues.update(newphases)
             return origvalues
 
-        extensions.wrapfunction(localrepo.localrepository, 'listkeys',
-                                _listkeys)
+        extensions.wrapfunction(
+            localrepo.localrepository, b'listkeys', _listkeys
+        )
         wrappedlistkeys = True
         heads = list((set(newheads) | set(heads)) - set(scratchheads))
-        result = orig(repo, source, heads=heads,
-                      bundlecaps=bundlecaps, **kwargs)
+        result = orig(
+            repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
+        )
     finally:
         if wrappedchangegrouppart:
-            exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
+            exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
         if wrappedlistkeys:
-            extensions.unwrapfunction(localrepo.localrepository, 'listkeys',
-                                      _listkeys)
+            extensions.unwrapfunction(
+                localrepo.localrepository, b'listkeys', _listkeys
+            )
     return result
 
+
 def _lookupwrap(orig):
     def _lookup(repo, proto, key):
         localkey = encoding.tolocal(key)
@@ -563,60 +638,68 @@
         if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
             scratchnode = repo.bundlestore.index.getnode(localkey)
             if scratchnode:
-                return "%d %s\n" % (1, scratchnode)
+                return b"%d %s\n" % (1, scratchnode)
             else:
-                return "%d %s\n" % (0, 'scratch branch %s not found' % localkey)
+                return b"%d %s\n" % (
+                    0,
+                    b'scratch branch %s not found' % localkey,
+                )
         else:
             try:
                 r = hex(repo.lookup(localkey))
-                return "%d %s\n" % (1, r)
+                return b"%d %s\n" % (1, r)
             except Exception as inst:
                 if repo.bundlestore.index.getbundle(localkey):
-                    return "%d %s\n" % (1, localkey)
+                    return b"%d %s\n" % (1, localkey)
                 else:
                     r = stringutil.forcebytestr(inst)
-                    return "%d %s\n" % (0, r)
+                    return b"%d %s\n" % (0, r)
+
     return _lookup
 
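``_lookupwrap`` above always answers in the wireproto v1 lookup format: a success flag, a space, and either a hex node or an error message, terminated by a newline. The encoding boiled down to one helper (the helper name is made up)::

    def encodelookup(success, payload):
        return b"%d %s\n" % (1 if success else 0, payload)

    assert encodelookup(True, b'a1' * 20) == b'1 ' + b'a1' * 20 + b'\n'
    assert encodelookup(False, b'scratch branch foo not found').startswith(b'0 ')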
-def _pull(orig, ui, repo, source="default", **opts):
+
+def _pull(orig, ui, repo, source=b"default", **opts):
     opts = pycompat.byteskwargs(opts)
     # Copy paste from `pull` command
-    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
+    source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))
 
     scratchbookmarks = {}
     unfi = repo.unfiltered()
     unknownnodes = []
-    for rev in opts.get('rev', []):
+    for rev in opts.get(b'rev', []):
         if rev not in unfi:
             unknownnodes.append(rev)
-    if opts.get('bookmark'):
+    if opts.get(b'bookmark'):
         bookmarks = []
-        revs = opts.get('rev') or []
-        for bookmark in opts.get('bookmark'):
+        revs = opts.get(b'rev') or []
+        for bookmark in opts.get(b'bookmark'):
             if _scratchbranchmatcher(bookmark):
                 # rev is not known yet
                 # it will be fetched with listkeyspatterns next
-                scratchbookmarks[bookmark] = 'REVTOFETCH'
+                scratchbookmarks[bookmark] = b'REVTOFETCH'
             else:
                 bookmarks.append(bookmark)
 
         if scratchbookmarks:
             other = hg.peer(repo, opts, source)
             fetchedbookmarks = other.listkeyspatterns(
-                'bookmarks', patterns=scratchbookmarks)
+                b'bookmarks', patterns=scratchbookmarks
+            )
             for bookmark in scratchbookmarks:
                 if bookmark not in fetchedbookmarks:
-                    raise error.Abort('remote bookmark %s not found!' %
-                                      bookmark)
+                    raise error.Abort(
+                        b'remote bookmark %s not found!' % bookmark
+                    )
                 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                 revs.append(fetchedbookmarks[bookmark])
-        opts['bookmark'] = bookmarks
-        opts['rev'] = revs
+        opts[b'bookmark'] = bookmarks
+        opts[b'rev'] = revs
 
     if scratchbookmarks or unknownnodes:
         # Set anyincoming to True
-        extensions.wrapfunction(discovery, 'findcommonincoming',
-                                _findcommonincoming)
+        extensions.wrapfunction(
+            discovery, b'findcommonincoming', _findcommonincoming
+        )
     try:
         # Remote scratch bookmarks will be deleted because remotenames doesn't
         # know about them. Let's save it before pull and restore after
@@ -633,11 +716,12 @@
         return result
     finally:
         if scratchbookmarks:
-            extensions.unwrapfunction(discovery, 'findcommonincoming')
+            extensions.unwrapfunction(discovery, b'findcommonincoming')
+
 
 def _readscratchremotebookmarks(ui, repo, other):
     if common.isremotebooksenabled(ui):
-        remotenamesext = extensions.find('remotenames')
+        remotenamesext = extensions.find(b'remotenames')
         remotepath = remotenamesext.activepath(repo.ui, other)
         result = {}
         # Let's refresh remotenames to make sure we have it up to date
@@ -645,19 +729,21 @@
         # and it results in deleting scratch bookmarks. Our best guess for how to
         # fix it is to use `clearnames()`
         repo._remotenames.clearnames()
-        for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
+        for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
             path, bookname = remotenamesext.splitremotename(remotebookmark)
             if path == remotepath and _scratchbranchmatcher(bookname):
-                nodes = repo.names['remotebookmarks'].nodes(repo,
-                                                            remotebookmark)
+                nodes = repo.names[b'remotebookmarks'].nodes(
+                    repo, remotebookmark
+                )
                 if nodes:
                     result[bookname] = hex(nodes[0])
         return result
     else:
         return {}
 
+
 def _saveremotebookmarks(repo, newbookmarks, remote):
-    remotenamesext = extensions.find('remotenames')
+    remotenamesext = extensions.find(b'remotenames')
     remotepath = remotenamesext.activepath(repo.ui, remote)
     branches = collections.defaultdict(list)
     bookmarks = {}
@@ -665,68 +751,73 @@
     for hexnode, nametype, remote, rname in remotenames:
         if remote != remotepath:
             continue
-        if nametype == 'bookmarks':
+        if nametype == b'bookmarks':
             if rname in newbookmarks:
                 # It's possible if we have a normal bookmark that matches
                 # scratch branch pattern. In this case just use the current
                 # bookmark node
                 del newbookmarks[rname]
             bookmarks[rname] = hexnode
-        elif nametype == 'branches':
+        elif nametype == b'branches':
             # saveremotenames expects 20 byte binary nodes for branches
             branches[rname].append(bin(hexnode))
 
-    for bookmark, hexnode in newbookmarks.iteritems():
+    for bookmark, hexnode in pycompat.iteritems(newbookmarks):
         bookmarks[bookmark] = hexnode
     remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
 
+
 def _savelocalbookmarks(repo, bookmarks):
     if not bookmarks:
         return
-    with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
+    with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
         changes = []
-        for scratchbook, node in bookmarks.iteritems():
+        for scratchbook, node in pycompat.iteritems(bookmarks):
             changectx = repo[node]
             changes.append((scratchbook, changectx.node()))
         repo._bookmarks.applychanges(repo, tr, changes)
 
+
 def _findcommonincoming(orig, *args, **kwargs):
     common, inc, remoteheads = orig(*args, **kwargs)
     return common, True, remoteheads
 
+
 def _push(orig, ui, repo, dest=None, *args, **opts):
     opts = pycompat.byteskwargs(opts)
-    bookmark = opts.get('bookmark')
+    bookmark = opts.get(b'bookmark')
     # we only support pushing one infinitepush bookmark at a time
     if len(bookmark) == 1:
         bookmark = bookmark[0]
     else:
-        bookmark = ''
+        bookmark = b''
 
     oldphasemove = None
     overrides = {(experimental, configbookmark): bookmark}
 
-    with ui.configoverride(overrides, 'infinitepush'):
-        scratchpush = opts.get('bundle_store')
+    with ui.configoverride(overrides, b'infinitepush'):
+        scratchpush = opts.get(b'bundle_store')
         if _scratchbranchmatcher(bookmark):
             scratchpush = True
             # a bundle2 can be sent back after push (for example, a bundle2
             # containing a `pushkey` part to update bookmarks)
-            ui.setconfig(experimental, 'bundle2.pushback', True)
+            ui.setconfig(experimental, b'bundle2.pushback', True)
 
         if scratchpush:
             # this is an infinitepush; we don't want the bookmark applied
             # here, but stored in the bundlestore instead
-            opts['bookmark'] = []
+            opts[b'bookmark'] = []
             ui.setconfig(experimental, configscratchpush, True)
-            oldphasemove = extensions.wrapfunction(exchange,
-                                                   '_localphasemove',
-                                                   _phasemove)
+            oldphasemove = extensions.wrapfunction(
+                exchange, b'_localphasemove', _phasemove
+            )
         # Copy-paste from the `push` command
-        path = ui.paths.getpath(dest, default=('default-push', 'default'))
+        path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
         if not path:
-            raise error.Abort(_('default repository not configured!'),
-                             hint=_("see 'hg help config.paths'"))
+            raise error.Abort(
+                _(b'default repository not configured!'),
+                hint=_(b"see 'hg help config.paths'"),
+            )
         destpath = path.pushloc or path.loc
         # Remote scratch bookmarks will be deleted because remotenames doesn't
         # know about them. Let's save them before the push and restore after
@@ -735,47 +826,59 @@
         if common.isremotebooksenabled(ui):
             if bookmark and scratchpush:
                 other = hg.peer(repo, opts, destpath)
-                fetchedbookmarks = other.listkeyspatterns('bookmarks',
-                                                          patterns=[bookmark])
+                fetchedbookmarks = other.listkeyspatterns(
+                    b'bookmarks', patterns=[bookmark]
+                )
                 remotescratchbookmarks.update(fetchedbookmarks)
             _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
     if oldphasemove:
         exchange._localphasemove = oldphasemove
     return result
 
+
 def _deleteinfinitepushbookmarks(ui, repo, path, names):
     """Prune remote names by removing the bookmarks we don't want anymore,
     then writing the result back to disk
     """
-    remotenamesext = extensions.find('remotenames')
+    remotenamesext = extensions.find(b'remotenames')
 
     # remotename format is:
     # (node, nametype ("branches" or "bookmarks"), remote, name)
     nametype_idx = 1
     remote_idx = 2
     name_idx = 3
-    remotenames = [remotename for remotename in
-                   remotenamesext.readremotenames(repo)
-                   if remotename[remote_idx] == path]
-    remote_bm_names = [remotename[name_idx] for remotename in
-                       remotenames if remotename[nametype_idx] == "bookmarks"]
+    remotenames = [
+        remotename
+        for remotename in remotenamesext.readremotenames(repo)
+        if remotename[remote_idx] == path
+    ]
+    remote_bm_names = [
+        remotename[name_idx]
+        for remotename in remotenames
+        if remotename[nametype_idx] == b"bookmarks"
+    ]
 
     for name in names:
         if name not in remote_bm_names:
-            raise error.Abort(_("infinitepush bookmark '{}' does not exist "
-                                "in path '{}'").format(name, path))
+            raise error.Abort(
+                _(b"infinitepush bookmark '%s' does not exist in path '%s'")
+                % (name, path)
+            )
 
     bookmarks = {}
     branches = collections.defaultdict(list)
     for node, nametype, remote, name in remotenames:
-        if nametype == "bookmarks" and name not in names:
+        if nametype == b"bookmarks" and name not in names:
             bookmarks[name] = node
-        elif nametype == "branches":
+        elif nametype == b"branches":
             # saveremotenames wants binary nodes for branches
             branches[name].append(bin(node))
 
     remotenamesext.saveremotenames(repo, path, branches, bookmarks)
 
+
 def _phasemove(orig, pushop, nodes, phase=phases.public):
     """prevent commits from being marked public
 
@@ -785,32 +888,31 @@
     if phase != phases.public:
         orig(pushop, nodes, phase)
 
+
 @exchange.b2partsgenerator(scratchbranchparttype)
 def partgen(pushop, bundler):
     bookmark = pushop.ui.config(experimental, configbookmark)
     scratchpush = pushop.ui.configbool(experimental, configscratchpush)
-    if 'changesets' in pushop.stepsdone or not scratchpush:
+    if b'changesets' in pushop.stepsdone or not scratchpush:
         return
 
     if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
         return
 
-    pushop.stepsdone.add('changesets')
+    pushop.stepsdone.add(b'changesets')
     if not pushop.outgoing.missing:
-        pushop.ui.status(_('no changes found\n'))
+        pushop.ui.status(_(b'no changes found\n'))
         pushop.cgresult = 0
         return
 
     # This parameter tells the server that the following bundle is an
     # infinitepush. This lets it switch the part processing to our infinitepush
     # code path.
-    bundler.addparam("infinitepush", "True")
+    bundler.addparam(b"infinitepush", b"True")
 
-    scratchparts = bundleparts.getscratchbranchparts(pushop.repo,
-                                                     pushop.remote,
-                                                     pushop.outgoing,
-                                                     pushop.ui,
-                                                     bookmark)
+    scratchparts = bundleparts.getscratchbranchparts(
+        pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
+    )
 
     for scratchpart in scratchparts:
         bundler.addpart(scratchpart)
@@ -821,44 +923,56 @@
 
     return handlereply
 
+
 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
 
+
 def _getrevs(bundle, oldnode, force, bookmark):
-    'extracts and validates the revs to be imported'
-    revs = [bundle[r] for r in bundle.revs('sort(bundle())')]
+    'extracts and validates the revs to be imported'
+    revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]
 
     # new bookmark
     if oldnode is None:
         return revs
 
     # Fast forward update
-    if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
+    if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
         return revs
 
     return revs
 
+
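A Mercurial-free sketch of the ancestry test behind the revset
"bundle() & <oldnode>::" used in _getrevs above: the check succeeds when at
least one incoming node descends from the old bookmark node. The parents map
and node names here are made up for illustration.

    def descends_from(parents, node, ancestor):
        while node is not None:
            if node == ancestor:
                return True
            node = parents.get(node)  # linear history keeps the sketch short
        return False

    parents = {'c': 'b', 'b': 'a', 'a': None}
    incoming = ['b', 'c']
    assert any(descends_from(parents, n, 'a') for n in incoming)   # fast forward
    assert not any(descends_from(parents, n, 'z') for n in incoming)
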
 @contextlib.contextmanager
 def logservicecall(logger, service, **kwargs):
     start = time.time()
-    logger(service, eventtype='start', **kwargs)
+    logger(service, eventtype=b'start', **kwargs)
     try:
         yield
-        logger(service, eventtype='success',
-               elapsedms=(time.time() - start) * 1000, **kwargs)
+        logger(
+            service,
+            eventtype=b'success',
+            elapsedms=(time.time() - start) * 1000,
+            **kwargs
+        )
     except Exception as e:
-        logger(service, eventtype='failure',
-               elapsedms=(time.time() - start) * 1000, errormsg=str(e),
-               **kwargs)
+        logger(
+            service,
+            eventtype=b'failure',
+            elapsedms=(time.time() - start) * 1000,
+            errormsg=str(e),
+            **kwargs
+        )
         raise
 
+
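logservicecall above wraps a unit of work in start/success/failure events with
a millisecond duration. A self-contained usage sketch; the print-based logger
is a stand-in for the functools.partial(ui.log, ...) object built in
_getorcreateinfinitepushlogger below, not part of the extension.

    import contextlib
    import time

    @contextlib.contextmanager
    def logservicecall(logger, service, **kwargs):
        # same shape as the context manager above, minus the byte strings
        start = time.time()
        logger(service, eventtype='start', **kwargs)
        try:
            yield
            logger(service, eventtype='success',
                   elapsedms=(time.time() - start) * 1000, **kwargs)
        except Exception as e:
            logger(service, eventtype='failure',
                   elapsedms=(time.time() - start) * 1000, errormsg=str(e),
                   **kwargs)
            raise

    def logger(service, **fields):    # stand-in for the partial-bound ui.log
        print(service, sorted(fields))

    with logservicecall(logger, 'bundlestore', bundlesize=42):
        pass  # guarded work goes here; exceptions are logged and re-raised
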
 def _getorcreateinfinitepushlogger(op):
-    logger = op.records['infinitepushlogger']
+    logger = op.records[b'infinitepushlogger']
     if not logger:
         ui = op.repo.ui
         try:
             username = procutil.getuser()
         except Exception:
-            username = 'unknown'
+            username = b'unknown'
         # Generate random request id to be able to find all logged entries
         # for the same request. Since requestid is pseudo-generated it may
         # not be unique, but we assume that (hostname, username, requestid)
@@ -866,45 +980,49 @@
         random.seed()
         requestid = random.randint(0, 2000000000)
         hostname = socket.gethostname()
-        logger = functools.partial(ui.log, 'infinitepush', user=username,
-                                   requestid=requestid, hostname=hostname,
-                                   reponame=ui.config('infinitepush',
-                                                      'reponame'))
-        op.records.add('infinitepushlogger', logger)
+        logger = functools.partial(
+            ui.log,
+            b'infinitepush',
+            user=username,
+            requestid=requestid,
+            hostname=hostname,
+            reponame=ui.config(b'infinitepush', b'reponame'),
+        )
+        op.records.add(b'infinitepushlogger', logger)
     else:
         logger = logger[0]
     return logger
 
+
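The logger construction above leans on functools.partial to freeze the
per-request fields (user, requestid, hostname, reponame) once, so call sites
only name the service and event. A sketch with a print stand-in for ui.log;
the user name is invented for the example.

    import functools
    import random
    import socket

    def uilog(event, *msg, **opts):  # stand-in for ui.log(event, *msg, **opts)
        print(event, msg, sorted(opts))

    random.seed()
    requestid = random.randint(0, 2000000000)  # pseudo-unique, as noted above
    logger = functools.partial(
        uilog, 'infinitepush',
        user='alice', requestid=requestid, hostname=socket.gethostname())

    logger('bundlestore', eventtype='start')  # request fields attached for free
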
 def storetobundlestore(orig, repo, op, unbundler):
     """stores the incoming bundle coming from push command to the bundlestore
     instead of applying on the revlogs"""
 
-    repo.ui.status(_("storing changesets on the bundlestore\n"))
+    repo.ui.status(_(b"storing changesets on the bundlestore\n"))
     bundler = bundle2.bundle20(repo.ui)
 
     # processing each part and storing it in bundler
     with bundle2.partiterator(repo, op, unbundler) as parts:
         for part in parts:
             bundlepart = None
-            if part.type == 'replycaps':
+            if part.type == b'replycaps':
                 # This configures the current operation to allow reply parts.
                 bundle2._processpart(op, part)
             else:
                 bundlepart = bundle2.bundlepart(part.type, data=part.read())
-                for key, value in part.params.iteritems():
+                for key, value in pycompat.iteritems(part.params):
                     bundlepart.addparam(key, value)
 
                 # Certain parts require a response
-                if part.type in ('pushkey', 'changegroup'):
+                if part.type in (b'pushkey', b'changegroup'):
                     if op.reply is not None:
-                        rpart = op.reply.newpart('reply:%s' % part.type)
-                        rpart.addparam('in-reply-to', b'%d' % part.id,
-                                       mandatory=False)
-                        rpart.addparam('return', '1', mandatory=False)
+                        rpart = op.reply.newpart(b'reply:%s' % part.type)
+                        rpart.addparam(
+                            b'in-reply-to', b'%d' % part.id, mandatory=False
+                        )
+                        rpart.addparam(b'return', b'1', mandatory=False)
 
-            op.records.add(part.type, {
-                'return': 1,
-            })
+            op.records.add(part.type, {b'return': 1})
             if bundlepart:
                 bundler.addpart(bundlepart)
 
@@ -925,27 +1043,28 @@
             # we would rather see the original exception
             pass
 
+
 def processparts(orig, repo, op, unbundler):
 
     # make sure we don't wrap processparts in case of `hg unbundle`
-    if op.source == 'unbundle':
+    if op.source == b'unbundle':
         return orig(repo, op, unbundler)
 
     # this server routes each push to bundle store
-    if repo.ui.configbool('infinitepush', 'pushtobundlestore'):
+    if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
         return storetobundlestore(orig, repo, op, unbundler)
 
-    if unbundler.params.get('infinitepush') != 'True':
+    if unbundler.params.get(b'infinitepush') != b'True':
         return orig(repo, op, unbundler)
 
-    handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')
+    handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')
 
     bundler = bundle2.bundle20(repo.ui)
     cgparams = None
     with bundle2.partiterator(repo, op, unbundler) as parts:
         for part in parts:
             bundlepart = None
-            if part.type == 'replycaps':
+            if part.type == b'replycaps':
                 # This configures the current operation to allow reply parts.
                 bundle2._processpart(op, part)
             elif part.type == bundleparts.scratchbranchparttype:
@@ -954,18 +1073,23 @@
                 # when we upload to the store. Eventually those parameters will
                 # be put on the actual bundle instead of this part, then we can
                 # send a vanilla changegroup instead of the scratchbranch part.
-                cgversion = part.params.get('cgversion', '01')
-                bundlepart = bundle2.bundlepart('changegroup', data=part.read())
-                bundlepart.addparam('version', cgversion)
+                cgversion = part.params.get(b'cgversion', b'01')
+                bundlepart = bundle2.bundlepart(
+                    b'changegroup', data=part.read()
+                )
+                bundlepart.addparam(b'version', cgversion)
                 cgparams = part.params
 
                 # If we're not dumping all parts into the new bundle, we need to
                 # alert the future pushkey and phase-heads handler to skip
                 # the part.
                 if not handleallparts:
-                    op.records.add(scratchbranchparttype + '_skippushkey', True)
-                    op.records.add(scratchbranchparttype + '_skipphaseheads',
-                                   True)
+                    op.records.add(
+                        scratchbranchparttype + b'_skippushkey', True
+                    )
+                    op.records.add(
+                        scratchbranchparttype + b'_skipphaseheads', True
+                    )
             else:
                 if handleallparts:
                     # Ideally we would not process any parts, and instead just
@@ -973,23 +1097,22 @@
                     # differs from previous behavior, we need to put it behind a
                     # config flag for incremental rollout.
                     bundlepart = bundle2.bundlepart(part.type, data=part.read())
-                    for key, value in part.params.iteritems():
+                    for key, value in pycompat.iteritems(part.params):
                         bundlepart.addparam(key, value)
 
                     # Certain parts require a response
-                    if part.type == 'pushkey':
+                    if part.type == b'pushkey':
                         if op.reply is not None:
-                            rpart = op.reply.newpart('reply:pushkey')
-                            rpart.addparam('in-reply-to', str(part.id),
-                                           mandatory=False)
-                            rpart.addparam('return', '1', mandatory=False)
+                            rpart = op.reply.newpart(b'reply:pushkey')
+                            rpart.addparam(
+                                b'in-reply-to', b'%d' % part.id, mandatory=False
+                            )
+                            rpart.addparam(b'return', b'1', mandatory=False)
                 else:
                     bundle2._processpart(op, part)
 
             if handleallparts:
-                op.records.add(part.type, {
-                    'return': 1,
-                })
+                op.records.add(part.type, {b'return': 1})
             if bundlepart:
                 bundler.addpart(bundlepart)
 
@@ -1011,46 +1134,48 @@
                 # we would rather see the original exception
                 pass
 
+
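processparts above is at heart a three-way dispatcher; everything after the
early exits is part copying. The decision order matters: `hg unbundle` is
never intercepted, a pushtobundlestore server stores every push, and only
bundles explicitly flagged infinitepush take the scratch path. A schematic
(the names below are stand-ins, not the real bundle2 API):

    def route(source, pushtobundlestore, params, orig, store, scratch):
        if source == 'unbundle':
            return orig            # `hg unbundle` is never intercepted
        if pushtobundlestore:
            return store           # server stores every push as a bundle
        if params.get('infinitepush') != 'True':
            return orig            # a normal push, leave it alone
        return scratch             # an infinitepush scratch push

    assert route('unbundle', True, {}, 'orig', 'store', 'scratch') == 'orig'
    assert route('push', True, {}, 'orig', 'store', 'scratch') == 'store'
    assert route('push', False, {'infinitepush': 'True'},
                 'orig', 'store', 'scratch') == 'scratch'
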
 def storebundle(op, params, bundlefile):
     log = _getorcreateinfinitepushlogger(op)
     parthandlerstart = time.time()
-    log(scratchbranchparttype, eventtype='start')
+    log(scratchbranchparttype, eventtype=b'start')
     index = op.repo.bundlestore.index
     store = op.repo.bundlestore.store
-    op.records.add(scratchbranchparttype + '_skippushkey', True)
+    op.records.add(scratchbranchparttype + b'_skippushkey', True)
 
     bundle = None
     try:  # guards bundle
-        bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
+        bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
         bundle = hg.repository(op.repo.ui, bundlepath)
 
-        bookmark = params.get('bookmark')
-        bookprevnode = params.get('bookprevnode', '')
-        force = params.get('force')
+        bookmark = params.get(b'bookmark')
+        bookprevnode = params.get(b'bookprevnode', b'')
+        force = params.get(b'force')
 
         if bookmark:
             oldnode = index.getnode(bookmark)
         else:
             oldnode = None
-        bundleheads = bundle.revs('heads(bundle())')
+        bundleheads = bundle.revs(b'heads(bundle())')
         if bookmark and len(bundleheads) > 1:
             raise error.Abort(
-                _('cannot push more than one head to a scratch branch'))
+                _(b'cannot push more than one head to a scratch branch')
+            )
 
         revs = _getrevs(bundle, oldnode, force, bookmark)
 
         # Notify the user of what is being pushed
-        plural = 's' if len(revs) > 1 else ''
-        op.repo.ui.warn(_("pushing %d commit%s:\n") % (len(revs), plural))
+        plural = b's' if len(revs) > 1 else b''
+        op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
         maxoutput = 10
         for i in range(0, min(len(revs), maxoutput)):
-            firstline = bundle[revs[i]].description().split('\n')[0][:50]
-            op.repo.ui.warn(("    %s  %s\n") % (revs[i], firstline))
+            firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
+            op.repo.ui.warn(b"    %s  %s\n" % (revs[i], firstline))
 
         if len(revs) > maxoutput + 1:
-            op.repo.ui.warn(("    ...\n"))
-            firstline = bundle[revs[-1]].description().split('\n')[0][:50]
-            op.repo.ui.warn(("    %s  %s\n") % (revs[-1], firstline))
+            op.repo.ui.warn(b"    ...\n")
+            firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
+            op.repo.ui.warn(b"    %s  %s\n" % (revs[-1], firstline))
 
         nodesctx = [bundle[rev] for rev in revs]
         inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
@@ -1065,46 +1190,64 @@
         bookmarknode = nodesctx[-1].hex() if nodesctx else None
         key = None
         if newheadscount:
-            with open(bundlefile, 'rb') as f:
+            with open(bundlefile, b'rb') as f:
                 bundledata = f.read()
-                with logservicecall(log, 'bundlestore',
-                                    bundlesize=len(bundledata)):
+                with logservicecall(
+                    log, b'bundlestore', bundlesize=len(bundledata)
+                ):
                     bundlesizelimit = 100 * 1024 * 1024  # 100 MB
                     if len(bundledata) > bundlesizelimit:
-                        error_msg = ('bundle is too big: %d bytes. ' +
-                                     'max allowed size is 100 MB')
+                        error_msg = (
+                            b'bundle is too big: %d bytes. '
+                            + b'max allowed size is 100 MB'
+                        )
                         raise error.Abort(error_msg % (len(bundledata),))
                     key = store.write(bundledata)
 
-        with logservicecall(log, 'index', newheadscount=newheadscount), index:
+        with logservicecall(log, b'index', newheadscount=newheadscount), index:
             if key:
                 index.addbundle(key, nodesctx)
             if bookmark:
                 index.addbookmark(bookmark, bookmarknode)
-                _maybeaddpushbackpart(op, bookmark, bookmarknode,
-                                      bookprevnode, params)
-        log(scratchbranchparttype, eventtype='success',
-            elapsedms=(time.time() - parthandlerstart) * 1000)
+                _maybeaddpushbackpart(
+                    op, bookmark, bookmarknode, bookprevnode, params
+                )
+        log(
+            scratchbranchparttype,
+            eventtype=b'success',
+            elapsedms=(time.time() - parthandlerstart) * 1000,
+        )
 
     except Exception as e:
-        log(scratchbranchparttype, eventtype='failure',
+        log(
+            scratchbranchparttype,
+            eventtype=b'failure',
             elapsedms=(time.time() - parthandlerstart) * 1000,
-            errormsg=str(e))
+            errormsg=str(e),
+        )
         raise
     finally:
         if bundle:
             bundle.close()
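
The user-facing listing in storebundle above caps output at ten revisions
and, when more arrive, elides the middle and repeats the last one after an
ellipsis. The policy in isolation (note the quirk, faithful to the code
above: exactly maxoutput + 1 revisions print only the first maxoutput):

    def listing(revs, maxoutput=10):
        lines = [str(r) for r in revs[:maxoutput]]
        if len(revs) > maxoutput + 1:
            lines.append('...')
            lines.append(str(revs[-1]))
        return lines

    assert listing([1, 2, 3]) == ['1', '2', '3']
    assert listing(list(range(13))) == [str(i) for i in range(10)] + ['...', '12']
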
 
-@bundle2.parthandler(scratchbranchparttype,
-                     ('bookmark', 'bookprevnode', 'force',
-                      'pushbackbookmarks', 'cgversion'))
+
+@bundle2.parthandler(
+    scratchbranchparttype,
+    (
+        b'bookmark',
+        b'bookprevnode',
+        b'force',
+        b'pushbackbookmarks',
+        b'cgversion',
+    ),
+)
 def bundle2scratchbranch(op, part):
     '''unbundle a bundle2 part containing a changegroup to store'''
 
     bundler = bundle2.bundle20(op.repo.ui)
-    cgversion = part.params.get('cgversion', '01')
-    cgpart = bundle2.bundlepart('changegroup', data=part.read())
-    cgpart.addparam('version', cgversion)
+    cgversion = part.params.get(b'cgversion', b'01')
+    cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
+    cgpart.addparam(b'version', cgversion)
     bundler.addpart(cgpart)
     buf = util.chunkbuffer(bundler.getchunks())
 
@@ -1125,16 +1268,20 @@
 
     return 1
 
+
 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
-    if params.get('pushbackbookmarks'):
-        if op.reply and 'pushback' in op.reply.capabilities:
+    if params.get(b'pushbackbookmarks'):
+        if op.reply and b'pushback' in op.reply.capabilities:
             params = {
-                'namespace': 'bookmarks',
-                'key': bookmark,
-                'new': newnode,
-                'old': oldnode,
+                b'namespace': b'bookmarks',
+                b'key': bookmark,
+                b'new': newnode,
+                b'old': oldnode,
             }
-            op.reply.newpart('pushkey', mandatoryparams=params.iteritems())
+            op.reply.newpart(
+                b'pushkey', mandatoryparams=pycompat.iteritems(params)
+            )
+
 
 def bundle2pushkey(orig, op, part):
     '''Wrapper of bundle2.handlepushkey()
@@ -1142,15 +1289,16 @@
     The only goal is to skip calling the original function if flag is set.
     It's set if infinitepush push is happening.
     '''
-    if op.records[scratchbranchparttype + '_skippushkey']:
+    if op.records[scratchbranchparttype + b'_skippushkey']:
         if op.reply is not None:
-            rpart = op.reply.newpart('reply:pushkey')
-            rpart.addparam('in-reply-to', str(part.id), mandatory=False)
-            rpart.addparam('return', '1', mandatory=False)
+            rpart = op.reply.newpart(b'reply:pushkey')
+            rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
+            rpart.addparam(b'return', b'1', mandatory=False)
         return 1
 
     return orig(op, part)
 
+
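bundle2pushkey and bundle2handlephases share one pattern: the scratch part
handler plants a record on the bundle operation, and the wrappers consult it
to short-circuit instead of calling the stock handler. Stripped of Mercurial
types (the Op class and record key here are simplified stand-ins):

    class Op(object):
        def __init__(self, skip):
            self.records = {'skippushkey': skip}

    def orig_handler(op, part):
        return 'handled'

    def wrapped(orig, op, part):
        if op.records['skippushkey']:
            return 1                 # report success without touching the repo
        return orig(op, part)

    assert wrapped(orig_handler, Op(True), None) == 1
    assert wrapped(orig_handler, Op(False), None) == 'handled'
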
 def bundle2handlephases(orig, op, part):
     '''Wrapper of bundle2.handlephases()
 
@@ -1158,11 +1306,12 @@
     It's set if infinitepush push is happening.
     '''
 
-    if op.records[scratchbranchparttype + '_skipphaseheads']:
+    if op.records[scratchbranchparttype + b'_skipphaseheads']:
         return
 
     return orig(op, part)
 
+
 def _asyncsavemetadata(root, nodes):
     '''starts a separate process that fills metadata for the nodes
 
@@ -1175,12 +1324,21 @@
         return
     nodesargs = []
     for node in nodes:
-        nodesargs.append('--node')
+        nodesargs.append(b'--node')
         nodesargs.append(node)
-    with open(os.devnull, 'w+b') as devnull:
-        cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
-                   '-R', root] + nodesargs
+    with open(os.devnull, b'w+b') as devnull:
+        cmdline = [
+            util.hgexecutable(),
+            b'debugfillinfinitepushmetadata',
+            b'-R',
+            root,
+        ] + nodesargs
         # Process will run in the background; we don't care about the return code
-        subprocess.Popen(pycompat.rapply(procutil.tonativestr, cmdline),
-                         close_fds=True, shell=False,
-                         stdin=devnull, stdout=devnull, stderr=devnull)
+        subprocess.Popen(
+            pycompat.rapply(procutil.tonativestr, cmdline),
+            close_fds=True,
+            shell=False,
+            stdin=devnull,
+            stdout=devnull,
+            stderr=devnull,
+        )
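_asyncsavemetadata's launch recipe, reduced to the generic fire-and-forget
shape: every stream points at os.devnull, close_fds drops inherited
descriptors, and the Popen handle is deliberately discarded so nothing waits
on the child. A runnable stand-in command replaces the hg invocation.

    import os
    import subprocess
    import sys

    cmdline = [sys.executable, '-c', 'pass']  # stand-in for the hg command line
    with open(os.devnull, 'w+b') as devnull:
        subprocess.Popen(cmdline, close_fds=True, shell=False,
                         stdin=devnull, stdout=devnull, stderr=devnull)
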
--- a/hgext/infinitepush/bundleparts.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/infinitepush/bundleparts.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,6 +12,8 @@
     changegroup,
     error,
     extensions,
+    node as nodemod,
+    pycompat,
     revsetlang,
     util,
 )
@@ -20,62 +22,72 @@
 
 isremotebooksenabled = common.isremotebooksenabled
 
-scratchbranchparttype = 'b2x:infinitepush'
+scratchbranchparttype = b'b2x:infinitepush'
+
 
 def getscratchbranchparts(repo, peer, outgoing, ui, bookmark):
     if not outgoing.missing:
-        raise error.Abort(_('no commits to push'))
+        raise error.Abort(_(b'no commits to push'))
 
     if scratchbranchparttype not in bundle2.bundle2caps(peer):
-        raise error.Abort(_('no server support for %r') % scratchbranchparttype)
+        raise error.Abort(
+            _(b'no server support for %r') % scratchbranchparttype
+        )
 
-    _validaterevset(repo, revsetlang.formatspec('%ln', outgoing.missing),
-                    bookmark)
+    _validaterevset(
+        repo, revsetlang.formatspec(b'%ln', outgoing.missing), bookmark
+    )
 
     supportedversions = changegroup.supportedoutgoingversions(repo)
     # Explicitly avoid using '01' changegroup version in infinitepush to
     # support general delta
-    supportedversions.discard('01')
+    supportedversions.discard(b'01')
     cgversion = min(supportedversions)
     _handlelfs(repo, outgoing.missing)
-    cg = changegroup.makestream(repo, outgoing, cgversion, 'push')
+    cg = changegroup.makestream(repo, outgoing, cgversion, b'push')
 
     params = {}
-    params['cgversion'] = cgversion
+    params[b'cgversion'] = cgversion
     if bookmark:
-        params['bookmark'] = bookmark
+        params[b'bookmark'] = bookmark
         # 'prevbooknode' is necessary for pushkey reply part
-        params['bookprevnode'] = ''
+        params[b'bookprevnode'] = b''
         bookmarks = repo._bookmarks
         if bookmark in bookmarks:
-            params['bookprevnode'] = bookmarks.changectx(bookmark).hex()
+            params[b'bookprevnode'] = nodemod.hex(bookmarks[bookmark])
 
     # Do not send pushback bundle2 part with bookmarks if remotenames extension
     # is enabled. It will be handled manually in `_push()`
     if not isremotebooksenabled(ui):
-        params['pushbackbookmarks'] = '1'
+        params[b'pushbackbookmarks'] = b'1'
 
     parts = []
 
     # .upper() marks this as a mandatory part: the server will abort if
     # there's no handler
-    parts.append(bundle2.bundlepart(
-        scratchbranchparttype.upper(),
-        advisoryparams=params.iteritems(),
-        data=cg))
+    parts.append(
+        bundle2.bundlepart(
+            scratchbranchparttype.upper(),
+            advisoryparams=pycompat.iteritems(params),
+            data=cg,
+        )
+    )
 
     return parts
 
+
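The .upper() comment in getscratchbranchparts above is the whole bundle2
convention: the case of the part type encodes whether a part is mandatory, so
a receiver lacking a handler for an upper-cased type must abort rather than
silently skip it. In miniature:

    parttype = 'b2x:infinitepush'
    mandatory = parttype.upper()       # 'B2X:INFINITEPUSH'

    def ismandatory(ptype):
        return ptype != ptype.lower()  # any upper-case letter marks it mandatory

    assert ismandatory(mandatory)
    assert not ismandatory(parttype)
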
 def _validaterevset(repo, revset, bookmark):
     """Abort if the revs to be pushed aren't valid for a scratch branch."""
     if not repo.revs(revset):
-        raise error.Abort(_('nothing to push'))
+        raise error.Abort(_(b'nothing to push'))
     if bookmark:
         # Allow bundle with many heads only if no bookmark is specified
-        heads = repo.revs('heads(%r)', revset)
+        heads = repo.revs(b'heads(%r)', revset)
         if len(heads) > 1:
             raise error.Abort(
-                _('cannot push more than one head to a scratch branch'))
+                _(b'cannot push more than one head to a scratch branch')
+            )
+
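_validaterevset's single-head rule, checked the way a revset heads() would on
a toy DAG: a head is any pushed node that no other pushed node lists as a
parent. Node names are illustrative only.

    def heads(parents, nodes):
        nodes = set(nodes)
        nonheads = {p for n in nodes for p in parents.get(n, ()) if p in nodes}
        return nodes - nonheads

    parents = {'b': ('a',), 'c': ('a',)}
    assert heads(parents, {'a', 'b'}) == {'b'}         # linear push: one head
    assert len(heads(parents, {'a', 'b', 'c'})) == 2   # two heads: abort case
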
 
 def _handlelfs(repo, missing):
     '''Special case if lfs is enabled
@@ -84,12 +96,13 @@
     to make sure large files are uploaded to lfs
     '''
     try:
-        lfsmod = extensions.find('lfs')
+        lfsmod = extensions.find(b'lfs')
         lfsmod.wrapper.uploadblobsfromrevs(repo, missing)
     except KeyError:
         # Ignore if lfs extension is not enabled
         return
 
+
 class copiedpart(object):
     """a copy of unbundlepart content that can be consumed later"""
 
--- a/hgext/infinitepush/common.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/infinitepush/common.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,19 +15,23 @@
     pycompat,
 )
 
+
 def isremotebooksenabled(ui):
-    return ('remotenames' in extensions._extensions and
-            ui.configbool('remotenames', 'bookmarks'))
+    return b'remotenames' in extensions._extensions and ui.configbool(
+        b'remotenames', b'bookmarks'
+    )
+
 
 def downloadbundle(repo, unknownbinhead):
     index = repo.bundlestore.index
     store = repo.bundlestore.store
     bundleid = index.getbundle(hex(unknownbinhead))
     if bundleid is None:
-        raise error.Abort('%s head is not known' % hex(unknownbinhead))
+        raise error.Abort(b'%s head is not known' % hex(unknownbinhead))
     bundleraw = store.read(bundleid)
     return _makebundlefromraw(bundleraw)
 
+
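downloadbundle above is a two-level lookup: the index maps a node hash to a
bundle id, and the store maps that bundle id to raw bundle bytes. Dict-backed
stand-ins make the contract explicit (keys and payload are invented):

    index = {'ff' * 20: 'bundle-1'}    # node hex -> bundle id
    store = {'bundle-1': b'HG20...'}   # bundle id -> raw bundle contents

    def download(hexnode):
        bundleid = index.get(hexnode)
        if bundleid is None:
            raise LookupError('%s head is not known' % hexnode)
        return store[bundleid]

    assert download('ff' * 20) == b'HG20...'
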
 def _makebundlefromraw(data):
     fp = None
     fd, bundlefile = pycompat.mkstemp()
--- a/hgext/infinitepush/fileindexapi.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/infinitepush/fileindexapi.py	Mon Oct 21 11:09:48 2019 -0400
@@ -21,17 +21,18 @@
 
 from . import indexapi
 
+
 class fileindexapi(indexapi.indexapi):
     def __init__(self, repo):
         super(fileindexapi, self).__init__()
         self._repo = repo
-        root = repo.ui.config('infinitepush', 'indexpath')
+        root = repo.ui.config(b'infinitepush', b'indexpath')
         if not root:
-            root = os.path.join('scratchbranches', 'index')
+            root = os.path.join(b'scratchbranches', b'index')
 
-        self._nodemap = os.path.join(root, 'nodemap')
-        self._bookmarkmap = os.path.join(root, 'bookmarkmap')
-        self._metadatamap = os.path.join(root, 'nodemetadatamap')
+        self._nodemap = os.path.join(root, b'nodemap')
+        self._bookmarkmap = os.path.join(root, b'bookmarkmap')
+        self._metadatamap = os.path.join(root, b'nodemetadatamap')
         self._lock = None
 
     def __enter__(self):
@@ -77,8 +78,8 @@
         vfs.write(os.path.join(self._metadatamap, node), jsonmetadata)
 
     def _listbookmarks(self, pattern):
-        if pattern.endswith('*'):
-            pattern = 're:^' + pattern[:-1] + '.*'
+        if pattern.endswith(b'*'):
+            pattern = b're:^' + pattern[:-1] + b'.*'
         kind, pat, matcher = stringutil.stringmatcher(pattern)
         prefixlen = len(self._bookmarkmap) + 1
         for dirpath, _, books in self._repo.vfs.walk(self._bookmarkmap):
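
_listbookmarks above feeds its pattern to stringutil.stringmatcher after
rewriting a trailing '*' glob into Mercurial's 're:' form. The rewrite,
checked against plain re:

    import re

    def topattern(pattern):
        if pattern.endswith('*'):
            pattern = 're:^' + pattern[:-1] + '.*'
        return pattern

    pat = topattern('scratch/joe/*')
    assert pat == 're:^scratch/joe/.*'
    assert re.match(pat[len('re:'):], 'scratch/joe/feature')
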
--- a/hgext/infinitepush/indexapi.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/infinitepush/indexapi.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,6 +7,7 @@
 
 from __future__ import absolute_import
 
+
 class indexapi(object):
     """Class that manages access to infinitepush index.
 
@@ -66,5 +67,6 @@
         """Saves optional metadata for a given node"""
         raise NotImplementedError()
 
+
 class indexexception(Exception):
     pass
--- a/hgext/infinitepush/sqlindexapi.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/infinitepush/sqlindexapi.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,31 +14,45 @@
 import warnings
 import mysql.connector
 
+from mercurial import pycompat
+
 from . import indexapi
 
+
 def _convertbookmarkpattern(pattern):
-    pattern = pattern.replace('_', '\\_')
-    pattern = pattern.replace('%', '\\%')
-    if pattern.endswith('*'):
-        pattern = pattern[:-1] + '%'
+    pattern = pattern.replace(b'_', b'\\_')
+    pattern = pattern.replace(b'%', b'\\%')
+    if pattern.endswith(b'*'):
+        pattern = pattern[:-1] + b'%'
     return pattern
 
+
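_convertbookmarkpattern maps the same kind of bookmark glob onto SQL LIKE
instead: LIKE's own wildcards '_' and '%' are escaped first, then a trailing
'*' becomes '%'. Worked examples:

    def convert(pattern):
        pattern = pattern.replace('_', '\\_')
        pattern = pattern.replace('%', '\\%')
        if pattern.endswith('*'):
            pattern = pattern[:-1] + '%'
        return pattern

    assert convert('scratch/my_feature*') == 'scratch/my\\_feature%'
    assert convert('exact/name') == 'exact/name'   # no trailing *: exact match
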
 class sqlindexapi(indexapi.indexapi):
     '''
     Sql backend for infinitepush index. See schema.sql
     '''
 
-    def __init__(self, reponame, host, port,
-                 database, user, password, logfile, loglevel,
-                 waittimeout=300, locktimeout=120):
+    def __init__(
+        self,
+        reponame,
+        host,
+        port,
+        database,
+        user,
+        password,
+        logfile,
+        loglevel,
+        waittimeout=300,
+        locktimeout=120,
+    ):
         super(sqlindexapi, self).__init__()
         self.reponame = reponame
         self.sqlargs = {
-            'host': host,
-            'port': port,
-            'database': database,
-            'user': user,
-            'password': password,
+            b'host': host,
+            b'port': port,
+            b'database': database,
+            b'user': user,
+            b'password': password,
         }
         self.sqlconn = None
         self.sqlcursor = None
@@ -53,10 +67,11 @@
 
     def sqlconnect(self):
         if self.sqlconn:
-            raise indexapi.indexexception("SQL connection already open")
+            raise indexapi.indexexception(b"SQL connection already open")
         if self.sqlcursor:
-            raise indexapi.indexexception("SQL cursor already open without"
-                                          " connection")
+            raise indexapi.indexexception(
+                b"SQL cursor already open without connection"
+            )
         retry = 3
         while True:
             try:
@@ -77,18 +92,19 @@
                     raise
                 time.sleep(0.2)
 
-        waittimeout = self.sqlconn.converter.escape('%s' % self._waittimeout)
+        waittimeout = self.sqlconn.converter.escape(b'%s' % self._waittimeout)
 
         self.sqlcursor = self.sqlconn.cursor()
-        self.sqlcursor.execute("SET wait_timeout=%s" % waittimeout)
-        self.sqlcursor.execute("SET innodb_lock_wait_timeout=%s" %
-                               self._locktimeout)
+        self.sqlcursor.execute(b"SET wait_timeout=%s" % waittimeout)
+        self.sqlcursor.execute(
+            b"SET innodb_lock_wait_timeout=%s" % self._locktimeout
+        )
         self._connected = True
 
     def close(self):
         """Cleans up the metadata store connection."""
         with warnings.catch_warnings():
-            warnings.simplefilter("ignore")
+            warnings.simplefilter(b"ignore")
             self.sqlcursor.close()
             self.sqlconn.close()
         self.sqlcursor = None
@@ -108,31 +124,40 @@
     def addbundle(self, bundleid, nodesctx):
         if not self._connected:
             self.sqlconnect()
-        self.log.info("ADD BUNDLE %r %r" % (self.reponame, bundleid))
+        self.log.info(b"ADD BUNDLE %r %r" % (self.reponame, bundleid))
         self.sqlcursor.execute(
-            "INSERT INTO bundles(bundle, reponame) VALUES "
-            "(%s, %s)", params=(bundleid, self.reponame))
+            b"INSERT INTO bundles(bundle, reponame) VALUES (%s, %s)",
+            params=(bundleid, self.reponame),
+        )
         for ctx in nodesctx:
             self.sqlcursor.execute(
-                "INSERT INTO nodestobundle(node, bundle, reponame) "
-                "VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE "
-                "bundle=VALUES(bundle)",
-                params=(ctx.hex(), bundleid, self.reponame))
+                b"INSERT INTO nodestobundle(node, bundle, reponame) "
+                b"VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE "
+                b"bundle=VALUES(bundle)",
+                params=(ctx.hex(), bundleid, self.reponame),
+            )
 
             extra = ctx.extra()
             author_name = ctx.user()
-            committer_name = extra.get('committer', ctx.user())
+            committer_name = extra.get(b'committer', ctx.user())
             author_date = int(ctx.date()[0])
-            committer_date = int(extra.get('committer_date', author_date))
+            committer_date = int(extra.get(b'committer_date', author_date))
             self.sqlcursor.execute(
-                "INSERT IGNORE INTO nodesmetadata(node, message, p1, p2, "
-                "author, committer, author_date, committer_date, "
-                "reponame) VALUES "
-                "(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
-                params=(ctx.hex(), ctx.description(),
-                        ctx.p1().hex(), ctx.p2().hex(), author_name,
-                        committer_name, author_date, committer_date,
-                        self.reponame)
+                b"INSERT IGNORE INTO nodesmetadata(node, message, p1, p2, "
+                b"author, committer, author_date, committer_date, "
+                b"reponame) VALUES "
+                b"(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
+                params=(
+                    ctx.hex(),
+                    ctx.description(),
+                    ctx.p1().hex(),
+                    ctx.p2().hex(),
+                    author_name,
+                    committer_name,
+                    author_date,
+                    committer_date,
+                    self.reponame,
+                ),
             )
 
     def addbookmark(self, bookmark, node):
@@ -141,27 +166,30 @@
         if not self._connected:
             self.sqlconnect()
         self.log.info(
-            "ADD BOOKMARKS %r bookmark: %r node: %r" %
-            (self.reponame, bookmark, node))
+            b"ADD BOOKMARKS %r bookmark: %r node: %r"
+            % (self.reponame, bookmark, node)
+        )
         self.sqlcursor.execute(
-            "INSERT INTO bookmarkstonode(bookmark, node, reponame) "
-            "VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE node=VALUES(node)",
-            params=(bookmark, node, self.reponame))
+            b"INSERT INTO bookmarkstonode(bookmark, node, reponame) "
+            b"VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE node=VALUES(node)",
+            params=(bookmark, node, self.reponame),
+        )
 
     def addmanybookmarks(self, bookmarks):
         if not self._connected:
             self.sqlconnect()
         args = []
         values = []
-        for bookmark, node in bookmarks.iteritems():
-            args.append('(%s, %s, %s)')
+        for bookmark, node in pycompat.iteritems(bookmarks):
+            args.append(b'(%s, %s, %s)')
             values.extend((bookmark, node, self.reponame))
-        args = ','.join(args)
+        args = b','.join(args)
 
         self.sqlcursor.execute(
-            "INSERT INTO bookmarkstonode(bookmark, node, reponame) "
-            "VALUES %s ON DUPLICATE KEY UPDATE node=VALUES(node)" % args,
-            params=values)
+            b"INSERT INTO bookmarkstonode(bookmark, node, reponame) "
+            b"VALUES %s ON DUPLICATE KEY UPDATE node=VALUES(node)" % args,
+            params=values,
+        )
 
     def deletebookmarks(self, patterns):
         """Accepts list of bookmark patterns and deletes them.
@@ -170,28 +198,31 @@
         """
         if not self._connected:
             self.sqlconnect()
-        self.log.info("DELETE BOOKMARKS: %s" % patterns)
+        self.log.info(b"DELETE BOOKMARKS: %s" % patterns)
         for pattern in patterns:
             pattern = _convertbookmarkpattern(pattern)
             self.sqlcursor.execute(
-                "DELETE from bookmarkstonode WHERE bookmark LIKE (%s) "
-                "and reponame = %s",
-                params=(pattern, self.reponame))
+                b"DELETE from bookmarkstonode WHERE bookmark LIKE (%s) "
+                b"and reponame = %s",
+                params=(pattern, self.reponame),
+            )
 
     def getbundle(self, node):
         """Returns the bundleid for the bundle that contains the given node."""
         if not self._connected:
             self.sqlconnect()
-        self.log.info("GET BUNDLE %r %r" % (self.reponame, node))
+        self.log.info(b"GET BUNDLE %r %r" % (self.reponame, node))
         self.sqlcursor.execute(
-            "SELECT bundle from nodestobundle "
-            "WHERE node = %s AND reponame = %s", params=(node, self.reponame))
+            b"SELECT bundle from nodestobundle "
+            b"WHERE node = %s AND reponame = %s",
+            params=(node, self.reponame),
+        )
         result = self.sqlcursor.fetchall()
         if len(result) != 1 or len(result[0]) != 1:
-            self.log.info("No matching node")
+            self.log.info(b"No matching node")
             return None
         bundle = result[0][0]
-        self.log.info("Found bundle %r" % bundle)
+        self.log.info(b"Found bundle %r" % bundle)
         return bundle
 
     def getnode(self, bookmark):
@@ -199,33 +230,38 @@
         if not self._connected:
             self.sqlconnect()
         self.log.info(
-            "GET NODE reponame: %r bookmark: %r" % (self.reponame, bookmark))
+            b"GET NODE reponame: %r bookmark: %r" % (self.reponame, bookmark)
+        )
         self.sqlcursor.execute(
-            "SELECT node from bookmarkstonode WHERE "
-            "bookmark = %s AND reponame = %s", params=(bookmark, self.reponame))
+            b"SELECT node from bookmarkstonode WHERE "
+            b"bookmark = %s AND reponame = %s",
+            params=(bookmark, self.reponame),
+        )
         result = self.sqlcursor.fetchall()
         if len(result) != 1 or len(result[0]) != 1:
-            self.log.info("No matching bookmark")
+            self.log.info(b"No matching bookmark")
             return None
         node = result[0][0]
-        self.log.info("Found node %r" % node)
+        self.log.info(b"Found node %r" % node)
         return node
 
     def getbookmarks(self, query):
         if not self._connected:
             self.sqlconnect()
         self.log.info(
-            "QUERY BOOKMARKS reponame: %r query: %r" % (self.reponame, query))
+            b"QUERY BOOKMARKS reponame: %r query: %r" % (self.reponame, query)
+        )
         query = _convertbookmarkpattern(query)
         self.sqlcursor.execute(
-            "SELECT bookmark, node from bookmarkstonode WHERE "
-            "reponame = %s AND bookmark LIKE %s",
-            params=(self.reponame, query))
+            b"SELECT bookmark, node from bookmarkstonode WHERE "
+            b"reponame = %s AND bookmark LIKE %s",
+            params=(self.reponame, query),
+        )
         result = self.sqlcursor.fetchall()
         bookmarks = {}
         for row in result:
             if len(row) != 2:
-                self.log.info("Bad row returned: %s" % row)
+                self.log.info(b"Bad row returned: %s" % row)
                 continue
             bookmarks[row[0]] = row[1]
         return bookmarks
@@ -234,18 +270,24 @@
         if not self._connected:
             self.sqlconnect()
         self.log.info(
-            ("INSERT METADATA, QUERY BOOKMARKS reponame: %r " +
-             "node: %r, jsonmetadata: %s") %
-            (self.reponame, node, jsonmetadata))
+            (
+                b"INSERT METADATA, QUERY BOOKMARKS reponame: %r "
+                + b"node: %r, jsonmetadata: %s"
+            )
+            % (self.reponame, node, jsonmetadata)
+        )
 
         self.sqlcursor.execute(
-            "UPDATE nodesmetadata SET optional_json_metadata=%s WHERE "
-            "reponame=%s AND node=%s",
-            params=(jsonmetadata, self.reponame, node))
+            b"UPDATE nodesmetadata SET optional_json_metadata=%s WHERE "
+            b"reponame=%s AND node=%s",
+            params=(jsonmetadata, self.reponame, node),
+        )
+
 
 class CustomConverter(mysql.connector.conversion.MySQLConverter):
     """Ensure that all values being returned are returned as python string
     (versus the default byte arrays)."""
+
     def _STRING_to_python(self, value, dsc=None):
         return str(value)
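
CustomConverter exists because mysql.connector hands string columns back as
bytearrays; the override funnels them through str(). Note that this relies on
Python 2 semantics: under Python 3, str() on a bytearray yields its repr, so
an equivalent normalization needs an explicit decode, roughly:

    def to_text(value):
        if isinstance(value, (bytes, bytearray)):
            return value.decode('utf-8')   # assumes utf-8 column data
        return str(value)

    assert to_text(bytearray(b'scratch/bookmark')) == 'scratch/bookmark'
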
 
--- a/hgext/infinitepush/store.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/infinitepush/store.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,22 +11,24 @@
 import subprocess
 import tempfile
 
+from mercurial.pycompat import open
 from mercurial import (
     node,
     pycompat,
 )
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
 
 NamedTemporaryFile = tempfile.NamedTemporaryFile
 
+
 class BundleWriteException(Exception):
     pass
 
+
 class BundleReadException(Exception):
     pass
 
+
 class abstractbundlestore(object):
     """Defines the interface for bundle stores.
 
@@ -35,6 +37,7 @@
     be any Python object understood by the corresponding bundle index (see
     ``abstractbundleindex`` below).
     """
+
     __metaclass__ = abc.ABCMeta
 
     @abc.abstractmethod
@@ -56,18 +59,21 @@
         and close().
         """
 
+
 class filebundlestore(object):
     """bundle store in filesystem
 
     meant for storing bundles somewhere on disk and on network filesystems
     """
+
     def __init__(self, ui, repo):
         self.ui = ui
         self.repo = repo
-        self.storepath = ui.configpath('scratchbranch', 'storepath')
+        self.storepath = ui.configpath(b'scratchbranch', b'storepath')
         if not self.storepath:
-            self.storepath = self.repo.vfs.join("scratchbranches",
-                                                "filebundlestore")
+            self.storepath = self.repo.vfs.join(
+                b"scratchbranches", b"filebundlestore"
+            )
         if not os.path.exists(self.storepath):
             os.makedirs(self.storepath)
 
@@ -87,18 +93,19 @@
         if not os.path.exists(dirpath):
             os.makedirs(dirpath)
 
-        with open(self._filepath(filename), 'wb') as f:
+        with open(self._filepath(filename), b'wb') as f:
             f.write(data)
 
         return filename
 
     def read(self, key):
         try:
-            with open(self._filepath(key), 'rb') as f:
+            with open(self._filepath(key), b'rb') as f:
                 return f.read()
         except IOError:
             return None
 
+
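filebundlestore names bundles by content hash via a _filepath helper that this
hunk doesn't show; the sketch below assumes the usual fan-out-by-hash-prefix
layout, so treat the directory scheme as illustrative rather than the
extension's exact one.

    import hashlib
    import os

    def filepath(root, data):
        key = hashlib.sha1(data).hexdigest()      # content-addressed key
        return os.path.join(root, key[:2], key[2:4], key), key

    path, key = filepath('/tmp/store', b'raw bundle bytes')
    assert path.endswith(key)
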
 class externalbundlestore(abstractbundlestore):
     def __init__(self, put_binary, put_args, get_binary, get_args):
         """
@@ -120,8 +127,10 @@
     def _call_binary(self, args):
         p = subprocess.Popen(
             pycompat.rapply(procutil.tonativestr, args),
-            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-            close_fds=True)
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            close_fds=True,
+        )
         stdout, stderr = p.communicate()
         returncode = p.returncode
         return returncode, stdout, stderr
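
write() and read() below expand operator-configured argument templates with
str.format(), substituting {filename} (and {handle} on reads) into each
argument. The expansion on its own, with an invented template:

    put_args = ['--input', '{filename}', '--tag', 'bundle']
    formatted = [arg.format(filename='/tmp/bundleXYZ') for arg in put_args]
    assert formatted == ['--input', '/tmp/bundleXYZ', '--tag', 'bundle']
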
@@ -135,20 +144,24 @@
             temp.write(data)
             temp.flush()
             temp.seek(0)
-            formatted_args = [arg.format(filename=temp.name)
-                              for arg in self.put_args]
+            formatted_args = [
+                arg.format(filename=temp.name) for arg in self.put_args
+            ]
             returncode, stdout, stderr = self._call_binary(
-                [self.put_binary] + formatted_args)
+                [self.put_binary] + formatted_args
+            )
 
             if returncode != 0:
                 raise BundleWriteException(
-                    'Failed to upload to external store: %s' % stderr)
+                    b'Failed to upload to external store: %s' % stderr
+                )
             stdout_lines = stdout.splitlines()
             if len(stdout_lines) == 1:
                 return stdout_lines[0]
             else:
                 raise BundleWriteException(
-                    'Bad output from %s: %s' % (self.put_binary, stdout))
+                    b'Bad output from %s: %s' % (self.put_binary, stdout)
+                )
 
     def read(self, handle):
         # Won't work on windows because you can't open a file a second time
         # without closing it
@@ -156,12 +169,16 @@
         # TODO: rewrite without str.format() and replace NamedTemporaryFile()
         # with pycompat.namedtempfile()
         with NamedTemporaryFile() as temp:
-            formatted_args = [arg.format(filename=temp.name, handle=handle)
-                              for arg in self.get_args]
+            formatted_args = [
+                arg.format(filename=temp.name, handle=handle)
+                for arg in self.get_args
+            ]
             returncode, stdout, stderr = self._call_binary(
-                [self.get_binary] + formatted_args)
+                [self.get_binary] + formatted_args
+            )
 
             if returncode != 0:
                 raise BundleReadException(
-                    'Failed to download from external store: %s' % stderr)
+                    b'Failed to download from external store: %s' % stderr
+                )
             return temp.read()
--- a/hgext/journal.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/journal.py	Mon Oct 21 11:09:48 2019 -0400
@@ -49,14 +49,14 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 # storage format version; increment when the format changes
 storageversion = 0
 
 # namespaces
-bookmarktype = 'bookmark'
-wdirparenttype = 'wdirparent'
+bookmarktype = b'bookmark'
+wdirparenttype = b'wdirparent'
 # In a shared repository, what shared feature name is used
 # to indicate this namespace is shared with the source?
 sharednamespaces = {
@@ -65,33 +65,38 @@
 
 # Journal recording, register hooks and storage object
 def extsetup(ui):
-    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
-    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
+    extensions.wrapfunction(dispatch, b'runcommand', runcommand)
+    extensions.wrapfunction(bookmarks.bmstore, b'_write', recordbookmarks)
     extensions.wrapfilecache(
-        localrepo.localrepository, 'dirstate', wrapdirstate)
-    extensions.wrapfunction(hg, 'postshare', wrappostshare)
-    extensions.wrapfunction(hg, 'copystore', unsharejournal)
+        localrepo.localrepository, b'dirstate', wrapdirstate
+    )
+    extensions.wrapfunction(hg, b'postshare', wrappostshare)
+    extensions.wrapfunction(hg, b'copystore', unsharejournal)
+
 
 def reposetup(ui, repo):
     if repo.local():
         repo.journal = journalstorage(repo)
-        repo._wlockfreeprefix.add('namejournal')
+        repo._wlockfreeprefix.add(b'namejournal')
 
-        dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
+        dirstate, cached = localrepo.isfilecached(repo, b'dirstate')
         if cached:
             # already instantiated dirstate isn't yet marked as
             # "journal"-ing, even though repo.dirstate() was already
             # wrapped by own wrapdirstate()
             _setupdirstate(repo, dirstate)
 
+
 def runcommand(orig, lui, repo, cmd, fullargs, *args):
     """Track the command line options for recording in the journal"""
     journalstorage.recordcommand(*fullargs)
     return orig(lui, repo, cmd, fullargs, *args)
 
+
 def _setupdirstate(repo, dirstate):
     dirstate.journalstorage = repo.journal
-    dirstate.addparentchangecallback('journal', recorddirstateparents)
+    dirstate.addparentchangecallback(b'journal', recorddirstateparents)
+
 
 # hooks to record dirstate changes
 def wrapdirstate(orig, repo):
@@ -101,6 +106,7 @@
         _setupdirstate(repo, dirstate)
     return dirstate
 
+
 def recorddirstateparents(dirstate, old, new):
     """Records all dirstate parent changes in the journal."""
     old = list(old)
@@ -110,7 +116,9 @@
         oldhashes = old[:1] if old[1] == node.nullid else old
         newhashes = new[:1] if new[1] == node.nullid else new
         dirstate.journalstorage.record(
-            wdirparenttype, '.', oldhashes, newhashes)
+            wdirparenttype, b'.', oldhashes, newhashes
+        )
+
 
 # hooks to record bookmark changes (both local and remote)
 def recordbookmarks(orig, store, fp):
@@ -118,22 +126,24 @@
     repo = store._repo
     if util.safehasattr(repo, 'journal'):
         oldmarks = bookmarks.bmstore(repo)
-        for mark, value in store.iteritems():
+        for mark, value in pycompat.iteritems(store):
             oldvalue = oldmarks.get(mark, node.nullid)
             if value != oldvalue:
                 repo.journal.record(bookmarktype, mark, oldvalue, value)
     return orig(store, fp)
 
+
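recordbookmarks above journals only real moves: each incoming value is
compared against the old bookmark store, with a missing bookmark treated as
nullid. The same diff in plain dicts (names and values are invented):

    nullid = b'\0' * 20
    old = {'book1': b'aaaa'}
    new = {'book1': b'aaaa', 'book2': b'bbbb'}

    moves = [(mark, old.get(mark, nullid), value)
             for mark, value in sorted(new.items())
             if value != old.get(mark, nullid)]
    assert moves == [('book2', nullid, b'bbbb')]
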
 # shared repository support
 def _readsharedfeatures(repo):
     """A set of shared features for this repository"""
     try:
-        return set(repo.vfs.read('shared').splitlines())
+        return set(repo.vfs.read(b'shared').splitlines())
     except IOError as inst:
         if inst.errno != errno.ENOENT:
             raise
         return set()
 
+
 def _mergeentriesiter(*iterables, **kwargs):
     """Given a set of sorted iterables, yield the next entry in merged order
 
@@ -154,7 +164,7 @@
             pass
 
     while iterable_map:
-        value, key, it = order(iterable_map.itervalues())
+        value, key, it = order(pycompat.itervalues(iterable_map))
         yield value
         try:
             iterable_map[key][0] = next(it)
@@ -162,40 +172,52 @@
             # this iterable is empty, remove it from consideration
             del iterable_map[key]
 
+
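
Editor's note on the hunk above: _mergeentriesiter is a hand-rolled k-way
merge over journal iterators that are already sorted, selectable between
order=min (oldest first) and the default order=max. A minimal stdlib
analogue, assuming pre-sorted inputs (an illustration, not the extension's
code)::

    import heapq

    # oldest-first merge of already-ascending inputs, like order=min:
    assert list(heapq.merge([1, 4], [2, 3])) == [1, 2, 3, 4]
    # newest-first over already-descending inputs, like the default
    # order=max (heapq.merge grew reverse= in Python 3.5):
    assert list(heapq.merge([4, 1], [3, 2], reverse=True)) == [4, 3, 2, 1]
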
 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
     """Mark this shared working copy as sharing journal information"""
     with destrepo.wlock():
         orig(sourcerepo, destrepo, **kwargs)
-        with destrepo.vfs('shared', 'a') as fp:
-            fp.write('journal\n')
+        with destrepo.vfs(b'shared', b'a') as fp:
+            fp.write(b'journal\n')
+
 
 def unsharejournal(orig, ui, repo, repopath):
     """Copy shared journal entries into this repo when unsharing"""
-    if (repo.path == repopath and repo.shared() and
-            util.safehasattr(repo, 'journal')):
+    if (
+        repo.path == repopath
+        and repo.shared()
+        and util.safehasattr(repo, 'journal')
+    ):
         sharedrepo = hg.sharedreposource(repo)
         sharedfeatures = _readsharedfeatures(repo)
-        if sharedrepo and sharedfeatures > {'journal'}:
+        if sharedrepo and sharedfeatures > {b'journal'}:
             # there is a shared repository and there are shared journal entries
             # to copy. move shared data over from source to destination but
             # move the local file first
-            if repo.vfs.exists('namejournal'):
-                journalpath = repo.vfs.join('namejournal')
-                util.rename(journalpath, journalpath + '.bak')
+            if repo.vfs.exists(b'namejournal'):
+                journalpath = repo.vfs.join(b'namejournal')
+                util.rename(journalpath, journalpath + b'.bak')
             storage = repo.journal
             local = storage._open(
-                repo.vfs, filename='namejournal.bak', _newestfirst=False)
+                repo.vfs, filename=b'namejournal.bak', _newestfirst=False
+            )
             shared = (
-                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
-                if sharednamespaces.get(e.namespace) in sharedfeatures)
+                e
+                for e in storage._open(sharedrepo.vfs, _newestfirst=False)
+                if sharednamespaces.get(e.namespace) in sharedfeatures
+            )
             for entry in _mergeentriesiter(local, shared, order=min):
                 storage._write(repo.vfs, entry)
 
     return orig(ui, repo, repopath)
 
-class journalentry(collections.namedtuple(
+
+class journalentry(
+    collections.namedtuple(
         r'journalentry',
-        r'timestamp user command namespace name oldhashes newhashes')):
+        r'timestamp user command namespace name oldhashes newhashes',
+    )
+):
     """Individual journal entry
 
     * timestamp: a mercurial (time, timezone) tuple
@@ -212,29 +234,52 @@
     timestamp and timezone are separated by a space.
 
     """
+
     @classmethod
     def fromstorage(cls, line):
-        (time, user, command, namespace, name,
-         oldhashes, newhashes) = line.split('\n')
+        (
+            time,
+            user,
+            command,
+            namespace,
+            name,
+            oldhashes,
+            newhashes,
+        ) = line.split(b'\n')
         timestamp, tz = time.split()
         timestamp, tz = float(timestamp), int(tz)
-        oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
-        newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
+        oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(b','))
+        newhashes = tuple(node.bin(hash) for hash in newhashes.split(b','))
         return cls(
-            (timestamp, tz), user, command, namespace, name,
-            oldhashes, newhashes)
+            (timestamp, tz),
+            user,
+            command,
+            namespace,
+            name,
+            oldhashes,
+            newhashes,
+        )
 
     def __bytes__(self):
         """bytes representation for storage"""
-        time = ' '.join(map(pycompat.bytestr, self.timestamp))
-        oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
-        newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
-        return '\n'.join((
-            time, self.user, self.command, self.namespace, self.name,
-            oldhashes, newhashes))
+        time = b' '.join(map(pycompat.bytestr, self.timestamp))
+        oldhashes = b','.join([node.hex(hash) for hash in self.oldhashes])
+        newhashes = b','.join([node.hex(hash) for hash in self.newhashes])
+        return b'\n'.join(
+            (
+                time,
+                self.user,
+                self.command,
+                self.namespace,
+                self.name,
+                oldhashes,
+                newhashes,
+            )
+        )
 
     __str__ = encoding.strmethod(__bytes__)
 
+
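
The docstring above pins down the storage format: seven newline-separated
fields, with old/new hashes comma-joined and the timestamp stored as
"time timezone". A hedged round-trip sketch with made-up field values
(real entries carry 40-character hex node ids)::

    fields = (b'1571673000.0 14400', b'alice', b'hg up feature',
              b'bookmark', b'feature', b'c5dc122fdc2b', b'd782cce137fd')
    record = b'\n'.join(fields)

    timefield = record.split(b'\n')[0]
    timestamp, tz = timefield.split()
    # float() and int() accept ASCII bytes on Python 3
    timestamp, tz = float(timestamp), int(tz)
    assert (timestamp, tz) == (1571673000.0, 14400)
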
 class journalstorage(object):
     """Storage for journal entries
 
@@ -252,6 +297,7 @@
     the dirstate).
 
     """
+
     _currentcommand = ()
     _lockref = None
 
@@ -265,18 +311,19 @@
         if repo.shared():
             features = _readsharedfeatures(repo)
             sharedrepo = hg.sharedreposource(repo)
-            if sharedrepo is not None and 'journal' in features:
+            if sharedrepo is not None and b'journal' in features:
                 self.sharedvfs = sharedrepo.vfs
                 self.sharedfeatures = features
 
     # track the current command for recording in journal entries
     @property
     def command(self):
-        commandstr = ' '.join(
-            map(procutil.shellquote, journalstorage._currentcommand))
-        if '\n' in commandstr:
+        commandstr = b' '.join(
+            map(procutil.shellquote, journalstorage._currentcommand)
+        )
+        if b'\n' in commandstr:
             # truncate multi-line commands
-            commandstr = commandstr.partition('\n')[0] + ' ...'
+            commandstr = commandstr.partition(b'\n')[0] + b' ...'
         return commandstr
 
     @classmethod
@@ -301,18 +348,22 @@
     def jlock(self, vfs):
         """Create a lock for the journal file"""
         if self._currentlock(self._lockref) is not None:
-            raise error.Abort(_('journal lock does not support nesting'))
-        desc = _('journal of %s') % vfs.base
+            raise error.Abort(_(b'journal lock does not support nesting'))
+        desc = _(b'journal of %s') % vfs.base
         try:
-            l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
+            l = lock.lock(vfs, b'namejournal.lock', 0, desc=desc)
         except error.LockHeld as inst:
             self.ui.warn(
-                _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
+                _(b"waiting for lock on %s held by %r\n") % (desc, inst.locker)
+            )
             # default to 600 seconds timeout
             l = lock.lock(
-                vfs, 'namejournal.lock',
-                self.ui.configint("ui", "timeout"), desc=desc)
-            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
+                vfs,
+                b'namejournal.lock',
+                self.ui.configint(b"ui", b"timeout"),
+                desc=desc,
+            )
+            self.ui.warn(_(b"got lock after %s seconds\n") % l.delay)
         self._lockref = weakref.ref(l)
         return l
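
jlock's no-nesting guard above keeps only a weak reference to the last
lock it handed out, so a released (or collected) lock never blocks the
next caller. A standalone sketch of the idea, with a stand-in lock class
instead of mercurial.lock (hedged, simplified)::

    import weakref

    class fakelock(object):          # stand-in for mercurial.lock.lock
        held = True
        def release(self):
            self.held = False

    _lockref = None

    def _currentlock(ref):
        l = ref and ref()
        return l if l is not None and l.held else None

    def jlock():
        global _lockref
        if _currentlock(_lockref) is not None:
            raise RuntimeError('journal lock does not support nesting')
        l = fakelock()
        _lockref = weakref.ref(l)
        return l

    first = jlock()
    first.release()
    second = jlock()                 # allowed again after release
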
 
@@ -334,8 +385,14 @@
             newhashes = [newhashes]
 
         entry = journalentry(
-            dateutil.makedate(), self.user, self.command, namespace, name,
-            oldhashes, newhashes)
+            dateutil.makedate(),
+            self.user,
+            self.command,
+            namespace,
+            name,
+            oldhashes,
+            newhashes,
+        )
 
         vfs = self.vfs
         if self.sharedvfs is not None:
@@ -349,24 +406,25 @@
     def _write(self, vfs, entry):
         with self.jlock(vfs):
             # open file in append mode to ensure it is created if missing
-            with vfs('namejournal', mode='a+b') as f:
+            with vfs(b'namejournal', mode=b'a+b') as f:
                 f.seek(0, os.SEEK_SET)
                 # Read just enough bytes to get a version number (up to 2
                 # digits plus separator)
-                version = f.read(3).partition('\0')[0]
-                if version and version != "%d" % storageversion:
+                version = f.read(3).partition(b'\0')[0]
+                if version and version != b"%d" % storageversion:
                     # different version of the storage. Exit early (and do
                     # not write anything) if this is not a version we can
                     # handle or the file is corrupt. In future, perhaps
                     # rotate the file instead?
                     self.ui.warn(
-                        _("unsupported journal file version '%s'\n") % version)
+                        _(b"unsupported journal file version '%s'\n") % version
+                    )
                     return
                 if not version:
                     # empty file, write version first
-                    f.write(("%d" % storageversion) + '\0')
+                    f.write((b"%d" % storageversion) + b'\0')
                 f.seek(0, os.SEEK_END)
-                f.write(bytes(entry) + '\0')
+                f.write(bytes(entry) + b'\0')
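
_write above (and _open below) define a tiny framing format: a version
header, then NUL-terminated records. A hedged round-trip sketch of just
the framing::

    storageversion = 0

    def serialize(records):
        # header first, then one NUL terminator per record, as _write appends
        return b'\0'.join([b"%d" % storageversion] + records) + b'\0'

    def parse(raw):
        lines = raw.split(b'\0')
        if lines[0] != b"%d" % storageversion:
            raise ValueError("unknown journal file version %r" % lines[0])
        return [l for l in lines[1:] if l]   # skip the trailing empty chunk

    assert parse(serialize([b'rec1', b'rec2'])) == [b'rec1', b'rec2']
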
 
     def filtered(self, namespace=None, name=None):
         """Yield all journal entries with the given namespace or name
@@ -403,22 +461,24 @@
         # iterate over both local and shared entries, but only those
         # shared entries that are among the currently shared features
         shared = (
-            e for e in self._open(self.sharedvfs)
-            if sharednamespaces.get(e.namespace) in self.sharedfeatures)
+            e
+            for e in self._open(self.sharedvfs)
+            if sharednamespaces.get(e.namespace) in self.sharedfeatures
+        )
         return _mergeentriesiter(local, shared)
 
-    def _open(self, vfs, filename='namejournal', _newestfirst=True):
+    def _open(self, vfs, filename=b'namejournal', _newestfirst=True):
         if not vfs.exists(filename):
             return
 
         with vfs(filename) as f:
             raw = f.read()
 
-        lines = raw.split('\0')
+        lines = raw.split(b'\0')
         version = lines and lines[0]
-        if version != "%d" % storageversion:
-            version = version or _('not available')
-            raise error.Abort(_("unknown journal file version '%s'") % version)
+        if version != b"%d" % storageversion:
+            version = version or _(b'not available')
+            raise error.Abort(_(b"unknown journal file version '%s'") % version)
 
         # Skip the first line, it's a version number. Normally we iterate over
         # these in reverse order to list newest first; only when copying across
@@ -431,16 +491,22 @@
                 continue
             yield journalentry.fromstorage(line)
 
+
 # journal reading
 # log options that don't make sense for journal
-_ignoreopts = ('no-merges', 'graph')
+_ignoreopts = (b'no-merges', b'graph')
+
+
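
For context on the option table reflowed below: the journal command reuses
cmdutil.logopts minus the flags listed in _ignoreopts. A miniature with a
made-up subset of logopts (illustrative only)::

    _ignoreopts = (b'no-merges', b'graph')
    logopts = [                      # illustrative subset of cmdutil.logopts
        (b'M', b'no-merges', None, b'do not show merges'),
        (b'G', b'graph', None, b'show the revision DAG'),
        (b'l', b'limit', b'', b'limit number of changes displayed'),
    ]
    opts = [opt for opt in logopts if opt[1] not in _ignoreopts]
    assert [o[1] for o in opts] == [b'limit']
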
 @command(
-    'journal', [
-        ('', 'all', None, 'show history for all names'),
-        ('c', 'commits', None, 'show commit metadata'),
-    ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
-    '[OPTION]... [BOOKMARKNAME]',
-    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+    b'journal',
+    [
+        (b'', b'all', None, b'show history for all names'),
+        (b'c', b'commits', None, b'show commit metadata'),
+    ]
+    + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
+    b'[OPTION]... [BOOKMARKNAME]',
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def journal(ui, repo, *args, **opts):
     """show the previous position of bookmarks and the working copy
 
@@ -467,61 +533,72 @@
 
     """
     opts = pycompat.byteskwargs(opts)
-    name = '.'
-    if opts.get('all'):
+    name = b'.'
+    if opts.get(b'all'):
         if args:
             raise error.Abort(
-                _("You can't combine --all and filtering on a name"))
+                _(b"You can't combine --all and filtering on a name")
+            )
         name = None
     if args:
         name = args[0]
 
-    fm = ui.formatter('journal', opts)
+    fm = ui.formatter(b'journal', opts)
+
     def formatnodes(nodes):
-        return fm.formatlist(map(fm.hexfunc, nodes), name='node', sep=',')
+        return fm.formatlist(map(fm.hexfunc, nodes), name=b'node', sep=b',')
 
-    if opts.get("template") != "json":
+    if opts.get(b"template") != b"json":
         if name is None:
-            displayname = _('the working copy and bookmarks')
+            displayname = _(b'the working copy and bookmarks')
         else:
-            displayname = "'%s'" % name
-        ui.status(_("previous locations of %s:\n") % displayname)
+            displayname = b"'%s'" % name
+        ui.status(_(b"previous locations of %s:\n") % displayname)
 
     limit = logcmdutil.getlimit(opts)
     entry = None
-    ui.pager('journal')
+    ui.pager(b'journal')
     for count, entry in enumerate(repo.journal.filtered(name=name)):
         if count == limit:
             break
 
         fm.startitem()
-        fm.condwrite(ui.verbose, 'oldnodes', '%s -> ',
-                     formatnodes(entry.oldhashes))
-        fm.write('newnodes', '%s', formatnodes(entry.newhashes))
-        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
+        fm.condwrite(
+            ui.verbose, b'oldnodes', b'%s -> ', formatnodes(entry.oldhashes)
+        )
+        fm.write(b'newnodes', b'%s', formatnodes(entry.newhashes))
+        fm.condwrite(ui.verbose, b'user', b' %-8s', entry.user)
         fm.condwrite(
-            opts.get('all') or name.startswith('re:'),
-            'name', '  %-8s', entry.name)
+            opts.get(b'all') or name.startswith(b're:'),
+            b'name',
+            b'  %-8s',
+            entry.name,
+        )
 
-        fm.condwrite(ui.verbose, 'date', ' %s',
-                     fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2'))
-        fm.write('command', '  %s\n', entry.command)
+        fm.condwrite(
+            ui.verbose,
+            b'date',
+            b' %s',
+            fm.formatdate(entry.timestamp, b'%Y-%m-%d %H:%M %1%2'),
+        )
+        fm.write(b'command', b'  %s\n', entry.command)
 
-        if opts.get("commits"):
+        if opts.get(b"commits"):
             if fm.isplain():
                 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
             else:
                 displayer = logcmdutil.changesetformatter(
-                    ui, repo, fm.nested('changesets'), diffopts=opts)
+                    ui, repo, fm.nested(b'changesets'), diffopts=opts
+                )
             for hash in entry.newhashes:
                 try:
                     ctx = repo[hash]
                     displayer.show(ctx)
                 except error.RepoLookupError as e:
-                    fm.plain("%s\n\n" % pycompat.bytestr(e))
+                    fm.plain(b"%s\n\n" % pycompat.bytestr(e))
             displayer.close()
 
     fm.end()
 
     if entry is None:
-        ui.status(_("no recorded locations\n"))
+        ui.status(_(b"no recorded locations\n"))
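
Stepping back before the next file: the two mechanical edits repeated
throughout journal.py are str -> bytes literals and dict.iteritems() /
.itervalues() going through pycompat. A hedged sketch of the shim's shape
(simplified from mercurial.pycompat)::

    import sys

    if sys.version_info[0] >= 3:
        def iteritems(d):
            return iter(d.items())

        def itervalues(d):
            return iter(d.values())
    else:
        def iteritems(d):
            return d.iteritems()

        def itervalues(d):
            return d.itervalues()
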
--- a/hgext/keyword.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/keyword.py	Mon Oct 21 11:09:48 2019 -0400
@@ -90,6 +90,7 @@
 import weakref
 
 from mercurial.i18n import _
+from mercurial.pycompat import getattr
 from mercurial.hgweb import webcommands
 
 from mercurial import (
@@ -122,29 +123,33 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 # hg commands that do not act on keywords
-nokwcommands = ('add addremove annotate bundle export grep incoming init log'
-                ' outgoing push tip verify convert email glog')
+nokwcommands = (
+    b'add addremove annotate bundle export grep incoming init log'
+    b' outgoing push tip verify convert email glog'
+)
 
 # webcommands that do not act on keywords
-nokwwebcommands = ('annotate changeset rev filediff diff comparison')
+nokwwebcommands = b'annotate changeset rev filediff diff comparison'
 
 # hg commands that trigger expansion only when writing to working dir,
 # not when reading filelog, and unexpand when reading from working dir
-restricted = ('merge kwexpand kwshrink record qrecord resolve transplant'
-              ' unshelve rebase graft backout histedit fetch')
+restricted = (
+    b'merge kwexpand kwshrink record qrecord resolve transplant'
+    b' unshelve rebase graft backout histedit fetch'
+)
 
 # names of extensions using dorecord
-recordextensions = 'record'
+recordextensions = b'record'
 
 colortable = {
-    'kwfiles.enabled': 'green bold',
-    'kwfiles.deleted': 'cyan bold underline',
-    'kwfiles.enabledunknown': 'green',
-    'kwfiles.ignored': 'bold',
-    'kwfiles.ignoredunknown': 'none'
+    b'kwfiles.enabled': b'green bold',
+    b'kwfiles.deleted': b'cyan bold underline',
+    b'kwfiles.enabledunknown': b'green',
+    b'kwfiles.ignored': b'bold',
+    b'kwfiles.ignoredunknown': b'none',
 }
 
 templatefilter = registrar.templatefilter()
@@ -152,64 +157,75 @@
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('keywordset', 'svn',
-    default=False,
+configitem(
+    b'keywordset', b'svn', default=False,
 )
 # date like in cvs' $Date
-@templatefilter('utcdate', intype=templateutil.date)
+@templatefilter(b'utcdate', intype=templateutil.date)
 def utcdate(date):
     '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
     '''
-    dateformat = '%Y/%m/%d %H:%M:%S'
+    dateformat = b'%Y/%m/%d %H:%M:%S'
     return dateutil.datestr((date[0], 0), dateformat)
+
+
 # date like in svn's $Date
-@templatefilter('svnisodate', intype=templateutil.date)
+@templatefilter(b'svnisodate', intype=templateutil.date)
 def svnisodate(date):
     '''Date. Returns a date in this format: "2009-08-18 13:00:13
     +0200 (Tue, 18 Aug 2009)".
     '''
-    return dateutil.datestr(date, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
+    return dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
+
+
 # date like in svn's $Id
-@templatefilter('svnutcdate', intype=templateutil.date)
+@templatefilter(b'svnutcdate', intype=templateutil.date)
 def svnutcdate(date):
     '''Date. Returns a UTC-date in this format: "2009-08-18
     11:00:13Z".
     '''
-    dateformat = '%Y-%m-%d %H:%M:%SZ'
+    dateformat = b'%Y-%m-%d %H:%M:%SZ'
     return dateutil.datestr((date[0], 0), dateformat)
 
+
 # make keyword tools accessible
-kwtools = {'hgcmd': ''}
+kwtools = {b'hgcmd': b''}
+
 
 def _defaultkwmaps(ui):
     '''Returns default keywordmaps according to keywordset configuration.'''
     templates = {
-        'Revision': '{node|short}',
-        'Author': '{author|user}',
+        b'Revision': b'{node|short}',
+        b'Author': b'{author|user}',
     }
-    kwsets = ({
-        'Date': '{date|utcdate}',
-        'RCSfile': '{file|basename},v',
-        'RCSFile': '{file|basename},v', # kept for backwards compatibility
-                                        # with hg-keyword
-        'Source': '{root}/{file},v',
-        'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
-        'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
-    }, {
-        'Date': '{date|svnisodate}',
-        'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
-        'LastChangedRevision': '{node|short}',
-        'LastChangedBy': '{author|user}',
-        'LastChangedDate': '{date|svnisodate}',
-    })
-    templates.update(kwsets[ui.configbool('keywordset', 'svn')])
+    kwsets = (
+        {
+            b'Date': b'{date|utcdate}',
+            b'RCSfile': b'{file|basename},v',
+            # kept for backwards compatibility with hg-keyword
+            b'RCSFile': b'{file|basename},v',
+            b'Source': b'{root}/{file},v',
+            b'Id': b'{file|basename},v {node|short} {date|utcdate} {author|user}',
+            b'Header': b'{root}/{file},v {node|short} {date|utcdate} {author|user}',
+        },
+        {
+            b'Date': b'{date|svnisodate}',
+            b'Id': b'{file|basename},v {node|short} {date|svnutcdate} {author|user}',
+            b'LastChangedRevision': b'{node|short}',
+            b'LastChangedBy': b'{author|user}',
+            b'LastChangedDate': b'{date|svnisodate}',
+        },
+    )
+    templates.update(kwsets[ui.configbool(b'keywordset', b'svn')])
     return templates
 
+
 def _shrinktext(text, subfunc):
     '''Helper for keyword expansion removal in text.
     Depending on subfunc also returns number of substitutions.'''
     return subfunc(br'$\1$', text)
 
+
 def _preselect(wstatus, changed):
     '''Retrieves modified and added files from a working directory state
     and returns the subset of each contained in given changed files
@@ -228,12 +244,12 @@
     def __init__(self, ui, repo, inc, exc):
         self.ui = ui
         self._repo = weakref.ref(repo)
-        self.match = match.match(repo.root, '', [], inc, exc)
-        self.restrict = kwtools['hgcmd'] in restricted.split()
+        self.match = match.match(repo.root, b'', [], inc, exc)
+        self.restrict = kwtools[b'hgcmd'] in restricted.split()
         self.postcommit = False
 
-        kwmaps = self.ui.configitems('keywordmaps')
-        if kwmaps: # override default templates
+        kwmaps = self.ui.configitems(b'keywordmaps')
+        if kwmaps:  # override default templates
             self.templates = dict(kwmaps)
         else:
             self.templates = _defaultkwmaps(self.ui)
@@ -245,7 +261,7 @@
     @util.propertycache
     def escape(self):
         '''Returns bar-separated and escaped keywords.'''
-        return '|'.join(map(stringutil.reescape, self.templates.keys()))
+        return b'|'.join(map(stringutil.reescape, self.templates.keys()))
 
     @util.propertycache
     def rekw(self):
@@ -259,14 +275,17 @@
 
     def substitute(self, data, path, ctx, subfunc):
         '''Replaces keywords in data with expanded template.'''
+
         def kwsub(mobj):
             kw = mobj.group(1)
-            ct = logcmdutil.maketemplater(self.ui, self.repo,
-                                          self.templates[kw])
+            ct = logcmdutil.maketemplater(
+                self.ui, self.repo, self.templates[kw]
+            )
             self.ui.pushbuffer()
             ct.show(ctx, root=self.repo.root, file=path)
             ekw = templatefilters.firstline(self.ui.popbuffer())
-            return '$%s: %s $' % (kw, ekw)
+            return b'$%s: %s $' % (kw, ekw)
+
         return subfunc(kwsub, data)
 
     def linkctx(self, path, fileid):
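
The escape/rekw/substitute trio above is the heart of expansion: one
alternation built from the configured keyword names, plus a sub() callback
that renders each template. A hedged miniature with canned template values;
the shrink pattern below is a guess at rekwexp's shape, not a copy of it::

    import re

    templates = {b'Revision': b'deadbeef', b'Author': b'alice'}
    escaped = b'|'.join(re.escape(k) for k in sorted(templates))
    rekw = re.compile(br'\$(%s)\$' % escaped)

    def expand(data):
        kwsub = lambda m: b'$%s: %s $' % (m.group(1), templates[m.group(1)])
        return rekw.sub(kwsub, data)

    assert expand(b'# $Revision$') == b'# $Revision: deadbeef $'

    # shrinking, as in _shrinktext, maps the expanded form back:
    rekwexp = re.compile(br'\$(%s): [^$\n]* \$' % escaped)
    assert rekwexp.sub(br'$\1$', expand(b'# $Revision$')) == b'# $Revision$'
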
@@ -275,8 +294,11 @@
 
     def expand(self, path, node, data):
         '''Returns data with keywords expanded.'''
-        if (not self.restrict and self.match(path)
-            and not stringutil.binary(data)):
+        if (
+            not self.restrict
+            and self.match(path)
+            and not stringutil.binary(data)
+        ):
             ctx = self.linkctx(path, node)
             return self.substitute(data, path, ctx, self.rekw.sub)
         return data
@@ -284,15 +306,15 @@
     def iskwfile(self, cand, ctx):
         '''Returns subset of candidates which are configured for keyword
         expansion but are not symbolic links.'''
-        return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]
+        return [f for f in cand if self.match(f) and b'l' not in ctx.flags(f)]
 
     def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
         '''Overwrites selected files expanding/shrinking keywords.'''
-        if self.restrict or lookup or self.postcommit: # exclude kw_copy
+        if self.restrict or lookup or self.postcommit:  # exclude kw_copy
             candidates = self.iskwfile(candidates, ctx)
         if not candidates:
             return
-        kwcmd = self.restrict and lookup # kwexpand/kwshrink
+        kwcmd = self.restrict and lookup  # kwexpand/kwshrink
         if self.restrict or expand and lookup:
             mf = ctx.manifest()
         if self.restrict or rekw:
@@ -300,9 +322,9 @@
         else:
             re_kw = self.rekwexp
         if expand:
-            msg = _('overwriting %s expanding keywords\n')
+            msg = _(b'overwriting %s expanding keywords\n')
         else:
-            msg = _('overwriting %s shrinking keywords\n')
+            msg = _(b'overwriting %s shrinking keywords\n')
         for f in candidates:
             if self.restrict:
                 data = self.repo.file(f).read(mf[f])
@@ -329,7 +351,7 @@
                 data, found = _shrinktext(data, re_kw.subn)
             if found:
                 self.ui.note(msg % f)
-                fp = self.repo.wvfs(f, "wb", atomictemp=True)
+                fp = self.repo.wvfs(f, b"wb", atomictemp=True)
                 fp.write(data)
                 fp.close()
                 if kwcmd:
@@ -346,7 +368,7 @@
     def shrinklines(self, fname, lines):
         '''Returns lines with keyword substitutions removed.'''
         if self.match(fname):
-            text = ''.join(lines)
+            text = b''.join(lines)
             if not stringutil.binary(text):
                 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
         return lines
@@ -358,11 +380,13 @@
             return self.shrink(fname, data)
         return data
 
+
 class kwfilelog(filelog.filelog):
     '''
     Subclass of filelog to hook into its read, add, cmp methods.
     Keywords are "stored" unexpanded, and processed on reading.
     '''
+
     def __init__(self, opener, kwt, path):
         super(kwfilelog, self).__init__(opener, path)
         self.kwt = kwt
@@ -385,35 +409,44 @@
         text = self.kwt.shrink(self.path, text)
         return super(kwfilelog, self).cmp(node, text)
 
+
 def _status(ui, repo, wctx, kwt, *pats, **opts):
     '''Bails out if [keyword] configuration is not active.
     Returns status of working directory.'''
     if kwt:
         opts = pycompat.byteskwargs(opts)
-        return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
-                           unknown=opts.get('unknown') or opts.get('all'))
-    if ui.configitems('keyword'):
-        raise error.Abort(_('[keyword] patterns cannot match'))
-    raise error.Abort(_('no [keyword] patterns configured'))
+        return repo.status(
+            match=scmutil.match(wctx, pats, opts),
+            clean=True,
+            unknown=opts.get(b'unknown') or opts.get(b'all'),
+        )
+    if ui.configitems(b'keyword'):
+        raise error.Abort(_(b'[keyword] patterns cannot match'))
+    raise error.Abort(_(b'no [keyword] patterns configured'))
+
 
 def _kwfwrite(ui, repo, expand, *pats, **opts):
     '''Selects files and passes them to kwtemplater.overwrite.'''
     wctx = repo[None]
     if len(wctx.parents()) > 1:
-        raise error.Abort(_('outstanding uncommitted merge'))
+        raise error.Abort(_(b'outstanding uncommitted merge'))
     kwt = getattr(repo, '_keywordkwt', None)
     with repo.wlock():
         status = _status(ui, repo, wctx, kwt, *pats, **opts)
         if status.modified or status.added or status.removed or status.deleted:
-            raise error.Abort(_('outstanding uncommitted changes'))
+            raise error.Abort(_(b'outstanding uncommitted changes'))
         kwt.overwrite(wctx, status.clean, True, expand)
 
-@command('kwdemo',
-         [('d', 'default', None, _('show default keyword template maps')),
-          ('f', 'rcfile', '',
-           _('read maps from rcfile'), _('FILE'))],
-         _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
-         optionalrepo=True)
+
+@command(
+    b'kwdemo',
+    [
+        (b'd', b'default', None, _(b'show default keyword template maps')),
+        (b'f', b'rcfile', b'', _(b'read maps from rcfile'), _(b'FILE')),
+    ],
+    _(b'hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
+    optionalrepo=True,
+)
 def demo(ui, repo, *args, **opts):
     '''print [keywordmaps] configuration and an expansion example
 
@@ -427,56 +460,57 @@
 
     See :hg:`help templates` for information on templates and filters.
     '''
+
     def demoitems(section, items):
-        ui.write('[%s]\n' % section)
+        ui.write(b'[%s]\n' % section)
         for k, v in sorted(items):
             if isinstance(v, bool):
                 v = stringutil.pprint(v)
-            ui.write('%s = %s\n' % (k, v))
+            ui.write(b'%s = %s\n' % (k, v))
 
-    fn = 'demo.txt'
-    tmpdir = pycompat.mkdtemp('', 'kwdemo.')
-    ui.note(_('creating temporary repository at %s\n') % tmpdir)
+    fn = b'demo.txt'
+    tmpdir = pycompat.mkdtemp(b'', b'kwdemo.')
+    ui.note(_(b'creating temporary repository at %s\n') % tmpdir)
     if repo is None:
         baseui = ui
     else:
         baseui = repo.baseui
     repo = localrepo.instance(baseui, tmpdir, create=True)
-    ui.setconfig('keyword', fn, '', 'keyword')
-    svn = ui.configbool('keywordset', 'svn')
+    ui.setconfig(b'keyword', fn, b'', b'keyword')
+    svn = ui.configbool(b'keywordset', b'svn')
     # explicitly set keywordset for demo output
-    ui.setconfig('keywordset', 'svn', svn, 'keyword')
+    ui.setconfig(b'keywordset', b'svn', svn, b'keyword')
 
-    uikwmaps = ui.configitems('keywordmaps')
+    uikwmaps = ui.configitems(b'keywordmaps')
     if args or opts.get(r'rcfile'):
-        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
+        ui.status(_(b'\n\tconfiguration using custom keyword template maps\n'))
         if uikwmaps:
-            ui.status(_('\textending current template maps\n'))
+            ui.status(_(b'\textending current template maps\n'))
         if opts.get(r'default') or not uikwmaps:
             if svn:
-                ui.status(_('\toverriding default svn keywordset\n'))
+                ui.status(_(b'\toverriding default svn keywordset\n'))
             else:
-                ui.status(_('\toverriding default cvs keywordset\n'))
+                ui.status(_(b'\toverriding default cvs keywordset\n'))
         if opts.get(r'rcfile'):
-            ui.readconfig(opts.get('rcfile'))
+            ui.readconfig(opts.get(r'rcfile'))
         if args:
             # simulate hgrc parsing
-            rcmaps = '[keywordmaps]\n%s\n' % '\n'.join(args)
-            repo.vfs.write('hgrc', rcmaps)
-            ui.readconfig(repo.vfs.join('hgrc'))
-        kwmaps = dict(ui.configitems('keywordmaps'))
+            rcmaps = b'[keywordmaps]\n%s\n' % b'\n'.join(args)
+            repo.vfs.write(b'hgrc', rcmaps)
+            ui.readconfig(repo.vfs.join(b'hgrc'))
+        kwmaps = dict(ui.configitems(b'keywordmaps'))
     elif opts.get(r'default'):
         if svn:
-            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
+            ui.status(_(b'\n\tconfiguration using default svn keywordset\n'))
         else:
-            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
+            ui.status(_(b'\n\tconfiguration using default cvs keywordset\n'))
         kwmaps = _defaultkwmaps(ui)
         if uikwmaps:
-            ui.status(_('\tdisabling current template maps\n'))
-            for k, v in kwmaps.iteritems():
-                ui.setconfig('keywordmaps', k, v, 'keyword')
+            ui.status(_(b'\tdisabling current template maps\n'))
+            for k, v in pycompat.iteritems(kwmaps):
+                ui.setconfig(b'keywordmaps', k, v, b'keyword')
     else:
-        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
+        ui.status(_(b'\n\tconfiguration using current keyword template maps\n'))
         if uikwmaps:
             kwmaps = dict(uikwmaps)
         else:
@@ -484,31 +518,34 @@
 
     uisetup(ui)
     reposetup(ui, repo)
-    ui.write(('[extensions]\nkeyword =\n'))
-    demoitems('keyword', ui.configitems('keyword'))
-    demoitems('keywordset', ui.configitems('keywordset'))
-    demoitems('keywordmaps', kwmaps.iteritems())
-    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
+    ui.writenoi18n(b'[extensions]\nkeyword =\n')
+    demoitems(b'keyword', ui.configitems(b'keyword'))
+    demoitems(b'keywordset', ui.configitems(b'keywordset'))
+    demoitems(b'keywordmaps', pycompat.iteritems(kwmaps))
+    keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
     repo.wvfs.write(fn, keywords)
     repo[None].add([fn])
-    ui.note(_('\nkeywords written to %s:\n') % fn)
+    ui.note(_(b'\nkeywords written to %s:\n') % fn)
     ui.note(keywords)
     with repo.wlock():
-        repo.dirstate.setbranch('demobranch')
-    for name, cmd in ui.configitems('hooks'):
-        if name.split('.', 1)[0].find('commit') > -1:
-            repo.ui.setconfig('hooks', name, '', 'keyword')
-    msg = _('hg keyword configuration and expansion example')
-    ui.note(("hg ci -m '%s'\n" % msg))
+        repo.dirstate.setbranch(b'demobranch')
+    for name, cmd in ui.configitems(b'hooks'):
+        if name.split(b'.', 1)[0].find(b'commit') > -1:
+            repo.ui.setconfig(b'hooks', name, b'', b'keyword')
+    msg = _(b'hg keyword configuration and expansion example')
+    ui.note((b"hg ci -m '%s'\n" % msg))
     repo.commit(text=msg)
-    ui.status(_('\n\tkeywords expanded\n'))
+    ui.status(_(b'\n\tkeywords expanded\n'))
     ui.write(repo.wread(fn))
     repo.wvfs.rmtree(repo.root)
 
-@command('kwexpand',
+
+@command(
+    b'kwexpand',
     cmdutil.walkopts,
-    _('hg kwexpand [OPTION]... [FILE]...'),
-    inferrepo=True)
+    _(b'hg kwexpand [OPTION]... [FILE]...'),
+    inferrepo=True,
+)
 def expand(ui, repo, *pats, **opts):
     '''expand keywords in the working directory
 
@@ -519,13 +556,18 @@
     # 3rd argument sets expansion to True
     _kwfwrite(ui, repo, True, *pats, **opts)
 
-@command('kwfiles',
-         [('A', 'all', None, _('show keyword status flags of all files')),
-          ('i', 'ignore', None, _('show files excluded from expansion')),
-          ('u', 'unknown', None, _('only show unknown (not tracked) files')),
-         ] + cmdutil.walkopts,
-         _('hg kwfiles [OPTION]... [FILE]...'),
-         inferrepo=True)
+
+@command(
+    b'kwfiles',
+    [
+        (b'A', b'all', None, _(b'show keyword status flags of all files')),
+        (b'i', b'ignore', None, _(b'show files excluded from expansion')),
+        (b'u', b'unknown', None, _(b'only show unknown (not tracked) files')),
+    ]
+    + cmdutil.walkopts,
+    _(b'hg kwfiles [OPTION]... [FILE]...'),
+    inferrepo=True,
+)
 def files(ui, repo, *pats, **opts):
     '''show files configured for keyword expansion
 
@@ -553,39 +595,44 @@
     if pats:
         cwd = repo.getcwd()
     else:
-        cwd = ''
+        cwd = b''
     files = []
     opts = pycompat.byteskwargs(opts)
-    if not opts.get('unknown') or opts.get('all'):
+    if not opts.get(b'unknown') or opts.get(b'all'):
         files = sorted(status.modified + status.added + status.clean)
     kwfiles = kwt.iskwfile(files, wctx)
     kwdeleted = kwt.iskwfile(status.deleted, wctx)
     kwunknown = kwt.iskwfile(status.unknown, wctx)
-    if not opts.get('ignore') or opts.get('all'):
+    if not opts.get(b'ignore') or opts.get(b'all'):
         showfiles = kwfiles, kwdeleted, kwunknown
     else:
         showfiles = [], [], []
-    if opts.get('all') or opts.get('ignore'):
-        showfiles += ([f for f in files if f not in kwfiles],
-                      [f for f in status.unknown if f not in kwunknown])
-    kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
-    kwstates = zip(kwlabels, pycompat.bytestr('K!kIi'), showfiles)
-    fm = ui.formatter('kwfiles', opts)
-    fmt = '%.0s%s\n'
-    if opts.get('all') or ui.verbose:
-        fmt = '%s %s\n'
+    if opts.get(b'all') or opts.get(b'ignore'):
+        showfiles += (
+            [f for f in files if f not in kwfiles],
+            [f for f in status.unknown if f not in kwunknown],
+        )
+    kwlabels = b'enabled deleted enabledunknown ignored ignoredunknown'.split()
+    kwstates = zip(kwlabels, pycompat.bytestr(b'K!kIi'), showfiles)
+    fm = ui.formatter(b'kwfiles', opts)
+    fmt = b'%.0s%s\n'
+    if opts.get(b'all') or ui.verbose:
+        fmt = b'%s %s\n'
     for kwstate, char, filenames in kwstates:
-        label = 'kwfiles.' + kwstate
+        label = b'kwfiles.' + kwstate
         for f in filenames:
             fm.startitem()
             fm.data(kwstatus=char, path=f)
             fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
     fm.end()
 
-@command('kwshrink',
+
+@command(
+    b'kwshrink',
     cmdutil.walkopts,
-    _('hg kwshrink [OPTION]... [FILE]...'),
-    inferrepo=True)
+    _(b'hg kwshrink [OPTION]... [FILE]...'),
+    inferrepo=True,
+)
 def shrink(ui, repo, *pats, **opts):
     '''revert expanded keywords in the working directory
 
@@ -596,8 +643,10 @@
     # 3rd argument sets expansion to False
     _kwfwrite(ui, repo, False, *pats, **opts)
 
+
 # monkeypatches
 
+
 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
     '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
     rejects or conflicts due to expanded keywords in working dir.'''
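
All the monkeypatch wrappers in this block follow the
extensions.wrapfunction protocol: the wrapper receives the original
callable as its first argument (the b'...' attribute names work because
of the pycompat getattr imported at the top of this file). A simplified,
hedged analogue::

    import functools
    import types

    patch = types.SimpleNamespace(diff=lambda repo: 'plain diff')

    def wrapfunction(container, funcname, wrapper):
        # sketch only; mercurial.extensions adds bookkeeping on top
        origfn = getattr(container, funcname)
        setattr(container, funcname, functools.partial(wrapper, origfn))
        return origfn

    def kwdiff(orig, repo):
        # like the wrappers here: call the original, post-process
        return 'shrunk ' + orig(repo)

    wrapfunction(patch, 'diff', kwdiff)
    assert patch.diff('repo') == 'shrunk plain diff'
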
@@ -607,6 +656,7 @@
         # shrink keywords read from working dir
         self.lines = kwt.shrinklines(self.fname, self.lines)
 
+
 def kwdiff(orig, repo, *args, **kwargs):
     '''Monkeypatch patch.diff to avoid expansion.'''
     kwt = getattr(repo, '_keywordkwt', None)
@@ -620,6 +670,7 @@
         if kwt:
             kwt.restrict = restrict
 
+
 def kwweb_skip(orig, web):
     '''Wraps webcommands.x turning off keyword expansion.'''
     kwt = getattr(web.repo, '_keywordkwt', None)
@@ -633,6 +684,7 @@
         if kwt:
             kwt.match = origmatch
 
+
 def kw_amend(orig, ui, repo, old, extra, pats, opts):
     '''Wraps cmdutil.amend expanding keywords after amend.'''
     kwt = getattr(repo, '_keywordkwt', None)
@@ -648,6 +700,7 @@
             kwt.restrict = False
         return newid
 
+
 def kw_copy(orig, ui, repo, pats, opts, rename=False):
     '''Wraps cmdutil.copy so that copy/rename destinations do not
     contain expanded keywords.
@@ -663,7 +716,7 @@
         return orig(ui, repo, pats, opts, rename)
     with repo.wlock():
         orig(ui, repo, pats, opts, rename)
-        if opts.get('dry_run'):
+        if opts.get(b'dry_run'):
             return
         wctx = repo[None]
         cwd = repo.getcwd()
@@ -673,15 +726,20 @@
             expansion or a symlink which points to a file configured for
             expansion. '''
             source = repo.dirstate.copied(dest)
-            if 'l' in wctx.flags(source):
-                source = pathutil.canonpath(repo.root, cwd,
-                                           os.path.realpath(source))
+            if b'l' in wctx.flags(source):
+                source = pathutil.canonpath(
+                    repo.root, cwd, os.path.realpath(source)
+                )
             return kwt.match(source)
 
-        candidates = [f for f in repo.dirstate.copies() if
-                      'l' not in wctx.flags(f) and haskwsource(f)]
+        candidates = [
+            f
+            for f in repo.dirstate.copies()
+            if b'l' not in wctx.flags(f) and haskwsource(f)
+        ]
         kwt.overwrite(wctx, candidates, False, False)
 
+
 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
     '''Wraps record.dorecord expanding keywords after recording.'''
     kwt = getattr(repo, '_keywordkwt', None)
@@ -691,10 +749,10 @@
         # record returns 0 even when nothing has changed
         # therefore compare nodes before and after
         kwt.postcommit = True
-        ctx = repo['.']
+        ctx = repo[b'.']
         wstatus = ctx.status()
         ret = orig(ui, repo, commitfunc, *pats, **opts)
-        recctx = repo['.']
+        recctx = repo[b'.']
         if ctx != recctx:
             modified, added = _preselect(wstatus, recctx.files())
             kwt.restrict = False
@@ -703,6 +761,7 @@
             kwt.restrict = True
         return ret
 
+
 def kwfilectx_cmp(orig, self, fctx):
     if fctx._customcmp:
         return fctx.cmp(self)
@@ -711,14 +770,20 @@
         return orig(self, fctx)
     # keyword affects data size, comparing wdir and filelog size does
     # not make sense
-    if (fctx._filenode is None and
-        (self._repo._encodefilterpats or
-         kwt.match(fctx.path()) and 'l' not in fctx.flags() or
-         self.size() - 4 == fctx.size()) or
-        self.size() == fctx.size()):
+    if (
+        fctx._filenode is None
+        and (
+            self._repo._encodefilterpats
+            or kwt.match(fctx.path())
+            and b'l' not in fctx.flags()
+            or self.size() - 4 == fctx.size()
+        )
+        or self.size() == fctx.size()
+    ):
         return self._filelog.cmp(self._filenode, fctx.data())
     return True
 
+
 def uisetup(ui):
     ''' Monkeypatches dispatch._parse to retrieve user command.
     Overrides file method to return kwfilelog instead of filelog
@@ -730,34 +795,38 @@
     def kwdispatch_parse(orig, ui, args):
         '''Monkeypatch dispatch._parse to obtain running hg command.'''
         cmd, func, args, options, cmdoptions = orig(ui, args)
-        kwtools['hgcmd'] = cmd
+        kwtools[b'hgcmd'] = cmd
         return cmd, func, args, options, cmdoptions
 
-    extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
+    extensions.wrapfunction(dispatch, b'_parse', kwdispatch_parse)
 
-    extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
-    extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
-    extensions.wrapfunction(patch, 'diff', kwdiff)
-    extensions.wrapfunction(cmdutil, 'amend', kw_amend)
-    extensions.wrapfunction(cmdutil, 'copy', kw_copy)
-    extensions.wrapfunction(cmdutil, 'dorecord', kw_dorecord)
+    extensions.wrapfunction(context.filectx, b'cmp', kwfilectx_cmp)
+    extensions.wrapfunction(patch.patchfile, b'__init__', kwpatchfile_init)
+    extensions.wrapfunction(patch, b'diff', kwdiff)
+    extensions.wrapfunction(cmdutil, b'amend', kw_amend)
+    extensions.wrapfunction(cmdutil, b'copy', kw_copy)
+    extensions.wrapfunction(cmdutil, b'dorecord', kw_dorecord)
     for c in nokwwebcommands.split():
         extensions.wrapfunction(webcommands, c, kwweb_skip)
 
+
 def reposetup(ui, repo):
     '''Sets up repo as kwrepo for keyword substitution.'''
 
     try:
-        if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
-            or '.hg' in util.splitpath(repo.root)
-            or repo._url.startswith('bundle:')):
+        if (
+            not repo.local()
+            or kwtools[b'hgcmd'] in nokwcommands.split()
+            or b'.hg' in util.splitpath(repo.root)
+            or repo._url.startswith(b'bundle:')
+        ):
             return
     except AttributeError:
         pass
 
-    inc, exc = [], ['.hg*']
-    for pat, opt in ui.configitems('keyword'):
-        if opt != 'ignore':
+    inc, exc = [], [b'.hg*']
+    for pat, opt in ui.configitems(b'keyword'):
+        if opt != b'ignore':
             inc.append(pat)
         else:
             exc.append(pat)
@@ -768,7 +837,7 @@
 
     class kwrepo(repo.__class__):
         def file(self, f):
-            if f[0] == '/':
+            if f[0] == b'/':
                 f = f[1:]
             return kwfilelog(self.svfs, kwt, f)
 
@@ -791,8 +860,9 @@
             if not kwt.postcommit:
                 restrict = kwt.restrict
                 kwt.restrict = True
-                kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
-                              False, True)
+                kwt.overwrite(
+                    self[n], sorted(ctx.added() + ctx.modified()), False, True
+                )
                 kwt.restrict = restrict
             return n
 
@@ -801,10 +871,10 @@
                 origrestrict = kwt.restrict
                 try:
                     if not dryrun:
-                        changed = self['.'].files()
+                        changed = self[b'.'].files()
                     ret = super(kwrepo, self).rollback(dryrun, force)
                     if not dryrun:
-                        ctx = self['.']
+                        ctx = self[b'.']
                         modified, added = _preselect(ctx.status(), changed)
                         kwt.restrict = False
                         kwt.overwrite(ctx, modified, True, True)
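
reposetup's kwrepo above (and largefiles' repository wrapper, next file)
uses Mercurial's per-instance subclassing idiom: derive from
repo.__class__ and reassign it, so only that one repository object gains
the overrides. A minimal standalone sketch::

    class localrepository(object):   # stand-in
        def file(self, f):
            return 'filelog:' + f

    repo = localrepository()

    class kwrepo(repo.__class__):
        def file(self, f):
            if f[0] == '/':
                f = f[1:]
            return 'kwfilelog:' + f

    repo.__class__ = kwrepo          # only this instance is affected
    assert repo.file('/a.txt') == 'kwfilelog:a.txt'
    assert localrepository().file('/a.txt') == 'filelog:/a.txt'
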
--- a/hgext/largefiles/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/largefiles/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -128,21 +128,21 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 eh = exthelper.exthelper()
 eh.merge(lfcommands.eh)
 eh.merge(overrides.eh)
 eh.merge(proto.eh)
 
-eh.configitem('largefiles', 'minsize',
-    default=eh.configitem.dynamicdefault,
+eh.configitem(
+    b'largefiles', b'minsize', default=eh.configitem.dynamicdefault,
 )
-eh.configitem('largefiles', 'patterns',
-    default=list,
+eh.configitem(
+    b'largefiles', b'patterns', default=list,
 )
-eh.configitem('largefiles', 'usercache',
-    default=None,
+eh.configitem(
+    b'largefiles', b'usercache', default=None,
 )
 
 cmdtable = eh.cmdtable
@@ -151,30 +151,37 @@
 reposetup = reposetup.reposetup
 uisetup = eh.finaluisetup
 
+
 def featuresetup(ui, supported):
     # don't die on seeing a repo with the largefiles requirement
-    supported |= {'largefiles'}
+    supported |= {b'largefiles'}
+
 
 @eh.uisetup
 def _uisetup(ui):
     localrepo.featuresetupfuncs.add(featuresetup)
     hg.wirepeersetupfuncs.append(proto.wirereposetup)
 
-    cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
-    cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
+    cmdutil.outgoinghooks.add(b'largefiles', overrides.outgoinghook)
+    cmdutil.summaryremotehooks.add(b'largefiles', overrides.summaryremotehook)
 
     # create the new wireproto commands ...
-    wireprotov1server.wireprotocommand('putlfile', 'sha', permission='push')(
-        proto.putlfile)
-    wireprotov1server.wireprotocommand('getlfile', 'sha', permission='pull')(
-        proto.getlfile)
-    wireprotov1server.wireprotocommand('statlfile', 'sha', permission='pull')(
-        proto.statlfile)
-    wireprotov1server.wireprotocommand('lheads', '', permission='pull')(
-        wireprotov1server.heads)
+    wireprotov1server.wireprotocommand(b'putlfile', b'sha', permission=b'push')(
+        proto.putlfile
+    )
+    wireprotov1server.wireprotocommand(b'getlfile', b'sha', permission=b'pull')(
+        proto.getlfile
+    )
+    wireprotov1server.wireprotocommand(
+        b'statlfile', b'sha', permission=b'pull'
+    )(proto.statlfile)
+    wireprotov1server.wireprotocommand(b'lheads', b'', permission=b'pull')(
+        wireprotov1server.heads
+    )
 
-    extensions.wrapfunction(wireprotov1server.commands['heads'], 'func',
-                            proto.heads)
+    extensions.wrapfunction(
+        wireprotov1server.commands[b'heads'], b'func', proto.heads
+    )
     # TODO also wrap wireproto.commandsv2 once heads is implemented there.
 
     # can't do this in reposetup because it needs to have happened before
@@ -186,9 +193,9 @@
 
     # override some extensions' stuff as well
     for name, module in extensions.extensions():
-        if name == 'rebase':
+        if name == b'rebase':
             # TODO: teach exthelper to handle this
-            extensions.wrapfunction(module, 'rebase',
-                                    overrides.overriderebase)
+            extensions.wrapfunction(module, b'rebase', overrides.overriderebase)
+
 
 revsetpredicate = eh.revsetpredicate
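
One note on the reflowed registrations above: wireprotocommand is a
decorator factory applied as a plain call, since the handlers live in the
proto module. A hedged miniature of that registration shape::

    commands = {}

    def wireprotocommand(name, args=b'', permission=b'pull'):
        def register(func):
            commands[name] = (func, args, permission)
            return func
        return register

    def putlfile(repo, proto, sha):   # hypothetical handler
        return b'ok'

    wireprotocommand(b'putlfile', b'sha', permission=b'push')(putlfile)
    assert commands[b'putlfile'][2] == b'push'
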
--- a/hgext/largefiles/basestore.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/largefiles/basestore.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,9 +15,11 @@
 
 from . import lfutil
 
+
 class StoreError(Exception):
     '''Raised when there is a problem getting files from or putting
     files to a central store.'''
+
     def __init__(self, filename, hash, url, detail):
         self.filename = filename
         self.hash = hash
@@ -25,12 +27,16 @@
         self.detail = detail
 
     def longmessage(self):
-        return (_("error getting id %s from url %s for file %s: %s\n") %
-                 (self.hash, util.hidepassword(self.url), self.filename,
-                  self.detail))
+        return _(b"error getting id %s from url %s for file %s: %s\n") % (
+            self.hash,
+            util.hidepassword(self.url),
+            self.filename,
+            self.detail,
+        )
 
     def __str__(self):
-        return "%s: %s" % (util.hidepassword(self.url), self.detail)
+        return b"%s: %s" % (util.hidepassword(self.url), self.detail)
+
 
 class basestore(object):
     def __init__(self, ui, repo, url):
@@ -40,12 +46,12 @@
 
     def put(self, source, hash):
         '''Put source file into the store so it can be retrieved by hash.'''
-        raise NotImplementedError('abstract method')
+        raise NotImplementedError(b'abstract method')
 
     def exists(self, hashes):
         '''Check to see if the store contains the given hashes. Given an
         iterable of hashes it returns a mapping from hash to bool.'''
-        raise NotImplementedError('abstract method')
+        raise NotImplementedError(b'abstract method')
 
     def get(self, files):
         '''Get the specified largefiles from the store and write to local
@@ -62,16 +68,19 @@
 
         at = 0
         available = self.exists(set(hash for (_filename, hash) in files))
-        with ui.makeprogress(_('getting largefiles'), unit=_('files'),
-                             total=len(files)) as progress:
+        with ui.makeprogress(
+            _(b'getting largefiles'), unit=_(b'files'), total=len(files)
+        ) as progress:
             for filename, hash in files:
                 progress.update(at)
                 at += 1
-                ui.note(_('getting %s:%s\n') % (filename, hash))
+                ui.note(_(b'getting %s:%s\n') % (filename, hash))
 
                 if not available.get(hash):
-                    ui.warn(_('%s: largefile %s not available from %s\n')
-                            % (filename, hash, util.hidepassword(self.url)))
+                    ui.warn(
+                        _(b'%s: largefile %s not available from %s\n')
+                        % (filename, hash, util.hidepassword(self.url))
+                    )
                     missing.append(filename)
                     continue
 
@@ -87,22 +96,25 @@
         store and in the usercache.
         filename is for informational messages only.
         """
-        util.makedirs(lfutil.storepath(self.repo, ''))
+        util.makedirs(lfutil.storepath(self.repo, b''))
         storefilename = lfutil.storepath(self.repo, hash)
 
-        tmpname = storefilename + '.tmp'
-        with util.atomictempfile(tmpname,
-                createmode=self.repo.store.createmode) as tmpfile:
+        tmpname = storefilename + b'.tmp'
+        with util.atomictempfile(
+            tmpname, createmode=self.repo.store.createmode
+        ) as tmpfile:
             try:
                 gothash = self._getfile(tmpfile, filename, hash)
             except StoreError as err:
                 self.ui.warn(err.longmessage())
-                gothash = ""
+                gothash = b""
 
         if gothash != hash:
-            if gothash != "":
-                self.ui.warn(_('%s: data corruption (expected %s, got %s)\n')
-                             % (filename, hash, gothash))
+            if gothash != b"":
+                self.ui.warn(
+                    _(b'%s: data corruption (expected %s, got %s)\n')
+                    % (filename, hash, gothash)
+                )
             util.unlink(tmpname)
             return False
 
@@ -115,13 +127,14 @@
         file revision referenced by every changeset in revs.
         Return 0 if all is well, non-zero on any errors.'''
 
-        self.ui.status(_('searching %d changesets for largefiles\n') %
-                       len(revs))
-        verified = set()                # set of (filename, filenode) tuples
-        filestocheck = []               # list of (cset, filename, expectedhash)
+        self.ui.status(
+            _(b'searching %d changesets for largefiles\n') % len(revs)
+        )
+        verified = set()  # set of (filename, filenode) tuples
+        filestocheck = []  # list of (cset, filename, expectedhash)
         for rev in revs:
             cctx = self.repo[rev]
-            cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
+            cset = b"%d:%s" % (cctx.rev(), node.short(cctx.node()))
 
             for standin in cctx:
                 filename = lfutil.splitstandin(standin)
@@ -139,12 +152,14 @@
         numlfiles = len({fname for (fname, fnode) in verified})
         if contents:
             self.ui.status(
-                _('verified contents of %d revisions of %d largefiles\n')
-                % (numrevs, numlfiles))
+                _(b'verified contents of %d revisions of %d largefiles\n')
+                % (numrevs, numlfiles)
+            )
         else:
             self.ui.status(
-                _('verified existence of %d revisions of %d largefiles\n')
-                % (numrevs, numlfiles))
+                _(b'verified existence of %d revisions of %d largefiles\n')
+                % (numrevs, numlfiles)
+            )
         return int(failed)
 
     def _getfile(self, tmpfile, filename, hash):
@@ -153,7 +168,7 @@
         downloads and return the hash.  Close tmpfile.  Raise
         StoreError if unable to download the file (e.g. it does not
         exist in the store).'''
-        raise NotImplementedError('abstract method')
+        raise NotImplementedError(b'abstract method')
 
     def _verifyfiles(self, contents, filestocheck):
         '''Perform the actual verification of files in the store.
@@ -161,4 +176,4 @@
         'filestocheck' is list of files to check.
         Returns _true_ if any problems are found!
         '''
-        raise NotImplementedError('abstract method')
+        raise NotImplementedError(b'abstract method')
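
basestore._gethash above, stripped to its essentials: download into a
temporary file, verify the content hash (largefiles uses sha1), and keep
the file only on a match. A hedged sketch of that flow::

    import hashlib
    import os

    def fetch_verified(path, expected, download):
        # temp-file / verify / publish, as _gethash does with atomictempfile
        tmpname = path + '.tmp'
        data = download()
        with open(tmpname, 'wb') as tmpfile:
            tmpfile.write(data)
        gothash = hashlib.sha1(data).hexdigest()
        if gothash != expected:
            os.unlink(tmpname)       # wrong content: discard
            return False
        os.rename(tmpname, path)     # publish only verified data
        return True
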
--- a/hgext/largefiles/lfcommands.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/largefiles/lfcommands.py	Mon Oct 21 11:09:48 2019 -0400
@@ -35,10 +35,7 @@
     filemap,
 )
 
-from . import (
-    lfutil,
-    storefactory
-)
+from . import lfutil, storefactory
 
 release = lock.release
 
@@ -46,15 +43,28 @@
 
 eh = exthelper.exthelper()
 
-@eh.command('lfconvert',
-    [('s', 'size', '',
-      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
-    ('', 'to-normal', False,
-     _('convert from a largefiles repo to a normal repo')),
+
+@eh.command(
+    b'lfconvert',
+    [
+        (
+            b's',
+            b'size',
+            b'',
+            _(b'minimum size (MB) for files to be converted as largefiles'),
+            b'SIZE',
+        ),
+        (
+            b'',
+            b'to-normal',
+            False,
+            _(b'convert from a largefiles repo to a normal repo'),
+        ),
     ],
-    _('hg lfconvert SOURCE DEST [FILE ...]'),
+    _(b'hg lfconvert SOURCE DEST [FILE ...]'),
     norepo=True,
-    inferrepo=True)
+    inferrepo=True,
+)
 def lfconvert(ui, src, dest, *pats, **opts):
     '''convert a normal repository to a largefiles repository
 
@@ -75,19 +85,19 @@
     this, the DEST repository can be used without largefiles at all.'''
 
     opts = pycompat.byteskwargs(opts)
-    if opts['to_normal']:
+    if opts[b'to_normal']:
         tolfile = False
     else:
         tolfile = True
-        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
+        size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)
 
     if not hg.islocal(src):
-        raise error.Abort(_('%s is not a local Mercurial repo') % src)
+        raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
     if not hg.islocal(dest):
-        raise error.Abort(_('%s is not a local Mercurial repo') % dest)
+        raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)
 
     rsrc = hg.repository(ui, src)
-    ui.status(_('initializing destination %s\n') % dest)
+    ui.status(_(b'initializing destination %s\n') % dest)
     rdst = hg.repository(ui, dest, create=True)
 
     success = False
@@ -97,8 +107,10 @@
         # is to simply walk the changelog, using changelog.nodesbetween().
         # Take a look at mercurial/revlog.py:639 for more details.
         # Use a generator instead of a list to decrease memory usage
-        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
-            rsrc.heads())[0])
+        ctxs = (
+            rsrc[ctx]
+            for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
+        )
         revmap = {node.nullid: node.nullid}
         if tolfile:
             # Lock destination to prevent modification while it is converted to.
@@ -110,20 +122,31 @@
             lfiles = set()
             normalfiles = set()
             if not pats:
-                pats = ui.configlist(lfutil.longname, 'patterns')
+                pats = ui.configlist(lfutil.longname, b'patterns')
             if pats:
-                matcher = matchmod.match(rsrc.root, '', list(pats))
+                matcher = matchmod.match(rsrc.root, b'', list(pats))
             else:
                 matcher = None
 
             lfiletohash = {}
-            with ui.makeprogress(_('converting revisions'),
-                                 unit=_('revisions'),
-                                 total=rsrc['tip'].rev()) as progress:
+            with ui.makeprogress(
+                _(b'converting revisions'),
+                unit=_(b'revisions'),
+                total=rsrc[b'tip'].rev(),
+            ) as progress:
                 for ctx in ctxs:
                     progress.update(ctx.rev())
-                    _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
-                        lfiles, normalfiles, matcher, size, lfiletohash)
+                    _lfconvert_addchangeset(
+                        rsrc,
+                        rdst,
+                        ctx,
+                        revmap,
+                        lfiles,
+                        normalfiles,
+                        matcher,
+                        size,
+                        lfiletohash,
+                    )
 
             if rdst.wvfs.exists(lfutil.shortname):
                 rdst.wvfs.rmtree(lfutil.shortname)
@@ -139,20 +162,23 @@
             # If there were any files converted to largefiles, add largefiles
             # to the destination repository's requirements.
             if lfiles:
-                rdst.requirements.add('largefiles')
+                rdst.requirements.add(b'largefiles')
                 rdst._writerequirements()
         else:
+
             class lfsource(filemap.filemap_source):
                 def __init__(self, ui, source):
                     super(lfsource, self).__init__(ui, source, None)
-                    self.filemapper.rename[lfutil.shortname] = '.'
+                    self.filemapper.rename[lfutil.shortname] = b'.'
 
                 def getfile(self, name, rev):
                     realname, realrev = rev
                     f = super(lfsource, self).getfile(name, rev)
 
-                    if (not realname.startswith(lfutil.shortnameslash)
-                            or f[0] is None):
+                    if (
+                        not realname.startswith(lfutil.shortnameslash)
+                        or f[0] is None
+                    ):
                         return f
 
                     # Substitute in the largefile data for the hash
@@ -160,26 +186,31 @@
                     path = lfutil.findfile(rsrc, hash)
 
                     if path is None:
-                        raise error.Abort(_("missing largefile for '%s' in %s")
-                                          % (realname, realrev))
+                        raise error.Abort(
+                            _(b"missing largefile for '%s' in %s")
+                            % (realname, realrev)
+                        )
                     return util.readfile(path), f[1]
 
             class converter(convcmd.converter):
                 def __init__(self, ui, source, dest, revmapfile, opts):
                     src = lfsource(ui, source)
 
-                    super(converter, self).__init__(ui, src, dest, revmapfile,
-                                                    opts)
+                    super(converter, self).__init__(
+                        ui, src, dest, revmapfile, opts
+                    )
 
             found, missing = downloadlfiles(ui, rsrc)
             if missing != 0:
-                raise error.Abort(_("all largefiles must be present locally"))
+                raise error.Abort(_(b"all largefiles must be present locally"))
 
             orig = convcmd.converter
             convcmd.converter = converter
 
             try:
-                convcmd.convert(ui, src, dest, source_type='hg', dest_type='hg')
+                convcmd.convert(
+                    ui, src, dest, source_type=b'hg', dest_type=b'hg'
+                )
             finally:
                 convcmd.converter = orig
         success = True
@@ -191,8 +222,10 @@
             # we failed, remove the new directory
             shutil.rmtree(rdst.root)
 
-def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
-        matcher, size, lfiletohash):
+
+def _lfconvert_addchangeset(
+    rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
+):
     # Convert src parents to dst parents
     parents = _convertparents(ctx, revmap)
 
@@ -214,11 +247,12 @@
                     renamed = False
                 renamedlfile = renamed and renamed in lfiles
                 islfile |= renamedlfile
-                if 'l' in fctx.flags():
+                if b'l' in fctx.flags():
                     if renamedlfile:
                         raise error.Abort(
-                            _('renamed/copied largefile %s becomes symlink')
-                            % f)
+                            _(b'renamed/copied largefile %s becomes symlink')
+                            % f
+                        )
                     islfile = False
             if islfile:
                 lfiles.add(f)
@@ -231,20 +265,21 @@
             # largefile in manifest if it has not been removed/renamed
             if f in ctx.manifest():
                 fctx = ctx.filectx(f)
-                if 'l' in fctx.flags():
+                if b'l' in fctx.flags():
                     renamed = fctx.copysource()
                     if renamed and renamed in lfiles:
-                        raise error.Abort(_('largefile %s becomes symlink') % f)
+                        raise error.Abort(
+                            _(b'largefile %s becomes symlink') % f
+                        )
 
                 # largefile was modified, update standins
-                m = hashlib.sha1('')
+                m = hashlib.sha1(b'')
                 m.update(ctx[f].data())
                 hash = node.hex(m.digest())
                 if f not in lfiletohash or lfiletohash[f] != hash:
                     rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
-                    executable = 'x' in ctx[f].flags()
-                    lfutil.writestandin(rdst, fstandin, hash,
-                        executable)
+                    executable = b'x' in ctx[f].flags()
+                    lfutil.writestandin(rdst, fstandin, hash, executable)
                     lfiletohash[f] = hash
         else:
             # normal file
@@ -265,24 +300,39 @@
                 # doesn't change after rename or copy
                 renamed = lfutil.standin(renamed)
 
-            return context.memfilectx(repo, memctx, f,
-                                      lfiletohash[srcfname] + '\n',
-                                      'l' in fctx.flags(), 'x' in fctx.flags(),
-                                      renamed)
+            return context.memfilectx(
+                repo,
+                memctx,
+                f,
+                lfiletohash[srcfname] + b'\n',
+                b'l' in fctx.flags(),
+                b'x' in fctx.flags(),
+                renamed,
+            )
         else:
             return _getnormalcontext(repo, ctx, f, revmap)
 
     # Commit
     _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
 
+
 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
-    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
-                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
+    mctx = context.memctx(
+        rdst,
+        parents,
+        ctx.description(),
+        dstfiles,
+        getfilectx,
+        ctx.user(),
+        ctx.date(),
+        ctx.extra(),
+    )
     ret = rdst.commitctx(mctx)
     lfutil.copyalltostore(rdst, ret)
     rdst.setparents(ret)
     revmap[ctx.node()] = rdst.changelog.tip()
 
+
 # Generate list of changed files
 def _getchangedfiles(ctx, parents):
     files = set(ctx.files())
@@ -293,6 +343,7 @@
                 files.add(fn)
     return files
 
+
 # Convert src parents to dst parents
 def _convertparents(ctx, revmap):
     parents = []
@@ -302,6 +353,7 @@
         parents.append(node.nullid)
     return parents
 
+
 # Get memfilectx for a normal file
 def _getnormalcontext(repo, ctx, f, revmap):
     try:
@@ -311,40 +363,40 @@
     renamed = fctx.copysource()
 
     data = fctx.data()
-    if f == '.hgtags':
-        data = _converttags (repo.ui, revmap, data)
-    return context.memfilectx(repo, ctx, f, data, 'l' in fctx.flags(),
-                              'x' in fctx.flags(), renamed)
+    if f == b'.hgtags':
+        data = _converttags(repo.ui, revmap, data)
+    return context.memfilectx(
+        repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
+    )
+
 
 # Remap tag data using a revision map
 def _converttags(ui, revmap, data):
     newdata = []
     for line in data.splitlines():
         try:
-            id, name = line.split(' ', 1)
+            id, name = line.split(b' ', 1)
         except ValueError:
-            ui.warn(_('skipping incorrectly formatted tag %s\n')
-                % line)
+            ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
             continue
         try:
             newid = node.bin(id)
         except TypeError:
-            ui.warn(_('skipping incorrectly formatted id %s\n')
-                % id)
+            ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
             continue
         try:
-            newdata.append('%s %s\n' % (node.hex(revmap[newid]),
-                name))
+            newdata.append(b'%s %s\n' % (node.hex(revmap[newid]), name))
         except KeyError:
-            ui.warn(_('no mapping for id %s\n') % id)
+            ui.warn(_(b'no mapping for id %s\n') % id)
             continue
-    return ''.join(newdata)
+    return b''.join(newdata)
+
 
 def _islfile(file, ctx, matcher, size):
     '''Return true if file should be considered a largefile, i.e.
     matcher matches it or it is larger than size.'''
     # never store special .hg* files as largefiles
-    if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
+    if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
         return False
     if matcher and matcher(file):
         return True
@@ -353,6 +405,7 @@
     except error.LookupError:
         return False
 
+
 def uploadlfiles(ui, rsrc, rdst, files):
     '''upload largefiles to the central store'''
 
@@ -362,23 +415,30 @@
     store = storefactory.openstore(rsrc, rdst, put=True)
 
     at = 0
-    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
+    ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
     retval = store.exists(files)
     files = [h for h in files if not retval[h]]
-    ui.debug("%d largefiles need to be uploaded\n" % len(files))
+    ui.debug(b"%d largefiles need to be uploaded\n" % len(files))
 
-    with ui.makeprogress(_('uploading largefiles'), unit=_('files'),
-                         total=len(files)) as progress:
+    with ui.makeprogress(
+        _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
+    ) as progress:
         for hash in files:
             progress.update(at)
             source = lfutil.findfile(rsrc, hash)
             if not source:
-                raise error.Abort(_('largefile %s missing from store'
-                                   ' (needs to be uploaded)') % hash)
+                raise error.Abort(
+                    _(
+                        b'largefile %s missing from store'
+                        b' (needs to be uploaded)'
+                    )
+                    % hash
+                )
             # XXX check for errors here
             store.put(source, hash)
             at += 1
 
+
 def verifylfiles(ui, repo, all=False, contents=False):
     '''Verify that every largefile revision in the current changeset
     exists in the central store.  With --contents, also verify that
@@ -386,13 +446,14 @@
     matches the revision ID).  With --all, check every changeset in
     this repository.'''
     if all:
-        revs = repo.revs('all()')
+        revs = repo.revs(b'all()')
     else:
-        revs = ['.']
+        revs = [b'.']
 
     store = storefactory.openstore(repo)
     return store.verify(revs, contents=contents)
 
+
 def cachelfiles(ui, repo, node, filelist=None):
     '''cachelfiles ensures that all largefiles needed by the specified revision
     are present in the repository's largefile cache.
@@ -411,7 +472,7 @@
             expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
         except IOError as err:
             if err.errno == errno.ENOENT:
-                continue # node must be None and standin wasn't found in wctx
+                continue  # node must be None and standin wasn't found in wctx
             raise
         if not lfutil.findfile(repo, expectedhash):
             toget.append((lfile, expectedhash))
@@ -423,25 +484,29 @@
 
     return ([], [])
 
+
 def downloadlfiles(ui, repo, rev=None):
     match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})
+
     def prepare(ctx, fns):
         pass
+
     totalsuccess = 0
     totalmissing = 0
-    if rev != []: # walkchangerevs on empty list would return all revs
-        for ctx in cmdutil.walkchangerevs(repo, match, {'rev' : rev},
-                                          prepare):
+    if rev != []:  # walkchangerevs on empty list would return all revs
+        for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': rev}, prepare):
             success, missing = cachelfiles(ui, repo, ctx.node())
             totalsuccess += len(success)
             totalmissing += len(missing)
-    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
+    ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
     if totalmissing > 0:
-        ui.status(_("%d largefiles failed to download\n") % totalmissing)
+        ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
     return totalsuccess, totalmissing
 
-def updatelfiles(ui, repo, filelist=None, printmessage=None,
-                 normallookup=False):
+
+def updatelfiles(
+    ui, repo, filelist=None, printmessage=None, normallookup=False
+):
     '''Update largefiles according to standins in the working directory
 
     If ``printmessage`` is other than ``None``, it means "print (or
@@ -463,22 +528,20 @@
         wctx = repo[None]
         for lfile in lfiles:
             lfileorig = os.path.relpath(
-                scmutil.backuppath(ui, repo, lfile),
-                start=repo.root)
+                scmutil.backuppath(ui, repo, lfile), start=repo.root
+            )
             standin = lfutil.standin(lfile)
             standinorig = os.path.relpath(
-                scmutil.backuppath(ui, repo, standin),
-                start=repo.root)
+                scmutil.backuppath(ui, repo, standin), start=repo.root
+            )
             if wvfs.exists(standin):
-                if (wvfs.exists(standinorig) and
-                    wvfs.exists(lfile)):
-                    shutil.copyfile(wvfs.join(lfile),
-                                    wvfs.join(lfileorig))
+                if wvfs.exists(standinorig) and wvfs.exists(lfile):
+                    shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
                     wvfs.unlinkpath(standinorig)
                 expecthash = lfutil.readasstandin(wctx[standin])
-                if expecthash != '':
-                    if lfile not in wctx: # not switched to normal file
-                        if repo.dirstate[standin] != '?':
+                if expecthash != b'':
+                    if lfile not in wctx:  # not switched to normal file
+                        if repo.dirstate[standin] != b'?':
                             wvfs.unlinkpath(lfile, ignoremissing=True)
                         else:
                             dropped.add(lfile)
@@ -493,8 +556,10 @@
                 # lfile is added to the repository again. This happens when a
                 # largefile is converted back to a normal file: the standin
                 # disappears, but a new (normal) file appears as the lfile.
-                if (wvfs.exists(lfile) and
-                    repo.dirstate.normalize(lfile) not in wctx):
+                if (
+                    wvfs.exists(lfile)
+                    and repo.dirstate.normalize(lfile) not in wctx
+                ):
                     wvfs.unlinkpath(lfile)
                     removed += 1
 
@@ -511,7 +576,7 @@
                 # the M state.
                 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
 
-            statuswriter(_('getting changed largefiles\n'))
+            statuswriter(_(b'getting changed largefiles\n'))
             cachelfiles(ui, repo, None, lfiles)
 
         for lfile in lfiles:
@@ -549,14 +614,18 @@
 
         lfdirstate.write()
         if lfiles:
-            statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
-                removed))
+            statuswriter(
+                _(b'%d largefiles updated, %d removed\n') % (updated, removed)
+            )
+
 
-@eh.command('lfpull',
-    [('r', 'rev', [], _('pull largefiles for these revisions'))
-    ] + cmdutil.remoteopts,
-    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
-def lfpull(ui, repo, source="default", **opts):
+@eh.command(
+    b'lfpull',
+    [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
+    + cmdutil.remoteopts,
+    _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
+)
+def lfpull(ui, repo, source=b"default", **opts):
     """pull largefiles for the specified revisions from the specified source
 
     Pull largefiles that are referenced from local changesets but missing
@@ -581,21 +650,20 @@
 
     revs = opts.get(r'rev', [])
     if not revs:
-        raise error.Abort(_('no revisions specified'))
+        raise error.Abort(_(b'no revisions specified'))
     revs = scmutil.revrange(repo, revs)
 
     numcached = 0
     for rev in revs:
-        ui.note(_('pulling largefiles for revision %d\n') % rev)
+        ui.note(_(b'pulling largefiles for revision %d\n') % rev)
         (cached, missing) = cachelfiles(ui, repo, rev)
         numcached += len(cached)
-    ui.status(_("%d largefiles cached\n") % numcached)
+    ui.status(_(b"%d largefiles cached\n") % numcached)
 
-@eh.command('debuglfput',
-    [] + cmdutil.remoteopts,
-    _('FILE'))
+
+@eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
 def debuglfput(ui, repo, filepath, **kwargs):
     hash = lfutil.hashfile(filepath)
     storefactory.openstore(repo).put(filepath, hash)
-    ui.write('%s\n' % hash)
+    ui.write(b'%s\n' % hash)
     return 0
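
Throughout ``lfconvert`` above, the port first re-keys ``**opts`` with
``pycompat.byteskwargs`` so that lookups can use ``b'to_normal'``-style
keys on both Python 2 and 3.  A standalone sketch of the idiom,
simplified from the real helper in ``mercurial.pycompat``::

    import sys

    def byteskwargs(dic):
        # Simplified: on Python 3, **kwargs keys arrive as str, so
        # re-key them as bytes to allow uniform b'...' lookups.
        if sys.version_info[0] < 3:
            return dic
        return {k.encode('latin-1'): v for k, v in dic.items()}

    def lfconvert_like(ui, src, dest, **opts):
        opts = byteskwargs(opts)
        if opts.get(b'to_normal'):
            pass  # convert a largefiles repo back to a normal one
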
--- a/hgext/largefiles/lfutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/largefiles/lfutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,6 +16,7 @@
 
 from mercurial.i18n import _
 from mercurial.node import hex
+from mercurial.pycompat import open
 
 from mercurial import (
     dirstate,
@@ -31,26 +32,29 @@
     vfs as vfsmod,
 )
 
-shortname = '.hglf'
-shortnameslash = shortname + '/'
-longname = 'largefiles'
+shortname = b'.hglf'
+shortnameslash = shortname + b'/'
+longname = b'largefiles'
 
 # -- Private worker functions ------------------------------------------
 
+
 def getminsize(ui, assumelfiles, opt, default=10):
     lfsize = opt
     if not lfsize and assumelfiles:
-        lfsize = ui.config(longname, 'minsize', default=default)
+        lfsize = ui.config(longname, b'minsize', default=default)
     if lfsize:
         try:
             lfsize = float(lfsize)
         except ValueError:
-            raise error.Abort(_('largefiles: size must be number (not %s)\n')
-                             % lfsize)
+            raise error.Abort(
+                _(b'largefiles: size must be number (not %s)\n') % lfsize
+            )
     if lfsize is None:
-        raise error.Abort(_('minimum size for largefiles must be specified'))
+        raise error.Abort(_(b'minimum size for largefiles must be specified'))
     return lfsize
 
+
 def link(src, dest):
     """Try to create hardlink - if that fails, efficiently make a copy."""
     util.makedirs(os.path.dirname(dest))
@@ -58,11 +62,12 @@
         util.oslink(src, dest)
     except OSError:
         # if hardlinks fail, fallback on atomic copy
-        with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf:
+        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
             for chunk in util.filechunkiter(srcf):
                 dstf.write(chunk)
         os.chmod(dest, os.stat(src).st_mode)
 
+
 def usercachepath(ui, hash):
     '''Return the correct location in the "global" largefiles cache for a file
     with the given hash.
@@ -70,74 +75,89 @@
     to preserve download bandwidth and storage space.'''
     return os.path.join(_usercachedir(ui), hash)
 
+
 def _usercachedir(ui, name=longname):
     '''Return the location of the "global" largefiles cache.'''
-    path = ui.configpath(name, 'usercache')
+    path = ui.configpath(name, b'usercache')
     if path:
         return path
     if pycompat.iswindows:
-        appdata = encoding.environ.get('LOCALAPPDATA',
-                                       encoding.environ.get('APPDATA'))
+        appdata = encoding.environ.get(
+            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
+        )
         if appdata:
             return os.path.join(appdata, name)
     elif pycompat.isdarwin:
-        home = encoding.environ.get('HOME')
+        home = encoding.environ.get(b'HOME')
         if home:
-            return os.path.join(home, 'Library', 'Caches', name)
+            return os.path.join(home, b'Library', b'Caches', name)
     elif pycompat.isposix:
-        path = encoding.environ.get('XDG_CACHE_HOME')
+        path = encoding.environ.get(b'XDG_CACHE_HOME')
         if path:
             return os.path.join(path, name)
-        home = encoding.environ.get('HOME')
+        home = encoding.environ.get(b'HOME')
         if home:
-            return os.path.join(home, '.cache', name)
+            return os.path.join(home, b'.cache', name)
     else:
-        raise error.Abort(_('unknown operating system: %s\n')
-                          % pycompat.osname)
-    raise error.Abort(_('unknown %s usercache location') % name)
+        raise error.Abort(
+            _(b'unknown operating system: %s\n') % pycompat.osname
+        )
+    raise error.Abort(_(b'unknown %s usercache location') % name)
+
 
 def inusercache(ui, hash):
     path = usercachepath(ui, hash)
     return os.path.exists(path)
 
+
 def findfile(repo, hash):
     '''Return store path of the largefile with the specified hash.
     As a side effect, the file might be linked from user cache.
     Return None if the file can't be found locally.'''
     path, exists = findstorepath(repo, hash)
     if exists:
-        repo.ui.note(_('found %s in store\n') % hash)
+        repo.ui.note(_(b'found %s in store\n') % hash)
         return path
     elif inusercache(repo.ui, hash):
-        repo.ui.note(_('found %s in system cache\n') % hash)
+        repo.ui.note(_(b'found %s in system cache\n') % hash)
         path = storepath(repo, hash)
         link(usercachepath(repo.ui, hash), path)
         return path
     return None
 
+
 class largefilesdirstate(dirstate.dirstate):
     def __getitem__(self, key):
         return super(largefilesdirstate, self).__getitem__(unixpath(key))
+
     def normal(self, f):
         return super(largefilesdirstate, self).normal(unixpath(f))
+
     def remove(self, f):
         return super(largefilesdirstate, self).remove(unixpath(f))
+
     def add(self, f):
         return super(largefilesdirstate, self).add(unixpath(f))
+
     def drop(self, f):
         return super(largefilesdirstate, self).drop(unixpath(f))
+
     def forget(self, f):
         return super(largefilesdirstate, self).forget(unixpath(f))
+
     def normallookup(self, f):
         return super(largefilesdirstate, self).normallookup(unixpath(f))
+
     def _ignore(self, f):
         return False
+
     def write(self, tr=False):
         # (1) disable PENDING mode always
         #     (lfdirstate isn't yet managed as a part of the transaction)
         # (2) avoid develwarn 'use dirstate.write with ....'
         super(largefilesdirstate, self).write(None)
 
+
 def openlfdirstate(ui, repo, create=True):
     '''
     Return a dirstate object that tracks largefiles: i.e. its root is
@@ -146,17 +166,22 @@
     vfs = repo.vfs
     lfstoredir = longname
     opener = vfsmod.vfs(vfs.join(lfstoredir))
-    lfdirstate = largefilesdirstate(opener, ui, repo.root,
-                                    repo.dirstate._validate,
-                                    lambda: sparse.matcher(repo))
+    lfdirstate = largefilesdirstate(
+        opener,
+        ui,
+        repo.root,
+        repo.dirstate._validate,
+        lambda: sparse.matcher(repo),
+    )
 
     # If the largefiles dirstate does not exist, populate and create
     # it. This ensures that we create it on the first meaningful
     # largefiles operation in a new clone.
-    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
+    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
         matcher = getstandinmatcher(repo)
-        standins = repo.dirstate.walk(matcher, subrepos=[], unknown=False,
-                                      ignored=False)
+        standins = repo.dirstate.walk(
+            matcher, subrepos=[], unknown=False, ignored=False
+        )
 
         if len(standins) > 0:
             vfs.makedirs(lfstoredir)
@@ -166,11 +191,13 @@
             lfdirstate.normallookup(lfile)
     return lfdirstate
 
+
 def lfdirstatestatus(lfdirstate, repo):
-    pctx = repo['.']
+    pctx = repo[b'.']
     match = matchmod.always()
-    unsure, s = lfdirstate.status(match, subrepos=[], ignored=False,
-                                  clean=False, unknown=False)
+    unsure, s = lfdirstate.status(
+        match, subrepos=[], ignored=False, clean=False, unknown=False
+    )
     modified, clean = s.modified, s.clean
     for lfile in unsure:
         try:
@@ -184,6 +211,7 @@
             lfdirstate.normal(lfile)
     return s
 
+
 def listlfiles(repo, rev=None, matcher=None):
     '''return a list of largefiles in the working copy or the
     specified changeset'''
@@ -192,14 +220,18 @@
         matcher = getstandinmatcher(repo)
 
     # ignore unknown files in working directory
-    return [splitstandin(f)
-            for f in repo[rev].walk(matcher)
-            if rev is not None or repo.dirstate[f] != '?']
+    return [
+        splitstandin(f)
+        for f in repo[rev].walk(matcher)
+        if rev is not None or repo.dirstate[f] != b'?'
+    ]
+
 
 def instore(repo, hash, forcelocal=False):
     '''Return true if a largefile with the given hash exists in the store'''
     return os.path.exists(storepath(repo, hash, forcelocal))
 
+
 def storepath(repo, hash, forcelocal=False):
     '''Return the correct location in the repository largefiles store for a
     file with the given hash.'''
@@ -207,6 +239,7 @@
         return repo.vfs.reljoin(repo.sharedpath, longname, hash)
     return repo.vfs.join(longname, hash)
 
+
 def findstorepath(repo, hash):
     '''Search through the local store path(s) to find the file for the given
     hash.  If the file is not found, its path in the primary store is returned.
@@ -224,6 +257,7 @@
 
     return (path, False)
 
+
 def copyfromcache(repo, hash, filename):
     '''Copy the specified largefile from the repo or system cache to
     filename in the repository. Return true on success or false if the
@@ -237,16 +271,18 @@
     wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
     # The write may fail before the file is fully written, but we
     # don't use atomic writes in the working copy.
-    with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd:
-        gothash = copyandhash(
-            util.filechunkiter(srcfd), destfd)
+    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
+        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
     if gothash != hash:
-        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
-                     % (filename, path, gothash))
+        repo.ui.warn(
+            _(b'%s: data corruption in %s with hash %s\n')
+            % (filename, path, gothash)
+        )
         wvfs.unlink(filename)
         return False
     return True
 
+
 def copytostore(repo, ctx, file, fstandin):
     wvfs = repo.wvfs
     hash = readasstandin(ctx[fstandin])
@@ -255,8 +291,11 @@
     if wvfs.exists(file):
         copytostoreabsolute(repo, wvfs.join(file), hash)
     else:
-        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
-                     (file, hash))
+        repo.ui.warn(
+            _(b"%s: largefile %s not available from local store\n")
+            % (file, hash)
+        )
+
 
 def copyalltostore(repo, node):
     '''Copy all largefiles in a given revision to the store'''
@@ -267,24 +306,28 @@
         if realfile is not None and filename in ctx.manifest():
             copytostore(repo, ctx, realfile, filename)
 
+
 def copytostoreabsolute(repo, file, hash):
     if inusercache(repo.ui, hash):
         link(usercachepath(repo.ui, hash), storepath(repo, hash))
     else:
         util.makedirs(os.path.dirname(storepath(repo, hash)))
-        with open(file, 'rb') as srcf:
-            with util.atomictempfile(storepath(repo, hash),
-                                     createmode=repo.store.createmode) as dstf:
+        with open(file, b'rb') as srcf:
+            with util.atomictempfile(
+                storepath(repo, hash), createmode=repo.store.createmode
+            ) as dstf:
                 for chunk in util.filechunkiter(srcf):
                     dstf.write(chunk)
         linktousercache(repo, hash)
 
+
 def linktousercache(repo, hash):
     '''Link / copy the largefile with the specified hash from the store
     to the cache.'''
     path = usercachepath(repo.ui, hash)
     link(storepath(repo, hash), path)
 
+
 def getstandinmatcher(repo, rmatcher=None):
     '''Return a match object that applies rmatcher to the standin directory'''
     wvfs = repo.wvfs
@@ -303,18 +346,22 @@
         match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
     return match
 
+
 def composestandinmatcher(repo, rmatcher):
     '''Return a matcher that accepts standins corresponding to the
     files accepted by rmatcher. Pass the list of files in the matcher
     as the paths specified by the user.'''
     smatcher = getstandinmatcher(repo, rmatcher)
     isstandin = smatcher.matchfn
+
     def composedmatchfn(f):
         return isstandin(f) and rmatcher.matchfn(splitstandin(f))
+
     smatcher.matchfn = composedmatchfn
 
     return smatcher
 
+
 def standin(filename):
     '''Return the repo-relative path to the standin for the specified big
     file.'''
@@ -327,21 +374,24 @@
     #    passed filenames from an external source (like the command line).
     return shortnameslash + util.pconvert(filename)
 
+
 def isstandin(filename):
     '''Return true if filename is a big file standin. filename must be
     in Mercurial's internal form (slash-separated).'''
     return filename.startswith(shortnameslash)
 
+
 def splitstandin(filename):
     # Split on / because that's what dirstate always uses, even on Windows.
     # Change local separator to / first just in case we are passed filenames
     # from an external source (like the command line).
-    bits = util.pconvert(filename).split('/', 1)
+    bits = util.pconvert(filename).split(b'/', 1)
     if len(bits) == 2 and bits[0] == shortname:
         return bits[1]
     else:
         return None
 
+
 def updatestandin(repo, lfile, standin):
     """Re-calculate hash value of lfile and write it into standin
 
@@ -353,7 +403,8 @@
         executable = getexecutable(file)
         writestandin(repo, standin, hash, executable)
     else:
-        raise error.Abort(_('%s: file not found!') % lfile)
+        raise error.Abort(_(b'%s: file not found!') % lfile)
+
 
 def readasstandin(fctx):
     '''read hex hash from given filectx of standin file
@@ -361,36 +412,43 @@
     This encapsulates how "standin" data is stored into storage layer.'''
     return fctx.data().strip()
 
+
 def writestandin(repo, standin, hash, executable):
     '''write hash to <repo.root>/<standin>'''
-    repo.wwrite(standin, hash + '\n', executable and 'x' or '')
+    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
+
 
 def copyandhash(instream, outfile):
     '''Read bytes from instream (iterable) and write them to outfile,
     computing the SHA-1 hash of the data along the way. Return the hash.'''
-    hasher = hashlib.sha1('')
+    hasher = hashlib.sha1(b'')
     for data in instream:
         hasher.update(data)
         outfile.write(data)
     return hex(hasher.digest())
 
+
 def hashfile(file):
     if not os.path.exists(file):
-        return ''
-    with open(file, 'rb') as fd:
+        return b''
+    with open(file, b'rb') as fd:
         return hexsha1(fd)
 
+
 def getexecutable(filename):
     mode = os.stat(filename).st_mode
-    return ((mode & stat.S_IXUSR) and
-            (mode & stat.S_IXGRP) and
-            (mode & stat.S_IXOTH))
+    return (
+        (mode & stat.S_IXUSR)
+        and (mode & stat.S_IXGRP)
+        and (mode & stat.S_IXOTH)
+    )
+
 
 def urljoin(first, second, *arg):
     def join(left, right):
-        if not left.endswith('/'):
-            left += '/'
-        if right.startswith('/'):
+        if not left.endswith(b'/'):
+            left += b'/'
+        if right.startswith(b'/'):
             right = right[1:]
         return left + right
 
@@ -399,6 +457,7 @@
         url = join(url, a)
     return url
 
+
 def hexsha1(fileobj):
     """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
     object data"""
@@ -407,31 +466,38 @@
         h.update(chunk)
     return hex(h.digest())
 
+
 def httpsendfile(ui, filename):
-    return httpconnection.httpsendfile(ui, filename, 'rb')
+    return httpconnection.httpsendfile(ui, filename, b'rb')
+
 
 def unixpath(path):
     '''Return a version of path normalized for use with the lfdirstate.'''
     return util.pconvert(os.path.normpath(path))
 
+
 def islfilesrepo(repo):
     '''Return true if the repo is a largefile repo.'''
-    if ('largefiles' in repo.requirements and
-            any(shortnameslash in f[0] for f in repo.store.datafiles())):
+    if b'largefiles' in repo.requirements and any(
+        shortnameslash in f[0] for f in repo.store.datafiles()
+    ):
         return True
 
     return any(openlfdirstate(repo.ui, repo, False))
 
+
 class storeprotonotcapable(Exception):
     def __init__(self, storetypes):
         self.storetypes = storetypes
 
+
 def getstandinsstate(repo):
     standins = []
     matcher = getstandinmatcher(repo)
     wctx = repo[None]
-    for standin in repo.dirstate.walk(matcher, subrepos=[], unknown=False,
-                                      ignored=False):
+    for standin in repo.dirstate.walk(
+        matcher, subrepos=[], unknown=False, ignored=False
+    ):
         lfile = splitstandin(standin)
         try:
             hash = readasstandin(wctx[standin])
@@ -440,29 +506,30 @@
         standins.append((lfile, hash))
     return standins
 
+
 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
     lfstandin = standin(lfile)
     if lfstandin in repo.dirstate:
         stat = repo.dirstate._map[lfstandin]
         state, mtime = stat[0], stat[3]
     else:
-        state, mtime = '?', -1
-    if state == 'n':
-        if (normallookup or mtime < 0 or
-            not repo.wvfs.exists(lfile)):
+        state, mtime = b'?', -1
+    if state == b'n':
+        if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
             # state 'n' doesn't ensure 'clean' in this case
             lfdirstate.normallookup(lfile)
         else:
             lfdirstate.normal(lfile)
-    elif state == 'm':
+    elif state == b'm':
         lfdirstate.normallookup(lfile)
-    elif state == 'r':
+    elif state == b'r':
         lfdirstate.remove(lfile)
-    elif state == 'a':
+    elif state == b'a':
         lfdirstate.add(lfile)
-    elif state == '?':
+    elif state == b'?':
         lfdirstate.drop(lfile)
 
+
 def markcommitted(orig, ctx, node):
     repo = ctx.repo()
 
@@ -492,6 +559,7 @@
     # at merging.
     copyalltostore(repo, node)
 
+
 def getlfilestoupdate(oldstandins, newstandins):
     changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
     filelist = []
@@ -500,10 +568,14 @@
             filelist.append(f[0])
     return filelist
 
+
 def getlfilestoupload(repo, missing, addfunc):
     makeprogress = repo.ui.makeprogress
-    with makeprogress(_('finding outgoing largefiles'),
-                      unit=_('revisions'), total=len(missing)) as progress:
+    with makeprogress(
+        _(b'finding outgoing largefiles'),
+        unit=_(b'revisions'),
+        total=len(missing),
+    ) as progress:
         for i, n in enumerate(missing):
             progress.update(i)
             parents = [p for p in repo[n].parents() if p != node.nullid]
@@ -533,6 +605,7 @@
                 if isstandin(fn) and fn in ctx:
                     addfunc(fn, readasstandin(ctx[fn]))
 
+
 def updatestandinsbymatch(repo, match):
     '''Update standins in the working directory according to specified match
 
@@ -553,8 +626,9 @@
         # large.
         lfdirstate = openlfdirstate(ui, repo)
         dirtymatch = matchmod.always()
-        unsure, s = lfdirstate.status(dirtymatch, subrepos=[], ignored=False,
-                                      clean=False, unknown=False)
+        unsure, s = lfdirstate.status(
+            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
+        )
         modifiedfiles = unsure + s.modified + s.added + s.removed
         lfiles = listlfiles(repo)
         # this only loops through largefiles that exist (not
@@ -577,8 +651,9 @@
     # Case 2: user calls commit with specified patterns: refresh
     # any matching big files.
     smatcher = composestandinmatcher(repo, match)
-    standins = repo.dirstate.walk(smatcher, subrepos=[], unknown=False,
-                                  ignored=False)
+    standins = repo.dirstate.walk(
+        smatcher, subrepos=[], unknown=False, ignored=False
+    )
 
     # No matching big files: get out of the way and pass control to
     # the usual commit() method.
@@ -593,7 +668,7 @@
     lfdirstate = openlfdirstate(ui, repo)
     for fstandin in standins:
         lfile = splitstandin(fstandin)
-        if lfdirstate[lfile] != 'r':
+        if lfdirstate[lfile] != b'r':
             updatestandin(repo, lfile, fstandin)
 
     # Cook up a new matcher that only matches regular files or
@@ -617,10 +692,10 @@
         # standin removal, drop the normal file if it is unknown to dirstate.
         # Thus, skip plain largefile names but keep the standin.
         if f in lfiles or fstandin in standins:
-            if repo.dirstate[fstandin] != 'r':
-                if repo.dirstate[f] != 'r':
+            if repo.dirstate[fstandin] != b'r':
+                if repo.dirstate[f] != b'r':
                     continue
-            elif repo.dirstate[f] == '?':
+            elif repo.dirstate[f] == b'?':
                 continue
 
         actualfiles.append(f)
@@ -636,6 +711,7 @@
 
     return match
 
+
 class automatedcommithook(object):
     '''Stateful hook to update standins at the 1st commit of resuming
 
@@ -647,16 +723,18 @@
     --continue``) should update them, because largefiles may be
     modified manually.
     '''
+
     def __init__(self, resuming):
         self.resuming = resuming
 
     def __call__(self, repo, match):
         if self.resuming:
-            self.resuming = False # avoids updating at subsequent commits
+            self.resuming = False  # avoids updating at subsequent commits
             return updatestandinsbymatch(repo, match)
         else:
             return match
 
+
 def getstatuswriter(ui, repo, forcibly=None):
     '''Return the function to write largefiles specific status out
 
@@ -666,10 +744,10 @@
     Otherwise, this returns the function to always write out (or
     ignore if ``not forcibly``) status.
     '''
-    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
+    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
         return repo._lfstatuswriters[-1]
     else:
         if forcibly:
-            return ui.status # forcibly WRITE OUT
+            return ui.status  # forcibly WRITE OUT
         else:
-            return lambda *msg, **opts: None # forcibly IGNORE
+            return lambda *msg, **opts: None  # forcibly IGNORE
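
The helpers above encode the extension's central naming convention: a
largefile ``foo/bar.bin`` is tracked through a small standin at
``.hglf/foo/bar.bin`` whose content is the file's SHA-1 hex hash plus a
newline.  A self-contained sketch of the round-trip, using the ``b''``
literals this port introduces (``util.pconvert`` path normalization
omitted)::

    shortname = b'.hglf'
    shortnameslash = shortname + b'/'

    def standin(filename):
        # repo-relative path of the standin for a given largefile
        return shortnameslash + filename

    def splitstandin(filename):
        # inverse of standin(); None for paths outside .hglf/
        bits = filename.split(b'/', 1)
        if len(bits) == 2 and bits[0] == shortname:
            return bits[1]
        return None

    assert splitstandin(standin(b'foo/bar.bin')) == b'foo/bar.bin'
    assert splitstandin(b'foo/bar.bin') is None
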
--- a/hgext/largefiles/localstore.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/largefiles/localstore.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,6 +10,7 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import util
 
 from . import (
@@ -17,6 +18,7 @@
     lfutil,
 )
 
+
 class localstore(basestore.basestore):
     '''localstore first attempts to grab files out of the store in the remote
     Mercurial repository.  Failing that, it attempts to grab the files from
@@ -40,11 +42,11 @@
     def _getfile(self, tmpfile, filename, hash):
         path = lfutil.findfile(self.remote, hash)
         if not path:
-            raise basestore.StoreError(filename, hash, self.url,
-                _("can't get file locally"))
-        with open(path, 'rb') as fd:
-            return lfutil.copyandhash(
-                util.filechunkiter(fd), tmpfile)
+            raise basestore.StoreError(
+                filename, hash, self.url, _(b"can't get file locally")
+            )
+        with open(path, b'rb') as fd:
+            return lfutil.copyandhash(util.filechunkiter(fd), tmpfile)
 
     def _verifyfiles(self, contents, filestocheck):
         failed = False
@@ -52,17 +54,20 @@
             storepath, exists = lfutil.findstorepath(self.repo, expectedhash)
             if not exists:
                 storepath, exists = lfutil.findstorepath(
-                    self.remote, expectedhash)
+                    self.remote, expectedhash
+                )
             if not exists:
                 self.ui.warn(
-                    _('changeset %s: %s references missing %s\n')
-                    % (cset, filename, storepath))
+                    _(b'changeset %s: %s references missing %s\n')
+                    % (cset, filename, storepath)
+                )
                 failed = True
             elif contents:
                 actualhash = lfutil.hashfile(storepath)
                 if actualhash != expectedhash:
                     self.ui.warn(
-                        _('changeset %s: %s references corrupted %s\n')
-                        % (cset, filename, storepath))
+                        _(b'changeset %s: %s references corrupted %s\n')
+                        % (cset, filename, storepath)
+                    )
                     failed = True
         return failed
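
``localstore._verifyfiles`` above checks that each referenced largefile
exists and, when ``contents`` is requested, re-hashes it via
``lfutil.hashfile``.  A standalone sketch of that check, with plain
``hashlib``/``open`` standing in for ``util.filechunkiter`` and the vfs
layer::

    import hashlib
    import os

    def hexsha1path(path, chunksize=128 * 1024):
        # Stream the file through SHA-1 instead of reading it whole,
        # mirroring lfutil.hexsha1 over util.filechunkiter.
        h = hashlib.sha1()
        with open(path, 'rb') as fd:
            for chunk in iter(lambda: fd.read(chunksize), b''):
                h.update(chunk)
        return h.hexdigest().encode('ascii')

    def verifyone(ui, cset, filename, storepath, expectedhash, contents):
        # Returns True when this entry failed verification.
        if not os.path.exists(storepath):
            ui.warn(b'changeset %s: %s references missing %s\n'
                    % (cset, filename, storepath))
            return True
        if contents and hexsha1path(storepath) != expectedhash:
            ui.warn(b'changeset %s: %s references corrupted %s\n'
                    % (cset, filename, storepath))
            return True
        return False
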
--- a/hgext/largefiles/overrides.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/largefiles/overrides.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,14 +9,15 @@
 '''Overridden Mercurial commands and functions for the largefiles extension'''
 from __future__ import absolute_import
 
+import contextlib
 import copy
 import os
 
 from mercurial.i18n import _
 
-from mercurial.hgweb import (
-    webcommands,
-)
+from mercurial.pycompat import open
+
+from mercurial.hgweb import webcommands
 
 from mercurial import (
     archival,
@@ -51,6 +52,7 @@
 
 # -- Utility functions: commonly/repeatedly needed functionality ---------------
 
+
 def composelargefilematcher(match, manifest):
     '''create a matcher that matches only the largefiles in the original
     matcher'''
@@ -63,14 +65,16 @@
     m.matchfn = lambda f: lfile(f) and origmatchfn(f)
     return m
 
+
 def composenormalfilematcher(match, manifest, exclude=None):
     excluded = set()
     if exclude is not None:
         excluded.update(exclude)
 
     m = copy.copy(match)
-    notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
-            manifest or f in excluded)
+    notlfile = lambda f: not (
+        lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
+    )
     m._files = [lf for lf in m._files if notlfile(lf)]
     m._fileset = set(m._files)
     m.always = lambda: False
@@ -78,16 +82,18 @@
     m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
     return m
 
+
 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
     large = opts.get(r'large')
     lfsize = lfutil.getminsize(
-        ui, lfutil.islfilesrepo(repo), opts.get(r'lfsize'))
+        ui, lfutil.islfilesrepo(repo), opts.get(r'lfsize')
+    )
 
     lfmatcher = None
     if lfutil.islfilesrepo(repo):
-        lfpats = ui.configlist(lfutil.longname, 'patterns')
+        lfpats = ui.configlist(lfutil.longname, b'patterns')
         if lfpats:
-            lfmatcher = matchmod.match(repo.root, '', list(lfpats))
+            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
 
     lfnames = []
     m = matcher
@@ -103,7 +109,7 @@
         # The normal add code will do that for us.
         if exact and exists:
             if lfile:
-                ui.warn(_('%s already a largefile\n') % uipathfn(f))
+                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
             continue
 
         if (exact or not exists) and not lfutil.isstandin(f):
@@ -112,12 +118,13 @@
             if not repo.wvfs.exists(f):
                 continue
 
-            abovemin = (lfsize and
-                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
+            abovemin = (
+                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
+            )
             if large or abovemin or (lfmatcher and lfmatcher(f)):
                 lfnames.append(f)
                 if ui.verbose or not exact:
-                    ui.status(_('adding %s as a largefile\n') % uipathfn(f))
+                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
 
     bad = []
 
@@ -129,34 +136,48 @@
             lfdirstate = lfutil.openlfdirstate(ui, repo)
             for f in lfnames:
                 standinname = lfutil.standin(f)
-                lfutil.writestandin(repo, standinname, hash='',
-                    executable=lfutil.getexecutable(repo.wjoin(f)))
+                lfutil.writestandin(
+                    repo,
+                    standinname,
+                    hash=b'',
+                    executable=lfutil.getexecutable(repo.wjoin(f)),
+                )
                 standins.append(standinname)
-                if lfdirstate[f] == 'r':
+                if lfdirstate[f] == b'r':
                     lfdirstate.normallookup(f)
                 else:
                     lfdirstate.add(f)
             lfdirstate.write()
-            bad += [lfutil.splitstandin(f)
-                    for f in repo[None].add(standins)
-                    if f in m.files()]
+            bad += [
+                lfutil.splitstandin(f)
+                for f in repo[None].add(standins)
+                if f in m.files()
+            ]
 
         added = [f for f in lfnames if f not in bad]
     return added, bad
 
+
+@contextlib.contextmanager
+def lfstatus(repo):
+    oldvalue = getattr(repo, 'lfstatus', False)
+    repo.lfstatus = True
+    try:
+        yield
+    finally:
+        repo.lfstatus = oldvalue
+
+
 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
     after = opts.get(r'after')
     m = composelargefilematcher(matcher, repo[None].manifest())
-    try:
-        repo.lfstatus = True
+    with lfstatus(repo):
         s = repo.status(match=m, clean=not isaddremove)
-    finally:
-        repo.lfstatus = False
     manifest = repo[None].manifest()
-    modified, added, deleted, clean = [[f for f in list
-                                        if lfutil.standin(f) in manifest]
-                                       for list in (s.modified, s.added,
-                                                    s.deleted, s.clean)]
+    modified, added, deleted, clean = [
+        [f for f in list if lfutil.standin(f) in manifest]
+        for list in (s.modified, s.added, s.deleted, s.clean)
+    ]
 
     def warn(files, msg):
         for f in files:
@@ -165,14 +186,28 @@
 
     if after:
         remove = deleted
-        result = warn(modified + added + clean,
-                      _('not removing %s: file still exists\n'))
+        result = warn(
+            modified + added + clean, _(b'not removing %s: file still exists\n')
+        )
     else:
         remove = deleted + clean
-        result = warn(modified, _('not removing %s: file is modified (use -f'
-                                  ' to force removal)\n'))
-        result = warn(added, _('not removing %s: file has been marked for add'
-                               ' (use forget to undo)\n')) or result
+        result = warn(
+            modified,
+            _(
+                b'not removing %s: file is modified (use -f'
+                b' to force removal)\n'
+            ),
+        )
+        result = (
+            warn(
+                added,
+                _(
+                    b'not removing %s: file has been marked for add'
+                    b' (use forget to undo)\n'
+                ),
+            )
+            or result
+        )
 
     # Need to lock because standin files are deleted then removed from the
     # repository and we could race in-between.
@@ -180,7 +215,7 @@
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         for f in sorted(remove):
             if ui.verbose or not m.exact(f):
-                ui.status(_('removing %s\n') % uipathfn(f))
+                ui.status(_(b'removing %s\n') % uipathfn(f))
 
             if not dryrun:
                 if not after:
@@ -198,82 +233,116 @@
         repo[None].forget(remove)
 
         for f in remove:
-            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
-                                  False)
+            lfutil.synclfdirstate(
+                repo, lfdirstate, lfutil.splitstandin(f), False
+            )
 
         lfdirstate.write()
 
     return result
 
+
 # For overriding mercurial.hgweb.webcommands so that largefiles will
 # appear at their right place in the manifests.
-@eh.wrapfunction(webcommands, 'decodepath')
+@eh.wrapfunction(webcommands, b'decodepath')
 def decodepath(orig, path):
     return lfutil.splitstandin(path) or path
 
+
 # -- Wrappers: modify existing commands --------------------------------
 
-@eh.wrapcommand('add',
-    opts=[('', 'large', None, _('add as largefile')),
-          ('', 'normal', None, _('add as normal file')),
-          ('', 'lfsize', '', _('add all files above this size (in megabytes) '
-                               'as largefiles (default: 10)'))])
+
+@eh.wrapcommand(
+    b'add',
+    opts=[
+        (b'', b'large', None, _(b'add as largefile')),
+        (b'', b'normal', None, _(b'add as normal file')),
+        (
+            b'',
+            b'lfsize',
+            b'',
+            _(
+                b'add all files above this size (in megabytes) '
+                b'as largefiles (default: 10)'
+            ),
+        ),
+    ],
+)
 def overrideadd(orig, ui, repo, *pats, **opts):
     if opts.get(r'normal') and opts.get(r'large'):
-        raise error.Abort(_('--normal cannot be used with --large'))
+        raise error.Abort(_(b'--normal cannot be used with --large'))
     return orig(ui, repo, *pats, **opts)
 
-@eh.wrapfunction(cmdutil, 'add')
+
+@eh.wrapfunction(cmdutil, b'add')
 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
     # The --normal flag short circuits this override
     if opts.get(r'normal'):
         return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
 
     ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
-    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
-                                             ladded)
+    normalmatcher = composenormalfilematcher(
+        matcher, repo[None].manifest(), ladded
+    )
     bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
 
     bad.extend(f for f in lbad)
     return bad
 
-@eh.wrapfunction(cmdutil, 'remove')
-def cmdutilremove(orig, ui, repo, matcher, prefix, uipathfn, after, force,
-                  subrepos, dryrun):
-    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
-    result = orig(ui, repo, normalmatcher, prefix, uipathfn, after, force,
-                  subrepos, dryrun)
-    return removelargefiles(ui, repo, False, matcher, uipathfn, dryrun,
-                            after=after, force=force) or result
 
-@eh.wrapfunction(subrepo.hgsubrepo, 'status')
+@eh.wrapfunction(cmdutil, b'remove')
+def cmdutilremove(
+    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
+):
+    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
+    result = orig(
+        ui,
+        repo,
+        normalmatcher,
+        prefix,
+        uipathfn,
+        after,
+        force,
+        subrepos,
+        dryrun,
+    )
+    return (
+        removelargefiles(
+            ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
+        )
+        or result
+    )
+
+
+@eh.wrapfunction(subrepo.hgsubrepo, b'status')
 def overridestatusfn(orig, repo, rev2, **opts):
-    try:
-        repo._repo.lfstatus = True
+    with lfstatus(repo._repo):
         return orig(repo, rev2, **opts)
-    finally:
-        repo._repo.lfstatus = False
+
 
-@eh.wrapcommand('status')
+@eh.wrapcommand(b'status')
 def overridestatus(orig, ui, repo, *pats, **opts):
-    try:
-        repo.lfstatus = True
+    with lfstatus(repo):
         return orig(ui, repo, *pats, **opts)
-    finally:
-        repo.lfstatus = False
+
 
-@eh.wrapfunction(subrepo.hgsubrepo, 'dirty')
+@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
-    try:
-        repo._repo.lfstatus = True
+    with lfstatus(repo._repo):
         return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
-    finally:
-        repo._repo.lfstatus = False
+
 
-@eh.wrapcommand('log')
+@eh.wrapcommand(b'log')
 def overridelog(orig, ui, repo, *pats, **opts):
-    def overridematchandpats(orig, ctx, pats=(), opts=None, globbed=False,
-            default='relpath', badfn=None):
+    def overridematchandpats(
+        orig,
+        ctx,
+        pats=(),
+        opts=None,
+        globbed=False,
+        default=b'relpath',
+        badfn=None,
+    ):
         """Matcher that merges root directory with .hglf, suitable for log.
         It is still possible to match .hglf directly.
         For any listed files run log on the standin too.
@@ -292,25 +361,25 @@
         pats = set(p)
 
         def fixpats(pat, tostandin=lfutil.standin):
-            if pat.startswith('set:'):
+            if pat.startswith(b'set:'):
                 return pat
 
             kindpat = matchmod._patsplit(pat, None)
 
             if kindpat[0] is not None:
-                return kindpat[0] + ':' + tostandin(kindpat[1])
+                return kindpat[0] + b':' + tostandin(kindpat[1])
             return tostandin(kindpat[1])
 
         cwd = repo.getcwd()
         if cwd:
             hglf = lfutil.shortname
-            back = util.pconvert(repo.pathto(hglf)[:-len(hglf)])
+            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
 
             def tostandin(f):
                 # The file may already be a standin, so truncate the back
                 # prefix and test before mangling it.  This avoids turning
                 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
-                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
+                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                     return f
 
                 # An absolute path is from outside the repo, so truncate the
@@ -318,20 +387,23 @@
                 # is somewhere in the repo, relative to root, and needs to be
                 # prepended before building the standin.
                 if os.path.isabs(cwd):
-                    f = f[len(back):]
+                    f = f[len(back) :]
                 else:
-                    f = cwd + '/' + f
+                    f = cwd + b'/' + f
                 return back + lfutil.standin(f)
+
         else:
+
             def tostandin(f):
                 if lfutil.isstandin(f):
                     return f
                 return lfutil.standin(f)
+
         pats.update(fixpats(f, tostandin) for f in p)
 
         for i in range(0, len(m._files)):
             # Don't add '.hglf' to m.files, since that is already covered by '.'
-            if m._files[i] == '.':
+            if m._files[i] == b'.':
                 continue
             standin = lfutil.standin(m._files[i])
             # If the "standin" is a directory, append instead of replace to
@@ -346,15 +418,17 @@
         m._fileset = set(m._files)
         m.always = lambda: False
         origmatchfn = m.matchfn
+
         def lfmatchfn(f):
             lf = lfutil.splitstandin(f)
             if lf is not None and origmatchfn(lf):
                 return True
             r = origmatchfn(f)
             return r
+
         m.matchfn = lfmatchfn
 
-        ui.debug('updated patterns: %s\n' % ', '.join(sorted(pats)))
+        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
         return m, pats
 
     # For hg log --patch, the match object is used in two different senses:
@@ -363,25 +437,45 @@
     # The magic matchandpats override should be used for case (1) but not for
     # case (2).
     oldmatchandpats = scmutil.matchandpats
+
     def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
         wctx = repo[None]
         match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
         return lambda ctx: match
 
-    wrappedmatchandpats = extensions.wrappedfunction(scmutil, 'matchandpats',
-                                                     overridematchandpats)
+    wrappedmatchandpats = extensions.wrappedfunction(
+        scmutil, b'matchandpats', overridematchandpats
+    )
     wrappedmakefilematcher = extensions.wrappedfunction(
-        logcmdutil, '_makenofollowfilematcher', overridemakefilematcher)
+        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
+    )
     with wrappedmatchandpats, wrappedmakefilematcher:
         return orig(ui, repo, *pats, **opts)
 
-@eh.wrapcommand('verify',
-    opts=[('', 'large', None,
-                _('verify that all largefiles in current revision exists')),
-          ('', 'lfa', None,
-                _('verify largefiles in all revisions, not just current')),
-          ('', 'lfc', None,
-                _('verify local largefile contents, not just existence'))])
+
+@eh.wrapcommand(
+    b'verify',
+    opts=[
+        (
+            b'',
+            b'large',
+            None,
+            _(b'verify that all largefiles in current revision exists'),
+        ),
+        (
+            b'',
+            b'lfa',
+            None,
+            _(b'verify largefiles in all revisions, not just current'),
+        ),
+        (
+            b'',
+            b'lfc',
+            None,
+            _(b'verify local largefile contents, not just existence'),
+        ),
+    ],
+)
 def overrideverify(orig, ui, repo, *pats, **opts):
     large = opts.pop(r'large', False)
     all = opts.pop(r'lfa', False)
@@ -392,17 +486,23 @@
         result = result or lfcommands.verifylfiles(ui, repo, all, contents)
     return result
 
-@eh.wrapcommand('debugstate',
-    opts=[('', 'large', None, _('display largefiles dirstate'))])
+
+@eh.wrapcommand(
+    b'debugstate',
+    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
+)
 def overridedebugstate(orig, ui, repo, *pats, **opts):
     large = opts.pop(r'large', False)
     if large:
+
         class fakerepo(object):
             dirstate = lfutil.openlfdirstate(ui, repo)
+
         orig(ui, fakerepo, *pats, **opts)
     else:
         orig(ui, repo, *pats, **opts)
 
+
 # Before starting the manifest merge, merge.update will call
 # _checkunknownfile to check if there are any files in the merged-in
 # changeset that collide with unknown files in the working copy.
@@ -413,12 +513,13 @@
 # The overridden function filters the unknown files by removing any
 # largefiles. This makes the merge proceed and we can then handle this
 # case further in the overridden calculateupdates function below.
-@eh.wrapfunction(merge, '_checkunknownfile')
+@eh.wrapfunction(merge, b'_checkunknownfile')
 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
     if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
         return False
     return origfn(repo, wctx, mctx, f, f2)
 
+
 # The manifest merge handles conflicts on the manifest level. We want
 # to handle changes in largefile-ness of files at this level too.
 #
@@ -445,12 +546,14 @@
 # Finally, the merge.applyupdates function will then take care of
 # writing the files into the working copy and lfcommands.updatelfiles
 # will update the largefiles.
-@eh.wrapfunction(merge, 'calculateupdates')
-def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
-                             acceptremote, *args, **kwargs):
+@eh.wrapfunction(merge, b'calculateupdates')
+def overridecalculateupdates(
+    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
+):
     overwrite = force and not branchmerge
     actions, diverge, renamedelete = origfn(
-        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
+        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
+    )
 
     if overwrite:
         return actions, diverge, renamedelete
@@ -459,7 +562,7 @@
     lfiles = set()
     for f in actions:
         splitstandin = lfutil.splitstandin(f)
-        if splitstandin in p1:
+        if splitstandin is not None and splitstandin in p1:
             lfiles.add(splitstandin)
         elif lfutil.standin(f) in p1:
             lfiles.add(f)
@@ -468,57 +571,71 @@
         standin = lfutil.standin(lfile)
         (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
         (sm, sargs, smsg) = actions.get(standin, (None, None, None))
-        if sm in ('g', 'dc') and lm != 'r':
-            if sm == 'dc':
+        if sm in (b'g', b'dc') and lm != b'r':
+            if sm == b'dc':
                 f1, f2, fa, move, anc = sargs
                 sargs = (p2[f2].flags(), False)
             # Case 1: normal file in the working copy, largefile in
             # the second parent
-            usermsg = _('remote turned local normal file %s into a largefile\n'
-                        'use (l)argefile or keep (n)ormal file?'
-                        '$$ &Largefile $$ &Normal file') % lfile
-            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
-                actions[lfile] = ('r', None, 'replaced by standin')
-                actions[standin] = ('g', sargs, 'replaces standin')
-            else: # keep local normal file
-                actions[lfile] = ('k', None, 'replaces standin')
+            usermsg = (
+                _(
+                    b'remote turned local normal file %s into a largefile\n'
+                    b'use (l)argefile or keep (n)ormal file?'
+                    b'$$ &Largefile $$ &Normal file'
+                )
+                % lfile
+            )
+            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
+                actions[lfile] = (b'r', None, b'replaced by standin')
+                actions[standin] = (b'g', sargs, b'replaces standin')
+            else:  # keep local normal file
+                actions[lfile] = (b'k', None, b'replaces standin')
                 if branchmerge:
-                    actions[standin] = ('k', None, 'replaced by non-standin')
+                    actions[standin] = (b'k', None, b'replaced by non-standin')
                 else:
-                    actions[standin] = ('r', None, 'replaced by non-standin')
-        elif lm in ('g', 'dc') and sm != 'r':
-            if lm == 'dc':
+                    actions[standin] = (b'r', None, b'replaced by non-standin')
+        elif lm in (b'g', b'dc') and sm != b'r':
+            if lm == b'dc':
                 f1, f2, fa, move, anc = largs
                 largs = (p2[f2].flags(), False)
             # Case 2: largefile in the working copy, normal file in
             # the second parent
-            usermsg = _('remote turned local largefile %s into a normal file\n'
-                    'keep (l)argefile or use (n)ormal file?'
-                    '$$ &Largefile $$ &Normal file') % lfile
-            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
+            usermsg = (
+                _(
+                    b'remote turned local largefile %s into a normal file\n'
+                    b'keep (l)argefile or use (n)ormal file?'
+                    b'$$ &Largefile $$ &Normal file'
+                )
+                % lfile
+            )
+            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                 if branchmerge:
                     # largefile can be restored from standin safely
-                    actions[lfile] = ('k', None, 'replaced by standin')
-                    actions[standin] = ('k', None, 'replaces standin')
+                    actions[lfile] = (b'k', None, b'replaced by standin')
+                    actions[standin] = (b'k', None, b'replaces standin')
                 else:
                     # "lfile" should be marked as "removed" without
                     # removal of itself
-                    actions[lfile] = ('lfmr', None,
-                                      'forget non-standin largefile')
+                    actions[lfile] = (
+                        b'lfmr',
+                        None,
+                        b'forget non-standin largefile',
+                    )
 
                     # linear-merge should treat this largefile as 're-added'
-                    actions[standin] = ('a', None, 'keep standin')
-            else: # pick remote normal file
-                actions[lfile] = ('g', largs, 'replaces standin')
-                actions[standin] = ('r', None, 'replaced by non-standin')
+                    actions[standin] = (b'a', None, b'keep standin')
+            else:  # pick remote normal file
+                actions[lfile] = (b'g', largs, b'replaces standin')
+                actions[standin] = (b'r', None, b'replaced by non-standin')
 
     return actions, diverge, renamedelete
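
For orientation, the actions mapping rewritten above maps file names to
(action, args, message) tuples, where the one-letter codes are the merge
engine's verbs: b'g' get, b'r' remove, b'k' keep, b'a' add, b'dc'
delete/changed conflict, plus the largefiles-specific b'lfmr' consumed below.
A hypothetical Case 1 outcome, assuming the user answered the prompt by
choosing the remote largefile for an invented file big.dat:

    # Hypothetical illustration only; the file name is invented.
    actions = {
        b'big.dat': (b'r', None, b'replaced by standin'),
        b'.hglf/big.dat': (b'g', (b'', False), b'replaces standin'),
    }
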
 
-@eh.wrapfunction(merge, 'recordupdates')
+
+@eh.wrapfunction(merge, b'recordupdates')
 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
-    if 'lfmr' in actions:
+    if b'lfmr' in actions:
         lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-        for lfile, args, msg in actions['lfmr']:
+        for lfile, args, msg in actions[b'lfmr']:
             # this should be executed before 'orig', to execute 'remove'
             # before all other actions
             repo.dirstate.remove(lfile)
@@ -528,47 +645,60 @@
 
     return orig(repo, actions, branchmerge, getfiledata)
 
+
 # Override filemerge to prompt the user about how they wish to merge
 # largefiles. This will handle identical edits without prompting the user.
-@eh.wrapfunction(filemerge, '_filemerge')
-def overridefilemerge(origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca,
-                      labels=None):
+@eh.wrapfunction(filemerge, b'_filemerge')
+def overridefilemerge(
+    origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
+):
     if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
-        return origfn(premerge, repo, wctx, mynode, orig, fcd, fco, fca,
-                      labels=labels)
+        return origfn(
+            premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
+        )
 
     ahash = lfutil.readasstandin(fca).lower()
     dhash = lfutil.readasstandin(fcd).lower()
     ohash = lfutil.readasstandin(fco).lower()
-    if (ohash != ahash and
-        ohash != dhash and
-        (dhash == ahash or
-         repo.ui.promptchoice(
-             _('largefile %s has a merge conflict\nancestor was %s\n'
-               'you can keep (l)ocal %s or take (o)ther %s.\n'
-               'what do you want to do?'
-               '$$ &Local $$ &Other') %
-               (lfutil.splitstandin(orig), ahash, dhash, ohash),
-             0) == 1)):
+    if (
+        ohash != ahash
+        and ohash != dhash
+        and (
+            dhash == ahash
+            or repo.ui.promptchoice(
+                _(
+                    b'largefile %s has a merge conflict\nancestor was %s\n'
+                    b'you can keep (l)ocal %s or take (o)ther %s.\n'
+                    b'what do you want to do?'
+                    b'$$ &Local $$ &Other'
+                )
+                % (lfutil.splitstandin(orig), ahash, dhash, ohash),
+                0,
+            )
+            == 1
+        )
+    ):
         repo.wwrite(fcd.path(), fco.data(), fco.flags())
     return True, 0, False
 
-@eh.wrapfunction(copiesmod, 'pathcopies')
+
+@eh.wrapfunction(copiesmod, b'pathcopies')
 def copiespathcopies(orig, ctx1, ctx2, match=None):
     copies = orig(ctx1, ctx2, match=match)
     updated = {}
 
-    for k, v in copies.iteritems():
+    for k, v in pycompat.iteritems(copies):
         updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
 
     return updated
 
+
 # Copy first changes the matchers to match standins instead of
 # largefiles.  Then it overrides util.copyfile so that the override
 # checks whether the destination largefile already exists. It also keeps
 # a list of copied files so that the largefiles can be copied and the
 # dirstate updated.
-@eh.wrapfunction(cmdutil, 'copy')
+@eh.wrapfunction(cmdutil, b'copy')
 def overridecopy(orig, ui, repo, pats, opts, rename=False):
     # doesn't remove largefile on rename
     if len(pats) < 2:
@@ -582,17 +712,26 @@
     nonormalfiles = False
     nolfiles = False
     manifest = repo[None].manifest()
-    def normalfilesmatchfn(orig, ctx, pats=(), opts=None, globbed=False,
-        default='relpath', badfn=None):
+
+    def normalfilesmatchfn(
+        orig,
+        ctx,
+        pats=(),
+        opts=None,
+        globbed=False,
+        default=b'relpath',
+        badfn=None,
+    ):
         if opts is None:
             opts = {}
         match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
         return composenormalfilematcher(match, manifest)
-    with extensions.wrappedfunction(scmutil, 'match', normalfilesmatchfn):
+
+    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
         try:
             result = orig(ui, repo, pats, opts, rename)
         except error.Abort as e:
-            if pycompat.bytestr(e) != _('no files to copy'):
+            if pycompat.bytestr(e) != _(b'no files to copy'):
                 raise e
             else:
                 nonormalfiles = True
@@ -622,8 +761,16 @@
         wlock = repo.wlock()
 
         manifest = repo[None].manifest()
-        def overridematch(orig, ctx, pats=(), opts=None, globbed=False,
-                default='relpath', badfn=None):
+
+        def overridematch(
+            orig,
+            ctx,
+            pats=(),
+            opts=None,
+            globbed=False,
+            default=b'relpath',
+            badfn=None,
+        ):
             if opts is None:
                 opts = {}
             newpats = []
@@ -631,7 +778,7 @@
             # directory; we need to remove that now
             for pat in pats:
                 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
-                    newpats.append(pat.replace(lfutil.shortname, ''))
+                    newpats.append(pat.replace(lfutil.shortname, b''))
                 else:
                     newpats.append(pat)
             match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
@@ -640,14 +787,19 @@
             m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
             m._fileset = set(m._files)
             origmatchfn = m.matchfn
+
             def matchfn(f):
                 lfile = lfutil.splitstandin(f)
-                return (lfile is not None and
-                        (f in manifest) and
-                        origmatchfn(lfile) or
-                        None)
+                return (
+                    lfile is not None
+                    and (f in manifest)
+                    and origmatchfn(lfile)
+                    or None
+                )
+
             m.matchfn = matchfn
             return m
+
         listpats = []
         for pat in pats:
             if matchmod.patkind(pat) is not None:
@@ -656,26 +808,31 @@
                 listpats.append(makestandin(pat))
 
         copiedfiles = []
+
         def overridecopyfile(orig, src, dest, *args, **kwargs):
-            if (lfutil.shortname in src and
-                dest.startswith(repo.wjoin(lfutil.shortname))):
-                destlfile = dest.replace(lfutil.shortname, '')
-                if not opts['force'] and os.path.exists(destlfile):
-                    raise IOError('',
-                                  _('destination largefile already exists'))
+            if lfutil.shortname in src and dest.startswith(
+                repo.wjoin(lfutil.shortname)
+            ):
+                destlfile = dest.replace(lfutil.shortname, b'')
+                if not opts[b'force'] and os.path.exists(destlfile):
+                    raise IOError(
+                        b'', _(b'destination largefile already exists')
+                    )
             copiedfiles.append((src, dest))
             orig(src, dest, *args, **kwargs)
-        with extensions.wrappedfunction(util, 'copyfile', overridecopyfile):
-            with extensions.wrappedfunction(scmutil, 'match', overridematch):
+
+        with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
+            with extensions.wrappedfunction(scmutil, b'match', overridematch):
                 result += orig(ui, repo, listpats, opts, rename)
 
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         for (src, dest) in copiedfiles:
-            if (lfutil.shortname in src and
-                dest.startswith(repo.wjoin(lfutil.shortname))):
-                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
-                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
-                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
+            if lfutil.shortname in src and dest.startswith(
+                repo.wjoin(lfutil.shortname)
+            ):
+                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
+                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
+                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                 if not os.path.isdir(destlfiledir):
                     os.makedirs(destlfiledir)
                 if rename:
@@ -686,13 +843,12 @@
                     repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                     lfdirstate.remove(srclfile)
                 else:
-                    util.copyfile(repo.wjoin(srclfile),
-                                  repo.wjoin(destlfile))
+                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
 
                 lfdirstate.add(destlfile)
         lfdirstate.write()
     except error.Abort as e:
-        if pycompat.bytestr(e) != _('no files to copy'):
+        if pycompat.bytestr(e) != _(b'no files to copy'):
             raise e
         else:
             nolfiles = True
@@ -700,10 +856,11 @@
         wlock.release()
 
     if nolfiles and nonormalfiles:
-        raise error.Abort(_('no files to copy'))
+        raise error.Abort(_(b'no files to copy'))
 
     return result
 
+
 # When the user calls revert, we have to be careful to not revert any
 # changes to other largefiles accidentally. This means we have to keep
 # track of the largefiles that are being reverted so we only pull down
@@ -713,7 +870,7 @@
 # commits. Update the standins then run the original revert, changing
 # the matcher to hit standins instead of largefiles. Based on the
 # resulting standins update the largefiles.
-@eh.wrapfunction(cmdutil, 'revert')
+@eh.wrapfunction(cmdutil, b'revert')
 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
     # Because we put the standins in a bad state (by updating them)
     # and then return them to a correct state, we need to lock to
@@ -726,13 +883,20 @@
             lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
         for lfile in s.deleted:
             fstandin = lfutil.standin(lfile)
-            if (repo.wvfs.exists(fstandin)):
+            if repo.wvfs.exists(fstandin):
                 repo.wvfs.unlink(fstandin)
 
         oldstandins = lfutil.getstandinsstate(repo)
 
-        def overridematch(orig, mctx, pats=(), opts=None, globbed=False,
-                default='relpath', badfn=None):
+        def overridematch(
+            orig,
+            mctx,
+            pats=(),
+            opts=None,
+            globbed=False,
+            default=b'relpath',
+            badfn=None,
+        ):
             if opts is None:
                 opts = {}
             match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
@@ -742,8 +906,9 @@
             # currently doesn't work correctly in that case, this match is
             # called, so the lfdirstate above may not be the correct one for
             # this invocation of match.
-            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
-                                               False)
+            lfdirstate = lfutil.openlfdirstate(
+                mctx.repo().ui, mctx.repo(), False
+            )
 
             wctx = repo[None]
             matchfiles = []
@@ -751,22 +916,24 @@
                 standin = lfutil.standin(f)
                 if standin in ctx or standin in mctx:
                     matchfiles.append(standin)
-                elif standin in wctx or lfdirstate[f] == 'r':
+                elif standin in wctx or lfdirstate[f] == b'r':
                     continue
                 else:
                     matchfiles.append(f)
             m._files = matchfiles
             m._fileset = set(m._files)
             origmatchfn = m.matchfn
+
             def matchfn(f):
                 lfile = lfutil.splitstandin(f)
                 if lfile is not None:
-                    return (origmatchfn(lfile) and
-                            (f in ctx or f in mctx))
+                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                 return origmatchfn(f)
+
             m.matchfn = matchfn
             return m
-        with extensions.wrappedfunction(scmutil, 'match', overridematch):
+
+        with extensions.wrappedfunction(scmutil, b'match', overridematch):
             orig(ui, repo, ctx, parents, *pats, **opts)
 
         newstandins = lfutil.getstandinsstate(repo)
@@ -776,51 +943,77 @@
         # when the target revision is explicitly specified: in that case,
         # an 'n' state and a valid timestamp in the dirstate don't ensure
         # that the target (standin) file is 'clean'.
-        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
-                                normallookup=True)
+        lfcommands.updatelfiles(
+            ui, repo, filelist, printmessage=False, normallookup=True
+        )
+
 
 # after pulling changesets, we need to take some extra care to get
 # largefiles updated remotely
-@eh.wrapcommand('pull',
-    opts=[('', 'all-largefiles', None,
-                _('download all pulled versions of largefiles (DEPRECATED)')),
-          ('', 'lfrev', [],
-                _('download largefiles for these revisions'), _('REV'))])
+@eh.wrapcommand(
+    b'pull',
+    opts=[
+        (
+            b'',
+            b'all-largefiles',
+            None,
+            _(b'download all pulled versions of largefiles (DEPRECATED)'),
+        ),
+        (
+            b'',
+            b'lfrev',
+            [],
+            _(b'download largefiles for these revisions'),
+            _(b'REV'),
+        ),
+    ],
+)
 def overridepull(orig, ui, repo, source=None, **opts):
     revsprepull = len(repo)
     if not source:
-        source = 'default'
+        source = b'default'
     repo.lfpullsource = source
     result = orig(ui, repo, source, **opts)
     revspostpull = len(repo)
     lfrevs = opts.get(r'lfrev', [])
     if opts.get(r'all_largefiles'):
-        lfrevs.append('pulled()')
+        lfrevs.append(b'pulled()')
     if lfrevs and revspostpull > revsprepull:
         numcached = 0
-        repo.firstpulled = revsprepull # for pulled() revset expression
+        repo.firstpulled = revsprepull  # for pulled() revset expression
         try:
             for rev in scmutil.revrange(repo, lfrevs):
-                ui.note(_('pulling largefiles for revision %d\n') % rev)
+                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                 numcached += len(cached)
         finally:
             del repo.firstpulled
-        ui.status(_("%d largefiles cached\n") % numcached)
+        ui.status(_(b"%d largefiles cached\n") % numcached)
     return result
 
-@eh.wrapcommand('push',
-    opts=[('', 'lfrev', [],
-               _('upload largefiles for these revisions'), _('REV'))])
+
+@eh.wrapcommand(
+    b'push',
+    opts=[
+        (
+            b'',
+            b'lfrev',
+            [],
+            _(b'upload largefiles for these revisions'),
+            _(b'REV'),
+        )
+    ],
+)
 def overridepush(orig, ui, repo, *args, **kwargs):
     """Override push command and store --lfrev parameters in opargs"""
     lfrevs = kwargs.pop(r'lfrev', None)
     if lfrevs:
         opargs = kwargs.setdefault(r'opargs', {})
-        opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
+        opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
     return orig(ui, repo, *args, **kwargs)
 
-@eh.wrapfunction(exchange, 'pushoperation')
+
+@eh.wrapfunction(exchange, b'pushoperation')
 def exchangepushoperation(orig, *args, **kwargs):
     """Override pushoperation constructor and store lfrevs parameter"""
     lfrevs = kwargs.pop(r'lfrevs', None)
@@ -828,7 +1021,8 @@
     pushop.lfrevs = lfrevs
     return pushop
 
-@eh.revsetpredicate('pulled()')
+
+@eh.revsetpredicate(b'pulled()')
 def pulledrevsetsymbol(repo, subset, x):
     """Changesets that just has been pulled.
 
@@ -851,24 +1045,35 @@
     try:
         firstpulled = repo.firstpulled
     except AttributeError:
-        raise error.Abort(_("pulled() only available in --lfrev"))
+        raise error.Abort(_(b"pulled() only available in --lfrev"))
     return smartset.baseset([r for r in subset if r >= firstpulled])
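
Taken together with overridepull above, the mechanics are: record len(repo)
before the pull, stash it on repo.firstpulled, and keep only revisions at or
past that mark. A toy worked example with invented numbers:

    # Hypothetical: 100 revisions before the pull, 105 after it.
    revsprepull, revspostpull = 100, 105
    firstpulled = revsprepull
    subset = range(revspostpull)
    assert [r for r in subset if r >= firstpulled] == [100, 101, 102, 103, 104]
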
 
-@eh.wrapcommand('clone',
-    opts=[('', 'all-largefiles', None,
-               _('download all versions of all largefiles'))])
+
+@eh.wrapcommand(
+    b'clone',
+    opts=[
+        (
+            b'',
+            b'all-largefiles',
+            None,
+            _(b'download all versions of all largefiles'),
+        )
+    ],
+)
 def overrideclone(orig, ui, source, dest=None, **opts):
     d = dest
     if d is None:
         d = hg.defaultdest(source)
     if opts.get(r'all_largefiles') and not hg.islocal(d):
-            raise error.Abort(_(
-            '--all-largefiles is incompatible with non-local destination %s') %
-            d)
+        raise error.Abort(
+            _(b'--all-largefiles is incompatible with non-local destination %s')
+            % d
+        )
 
     return orig(ui, source, dest, **opts)
 
-@eh.wrapfunction(hg, 'clone')
+
+@eh.wrapfunction(hg, b'clone')
 def hgclone(orig, ui, opts, *args, **kwargs):
     result = orig(ui, opts, *args, **kwargs)
 
@@ -885,7 +1090,7 @@
         # Caching is implicitly limited to 'rev' option, since the dest repo was
         # truncated at that point.  The user may expect a download count with
         # this option, so attempt it whether or not this is a largefile repo.
-        if opts.get('all_largefiles'):
+        if opts.get(b'all_largefiles'):
             success, missing = lfcommands.downloadlfiles(ui, repo, None)
 
             if missing != 0:
@@ -893,9 +1098,10 @@
 
     return result
 
-@eh.wrapcommand('rebase', extension='rebase')
+
+@eh.wrapcommand(b'rebase', extension=b'rebase')
 def overriderebase(orig, ui, repo, **opts):
-    if not util.safehasattr(repo, '_largefilesenabled'):
+    if not util.safehasattr(repo, b'_largefilesenabled'):
         return orig(ui, repo, **opts)
 
     resuming = opts.get(r'continue')
@@ -907,32 +1113,38 @@
         repo._lfstatuswriters.pop()
         repo._lfcommithooks.pop()
 
-@eh.wrapcommand('archive')
-def overridearchivecmd(orig, ui, repo, dest, **opts):
-    repo.unfiltered().lfstatus = True
 
-    try:
+@eh.wrapcommand(b'archive')
+def overridearchivecmd(orig, ui, repo, dest, **opts):
+    with lfstatus(repo.unfiltered()):
         return orig(ui, repo.unfiltered(), dest, **opts)
-    finally:
-        repo.unfiltered().lfstatus = False
+
 
-@eh.wrapfunction(webcommands, 'archive')
+@eh.wrapfunction(webcommands, b'archive')
 def hgwebarchive(orig, web):
-    web.repo.lfstatus = True
-
-    try:
+    with lfstatus(web.repo):
         return orig(web)
-    finally:
-        web.repo.lfstatus = False
+
 
-@eh.wrapfunction(archival, 'archive')
-def overridearchive(orig, repo, dest, node, kind, decode=True, match=None,
-            prefix='', mtime=None, subrepos=None):
+@eh.wrapfunction(archival, b'archive')
+def overridearchive(
+    orig,
+    repo,
+    dest,
+    node,
+    kind,
+    decode=True,
+    match=None,
+    prefix=b'',
+    mtime=None,
+    subrepos=None,
+):
     # For some reason setting repo.lfstatus in hgwebarchive only changes the
     # unfiltered repo's attr, so check that as well.
     if not repo.lfstatus and not repo.unfiltered().lfstatus:
-        return orig(repo, dest, node, kind, decode, match, prefix, mtime,
-                    subrepos)
+        return orig(
+            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
+        )
 
     # No need to lock because we are only reading history and
     # largefile caches, neither of which are modified.
@@ -940,14 +1152,13 @@
         lfcommands.cachelfiles(repo.ui, repo, node)
 
     if kind not in archival.archivers:
-        raise error.Abort(_("unknown archive type '%s'") % kind)
+        raise error.Abort(_(b"unknown archive type '%s'") % kind)
 
     ctx = repo[node]
 
-    if kind == 'files':
+    if kind == b'files':
         if prefix:
-            raise error.Abort(
-                _('cannot give prefix when archiving to files'))
+            raise error.Abort(_(b'cannot give prefix when archiving to files'))
     else:
         prefix = archival.tidyprefix(dest, kind, prefix)
 
@@ -961,9 +1172,13 @@
 
     archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
 
-    if repo.ui.configbool("ui", "archivemeta"):
-        write('.hg_archival.txt', 0o644, False,
-              lambda: archival.buildmetadata(ctx))
+    if repo.ui.configbool(b"ui", b"archivemeta"):
+        write(
+            b'.hg_archival.txt',
+            0o644,
+            False,
+            lambda: archival.buildmetadata(ctx),
+        )
 
     for f in ctx:
         ff = ctx.flags(f)
@@ -975,33 +1190,37 @@
 
                 if path is None:
                     raise error.Abort(
-                       _('largefile %s not found in repo store or system cache')
-                       % lfile)
+                        _(
+                            b'largefile %s not found in repo store or system cache'
+                        )
+                        % lfile
+                    )
             else:
                 path = lfile
 
             f = lfile
 
             getdata = lambda: util.readfile(path)
-        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
+        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
 
     if subrepos:
         for subpath in sorted(ctx.substate):
             sub = ctx.workingsub(subpath)
             submatch = matchmod.subdirmatcher(subpath, match)
-            subprefix = prefix + subpath + '/'
-            sub._repo.lfstatus = True
-            sub.archive(archiver, subprefix, submatch)
+            subprefix = prefix + subpath + b'/'
+            with lfstatus(sub._repo):
+                sub.archive(archiver, subprefix, submatch)
 
     archiver.done()
 
-@eh.wrapfunction(subrepo.hgsubrepo, 'archive')
+
+@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
-    lfenabled = util.safehasattr(repo._repo, '_largefilesenabled')
+    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
     if not lfenabled or not repo._repo.lfstatus:
         return orig(repo, archiver, prefix, match, decode)
 
-    repo._get(repo._state + ('hg',))
+    repo._get(repo._state + (b'hg',))
     rev = repo._state[1]
     ctx = repo._repo[rev]
 
@@ -1029,8 +1248,11 @@
 
                 if path is None:
                     raise error.Abort(
-                       _('largefile %s not found in repo store or system cache')
-                       % lfile)
+                        _(
+                            b'largefile %s not found in repo store or system cache'
+                        )
+                        % lfile
+                    )
             else:
                 path = lfile
 
@@ -1038,49 +1260,54 @@
 
             getdata = lambda: util.readfile(os.path.join(prefix, path))
 
-        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
+        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
 
     for subpath in sorted(ctx.substate):
         sub = ctx.workingsub(subpath)
         submatch = matchmod.subdirmatcher(subpath, match)
-        subprefix = prefix + subpath + '/'
-        sub._repo.lfstatus = True
-        sub.archive(archiver, subprefix, submatch, decode)
+        subprefix = prefix + subpath + b'/'
+        with lfstatus(sub._repo):
+            sub.archive(archiver, subprefix, submatch, decode)
+
 
 # If a largefile is modified, the change is not reflected in its
 # standin until a commit. cmdutil.bailifchanged() raises an exception
 # if the repo has uncommitted changes. Wrap it to also check if
 # largefiles were changed. This is used by bisect, backout and fetch.
-@eh.wrapfunction(cmdutil, 'bailifchanged')
+@eh.wrapfunction(cmdutil, b'bailifchanged')
 def overridebailifchanged(orig, repo, *args, **kwargs):
     orig(repo, *args, **kwargs)
-    repo.lfstatus = True
-    s = repo.status()
-    repo.lfstatus = False
+    with lfstatus(repo):
+        s = repo.status()
     if s.modified or s.added or s.removed or s.deleted:
-        raise error.Abort(_('uncommitted changes'))
+        raise error.Abort(_(b'uncommitted changes'))
 
-@eh.wrapfunction(cmdutil, 'postcommitstatus')
+
+@eh.wrapfunction(cmdutil, b'postcommitstatus')
 def postcommitstatus(orig, repo, *args, **kwargs):
-    repo.lfstatus = True
-    try:
+    with lfstatus(repo):
         return orig(repo, *args, **kwargs)
-    finally:
-        repo.lfstatus = False
+
 
-@eh.wrapfunction(cmdutil, 'forget')
-def cmdutilforget(orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun,
-                  interactive):
+@eh.wrapfunction(cmdutil, b'forget')
+def cmdutilforget(
+    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
+):
     normalmatcher = composenormalfilematcher(match, repo[None].manifest())
-    bad, forgot = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly,
-                       dryrun, interactive)
+    bad, forgot = orig(
+        ui,
+        repo,
+        normalmatcher,
+        prefix,
+        uipathfn,
+        explicitonly,
+        dryrun,
+        interactive,
+    )
     m = composelargefilematcher(match, repo[None].manifest())
 
-    try:
-        repo.lfstatus = True
+    with lfstatus(repo):
         s = repo.status(match=m, clean=True)
-    finally:
-        repo.lfstatus = False
     manifest = repo[None].manifest()
     forget = sorted(s.modified + s.added + s.deleted + s.clean)
     forget = [f for f in forget if lfutil.standin(f) in manifest]
@@ -1088,20 +1315,21 @@
     for f in forget:
         fstandin = lfutil.standin(f)
         if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
-            ui.warn(_('not removing %s: file is already untracked\n')
-                    % uipathfn(f))
+            ui.warn(
+                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
+            )
             bad.append(f)
 
     for f in forget:
         if ui.verbose or not m.exact(f):
-            ui.status(_('removing %s\n') % uipathfn(f))
+            ui.status(_(b'removing %s\n') % uipathfn(f))
 
     # Need to lock because standin files are deleted then removed from the
     # repository and we could race in-between.
     with repo.wlock():
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         for f in forget:
-            if lfdirstate[f] == 'a':
+            if lfdirstate[f] == b'a':
                 lfdirstate.drop(f)
             else:
                 lfdirstate.remove(f)
@@ -1115,6 +1343,7 @@
     forgot.extend(f for f in forget if f not in rejected)
     return bad, forgot
 
+
 def _getoutgoings(repo, other, missing, addfunc):
     """get pairs of filename and largefile hash in outgoing revisions
     in 'missing'.
@@ -1126,96 +1355,113 @@
     """
     knowns = set()
     lfhashes = set()
+
     def dedup(fn, lfhash):
         k = (fn, lfhash)
         if k not in knowns:
             knowns.add(k)
             lfhashes.add(lfhash)
+
     lfutil.getlfilestoupload(repo, missing, dedup)
     if lfhashes:
         lfexists = storefactory.openstore(repo, other).exists(lfhashes)
         for fn, lfhash in knowns:
-            if not lfexists[lfhash]: # lfhash doesn't exist on "other"
+            if not lfexists[lfhash]:  # lfhash doesn't exist on "other"
                 addfunc(fn, lfhash)
 
+
 def outgoinghook(ui, repo, other, opts, missing):
-    if opts.pop('large', None):
+    if opts.pop(b'large', None):
         lfhashes = set()
         if ui.debugflag:
             toupload = {}
+
             def addfunc(fn, lfhash):
                 if fn not in toupload:
                     toupload[fn] = []
                 toupload[fn].append(lfhash)
                 lfhashes.add(lfhash)
+
             def showhashes(fn):
                 for lfhash in sorted(toupload[fn]):
-                    ui.debug('    %s\n' % (lfhash))
+                    ui.debug(b'    %s\n' % lfhash)
+
         else:
             toupload = set()
+
             def addfunc(fn, lfhash):
                 toupload.add(fn)
                 lfhashes.add(lfhash)
+
             def showhashes(fn):
                 pass
+
         _getoutgoings(repo, other, missing, addfunc)
 
         if not toupload:
-            ui.status(_('largefiles: no files to upload\n'))
+            ui.status(_(b'largefiles: no files to upload\n'))
         else:
-            ui.status(_('largefiles to upload (%d entities):\n')
-                      % (len(lfhashes)))
+            ui.status(
+                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
+            )
             for file in sorted(toupload):
-                ui.status(lfutil.splitstandin(file) + '\n')
+                ui.status(lfutil.splitstandin(file) + b'\n')
                 showhashes(file)
-            ui.status('\n')
+            ui.status(b'\n')
+
 
-@eh.wrapcommand('outgoing',
-    opts=[('', 'large', None, _('display outgoing largefiles'))])
+@eh.wrapcommand(
+    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
+)
 def _outgoingcmd(orig, *args, **kwargs):
     # Nothing to do here other than add the extra help option; the hook above
     # processes it.
     return orig(*args, **kwargs)
 
+
 def summaryremotehook(ui, repo, opts, changes):
-    largeopt = opts.get('large', False)
+    largeopt = opts.get(b'large', False)
     if changes is None:
         if largeopt:
-            return (False, True) # only outgoing check is needed
+            return (False, True)  # only outgoing check is needed
         else:
             return (False, False)
     elif largeopt:
         url, branch, peer, outgoing = changes[1]
         if peer is None:
             # i18n: column positioning for "hg summary"
-            ui.status(_('largefiles: (no remote repo)\n'))
+            ui.status(_(b'largefiles: (no remote repo)\n'))
             return
 
         toupload = set()
         lfhashes = set()
+
         def addfunc(fn, lfhash):
             toupload.add(fn)
             lfhashes.add(lfhash)
+
         _getoutgoings(repo, peer, outgoing.missing, addfunc)
 
         if not toupload:
             # i18n: column positioning for "hg summary"
-            ui.status(_('largefiles: (no files to upload)\n'))
+            ui.status(_(b'largefiles: (no files to upload)\n'))
         else:
             # i18n: column positioning for "hg summary"
-            ui.status(_('largefiles: %d entities for %d files to upload\n')
-                      % (len(lfhashes), len(toupload)))
+            ui.status(
+                _(b'largefiles: %d entities for %d files to upload\n')
+                % (len(lfhashes), len(toupload))
+            )
 
-@eh.wrapcommand('summary',
-    opts=[('', 'large', None, _('display outgoing largefiles'))])
+
+@eh.wrapcommand(
+    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
+)
 def overridesummary(orig, ui, repo, *pats, **opts):
-    try:
-        repo.lfstatus = True
+    with lfstatus(repo):
         orig(ui, repo, *pats, **opts)
-    finally:
-        repo.lfstatus = False
+
 
-@eh.wrapfunction(scmutil, 'addremove')
+@eh.wrapfunction(scmutil, b'addremove')
 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
     if opts is None:
         opts = {}
@@ -1223,8 +1469,13 @@
         return orig(repo, matcher, prefix, uipathfn, opts)
     # Get the list of missing largefiles so we can remove them
     lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-    unsure, s = lfdirstate.status(matchmod.always(), subrepos=[],
-                                  ignored=False, clean=False, unknown=False)
+    unsure, s = lfdirstate.status(
+        matchmod.always(),
+        subrepos=[],
+        ignored=False,
+        clean=False,
+        unknown=False,
+    )
 
     # Call into the normal remove code, but we want the removal of the standin
     # itself to be handled by the original addremove.  Monkey patching here makes sure
@@ -1240,21 +1491,30 @@
         matchfn = m.matchfn
         m.matchfn = lambda f: f in s.deleted and matchfn(f)
 
-        removelargefiles(repo.ui, repo, True, m, uipathfn, opts.get('dry_run'),
-                         **pycompat.strkwargs(opts))
+        removelargefiles(
+            repo.ui,
+            repo,
+            True,
+            m,
+            uipathfn,
+            opts.get(b'dry_run'),
+            **pycompat.strkwargs(opts)
+        )
     # Call into the normal add code, and any files that *should* be added as
     # largefiles will be
-    added, bad = addlargefiles(repo.ui, repo, True, matcher, uipathfn,
-                               **pycompat.strkwargs(opts))
+    added, bad = addlargefiles(
+        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
+    )
     # Now that we've handled largefiles, hand off to the original addremove
     # function to take care of the rest.  Make sure it doesn't do anything with
     # largefiles by passing a matcher that will ignore them.
     matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
     return orig(repo, matcher, prefix, uipathfn, opts)
 
+
 # Calling purge with --all will cause the largefiles to be deleted.
 # Override repo.status to prevent this from happening.
-@eh.wrapcommand('purge', extension='purge')
+@eh.wrapcommand(b'purge', extension=b'purge')
 def overridepurge(orig, ui, repo, *dirs, **opts):
     # XXX Monkey patching a repoview will not work. The assigned attribute will
     # be set on the unfiltered repo, but we will only lookup attributes in the
@@ -1267,35 +1527,50 @@
     # cleaner instead.
     repo = repo.unfiltered()
     oldstatus = repo.status
-    def overridestatus(node1='.', node2=None, match=None, ignored=False,
-                        clean=False, unknown=False, listsubrepos=False):
-        r = oldstatus(node1, node2, match, ignored, clean, unknown,
-                      listsubrepos)
+
+    def overridestatus(
+        node1=b'.',
+        node2=None,
+        match=None,
+        ignored=False,
+        clean=False,
+        unknown=False,
+        listsubrepos=False,
+    ):
+        r = oldstatus(
+            node1, node2, match, ignored, clean, unknown, listsubrepos
+        )
         lfdirstate = lfutil.openlfdirstate(ui, repo)
-        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
-        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
-        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
-                              unknown, ignored, r.clean)
+        unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
+        ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
+        return scmutil.status(
+            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
+        )
+
     repo.status = overridestatus
     orig(ui, repo, *dirs, **opts)
     repo.status = oldstatus
 
-@eh.wrapcommand('rollback')
+
+@eh.wrapcommand(b'rollback')
 def overriderollback(orig, ui, repo, **opts):
     with repo.wlock():
         before = repo.dirstate.parents()
-        orphans = set(f for f in repo.dirstate
-                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
+        orphans = set(
+            f
+            for f in repo.dirstate
+            if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
+        )
         result = orig(ui, repo, **opts)
         after = repo.dirstate.parents()
         if before == after:
-            return result # no need to restore standins
+            return result  # no need to restore standins
 
-        pctx = repo['.']
+        pctx = repo[b'.']
         for f in repo.dirstate:
             if lfutil.isstandin(f):
                 orphans.discard(f)
-                if repo.dirstate[f] == 'r':
+                if repo.dirstate[f] == b'r':
                     repo.wvfs.unlinkpath(f, ignoremissing=True)
                 elif f in pctx:
                     fctx = pctx[f]
@@ -1303,7 +1578,7 @@
                 else:
                     # the content of the standin is not so important in the
                     # 'a', 'm' or 'n' (coming from the 2nd parent) cases
-                    lfutil.writestandin(repo, f, '', False)
+                    lfutil.writestandin(repo, f, b'', False)
         for standin in orphans:
             repo.wvfs.unlinkpath(standin, ignoremissing=True)
 
@@ -1318,7 +1593,8 @@
         lfdirstate.write()
     return result
 
-@eh.wrapcommand('transplant', extension='transplant')
+
+@eh.wrapcommand(b'transplant', extension=b'transplant')
 def overridetransplant(orig, ui, repo, *revs, **opts):
     resuming = opts.get(r'continue')
     repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
@@ -1330,14 +1606,16 @@
         repo._lfcommithooks.pop()
     return result
 
-@eh.wrapcommand('cat')
+
+@eh.wrapcommand(b'cat')
 def overridecat(orig, ui, repo, file1, *pats, **opts):
     opts = pycompat.byteskwargs(opts)
-    ctx = scmutil.revsingle(repo, opts.get('rev'))
+    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
     err = 1
     notbad = set()
     m = scmutil.match(ctx, (file1,) + pats, opts)
     origmatchfn = m.matchfn
+
     def lfmatchfn(f):
         if origmatchfn(f):
             return True
@@ -1346,14 +1624,18 @@
             return False
         notbad.add(lf)
         return origmatchfn(lf)
+
     m.matchfn = lfmatchfn
     origbadfn = m.bad
+
     def lfbadfn(f, msg):
         if not f in notbad:
             origbadfn(f, msg)
+
     m.bad = lfbadfn
 
     origvisitdirfn = m.visitdir
+
     def lfvisitdirfn(dir):
         if dir == lfutil.shortname:
             return True
@@ -1364,15 +1646,16 @@
         if lf is None:
             return False
         return origvisitdirfn(lf)
+
     m.visitdir = lfvisitdirfn
 
     for f in ctx.walk(m):
-        with cmdutil.makefileobj(ctx, opts.get('output'), pathname=f) as fp:
+        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
             lf = lfutil.splitstandin(f)
             if lf is None or origmatchfn(f):
                 # duplicating unreachable code from commands.cat
                 data = ctx[f].data()
-                if opts.get('decode'):
+                if opts.get(b'decode'):
                     data = repo.wwritedata(f, data)
                 fp.write(data)
             else:
@@ -1382,18 +1665,22 @@
                     success, missing = store.get([(lf, hash)])
                     if len(success) != 1:
                         raise error.Abort(
-                            _('largefile %s is not in cache and could not be '
-                              'downloaded')  % lf)
+                            _(
+                                b'largefile %s is not in cache and could not be '
+                                b'downloaded'
+                            )
+                            % lf
+                        )
                 path = lfutil.usercachepath(repo.ui, hash)
-                with open(path, "rb") as fpin:
+                with open(path, b"rb") as fpin:
                     for chunk in util.filechunkiter(fpin):
                         fp.write(chunk)
         err = 0
     return err
 
-@eh.wrapfunction(merge, 'update')
-def mergeupdate(orig, repo, node, branchmerge, force,
-                *args, **kwargs):
+
+@eh.wrapfunction(merge, b'update')
+def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
     matcher = kwargs.get(r'matcher', None)
     # note if this is a partial update
     partial = matcher and not matcher.always()
@@ -1414,10 +1701,15 @@
         # (*1) deprecated, but used internally (e.g.: "rebase --collapse")
 
         lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-        unsure, s = lfdirstate.status(matchmod.always(), subrepos=[],
-                                      ignored=False, clean=True, unknown=False)
+        unsure, s = lfdirstate.status(
+            matchmod.always(),
+            subrepos=[],
+            ignored=False,
+            clean=True,
+            unknown=False,
+        )
         oldclean = set(s.clean)
-        pctx = repo['.']
+        pctx = repo[b'.']
         dctx = repo[node]
         for lfile in unsure + s.modified:
             lfileabs = repo.wvfs.join(lfile)
@@ -1425,10 +1717,12 @@
                 continue
             lfhash = lfutil.hashfile(lfileabs)
             standin = lfutil.standin(lfile)
-            lfutil.writestandin(repo, standin, lfhash,
-                                lfutil.getexecutable(lfileabs))
-            if (standin in pctx and
-                lfhash == lfutil.readasstandin(pctx[standin])):
+            lfutil.writestandin(
+                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
+            )
+            if standin in pctx and lfhash == lfutil.readasstandin(
+                pctx[standin]
+            ):
                 oldclean.add(lfile)
         for lfile in s.added:
             fstandin = lfutil.standin(lfile)
@@ -1462,12 +1756,14 @@
         if branchmerge or force or partial:
             filelist.extend(s.deleted + s.removed)
 
-        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
-                                normallookup=partial)
+        lfcommands.updatelfiles(
+            repo.ui, repo, filelist=filelist, normallookup=partial
+        )
 
         return result
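
The standin refresh above (lfutil.hashfile plus lfutil.writestandin) works
because a standin is just a tiny file holding the hex digest of its
largefile. A simplified sketch of the hashing step, assuming the
chunked-sha1 scheme the extension uses (the real helper is lfutil.hashfile):

    import hashlib

    def hashfile(path):
        # Stream the largefile in chunks so huge files need not fit in
        # memory; return the hex digest as bytes, as standins store it.
        h = hashlib.sha1()
        with open(path, 'rb') as fp:
            for chunk in iter(lambda: fp.read(128 * 1024), b''):
                h.update(chunk)
        return h.hexdigest().encode('ascii')
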
 
-@eh.wrapfunction(scmutil, 'marktouched')
+
+@eh.wrapfunction(scmutil, b'marktouched')
 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
     result = orig(repo, files, *args, **kwargs)
 
@@ -1477,28 +1773,36 @@
         if lf is not None:
             filelist.append(lf)
     if filelist:
-        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
-                                printmessage=False, normallookup=True)
+        lfcommands.updatelfiles(
+            repo.ui,
+            repo,
+            filelist=filelist,
+            printmessage=False,
+            normallookup=True,
+        )
 
     return result
 
-@eh.wrapfunction(upgrade, 'preservedrequirements')
-@eh.wrapfunction(upgrade, 'supporteddestrequirements')
+
+@eh.wrapfunction(upgrade, b'preservedrequirements')
+@eh.wrapfunction(upgrade, b'supporteddestrequirements')
 def upgraderequirements(orig, repo):
     reqs = orig(repo)
-    if 'largefiles' in repo.requirements:
-        reqs.add('largefiles')
+    if b'largefiles' in repo.requirements:
+        reqs.add(b'largefiles')
     return reqs
 
-_lfscheme = 'largefile://'
+
+_lfscheme = b'largefile://'
 
-@eh.wrapfunction(urlmod, 'open')
+
+@eh.wrapfunction(urlmod, b'open')
 def openlargefile(orig, ui, url_, data=None):
     if url_.startswith(_lfscheme):
         if data:
-            msg = "cannot use data on a 'largefile://' url"
+            msg = b"cannot use data on a 'largefile://' url"
             raise error.ProgrammingError(msg)
-        lfid = url_[len(_lfscheme):]
+        lfid = url_[len(_lfscheme) :]
         return storefactory.getlfile(ui, lfid)
     else:
         return orig(ui, url_, data=data)
--- a/hgext/largefiles/proto.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/largefiles/proto.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,6 +8,7 @@
 import re
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 
 from mercurial import (
     error,
@@ -19,16 +20,16 @@
     wireprotov1server,
 )
 
-from . import (
-    lfutil,
-)
+from . import lfutil
 
 urlerr = util.urlerr
 urlreq = util.urlreq
 
-LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
-                           '\n\nPlease enable it in your Mercurial config '
-                           'file.\n')
+LARGEFILES_REQUIRED_MSG = (
+    b'\nThis repository uses the largefiles extension.'
+    b'\n\nPlease enable it in your Mercurial config '
+    b'file.\n'
+)
 
 eh = exthelper.exthelper()
 
@@ -36,6 +37,7 @@
 ssholdcallstream = None
 httpoldcallstream = None
 
+
 def putlfile(repo, proto, sha):
     '''Server command for putting a largefile into a repository's local store
     and into the user cache.'''
@@ -49,27 +51,32 @@
                 tmpfp.write(p)
             tmpfp._fp.seek(0)
             if sha != lfutil.hexsha1(tmpfp._fp):
-                raise IOError(0, _('largefile contents do not match hash'))
+                raise IOError(0, _(b'largefile contents do not match hash'))
             tmpfp.close()
             lfutil.linktousercache(repo, sha)
         except IOError as e:
-            repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') %
-                         (sha, e.strerror))
+            repo.ui.warn(
+                _(b'largefiles: failed to put %s into store: %s\n')
+                % (sha, e.strerror)
+            )
             return wireprototypes.pushres(
-                1, output.getvalue() if output else '')
+                1, output.getvalue() if output else b''
+            )
         finally:
             tmpfp.discard()
 
-    return wireprototypes.pushres(0, output.getvalue() if output else '')
+    return wireprototypes.pushres(0, output.getvalue() if output else b'')
+
 
 def getlfile(repo, proto, sha):
     '''Server command for retrieving a largefile from the repository-local
     cache or user cache.'''
     filename = lfutil.findfile(repo, sha)
     if not filename:
-        raise error.Abort(_('requested largefile %s not present in cache')
-                          % sha)
-    f = open(filename, 'rb')
+        raise error.Abort(
+            _(b'requested largefile %s not present in cache') % sha
+        )
+    f = open(filename, b'rb')
     length = os.fstat(f.fileno())[6]
 
     # Since we can't set an HTTP content-length header here, and
@@ -78,11 +85,13 @@
     # just send the length on the first line of the response, like the
     # ssh proto does for string responses.
     def generator():
-        yield '%d\n' % length
+        yield b'%d\n' % length
         for chunk in util.filechunkiter(f):
             yield chunk
+
     return wireprototypes.streamreslegacy(gen=generator())
 
+
 def statlfile(repo, proto, sha):
     '''Server command for checking if a largefile is present - returns '2\n' if
     the largefile is missing, '0\n' if it seems to be in good condition.
@@ -92,8 +101,9 @@
     server side.'''
     filename = lfutil.findfile(repo, sha)
     if not filename:
-        return wireprototypes.bytesresponse('2\n')
-    return wireprototypes.bytesresponse('0\n')
+        return wireprototypes.bytesresponse(b'2\n')
+    return wireprototypes.bytesresponse(b'0\n')
+
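# A minimal sketch of interpreting these status codes on the client,
# assuming a `peer` already extended by wirereposetup() below
# (0 = present and healthy, 2 = missing, 1 reserved for bad checksums):
#
#   def available(peer, sha):
#       return peer.statlfile(sha) == 0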
 
 def wirereposetup(ui, repo):
     class lfileswirerepository(repo.__class__):
@@ -102,41 +112,48 @@
             # input file-like into a bundle before sending it, so we can't use
             # it ...
             if issubclass(self.__class__, httppeer.httppeer):
-                res = self._call('putlfile', data=fd, sha=sha,
-                    headers={r'content-type': r'application/mercurial-0.1'})
+                res = self._call(
+                    b'putlfile',
+                    data=fd,
+                    sha=sha,
+                    headers={r'content-type': r'application/mercurial-0.1'},
+                )
                 try:
-                    d, output = res.split('\n', 1)
+                    d, output = res.split(b'\n', 1)
                     for l in output.splitlines(True):
-                        self.ui.warn(_('remote: '), l) # assume l ends with \n
+                        self.ui.warn(_(b'remote: '), l)  # assume l ends with \n
                     return int(d)
                 except ValueError:
-                    self.ui.warn(_('unexpected putlfile response: %r\n') % res)
+                    self.ui.warn(_(b'unexpected putlfile response: %r\n') % res)
                     return 1
             # ... but we can't use sshrepository._call because the data=
             # argument won't get sent, and _callpush does exactly what we want
             # in this case: send the data straight through
             else:
                 try:
-                    ret, output = self._callpush("putlfile", fd, sha=sha)
-                    if ret == "":
-                        raise error.ResponseError(_('putlfile failed:'),
-                                output)
+                    ret, output = self._callpush(b"putlfile", fd, sha=sha)
+                    if ret == b"":
+                        raise error.ResponseError(
+                            _(b'putlfile failed:'), output
+                        )
                     return int(ret)
                 except IOError:
                     return 1
                 except ValueError:
                     raise error.ResponseError(
-                        _('putlfile failed (unexpected response):'), ret)
+                        _(b'putlfile failed (unexpected response):'), ret
+                    )
 
         def getlfile(self, sha):
             """returns an iterable with the chunks of the file with sha sha"""
-            stream = self._callstream("getlfile", sha=sha)
+            stream = self._callstream(b"getlfile", sha=sha)
             length = stream.readline()
             try:
                 length = int(length)
             except ValueError:
-                self._abort(error.ResponseError(_("unexpected response:"),
-                                                length))
+                self._abort(
+                    error.ResponseError(_(b"unexpected response:"), length)
+                )
 
             # SSH streams will block if reading more than length
             for chunk in util.filechunkiter(stream, limit=length):
@@ -146,13 +163,14 @@
             if issubclass(self.__class__, httppeer.httppeer):
                 chunk = stream.read(1)
                 if chunk:
-                    self._abort(error.ResponseError(_("unexpected response:"),
-                                                    chunk))
+                    self._abort(
+                        error.ResponseError(_(b"unexpected response:"), chunk)
+                    )
 
         @wireprotov1peer.batchable
         def statlfile(self, sha):
             f = wireprotov1peer.future()
-            result = {'sha': sha}
+            result = {b'sha': sha}
             yield result, f
             try:
                 yield int(f.value)
@@ -165,14 +183,16 @@
 
     repo.__class__ = lfileswirerepository
 
+
 # advertise the largefiles=serve capability
-@eh.wrapfunction(wireprotov1server, '_capabilities')
+@eh.wrapfunction(wireprotov1server, b'_capabilities')
 def _capabilities(orig, repo, proto):
     '''announce largefile server capability'''
     caps = orig(repo, proto)
-    caps.append('largefiles=serve')
+    caps.append(b'largefiles=serve')
     return caps
 
+
 def heads(orig, repo, proto):
     '''Wrap server command - largefile-capable clients will know to call
     lheads instead'''
@@ -181,18 +201,21 @@
 
     return orig(repo, proto)
 
+
 def sshrepocallstream(self, cmd, **args):
-    if cmd == 'heads' and self.capable('largefiles'):
-        cmd = 'lheads'
-    if cmd == 'batch' and self.capable('largefiles'):
-        args[r'cmds'] = args[r'cmds'].replace('heads ', 'lheads ')
+    if cmd == b'heads' and self.capable(b'largefiles'):
+        cmd = b'lheads'
+    if cmd == b'batch' and self.capable(b'largefiles'):
+        args[r'cmds'] = args[r'cmds'].replace(b'heads ', b'lheads ')
     return ssholdcallstream(self, cmd, **args)
 
+
 headsre = re.compile(br'(^|;)heads\b')
 
+
 def httprepocallstream(self, cmd, **args):
-    if cmd == 'heads' and self.capable('largefiles'):
-        cmd = 'lheads'
-    if cmd == 'batch' and self.capable('largefiles'):
-        args[r'cmds'] = headsre.sub('lheads', args[r'cmds'])
+    if cmd == b'heads' and self.capable(b'largefiles'):
+        cmd = b'lheads'
+    if cmd == b'batch' and self.capable(b'largefiles'):
+        args[r'cmds'] = headsre.sub(b'lheads', args[r'cmds'])
     return httpoldcallstream(self, cmd, **args)
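# A small illustration of the batch rewriting above, for the case where
# 'heads' opens the encoded batch string:
#
#   >>> headsre = re.compile(br'(^|;)heads\b')
#   >>> headsre.sub(b'lheads', b'heads ;known nodes=')
#   b'lheads ;known nodes='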
--- a/hgext/largefiles/remotestore.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/largefiles/remotestore.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,12 +11,11 @@
 
 from mercurial import (
     error,
+    pycompat,
     util,
 )
 
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 from . import (
     basestore,
@@ -27,8 +26,10 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
+
 class remotestore(basestore.basestore):
     '''a largefile store accessed over a network'''
+
     def __init__(self, ui, repo, url):
         super(remotestore, self).__init__(ui, repo, url)
         self._lstore = None
@@ -38,25 +39,32 @@
     def put(self, source, hash):
         if self.sendfile(source, hash):
             raise error.Abort(
-                _('remotestore: could not put %s to remote store %s')
-                % (source, util.hidepassword(self.url)))
+                _(b'remotestore: could not put %s to remote store %s')
+                % (source, util.hidepassword(self.url))
+            )
         self.ui.debug(
-            _('remotestore: put %s to remote store %s\n')
-            % (source, util.hidepassword(self.url)))
+            _(b'remotestore: put %s to remote store %s\n')
+            % (source, util.hidepassword(self.url))
+        )
 
     def exists(self, hashes):
-        return dict((h, s == 0) for (h, s) in # dict-from-generator
-                    self._stat(hashes).iteritems())
+        return dict(
+            (h, s == 0)
+            for (h, s) in pycompat.iteritems(
+                self._stat(hashes)
+            )  # dict-from-generator
+        )
 
     def sendfile(self, filename, hash):
-        self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
+        self.ui.debug(b'remotestore: sendfile(%s, %s)\n' % (filename, hash))
         try:
             with lfutil.httpsendfile(self.ui, filename) as fd:
                 return self._put(hash, fd)
         except IOError as e:
             raise error.Abort(
-                _('remotestore: could not open file %s: %s')
-                % (filename, stringutil.forcebytestr(e)))
+                _(b'remotestore: could not open file %s: %s')
+                % (filename, stringutil.forcebytestr(e))
+            )
 
     def _getfile(self, tmpfile, filename, hash):
         try:
@@ -64,17 +72,20 @@
         except urlerr.httperror as e:
             # 401s get converted to error.Aborts; everything else is fine being
             # turned into a StoreError
-            raise basestore.StoreError(filename, hash, self.url,
-                                       stringutil.forcebytestr(e))
+            raise basestore.StoreError(
+                filename, hash, self.url, stringutil.forcebytestr(e)
+            )
         except urlerr.urlerror as e:
             # This usually indicates a connection problem, so don't
             # keep trying with the other files... they will probably
             # all fail too.
-            raise error.Abort('%s: %s' %
-                             (util.hidepassword(self.url), e.reason))
+            raise error.Abort(
+                b'%s: %s' % (util.hidepassword(self.url), e.reason)
+            )
         except IOError as e:
-            raise basestore.StoreError(filename, hash, self.url,
-                                       stringutil.forcebytestr(e))
+            raise basestore.StoreError(
+                filename, hash, self.url, stringutil.forcebytestr(e)
+            )
 
         return lfutil.copyandhash(chunks, tmpfile)
 
@@ -85,17 +96,24 @@
 
     def _verifyfiles(self, contents, filestocheck):
         failed = False
-        expectedhashes = [expectedhash
-                          for cset, filename, expectedhash in filestocheck]
+        expectedhashes = [
+            expectedhash for cset, filename, expectedhash in filestocheck
+        ]
         localhashes = self._hashesavailablelocally(expectedhashes)
-        stats = self._stat([expectedhash for expectedhash in expectedhashes
-                            if expectedhash not in localhashes])
+        stats = self._stat(
+            [
+                expectedhash
+                for expectedhash in expectedhashes
+                if expectedhash not in localhashes
+            ]
+        )
 
         for cset, filename, expectedhash in filestocheck:
             if expectedhash in localhashes:
                 filetocheck = (cset, filename, expectedhash)
-                verifyresult = self._lstore._verifyfiles(contents,
-                                                         [filetocheck])
+                verifyresult = self._lstore._verifyfiles(
+                    contents, [filetocheck]
+                )
                 if verifyresult:
                     failed = True
             else:
@@ -103,30 +121,33 @@
                 if stat:
                     if stat == 1:
                         self.ui.warn(
-                            _('changeset %s: %s: contents differ\n')
-                            % (cset, filename))
+                            _(b'changeset %s: %s: contents differ\n')
+                            % (cset, filename)
+                        )
                         failed = True
                     elif stat == 2:
                         self.ui.warn(
-                            _('changeset %s: %s missing\n')
-                            % (cset, filename))
+                            _(b'changeset %s: %s missing\n') % (cset, filename)
+                        )
                         failed = True
                     else:
-                        raise RuntimeError('verify failed: unexpected response '
-                                           'from statlfile (%r)' % stat)
+                        raise RuntimeError(
+                            b'verify failed: unexpected response '
+                            b'from statlfile (%r)' % stat
+                        )
         return failed
 
     def _put(self, hash, fd):
         '''Put file with the given hash in the remote store.'''
-        raise NotImplementedError('abstract method')
+        raise NotImplementedError(b'abstract method')
 
     def _get(self, hash):
         '''Get an iterator for content with the given hash.'''
-        raise NotImplementedError('abstract method')
+        raise NotImplementedError(b'abstract method')
 
     def _stat(self, hashes):
         '''Get information about the availability of the files specified
         by hashes in the remote store. Return a dictionary mapping each
         hash to a return code, where 0 means the file is available and
         any other value means it is not.'''
-        raise NotImplementedError('abstract method')
+        raise NotImplementedError(b'abstract method')
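# A minimal, hypothetical subclass sketching the contract above (an
# in-memory store, not shipped with Mercurial): put() treats a truthy
# sendfile()/_put() result as failure, and _stat() reuses the statlfile
# codes (0 = available, 2 = missing):
#
#   class memorystore(remotestore):
#       def __init__(self, ui, repo, url, blobs=None):
#           super(memorystore, self).__init__(ui, repo, url)
#           self._blobs = blobs if blobs is not None else {}  # {hash: bytes}
#
#       def _put(self, hash, fd):
#           self._blobs[hash] = fd.read()
#           return 0  # zero means success
#
#       def _get(self, hash):
#           return iter([self._blobs[hash]])  # a single chunk
#
#       def _stat(self, hashes):
#           return {h: (0 if h in self._blobs else 2) for h in hashes}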
--- a/hgext/largefiles/reposetup.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/largefiles/reposetup.py	Mon Oct 21 11:09:48 2019 -0400
@@ -25,6 +25,7 @@
     lfutil,
 )
 
+
 def reposetup(ui, repo):
     # wire repositories should be given new wireproto functions
     # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
@@ -36,6 +37,7 @@
         _largefilesenabled = True
 
         lfstatus = False
+
         def status_nolfiles(self, *args, **kwargs):
             return super(lfilesrepo, self).status(*args, **kwargs)
 
@@ -46,19 +48,25 @@
         def __getitem__(self, changeid):
             ctx = super(lfilesrepo, self).__getitem__(changeid)
             if self.lfstatus:
+
                 class lfilesctx(ctx.__class__):
                     def files(self):
                         filenames = super(lfilesctx, self).files()
                         return [lfutil.splitstandin(f) or f for f in filenames]
+
                     def manifest(self):
                         man1 = super(lfilesctx, self).manifest()
+
                         class lfilesmanifest(man1.__class__):
                             def __contains__(self, filename):
                                 orig = super(lfilesmanifest, self).__contains__
-                                return (orig(filename) or
-                                        orig(lfutil.standin(filename)))
+                                return orig(filename) or orig(
+                                    lfutil.standin(filename)
+                                )
+
                         man1.__class__ = lfilesmanifest
                         return man1
+
                     def filectx(self, path, fileid=None, filelog=None):
                         orig = super(lfilesctx, self).filectx
                         try:
@@ -70,13 +78,15 @@
                             # Adding a null character will cause Mercurial to
                             # identify this as a binary file.
                             if filelog is not None:
-                                result = orig(lfutil.standin(path), fileid,
-                                              filelog)
+                                result = orig(
+                                    lfutil.standin(path), fileid, filelog
+                                )
                             else:
                                 result = orig(lfutil.standin(path), fileid)
                             olddata = result.data
-                            result.data = lambda: olddata() + '\0'
+                            result.data = lambda: olddata() + b'\0'
                         return result
+
                 ctx.__class__ = lfilesctx
             return ctx
 
@@ -87,20 +97,35 @@
         # XXX large file status is buggy when used on repo proxy.
         # XXX this needs to be investigated.
         @localrepo.unfilteredmethod
-        def status(self, node1='.', node2=None, match=None, ignored=False,
-                clean=False, unknown=False, listsubrepos=False):
+        def status(
+            self,
+            node1=b'.',
+            node2=None,
+            match=None,
+            ignored=False,
+            clean=False,
+            unknown=False,
+            listsubrepos=False,
+        ):
             listignored, listclean, listunknown = ignored, clean, unknown
             orig = super(lfilesrepo, self).status
             if not self.lfstatus:
-                return orig(node1, node2, match, listignored, listclean,
-                            listunknown, listsubrepos)
+                return orig(
+                    node1,
+                    node2,
+                    match,
+                    listignored,
+                    listclean,
+                    listunknown,
+                    listsubrepos,
+                )
 
             # some calls in this function rely on the old version of status
             self.lfstatus = False
             ctx1 = self[node1]
             ctx2 = self[node2]
             working = ctx2.rev() is None
-            parentworking = working and ctx1 == self['.']
+            parentworking = working and ctx1 == self[b'.']
 
             if match is None:
                 match = matchmod.always()
@@ -124,8 +149,15 @@
                         if match(f):
                             break
                     else:
-                        return orig(node1, node2, match, listignored, listclean,
-                                    listunknown, listsubrepos)
+                        return orig(
+                            node1,
+                            node2,
+                            match,
+                            listignored,
+                            listclean,
+                            listunknown,
+                            listsubrepos,
+                        )
 
                 # Create a copy of match that matches standins instead
                 # of largefiles.
@@ -149,8 +181,9 @@
                 m = copy.copy(match)
                 m._files = tostandins(m._files)
 
-                result = orig(node1, node2, m, ignored, clean, unknown,
-                              listsubrepos)
+                result = orig(
+                    node1, node2, m, ignored, clean, unknown, listsubrepos
+                )
                 if working:
 
                     def sfindirstate(f):
@@ -158,24 +191,32 @@
                         dirstate = self.dirstate
                         return sf in dirstate or dirstate.hasdir(sf)
 
-                    match._files = [f for f in match._files
-                                    if sfindirstate(f)]
+                    match._files = [f for f in match._files if sfindirstate(f)]
                     # Don't waste time getting the ignored and unknown
                     # files from lfdirstate
-                    unsure, s = lfdirstate.status(match, subrepos=[],
-                                                  ignored=False,
-                                                  clean=listclean,
-                                                  unknown=False)
+                    unsure, s = lfdirstate.status(
+                        match,
+                        subrepos=[],
+                        ignored=False,
+                        clean=listclean,
+                        unknown=False,
+                    )
                     (modified, added, removed, deleted, clean) = (
-                        s.modified, s.added, s.removed, s.deleted, s.clean)
+                        s.modified,
+                        s.added,
+                        s.removed,
+                        s.deleted,
+                        s.clean,
+                    )
                     if parentworking:
                         for lfile in unsure:
                             standin = lfutil.standin(lfile)
                             if standin not in ctx1:
                                 # from second parent
                                 modified.append(lfile)
-                            elif (lfutil.readasstandin(ctx1[standin])
-                                  != lfutil.hashfile(self.wjoin(lfile))):
+                            elif lfutil.readasstandin(
+                                ctx1[standin]
+                            ) != lfutil.hashfile(self.wjoin(lfile)):
                                 modified.append(lfile)
                             else:
                                 if listclean:
@@ -190,11 +231,14 @@
                             standin = lfutil.standin(lfile)
                             if standin in ctx1:
                                 abslfile = self.wjoin(lfile)
-                                if ((lfutil.readasstandin(ctx1[standin]) !=
-                                     lfutil.hashfile(abslfile)) or
-                                    (checkexec and
-                                     ('x' in ctx1.flags(standin)) !=
-                                     bool(lfutil.getexecutable(abslfile)))):
+                                if (
+                                    lfutil.readasstandin(ctx1[standin])
+                                    != lfutil.hashfile(abslfile)
+                                ) or (
+                                    checkexec
+                                    and (b'x' in ctx1.flags(standin))
+                                    != bool(lfutil.getexecutable(abslfile))
+                                ):
                                     modified.append(lfile)
                                 elif listclean:
                                     clean.append(lfile)
@@ -205,8 +249,11 @@
                         # marked as 'R' in the working context.
                         # then, largefiles not managed also in the target
                         # context should be excluded from 'removed'.
-                        removed = [lfile for lfile in removed
-                                   if lfutil.standin(lfile) in ctx1]
+                        removed = [
+                            lfile
+                            for lfile in removed
+                            if lfutil.standin(lfile) in ctx1
+                        ]
 
                     # Standins no longer found in lfdirstate have been deleted
                     for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
@@ -229,27 +276,37 @@
                     # files are not really removed if they are still in
                     # lfdirstate. This happens in merges where files
                     # change type.
-                    removed = [f for f in removed
-                               if f not in self.dirstate]
-                    result[2] = [f for f in result[2]
-                                 if f not in lfdirstate]
+                    removed = [f for f in removed if f not in self.dirstate]
+                    result[2] = [f for f in result[2] if f not in lfdirstate]
 
-                    lfiles = set(lfdirstate._map)
+                    lfiles = set(lfdirstate)
                     # Unknown files
                     result[4] = set(result[4]).difference(lfiles)
                     # Ignored files
                     result[5] = set(result[5]).difference(lfiles)
                     # combine normal files and largefiles
-                    normals = [[fn for fn in filelist
-                                if not lfutil.isstandin(fn)]
-                               for filelist in result]
-                    lfstatus = (modified, added, removed, deleted, [], [],
-                                clean)
-                    result = [sorted(list1 + list2)
-                              for (list1, list2) in zip(normals, lfstatus)]
-                else: # not against working directory
-                    result = [[lfutil.splitstandin(f) or f for f in items]
-                              for items in result]
+                    normals = [
+                        [fn for fn in filelist if not lfutil.isstandin(fn)]
+                        for filelist in result
+                    ]
+                    lfstatus = (
+                        modified,
+                        added,
+                        removed,
+                        deleted,
+                        [],
+                        [],
+                        clean,
+                    )
+                    result = [
+                        sorted(list1 + list2)
+                        for (list1, list2) in zip(normals, lfstatus)
+                    ]
+                else:  # not against working directory
+                    result = [
+                        [lfutil.splitstandin(f) or f for f in items]
+                        for items in result
+                    ]
 
                 if wlock:
                     lfdirstate.write()
@@ -263,18 +320,28 @@
 
         def commitctx(self, ctx, *args, **kwargs):
             node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs)
+
             class lfilesctx(ctx.__class__):
                 def markcommitted(self, node):
                     orig = super(lfilesctx, self).markcommitted
                     return lfutil.markcommitted(orig, self, node)
+
             ctx.__class__ = lfilesctx
             return node
 
         # Before commit, largefile standins have not had their
         # contents updated to reflect the hash of their largefile.
         # Do that here.
-        def commit(self, text="", user=None, date=None, match=None,
-                force=False, editor=False, extra=None):
+        def commit(
+            self,
+            text=b"",
+            user=None,
+            date=None,
+            match=None,
+            force=False,
+            editor=False,
+            extra=None,
+        ):
             if extra is None:
                 extra = {}
             orig = super(lfilesrepo, self).commit
@@ -282,20 +349,30 @@
             with self.wlock():
                 lfcommithook = self._lfcommithooks[-1]
                 match = lfcommithook(self, match)
-                result = orig(text=text, user=user, date=date, match=match,
-                                force=force, editor=editor, extra=extra)
+                result = orig(
+                    text=text,
+                    user=user,
+                    date=date,
+                    match=match,
+                    force=force,
+                    editor=editor,
+                    extra=extra,
+                )
                 return result
 
         def push(self, remote, force=False, revs=None, newbranch=False):
             if remote.local():
                 missing = set(self.requirements) - remote.local().supported
                 if missing:
-                    msg = _("required features are not"
-                            " supported in the destination:"
-                            " %s") % (', '.join(sorted(missing)))
+                    msg = _(
+                        b"required features are not"
+                        b" supported in the destination:"
+                        b" %s"
+                    ) % (b', '.join(sorted(missing)))
                     raise error.Abort(msg)
-            return super(lfilesrepo, self).push(remote, force=force, revs=revs,
-                newbranch=newbranch)
+            return super(lfilesrepo, self).push(
+                remote, force=force, revs=revs, newbranch=newbranch
+            )
 
         # TODO: _subdirlfs should be moved into "lfutil.py", because
         # it is referred only from "lfutil.updatestandinsbymatch"
@@ -316,10 +393,11 @@
             regulars = []
 
             for f in files:
-                if lfutil.isstandin(f + '/'):
+                if lfutil.isstandin(f + b'/'):
                     raise error.Abort(
-                        _('file "%s" is a largefile standin') % f,
-                        hint=('commit the largefile itself instead'))
+                        _(b'file "%s" is a largefile standin') % f,
+                        hint=b'commit the largefile itself instead',
+                    )
                 # Scan directories
                 if self.wvfs.isdir(f):
                     dirs.append(f)
@@ -328,7 +406,7 @@
 
             for f in dirs:
                 matcheddir = False
-                d = self.dirstate.normalize(f) + '/'
+                d = self.dirstate.normalize(f) + b'/'
                 # Check for matched normal files
                 for mf in regulars:
                     if self.dirstate.normalize(mf).startswith(d):
@@ -347,7 +425,7 @@
                                 # forces status/dirstate to walk all files and
                                 # call the match function on the matcher, even
                                 # on case sensitive filesystems.
-                                actualfiles.append('.')
+                                actualfiles.append(b'.')
                                 matcheddir = True
                 # Nothing in dir, so re-add it
                 # and let commit reject it
@@ -377,17 +455,19 @@
         if lfrevs:
             toupload = set()
             addfunc = lambda fn, lfhash: toupload.add(lfhash)
-            lfutil.getlfilestoupload(pushop.repo, lfrevs,
-                                     addfunc)
+            lfutil.getlfilestoupload(pushop.repo, lfrevs, addfunc)
             lfcommands.uploadlfiles(ui, pushop.repo, pushop.remote, toupload)
-    repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
+
+    repo.prepushoutgoinghooks.add(b"largefiles", prepushoutgoinghook)
 
     def checkrequireslfiles(ui, repo, **kwargs):
-        if 'largefiles' not in repo.requirements and any(
-                lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
-            repo.requirements.add('largefiles')
+        if b'largefiles' not in repo.requirements and any(
+            lfutil.shortname + b'/' in f[0] for f in repo.store.datafiles()
+        ):
+            repo.requirements.add(b'largefiles')
             repo._writerequirements()
 
-    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
-                 'largefiles')
-    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
+    ui.setconfig(
+        b'hooks', b'changegroup.lfiles', checkrequireslfiles, b'largefiles'
+    )
+    ui.setconfig(b'hooks', b'commit.lfiles', checkrequireslfiles, b'largefiles')
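# An illustration of the standin mapping reposetup.py relies on: each
# largefile is tracked through a small standin file under '.hglf/'
# (lfutil.shortname). Assuming the lfutil helpers untouched by this
# patch:
#
#   lfutil.standin(b'data/big.bin')             # -> b'.hglf/data/big.bin'
#   lfutil.splitstandin(b'.hglf/data/big.bin')  # -> b'data/big.bin'
#   lfutil.splitstandin(b'README')              # -> None (not a standin)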
--- a/hgext/largefiles/storefactory.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/largefiles/storefactory.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,7 +6,7 @@
 import re
 
 from mercurial.i18n import _
-
+from mercurial.pycompat import getattr
 from mercurial import (
     error,
     hg,
@@ -31,18 +31,18 @@
         if lfpullsource:
             path = ui.expandpath(lfpullsource)
         elif put:
-            path = ui.expandpath('default-push', 'default')
+            path = ui.expandpath(b'default-push', b'default')
         else:
-            path = ui.expandpath('default')
+            path = ui.expandpath(b'default')
 
         # ui.expandpath() leaves 'default-push' and 'default' alone if
         # they cannot be expanded: fallback to the empty string,
         # meaning the current directory.
         if repo is None:
-            path = ui.expandpath('default')
+            path = ui.expandpath(b'default')
             path, _branches = hg.parseurl(path)
             remote = hg.peer(repo or ui, {}, path)
-        elif path == 'default-push' or path == 'default':
+        elif path == b'default-push' or path == b'default':
             remote = repo
         else:
             path, _branches = hg.parseurl(path)
@@ -50,18 +50,18 @@
 
     # The path could be a scheme so use Mercurial's normal functionality
     # to resolve the scheme to a repository and use its path
-    path = util.safehasattr(remote, 'url') and remote.url() or remote.path
+    path = util.safehasattr(remote, b'url') and remote.url() or remote.path
 
     match = _scheme_re.match(path)
-    if not match:                       # regular filesystem path
-        scheme = 'file'
+    if not match:  # regular filesystem path
+        scheme = b'file'
     else:
         scheme = match.group(1)
 
     try:
         storeproviders = _storeprovider[scheme]
     except KeyError:
-        raise error.Abort(_('unsupported URL scheme %r') % scheme)
+        raise error.Abort(_(b'unsupported URL scheme %r') % scheme)
 
     for classobj in storeproviders:
         try:
@@ -69,17 +69,21 @@
         except lfutil.storeprotonotcapable:
             pass
 
-    raise error.Abort(_('%s does not appear to be a largefile store') %
-                     util.hidepassword(path))
+    raise error.Abort(
+        _(b'%s does not appear to be a largefile store')
+        % util.hidepassword(path)
+    )
+
 
 _storeprovider = {
-    'file':  [localstore.localstore],
-    'http':  [wirestore.wirestore],
-    'https': [wirestore.wirestore],
-    'ssh': [wirestore.wirestore],
-    }
+    b'file': [localstore.localstore],
+    b'http': [wirestore.wirestore],
+    b'https': [wirestore.wirestore],
+    b'ssh': [wirestore.wirestore],
+}
 
 _scheme_re = re.compile(br'^([a-zA-Z0-9+-.]+)://')
 
+
 def getlfile(ui, hash):
     return util.chunkbuffer(openstore(ui=ui)._get(hash))
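# A short sketch of the scheme resolution openstore() performs above:
#
#   _scheme_re.match(b'https://hg.example.com/repo').group(1)  # b'https'
#   _scheme_re.match(b'/some/local/path')    # None -> scheme = b'file'
#   _storeprovider[b'https']                 # [wirestore.wirestore]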
--- a/hgext/largefiles/wirestore.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/largefiles/wirestore.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,13 +11,14 @@
     remotestore,
 )
 
+
 class wirestore(remotestore.remotestore):
     def __init__(self, ui, repo, remote):
-        cap = remote.capable('largefiles')
+        cap = remote.capable(b'largefiles')
         if not cap:
             raise lfutil.storeprotonotcapable([])
-        storetypes = cap.split(',')
-        if 'serve' not in storetypes:
+        storetypes = cap.split(b',')
+        if b'serve' not in storetypes:
             raise lfutil.storeprotonotcapable(storetypes)
         self.remote = remote
         super(wirestore, self).__init__(ui, repo, remote.url())
@@ -36,8 +37,6 @@
         with self.remote.commandexecutor() as e:
             fs = []
             for hash in hashes:
-                fs.append((hash, e.callcommand('statlfile', {
-                    'sha': hash,
-                })))
+                fs.append((hash, e.callcommand(b'statlfile', {b'sha': hash,})))
 
             return {hash: f.result() for hash, f in fs}
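# The _stat() above issues one 'statlfile' call per hash through the
# command executor so peers that support batching can answer them in a
# single round trip; a rough unbatched equivalent (assuming the peer
# methods installed by proto.wirereposetup()) would be:
#
#   def _stat(self, hashes):
#       return {h: self.remote.statlfile(h) for h in hashes}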
--- a/hgext/lfs/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/lfs/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -53,31 +53,29 @@
 file to a usercache, to speed up future access.  See the `usercache`
 config setting described below.
 
-.hglfs::
+The extension reads its configuration from a versioned ``.hglfs``
+configuration file found in the root of the working directory. The
+``.hglfs`` file uses the same syntax as all other Mercurial
+configuration files. It uses a single section, ``[track]``.
 
-    The extension reads its configuration from a versioned ``.hglfs``
-    configuration file found in the root of the working directory. The
-    ``.hglfs`` file uses the same syntax as all other Mercurial
-    configuration files. It uses a single section, ``[track]``.
-
-    The ``[track]`` section specifies which files are stored as LFS (or
-    not). Each line is keyed by a file pattern, with a predicate value.
-    The first file pattern match is used, so put more specific patterns
-    first.  The available predicates are ``all()``, ``none()``, and
-    ``size()``. See "hg help filesets.size" for the latter.
+The ``[track]`` section specifies which files are stored as LFS (or
+not). Each line is keyed by a file pattern, with a predicate value.
+The first file pattern match is used, so put more specific patterns
+first.  The available predicates are ``all()``, ``none()``, and
+``size()``. See "hg help filesets.size" for the latter.
 
-    Example versioned ``.hglfs`` file::
+Example versioned ``.hglfs`` file::
 
-      [track]
-      # No Makefile or python file, anywhere, will be LFS
-      **Makefile = none()
-      **.py = none()
+  [track]
+  # No Makefile or python file, anywhere, will be LFS
+  **Makefile = none()
+  **.py = none()
 
-      **.zip = all()
-      **.exe = size(">1MB")
+  **.zip = all()
+  **.exe = size(">1MB")
 
-      # Catchall for everything not matched above
-      ** = size(">10MB")
+  # Catchall for everything not matched above
+  ** = size(">10MB")
 
 Configs::
 
@@ -141,13 +139,14 @@
     minifileset,
     node,
     pycompat,
-    repository,
     revlog,
     scmutil,
     templateutil,
     util,
 )
 
+from mercurial.interfaces import repository
+
 from . import (
     blobstore,
     wireprotolfsserver,
@@ -158,7 +157,7 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 eh = exthelper.exthelper()
 eh.merge(wrapper.eh)
@@ -172,34 +171,34 @@
 reposetup = eh.finalreposetup
 templatekeyword = eh.templatekeyword
 
-eh.configitem('experimental', 'lfs.serve',
-    default=True,
+eh.configitem(
+    b'experimental', b'lfs.serve', default=True,
 )
-eh.configitem('experimental', 'lfs.user-agent',
-    default=None,
+eh.configitem(
+    b'experimental', b'lfs.user-agent', default=None,
 )
-eh.configitem('experimental', 'lfs.disableusercache',
-    default=False,
+eh.configitem(
+    b'experimental', b'lfs.disableusercache', default=False,
 )
-eh.configitem('experimental', 'lfs.worker-enable',
-    default=False,
+eh.configitem(
+    b'experimental', b'lfs.worker-enable', default=False,
 )
 
-eh.configitem('lfs', 'url',
-    default=None,
+eh.configitem(
+    b'lfs', b'url', default=None,
 )
-eh.configitem('lfs', 'usercache',
-    default=None,
+eh.configitem(
+    b'lfs', b'usercache', default=None,
 )
 # Deprecated
-eh.configitem('lfs', 'threshold',
-    default=None,
+eh.configitem(
+    b'lfs', b'threshold', default=None,
 )
-eh.configitem('lfs', 'track',
-    default='none()',
+eh.configitem(
+    b'lfs', b'track', default=b'none()',
 )
-eh.configitem('lfs', 'retry',
-    default=5,
+eh.configitem(
+    b'lfs', b'retry', default=5,
 )
 
 lfsprocessor = (
@@ -208,14 +207,17 @@
     wrapper.bypasscheckhash,
 )
 
+
 def featuresetup(ui, supported):
     # don't die on seeing a repo with the lfs requirement
-    supported |= {'lfs'}
+    supported |= {b'lfs'}
+
 
 @eh.uisetup
 def _uisetup(ui):
     localrepo.featuresetupfuncs.add(featuresetup)
 
+
 @eh.reposetup
 def _reposetup(ui, repo):
     # Nothing to do with a remote repo
@@ -228,71 +230,78 @@
     class lfsrepo(repo.__class__):
         @localrepo.unfilteredmethod
         def commitctx(self, ctx, error=False, origctx=None):
-            repo.svfs.options['lfstrack'] = _trackedmatcher(self)
+            repo.svfs.options[b'lfstrack'] = _trackedmatcher(self)
             return super(lfsrepo, self).commitctx(ctx, error, origctx=origctx)
 
     repo.__class__ = lfsrepo
 
-    if 'lfs' not in repo.requirements:
+    if b'lfs' not in repo.requirements:
+
         def checkrequireslfs(ui, repo, **kwargs):
-            if 'lfs' in repo.requirements:
+            if b'lfs' in repo.requirements:
                 return 0
 
             last = kwargs.get(r'node_last')
             _bin = node.bin
             if last:
-                s = repo.set('%n:%n', _bin(kwargs[r'node']), _bin(last))
+                s = repo.set(b'%n:%n', _bin(kwargs[r'node']), _bin(last))
             else:
-                s = repo.set('%n', _bin(kwargs[r'node']))
+                s = repo.set(b'%n', _bin(kwargs[r'node']))
             match = repo._storenarrowmatch
             for ctx in s:
                 # TODO: is there a way to just walk the files in the commit?
-                if any(ctx[f].islfs() for f in ctx.files()
-                       if f in ctx and match(f)):
-                    repo.requirements.add('lfs')
+                if any(
+                    ctx[f].islfs() for f in ctx.files() if f in ctx and match(f)
+                ):
+                    repo.requirements.add(b'lfs')
                     repo.features.add(repository.REPO_FEATURE_LFS)
                     repo._writerequirements()
-                    repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)
+                    repo.prepushoutgoinghooks.add(b'lfs', wrapper.prepush)
                     break
 
-        ui.setconfig('hooks', 'commit.lfs', checkrequireslfs, 'lfs')
-        ui.setconfig('hooks', 'pretxnchangegroup.lfs', checkrequireslfs, 'lfs')
+        ui.setconfig(b'hooks', b'commit.lfs', checkrequireslfs, b'lfs')
+        ui.setconfig(
+            b'hooks', b'pretxnchangegroup.lfs', checkrequireslfs, b'lfs'
+        )
     else:
-        repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)
+        repo.prepushoutgoinghooks.add(b'lfs', wrapper.prepush)
+
 
 def _trackedmatcher(repo):
     """Return a function (path, size) -> bool indicating whether or not to
     track a given file with lfs."""
-    if not repo.wvfs.exists('.hglfs'):
+    if not repo.wvfs.exists(b'.hglfs'):
         # No '.hglfs' in wdir.  Fallback to config for now.
-        trackspec = repo.ui.config('lfs', 'track')
+        trackspec = repo.ui.config(b'lfs', b'track')
 
         # deprecated config: lfs.threshold
-        threshold = repo.ui.configbytes('lfs', 'threshold')
+        threshold = repo.ui.configbytes(b'lfs', b'threshold')
         if threshold:
             filesetlang.parse(trackspec)  # make sure syntax errors are confined
-            trackspec = "(%s) | size('>%d')" % (trackspec, threshold)
+            trackspec = b"(%s) | size('>%d')" % (trackspec, threshold)
 
         return minifileset.compile(trackspec)
 
-    data = repo.wvfs.tryread('.hglfs')
+    data = repo.wvfs.tryread(b'.hglfs')
     if not data:
         return lambda p, s: False
 
     # Parse errors here will abort with a message that points to the .hglfs file
     # and line number.
     cfg = config.config()
-    cfg.parse('.hglfs', data)
+    cfg.parse(b'.hglfs', data)
 
     try:
-        rules = [(minifileset.compile(pattern), minifileset.compile(rule))
-                 for pattern, rule in cfg.items('track')]
+        rules = [
+            (minifileset.compile(pattern), minifileset.compile(rule))
+            for pattern, rule in cfg.items(b'track')
+        ]
     except error.ParseError as e:
         # The original exception gives no indicator that the error is in the
         # .hglfs file, so add that.
 
         # TODO: See if the line number of the file can be made available.
-        raise error.Abort(_('parse error in .hglfs: %s') % e)
+        raise error.Abort(_(b'parse error in .hglfs: %s') % e)
 
     def _match(path, size):
         for pat, rule in rules:
@@ -303,6 +312,7 @@
 
     return _match
 
+
 # Called by remotefilelog
 def wrapfilelog(filelog):
     wrapfunction = extensions.wrapfunction
@@ -311,14 +321,17 @@
     wrapfunction(filelog, 'renamed', wrapper.filelogrenamed)
     wrapfunction(filelog, 'size', wrapper.filelogsize)
 
-@eh.wrapfunction(localrepo, 'resolverevlogstorevfsoptions')
+
+@eh.wrapfunction(localrepo, b'resolverevlogstorevfsoptions')
 def _resolverevlogstorevfsoptions(orig, ui, requirements, features):
     opts = orig(ui, requirements, features)
     for name, module in extensions.extensions(ui):
         if module is sys.modules[__name__]:
             if revlog.REVIDX_EXTSTORED in opts[b'flagprocessors']:
-                msg = (_(b"cannot register multiple processors on flag '%#x'.")
-                       % revlog.REVIDX_EXTSTORED)
+                msg = (
+                    _(b"cannot register multiple processors on flag '%#x'.")
+                    % revlog.REVIDX_EXTSTORED
+                )
                 raise error.Abort(msg)
 
             opts[b'flagprocessors'][revlog.REVIDX_EXTSTORED] = lfsprocessor
@@ -326,56 +339,64 @@
 
     return opts
 
+
 @eh.extsetup
 def _extsetup(ui):
     wrapfilelog(filelog.filelog)
 
     context.basefilectx.islfs = wrapper.filectxislfs
 
-    scmutil.fileprefetchhooks.add('lfs', wrapper._prefetchfiles)
+    scmutil.fileprefetchhooks.add(b'lfs', wrapper._prefetchfiles)
 
     # Make bundle choose changegroup3 instead of changegroup2. This affects
     # "hg bundle" command. Note: it does not cover all bundle formats like
     # "packed1". Using "packed1" with lfs will likely cause trouble.
-    exchange._bundlespeccontentopts["v2"]["cg.version"] = "03"
+    exchange._bundlespeccontentopts[b"v2"][b"cg.version"] = b"03"
 
-@eh.filesetpredicate('lfs()')
+
+@eh.filesetpredicate(b'lfs()')
 def lfsfileset(mctx, x):
     """File that uses LFS storage."""
     # i18n: "lfs" is a keyword
-    filesetlang.getargs(x, 0, 0, _("lfs takes no arguments"))
+    filesetlang.getargs(x, 0, 0, _(b"lfs takes no arguments"))
     ctx = mctx.ctx
+
     def lfsfilep(f):
         return wrapper.pointerfromctx(ctx, f, removed=True) is not None
-    return mctx.predicate(lfsfilep, predrepr='<lfs>')
+
+    return mctx.predicate(lfsfilep, predrepr=b'<lfs>')
 
-@eh.templatekeyword('lfs_files', requires={'ctx'})
+
+@eh.templatekeyword(b'lfs_files', requires={b'ctx'})
 def lfsfiles(context, mapping):
     """List of strings. All files modified, added, or removed by this
     changeset."""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
 
-    pointers = wrapper.pointersfromctx(ctx, removed=True) # {path: pointer}
+    pointers = wrapper.pointersfromctx(ctx, removed=True)  # {path: pointer}
     files = sorted(pointers.keys())
 
     def pointer(v):
         # In the file spec, version is first and the other keys are sorted.
-        sortkeyfunc = lambda x: (x[0] != 'version', x)
-        items = sorted(pointers[v].iteritems(), key=sortkeyfunc)
+        sortkeyfunc = lambda x: (x[0] != b'version', x)
+        items = sorted(pycompat.iteritems(pointers[v]), key=sortkeyfunc)
         return util.sortdict(items)
 
     makemap = lambda v: {
-        'file': v,
-        'lfsoid': pointers[v].oid() if pointers[v] else None,
-        'lfspointer': templateutil.hybriddict(pointer(v)),
+        b'file': v,
+        b'lfsoid': pointers[v].oid() if pointers[v] else None,
+        b'lfspointer': templateutil.hybriddict(pointer(v)),
     }
 
     # TODO: make the separator ', '?
-    f = templateutil._showcompatlist(context, mapping, 'lfs_file', files)
+    f = templateutil._showcompatlist(context, mapping, b'lfs_file', files)
     return templateutil.hybrid(f, files, makemap, pycompat.identity)
 
-@eh.command('debuglfsupload',
-            [('r', 'rev', [], _('upload large files introduced by REV'))])
+
+@eh.command(
+    b'debuglfsupload',
+    [(b'r', b'rev', [], _(b'upload large files introduced by REV'))],
+)
 def debuglfsupload(ui, repo, **opts):
     """upload lfs blobs added by the working copy parent or given revisions"""
     revs = opts.get(r'rev', [])
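# A small sketch of how _trackedmatcher() above evaluates the [track]
# rules: the first pattern matching the path wins and its predicate
# decides. Assuming minifileset behaves as it is used above:
#
#   rules = [
#       (minifileset.compile(b'**.zip'), minifileset.compile(b'all()')),
#       (minifileset.compile(b'**'), minifileset.compile(b"size('>10MB')")),
#   ]
#   def _match(path, size):
#       for pat, rule in rules:
#           if pat(path, size):
#               return rule(path, size)
#       return False
#
#   _match(b'a.zip', 100)   # True:  **.zip -> all()
#   _match(b'a.txt', 1024)  # False: **     -> size('>10MB')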
--- a/hgext/lfs/blobstore.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/lfs/blobstore.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,6 +16,7 @@
 import socket
 
 from mercurial.i18n import _
+from mercurial.pycompat import getattr
 
 from mercurial import (
     encoding,
@@ -29,15 +30,14 @@
     worker,
 )
 
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 from ..largefiles import lfutil
 
 # 64 bytes for SHA256
 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
 
+
 class lfsvfs(vfsmod.vfs):
     def join(self, path):
         """split the path at first two characters, like: XX/XXXXX..."""
@@ -56,17 +56,19 @@
         prefixlen = len(pathutil.normasprefix(root))
         oids = []
 
-        for dirpath, dirs, files in os.walk(self.reljoin(self.base, path
-                                                         or b''),
-                                            onerror=onerror):
+        for dirpath, dirs, files in os.walk(
+            self.reljoin(self.base, path or b''), onerror=onerror
+        ):
             dirpath = dirpath[prefixlen:]
 
             # Silently skip unexpected files and directories
             if len(dirpath) == 2:
-                oids.extend([dirpath + f for f in files
-                             if _lfsre.match(dirpath + f)])
+                oids.extend(
+                    [dirpath + f for f in files if _lfsre.match(dirpath + f)]
+                )
 
-        yield ('', [], oids)
+        yield (b'', [], oids)
+
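# An illustration of the sharded layout lfsvfs.join() produces (see the
# docstring above): a 64-hex sha256 oid is split after its first two
# characters, spreading blobs across 256 subdirectories.
#
#   oid = b'ab' * 32     # a 64-character hex oid
#   # join(oid) resolves to oid[0:2] + b'/' + oid[2:], i.e. b'ab/' plus
#   # the remaining 62 characters.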
 
 class nullvfs(lfsvfs):
     def __init__(self):
@@ -80,8 +82,10 @@
         # self.vfs.  Raise the same error as a normal vfs when asked to read a
         # file that doesn't exist.  The only difference is the full file path
         # isn't available in the error.
-        raise IOError(errno.ENOENT,
-                      pycompat.sysstr(b'%s: No such file or directory' % oid))
+        raise IOError(
+            errno.ENOENT,
+            pycompat.sysstr(b'%s: No such file or directory' % oid),
+        )
 
     def walk(self, path=None, onerror=None):
         return (b'', [], [])
@@ -89,6 +93,7 @@
     def write(self, oid, data):
         pass
 
+
 class filewithprogress(object):
     """a file-like object that supports __len__ and read.
 
@@ -97,7 +102,7 @@
 
     def __init__(self, fp, callback):
         self._fp = fp
-        self._callback = callback # func(readsize)
+        self._callback = callback  # func(readsize)
         fp.seek(0, os.SEEK_END)
         self._len = fp.tell()
         fp.seek(0)
@@ -117,6 +122,7 @@
             self._fp = None
         return data
 
+
 class local(object):
     """Local blobstore for large file contents.
 
@@ -161,8 +167,9 @@
 
             realoid = node.hex(sha256.digest())
             if realoid != oid:
-                raise LfsCorruptionError(_(b'corrupt remote lfs object: %s')
-                                         % oid)
+                raise LfsCorruptionError(
+                    _(b'corrupt remote lfs object: %s') % oid
+                )
 
         self._linktousercache(oid)
 
@@ -186,16 +193,16 @@
         blob, but that doesn't happen when the server tells the client that it
         already has the blob.
         """
-        if (not isinstance(self.cachevfs, nullvfs)
-            and not self.vfs.exists(oid)):
+        if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
             self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
             lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
 
     def _linktousercache(self, oid):
         # XXX: should we verify the content of the cache, and hardlink back to
         # the local store on success, but truncate, write and link on failure?
-        if (not self.cachevfs.exists(oid)
-            and not isinstance(self.cachevfs, nullvfs)):
+        if not self.cachevfs.exists(oid) and not isinstance(
+            self.cachevfs, nullvfs
+        ):
             self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
             lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
 
@@ -240,6 +247,7 @@
         False otherwise."""
         return self.cachevfs.exists(oid) or self.vfs.exists(oid)
 
+
 def _urlerrorreason(urlerror):
     '''Create a friendly message for the given URLError to be used in an
     LfsRemoteError message.
@@ -249,8 +257,8 @@
     if isinstance(urlerror.reason, Exception):
         inst = urlerror.reason
 
-    if util.safehasattr(inst, 'reason'):
-        try: # usually it is in the form (errno, strerror)
+    if util.safehasattr(inst, b'reason'):
+        try:  # usually it is in the form (errno, strerror)
             reason = inst.reason.args[1]
         except (AttributeError, IndexError):
             # it might be anything, for example a string
@@ -264,6 +272,7 @@
     else:
         return stringutil.forcebytestr(urlerror)
 
+
 class lfsauthhandler(util.urlreq.basehandler):
     handler_order = 480  # Before HTTPDigestAuthHandler (== 490)
 
@@ -277,13 +286,17 @@
 
             if scheme.lower() != r'basic':
                 msg = _(b'the server must support Basic Authentication')
-                raise util.urlerr.httperror(req.get_full_url(), code,
-                                            encoding.strfromlocal(msg), headers,
-                                            fp)
+                raise util.urlerr.httperror(
+                    req.get_full_url(),
+                    code,
+                    encoding.strfromlocal(msg),
+                    headers,
+                    fp,
+                )
         return None
 
+
 class _gitlfsremote(object):
-
     def __init__(self, repo, url):
         ui = repo.ui
         self.ui = ui
@@ -310,12 +323,15 @@
         Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
         See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
         """
-        objects = [{r'oid': pycompat.strurl(p.oid()),
-                    r'size': p.size()} for p in pointers]
-        requestdata = pycompat.bytesurl(json.dumps({
-            r'objects': objects,
-            r'operation': pycompat.strurl(action),
-        }))
+        objects = [
+            {r'oid': pycompat.strurl(p.oid()), r'size': p.size()}
+            for p in pointers
+        ]
+        requestdata = pycompat.bytesurl(
+            json.dumps(
+                {r'objects': objects, r'operation': pycompat.strurl(action),}
+            )
+        )
         url = b'%s/objects/batch' % self.baseurl
         batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
         batchreq.add_header(r'Accept', r'application/vnd.git-lfs+json')
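# The request assembled above follows the git-lfs batch API referenced
# in the docstring; for a single pointer the JSON body looks like:
#
#   {"objects": [{"oid": "<64-hex sha256>", "size": 12}],
#    "operation": "download"}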
@@ -325,45 +341,60 @@
                 rawjson = rsp.read()
         except util.urlerr.httperror as ex:
             hints = {
-                400: _(b'check that lfs serving is enabled on %s and "%s" is '
-                       b'supported') % (self.baseurl, action),
+                400: _(
+                    b'check that lfs serving is enabled on %s and "%s" is '
+                    b'supported'
+                )
+                % (self.baseurl, action),
                 404: _(b'the "lfs.url" config may be used to override %s')
-                       % self.baseurl,
+                % self.baseurl,
             }
             hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
             raise LfsRemoteError(
                 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
-                hint=hint)
+                hint=hint,
+            )
         except util.urlerr.urlerror as ex:
-            hint = (_(b'the "lfs.url" config may be used to override %s')
-                    % self.baseurl)
-            raise LfsRemoteError(_(b'LFS error: %s') % _urlerrorreason(ex),
-                                 hint=hint)
+            hint = (
+                _(b'the "lfs.url" config may be used to override %s')
+                % self.baseurl
+            )
+            raise LfsRemoteError(
+                _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
+            )
         try:
             response = json.loads(rawjson)
         except ValueError:
-            raise LfsRemoteError(_(b'LFS server returns invalid JSON: %s')
-                                 % rawjson.encode("utf-8"))
+            raise LfsRemoteError(
+                _(b'LFS server returns invalid JSON: %s')
+                % rawjson.encode("utf-8")
+            )
 
         if self.ui.debugflag:
             self.ui.debug(b'Status: %d\n' % rsp.status)
             # lfs-test-server and hg serve return headers in different order
             headers = pycompat.bytestr(rsp.info()).strip()
-            self.ui.debug(b'%s\n'
-                          % b'\n'.join(sorted(headers.splitlines())))
+            self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
 
             if r'objects' in response:
-                response[r'objects'] = sorted(response[r'objects'],
-                                              key=lambda p: p[r'oid'])
-            self.ui.debug(b'%s\n'
-                          % pycompat.bytesurl(
-                              json.dumps(response, indent=2,
-                                         separators=(r'', r': '),
-                                         sort_keys=True)))
+                response[r'objects'] = sorted(
+                    response[r'objects'], key=lambda p: p[r'oid']
+                )
+            self.ui.debug(
+                b'%s\n'
+                % pycompat.bytesurl(
+                    json.dumps(
+                        response,
+                        indent=2,
+                        separators=(r'', r': '),
+                        sort_keys=True,
+                    )
+                )
+            )
 
         def encodestr(x):
             if isinstance(x, pycompat.unicode):
-                return x.encode(u'utf-8')
+                return x.encode('utf-8')
             return x
 
         return pycompat.rapply(encodestr, response)
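
The `_batchrequest` hunk above builds the git-lfs Batch API call: a JSON body naming the objects and the operation, POSTed to `<lfs.url>/objects/batch`. A standalone stdlib sketch of the same request shape (the endpoint and oid here are hypothetical):

import json
import urllib.request

# Hypothetical endpoint; in hg it comes from the 'lfs.url' config or
# is derived from the push/pull path.
baseurl = 'https://example.com/repo.git/info/lfs'

objects = [{'oid': 'ab' * 32, 'size': 12}]  # one fake sha256 oid
requestdata = json.dumps({'objects': objects, 'operation': 'download'})

batchreq = urllib.request.Request(
    '%s/objects/batch' % baseurl,
    data=requestdata.encode('utf-8'),
)
# The Batch API spec expects the git-lfs media type on both headers.
batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
# urllib.request.urlopen(batchreq) would return JSON shaped like
# {'objects': [{'oid': ..., 'size': ..., 'actions': {...}}]}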
@@ -378,8 +409,9 @@
             # but just removes "download" from "actions". Treat that case
             # as the same as 404 error.
             if b'error' not in response:
-                if (action == b'download'
-                    and action not in response.get(b'actions', [])):
+                if action == b'download' and action not in response.get(
+                    b'actions', []
+                ):
                     code = 404
                 else:
                     continue
@@ -399,12 +431,14 @@
                     500: b'Internal server error',
                 }
                 msg = errors.get(code, b'status code %d' % code)
-                raise LfsRemoteError(_(b'LFS server error for "%s": %s')
-                                     % (filename, msg))
+                raise LfsRemoteError(
+                    _(b'LFS server error for "%s": %s') % (filename, msg)
+                )
             else:
                 raise LfsRemoteError(
                     _(b'LFS server error. Unsolicited response for oid %s')
-                    % response[b'oid'])
+                    % response[b'oid']
+                )
 
     def _extractobjects(self, response, pointers, action):
         """extract objects from response of the batch API
@@ -419,8 +453,9 @@
 
         # Filter objects with given action. Practically, this skips uploading
         # objects which exist in the server.
-        filteredobjects = [o for o in objects
-                           if action in o.get(b'actions', [])]
+        filteredobjects = [
+            o for o in objects if action in o.get(b'actions', [])
+        ]
 
         return filteredobjects
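
The filter in `_extractobjects` above is how re-uploads are avoided: for blobs the server already has, it simply omits the requested action from the object's `actions`. A toy illustration with fake oids:

action = b'upload'
objects = [
    {b'oid': b'aa' * 32, b'actions': {b'upload': {}}},
    {b'oid': b'bb' * 32},  # no 'upload' action: already on the server
]

# dict membership tests keys, so this keeps only the first object
filteredobjects = [o for o in objects if action in o.get(b'actions', [])]
assert [o[b'oid'] for o in filteredobjects] == [b'aa' * 32]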
 
@@ -442,8 +477,10 @@
         if action == b'upload':
             # If uploading blobs, read data from local blobstore.
             if not localstore.verify(oid):
-                raise error.Abort(_(b'detected corrupt lfs object: %s') % oid,
-                                  hint=_(b'run hg verify'))
+                raise error.Abort(
+                    _(b'detected corrupt lfs object: %s') % oid,
+                    hint=_(b'run hg verify'),
+                )
             request.data = filewithprogress(localstore.open(oid), None)
             request.get_method = lambda: r'PUT'
             request.add_header(r'Content-Type', r'application/octet-stream')
@@ -461,8 +498,7 @@
                     # lfs-test-server and hg serve return headers in different
                     # order
                     headers = pycompat.bytestr(req.info()).strip()
-                    ui.debug(b'%s\n'
-                             % b'\n'.join(sorted(headers.splitlines())))
+                    ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
 
                 if action == b'download':
                     # If downloading blobs, store downloaded data to local
@@ -478,14 +514,20 @@
                         ui.debug(b'lfs %s response: %s' % (action, response))
         except util.urlerr.httperror as ex:
             if self.ui.debugflag:
-                self.ui.debug(b'%s: %s\n' % (oid, ex.read())) # XXX: also bytes?
-            raise LfsRemoteError(_(b'LFS HTTP error: %s (oid=%s, action=%s)')
-                                 % (stringutil.forcebytestr(ex), oid, action))
+                self.ui.debug(
+                    b'%s: %s\n' % (oid, ex.read())
+                )  # XXX: also bytes?
+            raise LfsRemoteError(
+                _(b'LFS HTTP error: %s (oid=%s, action=%s)')
+                % (stringutil.forcebytestr(ex), oid, action)
+            )
         except util.urlerr.urlerror as ex:
-            hint = (_(b'attempted connection to %s')
-                    % pycompat.bytesurl(util.urllibcompat.getfullurl(request)))
-            raise LfsRemoteError(_(b'LFS error: %s') % _urlerrorreason(ex),
-                                 hint=hint)
+            hint = _(b'attempted connection to %s') % pycompat.bytesurl(
+                util.urllibcompat.getfullurl(request)
+            )
+            raise LfsRemoteError(
+                _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
+            )
 
     def _batch(self, pointers, localstore, action):
         if action not in [b'upload', b'download']:
@@ -497,11 +539,15 @@
         sizes = {}
         for obj in objects:
             sizes[obj.get(b'oid')] = obj.get(b'size', 0)
-        topic = {b'upload': _(b'lfs uploading'),
-                 b'download': _(b'lfs downloading')}[action]
+        topic = {
+            b'upload': _(b'lfs uploading'),
+            b'download': _(b'lfs downloading'),
+        }[action]
         if len(objects) > 1:
-            self.ui.note(_(b'lfs: need to transfer %d objects (%s)\n')
-                         % (len(objects), util.bytecount(total)))
+            self.ui.note(
+                _(b'lfs: need to transfer %d objects (%s)\n')
+                % (len(objects), util.bytecount(total))
+            )
 
         def transfer(chunk):
             for obj in chunk:
@@ -511,8 +557,9 @@
                         msg = _(b'lfs: downloading %s (%s)\n')
                     elif action == b'upload':
                         msg = _(b'lfs: uploading %s (%s)\n')
-                    self.ui.note(msg % (obj.get(b'oid'),
-                                 util.bytecount(objsize)))
+                    self.ui.note(
+                        msg % (obj.get(b'oid'), util.bytecount(objsize))
+                    )
                 retry = self.retry
                 while True:
                     try:
@@ -523,15 +570,21 @@
                         if retry > 0:
                             self.ui.note(
                                 _(b'lfs: failed: %r (remaining retry %d)\n')
-                                % (stringutil.forcebytestr(ex), retry))
+                                % (stringutil.forcebytestr(ex), retry)
+                            )
                             retry -= 1
                             continue
                         raise
 
         # Until https multiplexing gets sorted out
         if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
-            oids = worker.worker(self.ui, 0.1, transfer, (),
-                                 sorted(objects, key=lambda o: o.get(b'oid')))
+            oids = worker.worker(
+                self.ui,
+                0.1,
+                transfer,
+                (),
+                sorted(objects, key=lambda o: o.get(b'oid')),
+            )
         else:
             oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
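
The `while True` loop inside `transfer` retries a failed blob transfer up to `self.retry` times before re-raising. A self-contained sketch of the same countdown pattern, using a stub that fails twice before succeeding:

attempts = {'n': 0}

def upload(oid):
    # stand-in for the real socket I/O: fail twice, then succeed
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise IOError('connection reset')
    return oid

retry = 5
while True:
    try:
        oid = upload('oid-1')
        break
    except IOError as ex:
        if retry > 0:
            print('lfs: failed: %r (remaining retry %d)' % (ex, retry))
            retry -= 1
            continue
        raise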
 
@@ -547,11 +600,15 @@
 
         if blobs > 0:
             if action == b'upload':
-                self.ui.status(_(b'lfs: uploaded %d files (%s)\n')
-                               % (blobs, util.bytecount(processed)))
+                self.ui.status(
+                    _(b'lfs: uploaded %d files (%s)\n')
+                    % (blobs, util.bytecount(processed))
+                )
             elif action == b'download':
-                self.ui.status(_(b'lfs: downloaded %d files (%s)\n')
-                               % (blobs, util.bytecount(processed)))
+                self.ui.status(
+                    _(b'lfs: downloaded %d files (%s)\n')
+                    % (blobs, util.bytecount(processed))
+                )
 
     def __del__(self):
         # copied from mercurial/httppeer.py
@@ -559,7 +616,8 @@
         if urlopener:
             for h in urlopener.handlers:
                 h.close()
-                getattr(h, "close_all", lambda : None)()
+                getattr(h, "close_all", lambda: None)()
+
 
 class _dummyremote(object):
     """Dummy store storing blobs to temp directory."""
@@ -579,6 +637,7 @@
             with self.vfs(p.oid(), b'rb') as fp:
                 tostore.download(p.oid(), fp)
 
+
 class _nullremote(object):
     """Null store storing blobs to /dev/null."""
 
@@ -591,6 +650,7 @@
     def readbatch(self, pointers, tostore):
         pass
 
+
 class _promptremote(object):
     """Prompt user to set lfs.url when accessed."""
 
@@ -606,6 +666,7 @@
     def _prompt(self):
         raise error.Abort(_(b'lfs.url needs to be configured'))
 
+
 _storemap = {
     b'https': _gitlfsremote,
     b'http': _gitlfsremote,
@@ -614,6 +675,7 @@
     None: _promptremote,
 }
 
+
 def _deduplicate(pointers):
     """Remove any duplicate oids that exist in the list"""
     reduced = util.sortdict()
@@ -621,11 +683,15 @@
         reduced[p.oid()] = p
     return reduced.values()
 
+
 def _verify(oid, content):
     realoid = node.hex(hashlib.sha256(content).digest())
     if realoid != oid:
-        raise LfsCorruptionError(_(b'detected corrupt lfs object: %s') % oid,
-                                 hint=_(b'run hg verify'))
+        raise LfsCorruptionError(
+            _(b'detected corrupt lfs object: %s') % oid,
+            hint=_(b'run hg verify'),
+        )
+
 
 def remote(repo, remote=None):
     """remotestore factory. return a store in _storemap depending on config
@@ -638,11 +704,11 @@
     https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
     """
     lfsurl = repo.ui.config(b'lfs', b'url')
-    url = util.url(lfsurl or '')
+    url = util.url(lfsurl or b'')
     if lfsurl is None:
         if remote:
             path = remote
-        elif util.safehasattr(repo, '_subtoppath'):
+        elif util.safehasattr(repo, b'_subtoppath'):
             # The pull command sets this during the optional update phase, which
             # tells exactly where the pull originated, whether 'paths.default'
             # or explicit.
@@ -669,9 +735,11 @@
         raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
     return _storemap[scheme](repo, url)
 
+
 class LfsRemoteError(error.StorageError):
     pass
 
+
 class LfsCorruptionError(error.Abort):
     """Raised when a corrupt blob is detected, aborting an operation
 
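`remote()` above resolves the effective LFS URL (from `lfs.url`, the remote path, or `paths.default`) and then picks a store class by URL scheme via `_storemap`. A condensed sketch of that dispatch, with toy classes standing in for `_gitlfsremote` and `_promptremote` and a deliberately crude scheme parser:

class gitstore(object):
    pass

class promptstore(object):
    pass

storemap = {
    b'https': gitstore,
    b'http': gitstore,
    None: promptstore,  # no usable URL: prompt for lfs.url
}

def schemeof(lfsurl):
    # crude stand-in for util.url(): the text before '://', if any
    return lfsurl.split(b'://', 1)[0] if b'://' in lfsurl else None

for url in (b'https://example.com/lfs', b''):
    print(url, storemap[schemeof(url)].__name__)
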
--- a/hgext/lfs/pointer.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/lfs/pointer.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,69 +15,75 @@
     error,
     pycompat,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
+
 
 class InvalidPointer(error.StorageError):
     pass
 
+
 class gitlfspointer(dict):
-    VERSION = 'https://git-lfs.github.com/spec/v1'
+    VERSION = b'https://git-lfs.github.com/spec/v1'
 
     def __init__(self, *args, **kwargs):
-        self['version'] = self.VERSION
+        self[b'version'] = self.VERSION
         super(gitlfspointer, self).__init__(*args)
         self.update(pycompat.byteskwargs(kwargs))
 
     @classmethod
     def deserialize(cls, text):
         try:
-            return cls(l.split(' ', 1) for l in text.splitlines()).validate()
-        except ValueError: # l.split returns 1 item instead of 2
-            raise InvalidPointer(_('cannot parse git-lfs text: %s')
-                                 % stringutil.pprint(text))
+            return cls(l.split(b' ', 1) for l in text.splitlines()).validate()
+        except ValueError:  # l.split returns 1 item instead of 2
+            raise InvalidPointer(
+                _(b'cannot parse git-lfs text: %s') % stringutil.pprint(text)
+            )
 
     def serialize(self):
-        sortkeyfunc = lambda x: (x[0] != 'version', x)
-        items = sorted(self.validate().iteritems(), key=sortkeyfunc)
-        return ''.join('%s %s\n' % (k, v) for k, v in items)
+        sortkeyfunc = lambda x: (x[0] != b'version', x)
+        items = sorted(pycompat.iteritems(self.validate()), key=sortkeyfunc)
+        return b''.join(b'%s %s\n' % (k, v) for k, v in items)
 
     def oid(self):
-        return self['oid'].split(':')[-1]
+        return self[b'oid'].split(b':')[-1]
 
     def size(self):
-        return int(self['size'])
+        return int(self[b'size'])
 
     # regular expressions used by _validate
     # see https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md
     _keyre = re.compile(br'\A[a-z0-9.-]+\Z')
     _valuere = re.compile(br'\A[^\n]*\Z')
     _requiredre = {
-        'size': re.compile(br'\A[0-9]+\Z'),
-        'oid': re.compile(br'\Asha256:[0-9a-f]{64}\Z'),
-        'version': re.compile(br'\A%s\Z' % stringutil.reescape(VERSION)),
+        b'size': re.compile(br'\A[0-9]+\Z'),
+        b'oid': re.compile(br'\Asha256:[0-9a-f]{64}\Z'),
+        b'version': re.compile(br'\A%s\Z' % stringutil.reescape(VERSION)),
     }
 
     def validate(self):
         """raise InvalidPointer on error. return self if there is no error"""
         requiredcount = 0
-        for k, v in self.iteritems():
+        for k, v in pycompat.iteritems(self):
             if k in self._requiredre:
                 if not self._requiredre[k].match(v):
                     raise InvalidPointer(
-                        _('unexpected lfs pointer value: %s=%s')
-                        % (k, stringutil.pprint(v)))
+                        _(b'unexpected lfs pointer value: %s=%s')
+                        % (k, stringutil.pprint(v))
+                    )
                 requiredcount += 1
             elif not self._keyre.match(k):
-                raise InvalidPointer(_('unexpected lfs pointer key: %s') % k)
+                raise InvalidPointer(_(b'unexpected lfs pointer key: %s') % k)
             if not self._valuere.match(v):
-                raise InvalidPointer(_('unexpected lfs pointer value: %s=%s')
-                                     % (k, stringutil.pprint(v)))
+                raise InvalidPointer(
+                    _(b'unexpected lfs pointer value: %s=%s')
+                    % (k, stringutil.pprint(v))
+                )
         if len(self._requiredre) != requiredcount:
             miss = sorted(set(self._requiredre.keys()).difference(self.keys()))
-            raise InvalidPointer(_('missing lfs pointer keys: %s')
-                                 % ', '.join(miss))
+            raise InvalidPointer(
+                _(b'missing lfs pointer keys: %s') % b', '.join(miss)
+            )
         return self
 
+
 deserialize = gitlfspointer.deserialize
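
For orientation, the pointer file handled above is plain `key value` lines: `version` sorts first, and `oid` (a `sha256:`-prefixed digest) and `size` are mandatory, as the `_requiredre` patterns enforce. A stdlib-only sketch of the round trip with hypothetical content:

import hashlib

content = b'hello, lfs\n'  # hypothetical blob
oid = hashlib.sha256(content).hexdigest().encode('ascii')

# what serialize() emits: 'version' first, then the remaining keys
text = b'version https://git-lfs.github.com/spec/v1\n'
text += b'oid sha256:%s\n' % oid
text += b'size %d\n' % len(content)

# deserialize() splits each line once, on the first space
fields = dict(l.split(b' ', 1) for l in text.splitlines())
assert fields[b'oid'].split(b':')[-1] == oid
assert int(fields[b'size']) == len(content)
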
--- a/hgext/lfs/wireprotolfsserver.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/lfs/wireprotolfsserver.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,9 +12,7 @@
 import json
 import traceback
 
-from mercurial.hgweb import (
-    common as hgwebcommon,
-)
+from mercurial.hgweb import common as hgwebcommon
 
 from mercurial import (
     exthelper,
@@ -35,7 +33,8 @@
 
 eh = exthelper.exthelper()
 
-@eh.wrapfunction(wireprotoserver, 'handlewsgirequest')
+
+@eh.wrapfunction(wireprotoserver, b'handlewsgirequest')
 def handlewsgirequest(orig, rctx, req, res, checkperm):
     """Wrap wireprotoserver.handlewsgirequest() to possibly process an LFS
     request if it is left unprocessed by the wrapped method.
@@ -59,9 +58,9 @@
         # TODO: reserve and use a path in the proposed http wireprotocol /api/
         #       namespace?
         elif req.dispatchpath.startswith(b'.hg/lfs/objects'):
-            return _processbasictransfer(rctx.repo, req, res,
-                                         lambda perm:
-                                                checkperm(rctx, req, perm))
+            return _processbasictransfer(
+                rctx.repo, req, res, lambda perm: checkperm(rctx, req, perm)
+            )
         return False
     except hgwebcommon.ErrorResponse as e:
         # XXX: copied from the handler surrounding wireprotoserver._callhttp()
@@ -73,11 +72,13 @@
         res.setbodybytes(b'0\n%s\n' % pycompat.bytestr(e))
         return True
 
+
 def _sethttperror(res, code, message=None):
     res.status = hgwebcommon.statusmessage(code, message=message)
     res.headers[b'Content-Type'] = b'text/plain; charset=utf-8'
     res.setbodybytes(b'')
 
+
 def _logexception(req):
     """Write information about the current exception to wsgi.errors."""
     tb = pycompat.sysbytes(traceback.format_exc())
@@ -88,8 +89,10 @@
         uri += req.apppath
     uri += b'/' + req.dispatchpath
 
-    errorlog.write(b"Exception happened while processing request '%s':\n%s" %
-                   (uri, tb))
+    errorlog.write(
+        b"Exception happened while processing request '%s':\n%s" % (uri, tb)
+    )
+
 
 def _processbatchrequest(repo, req, res):
     """Handle a request for the Batch API, which is the gateway to granting file
@@ -134,22 +137,32 @@
 
     # If no transfer handlers are explicitly requested, 'basic' is assumed.
     if r'basic' not in lfsreq.get(r'transfers', [r'basic']):
-        _sethttperror(res, HTTP_BAD_REQUEST,
-                      b'Only the basic LFS transfer handler is supported')
+        _sethttperror(
+            res,
+            HTTP_BAD_REQUEST,
+            b'Only the basic LFS transfer handler is supported',
+        )
         return True
 
     operation = lfsreq.get(r'operation')
     operation = pycompat.bytestr(operation)
 
     if operation not in (b'upload', b'download'):
-        _sethttperror(res, HTTP_BAD_REQUEST,
-                      b'Unsupported LFS transfer operation: %s' % operation)
+        _sethttperror(
+            res,
+            HTTP_BAD_REQUEST,
+            b'Unsupported LFS transfer operation: %s' % operation,
+        )
         return True
 
     localstore = repo.svfs.lfslocalblobstore
 
-    objects = [p for p in _batchresponseobjects(req, lfsreq.get(r'objects', []),
-                                                operation, localstore)]
+    objects = [
+        p
+        for p in _batchresponseobjects(
+            req, lfsreq.get(r'objects', []), operation, localstore
+        )
+    ]
 
     rsp = {
         r'transfer': r'basic',
@@ -162,6 +175,7 @@
 
     return True
 
+
 def _batchresponseobjects(req, objects, action, store):
     """Yield one dictionary of attributes for the Batch API response for each
     object in the list.
@@ -197,7 +211,7 @@
         rsp = {
             r'oid': soid,
             r'size': obj.get(r'size'),  # XXX: should this check the local size?
-            #r'authenticated': True,
+            # r'authenticated': True,
         }
 
         exists = True
@@ -222,7 +236,7 @@
 
                 rsp[r'error'] = {
                     r'code': 500,
-                    r'message': inst.strerror or r'Internal Server Server'
+                    r'message': inst.strerror or r'Internal Server Error',
                 }
                 yield rsp
                 continue
@@ -235,15 +249,15 @@
             if not exists:
                 rsp[r'error'] = {
                     r'code': 404,
-                    r'message': r"The object does not exist"
+                    r'message': r"The object does not exist",
                 }
                 yield rsp
                 continue
 
             elif not verifies:
                 rsp[r'error'] = {
-                    r'code': 422,   # XXX: is this the right code?
-                    r'message': r"The object is corrupt"
+                    r'code': 422,  # XXX: is this the right code?
+                    r'message': r"The object is corrupt",
                 }
                 yield rsp
                 continue
@@ -258,9 +272,7 @@
             # The spec doesn't mention the Accept header here, but avoid
             # a gratuitous deviation from lfs-test-server in the test
             # output.
-            hdr = {
-                r'Accept': r'application/vnd.git-lfs'
-            }
+            hdr = {r'Accept': r'application/vnd.git-lfs'}
 
             auth = req.headers.get(b'Authorization', b'')
             if auth.startswith(b'Basic '):
@@ -269,9 +281,11 @@
             return hdr
 
         rsp[r'actions'] = {
-            r'%s' % pycompat.strurl(action): {
-                r'href': pycompat.strurl(b'%s%s/.hg/lfs/objects/%s'
-                    % (req.baseurl, req.apppath, oid)),
+            r'%s'
+            % pycompat.strurl(action): {
+                r'href': pycompat.strurl(
+                    b'%s%s/.hg/lfs/objects/%s' % (req.baseurl, req.apppath, oid)
+                ),
                 # datetime.isoformat() doesn't include the 'Z' suffix
                 r"expires_at": expiresat.strftime(r'%Y-%m-%dT%H:%M:%SZ'),
                 r'header': _buildheader(),
@@ -280,6 +294,7 @@
 
         yield rsp
 
+
 def _processbasictransfer(repo, req, res, checkperm):
     """Handle a single file upload (PUT) or download (GET) action for the Basic
     Transfer Adapter.
@@ -347,6 +362,9 @@
 
         return True
     else:
-        _sethttperror(res, HTTP_METHOD_NOT_ALLOWED,
-                      message=b'Unsupported LFS transfer method: %s' % method)
+        _sethttperror(
+            res,
+            HTTP_METHOD_NOT_ALLOWED,
+            message=b'Unsupported LFS transfer method: %s' % method,
+        )
         return True
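
Put together, a successful `download` entry yielded by `_batchresponseobjects` above comes out roughly like this (the oid, size, and base URL are hypothetical, and the ten-minute expiry is an assumption, not taken from the hunk):

import datetime
import json

oid = 'ab' * 32  # hypothetical sha256 oid
expiresat = datetime.datetime.utcnow() + datetime.timedelta(minutes=10)

rsp = {
    'oid': oid,
    'size': 11,
    'actions': {
        'download': {
            'href': 'http://localhost/repo/.hg/lfs/objects/%s' % oid,
            # datetime.isoformat() doesn't include the 'Z' suffix,
            # hence strftime, as in the code above
            'expires_at': expiresat.strftime('%Y-%m-%dT%H:%M:%SZ'),
            'header': {'Accept': 'application/vnd.git-lfs'},
        },
    },
}
print(json.dumps(rsp, indent=2, sort_keys=True))
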
--- a/hgext/lfs/wrapper.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/lfs/wrapper.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,6 +11,10 @@
 
 from mercurial.i18n import _
 from mercurial.node import bin, hex, nullid, short
+from mercurial.pycompat import (
+    getattr,
+    setattr,
+)
 
 from mercurial import (
     bundle2,
@@ -21,7 +25,7 @@
     exchange,
     exthelper,
     localrepo,
-    repository,
+    pycompat,
     revlog,
     scmutil,
     upgrade,
@@ -30,6 +34,8 @@
     wireprotov1server,
 )
 
+from mercurial.interfaces import repository
+
 from mercurial.utils import (
     storageutil,
     stringutil,
@@ -44,37 +50,42 @@
 
 eh = exthelper.exthelper()
 
-@eh.wrapfunction(localrepo, 'makefilestorage')
+
+@eh.wrapfunction(localrepo, b'makefilestorage')
 def localrepomakefilestorage(orig, requirements, features, **kwargs):
     if b'lfs' in requirements:
         features.add(repository.REPO_FEATURE_LFS)
 
     return orig(requirements=requirements, features=features, **kwargs)
 
-@eh.wrapfunction(changegroup, 'allsupportedversions')
+
+@eh.wrapfunction(changegroup, b'allsupportedversions')
 def allsupportedversions(orig, ui):
     versions = orig(ui)
-    versions.add('03')
+    versions.add(b'03')
     return versions
 
-@eh.wrapfunction(wireprotov1server, '_capabilities')
+
+@eh.wrapfunction(wireprotov1server, b'_capabilities')
 def _capabilities(orig, repo, proto):
     '''Wrap server command to announce lfs server capability'''
     caps = orig(repo, proto)
-    if util.safehasattr(repo.svfs, 'lfslocalblobstore'):
+    if util.safehasattr(repo.svfs, b'lfslocalblobstore'):
         # Advertise a slightly different capability when lfs is *required*, so
         # that the client knows it MUST load the extension.  If lfs is not
         # required on the server, there's no reason to autoload the extension
         # on the client.
         if b'lfs' in repo.requirements:
-            caps.append('lfs-serve')
+            caps.append(b'lfs-serve')
 
-        caps.append('lfs')
+        caps.append(b'lfs')
     return caps
 
+
 def bypasscheckhash(self, text):
     return False
 
+
 def readfromstore(self, text):
     """Read filelog content from local blobstore transform for flagprocessor.
 
@@ -95,15 +106,16 @@
     # pack hg filelog metadata
     hgmeta = {}
     for k in p.keys():
-        if k.startswith('x-hg-'):
-            name = k[len('x-hg-'):]
+        if k.startswith(b'x-hg-'):
+            name = k[len(b'x-hg-') :]
             hgmeta[name] = p[k]
-    if hgmeta or text.startswith('\1\n'):
+    if hgmeta or text.startswith(b'\1\n'):
         text = storageutil.packmeta(hgmeta, text)
 
-    return (text, True)
+    return (text, True, {})
 
-def writetostore(self, text):
+
+def writetostore(self, text, sidedata):
     # hg filelog metadata (includes rename, etc)
     hgmeta, offset = storageutil.parsemeta(text)
     if offset and offset > 0:
@@ -115,24 +127,25 @@
     self.opener.lfslocalblobstore.write(oid, text)
 
     # replace contents with metadata
-    longoid = 'sha256:%s' % oid
-    metadata = pointer.gitlfspointer(oid=longoid, size='%d' % len(text))
+    longoid = b'sha256:%s' % oid
+    metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))
 
     # by default, we expect the content to be binary. however, LFS could also
     # be used for non-binary content. add a special entry for non-binary data.
     # this will be used by filectx.isbinary().
     if not stringutil.binary(text):
         # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
-        metadata['x-is-binary'] = '0'
+        metadata[b'x-is-binary'] = b'0'
 
     # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
     if hgmeta is not None:
-        for k, v in hgmeta.iteritems():
-            metadata['x-hg-%s' % k] = v
+        for k, v in pycompat.iteritems(hgmeta):
+            metadata[b'x-hg-%s' % k] = v
 
     rawtext = metadata.serialize()
     return (rawtext, False)
 
+
 def _islfs(rlog, node=None, rev=None):
     if rev is None:
         if node is None:
@@ -146,12 +159,23 @@
     flags = rlog._revlog.flags(rev)
     return bool(flags & revlog.REVIDX_EXTSTORED)
 
+
 # Wrapping may also be applied by remotefilelog
-def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
-                       cachedelta=None, node=None,
-                       flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
+def filelogaddrevision(
+    orig,
+    self,
+    text,
+    transaction,
+    link,
+    p1,
+    p2,
+    cachedelta=None,
+    node=None,
+    flags=revlog.REVIDX_DEFAULT_FLAGS,
+    **kwds
+):
     # The matcher isn't available if reposetup() wasn't called.
-    lfstrack = self._revlog.opener.options.get('lfstrack')
+    lfstrack = self._revlog.opener.options.get(b'lfstrack')
 
     if lfstrack:
         textlen = len(text)
@@ -163,32 +187,45 @@
         if lfstrack(self._revlog.filename, textlen):
             flags |= revlog.REVIDX_EXTSTORED
 
-    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
-                node=node, flags=flags, **kwds)
+    return orig(
+        self,
+        text,
+        transaction,
+        link,
+        p1,
+        p2,
+        cachedelta=cachedelta,
+        node=node,
+        flags=flags,
+        **kwds
+    )
+
 
 # Wrapping may also be applied by remotefilelog
 def filelogrenamed(orig, self, node):
     if _islfs(self, node):
-        rawtext = self._revlog.revision(node, raw=True)
+        rawtext = self._revlog.rawdata(node)
         if not rawtext:
             return False
         metadata = pointer.deserialize(rawtext)
-        if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
-            return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
+        if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
+            return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
         else:
             return False
     return orig(self, node)
 
+
 # Wrapping may also be applied by remotefilelog
 def filelogsize(orig, self, rev):
     if _islfs(self, rev=rev):
         # fast path: use lfs metadata to answer size
-        rawtext = self._revlog.revision(rev, raw=True)
+        rawtext = self._revlog.rawdata(rev)
         metadata = pointer.deserialize(rawtext)
-        return int(metadata['size'])
+        return int(metadata[b'size'])
     return orig(self, rev)
 
-@eh.wrapfunction(context.basefilectx, 'cmp')
+
+@eh.wrapfunction(context.basefilectx, b'cmp')
 def filectxcmp(orig, self, fctx):
     """returns True if text is different than fctx"""
     # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
@@ -199,41 +236,63 @@
         return p1.oid() != p2.oid()
     return orig(self, fctx)
 
-@eh.wrapfunction(context.basefilectx, 'isbinary')
+
+@eh.wrapfunction(context.basefilectx, b'isbinary')
 def filectxisbinary(orig, self):
     if self.islfs():
         # fast path: use lfs metadata to answer isbinary
         metadata = pointer.deserialize(self.rawdata())
         # if lfs metadata says nothing, assume it's binary by default
-        return bool(int(metadata.get('x-is-binary', 1)))
+        return bool(int(metadata.get(b'x-is-binary', 1)))
     return orig(self)
 
+
 def filectxislfs(self):
     return _islfs(self.filelog(), self.filenode())
 
-@eh.wrapfunction(cmdutil, '_updatecatformatter')
+
+@eh.wrapfunction(cmdutil, b'_updatecatformatter')
 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
     orig(fm, ctx, matcher, path, decode)
     fm.data(rawdata=ctx[path].rawdata())
 
-@eh.wrapfunction(scmutil, 'wrapconvertsink')
+
+@eh.wrapfunction(scmutil, b'wrapconvertsink')
 def convertsink(orig, sink):
     sink = orig(sink)
-    if sink.repotype == 'hg':
+    if sink.repotype == b'hg':
+
         class lfssink(sink.__class__):
-            def putcommit(self, files, copies, parents, commit, source, revmap,
-                          full, cleanp2):
+            def putcommit(
+                self,
+                files,
+                copies,
+                parents,
+                commit,
+                source,
+                revmap,
+                full,
+                cleanp2,
+            ):
                 pc = super(lfssink, self).putcommit
-                node = pc(files, copies, parents, commit, source, revmap, full,
-                          cleanp2)
+                node = pc(
+                    files,
+                    copies,
+                    parents,
+                    commit,
+                    source,
+                    revmap,
+                    full,
+                    cleanp2,
+                )
 
-                if 'lfs' not in self.repo.requirements:
+                if b'lfs' not in self.repo.requirements:
                     ctx = self.repo[node]
 
                     # The file list may contain removed files, so check for
                     # membership before assuming it is in the context.
                     if any(f in ctx and ctx[f].islfs() for f, n in files):
-                        self.repo.requirements.add('lfs')
+                        self.repo.requirements.add(b'lfs')
                         self.repo._writerequirements()
 
                 return node
@@ -242,25 +301,27 @@
 
     return sink
 
+
 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
 # options and blob stores are passed from othervfs to the new readonlyvfs.
-@eh.wrapfunction(vfsmod.readonlyvfs, '__init__')
+@eh.wrapfunction(vfsmod.readonlyvfs, b'__init__')
 def vfsinit(orig, self, othervfs):
     orig(self, othervfs)
     # copy lfs related options
     for k, v in othervfs.options.items():
-        if k.startswith('lfs'):
+        if k.startswith(b'lfs'):
             self.options[k] = v
     # also copy lfs blobstores. note: this can run before reposetup, so lfs
     # blobstore attributes are not always ready at this time.
-    for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
+    for name in [b'lfslocalblobstore', b'lfsremoteblobstore']:
         if util.safehasattr(othervfs, name):
             setattr(self, name, getattr(othervfs, name))
 
+
 def _prefetchfiles(repo, revs, match):
     """Ensure that required LFS blobs are present, fetching them as a group if
     needed."""
-    if not util.safehasattr(repo.svfs, 'lfslocalblobstore'):
+    if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
         return
 
     pointers = []
@@ -281,22 +342,25 @@
         # on the repo by a clone command to be used for the update.
         blobstore.remote(repo).readbatch(pointers, localstore)
 
+
 def _canskipupload(repo):
     # Skip if this hasn't been passed to reposetup()
-    if not util.safehasattr(repo.svfs, 'lfsremoteblobstore'):
+    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
         return True
 
     # if remotestore is a null store, upload is a no-op and can be skipped
     return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
 
+
 def candownload(repo):
     # Skip if this hasn't been passed to reposetup()
-    if not util.safehasattr(repo.svfs, 'lfsremoteblobstore'):
+    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
         return False
 
     # if remotestore is a null store, downloads will lead to nothing
     return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
 
+
 def uploadblobsfromrevs(repo, revs):
     '''upload lfs blobs introduced by revs
 
@@ -307,6 +371,7 @@
     pointers = extractpointers(repo, revs)
     uploadblobs(repo, pointers)
 
+
 def prepush(pushop):
     """Prepush hook.
 
@@ -316,20 +381,22 @@
     """
     return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
 
-@eh.wrapfunction(exchange, 'push')
+
+@eh.wrapfunction(exchange, b'push')
 def push(orig, repo, remote, *args, **kwargs):
     """bail on push if the extension isn't enabled on remote when needed, and
     update the remote store based on the destination path."""
-    if 'lfs' in repo.requirements:
+    if b'lfs' in repo.requirements:
         # If the remote peer is for a local repo, the requirement tests in the
         # base class method enforce lfs support.  Otherwise, some revisions in
         # this repo use lfs, and the remote repo needs the extension loaded.
-        if not remote.local() and not remote.capable('lfs'):
+        if not remote.local() and not remote.capable(b'lfs'):
             # This is a copy of the message in exchange.push() when requirements
             # are missing between local repos.
-            m = _("required features are not supported in the destination: %s")
-            raise error.Abort(m % 'lfs',
-                              hint=_('enable the lfs extension on the server'))
+            m = _(b"required features are not supported in the destination: %s")
+            raise error.Abort(
+                m % b'lfs', hint=_(b'enable the lfs extension on the server')
+            )
 
         # Repositories where this extension is disabled won't have the field.
         # But if there's a requirement, then the extension must be loaded AND
@@ -343,22 +410,28 @@
     else:
         return orig(repo, remote, *args, **kwargs)
 
+
 # when writing a bundle via "hg bundle" command, upload related LFS blobs
-@eh.wrapfunction(bundle2, 'writenewbundle')
-def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
-                   *args, **kwargs):
+@eh.wrapfunction(bundle2, b'writenewbundle')
+def writenewbundle(
+    orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
+):
     """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
     uploadblobsfromrevs(repo, outgoing.missing)
-    return orig(ui, repo, source, filename, bundletype, outgoing, *args,
-                **kwargs)
+    return orig(
+        ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
+    )
+
 
 def extractpointers(repo, revs):
     """return a list of lfs pointers added by given revs"""
-    repo.ui.debug('lfs: computing set of blobs to upload\n')
+    repo.ui.debug(b'lfs: computing set of blobs to upload\n')
     pointers = {}
 
     makeprogress = repo.ui.makeprogress
-    with makeprogress(_('lfs search'), _('changesets'), len(revs)) as progress:
+    with makeprogress(
+        _(b'lfs search'), _(b'changesets'), len(revs)
+    ) as progress:
         for r in revs:
             ctx = repo[r]
             for p in pointersfromctx(ctx).values():
@@ -366,6 +439,7 @@
             progress.increment()
         return sorted(pointers.values(), key=lambda p: p.oid())
 
+
 def pointerfromctx(ctx, f, removed=False):
     """return a pointer for the named file from the given changectx, or None if
     the file isn't LFS.
@@ -393,8 +467,11 @@
             return p
         return {}
     except pointer.InvalidPointer as ex:
-        raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
-                          % (f, short(_ctx.node()), ex))
+        raise error.Abort(
+            _(b'lfs: corrupted pointer (%s@%s): %s\n')
+            % (f, short(_ctx.node()), ex)
+        )
+
 
 def pointersfromctx(ctx, removed=False):
     """return a dict {path: pointer} for given single changectx.
@@ -414,6 +491,7 @@
             result[f] = p
     return result
 
+
 def uploadblobs(repo, pointers):
     """upload given pointers from local blobstore"""
     if not pointers:
@@ -422,25 +500,28 @@
     remoteblob = repo.svfs.lfsremoteblobstore
     remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
 
-@eh.wrapfunction(upgrade, '_finishdatamigration')
+
+@eh.wrapfunction(upgrade, b'_finishdatamigration')
 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
     orig(ui, srcrepo, dstrepo, requirements)
 
     # Skip if this hasn't been passed to reposetup()
-    if (util.safehasattr(srcrepo.svfs, 'lfslocalblobstore') and
-        util.safehasattr(dstrepo.svfs, 'lfslocalblobstore')):
+    if util.safehasattr(
+        srcrepo.svfs, b'lfslocalblobstore'
+    ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'):
         srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
         dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs
 
         for dirpath, dirs, files in srclfsvfs.walk():
             for oid in files:
-                ui.write(_('copying lfs blob %s\n') % oid)
+                ui.write(_(b'copying lfs blob %s\n') % oid)
                 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
 
-@eh.wrapfunction(upgrade, 'preservedrequirements')
-@eh.wrapfunction(upgrade, 'supporteddestrequirements')
+
+@eh.wrapfunction(upgrade, b'preservedrequirements')
+@eh.wrapfunction(upgrade, b'supporteddestrequirements')
 def upgraderequirements(orig, repo):
     reqs = orig(repo)
-    if 'lfs' in repo.requirements:
-        reqs.add('lfs')
+    if b'lfs' in repo.requirements:
+        reqs.add(b'lfs')
     return reqs
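
To make the `writetostore`/`readfromstore` pair above concrete: hg filelog metadata keys gain an `x-hg-` prefix inside the pointer and shed it again on read, so rename information survives the detour through the blobstore. A toy round trip with plain dicts standing in for the real `gitlfspointer`:

# hypothetical filelog metadata for a renamed file
hgmeta = {b'copy': b'old/name.bin', b'copyrev': b'0' * 40}

# writetostore: translate hg filelog metadata to lfs metadata
lfsmeta = {b'x-hg-%s' % k: v for k, v in hgmeta.items()}

# readfromstore: strip the prefix back off
restored = {
    k[len(b'x-hg-'):]: v
    for k, v in lfsmeta.items()
    if k.startswith(b'x-hg-')
}
assert restored == hgmeta
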
--- a/hgext/logtoprocess.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/logtoprocess.py	Mon Oct 21 11:09:48 2019 -0400
@@ -36,15 +36,14 @@
 
 import os
 
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
 class processlogger(object):
     """Map log events to external commands
@@ -66,10 +65,12 @@
             b'MSG1': msg,
         }
         # keyword arguments get prefixed with OPT_ and uppercased
-        env.update((b'OPT_%s' % key.upper(), value)
-                   for key, value in opts.items())
+        env.update(
+            (b'OPT_%s' % key.upper(), value) for key, value in opts.items()
+        )
         fullenv = procutil.shellenviron(env)
         procutil.runbgcommand(script, fullenv, shell=True)
 
+
 def uipopulate(ui):
     ui.setlogger(b'logtoprocess', processlogger(ui))
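
The `processlogger.log()` body above hands each event to the external script purely through environment variables. A sketch of just that mangling, with hypothetical event data:

msg = b'a commit happened\n'
opts = {b'duration': b'0.2', b'user': b'alice'}

env = {b'MSG1': msg}
# keyword arguments get prefixed with OPT_ and uppercased
env.update(
    (b'OPT_%s' % key.upper(), value) for key, value in opts.items()
)
assert env[b'OPT_DURATION'] == b'0.2'
assert env[b'OPT_USER'] == b'alice'
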
--- a/hgext/mq.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/mq.py	Mon Oct 21 11:09:48 2019 -0400
@@ -76,6 +76,11 @@
     nullrev,
     short,
 )
+from mercurial.pycompat import (
+    delattr,
+    getattr,
+    open,
+)
 from mercurial import (
     cmdutil,
     commands,
@@ -104,7 +109,7 @@
 )
 
 release = lockmod.release
-seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
+seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))]
 
 cmdtable = {}
 command = registrar.command(cmdtable)
@@ -112,39 +117,42 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('mq', 'git',
-    default='auto',
+configitem(
+    b'mq', b'git', default=b'auto',
 )
-configitem('mq', 'keepchanges',
-    default=False,
+configitem(
+    b'mq', b'keepchanges', default=False,
 )
-configitem('mq', 'plain',
-    default=False,
+configitem(
+    b'mq', b'plain', default=False,
 )
-configitem('mq', 'secret',
-    default=False,
+configitem(
+    b'mq', b'secret', default=False,
 )
 
 # force load strip extension formerly included in mq and import some utility
 try:
-    stripext = extensions.find('strip')
+    stripext = extensions.find(b'strip')
 except KeyError:
     # note: load is lazy so we could avoid the try-except,
     # but I (marmoute) prefer this explicit code.
     class dummyui(object):
         def debug(self, msg):
             pass
+
         def log(self, event, msgfmt, *msgargs, **opts):
             pass
-    stripext = extensions.load(dummyui(), 'strip', '')
+
+    stripext = extensions.load(dummyui(), b'strip', b'')
 
 strip = stripext.strip
 
+
 def checksubstate(repo, baserev=None):
     '''return list of subrepos at a different revision than substate.
     Abort if any subrepos have uncommitted changes.'''
@@ -160,36 +168,40 @@
             inclsubs.append(s)
     return inclsubs
 
+
 # Patch names look like unix-file names.
 # They must be joinable with queue directory and result in the patch path.
 normname = util.normpath
 
+
 class statusentry(object):
     def __init__(self, node, name):
         self.node, self.name = node, name
 
     def __bytes__(self):
-        return hex(self.node) + ':' + self.name
+        return hex(self.node) + b':' + self.name
 
     __str__ = encoding.strmethod(__bytes__)
     __repr__ = encoding.strmethod(__bytes__)
 
+
 # The order of the headers in 'hg export' HG patches:
 HGHEADERS = [
-#   '# HG changeset patch',
-    '# User ',
-    '# Date ',
-    '#      ',
-    '# Branch ',
-    '# Node ID ',
-    '# Parent  ', # can occur twice for merges - but that is not relevant for mq
-    ]
+    #   '# HG changeset patch',
+    b'# User ',
+    b'# Date ',
+    b'#      ',
+    b'# Branch ',
+    b'# Node ID ',
+    b'# Parent  ',  # can occur twice for merges - but that is not relevant for mq
+]
 # The order of headers in plain 'mail style' patches:
 PLAINHEADERS = {
-    'from': 0,
-    'date': 1,
-    'subject': 2,
-    }
+    b'from': 0,
+    b'date': 1,
+    b'subject': 2,
+}
+
 
 def inserthgheader(lines, header, value):
     """Assuming lines contains a HG patch header, add a header line with value.
@@ -214,12 +226,12 @@
     ...                b'# Date ', b'z')
     ['# HG changeset patch', '# Date z', '# Parent  y']
     """
-    start = lines.index('# HG changeset patch') + 1
+    start = lines.index(b'# HG changeset patch') + 1
     newindex = HGHEADERS.index(header)
     bestpos = len(lines)
     for i in range(start, len(lines)):
         line = lines[i]
-        if not line.startswith('# '):
+        if not line.startswith(b'# '):
             bestpos = min(bestpos, i)
             break
         for lineindex, h in enumerate(HGHEADERS):
@@ -229,10 +241,11 @@
                     return lines
                 if lineindex > newindex:
                     bestpos = min(bestpos, i)
-                break # next line
+                break  # next line
     lines.insert(bestpos, header + value)
     return lines
 
+
 def insertplainheader(lines, header, value):
     """For lines containing a plain patch header, add a header line with value.
     >>> insertplainheader([], b'Date', b'z')
@@ -253,34 +266,38 @@
     newprio = PLAINHEADERS[header.lower()]
     bestpos = len(lines)
     for i, line in enumerate(lines):
-        if ':' in line:
-            lheader = line.split(':', 1)[0].strip().lower()
+        if b':' in line:
+            lheader = line.split(b':', 1)[0].strip().lower()
             lprio = PLAINHEADERS.get(lheader, newprio + 1)
             if lprio == newprio:
-                lines[i] = '%s: %s' % (header, value)
+                lines[i] = b'%s: %s' % (header, value)
                 return lines
             if lprio > newprio and i < bestpos:
                 bestpos = i
         else:
             if line:
-                lines.insert(i, '')
+                lines.insert(i, b'')
             if i < bestpos:
                 bestpos = i
             break
-    lines.insert(bestpos, '%s: %s' % (header, value))
+    lines.insert(bestpos, b'%s: %s' % (header, value))
     return lines
 
+
 class patchheader(object):
     def __init__(self, pf, plainmode=False):
         def eatdiff(lines):
             while lines:
                 l = lines[-1]
-                if (l.startswith("diff -") or
-                    l.startswith("Index:") or
-                    l.startswith("===========")):
+                if (
+                    l.startswith(b"diff -")
+                    or l.startswith(b"Index:")
+                    or l.startswith(b"===========")
+                ):
                     del lines[-1]
                 else:
                     break
+
         def eatempty(lines):
             while lines:
                 if not lines[-1].strip():
@@ -299,50 +316,54 @@
         nodeid = None
         diffstart = 0
 
-        for line in open(pf, 'rb'):
+        for line in open(pf, b'rb'):
             line = line.rstrip()
-            if (line.startswith('diff --git')
-                or (diffstart and line.startswith('+++ '))):
+            if line.startswith(b'diff --git') or (
+                diffstart and line.startswith(b'+++ ')
+            ):
                 diffstart = 2
                 break
-            diffstart = 0 # reset
-            if line.startswith("--- "):
+            diffstart = 0  # reset
+            if line.startswith(b"--- "):
                 diffstart = 1
                 continue
-            elif format == "hgpatch":
+            elif format == b"hgpatch":
                 # parse values when importing the result of an hg export
-                if line.startswith("# User "):
+                if line.startswith(b"# User "):
                     user = line[7:]
-                elif line.startswith("# Date "):
+                elif line.startswith(b"# Date "):
                     date = line[7:]
-                elif line.startswith("# Parent "):
-                    parent = line[9:].lstrip() # handle double trailing space
-                elif line.startswith("# Branch "):
+                elif line.startswith(b"# Parent "):
+                    parent = line[9:].lstrip()  # handle double trailing space
+                elif line.startswith(b"# Branch "):
                     branch = line[9:]
-                elif line.startswith("# Node ID "):
+                elif line.startswith(b"# Node ID "):
                     nodeid = line[10:]
-                elif not line.startswith("# ") and line:
+                elif not line.startswith(b"# ") and line:
                     message.append(line)
                     format = None
-            elif line == '# HG changeset patch':
+            elif line == b'# HG changeset patch':
                 message = []
-                format = "hgpatch"
-            elif (format != "tagdone" and (line.startswith("Subject: ") or
-                                           line.startswith("subject: "))):
+                format = b"hgpatch"
+            elif format != b"tagdone" and (
+                line.startswith(b"Subject: ") or line.startswith(b"subject: ")
+            ):
                 subject = line[9:]
-                format = "tag"
-            elif (format != "tagdone" and (line.startswith("From: ") or
-                                           line.startswith("from: "))):
+                format = b"tag"
+            elif format != b"tagdone" and (
+                line.startswith(b"From: ") or line.startswith(b"from: ")
+            ):
                 user = line[6:]
-                format = "tag"
-            elif (format != "tagdone" and (line.startswith("Date: ") or
-                                           line.startswith("date: "))):
+                format = b"tag"
+            elif format != b"tagdone" and (
+                line.startswith(b"Date: ") or line.startswith(b"date: ")
+            ):
                 date = line[6:]
-                format = "tag"
-            elif format == "tag" and line == "":
+                format = b"tag"
+            elif format == b"tag" and line == b"":
                 # when looking for tags (subject: from: etc) they
                 # end once you find a blank line in the source
-                format = "tagdone"
+                format = b"tagdone"
             elif message or line:
                 message.append(line)
             comments.append(line)
@@ -356,7 +377,7 @@
         eatempty(comments)
 
         # make sure message isn't empty
-        if format and format.startswith("tag") and subject:
+        if format and format.startswith(b"tag") and subject:
             message.insert(0, subject)
 
         self.message = message
@@ -368,40 +389,43 @@
         self.nodeid = nodeid
         self.branch = branch
         self.haspatch = diffstart > 1
-        self.plainmode = (plainmode or
-                          '# HG changeset patch' not in self.comments and
-                          any(c.startswith('Date: ') or
-                                   c.startswith('From: ')
-                                   for c in self.comments))
+        self.plainmode = (
+            plainmode
+            or b'# HG changeset patch' not in self.comments
+            and any(
+                c.startswith(b'Date: ') or c.startswith(b'From: ')
+                for c in self.comments
+            )
+        )
 
     def setuser(self, user):
         try:
-            inserthgheader(self.comments, '# User ', user)
+            inserthgheader(self.comments, b'# User ', user)
         except ValueError:
             if self.plainmode:
-                insertplainheader(self.comments, 'From', user)
+                insertplainheader(self.comments, b'From', user)
             else:
-                tmp = ['# HG changeset patch', '# User ' + user]
+                tmp = [b'# HG changeset patch', b'# User ' + user]
                 self.comments = tmp + self.comments
         self.user = user
 
     def setdate(self, date):
         try:
-            inserthgheader(self.comments, '# Date ', date)
+            inserthgheader(self.comments, b'# Date ', date)
         except ValueError:
             if self.plainmode:
-                insertplainheader(self.comments, 'Date', date)
+                insertplainheader(self.comments, b'Date', date)
             else:
-                tmp = ['# HG changeset patch', '# Date ' + date]
+                tmp = [b'# HG changeset patch', b'# Date ' + date]
                 self.comments = tmp + self.comments
         self.date = date
 
     def setparent(self, parent):
         try:
-            inserthgheader(self.comments, '# Parent  ', parent)
+            inserthgheader(self.comments, b'# Parent  ', parent)
         except ValueError:
             if not self.plainmode:
-                tmp = ['# HG changeset patch', '# Parent  ' + parent]
+                tmp = [b'# HG changeset patch', b'# Parent  ' + parent]
                 self.comments = tmp + self.comments
         self.parent = parent
 
@@ -411,14 +435,14 @@
         self.message = [message]
         if message:
             if self.plainmode and self.comments and self.comments[-1]:
-                self.comments.append('')
+                self.comments.append(b'')
             self.comments.append(message)
 
     def __bytes__(self):
-        s = '\n'.join(self.comments).rstrip()
+        s = b'\n'.join(self.comments).rstrip()
         if not s:
-            return ''
-        return s + '\n\n'
+            return b''
+        return s + b'\n\n'
 
     __str__ = encoding.strmethod(__bytes__)
 
@@ -427,7 +451,7 @@
         If comments contains 'subject: ', message will prepend
         the field and a blank line.'''
         if self.message:
-            subj = 'subject: ' + self.message[0].lower()
+            subj = b'subject: ' + self.message[0].lower()
             for i in pycompat.xrange(len(self.comments)):
                 if subj == self.comments[i].lower():
                     del self.comments[i]
@@ -439,6 +463,7 @@
                 ci += 1
             del self.comments[ci]
 
+
 def newcommit(repo, phase, *args, **kwargs):
     """helper dedicated to ensure a commit respect mq.secret setting
 
@@ -447,31 +472,33 @@
     """
     repo = repo.unfiltered()
     if phase is None:
-        if repo.ui.configbool('mq', 'secret'):
+        if repo.ui.configbool(b'mq', b'secret'):
             phase = phases.secret
-    overrides = {('ui', 'allowemptycommit'): True}
+    overrides = {(b'ui', b'allowemptycommit'): True}
     if phase is not None:
-        overrides[('phases', 'new-commit')] = phase
-    with repo.ui.configoverride(overrides, 'mq'):
-        repo.ui.setconfig('ui', 'allowemptycommit', True)
+        overrides[(b'phases', b'new-commit')] = phase
+    with repo.ui.configoverride(overrides, b'mq'):
+        repo.ui.setconfig(b'ui', b'allowemptycommit', True)
         return repo.commit(*args, **kwargs)
 
+
 class AbortNoCleanup(error.Abort):
     pass
 
+
 class queue(object):
     def __init__(self, ui, baseui, path, patchdir=None):
         self.basepath = path
         try:
-            with open(os.path.join(path, 'patches.queue'), r'rb') as fh:
+            with open(os.path.join(path, b'patches.queue'), r'rb') as fh:
                 cur = fh.read().rstrip()
 
             if not cur:
-                curpath = os.path.join(path, 'patches')
+                curpath = os.path.join(path, b'patches')
             else:
-                curpath = os.path.join(path, 'patches-' + cur)
+                curpath = os.path.join(path, b'patches-' + cur)
         except IOError:
-            curpath = os.path.join(path, 'patches')
+            curpath = os.path.join(path, b'patches')
         self.path = patchdir or curpath
         self.opener = vfsmod.vfs(self.path)
         self.ui = ui
@@ -479,36 +506,39 @@
         self.applieddirty = False
         self.seriesdirty = False
         self.added = []
-        self.seriespath = "series"
-        self.statuspath = "status"
-        self.guardspath = "guards"
+        self.seriespath = b"series"
+        self.statuspath = b"status"
+        self.guardspath = b"guards"
         self.activeguards = None
         self.guardsdirty = False
         # Handle mq.git as a bool with extended values
-        gitmode = ui.config('mq', 'git').lower()
+        gitmode = ui.config(b'mq', b'git').lower()
         boolmode = stringutil.parsebool(gitmode)
         if boolmode is not None:
             if boolmode:
-                gitmode = 'yes'
+                gitmode = b'yes'
             else:
-                gitmode = 'no'
+                gitmode = b'no'
         self.gitmode = gitmode
         # deprecated config: mq.plain
-        self.plainmode = ui.configbool('mq', 'plain')
+        self.plainmode = ui.configbool(b'mq', b'plain')
         self.checkapplied = True
 
     @util.propertycache
     def applied(self):
         def parselines(lines):
             for l in lines:
-                entry = l.split(':', 1)
+                entry = l.split(b':', 1)
                 if len(entry) > 1:
                     n, name = entry
                     yield statusentry(bin(n), name)
                 elif l.strip():
-                    self.ui.warn(_('malformated mq status line: %s\n') %
-                                 stringutil.pprint(entry))
+                    self.ui.warn(
+                        _(b'malformed mq status line: %s\n')
+                        % stringutil.pprint(entry)
+                    )
                 # else we ignore empty lines
+
         try:
             lines = self.opener.read(self.statuspath).splitlines()
             return list(parselines(lines))
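
Each status line is ``<hex node>:<patch name>``; splitting on the first
colon only means colons inside patch names survive, and any non-empty line
without a colon is reported as malformed. A plain-Python sketch of the
parser (hypothetical names, warnings collected instead of written to the
ui)::

   def parse_status(lines):
       entries, bad = [], []
       for l in lines:
           entry = l.split(b':', 1)   # split on the first colon only
           if len(entry) > 1:
               n, name = entry
               entries.append((bytes.fromhex(n.decode('ascii')), name))
           elif l.strip():            # non-empty but no colon: malformed
               bad.append(l)
           # empty lines are silently ignored
       return entries, bad

   entries, bad = parse_status([b'00' * 20 + b':fix-build.patch', b'', b'oops'])
   assert entries[0][1] == b'fix-build.patch' and bad == [b'oops']
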
@@ -537,7 +567,7 @@
         return self.seriesguards
 
     def invalidate(self):
-        for a in 'applied fullseries series seriesguards'.split():
+        for a in b'applied fullseries series seriesguards'.split():
             if a in self.__dict__:
                 delattr(self, a)
         self.applieddirty = False
@@ -548,17 +578,24 @@
     def diffopts(self, opts=None, patchfn=None, plain=False):
         """Return diff options tweaked for this mq use, possibly upgrading to
         git format, and possibly plain and without lossy options."""
-        diffopts = patchmod.difffeatureopts(self.ui, opts,
-            git=True, whitespace=not plain, formatchanging=not plain)
-        if self.gitmode == 'auto':
+        diffopts = patchmod.difffeatureopts(
+            self.ui,
+            opts,
+            git=True,
+            whitespace=not plain,
+            formatchanging=not plain,
+        )
+        if self.gitmode == b'auto':
             diffopts.upgrade = True
-        elif self.gitmode == 'keep':
+        elif self.gitmode == b'keep':
             pass
-        elif self.gitmode in ('yes', 'no'):
-            diffopts.git = self.gitmode == 'yes'
+        elif self.gitmode in (b'yes', b'no'):
+            diffopts.git = self.gitmode == b'yes'
         else:
-            raise error.Abort(_('mq.git option can be auto/keep/yes/no'
-                               ' got %s') % self.gitmode)
+            raise error.Abort(
+                _(b'mq.git option can be auto/keep/yes/no, got %s')
+                % self.gitmode
+            )
         if patchfn:
             diffopts = self.patchopts(diffopts, patchfn)
         return diffopts
@@ -568,12 +605,13 @@
         referenced patch is a git patch and should be preserved as such.
         """
         diffopts = diffopts.copy()
-        if not diffopts.git and self.gitmode == 'keep':
+        if not diffopts.git and self.gitmode == b'keep':
             for patchfn in patches:
-                patchf = self.opener(patchfn, 'r')
+                patchf = self.opener(patchfn, b'r')
                 # if the patch was a git patch, refresh it as a git patch
-                diffopts.git = any(line.startswith('diff --git')
-                                   for line in patchf)
+                diffopts.git = any(
+                    line.startswith(b'diff --git') for line in patchf
+                )
                 patchf.close()
         return diffopts
 
@@ -582,8 +620,9 @@
 
     def findseries(self, patch):
         def matchpatch(l):
-            l = l.split('#', 1)[0]
+            l = l.split(b'#', 1)[0]
             return l.strip() == patch
+
         for index, l in enumerate(self.fullseries):
             if matchpatch(l):
                 return index
@@ -595,10 +634,10 @@
         self.series = []
         self.seriesguards = []
         for l in self.fullseries:
-            h = l.find('#')
+            h = l.find(b'#')
             if h == -1:
                 patch = l
-                comment = ''
+                comment = b''
             elif h == 0:
                 continue
             else:
@@ -607,22 +646,26 @@
             patch = patch.strip()
             if patch:
                 if patch in self.series:
-                    raise error.Abort(_('%s appears more than once in %s') %
-                                     (patch, self.join(self.seriespath)))
+                    raise error.Abort(
+                        _(b'%s appears more than once in %s')
+                        % (patch, self.join(self.seriespath))
+                    )
                 self.series.append(patch)
                 self.seriesguards.append(self.guard_re.findall(comment))
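
parseseries splits every series line at the first ``#``: the left side is
the patch name, the remainder a comment that may carry guards. A minimal
sketch of the same split, with a simplified guard pattern standing in for
mq's ``guard_re``::

   import re

   guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

   def parse_series(fullseries):
       series, guards = [], []
       for l in fullseries:
           h = l.find(b'#')
           if h == -1:
               patch, comment = l, b''
           elif h == 0:
               continue                    # the whole line is a comment
           else:
               patch, comment = l[:h], l[h:]
           patch = patch.strip()
           if patch:
               series.append(patch)
               guards.append(guard_re.findall(comment))
       return series, guards

   series, guards = parse_series([b'a.patch #+stable', b'# note', b'b.patch'])
   assert series == [b'a.patch', b'b.patch'] and guards[0] == [b'+stable']
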
 
     def checkguard(self, guard):
         if not guard:
-            return _('guard cannot be an empty string')
-        bad_chars = '# \t\r\n\f'
+            return _(b'guard cannot be an empty string')
+        bad_chars = b'# \t\r\n\f'
         first = guard[0]
-        if first in '-+':
-            return (_('guard %r starts with invalid character: %r') %
-                      (guard, first))
+        if first in b'-+':
+            return _(b'guard %r starts with invalid character: %r') % (
+                guard,
+                first,
+            )
         for c in bad_chars:
             if c in guard:
-                return _('invalid character in guard %r: %r') % (guard, c)
+                return _(b'invalid character in guard %r: %r') % (guard, c)
 
     def setactive(self, guards):
         for guard in guards:
@@ -630,7 +673,7 @@
             if bad:
                 raise error.Abort(bad)
         guards = sorted(set(guards))
-        self.ui.debug('active guards: %s\n' % ' '.join(guards))
+        self.ui.debug(b'active guards: %s\n' % b' '.join(guards))
         self.activeguards = guards
         self.guardsdirty = True
 
@@ -646,8 +689,10 @@
             for i, guard in enumerate(guards):
                 bad = self.checkguard(guard)
                 if bad:
-                    self.ui.warn('%s:%d: %s\n' %
-                                 (self.join(self.guardspath), i + 1, bad))
+                    self.ui.warn(
+                        b'%s:%d: %s\n'
+                        % (self.join(self.guardspath), i + 1, bad)
+                    )
                 else:
                     self.activeguards.append(guard)
         return self.activeguards
@@ -655,14 +700,14 @@
     def setguards(self, idx, guards):
         for g in guards:
             if len(g) < 2:
-                raise error.Abort(_('guard %r too short') % g)
-            if g[0] not in '-+':
-                raise error.Abort(_('guard %r starts with invalid char') % g)
+                raise error.Abort(_(b'guard %r too short') % g)
+            if g[0] not in b'-+':
+                raise error.Abort(_(b'guard %r starts with invalid char') % g)
             bad = self.checkguard(g[1:])
             if bad:
                 raise error.Abort(bad)
-        drop = self.guard_re.sub('', self.fullseries[idx])
-        self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
+        drop = self.guard_re.sub(b'', self.fullseries[idx])
+        self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards])
         self.parseseries()
         self.seriesdirty = True
 
@@ -673,17 +718,18 @@
         if not patchguards:
             return True, None
         guards = self.active()
-        exactneg = [g for g in patchguards
-                    if g.startswith('-') and g[1:] in guards]
+        exactneg = [
+            g for g in patchguards if g.startswith(b'-') and g[1:] in guards
+        ]
         if exactneg:
             return False, stringutil.pprint(exactneg[0])
-        pos = [g for g in patchguards if g.startswith('+')]
+        pos = [g for g in patchguards if g.startswith(b'+')]
         exactpos = [g for g in pos if g[1:] in guards]
         if pos:
             if exactpos:
                 return True, stringutil.pprint(exactpos[0])
-            return False, ' '.join([stringutil.pprint(p) for p in pos])
-        return True, ''
+            return False, b' '.join([stringutil.pprint(p) for p in pos])
+        return True, b''
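
pushable encodes the guard semantics: a matching negative guard always
blocks a patch, and once any positive guards are present at least one of
them must match the active set. A self-contained sketch of that decision,
returning raw bytes instead of pprint'ed strings::

   def pushable(patchguards, active):
       # An exactly matching negative guard wins outright.
       exactneg = [g for g in patchguards
                   if g.startswith(b'-') and g[1:] in active]
       if exactneg:
           return False, exactneg[0]
       pos = [g for g in patchguards if g.startswith(b'+')]
       exactpos = [g for g in pos if g[1:] in active]
       if pos:
           if exactpos:
               return True, exactpos[0]
           return False, b' '.join(pos)   # no positive guard matched
       return True, b''                   # unguarded

   assert pushable([b'-stable'], {b'stable'}) == (False, b'-stable')
   assert pushable([b'+dev'], {b'stable'}) == (False, b'+dev')
   assert pushable([b'+dev'], {b'dev'}) == (True, b'+dev')
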
 
     def explainpushable(self, idx, all_patches=False):
         if all_patches:
@@ -697,29 +743,40 @@
             pushable, why = self.pushable(idx)
             if all_patches and pushable:
                 if why is None:
-                    write(_('allowing %s - no guards in effect\n') %
-                          self.series[idx])
+                    write(
+                        _(b'allowing %s - no guards in effect\n')
+                        % self.series[idx]
+                    )
                 else:
                     if not why:
-                        write(_('allowing %s - no matching negative guards\n') %
-                              self.series[idx])
+                        write(
+                            _(b'allowing %s - no matching negative guards\n')
+                            % self.series[idx]
+                        )
                     else:
-                        write(_('allowing %s - guarded by %s\n') %
-                              (self.series[idx], why))
+                        write(
+                            _(b'allowing %s - guarded by %s\n')
+                            % (self.series[idx], why)
+                        )
             if not pushable:
                 if why:
-                    write(_('skipping %s - guarded by %s\n') %
-                          (self.series[idx], why))
+                    write(
+                        _(b'skipping %s - guarded by %s\n')
+                        % (self.series[idx], why)
+                    )
                 else:
-                    write(_('skipping %s - no matching guards\n') %
-                          self.series[idx])
+                    write(
+                        _(b'skipping %s - no matching guards\n')
+                        % self.series[idx]
+                    )
 
     def savedirty(self):
         def writelist(items, path):
-            fp = self.opener(path, 'wb')
+            fp = self.opener(path, b'wb')
             for i in items:
-                fp.write("%s\n" % i)
+                fp.write(b"%s\n" % i)
             fp.close()
+
         if self.applieddirty:
             writelist(map(bytes, self.applied), self.statuspath)
             self.applieddirty = False
@@ -736,14 +793,15 @@
             self.added = []
 
     def removeundo(self, repo):
-        undo = repo.sjoin('undo')
+        undo = repo.sjoin(b'undo')
         if not os.path.exists(undo):
             return
         try:
             os.unlink(undo)
         except OSError as inst:
-            self.ui.warn(_('error removing undo: %s\n') %
-                         stringutil.forcebytestr(inst))
+            self.ui.warn(
+                _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst)
+            )
 
     def backup(self, repo, files, copy=False):
         # backup local changes in --force case
@@ -751,35 +809,48 @@
             absf = repo.wjoin(f)
             if os.path.lexists(absf):
                 absorig = scmutil.backuppath(self.ui, repo, f)
-                self.ui.note(_('saving current version of %s as %s\n') %
-                             (f, os.path.relpath(absorig)))
+                self.ui.note(
+                    _(b'saving current version of %s as %s\n')
+                    % (f, os.path.relpath(absorig))
+                )
 
                 if copy:
                     util.copyfile(absf, absorig)
                 else:
                     util.rename(absf, absorig)
 
-    def printdiff(self, repo, diffopts, node1, node2=None, files=None,
-                  fp=None, changes=None, opts=None):
+    def printdiff(
+        self,
+        repo,
+        diffopts,
+        node1,
+        node2=None,
+        files=None,
+        fp=None,
+        changes=None,
+        opts=None,
+    ):
         if opts is None:
             opts = {}
-        stat = opts.get('stat')
+        stat = opts.get(b'stat')
         m = scmutil.match(repo[node1], files, opts)
-        logcmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
-                                  changes, stat, fp)
+        logcmdutil.diffordiffstat(
+            self.ui, repo, diffopts, node1, node2, m, changes, stat, fp
+        )
 
     def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
         # first try just applying the patch
-        (err, n) = self.apply(repo, [patch], update_status=False,
-                              strict=True, merge=rev)
+        (err, n) = self.apply(
+            repo, [patch], update_status=False, strict=True, merge=rev
+        )
 
         if err == 0:
             return (err, n)
 
         if n is None:
-            raise error.Abort(_("apply failed for patch %s") % patch)
-
-        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
+            raise error.Abort(_(b"apply failed for patch %s") % patch)
+
+        self.ui.warn(_(b"patch didn't work out, merging %s\n") % patch)
 
         # apply failed, strip away that rev and merge.
         hg.clean(repo, head)
@@ -788,17 +859,17 @@
         ctx = repo[rev]
         ret = hg.merge(repo, rev)
         if ret:
-            raise error.Abort(_("update returned %d") % ret)
+            raise error.Abort(_(b"update returned %d") % ret)
         n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
         if n is None:
-            raise error.Abort(_("repo commit failed"))
+            raise error.Abort(_(b"repo commit failed"))
         try:
             ph = patchheader(mergeq.join(patch), self.plainmode)
         except Exception:
-            raise error.Abort(_("unable to read %s") % patch)
+            raise error.Abort(_(b"unable to read %s") % patch)
 
         diffopts = self.patchopts(diffopts, patch)
-        patchf = self.opener(patch, "w")
+        patchf = self.opener(patch, b"w")
         comments = bytes(ph)
         if comments:
             patchf.write(comments)
@@ -836,8 +907,8 @@
             # so, we insert a merge marker with only one parent.  This way
             # the first patch in the queue is never a merge patch
             #
-            pname = ".hg.patches.merge.marker"
-            n = newcommit(repo, None, '[mq]: merge marker', force=True)
+            pname = b".hg.patches.merge.marker"
+            n = newcommit(repo, None, b'[mq]: merge marker', force=True)
             self.removeundo(repo)
             self.applied.append(statusentry(n, pname))
             self.applieddirty = True
@@ -847,7 +918,7 @@
         for patch in series:
             patch = mergeq.lookup(patch, strict=True)
             if not patch:
-                self.ui.warn(_("patch %s does not exist\n") % patch)
+                self.ui.warn(_(b"patch %s does not exist\n") % patch)
                 return (1, None)
             pushable, reason = self.pushable(patch)
             if not pushable:
@@ -855,7 +926,7 @@
                 continue
             info = mergeq.isapplied(patch)
             if not info:
-                self.ui.warn(_("patch %s is not applied\n") % patch)
+                self.ui.warn(_(b"patch %s is not applied\n") % patch)
                 return (1, None)
             rev = info[1]
             err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
@@ -872,28 +943,48 @@
         patchfile: name of patch file'''
         files = set()
         try:
-            fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
-                                  files=files, eolmode=None)
+            fuzz = patchmod.patch(
+                self.ui, repo, patchfile, strip=1, files=files, eolmode=None
+            )
             return (True, list(files), fuzz)
         except Exception as inst:
-            self.ui.note(stringutil.forcebytestr(inst) + '\n')
+            self.ui.note(stringutil.forcebytestr(inst) + b'\n')
             if not self.ui.verbose:
-                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
+                self.ui.warn(_(b"patch failed, unable to continue (try -v)\n"))
             self.ui.traceback()
             return (False, list(files), False)
 
-    def apply(self, repo, series, list=False, update_status=True,
-              strict=False, patchdir=None, merge=None, all_files=None,
-              tobackup=None, keepchanges=False):
+    def apply(
+        self,
+        repo,
+        series,
+        list=False,
+        update_status=True,
+        strict=False,
+        patchdir=None,
+        merge=None,
+        all_files=None,
+        tobackup=None,
+        keepchanges=False,
+    ):
         wlock = lock = tr = None
         try:
             wlock = repo.wlock()
             lock = repo.lock()
-            tr = repo.transaction("qpush")
+            tr = repo.transaction(b"qpush")
             try:
-                ret = self._apply(repo, series, list, update_status,
-                                  strict, patchdir, merge, all_files=all_files,
-                                  tobackup=tobackup, keepchanges=keepchanges)
+                ret = self._apply(
+                    repo,
+                    series,
+                    list,
+                    update_status,
+                    strict,
+                    patchdir,
+                    merge,
+                    all_files=all_files,
+                    tobackup=tobackup,
+                    keepchanges=keepchanges,
+                )
                 tr.close()
                 self.savedirty()
                 return ret
@@ -901,7 +992,7 @@
                 tr.close()
                 self.savedirty()
                 raise
-            except: # re-raises
+            except:  # re-raises
                 try:
                     tr.abort()
                 finally:
@@ -911,9 +1002,19 @@
             release(tr, lock, wlock)
             self.removeundo(repo)
 
-    def _apply(self, repo, series, list=False, update_status=True,
-               strict=False, patchdir=None, merge=None, all_files=None,
-               tobackup=None, keepchanges=False):
+    def _apply(
+        self,
+        repo,
+        series,
+        list=False,
+        update_status=True,
+        strict=False,
+        patchdir=None,
+        merge=None,
+        all_files=None,
+        tobackup=None,
+        keepchanges=False,
+    ):
         """returns (error, hash)
 
         error = 1 for unable to read, 2 for patch failed, 3 for patch
@@ -930,25 +1031,25 @@
             if not pushable:
                 self.explainpushable(patchname, all_patches=True)
                 continue
-            self.ui.status(_("applying %s\n") % patchname)
+            self.ui.status(_(b"applying %s\n") % patchname)
             pf = os.path.join(patchdir, patchname)
 
             try:
                 ph = patchheader(self.join(patchname), self.plainmode)
             except IOError:
-                self.ui.warn(_("unable to read %s\n") % patchname)
+                self.ui.warn(_(b"unable to read %s\n") % patchname)
                 err = 1
                 break
 
             message = ph.message
             if not message:
                 # The commit message should not be translated
-                message = "imported patch %s\n" % patchname
+                message = b"imported patch %s\n" % patchname
             else:
                 if list:
                     # The commit message should not be translated
-                    message.append("\nimported patch %s" % patchname)
-                message = '\n'.join(message)
+                    message.append(b"\nimported patch %s" % patchname)
+                message = b'\n'.join(message)
 
             if ph.haspatch:
                 if tobackup:
@@ -956,8 +1057,9 @@
                     touched = set(touched) & tobackup
                     if touched and keepchanges:
                         raise AbortNoCleanup(
-                            _("conflicting local changes found"),
-                            hint=_("did you forget to qrefresh?"))
+                            _(b"conflicting local changes found"),
+                            hint=_(b"did you forget to qrefresh?"),
+                        )
                     self.backup(repo, touched, copy=True)
                     tobackup = tobackup - touched
                 (patcherr, files, fuzz) = self.patch(repo, pf)
@@ -965,7 +1067,7 @@
                     all_files.update(files)
                 patcherr = not patcherr
             else:
-                self.ui.warn(_("patch %s is empty\n") % patchname)
+                self.ui.warn(_(b"patch %s is empty\n") % patchname)
                 patcherr, files, fuzz = 0, [], 0
 
             if merge and files:
@@ -985,34 +1087,39 @@
                     p1 = repo.dirstate.p1()
                     repo.setparents(p1, merge)
 
-            if all_files and '.hgsubstate' in all_files:
+            if all_files and b'.hgsubstate' in all_files:
                 wctx = repo[None]
-                pctx = repo['.']
+                pctx = repo[b'.']
                 overwrite = False
-                mergedsubstate = subrepoutil.submerge(repo, pctx, wctx, wctx,
-                                                      overwrite)
+                mergedsubstate = subrepoutil.submerge(
+                    repo, pctx, wctx, wctx, overwrite
+                )
                 files += mergedsubstate.keys()
 
             match = scmutil.matchfiles(repo, files or [])
             oldtip = repo.changelog.tip()
-            n = newcommit(repo, None, message, ph.user, ph.date, match=match,
-                          force=True)
+            n = newcommit(
+                repo, None, message, ph.user, ph.date, match=match, force=True
+            )
             if repo.changelog.tip() == oldtip:
-                raise error.Abort(_("qpush exactly duplicates child changeset"))
+                raise error.Abort(
+                    _(b"qpush exactly duplicates child changeset")
+                )
             if n is None:
-                raise error.Abort(_("repository commit failed"))
+                raise error.Abort(_(b"repository commit failed"))
 
             if update_status:
                 self.applied.append(statusentry(n, patchname))
 
             if patcherr:
-                self.ui.warn(_("patch failed, rejects left in working "
-                               "directory\n"))
+                self.ui.warn(
+                    _(b"patch failed, rejects left in working directory\n")
+                )
                 err = 2
                 break
 
             if fuzz and strict:
-                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
+                self.ui.warn(_(b"fuzz found when applying patch, stopping\n"))
                 err = 3
                 break
         return (err, n)
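
Callers see the documented ``(error, hash)`` pair from ``apply``; a short
sketch of consuming that contract, with the codes taken from the docstring
above and a stand-in writer instead of ``ui.write``::

   import sys

   APPLY_ERRORS = {
       1: b'unable to read patch',
       2: b'patch failed to apply',
       3: b'fuzz found while applying patch (strict mode)',
   }

   def report(err, node, write):
       # err is falsy on success; node is the last commit created, if any.
       if not err:
           write(b'applied, now at %s\n' % (node or b'(no changes)'))
       else:
           write(b'stopped: %s\n' % APPLY_ERRORS.get(err, b'unknown error'))

   report(3, None, sys.stdout.buffer.write)
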
@@ -1054,13 +1161,13 @@
 
         if unknown:
             if numrevs:
-                rev  = dict((entry.name, entry.node) for entry in qfinished)
+                rev = dict((entry.name, entry.node) for entry in qfinished)
                 for p in unknown:
-                    msg = _('revision %s refers to unknown patches: %s\n')
+                    msg = _(b'revision %s refers to unknown patches: %s\n')
                     self.ui.warn(msg % (short(rev[p]), p))
             else:
-                msg = _('unknown patches: %s\n')
-                raise error.Abort(''.join(msg % p for p in unknown))
+                msg = _(b'unknown patches: %s\n')
+                raise error.Abort(b''.join(msg % p for p in unknown))
 
         self.parseseries()
         self.seriesdirty = True
@@ -1072,18 +1179,18 @@
         for i, rev in enumerate(revs):
 
             if rev < firstrev:
-                raise error.Abort(_('revision %d is not managed') % rev)
+                raise error.Abort(_(b'revision %d is not managed') % rev)
 
             ctx = repo[rev]
             base = self.applied[i].node
             if ctx.node() != base:
-                msg = _('cannot delete revision %d above applied patches')
+                msg = _(b'cannot delete revision %d above applied patches')
                 raise error.Abort(msg % rev)
 
             patch = self.applied[i].name
-            for fmt in ('[mq]: %s', 'imported patch %s'):
+            for fmt in (b'[mq]: %s', b'imported patch %s'):
                 if ctx.description() == fmt % patch:
-                    msg = _('patch %s finalized without changeset message\n')
+                    msg = _(b'patch %s finalized without changeset message\n')
                     repo.ui.status(msg % patch)
                     break
 
@@ -1096,41 +1203,42 @@
         repo._phasecache
         patches = self._revpatches(repo, sorted(revs))
         qfinished = self._cleanup(patches, len(patches))
-        if qfinished and repo.ui.configbool('mq', 'secret'):
+        if qfinished and repo.ui.configbool(b'mq', b'secret'):
             # only use this logic when the secret option is added
             oldqbase = repo[qfinished[0]]
             tphase = phases.newcommitphase(repo.ui)
             if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
-                with repo.transaction('qfinish') as tr:
+                with repo.transaction(b'qfinish') as tr:
                     phases.advanceboundary(repo, tr, tphase, qfinished)
 
     def delete(self, repo, patches, opts):
-        if not patches and not opts.get('rev'):
-            raise error.Abort(_('qdelete requires at least one revision or '
-                               'patch name'))
+        if not patches and not opts.get(b'rev'):
+            raise error.Abort(
+                _(b'qdelete requires at least one revision or patch name')
+            )
 
         realpatches = []
         for patch in patches:
             patch = self.lookup(patch, strict=True)
             info = self.isapplied(patch)
             if info:
-                raise error.Abort(_("cannot delete applied patch %s") % patch)
+                raise error.Abort(_(b"cannot delete applied patch %s") % patch)
             if patch not in self.series:
-                raise error.Abort(_("patch %s not in series file") % patch)
+                raise error.Abort(_(b"patch %s not in series file") % patch)
             if patch not in realpatches:
                 realpatches.append(patch)
 
         numrevs = 0
-        if opts.get('rev'):
+        if opts.get(b'rev'):
             if not self.applied:
-                raise error.Abort(_('no patches applied'))
-            revs = scmutil.revrange(repo, opts.get('rev'))
+                raise error.Abort(_(b'no patches applied'))
+            revs = scmutil.revrange(repo, opts.get(b'rev'))
             revs.sort()
             revpatches = self._revpatches(repo, revs)
             realpatches += revpatches
             numrevs = len(revpatches)
 
-        self._cleanup(realpatches, numrevs, opts.get('keep'))
+        self._cleanup(realpatches, numrevs, opts.get(b'keep'))
 
     def checktoppatch(self, repo):
         '''check that working directory is at qtip'''
@@ -1138,75 +1246,84 @@
             top = self.applied[-1].node
             patch = self.applied[-1].name
             if repo.dirstate.p1() != top:
-                raise error.Abort(_("working directory revision is not qtip"))
+                raise error.Abort(_(b"working directory revision is not qtip"))
             return top, patch
         return None, None
 
     def putsubstate2changes(self, substatestate, changes):
         for files in changes[:3]:
-            if '.hgsubstate' in files:
-                return # already listed up
+            if b'.hgsubstate' in files:
+                return  # already listed
         # not yet listed
-        if substatestate in 'a?':
-            changes[1].append('.hgsubstate')
-        elif substatestate in 'r':
-            changes[2].append('.hgsubstate')
-        else: # modified
-            changes[0].append('.hgsubstate')
+        if substatestate in b'a?':
+            changes[1].append(b'.hgsubstate')
+        elif substatestate in b'r':
+            changes[2].append(b'.hgsubstate')
+        else:  # modified
+            changes[0].append(b'.hgsubstate')
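
putsubstate2changes buckets ``.hgsubstate`` by its dirstate code: ``a`` or
``?`` count as added, ``r`` as removed, anything else as modified, and it
bails out early when the file is already listed. A tiny sketch with
``changes`` as plain ``(modified, added, removed)`` lists::

   def put_substate(substatestate, changes):
       modified, added, removed = changes
       for files in changes:
           if b'.hgsubstate' in files:
               return                 # already listed
       if substatestate in b'a?':
           added.append(b'.hgsubstate')
       elif substatestate in b'r':
           removed.append(b'.hgsubstate')
       else:                          # any other state counts as modified
           modified.append(b'.hgsubstate')

   changes = ([], [], [])
   put_substate(b'a', changes)
   assert changes[1] == [b'.hgsubstate']
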
 
     def checklocalchanges(self, repo, force=False, refresh=True):
-        excsuffix = ''
+        excsuffix = b''
         if refresh:
-            excsuffix = ', qrefresh first'
+            excsuffix = b', qrefresh first'
             # plain versions for i18n tool to detect them
-            _("local changes found, qrefresh first")
-            _("local changed subrepos found, qrefresh first")
+            _(b"local changes found, qrefresh first")
+            _(b"local changed subrepos found, qrefresh first")
 
         s = repo.status()
         if not force:
             cmdutil.checkunfinished(repo)
             if s.modified or s.added or s.removed or s.deleted:
-                _("local changes found") # i18n tool detection
-                raise error.Abort(_("local changes found" + excsuffix))
+                _(b"local changes found")  # i18n tool detection
+                raise error.Abort(_(b"local changes found" + excsuffix))
             if checksubstate(repo):
-                _("local changed subrepos found") # i18n tool detection
-                raise error.Abort(_("local changed subrepos found" + excsuffix))
+                _(b"local changed subrepos found")  # i18n tool detection
+                raise error.Abort(
+                    _(b"local changed subrepos found" + excsuffix)
+                )
         else:
             cmdutil.checkunfinished(repo, skipmerge=True)
         return s
 
-    _reserved = ('series', 'status', 'guards', '.', '..')
+    _reserved = (b'series', b'status', b'guards', b'.', b'..')
+
     def checkreservedname(self, name):
         if name in self._reserved:
-            raise error.Abort(_('"%s" cannot be used as the name of a patch')
-                             % name)
+            raise error.Abort(
+                _(b'"%s" cannot be used as the name of a patch') % name
+            )
         if name != name.strip():
             # whitespace is stripped by parseseries()
-            raise error.Abort(_('patch name cannot begin or end with '
-                                'whitespace'))
-        for prefix in ('.hg', '.mq'):
+            raise error.Abort(
+                _(b'patch name cannot begin or end with whitespace')
+            )
+        for prefix in (b'.hg', b'.mq'):
             if name.startswith(prefix):
-                raise error.Abort(_('patch name cannot begin with "%s"')
-                                 % prefix)
-        for c in ('#', ':', '\r', '\n'):
+                raise error.Abort(
+                    _(b'patch name cannot begin with "%s"') % prefix
+                )
+        for c in (b'#', b':', b'\r', b'\n'):
             if c in name:
-                raise error.Abort(_('%r cannot be used in the name of a patch')
-                                 % pycompat.bytestr(c))
+                raise error.Abort(
+                    _(b'%r cannot be used in the name of a patch')
+                    % pycompat.bytestr(c)
+                )
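
checkreservedname rejects names that would collide with mq's own files or
break the series format. A compact sketch of the same checks as a
standalone validator that returns an error string instead of raising::

   RESERVED = (b'series', b'status', b'guards', b'.', b'..')

   def checkname(name):
       if name in RESERVED:
           return b'reserved name'
       if name != name.strip():
           return b'leading/trailing whitespace'   # stripped by the parser
       if name.startswith((b'.hg', b'.mq')):
           return b'reserved prefix'
       for c in (b'#', b':', b'\r', b'\n'):
           if c in name:
               return b'forbidden character'
       return None                                  # valid

   assert checkname(b'series') == b'reserved name'
   assert checkname(b'fix.patch') is None
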
 
     def checkpatchname(self, name, force=False):
         self.checkreservedname(name)
         if not force and os.path.exists(self.join(name)):
             if os.path.isdir(self.join(name)):
-                raise error.Abort(_('"%s" already exists as a directory')
-                                 % name)
+                raise error.Abort(
+                    _(b'"%s" already exists as a directory') % name
+                )
             else:
-                raise error.Abort(_('patch "%s" already exists') % name)
+                raise error.Abort(_(b'patch "%s" already exists') % name)
 
     def makepatchname(self, title, fallbackname):
         """Return a suitable filename for title, adding a suffix to make
         it unique in the existing list"""
         namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_')
-        namebase = namebase[:75] # avoid too long name (issue5117)
+        namebase = namebase[:75]  # avoid an overly long name (issue5117)
         if namebase:
             try:
                 self.checkreservedname(namebase)
@@ -1224,36 +1341,37 @@
                 except error.Abort:
                     pass
             i += 1
-            name = '%s__%d' % (namebase, i)
+            name = b'%s__%d' % (namebase, i)
         return name
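
makepatchname reduces a commit title to a filesystem-safe patch name:
squash whitespace and punctuation to underscores, cap the base at 75
characters (issue5117), then append ``__N`` until the name is free. A
sketch that checks against a plain in-memory set instead of the series
and reserved-name machinery::

   import re

   def makepatchname(title, fallbackname, taken):
       namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_')
       namebase = namebase[:75] or fallbackname   # avoid overlong names
       name, i = namebase, 0
       while name in taken:                       # add a numeric suffix
           i += 1
           name = b'%s__%d' % (namebase, i)
       return name

   taken = {b'fix_the_build'}
   assert makepatchname(b'Fix the build!', b'patch', taken) == b'fix_the_build__1'
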
 
     def checkkeepchanges(self, keepchanges, force):
         if force and keepchanges:
-            raise error.Abort(_('cannot use both --force and --keep-changes'))
+            raise error.Abort(_(b'cannot use both --force and --keep-changes'))
 
     def new(self, repo, patchfn, *pats, **opts):
         """options:
            msg: a string or a no-argument function returning a string
         """
         opts = pycompat.byteskwargs(opts)
-        msg = opts.get('msg')
-        edit = opts.get('edit')
-        editform = opts.get('editform', 'mq.qnew')
-        user = opts.get('user')
-        date = opts.get('date')
+        msg = opts.get(b'msg')
+        edit = opts.get(b'edit')
+        editform = opts.get(b'editform', b'mq.qnew')
+        user = opts.get(b'user')
+        date = opts.get(b'date')
         if date:
             date = dateutil.parsedate(date)
-        diffopts = self.diffopts({'git': opts.get('git')}, plain=True)
-        if opts.get('checkname', True):
+        diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True)
+        if opts.get(b'checkname', True):
             self.checkpatchname(patchfn)
         inclsubs = checksubstate(repo)
         if inclsubs:
-            substatestate = repo.dirstate['.hgsubstate']
-        if opts.get('include') or opts.get('exclude') or pats:
+            substatestate = repo.dirstate[b'.hgsubstate']
+        if opts.get(b'include') or opts.get(b'exclude') or pats:
             # detect missing files in pats
             def badfn(f, msg):
-                if f != '.hgsubstate': # .hgsubstate is auto-created
-                    raise error.Abort('%s: %s' % (f, msg))
+                if f != b'.hgsubstate':  # .hgsubstate is auto-created
+                    raise error.Abort(b'%s: %s' % (f, msg))
+
             match = scmutil.match(repo[None], pats, opts, badfn=badfn)
             changes = repo.status(match=match)
         else:
@@ -1263,38 +1381,52 @@
             commitfiles.extend(files)
         match = scmutil.matchfiles(repo, commitfiles)
         if len(repo[None].parents()) > 1:
-            raise error.Abort(_('cannot manage merge changesets'))
+            raise error.Abort(_(b'cannot manage merge changesets'))
         self.checktoppatch(repo)
         insert = self.fullseriesend()
         with repo.wlock():
             try:
                 # if patch file write fails, abort early
-                p = self.opener(patchfn, "w")
+                p = self.opener(patchfn, b"w")
             except IOError as e:
-                raise error.Abort(_('cannot write patch "%s": %s')
-                                 % (patchfn, encoding.strtolocal(e.strerror)))
+                raise error.Abort(
+                    _(b'cannot write patch "%s": %s')
+                    % (patchfn, encoding.strtolocal(e.strerror))
+                )
             try:
-                defaultmsg = "[mq]: %s" % patchfn
+                defaultmsg = b"[mq]: %s" % patchfn
                 editor = cmdutil.getcommiteditor(editform=editform)
                 if edit:
+
                     def finishdesc(desc):
                         if desc.rstrip():
                             return desc
                         else:
                             return defaultmsg
+
                     # i18n: this message is shown in editor with "HG: " prefix
-                    extramsg = _('Leave message empty to use default message.')
-                    editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
-                                                     extramsg=extramsg,
-                                                     editform=editform)
+                    extramsg = _(b'Leave message empty to use default message.')
+                    editor = cmdutil.getcommiteditor(
+                        finishdesc=finishdesc,
+                        extramsg=extramsg,
+                        editform=editform,
+                    )
                     commitmsg = msg
                 else:
                     commitmsg = msg or defaultmsg
 
-                n = newcommit(repo, None, commitmsg, user, date, match=match,
-                              force=True, editor=editor)
+                n = newcommit(
+                    repo,
+                    None,
+                    commitmsg,
+                    user,
+                    date,
+                    match=match,
+                    force=True,
+                    editor=editor,
+                )
                 if n is None:
-                    raise error.Abort(_("repo commit failed"))
+                    raise error.Abort(_(b"repo commit failed"))
                 try:
                     self.fullseries[insert:insert] = [patchfn]
                     self.applied.append(statusentry(n, patchfn))
@@ -1306,26 +1438,31 @@
                     if user:
                         ph.setuser(user)
                     if date:
-                        ph.setdate('%d %d' % date)
+                        ph.setdate(b'%d %d' % date)
                     ph.setparent(hex(nctx.p1().node()))
                     msg = nctx.description().strip()
                     if msg == defaultmsg.strip():
-                        msg = ''
+                        msg = b''
                     ph.setmessage(msg)
                     p.write(bytes(ph))
                     if commitfiles:
                         parent = self.qparents(repo, n)
                         if inclsubs:
                             self.putsubstate2changes(substatestate, changes)
-                        chunks = patchmod.diff(repo, node1=parent, node2=n,
-                                               changes=changes, opts=diffopts)
+                        chunks = patchmod.diff(
+                            repo,
+                            node1=parent,
+                            node2=n,
+                            changes=changes,
+                            opts=diffopts,
+                        )
                         for chunk in chunks:
                             p.write(chunk)
                     p.close()
                     r = self.qrepo()
                     if r:
                         r[None].add([patchfn])
-                except: # re-raises
+                except:  # re-raises
                     repo.rollback()
                     raise
             except Exception:
@@ -1333,7 +1470,7 @@
                 try:
                     os.unlink(patchpath)
                 except OSError:
-                    self.ui.warn(_('error unlinking %s\n') % patchpath)
+                    self.ui.warn(_(b'error unlinking %s\n') % patchpath)
                 raise
             self.removeundo(repo)
 
@@ -1356,16 +1493,16 @@
                 return s
             matches = [x for x in self.series if s in x]
             if len(matches) > 1:
-                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
+                self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s)
                 for m in matches:
-                    self.ui.warn('  %s\n' % m)
+                    self.ui.warn(b'  %s\n' % m)
                 return None
             if matches:
                 return matches[0]
             if self.series and self.applied:
-                if s == 'qtip':
+                if s == b'qtip':
                     return self.series[self.seriesend(True) - 1]
-                if s == 'qbase':
+                if s == b'qbase':
                     return self.series[0]
             return None
 
@@ -1385,35 +1522,45 @@
                 res = partialname(patch)
                 if res:
                     return res
-                minus = patch.rfind('-')
+                minus = patch.rfind(b'-')
                 if minus >= 0:
                     res = partialname(patch[:minus])
                     if res:
                         i = self.series.index(res)
                         try:
-                            off = int(patch[minus + 1:] or 1)
+                            off = int(patch[minus + 1 :] or 1)
                         except (ValueError, OverflowError):
                             pass
                         else:
                             if i - off >= 0:
                                 return self.series[i - off]
-                plus = patch.rfind('+')
+                plus = patch.rfind(b'+')
                 if plus >= 0:
                     res = partialname(patch[:plus])
                     if res:
                         i = self.series.index(res)
                         try:
-                            off = int(patch[plus + 1:] or 1)
+                            off = int(patch[plus + 1 :] or 1)
                         except (ValueError, OverflowError):
                             pass
                         else:
                             if i + off < len(self.series):
                                 return self.series[i + off]
-        raise error.Abort(_("patch %s not in series") % patch)
-
-    def push(self, repo, patch=None, force=False, list=False, mergeq=None,
-             all=False, move=False, exact=False, nobackup=False,
-             keepchanges=False):
+        raise error.Abort(_(b"patch %s not in series") % patch)
+
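
lookup understands ``name-N`` and ``name+N`` as offsets within the series
relative to a known patch, with ``N`` defaulting to 1. A reduced sketch of
the offset resolution against a plain list, without the partial-name
matching::

   def resolve_offset(series, spec):
       for sep, sign in ((b'-', -1), (b'+', 1)):
           idx = spec.rfind(sep)
           if idx < 0 or spec[:idx] not in series:
               continue
           i = series.index(spec[:idx])
           try:
               off = int(spec[idx + 1:] or 1)
           except (ValueError, OverflowError):
               continue
           j = i + sign * off
           if 0 <= j < len(series):
               return series[j]
       return spec if spec in series else None

   series = [b'a', b'b', b'c']
   assert resolve_offset(series, b'c-2') == b'a'
   assert resolve_offset(series, b'a+') == b'b'   # bare '+' means +1
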
+    def push(
+        self,
+        repo,
+        patch=None,
+        force=False,
+        list=False,
+        mergeq=None,
+        all=False,
+        move=False,
+        exact=False,
+        nobackup=False,
+        keepchanges=False,
+    ):
         self.checkkeepchanges(keepchanges, force)
         diffopts = self.diffopts()
         with repo.wlock():
@@ -1423,10 +1570,10 @@
             if not heads:
                 heads = [nullid]
             if repo.dirstate.p1() not in heads and not exact:
-                self.ui.status(_("(working directory not at a head)\n"))
+                self.ui.status(_(b"(working directory not at a head)\n"))
 
             if not self.series:
-                self.ui.warn(_('no patches in series\n'))
+                self.ui.warn(_(b'no patches in series\n'))
                 return 0
 
             # Suppose our series file is: A B C and the current 'top'
@@ -1438,25 +1585,29 @@
                 info = self.isapplied(patch)
                 if info and info[0] >= len(self.applied) - 1:
                     self.ui.warn(
-                        _('qpush: %s is already at the top\n') % patch)
+                        _(b'qpush: %s is already at the top\n') % patch
+                    )
                     return 0
 
                 pushable, reason = self.pushable(patch)
                 if pushable:
                     if self.series.index(patch) < self.seriesend():
                         raise error.Abort(
-                            _("cannot push to a previous patch: %s") % patch)
+                            _(b"cannot push to a previous patch: %s") % patch
+                        )
                 else:
                     if reason:
-                        reason = _('guarded by %s') % reason
+                        reason = _(b'guarded by %s') % reason
                     else:
-                        reason = _('no matching guards')
-                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
+                        reason = _(b'no matching guards')
+                    self.ui.warn(
+                        _(b"cannot push '%s' - %s\n") % (patch, reason)
+                    )
                     return 1
             elif all:
                 patch = self.series[-1]
                 if self.isapplied(patch):
-                    self.ui.warn(_('all patches are currently applied\n'))
+                    self.ui.warn(_(b'all patches are currently applied\n'))
                     return 0
 
             # Following the above example, starting at 'top' of B:
@@ -1466,7 +1617,7 @@
             # work as it detects an error when done
             start = self.seriesend()
             if start == len(self.series):
-                self.ui.warn(_('patch series already fully applied\n'))
+                self.ui.warn(_(b'patch series already fully applied\n'))
                 return 1
             if not force and not keepchanges:
                 self.checklocalchanges(repo, refresh=self.applied)
@@ -1474,24 +1625,28 @@
             if exact:
                 if keepchanges:
                     raise error.Abort(
-                        _("cannot use --exact and --keep-changes together"))
+                        _(b"cannot use --exact and --keep-changes together")
+                    )
                 if move:
-                    raise error.Abort(_('cannot use --exact and --move '
-                                       'together'))
+                    raise error.Abort(
+                        _(b'cannot use --exact and --move together')
+                    )
                 if self.applied:
-                    raise error.Abort(_('cannot push --exact with applied '
-                                       'patches'))
+                    raise error.Abort(
+                        _(b'cannot push --exact with applied patches')
+                    )
                 root = self.series[start]
                 target = patchheader(self.join(root), self.plainmode).parent
                 if not target:
                     raise error.Abort(
-                        _("%s does not have a parent recorded") % root)
-                if not repo[target] == repo['.']:
+                        _(b"%s does not have a parent recorded") % root
+                    )
+                if not repo[target] == repo[b'.']:
                     hg.update(repo, target)
 
             if move:
                 if not patch:
-                    raise error.Abort(_("please specify the patch to move"))
+                    raise error.Abort(_(b"please specify the patch to move"))
                 for fullstart, rpn in enumerate(self.fullseries):
                     # strip markers for patch guards
                     if self.guard_re.split(rpn, 1)[0] == self.series[start]:
@@ -1521,8 +1676,12 @@
             if (not nobackup and force) or keepchanges:
                 status = self.checklocalchanges(repo, force=True)
                 if keepchanges:
-                    tobackup.update(status.modified + status.added +
-                                    status.removed + status.deleted)
+                    tobackup.update(
+                        status.modified
+                        + status.added
+                        + status.removed
+                        + status.deleted
+                    )
                 else:
                     tobackup.update(status.modified + status.added)
 
@@ -1532,34 +1691,53 @@
                 if mergeq:
                     ret = self.mergepatch(repo, mergeq, s, diffopts)
                 else:
-                    ret = self.apply(repo, s, list, all_files=all_files,
-                                     tobackup=tobackup, keepchanges=keepchanges)
+                    ret = self.apply(
+                        repo,
+                        s,
+                        list,
+                        all_files=all_files,
+                        tobackup=tobackup,
+                        keepchanges=keepchanges,
+                    )
             except AbortNoCleanup:
                 raise
-            except: # re-raises
-                self.ui.warn(_('cleaning up working directory...\n'))
-                cmdutil.revert(self.ui, repo, repo['.'],
-                               repo.dirstate.parents(), no_backup=True)
+            except:  # re-raises
+                self.ui.warn(_(b'cleaning up working directory...\n'))
+                cmdutil.revert(
+                    self.ui,
+                    repo,
+                    repo[b'.'],
+                    repo.dirstate.parents(),
+                    no_backup=True,
+                )
                 # only remove unknown files that we know we touched or
                 # created while patching
                 for f in all_files:
                     if f not in repo.dirstate:
                         repo.wvfs.unlinkpath(f, ignoremissing=True)
-                self.ui.warn(_('done\n'))
+                self.ui.warn(_(b'done\n'))
                 raise
 
             if not self.applied:
                 return ret[0]
             top = self.applied[-1].name
             if ret[0] and ret[0] > 1:
-                msg = _("errors during apply, please fix and qrefresh %s\n")
+                msg = _(b"errors during apply, please fix and qrefresh %s\n")
                 self.ui.write(msg % top)
             else:
-                self.ui.write(_("now at: %s\n") % top)
+                self.ui.write(_(b"now at: %s\n") % top)
             return ret[0]
 
-    def pop(self, repo, patch=None, force=False, update=True, all=False,
-            nobackup=False, keepchanges=False):
+    def pop(
+        self,
+        repo,
+        patch=None,
+        force=False,
+        update=True,
+        all=False,
+        nobackup=False,
+        keepchanges=False,
+    ):
         self.checkkeepchanges(keepchanges, force)
         with repo.wlock():
             if patch:
@@ -1569,12 +1747,12 @@
                     patch = self.lookup(patch)
                 info = self.isapplied(patch)
                 if not info:
-                    raise error.Abort(_("patch %s is not applied") % patch)
+                    raise error.Abort(_(b"patch %s is not applied") % patch)
 
             if not self.applied:
                 # Allow qpop -a to work repeatedly,
                 # but not qpop without an argument
-                self.ui.warn(_("no patches applied\n"))
+                self.ui.warn(_(b"no patches applied\n"))
                 return not all
 
             if all:
@@ -1585,7 +1763,7 @@
                 start = len(self.applied) - 1
 
             if start >= len(self.applied):
-                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
+                self.ui.warn(_(b"qpop: %s is already at the top\n") % patch)
                 return
 
             if not update:
@@ -1593,12 +1771,13 @@
                 rr = [x.node for x in self.applied]
                 for p in parents:
                     if p in rr:
-                        self.ui.warn(_("qpop: forcing dirstate update\n"))
+                        self.ui.warn(_(b"qpop: forcing dirstate update\n"))
                         update = True
             else:
                 parents = [p.node() for p in repo[None].parents()]
-                update = any(entry.node in parents
-                             for entry in self.applied[start:])
+                update = any(
+                    entry.node in parents for entry in self.applied[start:]
+                )
 
             tobackup = set()
             if update:
@@ -1607,8 +1786,9 @@
                     if not nobackup:
                         tobackup.update(s.modified + s.added)
                 elif keepchanges:
-                    tobackup.update(s.modified + s.added +
-                                    s.removed + s.deleted)
+                    tobackup.update(
+                        s.modified + s.added + s.removed + s.deleted
+                    )
 
             self.applieddirty = True
             end = len(self.applied)
@@ -1618,28 +1798,33 @@
                 heads = repo.changelog.heads(rev)
             except error.LookupError:
                 node = short(rev)
-                raise error.Abort(_('trying to pop unknown node %s') % node)
+                raise error.Abort(_(b'trying to pop unknown node %s') % node)
 
             if heads != [self.applied[-1].node]:
-                raise error.Abort(_("popping would remove a revision not "
-                                   "managed by this patch queue"))
+                raise error.Abort(
+                    _(
+                        b"popping would remove a revision not "
+                        b"managed by this patch queue"
+                    )
+                )
             if not repo[self.applied[-1].node].mutable():
                 raise error.Abort(
-                    _("popping would remove a public revision"),
-                    hint=_("see 'hg help phases' for details"))
+                    _(b"popping would remove a public revision"),
+                    hint=_(b"see 'hg help phases' for details"),
+                )
 
             # we know there are no local changes, so we can make a simplified
             # form of hg.update.
             if update:
                 qp = self.qparents(repo, rev)
                 ctx = repo[qp]
-                m, a, r, d = repo.status(qp, '.')[:4]
+                m, a, r, d = repo.status(qp, b'.')[:4]
                 if d:
-                    raise error.Abort(_("deletions found between repo revs"))
+                    raise error.Abort(_(b"deletions found between repo revs"))
 
                 tobackup = set(a + m + r) & tobackup
                 if keepchanges and tobackup:
-                    raise error.Abort(_("local changes found, qrefresh first"))
+                    raise error.Abort(_(b"local changes found, qrefresh first"))
                 self.backup(repo, tobackup)
                 with repo.dirstate.parentchange():
                     for f in a:
@@ -1651,23 +1836,23 @@
                         repo.dirstate.normal(f)
                     repo.setparents(qp, nullid)
             for patch in reversed(self.applied[start:end]):
-                self.ui.status(_("popping %s\n") % patch.name)
+                self.ui.status(_(b"popping %s\n") % patch.name)
             del self.applied[start:end]
             strip(self.ui, repo, [rev], update=False, backup=False)
-            for s, state in repo['.'].substate.items():
-                repo['.'].sub(s).get(state)
+            for s, state in repo[b'.'].substate.items():
+                repo[b'.'].sub(s).get(state)
             if self.applied:
-                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
+                self.ui.write(_(b"now at: %s\n") % self.applied[-1].name)
             else:
-                self.ui.write(_("patch queue now empty\n"))
+                self.ui.write(_(b"patch queue now empty\n"))
 
     def diff(self, repo, pats, opts):
         top, patch = self.checktoppatch(repo)
         if not top:
-            self.ui.write(_("no patches applied\n"))
+            self.ui.write(_(b"no patches applied\n"))
             return
         qp = self.qparents(repo, top)
-        if opts.get('reverse'):
+        if opts.get(b'reverse'):
             node1, node2 = None, qp
         else:
             node1, node2 = qp, None
@@ -1677,36 +1862,41 @@
     def refresh(self, repo, pats=None, **opts):
         opts = pycompat.byteskwargs(opts)
         if not self.applied:
-            self.ui.write(_("no patches applied\n"))
+            self.ui.write(_(b"no patches applied\n"))
             return 1
-        msg = opts.get('msg', '').rstrip()
-        edit = opts.get('edit')
-        editform = opts.get('editform', 'mq.qrefresh')
-        newuser = opts.get('user')
-        newdate = opts.get('date')
+        msg = opts.get(b'msg', b'').rstrip()
+        edit = opts.get(b'edit')
+        editform = opts.get(b'editform', b'mq.qrefresh')
+        newuser = opts.get(b'user')
+        newdate = opts.get(b'date')
         if newdate:
-            newdate = '%d %d' % dateutil.parsedate(newdate)
+            newdate = b'%d %d' % dateutil.parsedate(newdate)
         wlock = repo.wlock()
 
         try:
             self.checktoppatch(repo)
             (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
             if repo.changelog.heads(top) != [top]:
-                raise error.Abort(_("cannot qrefresh a revision with children"))
+                raise error.Abort(
+                    _(b"cannot qrefresh a revision with children")
+                )
             if not repo[top].mutable():
-                raise error.Abort(_("cannot qrefresh public revision"),
-                                 hint=_("see 'hg help phases' for details"))
+                raise error.Abort(
+                    _(b"cannot qrefresh public revision"),
+                    hint=_(b"see 'hg help phases' for details"),
+                )
 
             cparents = repo.changelog.parents(top)
             patchparent = self.qparents(repo, top)
 
             inclsubs = checksubstate(repo, patchparent)
             if inclsubs:
-                substatestate = repo.dirstate['.hgsubstate']
+                substatestate = repo.dirstate[b'.hgsubstate']
 
             ph = patchheader(self.join(patchfn), self.plainmode)
-            diffopts = self.diffopts({'git': opts.get('git')}, patchfn,
-                                     plain=True)
+            diffopts = self.diffopts(
+                {b'git': opts.get(b'git')}, patchfn, plain=True
+            )
             if newuser:
                 ph.setuser(newuser)
             if newdate:
@@ -1714,7 +1904,7 @@
             ph.setparent(hex(patchparent))
 
             # only commit new patch when write is complete
-            patchf = self.opener(patchfn, 'w', atomictemp=True)
+            patchf = self.opener(patchfn, b'w', atomictemp=True)
 
             # update the dirstate in place, strip off the qtip commit
             # and then commit.
@@ -1729,7 +1919,7 @@
             match1 = scmutil.match(repo[None], pats, opts)
             # in short mode, we only diff the files already included in
             # the patch plus any specified files
-            if opts.get('short'):
+            if opts.get(b'short'):
                 # if amending a patch, we start with existing
                 # files plus specified files - unfiltered
                 match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
@@ -1789,7 +1979,7 @@
 
             dsguard = None
             try:
-                dsguard = dirstateguard.dirstateguard(repo, 'mq.refresh')
+                dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
                 if diffopts.git or diffopts.upgrade:
                     copies = {}
                     for dst in a:
@@ -1804,13 +1994,14 @@
                         src = ctx[dst].copysource()
                         if src:
                             copies.setdefault(src, []).extend(
-                                copies.get(dst, []))
+                                copies.get(dst, [])
+                            )
                             if dst in a:
                                 copies[src].append(dst)
                         # we can't copy a file created by the patch itself
                         if dst in copies:
                             del copies[dst]
-                    for src, dsts in copies.iteritems():
+                    for src, dsts in pycompat.iteritems(copies):
                         for dst in dsts:
                             repo.dirstate.copy(src, dst)
                 else:
@@ -1851,25 +2042,29 @@
             try:
                 # might be nice to attempt to roll back strip after this
 
-                defaultmsg = "[mq]: %s" % patchfn
+                defaultmsg = b"[mq]: %s" % patchfn
                 editor = cmdutil.getcommiteditor(editform=editform)
                 if edit:
+
                     def finishdesc(desc):
                         if desc.rstrip():
                             ph.setmessage(desc)
                             return desc
                         return defaultmsg
+
                     # i18n: this message is shown in editor with "HG: " prefix
-                    extramsg = _('Leave message empty to use default message.')
-                    editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
-                                                     extramsg=extramsg,
-                                                     editform=editform)
-                    message = msg or "\n".join(ph.message)
+                    extramsg = _(b'Leave message empty to use default message.')
+                    editor = cmdutil.getcommiteditor(
+                        finishdesc=finishdesc,
+                        extramsg=extramsg,
+                        editform=editform,
+                    )
+                    message = msg or b"\n".join(ph.message)
                 elif not msg:
                     if not ph.message:
                         message = defaultmsg
                     else:
-                        message = "\n".join(ph.message)
+                        message = b"\n".join(ph.message)
                 else:
                     message = msg
                     ph.setmessage(msg)
@@ -1879,15 +2074,24 @@
                 lock = tr = None
                 try:
                     lock = repo.lock()
-                    tr = repo.transaction('mq')
-                    n = newcommit(repo, oldphase, message, user, ph.date,
-                              match=match, force=True, editor=editor)
+                    tr = repo.transaction(b'mq')
+                    n = newcommit(
+                        repo,
+                        oldphase,
+                        message,
+                        user,
+                        ph.date,
+                        match=match,
+                        force=True,
+                        editor=editor,
+                    )
                     # only write patch after a successful commit
                     c = [list(x) for x in refreshchanges]
                     if inclsubs:
                         self.putsubstate2changes(substatestate, c)
-                    chunks = patchmod.diff(repo, patchparent,
-                                           changes=c, opts=diffopts)
+                    chunks = patchmod.diff(
+                        repo, patchparent, changes=c, opts=diffopts
+                    )
                     comments = bytes(ph)
                     if comments:
                         patchf.write(comments)
@@ -1902,12 +2106,16 @@
                     self.applied.append(statusentry(n, patchfn))
                 finally:
                     lockmod.release(tr, lock)
-            except: # re-raises
+            except:  # re-raises
                 ctx = repo[cparents[0]]
                 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                 self.savedirty()
-                self.ui.warn(_('qrefresh interrupted while patch was popped! '
-                               '(revert --all, qpush to recover)\n'))
+                self.ui.warn(
+                    _(
+                        b'qrefresh interrupted while patch was popped! '
+                        b'(revert --all, qpush to recover)\n'
+                    )
+                )
                 raise
         finally:
             wlock.release()
@@ -1915,7 +2123,7 @@
 
     def init(self, repo, create=False):
         if not create and os.path.isdir(self.path):
-            raise error.Abort(_("patch queue directory already exists"))
+            raise error.Abort(_(b"patch queue directory already exists"))
         try:
             os.mkdir(self.path)
         except OSError as inst:
@@ -1926,7 +2134,7 @@
 
     def unapplied(self, repo, patch=None):
         if patch and patch not in self.series:
-            raise error.Abort(_("patch %s is not in series file") % patch)
+            raise error.Abort(_(b"patch %s is not in series file") % patch)
         if not patch:
             start = self.seriesend()
         else:
@@ -1939,8 +2147,15 @@
             self.explainpushable(i)
         return unapplied
 
-    def qseries(self, repo, missing=None, start=0, length=None, status=None,
-                summary=False):
+    def qseries(
+        self,
+        repo,
+        missing=None,
+        start=0,
+        length=None,
+        status=None,
+        summary=False,
+    ):
         def displayname(pfx, patchname, state):
             if pfx:
                 self.ui.write(pfx)
@@ -1949,58 +2164,64 @@
                 if ph.message:
                     msg = ph.message[0]
                 else:
-                    msg = ''
+                    msg = b''
 
                 if self.ui.formatted():
                     width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                     if width > 0:
                         msg = stringutil.ellipsis(msg, width)
                     else:
-                        msg = ''
-                self.ui.write(patchname, label='qseries.' + state)
-                self.ui.write(': ')
-                self.ui.write(msg, label='qseries.message.' + state)
+                        msg = b''
+                self.ui.write(patchname, label=b'qseries.' + state)
+                self.ui.write(b': ')
+                self.ui.write(msg, label=b'qseries.message.' + state)
             else:
-                self.ui.write(patchname, label='qseries.' + state)
-            self.ui.write('\n')
+                self.ui.write(patchname, label=b'qseries.' + state)
+            self.ui.write(b'\n')
 
         applied = {p.name for p in self.applied}
         if length is None:
             length = len(self.series) - start
         if not missing:
             if self.ui.verbose:
-                idxwidth = len("%d" % (start + length - 1))
+                idxwidth = len(b"%d" % (start + length - 1))
             for i in pycompat.xrange(start, start + length):
                 patch = self.series[i]
                 if patch in applied:
-                    char, state = 'A', 'applied'
+                    char, state = b'A', b'applied'
                 elif self.pushable(i)[0]:
-                    char, state = 'U', 'unapplied'
+                    char, state = b'U', b'unapplied'
                 else:
-                    char, state = 'G', 'guarded'
-                pfx = ''
+                    char, state = b'G', b'guarded'
+                pfx = b''
                 if self.ui.verbose:
-                    pfx = '%*d %s ' % (idxwidth, i, char)
+                    pfx = b'%*d %s ' % (idxwidth, i, char)
                 elif status and status != char:
                     continue
                 displayname(pfx, patch, state)
         else:
             msng_list = []
             for root, dirs, files in os.walk(self.path):
-                d = root[len(self.path) + 1:]
+                d = root[len(self.path) + 1 :]
                 for f in files:
                     fl = os.path.join(d, f)
-                    if (fl not in self.series and
-                        fl not in (self.statuspath, self.seriespath,
-                                   self.guardspath)
-                        and not fl.startswith('.')):
+                    if (
+                        fl not in self.series
+                        and fl
+                        not in (
+                            self.statuspath,
+                            self.seriespath,
+                            self.guardspath,
+                        )
+                        and not fl.startswith(b'.')
+                    ):
                         msng_list.append(fl)
             for x in sorted(msng_list):
-                pfx = self.ui.verbose and ('D ') or ''
-                displayname(pfx, x, 'missing')
+                pfx = self.ui.verbose and b'D ' or b''
+                displayname(pfx, x, b'missing')
 
     def issaveline(self, l):
-        if l.name == '.hg.patches.save.line':
+        if l.name == b'.hg.patches.save.line':
             return True
 
     def qrepo(self, create=False):
@@ -2009,11 +2230,19 @@
         if self.ui.pageractive and not ui.pageractive:
             ui.pageractive = self.ui.pageractive
             # internal config: ui.formatted
-            ui.setconfig('ui', 'formatted',
-                         self.ui.config('ui', 'formatted'), 'mqpager')
-            ui.setconfig('ui', 'interactive',
-                         self.ui.config('ui', 'interactive'), 'mqpager')
-        if create or os.path.isdir(self.join(".hg")):
+            ui.setconfig(
+                b'ui',
+                b'formatted',
+                self.ui.config(b'ui', b'formatted'),
+                b'mqpager',
+            )
+            ui.setconfig(
+                b'ui',
+                b'interactive',
+                self.ui.config(b'ui', b'interactive'),
+                b'mqpager',
+            )
+        if create or os.path.isdir(self.join(b".hg")):
             return hg.repository(ui, path=self.path, create=create)
 
     def restore(self, repo, rev, delete=None, qupdate=None):
@@ -2025,23 +2254,23 @@
         applied = []
         qpp = None
         for i, line in enumerate(lines):
-            if line == 'Patch Data:':
+            if line == b'Patch Data:':
                 datastart = i + 1
-            elif line.startswith('Dirstate:'):
+            elif line.startswith(b'Dirstate:'):
                 l = line.rstrip()
-                l = l[10:].split(' ')
+                l = l[10:].split(b' ')
                 qpp = [bin(x) for x in l]
             elif datastart is not None:
                 l = line.rstrip()
-                n, name = l.split(':', 1)
+                n, name = l.split(b':', 1)
                 if n:
                     applied.append(statusentry(bin(n), name))
                 else:
                     series.append(l)
         if datastart is None:
-            self.ui.warn(_("no saved patch data found\n"))
+            self.ui.warn(_(b"no saved patch data found\n"))
             return 1
-        self.ui.warn(_("restoring status: %s\n") % lines[0])
+        self.ui.warn(_(b"restoring status: %s\n") % lines[0])
         self.fullseries = series
         self.applied = applied
         self.parseseries()
@@ -2050,9 +2279,9 @@
         heads = repo.changelog.heads()
         if delete:
             if rev not in heads:
-                self.ui.warn(_("save entry has children, leaving it alone\n"))
+                self.ui.warn(_(b"save entry has children, leaving it alone\n"))
             else:
-                self.ui.warn(_("removing save entry %s\n") % short(rev))
+                self.ui.warn(_(b"removing save entry %s\n") % short(rev))
                 pp = repo.dirstate.parents()
                 if rev in pp:
                     update = True
@@ -2060,40 +2289,42 @@
                     update = False
                 strip(self.ui, repo, [rev], update=update, backup=False)
         if qpp:
-            self.ui.warn(_("saved queue repository parents: %s %s\n") %
-                         (short(qpp[0]), short(qpp[1])))
+            self.ui.warn(
+                _(b"saved queue repository parents: %s %s\n")
+                % (short(qpp[0]), short(qpp[1]))
+            )
             if qupdate:
-                self.ui.status(_("updating queue directory\n"))
+                self.ui.status(_(b"updating queue directory\n"))
                 r = self.qrepo()
                 if not r:
-                    self.ui.warn(_("unable to load queue repository\n"))
+                    self.ui.warn(_(b"unable to load queue repository\n"))
                     return 1
                 hg.clean(r, qpp[0])
 
     def save(self, repo, msg=None):
         if not self.applied:
-            self.ui.warn(_("save: no patches applied, exiting\n"))
+            self.ui.warn(_(b"save: no patches applied, exiting\n"))
             return 1
         if self.issaveline(self.applied[-1]):
-            self.ui.warn(_("status is already saved\n"))
+            self.ui.warn(_(b"status is already saved\n"))
             return 1
 
         if not msg:
-            msg = _("hg patches saved state")
+            msg = _(b"hg patches saved state")
         else:
-            msg = "hg patches: " + msg.rstrip('\r\n')
+            msg = b"hg patches: " + msg.rstrip(b'\r\n')
         r = self.qrepo()
         if r:
             pp = r.dirstate.parents()
-            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
-        msg += "\n\nPatch Data:\n"
-        msg += ''.join('%s\n' % x for x in self.applied)
-        msg += ''.join(':%s\n' % x for x in self.fullseries)
+            msg += b"\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
+        msg += b"\n\nPatch Data:\n"
+        msg += b''.join(b'%s\n' % x for x in self.applied)
+        msg += b''.join(b':%s\n' % x for x in self.fullseries)
         n = repo.commit(msg, force=True)
         if not n:
-            self.ui.warn(_("repo commit failed\n"))
+            self.ui.warn(_(b"repo commit failed\n"))
             return 1
-        self.applied.append(statusentry(n, '.hg.patches.save.line'))
+        self.applied.append(statusentry(n, b'.hg.patches.save.line'))
         self.applieddirty = True
         self.removeundo(repo)
 
@@ -2112,6 +2343,7 @@
         index of the first patch past the last applied one.
         """
         end = 0
+
         def nextpatch(start):
             if all_patches or start >= len(self.series):
                 return start
@@ -2121,6 +2353,7 @@
                     return i
                 self.explainpushable(i)
             return len(self.series)
+
         if self.applied:
             p = self.applied[-1].name
             try:
@@ -2135,27 +2368,38 @@
         if not self.ui.verbose:
             p = pname
         else:
-            p = ("%d" % self.series.index(pname)) + " " + pname
+            p = (b"%d" % self.series.index(pname)) + b" " + pname
         return p
 
-    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
-                force=None, git=False):
+    def qimport(
+        self,
+        repo,
+        files,
+        patchname=None,
+        rev=None,
+        existing=None,
+        force=None,
+        git=False,
+    ):
         def checkseries(patchname):
             if patchname in self.series:
-                raise error.Abort(_('patch %s is already in the series file')
-                                 % patchname)
+                raise error.Abort(
+                    _(b'patch %s is already in the series file') % patchname
+                )
 
         if rev:
             if files:
-                raise error.Abort(_('option "-r" not valid when importing '
-                                   'files'))
+                raise error.Abort(
+                    _(b'option "-r" not valid when importing files')
+                )
             rev = scmutil.revrange(repo, rev)
             rev.sort(reverse=True)
         elif not files:
-            raise error.Abort(_('no files or revisions specified'))
+            raise error.Abort(_(b'no files or revisions specified'))
         if (len(files) > 1 or len(rev) > 1) and patchname:
-            raise error.Abort(_('option "-n" not valid when importing multiple '
-                               'patches'))
+            raise error.Abort(
+                _(b'option "-n" not valid when importing multiple patches')
+            )
         imported = []
         if rev:
             # If mq patches are applied, we can only import revisions
@@ -2163,51 +2407,61 @@
             # Otherwise, they should form a linear path to a head.
             heads = repo.changelog.heads(repo.changelog.node(rev.first()))
             if len(heads) > 1:
-                raise error.Abort(_('revision %d is the root of more than one '
-                                   'branch') % rev.last())
+                raise error.Abort(
+                    _(b'revision %d is the root of more than one branch')
+                    % rev.last()
+                )
             if self.applied:
                 base = repo.changelog.node(rev.first())
                 if base in [n.node for n in self.applied]:
-                    raise error.Abort(_('revision %d is already managed')
-                                     % rev.first())
+                    raise error.Abort(
+                        _(b'revision %d is already managed') % rev.first()
+                    )
                 if heads != [self.applied[-1].node]:
-                    raise error.Abort(_('revision %d is not the parent of '
-                                       'the queue') % rev.first())
+                    raise error.Abort(
+                        _(b'revision %d is not the parent of the queue')
+                        % rev.first()
+                    )
                 base = repo.changelog.rev(self.applied[0].node)
                 lastparent = repo.changelog.parentrevs(base)[0]
             else:
                 if heads != [repo.changelog.node(rev.first())]:
-                    raise error.Abort(_('revision %d has unmanaged children')
-                                     % rev.first())
+                    raise error.Abort(
+                        _(b'revision %d has unmanaged children') % rev.first()
+                    )
                 lastparent = None
 
-            diffopts = self.diffopts({'git': git})
-            with repo.transaction('qimport') as tr:
+            diffopts = self.diffopts({b'git': git})
+            with repo.transaction(b'qimport') as tr:
                 for r in rev:
                     if not repo[r].mutable():
-                        raise error.Abort(_('revision %d is not mutable') % r,
-                                         hint=_("see 'hg help phases' "
-                                                'for details'))
+                        raise error.Abort(
+                            _(b'revision %d is not mutable') % r,
+                            hint=_(b"see 'hg help phases' " b'for details'),
+                        )
                     p1, p2 = repo.changelog.parentrevs(r)
                     n = repo.changelog.node(r)
                     if p2 != nullrev:
-                        raise error.Abort(_('cannot import merge revision %d')
-                                         % r)
+                        raise error.Abort(
+                            _(b'cannot import merge revision %d') % r
+                        )
                     if lastparent and lastparent != r:
-                        raise error.Abort(_('revision %d is not the parent of '
-                                           '%d')
-                                         % (r, lastparent))
+                        raise error.Abort(
+                            _(b'revision %d is not the parent of %d')
+                            % (r, lastparent)
+                        )
                     lastparent = p1
 
                     if not patchname:
                         patchname = self.makepatchname(
-                            repo[r].description().split('\n', 1)[0],
-                            '%d.diff' % r)
+                            repo[r].description().split(b'\n', 1)[0],
+                            b'%d.diff' % r,
+                        )
                     checkseries(patchname)
                     self.checkpatchname(patchname, force)
                     self.fullseries.insert(0, patchname)
 
-                    with self.opener(patchname, "w") as fp:
+                    with self.opener(patchname, b"w") as fp:
                         cmdutil.exportfile(repo, [n], fp, opts=diffopts)
 
                     se = statusentry(n, patchname)
@@ -2216,7 +2470,7 @@
                     self.added.append(patchname)
                     imported.append(patchname)
                     patchname = None
-                    if rev and repo.ui.configbool('mq', 'secret'):
+                    if rev and repo.ui.configbool(b'mq', b'secret'):
                         # if we added anything with --rev, move the secret root
                         phases.retractboundary(repo, tr, phases.secret, [n])
                     self.parseseries()
@@ -2225,42 +2479,49 @@
 
         for i, filename in enumerate(files):
             if existing:
-                if filename == '-':
-                    raise error.Abort(_('-e is incompatible with import from -')
-                                     )
+                if filename == b'-':
+                    raise error.Abort(
+                        _(b'-e is incompatible with import from -')
+                    )
                 filename = normname(filename)
                 self.checkreservedname(filename)
                 if util.url(filename).islocal():
                     originpath = self.join(filename)
                     if not os.path.isfile(originpath):
                         raise error.Abort(
-                            _("patch %s does not exist") % filename)
+                            _(b"patch %s does not exist") % filename
+                        )
 
                 if patchname:
                     self.checkpatchname(patchname, force)
 
-                    self.ui.write(_('renaming %s to %s\n')
-                                        % (filename, patchname))
+                    self.ui.write(
+                        _(b'renaming %s to %s\n') % (filename, patchname)
+                    )
                     util.rename(originpath, self.join(patchname))
                 else:
                     patchname = filename
 
             else:
-                if filename == '-' and not patchname:
-                    raise error.Abort(_('need --name to import a patch from -'))
+                if filename == b'-' and not patchname:
+                    raise error.Abort(
+                        _(b'need --name to import a patch from -')
+                    )
                 elif not patchname:
-                    patchname = normname(os.path.basename(filename.rstrip('/')))
+                    patchname = normname(
+                        os.path.basename(filename.rstrip(b'/'))
+                    )
                 self.checkpatchname(patchname, force)
                 try:
-                    if filename == '-':
+                    if filename == b'-':
                         text = self.ui.fin.read()
                     else:
                         fp = hg.openpath(self.ui, filename)
                         text = fp.read()
                         fp.close()
                 except (OSError, IOError):
-                    raise error.Abort(_("unable to read file %s") % filename)
-                patchf = self.opener(patchname, "w")
+                    raise error.Abort(_(b"unable to read file %s") % filename)
+                patchf = self.opener(patchname, b"w")
                 patchf.write(text)
                 patchf.close()
             if not force:
@@ -2270,7 +2531,7 @@
                 self.fullseries[index:index] = [patchname]
             self.parseseries()
             self.seriesdirty = True
-            self.ui.warn(_("adding %s to series file\n") % patchname)
+            self.ui.warn(_(b"adding %s to series file\n") % patchname)
             self.added.append(patchname)
             imported.append(patchname)
             patchname = None
@@ -2278,20 +2539,34 @@
         self.removeundo(repo)
         return imported
 
+
 def fixkeepchangesopts(ui, opts):
-    if (not ui.configbool('mq', 'keepchanges') or opts.get('force')
-        or opts.get('exact')):
+    if (
+        not ui.configbool(b'mq', b'keepchanges')
+        or opts.get(b'force')
+        or opts.get(b'exact')
+    ):
         return opts
     opts = dict(opts)
-    opts['keep_changes'] = True
+    opts[b'keep_changes'] = True
     return opts
 
-@command("qdelete|qremove|qrm",
-         [('k', 'keep', None, _('keep patch file')),
-          ('r', 'rev', [],
-           _('stop managing a revision (DEPRECATED)'), _('REV'))],
-         _('hg qdelete [-k] [PATCH]...'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qdelete|qremove|qrm",
+    [
+        (b'k', b'keep', None, _(b'keep patch file')),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'stop managing a revision (DEPRECATED)'),
+            _(b'REV'),
+        ),
+    ],
+    _(b'hg qdelete [-k] [PATCH]...'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def delete(ui, repo, *patches, **opts):
     """remove patches from queue
 
@@ -2306,11 +2581,14 @@
     q.savedirty()
     return 0
 
-@command("qapplied",
-         [('1', 'last', None, _('show only the preceding applied patch'))
-          ] + seriesopts,
-         _('hg qapplied [-1] [-s] [PATCH]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qapplied",
+    [(b'1', b'last', None, _(b'show only the preceding applied patch'))]
+    + seriesopts,
+    _(b'hg qapplied [-1] [-s] [PATCH]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def applied(ui, repo, patch=None, **opts):
     """print the patches already applied
 
@@ -2321,31 +2599,34 @@
 
     if patch:
         if patch not in q.series:
-            raise error.Abort(_("patch %s is not in series file") % patch)
+            raise error.Abort(_(b"patch %s is not in series file") % patch)
         end = q.series.index(patch) + 1
     else:
         end = q.seriesend(True)
 
-    if opts.get('last') and not end:
-        ui.write(_("no patches applied\n"))
+    if opts.get(b'last') and not end:
+        ui.write(_(b"no patches applied\n"))
         return 1
-    elif opts.get('last') and end == 1:
-        ui.write(_("only one patch applied\n"))
+    elif opts.get(b'last') and end == 1:
+        ui.write(_(b"only one patch applied\n"))
         return 1
-    elif opts.get('last'):
+    elif opts.get(b'last'):
         start = end - 2
         end = 1
     else:
         start = 0
 
-    q.qseries(repo, length=end, start=start, status='A',
-              summary=opts.get('summary'))
-
-
-@command("qunapplied",
-         [('1', 'first', None, _('show only the first patch'))] + seriesopts,
-         _('hg qunapplied [-1] [-s] [PATCH]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+    q.qseries(
+        repo, length=end, start=start, status=b'A', summary=opts.get(b'summary')
+    )
+
+
+@command(
+    b"qunapplied",
+    [(b'1', b'first', None, _(b'show only the first patch'))] + seriesopts,
+    _(b'hg qunapplied [-1] [-s] [PATCH]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def unapplied(ui, repo, patch=None, **opts):
     """print the patches not yet applied
 
@@ -2355,33 +2636,47 @@
     opts = pycompat.byteskwargs(opts)
     if patch:
         if patch not in q.series:
-            raise error.Abort(_("patch %s is not in series file") % patch)
+            raise error.Abort(_(b"patch %s is not in series file") % patch)
         start = q.series.index(patch) + 1
     else:
         start = q.seriesend(True)
 
-    if start == len(q.series) and opts.get('first'):
-        ui.write(_("all patches applied\n"))
+    if start == len(q.series) and opts.get(b'first'):
+        ui.write(_(b"all patches applied\n"))
         return 1
 
-    if opts.get('first'):
+    if opts.get(b'first'):
         length = 1
     else:
         length = None
-    q.qseries(repo, start=start, length=length, status='U',
-              summary=opts.get('summary'))
-
-@command("qimport",
-         [('e', 'existing', None, _('import file in patch directory')),
-          ('n', 'name', '',
-           _('name of patch file'), _('NAME')),
-          ('f', 'force', None, _('overwrite existing files')),
-          ('r', 'rev', [],
-           _('place existing revisions under mq control'), _('REV')),
-          ('g', 'git', None, _('use git extended diff format')),
-          ('P', 'push', None, _('qpush after importing'))],
-         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
-         helpcategory=command.CATEGORY_IMPORT_EXPORT)
+    q.qseries(
+        repo,
+        start=start,
+        length=length,
+        status=b'U',
+        summary=opts.get(b'summary'),
+    )
+
+
+@command(
+    b"qimport",
+    [
+        (b'e', b'existing', None, _(b'import file in patch directory')),
+        (b'n', b'name', b'', _(b'name of patch file'), _(b'NAME')),
+        (b'f', b'force', None, _(b'overwrite existing files')),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'place existing revisions under mq control'),
+            _(b'REV'),
+        ),
+        (b'g', b'git', None, _(b'use git extended diff format')),
+        (b'P', b'push', None, _(b'qpush after importing')),
+    ],
+    _(b'hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT,
+)
 def qimport(ui, repo, *filename, **opts):
     """import a patch or existing changeset
 
@@ -2417,20 +2712,26 @@
     Returns 0 if import succeeded.
     """
     opts = pycompat.byteskwargs(opts)
-    with repo.lock(): # cause this may move phase
+    with repo.lock():  # because this may move phase
         q = repo.mq
         try:
             imported = q.qimport(
-                repo, filename, patchname=opts.get('name'),
-                existing=opts.get('existing'), force=opts.get('force'),
-                rev=opts.get('rev'), git=opts.get('git'))
+                repo,
+                filename,
+                patchname=opts.get(b'name'),
+                existing=opts.get(b'existing'),
+                force=opts.get(b'force'),
+                rev=opts.get(b'rev'),
+                git=opts.get(b'git'),
+            )
         finally:
             q.savedirty()
 
-    if imported and opts.get('push') and not opts.get('rev'):
+    if imported and opts.get(b'push') and not opts.get(b'rev'):
         return q.push(repo, imported[-1])
     return 0
 
+
 def qinit(ui, repo, create):
     """initialize a new queue repository
 
@@ -2443,25 +2744,28 @@
     r = q.init(repo, create)
     q.savedirty()
     if r:
-        if not os.path.exists(r.wjoin('.hgignore')):
-            fp = r.wvfs('.hgignore', 'w')
-            fp.write('^\\.hg\n')
-            fp.write('^\\.mq\n')
-            fp.write('syntax: glob\n')
-            fp.write('status\n')
-            fp.write('guards\n')
+        if not os.path.exists(r.wjoin(b'.hgignore')):
+            fp = r.wvfs(b'.hgignore', b'w')
+            fp.write(b'^\\.hg\n')
+            fp.write(b'^\\.mq\n')
+            fp.write(b'syntax: glob\n')
+            fp.write(b'status\n')
+            fp.write(b'guards\n')
             fp.close()
-        if not os.path.exists(r.wjoin('series')):
-            r.wvfs('series', 'w').close()
-        r[None].add(['.hgignore', 'series'])
+        if not os.path.exists(r.wjoin(b'series')):
+            r.wvfs(b'series', b'w').close()
+        r[None].add([b'.hgignore', b'series'])
         commands.add(ui, r)
     return 0
 
-@command("qinit",
-         [('c', 'create-repo', None, _('create queue repository'))],
-         _('hg qinit [-c]'),
-         helpcategory=command.CATEGORY_REPO_CREATION,
-         helpbasic=True)
+
+@command(
+    b"qinit",
+    [(b'c', b'create-repo', None, _(b'create queue repository'))],
+    _(b'hg qinit [-c]'),
+    helpcategory=command.CATEGORY_REPO_CREATION,
+    helpbasic=True,
+)
 def init(ui, repo, **opts):
     """init a new queue repository (DEPRECATED)
 
@@ -2475,18 +2779,36 @@
     commands. With -c, use :hg:`init --mq` instead."""
     return qinit(ui, repo, create=opts.get(r'create_repo'))
 
-@command("qclone",
-         [('', 'pull', None, _('use pull protocol to copy metadata')),
-          ('U', 'noupdate', None,
-           _('do not update the new working directories')),
-          ('', 'uncompressed', None,
-           _('use uncompressed transfer (fast over LAN)')),
-          ('p', 'patches', '',
-           _('location of source patch repository'), _('REPO')),
-         ] + cmdutil.remoteopts,
-         _('hg qclone [OPTION]... SOURCE [DEST]'),
-         helpcategory=command.CATEGORY_REPO_CREATION,
-         norepo=True)
+
+@command(
+    b"qclone",
+    [
+        (b'', b'pull', None, _(b'use pull protocol to copy metadata')),
+        (
+            b'U',
+            b'noupdate',
+            None,
+            _(b'do not update the new working directories'),
+        ),
+        (
+            b'',
+            b'uncompressed',
+            None,
+            _(b'use uncompressed transfer (fast over LAN)'),
+        ),
+        (
+            b'p',
+            b'patches',
+            b'',
+            _(b'location of source patch repository'),
+            _(b'REPO'),
+        ),
+    ]
+    + cmdutil.remoteopts,
+    _(b'hg qclone [OPTION]... SOURCE [DEST]'),
+    helpcategory=command.CATEGORY_REPO_CREATION,
+    norepo=True,
+)
 def clone(ui, source, dest=None, **opts):
     '''clone main and patch repository at the same time
 
@@ -2505,12 +2827,13 @@
     Return 0 on success.
     '''
     opts = pycompat.byteskwargs(opts)
+
     def patchdir(repo):
         """compute a patch repo url from a repo object"""
         url = repo.url()
-        if url.endswith('/'):
+        if url.endswith(b'/'):
             url = url[:-1]
-        return url + '/.hg/patches'
+        return url + b'/.hg/patches'
 
     # main repo (destination and sources)
     if dest is None:
@@ -2518,15 +2841,16 @@
     sr = hg.peer(ui, opts, ui.expandpath(source))
 
     # patches repo (source only)
-    if opts.get('patches'):
-        patchespath = ui.expandpath(opts.get('patches'))
+    if opts.get(b'patches'):
+        patchespath = ui.expandpath(opts.get(b'patches'))
     else:
         patchespath = patchdir(sr)
     try:
         hg.peer(ui, opts, patchespath)
     except error.RepoError:
-        raise error.Abort(_('versioned patch repository not found'
-                           ' (see init --mq)'))
+        raise error.Abort(
+            _(b'versioned patch repository not found (see init --mq)')
+        )
     qbase, destrev = None, None
     if sr.local():
         repo = sr.local()
@@ -2536,39 +2860,57 @@
                 heads = set(repo.heads())
                 destrev = list(heads.difference(repo.heads(qbase)))
                 destrev.append(repo.changelog.parents(qbase)[0])
-    elif sr.capable('lookup'):
+    elif sr.capable(b'lookup'):
         try:
-            qbase = sr.lookup('qbase')
+            qbase = sr.lookup(b'qbase')
         except error.RepoError:
             pass
 
-    ui.note(_('cloning main repository\n'))
-    sr, dr = hg.clone(ui, opts, sr.url(), dest,
-                      pull=opts.get('pull'),
-                      revs=destrev,
-                      update=False,
-                      stream=opts.get('uncompressed'))
-
-    ui.note(_('cloning patch repository\n'))
-    hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
-             pull=opts.get('pull'), update=not opts.get('noupdate'),
-             stream=opts.get('uncompressed'))
+    ui.note(_(b'cloning main repository\n'))
+    sr, dr = hg.clone(
+        ui,
+        opts,
+        sr.url(),
+        dest,
+        pull=opts.get(b'pull'),
+        revs=destrev,
+        update=False,
+        stream=opts.get(b'uncompressed'),
+    )
+
+    ui.note(_(b'cloning patch repository\n'))
+    hg.clone(
+        ui,
+        opts,
+        opts.get(b'patches') or patchdir(sr),
+        patchdir(dr),
+        pull=opts.get(b'pull'),
+        update=not opts.get(b'noupdate'),
+        stream=opts.get(b'uncompressed'),
+    )
 
     if dr.local():
         repo = dr.local()
         if qbase:
-            ui.note(_('stripping applied patches from destination '
-                      'repository\n'))
+            ui.note(
+                _(
+                    b'stripping applied patches from destination '
+                    b'repository\n'
+                )
+            )
             strip(ui, repo, [qbase], update=False, backup=None)
-        if not opts.get('noupdate'):
-            ui.note(_('updating destination repository\n'))
+        if not opts.get(b'noupdate'):
+            ui.note(_(b'updating destination repository\n'))
             hg.update(repo, repo.changelog.tip())
 
-@command("qcommit|qci",
-         commands.table["commit|ci"][1],
-         _('hg qcommit [OPTION]... [FILE]...'),
-         helpcategory=command.CATEGORY_COMMITTING,
-         inferrepo=True)
+
+@command(
+    b"qcommit|qci",
+    commands.table[b"commit|ci"][1],
+    _(b'hg qcommit [OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_COMMITTING,
+    inferrepo=True,
+)
 def commit(ui, repo, *pats, **opts):
     """commit changes in the queue repository (DEPRECATED)
 
@@ -2576,24 +2918,32 @@
     q = repo.mq
     r = q.qrepo()
     if not r:
-        raise error.Abort('no queue repository')
+        raise error.Abort(b'no queue repository')
     commands.commit(r.ui, r, *pats, **opts)
 
-@command("qseries",
-         [('m', 'missing', None, _('print patches not in series')),
-         ] + seriesopts,
-          _('hg qseries [-ms]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qseries",
+    [(b'm', b'missing', None, _(b'print patches not in series'))] + seriesopts,
+    _(b'hg qseries [-ms]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def series(ui, repo, **opts):
     """print the entire series file
 
     Returns 0 on success."""
-    repo.mq.qseries(repo, missing=opts.get(r'missing'),
-                    summary=opts.get(r'summary'))
+    repo.mq.qseries(
+        repo, missing=opts.get(r'missing'), summary=opts.get(r'summary')
+    )
     return 0
 
-@command("qtop", seriesopts, _('hg qtop [-s]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qtop",
+    seriesopts,
+    _(b'hg qtop [-s]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def top(ui, repo, **opts):
     """print the name of the current patch
 
@@ -2605,14 +2955,24 @@
         t = 0
 
     if t:
-        q.qseries(repo, start=t - 1, length=1, status='A',
-                  summary=opts.get(r'summary'))
+        q.qseries(
+            repo,
+            start=t - 1,
+            length=1,
+            status=b'A',
+            summary=opts.get(r'summary'),
+        )
     else:
-        ui.write(_("no patches applied\n"))
+        ui.write(_(b"no patches applied\n"))
         return 1
 
-@command("qnext", seriesopts, _('hg qnext [-s]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qnext",
+    seriesopts,
+    _(b'hg qnext [-s]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def next(ui, repo, **opts):
     """print the name of the next pushable patch
 
@@ -2620,12 +2980,17 @@
     q = repo.mq
     end = q.seriesend()
     if end == len(q.series):
-        ui.write(_("all patches applied\n"))
+        ui.write(_(b"all patches applied\n"))
         return 1
     q.qseries(repo, start=end, length=1, summary=opts.get(r'summary'))
 
-@command("qprev", seriesopts, _('hg qprev [-s]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qprev",
+    seriesopts,
+    _(b'hg qprev [-s]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def prev(ui, repo, **opts):
     """print the name of the preceding applied patch
 
@@ -2633,35 +2998,42 @@
     q = repo.mq
     l = len(q.applied)
     if l == 1:
-        ui.write(_("only one patch applied\n"))
+        ui.write(_(b"only one patch applied\n"))
         return 1
     if not l:
-        ui.write(_("no patches applied\n"))
+        ui.write(_(b"no patches applied\n"))
         return 1
     idx = q.series.index(q.applied[-2].name)
-    q.qseries(repo, start=idx, length=1, status='A',
-              summary=opts.get(r'summary'))
+    q.qseries(
+        repo, start=idx, length=1, status=b'A', summary=opts.get(r'summary')
+    )
+
 
 def setupheaderopts(ui, opts):
-    if not opts.get('user') and opts.get('currentuser'):
-        opts['user'] = ui.username()
-    if not opts.get('date') and opts.get('currentdate'):
-        opts['date'] = "%d %d" % dateutil.makedate()
-
-@command("qnew",
-         [('e', 'edit', None, _('invoke editor on commit messages')),
-          ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
-          ('g', 'git', None, _('use git extended diff format')),
-          ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
-          ('u', 'user', '',
-           _('add "From: <USER>" to patch'), _('USER')),
-          ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
-          ('d', 'date', '',
-           _('add "Date: <DATE>" to patch'), _('DATE'))
-          ] + cmdutil.walkopts + cmdutil.commitopts,
-         _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
-         helpcategory=command.CATEGORY_COMMITTING, helpbasic=True,
-         inferrepo=True)
+    if not opts.get(b'user') and opts.get(b'currentuser'):
+        opts[b'user'] = ui.username()
+    if not opts.get(b'date') and opts.get(b'currentdate'):
+        opts[b'date'] = b"%d %d" % dateutil.makedate()
+
+
+@command(
+    b"qnew",
+    [
+        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
+        (b'f', b'force', None, _(b'import uncommitted changes (DEPRECATED)')),
+        (b'g', b'git', None, _(b'use git extended diff format')),
+        (b'U', b'currentuser', None, _(b'add "From: <current user>" to patch')),
+        (b'u', b'user', b'', _(b'add "From: <USER>" to patch'), _(b'USER')),
+        (b'D', b'currentdate', None, _(b'add "Date: <current date>" to patch')),
+        (b'd', b'date', b'', _(b'add "Date: <DATE>" to patch'), _(b'DATE')),
+    ]
+    + cmdutil.walkopts
+    + cmdutil.commitopts,
+    _(b'hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
+    helpcategory=command.CATEGORY_COMMITTING,
+    helpbasic=True,
+    inferrepo=True,
+)
 def new(ui, repo, patch, *args, **opts):
     """create a new patch
 
@@ -2690,29 +3062,58 @@
     opts = pycompat.byteskwargs(opts)
     msg = cmdutil.logmessage(ui, opts)
     q = repo.mq
-    opts['msg'] = msg
+    opts[b'msg'] = msg
     setupheaderopts(ui, opts)
     q.new(repo, patch, *args, **pycompat.strkwargs(opts))
     q.savedirty()
     return 0
 
-@command("qrefresh",
-         [('e', 'edit', None, _('invoke editor on commit messages')),
-          ('g', 'git', None, _('use git extended diff format')),
-          ('s', 'short', None,
-           _('refresh only files already in the patch and specified files')),
-          ('U', 'currentuser', None,
-           _('add/update author field in patch with current user')),
-          ('u', 'user', '',
-           _('add/update author field in patch with given user'), _('USER')),
-          ('D', 'currentdate', None,
-           _('add/update date field in patch with current date')),
-          ('d', 'date', '',
-           _('add/update date field in patch with given date'), _('DATE'))
-          ] + cmdutil.walkopts + cmdutil.commitopts,
-         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
-         helpcategory=command.CATEGORY_COMMITTING, helpbasic=True,
-         inferrepo=True)
+
+@command(
+    b"qrefresh",
+    [
+        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
+        (b'g', b'git', None, _(b'use git extended diff format')),
+        (
+            b's',
+            b'short',
+            None,
+            _(b'refresh only files already in the patch and specified files'),
+        ),
+        (
+            b'U',
+            b'currentuser',
+            None,
+            _(b'add/update author field in patch with current user'),
+        ),
+        (
+            b'u',
+            b'user',
+            b'',
+            _(b'add/update author field in patch with given user'),
+            _(b'USER'),
+        ),
+        (
+            b'D',
+            b'currentdate',
+            None,
+            _(b'add/update date field in patch with current date'),
+        ),
+        (
+            b'd',
+            b'date',
+            b'',
+            _(b'add/update date field in patch with given date'),
+            _(b'DATE'),
+        ),
+    ]
+    + cmdutil.walkopts
+    + cmdutil.commitopts,
+    _(b'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
+    helpcategory=command.CATEGORY_COMMITTING,
+    helpbasic=True,
+    inferrepo=True,
+)
 def refresh(ui, repo, *pats, **opts):
     """update the current patch
 
@@ -2743,11 +3144,15 @@
         q.savedirty()
         return ret
 
-@command("qdiff",
-         cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
-         _('hg qdiff [OPTION]... [FILE]...'),
-         helpcategory=command.CATEGORY_FILE_CONTENTS, helpbasic=True,
-         inferrepo=True)
+
+@command(
+    b"qdiff",
+    cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
+    _(b'hg qdiff [OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_FILE_CONTENTS,
+    helpbasic=True,
+    inferrepo=True,
+)
 def diff(ui, repo, *pats, **opts):
     """diff of the current patch and subsequent modifications
 
@@ -2763,16 +3168,21 @@
 
     Returns 0 on success.
     """
-    ui.pager('qdiff')
+    ui.pager(b'qdiff')
     repo.mq.diff(repo, pats, pycompat.byteskwargs(opts))
     return 0
 
-@command('qfold',
-         [('e', 'edit', None, _('invoke editor on commit messages')),
-          ('k', 'keep', None, _('keep folded patch files')),
-         ] + cmdutil.commitopts,
-         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
-         helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
+
+@command(
+    b'qfold',
+    [
+        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
+        (b'k', b'keep', None, _(b'keep folded patch files')),
+    ]
+    + cmdutil.commitopts,
+    _(b'hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+)
 def fold(ui, repo, *files, **opts):
     """fold the named patches into the current patch
 
@@ -2790,23 +3200,24 @@
     opts = pycompat.byteskwargs(opts)
     q = repo.mq
     if not files:
-        raise error.Abort(_('qfold requires at least one patch name'))
+        raise error.Abort(_(b'qfold requires at least one patch name'))
     if not q.checktoppatch(repo)[0]:
-        raise error.Abort(_('no patches applied'))
+        raise error.Abort(_(b'no patches applied'))
     q.checklocalchanges(repo)
 
     message = cmdutil.logmessage(ui, opts)
 
-    parent = q.lookup('qtip')
+    parent = q.lookup(b'qtip')
     patches = []
     messages = []
     for f in files:
         p = q.lookup(f)
         if p in patches or p == parent:
-            ui.warn(_('skipping already folded patch %s\n') % p)
+            ui.warn(_(b'skipping already folded patch %s\n') % p)
         if q.isapplied(p):
-            raise error.Abort(_('qfold cannot fold already applied patch %s')
-                             % p)
+            raise error.Abort(
+                _(b'qfold cannot fold already applied patch %s') % p
+            )
         patches.append(p)
 
     for p in patches:
@@ -2817,7 +3228,7 @@
         pf = q.join(p)
         (patchsuccess, files, fuzz) = q.patch(repo, pf)
         if not patchsuccess:
-            raise error.Abort(_('error folding patch %s') % p)
+            raise error.Abort(_(b'error folding patch %s') % p)
 
     if not message:
         ph = patchheader(q.join(parent), q.plainmode)
@@ -2825,24 +3236,38 @@
         for msg in messages:
             if msg:
                 if message:
-                    message.append('* * *')
+                    message.append(b'* * *')
                 message.extend(msg)
-        message = '\n'.join(message)
+        message = b'\n'.join(message)
 
     diffopts = q.patchopts(q.diffopts(), *patches)
     with repo.wlock():
-        q.refresh(repo, msg=message, git=diffopts.git, edit=opts.get('edit'),
-                  editform='mq.qfold')
+        q.refresh(
+            repo,
+            msg=message,
+            git=diffopts.git,
+            edit=opts.get(b'edit'),
+            editform=b'mq.qfold',
+        )
         q.delete(repo, patches, opts)
         q.savedirty()
 
-@command("qgoto",
-         [('', 'keep-changes', None,
-           _('tolerate non-conflicting local changes')),
-          ('f', 'force', None, _('overwrite any local changes')),
-          ('', 'no-backup', None, _('do not save backup copies of files'))],
-         _('hg qgoto [OPTION]... PATCH'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qgoto",
+    [
+        (
+            b'',
+            b'keep-changes',
+            None,
+            _(b'tolerate non-conflicting local changes'),
+        ),
+        (b'f', b'force', None, _(b'overwrite any local changes')),
+        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
+    ],
+    _(b'hg qgoto [OPTION]... PATCH'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def goto(ui, repo, patch, **opts):
     '''push or pop patches until named patch is at top of stack
 
@@ -2851,22 +3276,37 @@
     opts = fixkeepchangesopts(ui, opts)
     q = repo.mq
     patch = q.lookup(patch)
-    nobackup = opts.get('no_backup')
-    keepchanges = opts.get('keep_changes')
+    nobackup = opts.get(b'no_backup')
+    keepchanges = opts.get(b'keep_changes')
     if q.isapplied(patch):
-        ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup,
-                    keepchanges=keepchanges)
+        ret = q.pop(
+            repo,
+            patch,
+            force=opts.get(b'force'),
+            nobackup=nobackup,
+            keepchanges=keepchanges,
+        )
     else:
-        ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup,
-                     keepchanges=keepchanges)
+        ret = q.push(
+            repo,
+            patch,
+            force=opts.get(b'force'),
+            nobackup=nobackup,
+            keepchanges=keepchanges,
+        )
     q.savedirty()
     return ret
 
-@command("qguard",
-         [('l', 'list', None, _('list all patches and guards')),
-          ('n', 'none', None, _('drop all guards'))],
-         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qguard",
+    [
+        (b'l', b'list', None, _(b'list all patches and guards')),
+        (b'n', b'none', None, _(b'drop all guards')),
+    ],
+    _(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def guard(ui, repo, *args, **opts):
     '''set or print guards for a patch
 
@@ -2889,57 +3329,65 @@
 
     Returns 0 on success.
     '''
+
     def status(idx):
-        guards = q.seriesguards[idx] or ['unguarded']
+        guards = q.seriesguards[idx] or [b'unguarded']
         if q.series[idx] in applied:
-            state = 'applied'
+            state = b'applied'
         elif q.pushable(idx)[0]:
-            state = 'unapplied'
+            state = b'unapplied'
         else:
-            state = 'guarded'
-        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
-        ui.write('%s: ' % ui.label(q.series[idx], label))
+            state = b'guarded'
+        label = b'qguard.patch qguard.%s qseries.%s' % (state, state)
+        ui.write(b'%s: ' % ui.label(q.series[idx], label))
 
         for i, guard in enumerate(guards):
-            if guard.startswith('+'):
-                ui.write(guard, label='qguard.positive')
-            elif guard.startswith('-'):
-                ui.write(guard, label='qguard.negative')
+            if guard.startswith(b'+'):
+                ui.write(guard, label=b'qguard.positive')
+            elif guard.startswith(b'-'):
+                ui.write(guard, label=b'qguard.negative')
             else:
-                ui.write(guard, label='qguard.unguarded')
+                ui.write(guard, label=b'qguard.unguarded')
             if i != len(guards) - 1:
-                ui.write(' ')
-        ui.write('\n')
+                ui.write(b' ')
+        ui.write(b'\n')
+
     q = repo.mq
     applied = set(p.name for p in q.applied)
     patch = None
     args = list(args)
     if opts.get(r'list'):
         if args or opts.get(r'none'):
-            raise error.Abort(_('cannot mix -l/--list with options or '
-                               'arguments'))
+            raise error.Abort(
+                _(b'cannot mix -l/--list with options or arguments')
+            )
         for i in pycompat.xrange(len(q.series)):
             status(i)
         return
-    if not args or args[0][0:1] in '-+':
+    if not args or args[0][0:1] in b'-+':
         if not q.applied:
-            raise error.Abort(_('no patches applied'))
+            raise error.Abort(_(b'no patches applied'))
         patch = q.applied[-1].name
-    if patch is None and args[0][0:1] not in '-+':
+    if patch is None and args[0][0:1] not in b'-+':
         patch = args.pop(0)
     if patch is None:
-        raise error.Abort(_('no patch to work with'))
+        raise error.Abort(_(b'no patch to work with'))
     if args or opts.get(r'none'):
         idx = q.findseries(patch)
         if idx is None:
-            raise error.Abort(_('no patch named %s') % patch)
+            raise error.Abort(_(b'no patch named %s') % patch)
         q.setguards(idx, args)
         q.savedirty()
     else:
         status(q.series.index(q.lookup(patch)))
 
-@command("qheader", [], _('hg qheader [PATCH]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qheader",
+    [],
+    _(b'hg qheader [PATCH]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def header(ui, repo, patch=None):
     """print the header of the topmost or specified patch
 
@@ -2950,17 +3398,18 @@
         patch = q.lookup(patch)
     else:
         if not q.applied:
-            ui.write(_('no patches applied\n'))
+            ui.write(_(b'no patches applied\n'))
             return 1
-        patch = q.lookup('qtip')
+        patch = q.lookup(b'qtip')
     ph = patchheader(q.join(patch), q.plainmode)
 
-    ui.write('\n'.join(ph.message) + '\n')
+    ui.write(b'\n'.join(ph.message) + b'\n')
+
 
 def lastsavename(path):
     (directory, base) = os.path.split(path)
     names = os.listdir(directory)
-    namere = re.compile("%s.([0-9]+)" % base)
+    namere = re.compile(b"%s.([0-9]+)" % base)
     maxindex = None
     maxname = None
     for f in names:
@@ -2974,30 +3423,47 @@
         return (os.path.join(directory, maxname), maxindex)
     return (None, None)
 
+
 def savename(path):
     (last, index) = lastsavename(path)
     if last is None:
         index = 0
-    newpath = path + ".%d" % (index + 1)
+    newpath = path + b".%d" % (index + 1)
     return newpath
 
-@command("qpush",
-         [('', 'keep-changes', None,
-           _('tolerate non-conflicting local changes')),
-          ('f', 'force', None, _('apply on top of local changes')),
-          ('e', 'exact', None,
-           _('apply the target patch to its recorded parent')),
-          ('l', 'list', None, _('list patch name in commit text')),
-          ('a', 'all', None, _('apply all patches')),
-          ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
-          ('n', 'name', '',
-           _('merge queue name (DEPRECATED)'), _('NAME')),
-          ('', 'move', None,
-           _('reorder patch series and apply only the patch')),
-          ('', 'no-backup', None, _('do not save backup copies of files'))],
-         _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
-         helpbasic=True)
+
+@command(
+    b"qpush",
+    [
+        (
+            b'',
+            b'keep-changes',
+            None,
+            _(b'tolerate non-conflicting local changes'),
+        ),
+        (b'f', b'force', None, _(b'apply on top of local changes')),
+        (
+            b'e',
+            b'exact',
+            None,
+            _(b'apply the target patch to its recorded parent'),
+        ),
+        (b'l', b'list', None, _(b'list patch name in commit text')),
+        (b'a', b'all', None, _(b'apply all patches')),
+        (b'm', b'merge', None, _(b'merge from another queue (DEPRECATED)')),
+        (b'n', b'name', b'', _(b'merge queue name (DEPRECATED)'), _(b'NAME')),
+        (
+            b'',
+            b'move',
+            None,
+            _(b'reorder patch series and apply only the patch'),
+        ),
+        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
+    ],
+    _(b'hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+    helpbasic=True,
+)
 def push(ui, repo, patch=None, **opts):
     """push the next patch onto the stack
 
@@ -3013,33 +3479,49 @@
 
     opts = pycompat.byteskwargs(opts)
     opts = fixkeepchangesopts(ui, opts)
-    if opts.get('merge'):
-        if opts.get('name'):
-            newpath = repo.vfs.join(opts.get('name'))
+    if opts.get(b'merge'):
+        if opts.get(b'name'):
+            newpath = repo.vfs.join(opts.get(b'name'))
         else:
             newpath, i = lastsavename(q.path)
         if not newpath:
-            ui.warn(_("no saved queues found, please use -n\n"))
+            ui.warn(_(b"no saved queues found, please use -n\n"))
             return 1
         mergeq = queue(ui, repo.baseui, repo.path, newpath)
-        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
-    ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
-                 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
-                 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
-                 keepchanges=opts.get('keep_changes'))
+        ui.warn(_(b"merging with queue at: %s\n") % mergeq.path)
+    ret = q.push(
+        repo,
+        patch,
+        force=opts.get(b'force'),
+        list=opts.get(b'list'),
+        mergeq=mergeq,
+        all=opts.get(b'all'),
+        move=opts.get(b'move'),
+        exact=opts.get(b'exact'),
+        nobackup=opts.get(b'no_backup'),
+        keepchanges=opts.get(b'keep_changes'),
+    )
     return ret
 
-@command("qpop",
-         [('a', 'all', None, _('pop all patches')),
-          ('n', 'name', '',
-           _('queue name to pop (DEPRECATED)'), _('NAME')),
-          ('', 'keep-changes', None,
-           _('tolerate non-conflicting local changes')),
-          ('f', 'force', None, _('forget any local changes to patched files')),
-          ('', 'no-backup', None, _('do not save backup copies of files'))],
-         _('hg qpop [-a] [-f] [PATCH | INDEX]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
-         helpbasic=True)
+
+@command(
+    b"qpop",
+    [
+        (b'a', b'all', None, _(b'pop all patches')),
+        (b'n', b'name', b'', _(b'queue name to pop (DEPRECATED)'), _(b'NAME')),
+        (
+            b'',
+            b'keep-changes',
+            None,
+            _(b'tolerate non-conflicting local changes'),
+        ),
+        (b'f', b'force', None, _(b'forget any local changes to patched files')),
+        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
+    ],
+    _(b'hg qpop [-a] [-f] [PATCH | INDEX]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+    helpbasic=True,
+)
 def pop(ui, repo, patch=None, **opts):
     """pop the current patch off the stack
 
@@ -3057,20 +3539,31 @@
     opts = pycompat.byteskwargs(opts)
     opts = fixkeepchangesopts(ui, opts)
     localupdate = True
-    if opts.get('name'):
-        q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get('name')))
-        ui.warn(_('using patch queue: %s\n') % q.path)
+    if opts.get(b'name'):
+        q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get(b'name')))
+        ui.warn(_(b'using patch queue: %s\n') % q.path)
         localupdate = False
     else:
         q = repo.mq
-    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
-                all=opts.get('all'), nobackup=opts.get('no_backup'),
-                keepchanges=opts.get('keep_changes'))
+    ret = q.pop(
+        repo,
+        patch,
+        force=opts.get(b'force'),
+        update=localupdate,
+        all=opts.get(b'all'),
+        nobackup=opts.get(b'no_backup'),
+        keepchanges=opts.get(b'keep_changes'),
+    )
     q.savedirty()
     return ret
 
-@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qrename|qmv",
+    [],
+    _(b'hg qrename PATCH1 [PATCH2]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def rename(ui, repo, patch, name=None, **opts):
     """rename a patch
 
@@ -3087,19 +3580,19 @@
         patch = q.lookup(patch)
     else:
         if not q.applied:
-            ui.write(_('no patches applied\n'))
+            ui.write(_(b'no patches applied\n'))
             return
-        patch = q.lookup('qtip')
+        patch = q.lookup(b'qtip')
     absdest = q.join(name)
     if os.path.isdir(absdest):
         name = normname(os.path.join(name, os.path.basename(patch)))
         absdest = q.join(name)
     q.checkpatchname(name)
 
-    ui.note(_('renaming %s to %s\n') % (patch, name))
+    ui.note(_(b'renaming %s to %s\n') % (patch, name))
     i = q.findseries(patch)
     guards = q.guard_re.findall(q.fullseries[i])
-    q.fullseries[i] = name + ''.join([' #' + g for g in guards])
+    q.fullseries[i] = name + b''.join([b' #' + g for g in guards])
     q.parseseries()
     q.seriesdirty = True
 
@@ -3116,7 +3609,7 @@
     if r and patch in r.dirstate:
         wctx = r[None]
         with r.wlock():
-            if r.dirstate[patch] == 'a':
+            if r.dirstate[patch] == b'a':
                 r.dirstate.drop(patch)
                 r.dirstate.add(name)
             else:
@@ -3125,30 +3618,41 @@
 
     q.savedirty()
 
-@command("qrestore",
-         [('d', 'delete', None, _('delete save entry')),
-          ('u', 'update', None, _('update queue working directory'))],
-         _('hg qrestore [-d] [-u] REV'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qrestore",
+    [
+        (b'd', b'delete', None, _(b'delete save entry')),
+        (b'u', b'update', None, _(b'update queue working directory')),
+    ],
+    _(b'hg qrestore [-d] [-u] REV'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def restore(ui, repo, rev, **opts):
     """restore the queue state saved by a revision (DEPRECATED)
 
     This command is deprecated, use :hg:`rebase` instead."""
     rev = repo.lookup(rev)
     q = repo.mq
-    q.restore(repo, rev, delete=opts.get(r'delete'),
-              qupdate=opts.get(r'update'))
+    q.restore(
+        repo, rev, delete=opts.get(r'delete'), qupdate=opts.get(r'update')
+    )
     q.savedirty()
     return 0
 
-@command("qsave",
-         [('c', 'copy', None, _('copy patch directory')),
-          ('n', 'name', '',
-           _('copy directory name'), _('NAME')),
-          ('e', 'empty', None, _('clear queue status file')),
-          ('f', 'force', None, _('force copy'))] + cmdutil.commitopts,
-         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qsave",
+    [
+        (b'c', b'copy', None, _(b'copy patch directory')),
+        (b'n', b'name', b'', _(b'copy directory name'), _(b'NAME')),
+        (b'e', b'empty', None, _(b'clear queue status file')),
+        (b'f', b'force', None, _(b'force copy')),
+    ]
+    + cmdutil.commitopts,
+    _(b'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def save(ui, repo, **opts):
     """save current queue state (DEPRECATED)
 
@@ -3159,36 +3663,43 @@
     ret = q.save(repo, msg=message)
     if ret:
         return ret
-    q.savedirty() # save to .hg/patches before copying
-    if opts.get('copy'):
+    q.savedirty()  # save to .hg/patches before copying
+    if opts.get(b'copy'):
         path = q.path
-        if opts.get('name'):
-            newpath = os.path.join(q.basepath, opts.get('name'))
+        if opts.get(b'name'):
+            newpath = os.path.join(q.basepath, opts.get(b'name'))
             if os.path.exists(newpath):
                 if not os.path.isdir(newpath):
-                    raise error.Abort(_('destination %s exists and is not '
-                                       'a directory') % newpath)
-                if not opts.get('force'):
-                    raise error.Abort(_('destination %s exists, '
-                                       'use -f to force') % newpath)
+                    raise error.Abort(
+                        _(b'destination %s exists and is not a directory')
+                        % newpath
+                    )
+                if not opts.get(b'force'):
+                    raise error.Abort(
+                        _(b'destination %s exists, use -f to force') % newpath
+                    )
         else:
             newpath = savename(path)
-        ui.warn(_("copy %s to %s\n") % (path, newpath))
+        ui.warn(_(b"copy %s to %s\n") % (path, newpath))
         util.copyfiles(path, newpath)
-    if opts.get('empty'):
+    if opts.get(b'empty'):
         del q.applied[:]
         q.applieddirty = True
         q.savedirty()
     return 0
 
 
-@command("qselect",
-         [('n', 'none', None, _('disable all guards')),
-          ('s', 'series', None, _('list all guards in series file')),
-          ('', 'pop', None, _('pop to before first guarded applied patch')),
-          ('', 'reapply', None, _('pop, then reapply patches'))],
-         _('hg qselect [OPTION]... [GUARD]...'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+@command(
+    b"qselect",
+    [
+        (b'n', b'none', None, _(b'disable all guards')),
+        (b's', b'series', None, _(b'list all guards in series file')),
+        (b'', b'pop', None, _(b'pop to before first guarded applied patch')),
+        (b'', b'reapply', None, _(b'pop, then reapply patches')),
+    ],
+    _(b'hg qselect [OPTION]... [GUARD]...'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def select(ui, repo, *args, **opts):
     '''set or print guarded patches to push
 
@@ -3228,27 +3739,37 @@
     opts = pycompat.byteskwargs(opts)
     guards = q.active()
     pushable = lambda i: q.pushable(q.applied[i].name)[0]
-    if args or opts.get('none'):
+    if args or opts.get(b'none'):
         old_unapplied = q.unapplied(repo)
-        old_guarded = [i for i in pycompat.xrange(len(q.applied))
-                       if not pushable(i)]
+        old_guarded = [
+            i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
+        ]
         q.setactive(args)
         q.savedirty()
         if not args:
-            ui.status(_('guards deactivated\n'))
-        if not opts.get('pop') and not opts.get('reapply'):
+            ui.status(_(b'guards deactivated\n'))
+        if not opts.get(b'pop') and not opts.get(b'reapply'):
             unapplied = q.unapplied(repo)
-            guarded = [i for i in pycompat.xrange(len(q.applied))
-                       if not pushable(i)]
+            guarded = [
+                i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
+            ]
             if len(unapplied) != len(old_unapplied):
-                ui.status(_('number of unguarded, unapplied patches has '
-                            'changed from %d to %d\n') %
-                          (len(old_unapplied), len(unapplied)))
+                ui.status(
+                    _(
+                        b'number of unguarded, unapplied patches has '
+                        b'changed from %d to %d\n'
+                    )
+                    % (len(old_unapplied), len(unapplied))
+                )
             if len(guarded) != len(old_guarded):
-                ui.status(_('number of guarded, applied patches has changed '
-                            'from %d to %d\n') %
-                          (len(old_guarded), len(guarded)))
-    elif opts.get('series'):
+                ui.status(
+                    _(
+                        b'number of guarded, applied patches has changed '
+                        b'from %d to %d\n'
+                    )
+                    % (len(old_guarded), len(guarded))
+                )
+    elif opts.get(b'series'):
         guards = {}
         noguards = 0
         for gs in q.seriesguards:
@@ -3258,29 +3779,29 @@
                 guards.setdefault(g, 0)
                 guards[g] += 1
         if ui.verbose:
-            guards['NONE'] = noguards
+            guards[b'NONE'] = noguards
         guards = list(guards.items())
         guards.sort(key=lambda x: x[0][1:])
         if guards:
-            ui.note(_('guards in series file:\n'))
+            ui.note(_(b'guards in series file:\n'))
             for guard, count in guards:
-                ui.note('%2d  ' % count)
-                ui.write(guard, '\n')
+                ui.note(b'%2d  ' % count)
+                ui.write(guard, b'\n')
         else:
-            ui.note(_('no guards in series file\n'))
+            ui.note(_(b'no guards in series file\n'))
     else:
         if guards:
-            ui.note(_('active guards:\n'))
+            ui.note(_(b'active guards:\n'))
             for g in guards:
-                ui.write(g, '\n')
+                ui.write(g, b'\n')
         else:
-            ui.write(_('no active guards\n'))
-    reapply = opts.get('reapply') and q.applied and q.applied[-1].name
+            ui.write(_(b'no active guards\n'))
+    reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
     popped = False
-    if opts.get('pop') or opts.get('reapply'):
+    if opts.get(b'pop') or opts.get(b'reapply'):
         for i in pycompat.xrange(len(q.applied)):
             if not pushable(i):
-                ui.status(_('popping guarded patches\n'))
+                ui.status(_(b'popping guarded patches\n'))
                 popped = True
                 if i == 0:
                     q.pop(repo, all=True)
@@ -3290,15 +3811,18 @@
     if popped:
         try:
             if reapply:
-                ui.status(_('reapplying unguarded patches\n'))
+                ui.status(_(b'reapplying unguarded patches\n'))
                 q.push(repo, reapply)
         finally:
             q.savedirty()
 
-@command("qfinish",
-         [('a', 'applied', None, _('finish all applied changesets'))],
-         _('hg qfinish [-a] [REV]...'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qfinish",
+    [(b'a', b'applied', None, _(b'finish all applied changesets'))],
+    _(b'hg qfinish [-a] [REV]...'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def finish(ui, repo, *revrange, **opts):
     """move applied patches into repository history
 
@@ -3318,18 +3842,18 @@
     Returns 0 on success.
     """
     if not opts.get(r'applied') and not revrange:
-        raise error.Abort(_('no revisions specified'))
+        raise error.Abort(_(b'no revisions specified'))
     elif opts.get(r'applied'):
-        revrange = ('qbase::qtip',) + revrange
+        revrange = (b'qbase::qtip',) + revrange
 
     q = repo.mq
     if not q.applied:
-        ui.status(_('no patches applied\n'))
+        ui.status(_(b'no patches applied\n'))
         return 0
 
     revs = scmutil.revrange(repo, revrange)
-    if repo['.'].rev() in revs and repo[None].files():
-        ui.warn(_('warning: uncommitted changes in the working directory\n'))
+    if repo[b'.'].rev() in revs and repo[None].files():
+        ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
     # queue.finish may change phases but leaves the responsibility to lock
     # the repo to the caller to avoid deadlock with wlock. This command code
     # is responsible for this locking.
@@ -3338,16 +3862,20 @@
         q.savedirty()
     return 0
 
-@command("qqueue",
-         [('l', 'list', False, _('list all available queues')),
-          ('', 'active', False, _('print name of active queue')),
-          ('c', 'create', False, _('create new queue')),
-          ('', 'rename', False, _('rename active queue')),
-          ('', 'delete', False, _('delete reference to queue')),
-          ('', 'purge', False, _('delete queue, and remove patch dir')),
-         ],
-         _('[OPTION] [QUEUE]'),
-         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b"qqueue",
+    [
+        (b'l', b'list', False, _(b'list all available queues')),
+        (b'', b'active', False, _(b'print name of active queue')),
+        (b'c', b'create', False, _(b'create new queue')),
+        (b'', b'rename', False, _(b'rename active queue')),
+        (b'', b'delete', False, _(b'delete reference to queue')),
+        (b'', b'purge', False, _(b'delete queue, and remove patch dir')),
+    ],
+    _(b'[OPTION] [QUEUE]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def qqueue(ui, repo, name=None, **opts):
     '''manage multiple patch queues
 
@@ -3370,19 +3898,19 @@
     Returns 0 on success.
     '''
     q = repo.mq
-    _defaultqueue = 'patches'
-    _allqueues = 'patches.queues'
-    _activequeue = 'patches.queue'
+    _defaultqueue = b'patches'
+    _allqueues = b'patches.queues'
+    _activequeue = b'patches.queue'
 
     def _getcurrent():
         cur = os.path.basename(q.path)
-        if cur.startswith('patches-'):
+        if cur.startswith(b'patches-'):
             cur = cur[8:]
         return cur
 
     def _noqueues():
         try:
-            fh = repo.vfs(_allqueues, 'r')
+            fh = repo.vfs(_allqueues, b'r')
             fh.close()
         except IOError:
             return True
@@ -3393,7 +3921,7 @@
         current = _getcurrent()
 
         try:
-            fh = repo.vfs(_allqueues, 'r')
+            fh = repo.vfs(_allqueues, b'r')
             queues = [queue.strip() for queue in fh if queue.strip()]
             fh.close()
             if current not in queues:
@@ -3405,107 +3933,114 @@
 
     def _setactive(name):
         if q.applied:
-            raise error.Abort(_('new queue created, but cannot make active '
-                               'as patches are applied'))
+            raise error.Abort(
+                _(
+                    b'new queue created, but cannot make active '
+                    b'as patches are applied'
+                )
+            )
         _setactivenocheck(name)
 
     def _setactivenocheck(name):
-        fh = repo.vfs(_activequeue, 'w')
-        if name != 'patches':
+        fh = repo.vfs(_activequeue, b'w')
+        if name != b'patches':
             fh.write(name)
         fh.close()
 
     def _addqueue(name):
-        fh = repo.vfs(_allqueues, 'a')
-        fh.write('%s\n' % (name,))
+        fh = repo.vfs(_allqueues, b'a')
+        fh.write(b'%s\n' % (name,))
         fh.close()
 
     def _queuedir(name):
-        if name == 'patches':
-            return repo.vfs.join('patches')
+        if name == b'patches':
+            return repo.vfs.join(b'patches')
         else:
-            return repo.vfs.join('patches-' + name)
+            return repo.vfs.join(b'patches-' + name)
 
     def _validname(name):
         for n in name:
-            if n in ':\\/.':
+            if n in b':\\/.':
                 return False
         return True
 
     def _delete(name):
         if name not in existing:
-            raise error.Abort(_('cannot delete queue that does not exist'))
+            raise error.Abort(_(b'cannot delete queue that does not exist'))
 
         current = _getcurrent()
 
         if name == current:
-            raise error.Abort(_('cannot delete currently active queue'))
-
-        fh = repo.vfs('patches.queues.new', 'w')
+            raise error.Abort(_(b'cannot delete currently active queue'))
+
+        fh = repo.vfs(b'patches.queues.new', b'w')
         for queue in existing:
             if queue == name:
                 continue
-            fh.write('%s\n' % (queue,))
+            fh.write(b'%s\n' % (queue,))
         fh.close()
-        repo.vfs.rename('patches.queues.new', _allqueues)
+        repo.vfs.rename(b'patches.queues.new', _allqueues)
 
     opts = pycompat.byteskwargs(opts)
-    if not name or opts.get('list') or opts.get('active'):
+    if not name or opts.get(b'list') or opts.get(b'active'):
         current = _getcurrent()
-        if opts.get('active'):
-            ui.write('%s\n' % (current,))
+        if opts.get(b'active'):
+            ui.write(b'%s\n' % (current,))
             return
         for queue in _getqueues():
-            ui.write('%s' % (queue,))
+            ui.write(b'%s' % (queue,))
             if queue == current and not ui.quiet:
-                ui.write(_(' (active)\n'))
+                ui.write(_(b' (active)\n'))
             else:
-                ui.write('\n')
+                ui.write(b'\n')
         return
 
     if not _validname(name):
         raise error.Abort(
-                _('invalid queue name, may not contain the characters ":\\/."'))
+            _(b'invalid queue name, may not contain the characters ":\\/."')
+        )
 
     with repo.wlock():
         existing = _getqueues()
 
-        if opts.get('create'):
+        if opts.get(b'create'):
             if name in existing:
-                raise error.Abort(_('queue "%s" already exists') % name)
+                raise error.Abort(_(b'queue "%s" already exists') % name)
             if _noqueues():
                 _addqueue(_defaultqueue)
             _addqueue(name)
             _setactive(name)
-        elif opts.get('rename'):
+        elif opts.get(b'rename'):
             current = _getcurrent()
             if name == current:
-                raise error.Abort(_('can\'t rename "%s" to its current name')
-                                  % name)
+                raise error.Abort(
+                    _(b'can\'t rename "%s" to its current name') % name
+                )
             if name in existing:
-                raise error.Abort(_('queue "%s" already exists') % name)
+                raise error.Abort(_(b'queue "%s" already exists') % name)
 
             olddir = _queuedir(current)
             newdir = _queuedir(name)
 
             if os.path.exists(newdir):
-                raise error.Abort(_('non-queue directory "%s" already exists') %
-                        newdir)
-
-            fh = repo.vfs('patches.queues.new', 'w')
+                raise error.Abort(
+                    _(b'non-queue directory "%s" already exists') % newdir
+                )
+
+            fh = repo.vfs(b'patches.queues.new', b'w')
             for queue in existing:
                 if queue == current:
-                    fh.write('%s\n' % (name,))
+                    fh.write(b'%s\n' % (name,))
                     if os.path.exists(olddir):
                         util.rename(olddir, newdir)
                 else:
-                    fh.write('%s\n' % (queue,))
+                    fh.write(b'%s\n' % (queue,))
             fh.close()
-            repo.vfs.rename('patches.queues.new', _allqueues)
+            repo.vfs.rename(b'patches.queues.new', _allqueues)
             _setactivenocheck(name)
-        elif opts.get('delete'):
+        elif opts.get(b'delete'):
             _delete(name)
-        elif opts.get('purge'):
+        elif opts.get(b'purge'):
             if name in existing:
                 _delete(name)
             qdir = _queuedir(name)
@@ -3513,13 +4048,14 @@
                 shutil.rmtree(qdir)
         else:
             if name not in existing:
-                raise error.Abort(_('use --create to create a new queue'))
+                raise error.Abort(_(b'use --create to create a new queue'))
             _setactive(name)
 
+
 def mqphasedefaults(repo, roots):
     """callback used to set mq changeset as secret when no phase data exists"""
     if repo.mq.applied:
-        if repo.ui.configbool('mq', 'secret'):
+        if repo.ui.configbool(b'mq', b'secret'):
             mqphase = phases.secret
         else:
             mqphase = phases.draft
@@ -3527,6 +4063,7 @@
         roots[mqphase].add(qbase.node())
     return roots
 
+
 def reposetup(ui, repo):
     class mqrepo(repo.__class__):
         @localrepo.unfilteredpropertycache
@@ -3546,16 +4083,25 @@
                 if any(p in patches for p in parents):
                     raise error.Abort(errmsg)
 
-        def commit(self, text="", user=None, date=None, match=None,
-                   force=False, editor=False, extra=None):
+        def commit(
+            self,
+            text=b"",
+            user=None,
+            date=None,
+            match=None,
+            force=False,
+            editor=False,
+            extra=None,
+        ):
             if extra is None:
                 extra = {}
             self.abortifwdirpatched(
-                _('cannot commit over an applied mq patch'),
-                force)
-
-            return super(mqrepo, self).commit(text, user, date, match, force,
-                                              editor, extra)
+                _(b'cannot commit over an applied mq patch'), force
+            )
+
+            return super(mqrepo, self).commit(
+                text, user, date, match, force, editor, extra
+            )
 
         def checkpush(self, pushop):
             if self.mq.applied and self.mq.checkapplied and not pushop.force:
@@ -3573,7 +4119,7 @@
                 # looking for pushed and shared changeset
                 for node in outapplied:
                     if self[node].phase() < phases.secret:
-                        raise error.Abort(_('source has mq patches applied'))
+                        raise error.Abort(_(b'source has mq patches applied'))
                 # no non-secret patches pushed
             super(mqrepo, self).checkpush(pushop)
 
@@ -3591,8 +4137,10 @@
                 # for now ignore filtering business
                 self.unfiltered().changelog.rev(mqtags[-1][0])
             except error.LookupError:
-                self.ui.warn(_('mq status file refers to unknown node %s\n')
-                             % short(mqtags[-1][0]))
+                self.ui.warn(
+                    _(b'mq status file refers to unknown node %s\n')
+                    % short(mqtags[-1][0])
+                )
                 return result
 
             # do not add fake tags for filtered revisions
@@ -3601,14 +4149,16 @@
             if not mqtags:
                 return result
 
-            mqtags.append((mqtags[-1][0], 'qtip'))
-            mqtags.append((mqtags[0][0], 'qbase'))
-            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
+            mqtags.append((mqtags[-1][0], b'qtip'))
+            mqtags.append((mqtags[0][0], b'qbase'))
+            mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent'))
             tags = result[0]
             for patch in mqtags:
                 if patch[1] in tags:
-                    self.ui.warn(_('tag %s overrides mq patch of the same '
-                                   'name\n') % patch[1])
+                    self.ui.warn(
+                        _(b'tag %s overrides mq patch of the same name\n')
+                        % patch[1]
+                    )
                 else:
                     tags[patch[1]] = patch[0]
 
@@ -3619,13 +4169,17 @@
 
         repo._phasedefaults.append(mqphasedefaults)
 
+
 def mqimport(orig, ui, repo, *args, **kwargs):
-    if (util.safehasattr(repo, 'abortifwdirpatched')
-        and not kwargs.get(r'no_commit', False)):
-        repo.abortifwdirpatched(_('cannot import over an applied patch'),
-                                   kwargs.get(r'force'))
+    if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get(
+        r'no_commit', False
+    ):
+        repo.abortifwdirpatched(
+            _(b'cannot import over an applied patch'), kwargs.get(r'force')
+        )
     return orig(ui, repo, *args, **kwargs)
 
+
 def mqinit(orig, ui, *args, **kwargs):
     mq = kwargs.pop(r'mq', None)
 
@@ -3635,16 +4189,19 @@
     if args:
         repopath = args[0]
         if not hg.islocal(repopath):
-            raise error.Abort(_('only a local queue repository '
-                               'may be initialized'))
+            raise error.Abort(
+                _(b'only a local queue repository may be initialized')
+            )
     else:
         repopath = cmdutil.findrepo(encoding.getcwd())
         if not repopath:
-            raise error.Abort(_('there is no Mercurial repository here '
-                               '(.hg not found)'))
+            raise error.Abort(
+                _(b'there is no Mercurial repository here (.hg not found)')
+            )
     repo = hg.repository(ui, repopath)
     return qinit(ui, repo, True)
 
+
 def mqcommand(orig, ui, repo, *args, **kwargs):
     """Add --mq option to operate on patch repository instead of main"""
 
@@ -3657,50 +4214,55 @@
     q = repo.mq
     r = q.qrepo()
     if not r:
-        raise error.Abort(_('no queue repository'))
+        raise error.Abort(_(b'no queue repository'))
     return orig(r.ui, r, *args, **kwargs)
 
+
 def summaryhook(ui, repo):
     q = repo.mq
     m = []
     a, u = len(q.applied), len(q.unapplied(repo))
     if a:
-        m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
+        m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a)
     if u:
-        m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
+        m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u)
     if m:
         # i18n: column positioning for "hg summary"
-        ui.write(_("mq:     %s\n") % ', '.join(m))
+        ui.write(_(b"mq:     %s\n") % b', '.join(m))
     else:
         # i18n: column positioning for "hg summary"
-        ui.note(_("mq:     (empty queue)\n"))
+        ui.note(_(b"mq:     (empty queue)\n"))
+
 
 revsetpredicate = registrar.revsetpredicate()
 
-@revsetpredicate('mq()')
+
+@revsetpredicate(b'mq()')
 def revsetmq(repo, subset, x):
     """Changesets managed by MQ.
     """
-    revsetlang.getargs(x, 0, 0, _("mq takes no arguments"))
+    revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments"))
     applied = {repo[r.node].rev() for r in repo.mq.applied}
     return smartset.baseset([r for r in subset if r in applied])
 
+
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = [revsetmq]
 
+
 def extsetup(ui):
     # Ensure mq wrappers are called first, regardless of extension load order,
     # by NOT wrapping in uisetup() and instead deferring to init stage two here.
-    mqopt = [('', 'mq', None, _("operate on patch repository"))]
-
-    extensions.wrapcommand(commands.table, 'import', mqimport)
-    cmdutil.summaryhooks.add('mq', summaryhook)
-
-    entry = extensions.wrapcommand(commands.table, 'init', mqinit)
+    mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))]
+
+    extensions.wrapcommand(commands.table, b'import', mqimport)
+    cmdutil.summaryhooks.add(b'mq', summaryhook)
+
+    entry = extensions.wrapcommand(commands.table, b'init', mqinit)
     entry[1].extend(mqopt)
 
     def dotable(cmdtable):
-        for cmd, entry in cmdtable.iteritems():
+        for cmd, entry in pycompat.iteritems(cmdtable):
             cmd = cmdutil.parsealiases(cmd)[0]
             func = entry[0]
             if func.norepo:
@@ -3714,10 +4276,13 @@
         if extmodule.__file__ != __file__:
             dotable(getattr(extmodule, 'cmdtable', {}))
 
-colortable = {'qguard.negative': 'red',
-              'qguard.positive': 'yellow',
-              'qguard.unguarded': 'green',
-              'qseries.applied': 'blue bold underline',
-              'qseries.guarded': 'black bold',
-              'qseries.missing': 'red bold',
-              'qseries.unapplied': 'black bold'}
+
+colortable = {
+    b'qguard.negative': b'red',
+    b'qguard.positive': b'yellow',
+    b'qguard.unguarded': b'green',
+    b'qseries.applied': b'blue bold underline',
+    b'qseries.guarded': b'black bold',
+    b'qseries.missing': b'red bold',
+    b'qseries.unapplied': b'black bold',
+}
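
The mq hunks above all follow the same porting recipe: command names,
option tables, and ui strings become bytes literals, and each command
normalizes its keyword options once at the top via pycompat.byteskwargs
so the body can index with b'...' keys. A minimal sketch of that shim,
assuming only the standard library (an illustration of the idea, not
Mercurial's actual implementation):

    import sys

    def byteskwargs(opts):
        # On Python 3, **kwargs keys arrive as str; re-encode them as
        # latin-1 bytes so command bodies can index uniformly with
        # b'...' literals (e.g. opts.get(b'force')), matching Python 2.
        if sys.version_info[0] < 3:
            return opts
        return {k.encode('latin-1'): v for k, v in opts.items()}

With this shim, a body written as "opts = byteskwargs(opts)" followed by
opts.get(b'force') behaves identically under both interpreters, which is
exactly the shape the converted qpush/qpop bodies take.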
--- a/hgext/narrow/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/narrow/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,14 +12,15 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 from mercurial import (
     localrepo,
     registrar,
-    repository,
 )
 
+from mercurial.interfaces import repository
+
 from . import (
     narrowbundle2,
     narrowcommands,
@@ -39,17 +40,21 @@
 # of this writing in late 2017, all repositories large enough for
 # ellipsis nodes to be a hard requirement also enforce strictly linear
 # history for other scaling reasons.
-configitem('experimental', 'narrowservebrokenellipses',
-           default=False,
-           alias=[('narrow', 'serveellipses')],
+configitem(
+    b'experimental',
+    b'narrowservebrokenellipses',
+    default=False,
+    alias=[(b'narrow', b'serveellipses')],
 )
 
 # Export the commands table for Mercurial to see.
 cmdtable = narrowcommands.table
 
+
 def featuresetup(ui, features):
     features.add(repository.NARROW_REQUIREMENT)
 
+
 def uisetup(ui):
     """Wraps user-facing mercurial commands with narrow-aware versions."""
     localrepo.featuresetupfuncs.add(featuresetup)
@@ -57,15 +62,17 @@
     narrowcommands.setup()
     narrowwirepeer.uisetup()
 
+
 def reposetup(ui, repo):
     """Wraps local repositories with narrow repo support."""
     if not repo.local():
         return
 
-    repo.ui.setconfig('experimental', 'narrow', True, 'narrow-ext')
+    repo.ui.setconfig(b'experimental', b'narrow', True, b'narrow-ext')
     if repository.NARROW_REQUIREMENT in repo.requirements:
         narrowrepo.wraprepo(repo)
         narrowwirepeer.reposetup(repo)
 
+
 templatekeyword = narrowtemplates.templatekeyword
 revsetpredicate = narrowtemplates.revsetpredicate
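
The __init__.py changes show the standard extension skeleton this series
keeps converting: register config items through registrar, hook
featuresetup/uisetup/reposetup, and re-export template and revset tables.
A compressed sketch of that skeleton, where the myext name and its
b'enabled' knob are hypothetical stand-ins:

    from mercurial import registrar

    testedwith = b'ships-with-hg-core'

    configtable = {}
    configitem = registrar.configitem(configtable)
    configitem(b'myext', b'enabled', default=False)  # hypothetical knob

    def reposetup(ui, repo):
        # Only local repositories can be wrapped; remote peers are
        # skipped, mirroring the narrow extension's early return above.
        if not repo.local():
            return
        if repo.ui.configbool(b'myext', b'enabled'):
            ui.note(b'myext: active on %s\n' % repo.root)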
--- a/hgext/narrow/narrowbundle2.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/narrow/narrowbundle2.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,10 +11,7 @@
 import struct
 
 from mercurial.i18n import _
-from mercurial.node import (
-    bin,
-    nullid,
-)
+from mercurial.node import nullid
 from mercurial import (
     bundle2,
     changegroup,
@@ -23,124 +20,151 @@
     localrepo,
     narrowspec,
     repair,
-    repository,
     util,
     wireprototypes,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.interfaces import repository
+from mercurial.utils import stringutil
 
-_NARROWACL_SECTION = 'narrowacl'
-_CHANGESPECPART = 'narrow:changespec'
-_RESSPECS = 'narrow:responsespec'
-_SPECPART = 'narrow:spec'
-_SPECPART_INCLUDE = 'include'
-_SPECPART_EXCLUDE = 'exclude'
-_KILLNODESIGNAL = 'KILL'
-_DONESIGNAL = 'DONE'
-_ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
-_ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
+_NARROWACL_SECTION = b'narrowacl'
+_CHANGESPECPART = b'narrow:changespec'
+_RESSPECS = b'narrow:responsespec'
+_SPECPART = b'narrow:spec'
+_SPECPART_INCLUDE = b'include'
+_SPECPART_EXCLUDE = b'exclude'
+_KILLNODESIGNAL = b'KILL'
+_DONESIGNAL = b'DONE'
+_ELIDEDCSHEADER = b'>20s20s20sl'  # cset id, p1, p2, len(text)
+_ELIDEDMFHEADER = b'>20s20s20s20sl'  # manifest id, p1, p2, link id, len(text)
 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
 
 # Serve a changegroup for a client with a narrow clone.
-def getbundlechangegrouppart_narrow(bundler, repo, source,
-                                    bundlecaps=None, b2caps=None, heads=None,
-                                    common=None, **kwargs):
-    assert repo.ui.configbool('experimental', 'narrowservebrokenellipses')
+def getbundlechangegrouppart_narrow(
+    bundler,
+    repo,
+    source,
+    bundlecaps=None,
+    b2caps=None,
+    heads=None,
+    common=None,
+    **kwargs
+):
+    assert repo.ui.configbool(b'experimental', b'narrowservebrokenellipses')
 
-    cgversions = b2caps.get('changegroup')
-    cgversions = [v for v in cgversions
-                  if v in changegroup.supportedoutgoingversions(repo)]
+    cgversions = b2caps.get(b'changegroup')
+    cgversions = [
+        v
+        for v in cgversions
+        if v in changegroup.supportedoutgoingversions(repo)
+    ]
     if not cgversions:
-        raise ValueError(_('no common changegroup version'))
+        raise ValueError(_(b'no common changegroup version'))
     version = max(cgversions)
 
-    oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
-    oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
-    newinclude = sorted(filter(bool, kwargs.get(r'includepats', [])))
-    newexclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
-    known = {bin(n) for n in kwargs.get(r'known', [])}
-    generateellipsesbundle2(bundler, repo, oldinclude, oldexclude, newinclude,
-                            newexclude, version, common, heads, known,
-                            kwargs.get(r'depth', None))
+    include = sorted(filter(bool, kwargs.get(r'includepats', [])))
+    exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
+    generateellipsesbundle2(
+        bundler,
+        repo,
+        include,
+        exclude,
+        version,
+        common,
+        heads,
+        kwargs.get(r'depth', None),
+    )
 
-def generateellipsesbundle2(bundler, repo, oldinclude, oldexclude, newinclude,
-                            newexclude, version, common, heads, known, depth):
-    newmatch = narrowspec.match(repo.root, include=newinclude,
-                                exclude=newexclude)
+
+def generateellipsesbundle2(
+    bundler, repo, include, exclude, version, common, heads, depth,
+):
+    match = narrowspec.match(repo.root, include=include, exclude=exclude)
     if depth is not None:
         depth = int(depth)
         if depth < 1:
-            raise error.Abort(_('depth must be positive, got %d') % depth)
+            raise error.Abort(_(b'depth must be positive, got %d') % depth)
 
     heads = set(heads or repo.heads())
     common = set(common or [nullid])
-    if known and (oldinclude != newinclude or oldexclude != newexclude):
-        # Steps:
-        # 1. Send kill for "$known & ::common"
-        #
-        # 2. Send changegroup for ::common
-        #
-        # 3. Proceed.
-        #
-        # In the future, we can send kills for only the specific
-        # nodes we know should go away or change shape, and then
-        # send a data stream that tells the client something like this:
-        #
-        # a) apply this changegroup
-        # b) apply nodes XXX, YYY, ZZZ that you already have
-        # c) goto a
-        #
-        # until they've built up the full new state.
-        # Convert to revnums and intersect with "common". The client should
-        # have made it a subset of "common" already, but let's be safe.
-        known = set(repo.revs("%ln & ::%ln", known, common))
-        # TODO: we could send only roots() of this set, and the
-        # list of nodes in common, and the client could work out
-        # what to strip, instead of us explicitly sending every
-        # single node.
-        deadrevs = known
-        def genkills():
-            for r in deadrevs:
-                yield _KILLNODESIGNAL
-                yield repo.changelog.node(r)
-            yield _DONESIGNAL
-        bundler.newpart(_CHANGESPECPART, data=genkills())
-        newvisit, newfull, newellipsis = exchange._computeellipsis(
-            repo, set(), common, known, newmatch)
-        if newvisit:
-            packer = changegroup.getbundler(version, repo,
-                                            matcher=newmatch,
-                                            ellipses=True,
-                                            shallow=depth is not None,
-                                            ellipsisroots=newellipsis,
-                                            fullnodes=newfull)
-            cgdata = packer.generate(common, newvisit, False, 'narrow_widen')
-
-            part = bundler.newpart('changegroup', data=cgdata)
-            part.addparam('version', version)
-            if 'treemanifest' in repo.requirements:
-                part.addparam('treemanifest', '1')
 
     visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
-        repo, common, heads, set(), newmatch, depth=depth)
+        repo, common, heads, set(), match, depth=depth
+    )
 
-    repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
+    repo.ui.debug(b'Found %d relevant revs\n' % len(relevant_nodes))
     if visitnodes:
-        packer = changegroup.getbundler(version, repo,
-                                        matcher=newmatch,
-                                        ellipses=True,
-                                        shallow=depth is not None,
-                                        ellipsisroots=ellipsisroots,
-                                        fullnodes=relevant_nodes)
-        cgdata = packer.generate(common, visitnodes, False, 'narrow_widen')
+        packer = changegroup.getbundler(
+            version,
+            repo,
+            matcher=match,
+            ellipses=True,
+            shallow=depth is not None,
+            ellipsisroots=ellipsisroots,
+            fullnodes=relevant_nodes,
+        )
+        cgdata = packer.generate(common, visitnodes, False, b'narrow_widen')
+
+        part = bundler.newpart(b'changegroup', data=cgdata)
+        part.addparam(b'version', version)
+        if b'treemanifest' in repo.requirements:
+            part.addparam(b'treemanifest', b'1')
+
 
-        part = bundler.newpart('changegroup', data=cgdata)
-        part.addparam('version', version)
-        if 'treemanifest' in repo.requirements:
-            part.addparam('treemanifest', '1')
+def generate_ellipses_bundle2_for_widening(
+    bundler, repo, oldmatch, newmatch, version, common, known,
+):
+    common = set(common or [nullid])
+    # Steps:
+    # 1. Send kill for "$known & ::common"
+    #
+    # 2. Send changegroup for ::common
+    #
+    # 3. Proceed.
+    #
+    # In the future, we can send kills for only the specific
+    # nodes we know should go away or change shape, and then
+    # send a data stream that tells the client something like this:
+    #
+    # a) apply this changegroup
+    # b) apply nodes XXX, YYY, ZZZ that you already have
+    # c) goto a
+    #
+    # until they've built up the full new state.
+    knownrevs = {repo.changelog.rev(n) for n in known}
+    # TODO: we could send only roots() of this set, and the
+    # list of nodes in common, and the client could work out
+    # what to strip, instead of us explicitly sending every
+    # single node.
+    deadrevs = knownrevs
+
+    def genkills():
+        for r in deadrevs:
+            yield _KILLNODESIGNAL
+            yield repo.changelog.node(r)
+        yield _DONESIGNAL
+
+    bundler.newpart(_CHANGESPECPART, data=genkills())
+    newvisit, newfull, newellipsis = exchange._computeellipsis(
+        repo, set(), common, knownrevs, newmatch
+    )
+    if newvisit:
+        packer = changegroup.getbundler(
+            version,
+            repo,
+            matcher=newmatch,
+            ellipses=True,
+            shallow=False,
+            ellipsisroots=newellipsis,
+            fullnodes=newfull,
+        )
+        cgdata = packer.generate(common, newvisit, False, b'narrow_widen')
+
+        part = bundler.newpart(b'changegroup', data=cgdata)
+        part.addparam(b'version', version)
+        if b'treemanifest' in repo.requirements:
+            part.addparam(b'treemanifest', b'1')
+
 
 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
 def _handlechangespec_2(op, inpart):
@@ -148,8 +172,8 @@
     # released. New servers will send a mandatory bundle2 part named
     # 'Narrowspec' and will send specs as data instead of params.
     # Refer to issue5952 and 6019
-    includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
-    excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
+    includepats = set(inpart.params.get(_SPECPART_INCLUDE, b'').splitlines())
+    excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, b'').splitlines())
     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)
 
@@ -159,10 +183,11 @@
     op.repo.setnarrowpats(includepats, excludepats)
     narrowspec.copytoworkingcopy(op.repo)
 
+
 @bundle2.parthandler(_RESSPECS)
 def _handlenarrowspecs(op, inpart):
     data = inpart.read()
-    inc, exc = data.split('\0')
+    inc, exc = data.split(b'\0')
     includepats = set(inc.splitlines())
     excludepats = set(exc.splitlines())
     narrowspec.validatepatterns(includepats)
@@ -174,6 +199,7 @@
     op.repo.setnarrowpats(includepats, excludepats)
     narrowspec.copytoworkingcopy(op.repo)
 
+
 @bundle2.parthandler(_CHANGESPECPART)
 def _handlechangespec(op, inpart):
     repo = op.repo
@@ -197,18 +223,22 @@
                 clkills.add(ck)
         else:
             raise error.Abort(
-                _('unexpected changespec node chunk type: %s') % chunksignal)
+                _(b'unexpected changespec node chunk type: %s') % chunksignal
+            )
         chunksignal = changegroup.readexactly(inpart, 4)
 
     if clkills:
         # preserve bookmarks that repair.strip() would otherwise strip
         op._bookmarksbackup = repo._bookmarks
+
         class dummybmstore(dict):
             def applychanges(self, repo, tr, changes):
                 pass
+
         localrepo.localrepository._bookmarks.set(repo, dummybmstore())
-        chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
-                                 topic='widen')
+        chgrpfile = repair.strip(
+            op.ui, repo, list(clkills), backup=True, topic=b'widen'
+        )
         if chgrpfile:
             op._widen_uninterr = repo.ui.uninterruptible()
             op._widen_uninterr.__enter__()
@@ -221,6 +251,7 @@
     if util.safehasattr(repo, 'setnewnarrowpats'):
         repo.setnewnarrowpats()
 
+
 def handlechangegroup_widen(op, inpart):
     """Changegroup exchange handler which restores temporarily-stripped nodes"""
     # We saved a bundle with stripped node data we must now restore.
@@ -232,20 +263,22 @@
     del op._widen_bundle
     vfs = repo.vfs
 
-    ui.note(_("adding branch\n"))
-    f = vfs.open(chgrpfile, "rb")
+    ui.note(_(b"adding branch\n"))
+    f = vfs.open(chgrpfile, b"rb")
     try:
         gen = exchange.readbundle(ui, f, chgrpfile, vfs)
-        if not ui.verbose:
-            # silence internal shuffling chatter
-            ui.pushbuffer()
-        if isinstance(gen, bundle2.unbundle20):
-            with repo.transaction('strip') as tr:
-                bundle2.processbundle(repo, gen, lambda: tr)
-        else:
-            gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
-        if not ui.verbose:
-            ui.popbuffer()
+        # silence internal shuffling chatter
+        override = {(b'ui', b'quiet'): True}
+        if ui.verbose:
+            override = {}
+        with ui.configoverride(override):
+            if isinstance(gen, bundle2.unbundle20):
+                with repo.transaction(b'strip') as tr:
+                    bundle2.processbundle(repo, gen, lambda: tr)
+            else:
+                gen.apply(
+                    repo, b'strip', b'bundle:' + vfs.join(chgrpfile), True
+                )
     finally:
         f.close()
 
@@ -255,47 +288,55 @@
             undovfs.unlink(undofile)
         except OSError as e:
             if e.errno != errno.ENOENT:
-                ui.warn(_('error removing %s: %s\n') %
-                        (undovfs.join(undofile), stringutil.forcebytestr(e)))
+                ui.warn(
+                    _(b'error removing %s: %s\n')
+                    % (undovfs.join(undofile), stringutil.forcebytestr(e))
+                )
 
     # Remove partial backup only if there were no exceptions
     op._widen_uninterr.__exit__(None, None, None)
     vfs.unlink(chgrpfile)
 
+
 def setup():
     """Enable narrow repo support in bundle2-related extension points."""
     getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS
 
-    getbundleargs['narrow'] = 'boolean'
-    getbundleargs['depth'] = 'plain'
-    getbundleargs['oldincludepats'] = 'csv'
-    getbundleargs['oldexcludepats'] = 'csv'
-    getbundleargs['known'] = 'csv'
+    getbundleargs[b'narrow'] = b'boolean'
+    getbundleargs[b'depth'] = b'plain'
+    getbundleargs[b'oldincludepats'] = b'csv'
+    getbundleargs[b'oldexcludepats'] = b'csv'
+    getbundleargs[b'known'] = b'csv'
 
     # Extend changegroup serving to handle requests from narrow clients.
-    origcgfn = exchange.getbundle2partsmapping['changegroup']
+    origcgfn = exchange.getbundle2partsmapping[b'changegroup']
+
     def wrappedcgfn(*args, **kwargs):
         repo = args[1]
         if repo.ui.has_section(_NARROWACL_SECTION):
             kwargs = exchange.applynarrowacl(repo, kwargs)
 
-        if (kwargs.get(r'narrow', False) and
-            repo.ui.configbool('experimental', 'narrowservebrokenellipses')):
+        if kwargs.get(r'narrow', False) and repo.ui.configbool(
+            b'experimental', b'narrowservebrokenellipses'
+        ):
             getbundlechangegrouppart_narrow(*args, **kwargs)
         else:
             origcgfn(*args, **kwargs)
-    exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
+
+    exchange.getbundle2partsmapping[b'changegroup'] = wrappedcgfn
 
     # Extend changegroup receiver so client can fixup after widen requests.
-    origcghandler = bundle2.parthandlermapping['changegroup']
+    origcghandler = bundle2.parthandlermapping[b'changegroup']
+
     def wrappedcghandler(op, inpart):
         origcghandler(op, inpart)
         if util.safehasattr(op, '_widen_bundle'):
             handlechangegroup_widen(op, inpart)
         if util.safehasattr(op, '_bookmarksbackup'):
-            localrepo.localrepository._bookmarks.set(op.repo,
-                                                     op._bookmarksbackup)
+            localrepo.localrepository._bookmarks.set(
+                op.repo, op._bookmarksbackup
+            )
             del op._bookmarksbackup
 
     wrappedcghandler.params = origcghandler.params
-    bundle2.parthandlermapping['changegroup'] = wrappedcghandler
+    bundle2.parthandlermapping[b'changegroup'] = wrappedcghandler
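
setup() above patches two registries in place: the server-side getbundle
parts mapping and the client-side bundle2 part handler table, keeping a
reference to the original callable and copying over its declared params.
The wrapping pattern in isolation, as a hedged sketch (wrap_parthandler
is a hypothetical helper, not part of Mercurial's API):

    from mercurial import bundle2

    def wrap_parthandler(name, hook):
        # Install a handler that runs the original first, then a
        # post-processing hook -- the shape used for b'changegroup' above.
        orig = bundle2.parthandlermapping[name]

        def wrapped(op, inpart):
            orig(op, inpart)
            hook(op, inpart)

        # The params attribute advertises which part parameters the
        # handler accepts; carry it over so parts sent with those
        # parameters are still accepted after wrapping.
        wrapped.params = orig.params
        bundle2.parthandlermapping[name] = wrapped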
--- a/hgext/narrow/narrowcommands.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/narrow/narrowcommands.py	Mon Oct 21 11:09:48 2019 -0400
@@ -25,80 +25,110 @@
     pycompat,
     registrar,
     repair,
-    repository,
     repoview,
     sparse,
     util,
     wireprototypes,
 )
+from mercurial.interfaces import repository
 
 table = {}
 command = registrar.command(table)
 
+
 def setup():
     """Wraps user-facing mercurial commands with narrow-aware versions."""
 
-    entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
-    entry[1].append(('', 'narrow', None,
-                     _("create a narrow clone of select files")))
-    entry[1].append(('', 'depth', '',
-                     _("limit the history fetched by distance from heads")))
-    entry[1].append(('', 'narrowspec', '',
-                     _("read narrowspecs from file")))
+    entry = extensions.wrapcommand(commands.table, b'clone', clonenarrowcmd)
+    entry[1].append(
+        (b'', b'narrow', None, _(b"create a narrow clone of select files"))
+    )
+    entry[1].append(
+        (
+            b'',
+            b'depth',
+            b'',
+            _(b"limit the history fetched by distance from heads"),
+        )
+    )
+    entry[1].append((b'', b'narrowspec', b'', _(b"read narrowspecs from file")))
     # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
-    if 'sparse' not in extensions.enabled():
-        entry[1].append(('', 'include', [],
-                         _("specifically fetch this file/directory")))
+    if b'sparse' not in extensions.enabled():
+        entry[1].append(
+            (b'', b'include', [], _(b"specifically fetch this file/directory"))
+        )
         entry[1].append(
-            ('', 'exclude', [],
-             _("do not fetch this file/directory, even if included")))
+            (
+                b'',
+                b'exclude',
+                [],
+                _(b"do not fetch this file/directory, even if included"),
+            )
+        )
 
-    entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
-    entry[1].append(('', 'depth', '',
-                     _("limit the history fetched by distance from heads")))
+    entry = extensions.wrapcommand(commands.table, b'pull', pullnarrowcmd)
+    entry[1].append(
+        (
+            b'',
+            b'depth',
+            b'',
+            _(b"limit the history fetched by distance from heads"),
+        )
+    )
 
-    extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)
+    extensions.wrapcommand(commands.table, b'archive', archivenarrowcmd)
+
 
 def clonenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
     opts = pycompat.byteskwargs(opts)
     wrappedextraprepare = util.nullcontextmanager()
-    narrowspecfile = opts['narrowspec']
+    narrowspecfile = opts[b'narrowspec']
 
     if narrowspecfile:
         filepath = os.path.join(encoding.getcwd(), narrowspecfile)
-        ui.status(_("reading narrowspec from '%s'\n") % filepath)
+        ui.status(_(b"reading narrowspec from '%s'\n") % filepath)
         try:
             fdata = util.readfile(filepath)
         except IOError as inst:
-            raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
-                              (filepath, encoding.strtolocal(inst.strerror)))
+            raise error.Abort(
+                _(b"cannot read narrowspecs from '%s': %s")
+                % (filepath, encoding.strtolocal(inst.strerror))
+            )
 
-        includes, excludes, profiles = sparse.parseconfig(ui, fdata, 'narrow')
+        includes, excludes, profiles = sparse.parseconfig(ui, fdata, b'narrow')
         if profiles:
-            raise error.Abort(_("cannot specify other files using '%include' in"
-                                " narrowspec"))
+            raise error.Abort(
+                _(
+                    b"cannot specify other files using '%include' in"
+                    b" narrowspec"
+                )
+            )
 
         narrowspec.validatepatterns(includes)
         narrowspec.validatepatterns(excludes)
 
         # narrowspec is passed, so we should assume that the user wants a narrow clone
-        opts['narrow'] = True
-        opts['include'].extend(includes)
-        opts['exclude'].extend(excludes)
+        opts[b'narrow'] = True
+        opts[b'include'].extend(includes)
+        opts[b'exclude'].extend(excludes)
 
-    if opts['narrow']:
+    if opts[b'narrow']:
+
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
 
-            if opts.get('depth'):
-                kwargs['depth'] = opts['depth']
-        wrappedextraprepare = extensions.wrappedfunction(exchange,
-            '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
+            if opts.get(b'depth'):
+                kwargs[b'depth'] = opts[b'depth']
+
+        wrappedextraprepare = extensions.wrappedfunction(
+            exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
+        )
 
     with wrappedextraprepare:
         return orig(ui, repo, *args, **pycompat.strkwargs(opts))
 
+
 def pullnarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps pull command to allow modifying narrow spec."""
     wrappedextraprepare = util.nullcontextmanager()
@@ -107,13 +137,16 @@
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
             if opts.get(r'depth'):
-                kwargs['depth'] = opts[r'depth']
-        wrappedextraprepare = extensions.wrappedfunction(exchange,
-            '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
+                kwargs[b'depth'] = opts[r'depth']
+
+        wrappedextraprepare = extensions.wrappedfunction(
+            exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
+        )
 
     with wrappedextraprepare:
         return orig(ui, repo, *args, **opts)
 
+
 def archivenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps archive command to narrow the default includes."""
     if repository.NARROW_REQUIREMENT in repo.requirements:
@@ -121,46 +154,62 @@
         includes = set(opts.get(r'include', []))
         excludes = set(opts.get(r'exclude', []))
         includes, excludes, unused_invalid = narrowspec.restrictpatterns(
-            includes, excludes, repo_includes, repo_excludes)
+            includes, excludes, repo_includes, repo_excludes
+        )
         if includes:
             opts[r'include'] = includes
         if excludes:
             opts[r'exclude'] = excludes
     return orig(ui, repo, *args, **opts)
 
+
 def pullbundle2extraprepare(orig, pullop, kwargs):
     repo = pullop.repo
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return orig(pullop, kwargs)
 
     if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
-        raise error.Abort(_("server does not support narrow clones"))
+        raise error.Abort(_(b"server does not support narrow clones"))
     orig(pullop, kwargs)
-    kwargs['narrow'] = True
+    kwargs[b'narrow'] = True
     include, exclude = repo.narrowpats
-    kwargs['oldincludepats'] = include
-    kwargs['oldexcludepats'] = exclude
+    kwargs[b'oldincludepats'] = include
+    kwargs[b'oldexcludepats'] = exclude
     if include:
-        kwargs['includepats'] = include
+        kwargs[b'includepats'] = include
     if exclude:
-        kwargs['excludepats'] = exclude
+        kwargs[b'excludepats'] = exclude
     # calculate known nodes only in ellipses cases because in non-ellipses cases
     # we have all the nodes
     if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities():
-        kwargs['known'] = [node.hex(ctx.node()) for ctx in
-                           repo.set('::%ln', pullop.common)
-                           if ctx.node() != node.nullid]
-        if not kwargs['known']:
+        kwargs[b'known'] = [
+            node.hex(ctx.node())
+            for ctx in repo.set(b'::%ln', pullop.common)
+            if ctx.node() != node.nullid
+        ]
+        if not kwargs[b'known']:
             # Mercurial serializes an empty list as '' and deserializes it as
             # [''], so delete it instead to avoid handling the empty string on
             # the server.
-            del kwargs['known']
+            del kwargs[b'known']
+
+
+extensions.wrapfunction(
+    exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare
+)
+
 
-extensions.wrapfunction(exchange,'_pullbundle2extraprepare',
-                        pullbundle2extraprepare)
-
-def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
-            newincludes, newexcludes, force):
+def _narrow(
+    ui,
+    repo,
+    remote,
+    commoninc,
+    oldincludes,
+    oldexcludes,
+    newincludes,
+    newexcludes,
+    force,
+):
     oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
     newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
 
@@ -168,53 +217,65 @@
     # commits. We will then check that the local-only commits don't
     # have any changes to files that will be untracked.
     unfi = repo.unfiltered()
-    outgoing = discovery.findcommonoutgoing(unfi, remote,
-                                            commoninc=commoninc)
-    ui.status(_('looking for local changes to affected paths\n'))
+    outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
+    ui.status(_(b'looking for local changes to affected paths\n'))
     localnodes = []
     for n in itertools.chain(outgoing.missing, outgoing.excluded):
         if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
             localnodes.append(n)
-    revstostrip = unfi.revs('descendants(%ln)', localnodes)
-    hiddenrevs = repoview.filterrevs(repo, 'visible')
-    visibletostrip = list(repo.changelog.node(r)
-                          for r in (revstostrip - hiddenrevs))
+    revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
+    hiddenrevs = repoview.filterrevs(repo, b'visible')
+    visibletostrip = list(
+        repo.changelog.node(r) for r in (revstostrip - hiddenrevs)
+    )
     if visibletostrip:
-        ui.status(_('The following changeset(s) or their ancestors have '
-                    'local changes not on the remote:\n'))
+        ui.status(
+            _(
+                b'The following changeset(s) or their ancestors have '
+                b'local changes not on the remote:\n'
+            )
+        )
         maxnodes = 10
         if ui.verbose or len(visibletostrip) <= maxnodes:
             for n in visibletostrip:
-                ui.status('%s\n' % node.short(n))
+                ui.status(b'%s\n' % node.short(n))
         else:
             for n in visibletostrip[:maxnodes]:
-                ui.status('%s\n' % node.short(n))
-            ui.status(_('...and %d more, use --verbose to list all\n') %
-                      (len(visibletostrip) - maxnodes))
+                ui.status(b'%s\n' % node.short(n))
+            ui.status(
+                _(b'...and %d more, use --verbose to list all\n')
+                % (len(visibletostrip) - maxnodes)
+            )
         if not force:
-            raise error.Abort(_('local changes found'),
-                              hint=_('use --force-delete-local-changes to '
-                                     'ignore'))
+            raise error.Abort(
+                _(b'local changes found'),
+                hint=_(b'use --force-delete-local-changes to ignore'),
+            )
 
     with ui.uninterruptible():
         if revstostrip:
             tostrip = [unfi.changelog.node(r) for r in revstostrip]
-            if repo['.'].node() in tostrip:
+            if repo[b'.'].node() in tostrip:
                 # stripping working copy, so move to a different commit first
-                urev = max(repo.revs('(::%n) - %ln + null',
-                                     repo['.'].node(), visibletostrip))
+                urev = max(
+                    repo.revs(
+                        b'(::%n) - %ln + null',
+                        repo[b'.'].node(),
+                        visibletostrip,
+                    )
+                )
                 hg.clean(repo, urev)
-            overrides = {('devel', 'strip-obsmarkers'): False}
-            with ui.configoverride(overrides, 'narrow'):
-                repair.strip(ui, unfi, tostrip, topic='narrow')
+            overrides = {(b'devel', b'strip-obsmarkers'): False}
+            with ui.configoverride(overrides, b'narrow'):
+                repair.strip(ui, unfi, tostrip, topic=b'narrow')
 
         todelete = []
         for f, f2, size in repo.store.datafiles():
-            if f.startswith('data/'):
+            if f.startswith(b'data/'):
                 file = f[5:-2]
                 if not newmatch(file):
                     todelete.append(f)
-            elif f.startswith('meta/'):
+            elif f.startswith(b'meta/'):
                 dir = f[5:-13]
                 dirs = sorted(util.dirs({dir})) + [dir]
                 include = True
@@ -223,20 +284,20 @@
                     if not visit:
                         include = False
                         break
-                    if visit == 'all':
+                    if visit == b'all':
                         break
                 if not include:
                     todelete.append(f)
 
         repo.destroying()
 
-        with repo.transaction('narrowing'):
+        with repo.transaction(b'narrowing'):
             # Update narrowspec before removing revlogs, so repo won't be
             # corrupt in case of crash
             repo.setnarrowpats(newincludes, newexcludes)
 
             for f in todelete:
-                ui.status(_('deleting %s\n') % f)
+                ui.status(_(b'deleting %s\n') % f)
                 util.unlinkpath(repo.svfs.join(f))
                 repo.store.markremoved(f)
 
@@ -245,8 +306,17 @@
 
         repo.destroyed()
 
-def _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes,
-           newincludes, newexcludes):
+
+def _widen(
+    ui,
+    repo,
+    remote,
+    commoninc,
+    oldincludes,
+    oldexcludes,
+    newincludes,
+    newexcludes,
+):
     # for now we assume that if a server has ellipses enabled, we will be
     # exchanging ellipses nodes. In future we should add ellipses as a client
     # side requirement (maybe) to distinguish whether a client is shallow or not and
@@ -254,29 +324,36 @@
     # Theoretically a non-ellipses repo should be able to use narrow
     # functionality from an ellipses enabled server
     remotecap = remote.capabilities()
-    ellipsesremote = any(cap in remotecap
-                         for cap in wireprototypes.SUPPORTED_ELLIPSESCAP)
+    ellipsesremote = any(
+        cap in remotecap for cap in wireprototypes.SUPPORTED_ELLIPSESCAP
+    )
 
     # check whether we are talking to a server which supports old version of
     # ellipses capabilities
-    isoldellipses = (ellipsesremote and wireprototypes.ELLIPSESCAP1 in
-                     remotecap and wireprototypes.ELLIPSESCAP not in remotecap)
+    isoldellipses = (
+        ellipsesremote
+        and wireprototypes.ELLIPSESCAP1 in remotecap
+        and wireprototypes.ELLIPSESCAP not in remotecap
+    )
 
     def pullbundle2extraprepare_widen(orig, pullop, kwargs):
         orig(pullop, kwargs)
         # The old{in,ex}cludepats have already been set by orig()
-        kwargs['includepats'] = newincludes
-        kwargs['excludepats'] = newexcludes
-    wrappedextraprepare = extensions.wrappedfunction(exchange,
-        '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
+        kwargs[b'includepats'] = newincludes
+        kwargs[b'excludepats'] = newexcludes
+
+    wrappedextraprepare = extensions.wrappedfunction(
+        exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
+    )
 
     # define a function that narrowbundle2 can call after creating the
     # backup bundle, but before applying the bundle from the server
     def setnewnarrowpats():
         repo.setnarrowpats(newincludes, newexcludes)
+
     repo.setnewnarrowpats = setnewnarrowpats
     # silence the devel-warning of applying an empty changegroup
-    overrides = {('devel', 'all-warnings'): False}
+    overrides = {(b'devel', b'all-warnings'): False}
 
     common = commoninc[0]
     with ui.uninterruptible():
@@ -291,25 +368,33 @@
         else:
             known = []
             if ellipsesremote:
-                known = [node.hex(ctx.node()) for ctx in
-                         repo.set('::%ln', common)
-                         if ctx.node() != node.nullid]
+                known = [
+                    ctx.node()
+                    for ctx in repo.set(b'::%ln', common)
+                    if ctx.node() != node.nullid
+                ]
             with remote.commandexecutor() as e:
-                bundle = e.callcommand('narrow_widen', {
-                    'oldincludes': oldincludes,
-                    'oldexcludes': oldexcludes,
-                    'newincludes': newincludes,
-                    'newexcludes': newexcludes,
-                    'cgversion': '03',
-                    'commonheads': common,
-                    'known': known,
-                    'ellipses': ellipsesremote,
-                }).result()
+                bundle = e.callcommand(
+                    b'narrow_widen',
+                    {
+                        b'oldincludes': oldincludes,
+                        b'oldexcludes': oldexcludes,
+                        b'newincludes': newincludes,
+                        b'newexcludes': newexcludes,
+                        b'cgversion': b'03',
+                        b'commonheads': common,
+                        b'known': known,
+                        b'ellipses': ellipsesremote,
+                    },
+                ).result()
 
-            trmanager = exchange.transactionmanager(repo, 'widen', remote.url())
-            with trmanager, repo.ui.configoverride(overrides, 'widen'):
-                op = bundle2.bundleoperation(repo, trmanager.transaction,
-                                             source='widen')
+            trmanager = exchange.transactionmanager(
+                repo, b'widen', remote.url()
+            )
+            with trmanager, repo.ui.configoverride(overrides, b'widen'):
+                op = bundle2.bundleoperation(
+                    repo, trmanager.transaction, source=b'widen'
+                )
                 # TODO: we should catch error.Abort here
                 bundle2.processbundle(repo, bundle, op=op)
 
@@ -317,26 +402,51 @@
             with ds.parentchange():
                 ds.setparents(p1, p2)
 
-        with repo.transaction('widening'):
+        with repo.transaction(b'widening'):
             repo.setnewnarrowpats()
             narrowspec.updateworkingcopy(repo)
             narrowspec.copytoworkingcopy(repo)
 
+
 # TODO(rdamazio): Make new matcher format and update description
-@command('tracked',
-    [('', 'addinclude', [], _('new paths to include')),
-     ('', 'removeinclude', [], _('old paths to no longer include')),
-     ('', 'addexclude', [], _('new paths to exclude')),
-     ('', 'import-rules', '', _('import narrowspecs from a file')),
-     ('', 'removeexclude', [], _('old paths to no longer exclude')),
-     ('', 'clear', False, _('whether to replace the existing narrowspec')),
-     ('', 'force-delete-local-changes', False,
-       _('forces deletion of local changes when narrowing')),
-     ('', 'update-working-copy', False,
-      _('update working copy when the store has changed')),
-    ] + commands.remoteopts,
-    _('[OPTIONS]... [REMOTE]'),
-    inferrepo=True)
+@command(
+    b'tracked',
+    [
+        (b'', b'addinclude', [], _(b'new paths to include')),
+        (b'', b'removeinclude', [], _(b'old paths to no longer include')),
+        (
+            b'',
+            b'auto-remove-includes',
+            False,
+            _(b'automatically choose unused includes to remove'),
+        ),
+        (b'', b'addexclude', [], _(b'new paths to exclude')),
+        (b'', b'import-rules', b'', _(b'import narrowspecs from a file')),
+        (b'', b'removeexclude', [], _(b'old paths to no longer exclude')),
+        (
+            b'',
+            b'clear',
+            False,
+            _(b'whether to replace the existing narrowspec'),
+        ),
+        (
+            b'',
+            b'force-delete-local-changes',
+            False,
+            _(b'forces deletion of local changes when narrowing'),
+        ),
+        (
+            b'',
+            b'update-working-copy',
+            False,
+            _(b'update working copy when the store has changed'),
+        ),
+    ]
+    + commands.remoteopts,
+    _(b'[OPTIONS]... [REMOTE]'),
+    inferrepo=True,
+    helpcategory=command.CATEGORY_MAINTENANCE,
+)
 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
     """show or change the current narrowspec
 
@@ -361,45 +471,69 @@
     If --clear is specified without any further options, the narrowspec will be
     empty and will not match any files.
 
+    If --auto-remove-includes is specified, then those includes that don't match
+    any files modified by currently visible local commits (those not shared by
+    the remote) will be added to the set of explicitly specified includes to
+    remove.
+
     --import-rules accepts a path to a file containing rules, allowing you to
     add --addinclude, --addexclude rules in bulk. Like the other include and
     exclude switches, the changes are applied immediately.
     """
     opts = pycompat.byteskwargs(opts)
     if repository.NARROW_REQUIREMENT not in repo.requirements:
-        raise error.Abort(_('the tracked command is only supported on '
-                            'respositories cloned with --narrow'))
+        raise error.Abort(
+            _(
+                b'the tracked command is only supported on '
+                b'repositories cloned with --narrow'
+            )
+        )
 
     # Before supporting, decide whether "hg tracked --clear" should mean
     # tracking no paths or all paths.
-    if opts['clear']:
-        raise error.Abort(_('the --clear option is not yet supported'))
+    if opts[b'clear']:
+        raise error.Abort(_(b'the --clear option is not yet supported'))
 
     # import rules from a file
-    newrules = opts.get('import_rules')
+    newrules = opts.get(b'import_rules')
     if newrules:
         try:
             filepath = os.path.join(encoding.getcwd(), newrules)
             fdata = util.readfile(filepath)
         except IOError as inst:
-            raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
-                              (filepath, encoding.strtolocal(inst.strerror)))
-        includepats, excludepats, profiles = sparse.parseconfig(ui, fdata,
-                                                                'narrow')
+            raise error.Abort(
+                _(b"cannot read narrowspecs from '%s': %s")
+                % (filepath, encoding.strtolocal(inst.strerror))
+            )
+        includepats, excludepats, profiles = sparse.parseconfig(
+            ui, fdata, b'narrow'
+        )
         if profiles:
-            raise error.Abort(_("including other spec files using '%include' "
-                                "is not supported in narrowspec"))
-        opts['addinclude'].extend(includepats)
-        opts['addexclude'].extend(excludepats)
+            raise error.Abort(
+                _(
+                    b"including other spec files using '%include' "
+                    b"is not supported in narrowspec"
+                )
+            )
+        opts[b'addinclude'].extend(includepats)
+        opts[b'addexclude'].extend(excludepats)
 
-    addedincludes = narrowspec.parsepatterns(opts['addinclude'])
-    removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
-    addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
-    removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
+    addedincludes = narrowspec.parsepatterns(opts[b'addinclude'])
+    removedincludes = narrowspec.parsepatterns(opts[b'removeinclude'])
+    addedexcludes = narrowspec.parsepatterns(opts[b'addexclude'])
+    removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude'])
+    autoremoveincludes = opts[b'auto_remove_includes']
 
-    update_working_copy = opts['update_working_copy']
-    only_show = not (addedincludes or removedincludes or addedexcludes or
-                     removedexcludes or newrules or update_working_copy)
+    update_working_copy = opts[b'update_working_copy']
+    only_show = not (
+        addedincludes
+        or removedincludes
+        or addedexcludes
+        or removedexcludes
+        or newrules
+        or autoremoveincludes
+        or update_working_copy
+    )
 
     oldincludes, oldexcludes = repo.narrowpats
 
@@ -415,27 +549,27 @@
 
     # Only print the current narrowspec.
     if only_show:
-        ui.pager('tracked')
-        fm = ui.formatter('narrow', opts)
+        ui.pager(b'tracked')
+        fm = ui.formatter(b'narrow', opts)
         for i in sorted(oldincludes):
             fm.startitem()
-            fm.write('status', '%s ', 'I', label='narrow.included')
-            fm.write('pat', '%s\n', i, label='narrow.included')
+            fm.write(b'status', b'%s ', b'I', label=b'narrow.included')
+            fm.write(b'pat', b'%s\n', i, label=b'narrow.included')
         for i in sorted(oldexcludes):
             fm.startitem()
-            fm.write('status', '%s ', 'X', label='narrow.excluded')
-            fm.write('pat', '%s\n', i, label='narrow.excluded')
+            fm.write(b'status', b'%s ', b'X', label=b'narrow.excluded')
+            fm.write(b'pat', b'%s\n', i, label=b'narrow.excluded')
         fm.end()
         return 0
 
     if update_working_copy:
-        with repo.wlock(), repo.lock(), repo.transaction('narrow-wc'):
+        with repo.wlock(), repo.lock(), repo.transaction(b'narrow-wc'):
             narrowspec.updateworkingcopy(repo)
             narrowspec.copytoworkingcopy(repo)
         return 0
 
-    if not widening and not narrowing:
-        ui.status(_("nothing to widen or narrow\n"))
+    if not (widening or narrowing or autoremoveincludes):
+        ui.status(_(b"nothing to widen or narrow\n"))
         return 0
 
     with repo.wlock(), repo.lock():
@@ -444,25 +578,63 @@
         # Find the revisions we have in common with the remote. These will
         # be used for finding local-only changes for narrowing. They will
         # also define the set of revisions to update for widening.
-        remotepath = ui.expandpath(remotepath or 'default')
+        remotepath = ui.expandpath(remotepath or b'default')
         url, branches = hg.parseurl(remotepath)
-        ui.status(_('comparing with %s\n') % util.hidepassword(url))
+        ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
         remote = hg.peer(repo, opts, url)
 
         # check narrow support before doing anything if widening needs to be
         # performed. In future we should also abort if client is ellipses and
         # server does not support ellipses
         if widening and wireprototypes.NARROWCAP not in remote.capabilities():
-            raise error.Abort(_("server does not support narrow clones"))
+            raise error.Abort(_(b"server does not support narrow clones"))
 
         commoninc = discovery.findcommonincoming(repo, remote)
 
+        if autoremoveincludes:
+            outgoing = discovery.findcommonoutgoing(
+                repo, remote, commoninc=commoninc
+            )
+            ui.status(_(b'looking for unused includes to remove\n'))
+            localfiles = set()
+            for n in itertools.chain(outgoing.missing, outgoing.excluded):
+                localfiles.update(repo[n].files())
+            suggestedremovals = []
+            for include in sorted(oldincludes):
+                match = narrowspec.match(repo.root, [include], oldexcludes)
+                if not any(match(f) for f in localfiles):
+                    suggestedremovals.append(include)
+            if suggestedremovals:
+                for s in suggestedremovals:
+                    ui.status(b'%s\n' % s)
+                if (
+                    ui.promptchoice(
+                        _(
+                            b'remove these unused includes (yn)?'
+                            b'$$ &Yes $$ &No'
+                        )
+                    )
+                    == 0
+                ):
+                    removedincludes.update(suggestedremovals)
+                    narrowing = True
+            else:
+                ui.status(_(b'found no unused includes\n'))
+
         if narrowing:
             newincludes = oldincludes - removedincludes
             newexcludes = oldexcludes | addedexcludes
-            _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
-                    newincludes, newexcludes,
-                    opts['force_delete_local_changes'])
+            _narrow(
+                ui,
+                repo,
+                remote,
+                commoninc,
+                oldincludes,
+                oldexcludes,
+                newincludes,
+                newexcludes,
+                opts[b'force_delete_local_changes'],
+            )
             # _narrow() updated the narrowspec and _widen() below needs to
             # use the updated values as its base (otherwise removed includes
             # and addedexcludes will be lost in the resulting narrowspec)
@@ -472,7 +644,15 @@
         if widening:
             newincludes = oldincludes | addedincludes
             newexcludes = oldexcludes - removedexcludes
-            _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes,
-                    newincludes, newexcludes)
+            _widen(
+                ui,
+                repo,
+                remote,
+                commoninc,
+                oldincludes,
+                oldexcludes,
+                newincludes,
+                newexcludes,
+            )
 
     return 0
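
The ``--auto-remove-includes`` branch above suggests dropping any include
that matches no file touched by a local-only commit. A simplified,
self-contained sketch of that selection, with matching reduced to a
path-prefix test instead of ``narrowspec.match()``::

    # Standalone sketch of the unused-include selection above.
    def unusedincludes(oldincludes, localfiles):
        suggested = []
        for include in sorted(oldincludes):
            # e.g. b'path:dir' -> b'dir/' (prefix test for this sketch)
            prefix = include.split(b':', 1)[-1] + b'/'
            if not any(f.startswith(prefix) for f in localfiles):
                suggested.append(include)
        return suggested

    print(unusedincludes(
        [b'path:docs', b'path:src'],
        {b'src/main.py', b'src/util.py'},
    ))  # -> [b'path:docs']
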
--- a/hgext/narrow/narrowdirstate.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/narrow/narrowdirstate.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,9 +8,8 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
-from mercurial import (
-    error,
-)
+from mercurial import error
+
 
 def wrapdirstate(repo, dirstate):
     """Add narrow spec dirstate ignore, block changes outside narrow spec."""
@@ -20,9 +19,15 @@
             narrowmatch = repo.narrowmatch()
             for f in args:
                 if f is not None and not narrowmatch(f) and f not in self:
-                    raise error.Abort(_("cannot track '%s' - it is outside " +
-                        "the narrow clone") % f)
+                    raise error.Abort(
+                        _(
+                            b"cannot track '%s' - it is outside "
+                            + b"the narrow clone"
+                        )
+                        % f
+                    )
             return fn(self, *args, **kwargs)
+
         return _wrapper
 
     class narrowdirstate(dirstate.__class__):
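
``_editfunc`` above guards every dirstate mutator so that paths outside
the narrow spec cannot become tracked. A minimal sketch of the same guard,
with a plain predicate standing in for ``repo.narrowmatch()`` and a dict
standing in for the dirstate::

    # Every path argument must satisfy the matcher or already be tracked.
    def _editfunc(fn, narrowmatch):
        def _wrapper(self, *args):
            for f in args:
                if f is not None and not narrowmatch(f) and f not in self:
                    raise ValueError(
                        "cannot track %r - it is outside the narrow clone" % f
                    )
            return fn(self, *args)
        return _wrapper

    class fakedirstate(dict):
        def add(self, f):
            self[f] = b'a'

    fakedirstate.add = _editfunc(
        fakedirstate.add, lambda f: f.startswith(b'src/')
    )

    ds = fakedirstate()
    ds.add(b'src/main.py')       # inside the narrow spec: allowed
    try:
        ds.add(b'docs/readme')   # outside: rejected
    except ValueError as e:
        print(e)
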
--- a/hgext/narrow/narrowrepo.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/narrow/narrowrepo.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,19 +7,15 @@
 
 from __future__ import absolute_import
 
-from mercurial import (
-    wireprototypes,
-)
+from mercurial import wireprototypes
 
-from . import (
-    narrowdirstate,
-)
+from . import narrowdirstate
+
 
 def wraprepo(repo):
     """Enables narrow clone functionality on a single local repository."""
 
     class narrowrepository(repo.__class__):
-
         def _makedirstate(self):
             dirstate = super(narrowrepository, self)._makedirstate()
             return narrowdirstate.wrapdirstate(self, dirstate)
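
``wraprepo()`` swaps the repository instance onto a subclass of its
*current* class, so the wrapping composes with whatever other extensions
have already installed. A generic sketch of that class-swap pattern, with
a toy class in place of ``localrepository``::

    # Derive from the instance's current class and reassign __class__.
    class toyrepo(object):
        def _makedirstate(self):
            return {}

    def wraprepo(repo):
        class narrowrepository(repo.__class__):
            def _makedirstate(self):
                dirstate = super(narrowrepository, self)._makedirstate()
                dirstate['narrow'] = True  # stand-in for wrapdirstate()
                return dirstate
        repo.__class__ = narrowrepository

    r = toyrepo()
    wraprepo(r)
    assert r._makedirstate() == {'narrow': True}
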
--- a/hgext/narrow/narrowtemplates.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/narrow/narrowtemplates.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,33 +16,37 @@
 templatekeyword = registrar.templatekeyword(keywords)
 revsetpredicate = registrar.revsetpredicate()
 
+
 def _isellipsis(repo, rev):
     if repo.changelog.flags(rev) & revlog.REVIDX_ELLIPSIS:
         return True
     return False
 
-@templatekeyword('ellipsis', requires={'repo', 'ctx'})
+
+@templatekeyword(b'ellipsis', requires={b'repo', b'ctx'})
 def ellipsis(context, mapping):
     """String. 'ellipsis' if the change is an ellipsis node, else ''."""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     if _isellipsis(repo, ctx.rev()):
-        return 'ellipsis'
-    return ''
+        return b'ellipsis'
+    return b''
 
-@templatekeyword('outsidenarrow', requires={'repo', 'ctx'})
+
+@templatekeyword(b'outsidenarrow', requires={b'repo', b'ctx'})
 def outsidenarrow(context, mapping):
     """String. 'outsidenarrow' if the change affects no tracked files,
     else ''."""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     m = repo.narrowmatch()
     if ctx.files() and not m.always():
         if not any(m(f) for f in ctx.files()):
-            return 'outsidenarrow'
-    return ''
+            return b'outsidenarrow'
+    return b''
 
-@revsetpredicate('ellipsis()')
+
+@revsetpredicate(b'ellipsis()')
 def ellipsisrevset(repo, subset, x):
     """Changesets that are ellipsis nodes."""
     return subset.filter(lambda r: _isellipsis(repo, r))
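
Both template keywords and the ``ellipsis()`` revset above reduce to one
bit test on the changelog flags. A simplified sketch; the flag value and
the dict used for storage are assumptions of this sketch, not revlog's
real layout::

    # A revision is an ellipsis node when its entry carries the flag bit.
    REVIDX_ELLIPSIS = 1 << 14  # assumed value for this sketch

    flags = {0: 0, 1: REVIDX_ELLIPSIS, 2: 0}  # rev -> stored flags

    def _isellipsis(rev):
        return bool(flags[rev] & REVIDX_ELLIPSIS)

    print([r for r in sorted(flags) if _isellipsis(r)])  # -> [1]
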
--- a/hgext/narrow/narrowwirepeer.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/narrow/narrowwirepeer.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,7 +13,6 @@
     extensions,
     hg,
     narrowspec,
-    node as nodemod,
     pycompat,
     wireprototypes,
     wireprotov1peer,
@@ -22,29 +21,47 @@
 
 from . import narrowbundle2
 
+
 def uisetup():
     wireprotov1peer.wirepeer.narrow_widen = peernarrowwiden
 
+
 def reposetup(repo):
     def wirereposetup(ui, peer):
         def wrapped(orig, cmd, *args, **kwargs):
-            if cmd == 'unbundle':
+            if cmd == b'unbundle':
                 # TODO: don't blindly add include/exclude wireproto
                 # arguments to unbundle.
                 include, exclude = repo.narrowpats
-                kwargs[r"includepats"] = ','.join(include)
-                kwargs[r"excludepats"] = ','.join(exclude)
+                kwargs[r"includepats"] = b','.join(include)
+                kwargs[r"excludepats"] = b','.join(exclude)
             return orig(cmd, *args, **kwargs)
-        extensions.wrapfunction(peer, '_calltwowaystream', wrapped)
+
+        extensions.wrapfunction(peer, b'_calltwowaystream', wrapped)
+
     hg.wirepeersetupfuncs.append(wirereposetup)
 
-@wireprotov1server.wireprotocommand('narrow_widen', 'oldincludes oldexcludes'
-                                                    ' newincludes newexcludes'
-                                                    ' commonheads cgversion'
-                                                    ' known ellipses',
-                                    permission='pull')
-def narrow_widen(repo, proto, oldincludes, oldexcludes, newincludes,
-                 newexcludes, commonheads, cgversion, known, ellipses):
+
+@wireprotov1server.wireprotocommand(
+    b'narrow_widen',
+    b'oldincludes oldexcludes'
+    b' newincludes newexcludes'
+    b' commonheads cgversion'
+    b' known ellipses',
+    permission=b'pull',
+)
+def narrow_widen(
+    repo,
+    proto,
+    oldincludes,
+    oldexcludes,
+    newincludes,
+    newexcludes,
+    commonheads,
+    cgversion,
+    known,
+    ellipses,
+):
     """wireprotocol command to send data when a narrow clone is widen. We will
     be sending a changegroup here.
 
@@ -61,10 +78,15 @@
 
     preferuncompressed = False
     try:
-        oldincludes = wireprototypes.decodelist(oldincludes)
-        newincludes = wireprototypes.decodelist(newincludes)
-        oldexcludes = wireprototypes.decodelist(oldexcludes)
-        newexcludes = wireprototypes.decodelist(newexcludes)
+
+        def splitpaths(data):
+            # work around ''.split(',') => ['']
+            return data.split(b',') if data else []
+
+        oldincludes = splitpaths(oldincludes)
+        newincludes = splitpaths(newincludes)
+        oldexcludes = splitpaths(oldexcludes)
+        newexcludes = splitpaths(newexcludes)
         # validate the patterns
         narrowspec.validatepatterns(set(oldincludes))
         narrowspec.validatepatterns(set(newincludes))
@@ -73,43 +95,56 @@
 
         common = wireprototypes.decodelist(commonheads)
         known = wireprototypes.decodelist(known)
-        known = {nodemod.bin(n) for n in known}
-        if ellipses == '0':
+        if ellipses == b'0':
             ellipses = False
         else:
             ellipses = bool(ellipses)
         cgversion = cgversion
 
         bundler = bundle2.bundle20(repo.ui)
+        newmatch = narrowspec.match(
+            repo.root, include=newincludes, exclude=newexcludes
+        )
+        oldmatch = narrowspec.match(
+            repo.root, include=oldincludes, exclude=oldexcludes
+        )
         if not ellipses:
-            newmatch = narrowspec.match(repo.root, include=newincludes,
-                                        exclude=newexcludes)
-            oldmatch = narrowspec.match(repo.root, include=oldincludes,
-                                        exclude=oldexcludes)
-            bundle2.widen_bundle(bundler, repo, oldmatch, newmatch, common,
-                                 known, cgversion, ellipses)
+            bundle2.widen_bundle(
+                bundler,
+                repo,
+                oldmatch,
+                newmatch,
+                common,
+                known,
+                cgversion,
+                ellipses,
+            )
         else:
-            narrowbundle2.generateellipsesbundle2(bundler, repo, oldincludes,
-                    oldexcludes, newincludes, newexcludes, cgversion, common,
-                    list(common), known, None)
+            narrowbundle2.generate_ellipses_bundle2_for_widening(
+                bundler, repo, oldmatch, newmatch, cgversion, common, known,
+            )
     except error.Abort as exc:
         bundler = bundle2.bundle20(repo.ui)
-        manargs = [('message', pycompat.bytestr(exc))]
+        manargs = [(b'message', pycompat.bytestr(exc))]
         advargs = []
         if exc.hint is not None:
-            advargs.append(('hint', exc.hint))
-        bundler.addpart(bundle2.bundlepart('error:abort', manargs, advargs))
+            advargs.append((b'hint', exc.hint))
+        bundler.addpart(bundle2.bundlepart(b'error:abort', manargs, advargs))
         preferuncompressed = True
 
     chunks = bundler.getchunks()
-    return wireprototypes.streamres(gen=chunks,
-                                    prefer_uncompressed=preferuncompressed)
+    return wireprototypes.streamres(
+        gen=chunks, prefer_uncompressed=preferuncompressed
+    )
+
 
 def peernarrowwiden(remote, **kwargs):
-    for ch in (r'oldincludes', r'newincludes', r'oldexcludes', r'newexcludes',
-               r'commonheads', r'known'):
+    for ch in (r'commonheads', r'known'):
         kwargs[ch] = wireprototypes.encodelist(kwargs[ch])
 
-    kwargs[r'ellipses'] = '%i' % bool(kwargs[r'ellipses'])
-    f = remote._callcompressable('narrow_widen', **kwargs)
+    for ch in (r'oldincludes', r'newincludes', r'oldexcludes', r'newexcludes'):
+        kwargs[ch] = b','.join(kwargs[ch])
+
+    kwargs[r'ellipses'] = b'%i' % bool(kwargs[r'ellipses'])
+    f = remote._callcompressable(b'narrow_widen', **kwargs)
     return bundle2.getunbundler(remote.ui, f)
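
``splitpaths()`` above exists because splitting an empty bytestring on a
comma yields ``['']`` rather than ``[]``, which would smuggle an empty
pattern into the narrowspec. A self-contained demonstration::

    # The surprising case the workaround guards against:
    assert b''.split(b',') == [b'']
    assert b'a,b'.split(b',') == [b'a', b'b']

    def splitpaths(data):
        # work around b''.split(b',') => [b'']
        return data.split(b',') if data else []

    assert splitpaths(b'') == []
    assert splitpaths(b'foo,bar') == [b'foo', b'bar']
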
--- a/hgext/notify.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/notify.py	Mon Oct 21 11:09:48 2019 -0400
@@ -82,6 +82,12 @@
 
 notify.domain
   Default email domain for sender or recipients with no explicit domain.
+  It is also used for the domain part of the ``Message-Id`` when using
+  ``notify.messageidseed``.
+
+notify.messageidseed
+  Create deterministic ``Message-Id`` headers for the mails based on the seed
+  and the revision identifier of the first commit in the changeset.
 
 notify.style
   Style file to use when formatting emails.
@@ -144,6 +150,7 @@
 import email.errors as emailerrors
 import email.parser as emailparser
 import fnmatch
+import hashlib
 import socket
 import time
 
@@ -154,6 +161,7 @@
     logcmdutil,
     mail,
     patch,
+    pycompat,
     registrar,
     util,
 )
@@ -166,64 +174,67 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('notify', 'changegroup',
-    default=None,
+configitem(
+    b'notify', b'changegroup', default=None,
 )
-configitem('notify', 'config',
-    default=None,
+configitem(
+    b'notify', b'config', default=None,
 )
-configitem('notify', 'diffstat',
-    default=True,
+configitem(
+    b'notify', b'diffstat', default=True,
 )
-configitem('notify', 'domain',
-    default=None,
+configitem(
+    b'notify', b'domain', default=None,
 )
-configitem('notify', 'fromauthor',
-    default=None,
+configitem(
+    b'notify', b'messageidseed', default=None,
 )
-configitem('notify', 'incoming',
-    default=None,
+configitem(
+    b'notify', b'fromauthor', default=None,
 )
-configitem('notify', 'maxdiff',
-    default=300,
+configitem(
+    b'notify', b'incoming', default=None,
 )
-configitem('notify', 'maxdiffstat',
-    default=-1,
+configitem(
+    b'notify', b'maxdiff', default=300,
 )
-configitem('notify', 'maxsubject',
-    default=67,
+configitem(
+    b'notify', b'maxdiffstat', default=-1,
 )
-configitem('notify', 'mbox',
-    default=None,
+configitem(
+    b'notify', b'maxsubject', default=67,
 )
-configitem('notify', 'merge',
-    default=True,
+configitem(
+    b'notify', b'mbox', default=None,
 )
-configitem('notify', 'outgoing',
-    default=None,
+configitem(
+    b'notify', b'merge', default=True,
 )
-configitem('notify', 'sources',
-    default='serve',
+configitem(
+    b'notify', b'outgoing', default=None,
+)
+configitem(
+    b'notify', b'sources', default=b'serve',
 )
-configitem('notify', 'showfunc',
-    default=None,
+configitem(
+    b'notify', b'showfunc', default=None,
 )
-configitem('notify', 'strip',
-    default=0,
+configitem(
+    b'notify', b'strip', default=0,
 )
-configitem('notify', 'style',
-    default=None,
+configitem(
+    b'notify', b'style', default=None,
 )
-configitem('notify', 'template',
-    default=None,
+configitem(
+    b'notify', b'template', default=None,
 )
-configitem('notify', 'test',
-    default=True,
+configitem(
+    b'notify', b'test', default=True,
 )
 
 # template for single changeset can include email headers.
@@ -247,35 +258,38 @@
 '''
 
 deftemplates = {
-    'changegroup': multiple_template,
+    b'changegroup': multiple_template,
 }
 
+
 class notifier(object):
     '''email notification class.'''
 
     def __init__(self, ui, repo, hooktype):
         self.ui = ui
-        cfg = self.ui.config('notify', 'config')
+        cfg = self.ui.config(b'notify', b'config')
         if cfg:
-            self.ui.readconfig(cfg, sections=['usersubs', 'reposubs'])
+            self.ui.readconfig(cfg, sections=[b'usersubs', b'reposubs'])
         self.repo = repo
-        self.stripcount = int(self.ui.config('notify', 'strip'))
+        self.stripcount = int(self.ui.config(b'notify', b'strip'))
         self.root = self.strip(self.repo.root)
-        self.domain = self.ui.config('notify', 'domain')
-        self.mbox = self.ui.config('notify', 'mbox')
-        self.test = self.ui.configbool('notify', 'test')
+        self.domain = self.ui.config(b'notify', b'domain')
+        self.mbox = self.ui.config(b'notify', b'mbox')
+        self.test = self.ui.configbool(b'notify', b'test')
         self.charsets = mail._charsets(self.ui)
         self.subs = self.subscribers()
-        self.merge = self.ui.configbool('notify', 'merge')
-        self.showfunc = self.ui.configbool('notify', 'showfunc')
+        self.merge = self.ui.configbool(b'notify', b'merge')
+        self.showfunc = self.ui.configbool(b'notify', b'showfunc')
+        self.messageidseed = self.ui.config(b'notify', b'messageidseed')
         if self.showfunc is None:
-            self.showfunc = self.ui.configbool('diff', 'showfunc')
+            self.showfunc = self.ui.configbool(b'diff', b'showfunc')
 
         mapfile = None
-        template = (self.ui.config('notify', hooktype) or
-                    self.ui.config('notify', 'template'))
+        template = self.ui.config(b'notify', hooktype) or self.ui.config(
+            b'notify', b'template'
+        )
         if not template:
-            mapfile = self.ui.config('notify', 'style')
+            mapfile = self.ui.config(b'notify', b'style')
         if not mapfile and not template:
             template = deftemplates.get(hooktype) or single_template
         spec = logcmdutil.templatespec(template, mapfile)
@@ -287,10 +301,10 @@
         path = util.pconvert(path)
         count = self.stripcount
         while count > 0:
-            c = path.find('/')
+            c = path.find(b'/')
             if c == -1:
                 break
-            path = path[c + 1:]
+            path = path[c + 1 :]
             count -= 1
         return path
 
@@ -299,47 +313,54 @@
 
         addr = stringutil.email(addr.strip())
         if self.domain:
-            a = addr.find('@localhost')
+            a = addr.find(b'@localhost')
             if a != -1:
                 addr = addr[:a]
-            if '@' not in addr:
-                return addr + '@' + self.domain
+            if b'@' not in addr:
+                return addr + b'@' + self.domain
         return addr
 
     def subscribers(self):
         '''return list of email addresses of subscribers to this repo.'''
         subs = set()
-        for user, pats in self.ui.configitems('usersubs'):
-            for pat in pats.split(','):
-                if '#' in pat:
-                    pat, revs = pat.split('#', 1)
+        for user, pats in self.ui.configitems(b'usersubs'):
+            for pat in pats.split(b','):
+                if b'#' in pat:
+                    pat, revs = pat.split(b'#', 1)
                 else:
                     revs = None
                 if fnmatch.fnmatch(self.repo.root, pat.strip()):
                     subs.add((self.fixmail(user), revs))
-        for pat, users in self.ui.configitems('reposubs'):
-            if '#' in pat:
-                pat, revs = pat.split('#', 1)
+        for pat, users in self.ui.configitems(b'reposubs'):
+            if b'#' in pat:
+                pat, revs = pat.split(b'#', 1)
             else:
                 revs = None
             if fnmatch.fnmatch(self.repo.root, pat):
-                for user in users.split(','):
+                for user in users.split(b','):
                     subs.add((self.fixmail(user), revs))
-        return [(mail.addressencode(self.ui, s, self.charsets, self.test), r)
-                for s, r in sorted(subs)]
+        return [
+            (mail.addressencode(self.ui, s, self.charsets, self.test), r)
+            for s, r in sorted(subs)
+        ]
 
     def node(self, ctx, **props):
         '''format one changeset, unless it is a suppressed merge.'''
         if not self.merge and len(ctx.parents()) > 1:
             return False
-        self.t.show(ctx, changes=ctx.changeset(),
-                    baseurl=self.ui.config('web', 'baseurl'),
-                    root=self.repo.root, webroot=self.root, **props)
+        self.t.show(
+            ctx,
+            changes=ctx.changeset(),
+            baseurl=self.ui.config(b'web', b'baseurl'),
+            root=self.repo.root,
+            webroot=self.root,
+            **props
+        )
         return True
 
     def skipsource(self, source):
         '''true if incoming changes from this source should be skipped.'''
-        ok_sources = self.ui.config('notify', 'sources').split()
+        ok_sources = self.ui.config(b'notify', b'sources').split()
         return source not in ok_sources
 
     def send(self, ctx, count, data):
@@ -351,13 +372,14 @@
             if spec is None:
                 subs.add(sub)
                 continue
-            revs = self.repo.revs('%r and %d:', spec, ctx.rev())
+            revs = self.repo.revs(b'%r and %d:', spec, ctx.rev())
             if len(revs):
                 subs.add(sub)
                 continue
         if len(subs) == 0:
-            self.ui.debug('notify: no subscribers to selected repo '
-                          'and revset\n')
+            self.ui.debug(
+                b'notify: no subscribers to selected repo and revset\n'
+            )
             return
 
         p = emailparser.Parser()
@@ -387,51 +409,58 @@
                 msg[k] = v
 
         msg[r'Date'] = encoding.strfromlocal(
-            dateutil.datestr(format="%a, %d %b %Y %H:%M:%S %1%2"))
+            dateutil.datestr(format=b"%a, %d %b %Y %H:%M:%S %1%2")
+        )
 
         # try to make subject line exist and be useful
         if not subject:
             if count > 1:
-                subject = _('%s: %d new changesets') % (self.root, count)
+                subject = _(b'%s: %d new changesets') % (self.root, count)
             else:
-                s = ctx.description().lstrip().split('\n', 1)[0].rstrip()
-                subject = '%s: %s' % (self.root, s)
-        maxsubject = int(self.ui.config('notify', 'maxsubject'))
+                s = ctx.description().lstrip().split(b'\n', 1)[0].rstrip()
+                subject = b'%s: %s' % (self.root, s)
+        maxsubject = int(self.ui.config(b'notify', b'maxsubject'))
         if maxsubject:
             subject = stringutil.ellipsis(subject, maxsubject)
         msg[r'Subject'] = encoding.strfromlocal(
-            mail.headencode(self.ui, subject, self.charsets, self.test))
+            mail.headencode(self.ui, subject, self.charsets, self.test)
+        )
 
         # try to make message have proper sender
         if not sender:
-            sender = self.ui.config('email', 'from') or self.ui.username()
-        if '@' not in sender or '@localhost' in sender:
+            sender = self.ui.config(b'email', b'from') or self.ui.username()
+        if b'@' not in sender or b'@localhost' in sender:
             sender = self.fixmail(sender)
         msg[r'From'] = encoding.strfromlocal(
-            mail.addressencode(self.ui, sender, self.charsets, self.test))
+            mail.addressencode(self.ui, sender, self.charsets, self.test)
+        )
 
         msg[r'X-Hg-Notification'] = r'changeset %s' % ctx
         if not msg[r'Message-Id']:
-            msg[r'Message-Id'] = encoding.strfromlocal(
-                '<hg.%s.%d.%d@%s>' % (ctx, int(time.time()),
-                                      hash(self.repo.root),
-                                      encoding.strtolocal(socket.getfqdn())))
-        msg[r'To'] = encoding.strfromlocal(', '.join(sorted(subs)))
+            msg[r'Message-Id'] = messageid(ctx, self.domain, self.messageidseed)
+        msg[r'To'] = encoding.strfromlocal(b', '.join(sorted(subs)))
 
         msgtext = encoding.strtolocal(msg.as_string())
         if self.test:
             self.ui.write(msgtext)
-            if not msgtext.endswith('\n'):
-                self.ui.write('\n')
+            if not msgtext.endswith(b'\n'):
+                self.ui.write(b'\n')
         else:
-            self.ui.status(_('notify: sending %d subscribers %d changes\n') %
-                           (len(subs), count))
-            mail.sendmail(self.ui, stringutil.email(msg[r'From']),
-                          subs, msgtext, mbox=self.mbox)
+            self.ui.status(
+                _(b'notify: sending %d subscribers %d changes\n')
+                % (len(subs), count)
+            )
+            mail.sendmail(
+                self.ui,
+                stringutil.email(msg[r'From']),
+                subs,
+                msgtext,
+                mbox=self.mbox,
+            )
 
     def diff(self, ctx, ref=None):
 
-        maxdiff = int(self.ui.config('notify', 'maxdiff'))
+        maxdiff = int(self.ui.config(b'notify', b'maxdiff'))
         prev = ctx.p1().node()
         if ref:
             ref = ref.node()
@@ -440,31 +469,32 @@
         diffopts = patch.diffallopts(self.ui)
         diffopts.showfunc = self.showfunc
         chunks = patch.diff(self.repo, prev, ref, opts=diffopts)
-        difflines = ''.join(chunks).splitlines()
+        difflines = b''.join(chunks).splitlines()
 
-        if self.ui.configbool('notify', 'diffstat'):
-            maxdiffstat = int(self.ui.config('notify', 'maxdiffstat'))
+        if self.ui.configbool(b'notify', b'diffstat'):
+            maxdiffstat = int(self.ui.config(b'notify', b'maxdiffstat'))
             s = patch.diffstat(difflines)
             # s may be nil, don't include the header if it is
             if s:
-                if maxdiffstat >= 0 and s.count("\n") > maxdiffstat + 1:
-                    s = s.split("\n")
-                    msg = _('\ndiffstat (truncated from %d to %d lines):\n\n')
+                if maxdiffstat >= 0 and s.count(b"\n") > maxdiffstat + 1:
+                    s = s.split(b"\n")
+                    msg = _(b'\ndiffstat (truncated from %d to %d lines):\n\n')
                     self.ui.write(msg % (len(s) - 2, maxdiffstat))
-                    self.ui.write("\n".join(s[:maxdiffstat] + s[-2:]))
+                    self.ui.write(b"\n".join(s[:maxdiffstat] + s[-2:]))
                 else:
-                    self.ui.write(_('\ndiffstat:\n\n%s') % s)
+                    self.ui.write(_(b'\ndiffstat:\n\n%s') % s)
 
         if maxdiff == 0:
             return
         elif maxdiff > 0 and len(difflines) > maxdiff:
-            msg = _('\ndiffs (truncated from %d to %d lines):\n\n')
+            msg = _(b'\ndiffs (truncated from %d to %d lines):\n\n')
             self.ui.write(msg % (len(difflines), maxdiff))
             difflines = difflines[:maxdiff]
         elif difflines:
-            self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines))
+            self.ui.write(_(b'\ndiffs (%d lines):\n\n') % len(difflines))
 
-        self.ui.write("\n".join(difflines))
+        self.ui.write(b"\n".join(difflines))
+
 
 def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
     '''send email notifications to interested subscribers.
@@ -476,17 +506,17 @@
     ctx = repo.unfiltered()[node]
 
     if not n.subs:
-        ui.debug('notify: no subscribers to repository %s\n' % n.root)
+        ui.debug(b'notify: no subscribers to repository %s\n' % n.root)
         return
     if n.skipsource(source):
-        ui.debug('notify: changes have source "%s" - skipping\n' % source)
+        ui.debug(b'notify: changes have source "%s" - skipping\n' % source)
         return
 
     ui.pushbuffer()
-    data = ''
+    data = b''
     count = 0
-    author = ''
-    if hooktype == 'changegroup' or hooktype == 'outgoing':
+    author = b''
+    if hooktype == b'changegroup' or hooktype == b'outgoing':
         for rev in repo.changelog.revs(start=ctx.rev()):
             if n.node(repo[rev]):
                 count += 1
@@ -494,16 +524,20 @@
                     author = repo[rev].user()
             else:
                 data += ui.popbuffer()
-                ui.note(_('notify: suppressing notification for merge %d:%s\n')
-                        % (rev, repo[rev].hex()[:12]))
+                ui.note(
+                    _(b'notify: suppressing notification for merge %d:%s\n')
+                    % (rev, repo[rev].hex()[:12])
+                )
                 ui.pushbuffer()
         if count:
-            n.diff(ctx, repo['tip'])
+            n.diff(ctx, repo[b'tip'])
     elif ctx.rev() in repo:
         if not n.node(ctx):
             ui.popbuffer()
-            ui.note(_('notify: suppressing notification for merge %d:%s\n') %
-                    (ctx.rev(), ctx.hex()[:12]))
+            ui.note(
+                _(b'notify: suppressing notification for merge %d:%s\n')
+                % (ctx.rev(), ctx.hex()[:12])
+            )
             return
         count += 1
         n.diff(ctx)
@@ -511,9 +545,30 @@
             author = ctx.user()
 
     data += ui.popbuffer()
-    fromauthor = ui.config('notify', 'fromauthor')
+    fromauthor = ui.config(b'notify', b'fromauthor')
     if author and fromauthor:
-        data = '\n'.join(['From: %s' % author, data])
+        data = b'\n'.join([b'From: %s' % author, data])
 
     if count:
         n.send(ctx, count, data)
+
+
+def messageid(ctx, domain, messageidseed):
+    if domain and messageidseed:
+        host = domain
+    else:
+        host = encoding.strtolocal(socket.getfqdn())
+    if messageidseed:
+        messagehash = hashlib.sha512(ctx.hex() + messageidseed)
+        messageid = b'<hg.%s@%s>' % (
+            pycompat.sysbytes(messagehash.hexdigest()[:64]),
+            host,
+        )
+    else:
+        messageid = b'<hg.%s.%d.%d@%s>' % (
+            ctx,
+            int(time.time()),
+            hash(ctx.repo().root),
+            host,
+        )
+    return encoding.strfromlocal(messageid)
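
When ``notify.messageidseed`` is set, ``messageid()`` above derives a
stable ``Message-Id`` by hashing the changeset hash together with the
seed, so repeated runs of the hook produce identical headers. A standalone
sketch of that branch, assuming both inputs are bytes::

    # Hash changeset hex + seed: same inputs, same Message-Id.
    import hashlib

    def messageid(ctxhex, domain, messageidseed):
        digest = hashlib.sha512(ctxhex + messageidseed).hexdigest()
        return b'<hg.%s@%s>' % (digest[:64].encode('ascii'), domain)

    one = messageid(b'a' * 40, b'example.com', b'seed')
    two = messageid(b'a' * 40, b'example.com', b'seed')
    assert one == two  # deterministic across runs
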
--- a/hgext/pager.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/pager.py	Mon Oct 21 11:09:48 2019 -0400
@@ -29,38 +29,37 @@
     dispatch,
     extensions,
     registrar,
-    )
+)
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('pager', 'attend',
-        default=lambda: attended,
+configitem(
+    b'pager', b'attend', default=lambda: attended,
 )
 
+
 def uisetup(ui):
-
     def pagecmd(orig, ui, options, cmd, cmdfunc):
-        auto = options['pager'] == 'auto'
+        auto = options[b'pager'] == b'auto'
         if auto and not ui.pageractive:
             usepager = False
-            attend = ui.configlist('pager', 'attend')
-            ignore = ui.configlist('pager', 'ignore')
+            attend = ui.configlist(b'pager', b'attend')
+            ignore = ui.configlist(b'pager', b'ignore')
             cmds, _ = cmdutil.findcmd(cmd, commands.table)
 
             for cmd in cmds:
-                var = 'attend-%s' % cmd
-                if ui.config('pager', var, None):
-                    usepager = ui.configbool('pager', var, True)
+                var = b'attend-%s' % cmd
+                if ui.config(b'pager', var, None):
+                    usepager = ui.configbool(b'pager', var, True)
                     break
-                if (cmd in attend or
-                     (cmd not in ignore and not attend)):
+                if cmd in attend or (cmd not in ignore and not attend):
                     usepager = True
                     break
 
@@ -70,12 +69,13 @@
                 # core code doesn't know about attend, so we have to
                 # lobotomize the ignore list so that the extension's
                 # behavior is preserved.
-                ui.setconfig('pager', 'ignore', '', 'pager')
-                ui.pager('extension-via-attend-' + cmd)
+                ui.setconfig(b'pager', b'ignore', b'', b'pager')
+                ui.pager(b'extension-via-attend-' + cmd)
             else:
                 ui.disablepager()
         return orig(ui, options, cmd, cmdfunc)
 
-    extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
+    extensions.wrapfunction(dispatch, b'_runcommand', pagecmd)
 
-attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
+
+attended = [b'annotate', b'cat', b'diff', b'export', b'glog', b'log', b'qdiff']
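
The simplified condition in ``pagecmd`` above keeps the original precedence
intact: an explicit ``pager.attend-<cmd>`` setting wins, then membership in
``pager.attend``, and an empty attend list pages every command not listed in
``pager.ignore``. A self-contained sketch of that decision (helper and
argument names are illustrative)::

    def wants_pager(cmd, attend, ignore, overrides):
        # overrides maps a command to the boolean parsed from
        # pager.attend-<cmd>; it beats the attend/ignore lists
        if cmd in overrides:
            return overrides[cmd]
        return cmd in attend or (cmd not in ignore and not attend)

    assert wants_pager('log', ['log', 'diff'], [], {})
    assert not wants_pager('status', ['log', 'diff'], [], {})
    assert wants_pager('status', [], ['version'], {})  # empty attend
    assert not wants_pager('log', ['log'], [], {'log': False})
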
--- a/hgext/patchbomb.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/patchbomb.py	Mon Oct 21 11:09:48 2019 -0400
@@ -74,7 +74,6 @@
 from __future__ import absolute_import
 
 import email.encoders as emailencoders
-import email.generator as emailgen
 import email.mime.base as emimebase
 import email.mime.multipart as emimemultipart
 import email.utils as eutil
@@ -83,6 +82,7 @@
 import socket
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     cmdutil,
     commands,
@@ -100,6 +100,7 @@
     util,
 )
 from mercurial.utils import dateutil
+
 stringio = util.stringio
 
 cmdtable = {}
@@ -108,47 +109,43 @@
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('patchbomb', 'bundletype',
-    default=None,
+configitem(
+    b'patchbomb', b'bundletype', default=None,
 )
-configitem('patchbomb', 'bcc',
-    default=None,
-)
-configitem('patchbomb', 'cc',
-    default=None,
+configitem(
+    b'patchbomb', b'bcc', default=None,
 )
-configitem('patchbomb', 'confirm',
-    default=False,
+configitem(
+    b'patchbomb', b'cc', default=None,
 )
-configitem('patchbomb', 'flagtemplate',
-    default=None,
+configitem(
+    b'patchbomb', b'confirm', default=False,
 )
-configitem('patchbomb', 'from',
-    default=None,
+configitem(
+    b'patchbomb', b'flagtemplate', default=None,
 )
-configitem('patchbomb', 'intro',
-    default='auto',
+configitem(
+    b'patchbomb', b'from', default=None,
 )
-configitem('patchbomb', 'publicurl',
-    default=None,
-)
-configitem('patchbomb', 'reply-to',
-    default=None,
+configitem(
+    b'patchbomb', b'intro', default=b'auto',
 )
-configitem('patchbomb', 'to',
-    default=None,
+configitem(
+    b'patchbomb', b'publicurl', default=None,
 )
-
-if pycompat.ispy3:
-    _bytesgenerator = emailgen.BytesGenerator
-else:
-    _bytesgenerator = emailgen.Generator
+configitem(
+    b'patchbomb', b'reply-to', default=None,
+)
+configitem(
+    b'patchbomb', b'to', default=None,
+)
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
 def _addpullheader(seq, ctx):
     """Add a header pointing to a public URL where the changeset is available
@@ -157,95 +154,118 @@
     # experimental config: patchbomb.publicurl
     # waiting for some logic that checks that the changesets are available on
     # the destination before patchbombing anything.
-    publicurl = repo.ui.config('patchbomb', 'publicurl')
+    publicurl = repo.ui.config(b'patchbomb', b'publicurl')
     if publicurl:
-        return ('Available At %s\n'
-                '#              hg pull %s -r %s' % (publicurl, publicurl, ctx))
+        return b'Available At %s\n#              hg pull %s -r %s' % (
+            publicurl,
+            publicurl,
+            ctx,
+        )
     return None
 
+
 def uisetup(ui):
-    cmdutil.extraexport.append('pullurl')
-    cmdutil.extraexportmap['pullurl'] = _addpullheader
+    cmdutil.extraexport.append(b'pullurl')
+    cmdutil.extraexportmap[b'pullurl'] = _addpullheader
+
 
 def reposetup(ui, repo):
     if not repo.local():
         return
-    repo._wlockfreeprefix.add('last-email.txt')
+    repo._wlockfreeprefix.add(b'last-email.txt')
+
 
-def prompt(ui, prompt, default=None, rest=':'):
+def prompt(ui, prompt, default=None, rest=b':'):
     if default:
-        prompt += ' [%s]' % default
+        prompt += b' [%s]' % default
     return ui.prompt(prompt + rest, default)
 
+
 def introwanted(ui, opts, number):
     '''is an introductory message apparently wanted?'''
-    introconfig = ui.config('patchbomb', 'intro')
-    if opts.get('intro') or opts.get('desc'):
+    introconfig = ui.config(b'patchbomb', b'intro')
+    if opts.get(b'intro') or opts.get(b'desc'):
         intro = True
-    elif introconfig == 'always':
+    elif introconfig == b'always':
         intro = True
-    elif introconfig == 'never':
+    elif introconfig == b'never':
         intro = False
-    elif introconfig == 'auto':
+    elif introconfig == b'auto':
         intro = number > 1
     else:
-        ui.write_err(_('warning: invalid patchbomb.intro value "%s"\n')
-                     % introconfig)
-        ui.write_err(_('(should be one of always, never, auto)\n'))
+        ui.write_err(
+            _(b'warning: invalid patchbomb.intro value "%s"\n') % introconfig
+        )
+        ui.write_err(_(b'(should be one of always, never, auto)\n'))
         intro = number > 1
     return intro
 
+
 def _formatflags(ui, repo, rev, flags):
     """build flag string optionally by template"""
-    tmpl = ui.config('patchbomb', 'flagtemplate')
+    tmpl = ui.config(b'patchbomb', b'flagtemplate')
     if not tmpl:
-        return ' '.join(flags)
+        return b' '.join(flags)
     out = util.stringio()
-    opts = {'template': templater.unquotestring(tmpl)}
-    with formatter.templateformatter(ui, out, 'patchbombflag', opts) as fm:
+    spec = formatter.templatespec(b'', templater.unquotestring(tmpl), None)
+    with formatter.templateformatter(ui, out, b'patchbombflag', {}, spec) as fm:
         fm.startitem()
         fm.context(ctx=repo[rev])
-        fm.write('flags', '%s', fm.formatlist(flags, name='flag'))
+        fm.write(b'flags', b'%s', fm.formatlist(flags, name=b'flag'))
     return out.getvalue()
 
+
 def _formatprefix(ui, repo, rev, flags, idx, total, numbered):
     """build prefix to patch subject"""
     flag = _formatflags(ui, repo, rev, flags)
     if flag:
-        flag = ' ' + flag
+        flag = b' ' + flag
 
     if not numbered:
-        return '[PATCH%s]' % flag
+        return b'[PATCH%s]' % flag
     else:
-        tlen = len("%d" % total)
-        return '[PATCH %0*d of %d%s]' % (tlen, idx, total, flag)
+        tlen = len(b"%d" % total)
+        return b'[PATCH %0*d of %d%s]' % (tlen, idx, total, flag)
+
 
-def makepatch(ui, repo, rev, patchlines, opts, _charsets, idx, total, numbered,
-              patchname=None):
+def makepatch(
+    ui,
+    repo,
+    rev,
+    patchlines,
+    opts,
+    _charsets,
+    idx,
+    total,
+    numbered,
+    patchname=None,
+):
 
     desc = []
     node = None
-    body = ''
+    body = b''
 
     for line in patchlines:
-        if line.startswith('#'):
-            if line.startswith('# Node ID'):
+        if line.startswith(b'#'):
+            if line.startswith(b'# Node ID'):
                 node = line.split()[-1]
             continue
-        if line.startswith('diff -r') or line.startswith('diff --git'):
+        if line.startswith(b'diff -r') or line.startswith(b'diff --git'):
             break
         desc.append(line)
 
     if not patchname and not node:
         raise ValueError
 
-    if opts.get('attach') and not opts.get('body'):
-        body = ('\n'.join(desc[1:]).strip() or
-                'Patch subject is complete summary.')
-        body += '\n\n\n'
+    if opts.get(b'attach') and not opts.get(b'body'):
+        body = (
+            b'\n'.join(desc[1:]).strip()
+            or b'Patch subject is complete summary.'
+        )
+        body += b'\n\n\n'
 
-    if opts.get('plain'):
-        while patchlines and patchlines[0].startswith('# '):
+    if opts.get(b'plain'):
+        while patchlines and patchlines[0].startswith(b'# '):
             patchlines.pop(0)
         if patchlines:
             patchlines.pop(0)
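
For reference, ``introwanted`` above resolves ``patchbomb.intro`` in a fixed
order: an explicit ``--intro`` or ``--desc`` wins, then the
``always``/``never`` settings, and ``auto`` (or any unrecognized value)
requests an introduction only when the series has more than one patch. A
standalone restatement of that table (argument names are illustrative)::

    def introwanted(config_value, num_patches, forced):
        if forced:  # --intro or --desc on the command line
            return True
        if config_value == 'always':
            return True
        if config_value == 'never':
            return False
        # 'auto' and unrecognized values: intro only for a real series
        return num_patches > 1

    assert not introwanted('auto', 1, False)
    assert introwanted('auto', 3, False)
    assert introwanted('never', 1, True)  # the flag overrides config
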
@@ -253,68 +273,78 @@
             patchlines.pop(0)
 
     ds = patch.diffstat(patchlines)
-    if opts.get('diffstat'):
-        body += ds + '\n\n'
+    if opts.get(b'diffstat'):
+        body += ds + b'\n\n'
 
-    addattachment = opts.get('attach') or opts.get('inline')
-    if not addattachment or opts.get('body'):
-        body += '\n'.join(patchlines)
+    addattachment = opts.get(b'attach') or opts.get(b'inline')
+    if not addattachment or opts.get(b'body'):
+        body += b'\n'.join(patchlines)
 
     if addattachment:
         msg = emimemultipart.MIMEMultipart()
         if body:
-            msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
-        p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch',
-                               opts.get('test'))
+            msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(b'test')))
+        p = mail.mimetextpatch(
+            b'\n'.join(patchlines), b'x-patch', opts.get(b'test')
+        )
         binnode = nodemod.bin(node)
         # if node is an mq patch, it will have the patch file's name as a tag
         if not patchname:
-            patchtags = [t for t in repo.nodetags(binnode)
-                         if t.endswith('.patch') or t.endswith('.diff')]
+            patchtags = [
+                t
+                for t in repo.nodetags(binnode)
+                if t.endswith(b'.patch') or t.endswith(b'.diff')
+            ]
             if patchtags:
                 patchname = patchtags[0]
             elif total > 1:
-                patchname = cmdutil.makefilename(repo[node], '%b-%n.patch',
-                                                 seqno=idx, total=total)
+                patchname = cmdutil.makefilename(
+                    repo[node], b'%b-%n.patch', seqno=idx, total=total
+                )
             else:
-                patchname = cmdutil.makefilename(repo[node], '%b.patch')
+                patchname = cmdutil.makefilename(repo[node], b'%b.patch')
         disposition = r'inline'
-        if opts.get('attach'):
+        if opts.get(b'attach'):
             disposition = r'attachment'
         p[r'Content-Disposition'] = (
-            disposition + r'; filename=' + encoding.strfromlocal(patchname))
+            disposition + r'; filename=' + encoding.strfromlocal(patchname)
+        )
         msg.attach(p)
     else:
-        msg = mail.mimetextpatch(body, display=opts.get('test'))
+        msg = mail.mimetextpatch(body, display=opts.get(b'test'))
 
-    prefix = _formatprefix(ui, repo, rev, opts.get('flag'), idx, total,
-                           numbered)
-    subj = desc[0].strip().rstrip('. ')
+    prefix = _formatprefix(
+        ui, repo, rev, opts.get(b'flag'), idx, total, numbered
+    )
+    subj = desc[0].strip().rstrip(b'. ')
     if not numbered:
-        subj = ' '.join([prefix, opts.get('subject') or subj])
+        subj = b' '.join([prefix, opts.get(b'subject') or subj])
     else:
-        subj = ' '.join([prefix, subj])
-    msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
-    msg['X-Mercurial-Node'] = node
-    msg['X-Mercurial-Series-Index'] = '%i' % idx
-    msg['X-Mercurial-Series-Total'] = '%i' % total
+        subj = b' '.join([prefix, subj])
+    msg[b'Subject'] = mail.headencode(ui, subj, _charsets, opts.get(b'test'))
+    msg[b'X-Mercurial-Node'] = node
+    msg[b'X-Mercurial-Series-Index'] = b'%i' % idx
+    msg[b'X-Mercurial-Series-Total'] = b'%i' % total
     return msg, subj, ds
 
+
 def _getpatches(repo, revs, **opts):
     """return a list of patches for a list of revisions
 
     Each patch in the list is itself a list of lines.
     """
     ui = repo.ui
-    prev = repo['.'].rev()
+    prev = repo[b'.'].rev()
     for r in revs:
         if r == prev and (repo[None].files() or repo[None].deleted()):
-            ui.warn(_('warning: working directory has '
-                      'uncommitted changes\n'))
+            ui.warn(_(b'warning: working directory has uncommitted changes\n'))
         output = stringio()
-        cmdutil.exportfile(repo, [r], output,
-                           opts=patch.difffeatureopts(ui, opts, git=True))
-        yield output.getvalue().split('\n')
+        cmdutil.exportfile(
+            repo, [r], output, opts=patch.difffeatureopts(ui, opts, git=True)
+        )
+        yield output.getvalue().split(b'\n')
+
+
 def _getbundle(repo, dest, **opts):
     """return a bundle containing changesets missing in "dest"
 
@@ -324,9 +354,9 @@
     The bundle is returned as a single in-memory binary blob.
     """
     ui = repo.ui
-    tmpdir = pycompat.mkdtemp(prefix='hg-email-bundle-')
-    tmpfn = os.path.join(tmpdir, 'bundle')
-    btype = ui.config('patchbomb', 'bundletype')
+    tmpdir = pycompat.mkdtemp(prefix=b'hg-email-bundle-')
+    tmpfn = os.path.join(tmpdir, b'bundle')
+    btype = ui.config(b'patchbomb', b'bundletype')
     if btype:
         opts[r'type'] = btype
     try:
@@ -339,6 +369,7 @@
             pass
         os.rmdir(tmpdir)
 
+
 def _getdescription(repo, defaultbody, sender, **opts):
     """obtain the body of the introduction message and return it
 
@@ -351,16 +382,19 @@
     if opts.get(r'desc'):
         body = open(opts.get(r'desc')).read()
     else:
-        ui.write(_('\nWrite the introductory message for the '
-                   'patch series.\n\n'))
-        body = ui.edit(defaultbody, sender, repopath=repo.path,
-                       action='patchbombbody')
+        ui.write(
+            _(b'\nWrite the introductory message for the patch series.\n\n')
+        )
+        body = ui.edit(
+            defaultbody, sender, repopath=repo.path, action=b'patchbombbody'
+        )
         # Save series description in case sendmail fails
-        msgfile = repo.vfs('last-email.txt', 'wb')
+        msgfile = repo.vfs(b'last-email.txt', b'wb')
         msgfile.write(body)
         msgfile.close()
     return body
 
+
 def _getbundlemsgs(repo, sender, bundle, **opts):
     """Get the full email for sending a given bundle
 
@@ -369,23 +403,28 @@
     """
     ui = repo.ui
     _charsets = mail._charsets(ui)
-    subj = (opts.get(r'subject')
-            or prompt(ui, 'Subject:', 'A bundle for your repository'))
+    subj = opts.get(r'subject') or prompt(
+        ui, b'Subject:', b'A bundle for your repository'
+    )
 
-    body = _getdescription(repo, '', sender, **opts)
+    body = _getdescription(repo, b'', sender, **opts)
     msg = emimemultipart.MIMEMultipart()
     if body:
         msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(r'test')))
     datapart = emimebase.MIMEBase(r'application', r'x-mercurial-bundle')
     datapart.set_payload(bundle)
-    bundlename = '%s.hg' % opts.get(r'bundlename', 'bundle')
-    datapart.add_header(r'Content-Disposition', r'attachment',
-                        filename=encoding.strfromlocal(bundlename))
+    bundlename = b'%s.hg' % opts.get(r'bundlename', b'bundle')
+    datapart.add_header(
+        r'Content-Disposition',
+        r'attachment',
+        filename=encoding.strfromlocal(bundlename),
+    )
     emailencoders.encode_base64(datapart)
     msg.attach(datapart)
-    msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get(r'test'))
+    msg[b'Subject'] = mail.headencode(ui, subj, _charsets, opts.get(r'test'))
     return [(msg, subj, None)]
 
+
 def _makeintro(repo, sender, revs, patches, **opts):
     """make an introduction email, asking the user for content if needed
 
@@ -394,29 +433,31 @@
     _charsets = mail._charsets(ui)
 
     # use the last revision which is likely to be a bookmarked head
-    prefix = _formatprefix(ui, repo, revs.last(), opts.get(r'flag'),
-                           0, len(patches), numbered=True)
-    subj = (opts.get(r'subject') or
-            prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
+    prefix = _formatprefix(
+        ui, repo, revs.last(), opts.get(r'flag'), 0, len(patches), numbered=True
+    )
+    subj = opts.get(r'subject') or prompt(
+        ui, b'(optional) Subject: ', rest=prefix, default=b''
+    )
     if not subj:
-        return None         # skip intro if the user doesn't bother
+        return None  # skip intro if the user doesn't bother
 
-    subj = prefix + ' ' + subj
+    subj = prefix + b' ' + subj
 
-    body = ''
+    body = b''
     if opts.get(r'diffstat'):
         # generate a cumulative diffstat of the whole patch series
         diffstat = patch.diffstat(sum(patches, []))
-        body = '\n' + diffstat
+        body = b'\n' + diffstat
     else:
         diffstat = None
 
     body = _getdescription(repo, body, sender, **opts)
     msg = mail.mimeencode(ui, body, _charsets, opts.get(r'test'))
-    msg['Subject'] = mail.headencode(ui, subj, _charsets,
-                                     opts.get(r'test'))
+    msg[b'Subject'] = mail.headencode(ui, subj, _charsets, opts.get(r'test'))
     return (msg, subj, diffstat)
 
+
 def _getpatchmsgs(repo, sender, revs, patchnames=None, **opts):
     """return a list of emails from a list of patches
 
@@ -430,8 +471,7 @@
     patches = list(_getpatches(repo, revs, **opts))
     msgs = []
 
-    ui.write(_('this patch series consists of %d patches.\n\n')
-             % len(patches))
+    ui.write(_(b'this patch series consists of %d patches.\n\n') % len(patches))
 
     # build the intro message, or skip it if the user declines
     if introwanted(ui, bytesopts, len(patches)):
@@ -448,75 +488,171 @@
     for i, (r, p) in enumerate(zip(revs, patches)):
         if patchnames:
             name = patchnames[i]
-        msg = makepatch(ui, repo, r, p, bytesopts, _charsets,
-                        i + 1, len(patches), numbered, name)
+        msg = makepatch(
+            ui,
+            repo,
+            r,
+            p,
+            bytesopts,
+            _charsets,
+            i + 1,
+            len(patches),
+            numbered,
+            name,
+        )
         msgs.append(msg)
 
     return msgs
 
+
 def _getoutgoing(repo, dest, revs):
     '''Return the revisions present locally but not in dest'''
     ui = repo.ui
-    url = ui.expandpath(dest or 'default-push', dest or 'default')
+    url = ui.expandpath(dest or b'default-push', dest or b'default')
     url = hg.parseurl(url)[0]
-    ui.status(_('comparing with %s\n') % util.hidepassword(url))
+    ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
 
     revs = [r for r in revs if r >= 0]
     if not revs:
         revs = [repo.changelog.tiprev()]
-    revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs)
+    revs = repo.revs(b'outgoing(%s) and ::%ld', dest or b'', revs)
     if not revs:
-        ui.status(_("no changes found\n"))
+        ui.status(_(b"no changes found\n"))
     return revs
 
+
 def _msgid(node, timestamp):
     hostname = encoding.strtolocal(socket.getfqdn())
-    hostname = encoding.environ.get('HGHOSTNAME', hostname)
-    return '<%s.%d@%s>' % (node, timestamp, hostname)
+    hostname = encoding.environ.get(b'HGHOSTNAME', hostname)
+    return b'<%s.%d@%s>' % (node, timestamp, hostname)
+
 
 emailopts = [
-    ('', 'body', None, _('send patches as inline message text (default)')),
-    ('a', 'attach', None, _('send patches as attachments')),
-    ('i', 'inline', None, _('send patches as inline attachments')),
-    ('', 'bcc', [],
-     _('email addresses of blind carbon copy recipients'), _('EMAIL')),
-    ('c', 'cc', [], _('email addresses of copy recipients'), _('EMAIL')),
-    ('', 'confirm', None, _('ask for confirmation before sending')),
-    ('d', 'diffstat', None, _('add diffstat output to messages')),
-    ('', 'date', '', _('use the given date as the sending date'), _('DATE')),
-    ('', 'desc', '',
-     _('use the given file as the series description'), _('FILE')),
-    ('f', 'from', '', _('email address of sender'), _('EMAIL')),
-    ('n', 'test', None, _('print messages that would be sent')),
-    ('m', 'mbox', '',
-     _('write messages to mbox file instead of sending them'), _('FILE')),
-    ('', 'reply-to', [],
-     _('email addresses replies should be sent to'), _('EMAIL')),
-    ('s', 'subject', '',
-     _('subject of first message (intro or single patch)'), _('TEXT')),
-    ('', 'in-reply-to', '', _('message identifier to reply to'), _('MSGID')),
-    ('', 'flag', [], _('flags to add in subject prefixes'), _('FLAG')),
-    ('t', 'to', [], _('email addresses of recipients'), _('EMAIL'))]
+    (b'', b'body', None, _(b'send patches as inline message text (default)')),
+    (b'a', b'attach', None, _(b'send patches as attachments')),
+    (b'i', b'inline', None, _(b'send patches as inline attachments')),
+    (
+        b'',
+        b'bcc',
+        [],
+        _(b'email addresses of blind carbon copy recipients'),
+        _(b'EMAIL'),
+    ),
+    (b'c', b'cc', [], _(b'email addresses of copy recipients'), _(b'EMAIL')),
+    (b'', b'confirm', None, _(b'ask for confirmation before sending')),
+    (b'd', b'diffstat', None, _(b'add diffstat output to messages')),
+    (
+        b'',
+        b'date',
+        b'',
+        _(b'use the given date as the sending date'),
+        _(b'DATE'),
+    ),
+    (
+        b'',
+        b'desc',
+        b'',
+        _(b'use the given file as the series description'),
+        _(b'FILE'),
+    ),
+    (b'f', b'from', b'', _(b'email address of sender'), _(b'EMAIL')),
+    (b'n', b'test', None, _(b'print messages that would be sent')),
+    (
+        b'm',
+        b'mbox',
+        b'',
+        _(b'write messages to mbox file instead of sending them'),
+        _(b'FILE'),
+    ),
+    (
+        b'',
+        b'reply-to',
+        [],
+        _(b'email addresses replies should be sent to'),
+        _(b'EMAIL'),
+    ),
+    (
+        b's',
+        b'subject',
+        b'',
+        _(b'subject of first message (intro or single patch)'),
+        _(b'TEXT'),
+    ),
+    (
+        b'',
+        b'in-reply-to',
+        b'',
+        _(b'message identifier to reply to'),
+        _(b'MSGID'),
+    ),
+    (b'', b'flag', [], _(b'flags to add in subject prefixes'), _(b'FLAG')),
+    (b't', b'to', [], _(b'email addresses of recipients'), _(b'EMAIL')),
+]
+
 
-@command('email',
-    [('g', 'git', None, _('use git extended diff format')),
-    ('', 'plain', None, _('omit hg patch header')),
-    ('o', 'outgoing', None,
-     _('send changes not found in the target repository')),
-    ('b', 'bundle', None, _('send changes not in target as a binary bundle')),
-    ('B', 'bookmark', '',
-     _('send changes only reachable by given bookmark'), _('BOOKMARK')),
-    ('', 'bundlename', 'bundle',
-     _('name of the bundle attachment file'), _('NAME')),
-    ('r', 'rev', [], _('a revision to send'), _('REV')),
-    ('', 'force', None, _('run even when remote repository is unrelated '
-       '(with -b/--bundle)')),
-    ('', 'base', [], _('a base changeset to specify instead of a destination '
-       '(with -b/--bundle)'), _('REV')),
-    ('', 'intro', None, _('send an introduction email for a single patch')),
-    ] + emailopts + cmdutil.remoteopts,
-    _('hg email [OPTION]... [DEST]...'),
-    helpcategory=command.CATEGORY_IMPORT_EXPORT)
+@command(
+    b'email',
+    [
+        (b'g', b'git', None, _(b'use git extended diff format')),
+        (b'', b'plain', None, _(b'omit hg patch header')),
+        (
+            b'o',
+            b'outgoing',
+            None,
+            _(b'send changes not found in the target repository'),
+        ),
+        (
+            b'b',
+            b'bundle',
+            None,
+            _(b'send changes not in target as a binary bundle'),
+        ),
+        (
+            b'B',
+            b'bookmark',
+            b'',
+            _(b'send changes only reachable by given bookmark'),
+            _(b'BOOKMARK'),
+        ),
+        (
+            b'',
+            b'bundlename',
+            b'bundle',
+            _(b'name of the bundle attachment file'),
+            _(b'NAME'),
+        ),
+        (b'r', b'rev', [], _(b'a revision to send'), _(b'REV')),
+        (
+            b'',
+            b'force',
+            None,
+            _(
+                b'run even when remote repository is unrelated '
+                b'(with -b/--bundle)'
+            ),
+        ),
+        (
+            b'',
+            b'base',
+            [],
+            _(
+                b'a base changeset to specify instead of a destination '
+                b'(with -b/--bundle)'
+            ),
+            _(b'REV'),
+        ),
+        (
+            b'',
+            b'intro',
+            None,
+            _(b'send an introduction email for a single patch'),
+        ),
+    ]
+    + emailopts
+    + cmdutil.remoteopts,
+    _(b'hg email [OPTION]... [DEST]...'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT,
+)
 def email(ui, repo, *revs, **opts):
     '''send changesets by email
 
@@ -606,29 +742,35 @@
 
     _charsets = mail._charsets(ui)
 
-    bundle = opts.get('bundle')
-    date = opts.get('date')
-    mbox = opts.get('mbox')
-    outgoing = opts.get('outgoing')
-    rev = opts.get('rev')
-    bookmark = opts.get('bookmark')
+    bundle = opts.get(b'bundle')
+    date = opts.get(b'date')
+    mbox = opts.get(b'mbox')
+    outgoing = opts.get(b'outgoing')
+    rev = opts.get(b'rev')
+    bookmark = opts.get(b'bookmark')
 
-    if not (opts.get('test') or mbox):
+    if not (opts.get(b'test') or mbox):
         # really sending
         mail.validateconfig(ui)
 
     if not (revs or rev or outgoing or bundle or bookmark):
-        raise error.Abort(_('specify at least one changeset with -B, -r or -o'))
+        raise error.Abort(
+            _(b'specify at least one changeset with -B, -r or -o')
+        )
 
     if outgoing and bundle:
-        raise error.Abort(_("--outgoing mode always on with --bundle;"
-                           " do not re-specify --outgoing"))
+        raise error.Abort(
+            _(
+                b"--outgoing mode always on with --bundle;"
+                b" do not re-specify --outgoing"
+            )
+        )
     if rev and bookmark:
-        raise error.Abort(_("-r and -B are mutually exclusive"))
+        raise error.Abort(_(b"-r and -B are mutually exclusive"))
 
     if outgoing or bundle:
         if len(revs) > 1:
-            raise error.Abort(_("too many destinations"))
+            raise error.Abort(_(b"too many destinations"))
         if revs:
             dest = revs[0]
         else:
@@ -637,31 +779,32 @@
 
     if rev:
         if revs:
-            raise error.Abort(_('use only one form to specify the revision'))
+            raise error.Abort(_(b'use only one form to specify the revision'))
         revs = rev
     elif bookmark:
         if bookmark not in repo._bookmarks:
-            raise error.Abort(_("bookmark '%s' not found") % bookmark)
+            raise error.Abort(_(b"bookmark '%s' not found") % bookmark)
         revs = scmutil.bookmarkrevs(repo, bookmark)
 
     revs = scmutil.revrange(repo, revs)
     if outgoing:
         revs = _getoutgoing(repo, dest, revs)
     if bundle:
-        opts['revs'] = ["%d" % r for r in revs]
+        opts[b'revs'] = [b"%d" % r for r in revs]
 
     # check if revision exist on the public destination
-    publicurl = repo.ui.config('patchbomb', 'publicurl')
+    publicurl = repo.ui.config(b'patchbomb', b'publicurl')
     if publicurl:
-        repo.ui.debug('checking that revision exist in the public repo\n')
+        repo.ui.debug(b'checking that revisions exist in the public repo\n')
         try:
             publicpeer = hg.peer(repo, {}, publicurl)
         except error.RepoError:
-            repo.ui.write_err(_('unable to access public repo: %s\n')
-                              % publicurl)
+            repo.ui.write_err(
+                _(b'unable to access public repo: %s\n') % publicurl
+            )
             raise
-        if not publicpeer.capable('known'):
-            repo.ui.debug('skipping existence checks: public repo too old\n')
+        if not publicpeer.capable(b'known'):
+            repo.ui.debug(b'skipping existence checks: public repo too old\n')
         else:
             out = [repo[r] for r in revs]
             known = publicpeer.known(h.node() for h in out)
@@ -671,15 +814,16 @@
                     missing.append(h)
             if missing:
                 if len(missing) > 1:
-                    msg = _('public "%s" is missing %s and %i others')
+                    msg = _(b'public "%s" is missing %s and %i others')
                     msg %= (publicurl, missing[0], len(missing) - 1)
                 else:
-                    msg = _('public url %s is missing %s')
+                    msg = _(b'public url %s is missing %s')
                     msg %= (publicurl, missing[0])
                 missingrevs = [ctx.rev() for ctx in missing]
-                revhint = ' '.join('-r %s' % h
-                                   for h in repo.set('heads(%ld)', missingrevs))
-                hint = _("use 'hg push %s %s'") % (publicurl, revhint)
+                revhint = b' '.join(
+                    b'-r %s' % h for h in repo.set(b'heads(%ld)', missingrevs)
+                )
+                hint = _(b"use 'hg push %s %s'") % (publicurl, revhint)
                 raise error.Abort(msg, hint=hint)
 
     # start
@@ -692,9 +836,12 @@
         return _msgid(id[:20], int(start_time[0]))
 
     # deprecated config: patchbomb.from
-    sender = (opts.get('from') or ui.config('email', 'from') or
-              ui.config('patchbomb', 'from') or
-              prompt(ui, 'From', ui.username()))
+    sender = (
+        opts.get(b'from')
+        or ui.config(b'email', b'from')
+        or ui.config(b'patchbomb', b'from')
+        or prompt(ui, b'From', ui.username())
+    )
 
     if bundle:
         stropts = pycompat.strkwargs(opts)
@@ -709,94 +856,100 @@
 
     def getaddrs(header, ask=False, default=None):
         configkey = header.lower()
-        opt = header.replace('-', '_').lower()
+        opt = header.replace(b'-', b'_').lower()
         addrs = opts.get(opt)
         if addrs:
-            showaddrs.append('%s: %s' % (header, ', '.join(addrs)))
-            return mail.addrlistencode(ui, addrs, _charsets, opts.get('test'))
+            showaddrs.append(b'%s: %s' % (header, b', '.join(addrs)))
+            return mail.addrlistencode(ui, addrs, _charsets, opts.get(b'test'))
 
         # not on the command line: fallback to config and then maybe ask
-        addr = (ui.config('email', configkey) or
-                ui.config('patchbomb', configkey))
+        addr = ui.config(b'email', configkey) or ui.config(
+            b'patchbomb', configkey
+        )
         if not addr:
-            specified = (ui.hasconfig('email', configkey) or
-                         ui.hasconfig('patchbomb', configkey))
+            specified = ui.hasconfig(b'email', configkey) or ui.hasconfig(
+                b'patchbomb', configkey
+            )
             if not specified and ask:
                 addr = prompt(ui, header, default=default)
         if addr:
-            showaddrs.append('%s: %s' % (header, addr))
-            return mail.addrlistencode(ui, [addr], _charsets, opts.get('test'))
+            showaddrs.append(b'%s: %s' % (header, addr))
+            return mail.addrlistencode(ui, [addr], _charsets, opts.get(b'test'))
         elif default:
             return mail.addrlistencode(
-                ui, [default], _charsets, opts.get('test'))
+                ui, [default], _charsets, opts.get(b'test')
+            )
         return []
 
-    to = getaddrs('To', ask=True)
+    to = getaddrs(b'To', ask=True)
     if not to:
         # we can get here in non-interactive mode
-        raise error.Abort(_('no recipient addresses provided'))
-    cc = getaddrs('Cc', ask=True, default='')
-    bcc = getaddrs('Bcc')
-    replyto = getaddrs('Reply-To')
+        raise error.Abort(_(b'no recipient addresses provided'))
+    cc = getaddrs(b'Cc', ask=True, default=b'')
+    bcc = getaddrs(b'Bcc')
+    replyto = getaddrs(b'Reply-To')
 
-    confirm = ui.configbool('patchbomb', 'confirm')
-    confirm |= bool(opts.get('diffstat') or opts.get('confirm'))
+    confirm = ui.configbool(b'patchbomb', b'confirm')
+    confirm |= bool(opts.get(b'diffstat') or opts.get(b'confirm'))
 
     if confirm:
-        ui.write(_('\nFinal summary:\n\n'), label='patchbomb.finalsummary')
-        ui.write(('From: %s\n' % sender), label='patchbomb.from')
+        ui.write(_(b'\nFinal summary:\n\n'), label=b'patchbomb.finalsummary')
+        ui.write((b'From: %s\n' % sender), label=b'patchbomb.from')
         for addr in showaddrs:
-            ui.write('%s\n' % addr, label='patchbomb.to')
+            ui.write(b'%s\n' % addr, label=b'patchbomb.to')
         for m, subj, ds in msgs:
-            ui.write(('Subject: %s\n' % subj), label='patchbomb.subject')
+            ui.write((b'Subject: %s\n' % subj), label=b'patchbomb.subject')
             if ds:
-                ui.write(ds, label='patchbomb.diffstats')
-        ui.write('\n')
-        if ui.promptchoice(_('are you sure you want to send (yn)?'
-                             '$$ &Yes $$ &No')):
-            raise error.Abort(_('patchbomb canceled'))
+                ui.write(ds, label=b'patchbomb.diffstats')
+        ui.write(b'\n')
+        if ui.promptchoice(
+            _(b'are you sure you want to send (yn)?$$ &Yes $$ &No')
+        ):
+            raise error.Abort(_(b'patchbomb canceled'))
 
-    ui.write('\n')
+    ui.write(b'\n')
 
-    parent = opts.get('in_reply_to') or None
+    parent = opts.get(b'in_reply_to') or None
     # angle brackets may be omitted, they're not semantically part of the msg-id
     if parent is not None:
-        if not parent.startswith('<'):
-            parent = '<' + parent
-        if not parent.endswith('>'):
-            parent += '>'
+        if not parent.startswith(b'<'):
+            parent = b'<' + parent
+        if not parent.endswith(b'>'):
+            parent += b'>'
 
     sender_addr = eutil.parseaddr(encoding.strfromlocal(sender))[1]
-    sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
+    sender = mail.addressencode(ui, sender, _charsets, opts.get(b'test'))
     sendmail = None
     firstpatch = None
-    progress = ui.makeprogress(_('sending'), unit=_('emails'), total=len(msgs))
+    progress = ui.makeprogress(
+        _(b'sending'), unit=_(b'emails'), total=len(msgs)
+    )
     for i, (m, subj, ds) in enumerate(msgs):
         try:
-            m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
+            m[b'Message-Id'] = genmsgid(m[b'X-Mercurial-Node'])
             if not firstpatch:
-                firstpatch = m['Message-Id']
-            m['X-Mercurial-Series-Id'] = firstpatch
+                firstpatch = m[b'Message-Id']
+            m[b'X-Mercurial-Series-Id'] = firstpatch
         except TypeError:
-            m['Message-Id'] = genmsgid('patchbomb')
+            m[b'Message-Id'] = genmsgid(b'patchbomb')
         if parent:
-            m['In-Reply-To'] = parent
-            m['References'] = parent
-        if not parent or 'X-Mercurial-Node' not in m:
-            parent = m['Message-Id']
+            m[b'In-Reply-To'] = parent
+            m[b'References'] = parent
+        if not parent or b'X-Mercurial-Node' not in m:
+            parent = m[b'Message-Id']
 
-        m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
-        m['Date'] = eutil.formatdate(start_time[0], localtime=True)
+        m[b'User-Agent'] = b'Mercurial-patchbomb/%s' % util.version()
+        m[b'Date'] = eutil.formatdate(start_time[0], localtime=True)
 
         start_time = (start_time[0] + 1, start_time[1])
-        m['From'] = sender
-        m['To'] = ', '.join(to)
+        m[b'From'] = sender
+        m[b'To'] = b', '.join(to)
         if cc:
-            m['Cc']  = ', '.join(cc)
+            m[b'Cc'] = b', '.join(cc)
         if bcc:
-            m['Bcc'] = ', '.join(bcc)
+            m[b'Bcc'] = b', '.join(bcc)
         if replyto:
-            m['Reply-To'] = ', '.join(replyto)
+            m[b'Reply-To'] = b', '.join(replyto)
         # Fix up all headers to be native strings.
         # TODO(durin42): this should probably be cleaned up above in the future.
         if pycompat.ispy3:
@@ -814,26 +967,26 @@
                     change = True
                 if change:
                     m[hdr] = val
-        if opts.get('test'):
-            ui.status(_('displaying '), subj, ' ...\n')
-            ui.pager('email')
-            generator = _bytesgenerator(ui, mangle_from_=False)
+        if opts.get(b'test'):
+            ui.status(_(b'displaying '), subj, b' ...\n')
+            ui.pager(b'email')
+            generator = mail.Generator(ui, mangle_from_=False)
             try:
                 generator.flatten(m, 0)
-                ui.write('\n')
+                ui.write(b'\n')
             except IOError as inst:
                 if inst.errno != errno.EPIPE:
                     raise
         else:
             if not sendmail:
                 sendmail = mail.connect(ui, mbox=mbox)
-            ui.status(_('sending '), subj, ' ...\n')
+            ui.status(_(b'sending '), subj, b' ...\n')
             progress.update(i, item=subj)
             if not mbox:
                 # Exim does not remove the Bcc field
-                del m['Bcc']
+                del m[b'Bcc']
             fp = stringio()
-            generator = _bytesgenerator(fp, mangle_from_=False)
+            generator = mail.Generator(fp, mangle_from_=False)
             generator.flatten(m, 0)
             alldests = to + bcc + cc
             alldests = [encoding.strfromlocal(d) for d in alldests]
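
One detail worth noting in the send loop above: the ``In-Reply-To`` and
``References`` bookkeeping produces a flat thread in which every patch
replies to the introduction message when one exists, and otherwise to the
first patch (the intro is recognizable because it carries no
``X-Mercurial-Node`` header). A standalone sketch of the same parent
tracking (names are illustrative)::

    def thread_headers(msg_ids, is_patch):
        # is_patch[i] is False only for the intro message
        parent = None
        pairs = []
        for mid, patchmail in zip(msg_ids, is_patch):
            pairs.append((mid, parent))  # parent feeds In-Reply-To
            if parent is None or not patchmail:
                parent = mid
        return pairs

    series = thread_headers(['<intro>', '<p1>', '<p2>'],
                            [False, True, True])
    assert series == [('<intro>', None),
                      ('<p1>', '<intro>'),
                      ('<p2>', '<intro>')]
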
--- a/hgext/phabricator.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/phabricator.py	Mon Oct 21 11:09:48 2019 -0400
@@ -41,27 +41,33 @@
 
 from __future__ import absolute_import
 
+import base64
 import contextlib
+import hashlib
 import itertools
 import json
+import mimetypes
 import operator
 import re
 
 from mercurial.node import bin, nullid
 from mercurial.i18n import _
+from mercurial.pycompat import getattr
+from mercurial.thirdparty import attr
 from mercurial import (
     cmdutil,
     context,
     encoding,
     error,
+    exthelper,
     httpconnection as httpconnectionmod,
+    match,
     mdiff,
     obsutil,
     parser,
     patch,
     phases,
     pycompat,
-    registrar,
     scmutil,
     smartset,
     tags,
@@ -79,33 +85,34 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
+eh = exthelper.exthelper()
 
-cmdtable = {}
-command = registrar.command(cmdtable)
-
-configtable = {}
-configitem = registrar.configitem(configtable)
+cmdtable = eh.cmdtable
+command = eh.command
+configtable = eh.configtable
+templatekeyword = eh.templatekeyword
 
 # developer config: phabricator.batchsize
-configitem(b'phabricator', b'batchsize',
-    default=12,
+eh.configitem(
+    b'phabricator', b'batchsize', default=12,
 )
-configitem(b'phabricator', b'callsign',
-    default=None,
+eh.configitem(
+    b'phabricator', b'callsign', default=None,
 )
-configitem(b'phabricator', b'curlcmd',
-    default=None,
+eh.configitem(
+    b'phabricator', b'curlcmd', default=None,
 )
 # developer config: phabricator.repophid
-configitem(b'phabricator', b'repophid',
-    default=None,
+eh.configitem(
+    b'phabricator', b'repophid', default=None,
 )
-configitem(b'phabricator', b'url',
-    default=None,
+eh.configitem(
+    b'phabricator', b'url', default=None,
 )
-configitem(b'phabsend', b'confirm',
-    default=False,
+eh.configitem(
+    b'phabsend', b'confirm', default=False,
 )
 
 colortable = {
@@ -118,27 +125,44 @@
 }
 
 _VCR_FLAGS = [
-    (b'', b'test-vcr', b'',
-     _(b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
-       b', otherwise will mock all http requests using the specified vcr file.'
-       b' (ADVANCED)'
-     )),
+    (
+        b'',
+        b'test-vcr',
+        b'',
+        _(
+            b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
+            b', otherwise will mock all http requests using the specified vcr file.'
+            b' (ADVANCED)'
+        ),
+    ),
 ]
 
+
 def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
     fullflags = flags + _VCR_FLAGS
+
     def hgmatcher(r1, r2):
         if r1.uri != r2.uri or r1.method != r2.method:
             return False
-        r1params = r1.body.split(b'&')
-        r2params = r2.body.split(b'&')
-        return set(r1params) == set(r2params)
+        r1params = util.urlreq.parseqs(r1.body)
+        r2params = util.urlreq.parseqs(r2.body)
+        for key in r1params:
+            if key not in r2params:
+                return False
+            value = r1params[key][0]
+            # we want to compare json payloads without worrying about ordering
+            if value.startswith(b'{') and value.endswith(b'}'):
+                r1json = json.loads(value)
+                r2json = json.loads(r2params[key][0])
+                if r1json != r2json:
+                    return False
+            elif r2params[key][0] != value:
+                return False
+        return True
 
     def sanitiserequest(request):
         request.body = re.sub(
-            r'cli-[a-z0-9]+',
-            r'cli-hahayouwish',
-            request.body
+            br'cli-[a-z0-9]+', br'cli-hahayouwish', request.body
         )
         return request
 
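
The reworked ``hgmatcher`` above no longer compares raw request bodies: it
parses both as form data and compares JSON-looking values structurally, so
recorded cassettes still match when dict key order changes between runs. The
idea, sketched with stdlib equivalents (upstream parses bytes with
``util.urlreq.parseqs``)::

    import json
    from urllib.parse import parse_qs

    def bodies_match(body1, body2):
        p1, p2 = parse_qs(body1), parse_qs(body2)
        if set(p1) != set(p2):
            return False
        for key, values in p1.items():
            v1, v2 = values[0], p2[key][0]
            if v1.startswith('{') and v1.endswith('}'):
                # JSON payload: compare parsed objects, not text
                if json.loads(v1) != json.loads(v2):
                    return False
            elif v1 != v2:
                return False
        return True

    assert bodies_match('output=json&params={"a": 1, "b": 2}',
                        'output=json&params={"b": 2, "a": 1}')
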
@@ -152,29 +176,46 @@
             cassette = pycompat.fsdecode(kwargs.pop(r'test_vcr', None))
             if cassette:
                 import hgdemandimport
+
                 with hgdemandimport.deactivated():
                     import vcr as vcrmod
                     import vcr.stubs as stubs
+
                     vcr = vcrmod.VCR(
                         serializer=r'json',
                         before_record_request=sanitiserequest,
                         before_record_response=sanitiseresponse,
                         custom_patches=[
-                            (urlmod, r'httpconnection',
-                             stubs.VCRHTTPConnection),
-                            (urlmod, r'httpsconnection',
-                             stubs.VCRHTTPSConnection),
-                        ])
+                            (
+                                urlmod,
+                                r'httpconnection',
+                                stubs.VCRHTTPConnection,
+                            ),
+                            (
+                                urlmod,
+                                r'httpsconnection',
+                                stubs.VCRHTTPSConnection,
+                            ),
+                        ],
+                    )
                     vcr.register_matcher(r'hgmatcher', hgmatcher)
                     with vcr.use_cassette(cassette, match_on=[r'hgmatcher']):
                         return fn(*args, **kwargs)
             return fn(*args, **kwargs)
+
         inner.__name__ = fn.__name__
         inner.__doc__ = fn.__doc__
-        return command(name, fullflags, spec, helpcategory=helpcategory,
-                       optionalrepo=optionalrepo)(inner)
+        return command(
+            name,
+            fullflags,
+            spec,
+            helpcategory=helpcategory,
+            optionalrepo=optionalrepo,
+        )(inner)
+
     return decorate
 
+
 def urlencodenested(params):
     """like urlencode, but works with nested parameters.
 
@@ -183,6 +224,7 @@
     urlencode. Note: the encoding is consistent with PHP's http_build_query.
     """
     flatparams = util.sortdict()
+
     def process(prefix, obj):
         if isinstance(obj, bool):
             obj = {True: b'true', False: b'false'}[obj]  # Python -> PHP form
@@ -196,9 +238,11 @@
                     process(b'%s[%s]' % (prefix, k), v)
                 else:
                     process(k, v)
+
     process(b'', params)
     return util.urlreq.urlencode(flatparams)
 
+
 def readurltoken(ui):
     """return conduit url, token and make sure they exist
 
@@ -207,8 +251,9 @@
     """
     url = ui.config(b'phabricator', b'url')
     if not url:
-        raise error.Abort(_(b'config %s.%s is required')
-                          % (b'phabricator', b'url'))
+        raise error.Abort(
+            _(b'config %s.%s is required') % (b'phabricator', b'url')
+        )
 
     res = httpconnectionmod.readauthforuri(ui, url, util.url(url).user)
     token = None
@@ -221,23 +266,33 @@
         token = auth.get(b'phabtoken')
 
     if not token:
-        raise error.Abort(_(b'Can\'t find conduit token associated to %s')
-                            % (url,))
+        raise error.Abort(
+            _(b'Can\'t find conduit token associated to %s') % (url,)
+        )
 
     return url, token
 
+
 def callconduit(ui, name, params):
     """call Conduit API, params is a dict. return json.loads result, or None"""
     host, token = readurltoken(ui)
     url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo()
     ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params)))
     params = params.copy()
-    params[b'api.token'] = token
-    data = urlencodenested(params)
+    params[b'__conduit__'] = {
+        b'token': token,
+    }
+    rawdata = {
+        b'params': templatefilters.json(params),
+        b'output': b'json',
+        b'__conduit__': 1,
+    }
+    data = urlencodenested(rawdata)
     curlcmd = ui.config(b'phabricator', b'curlcmd')
     if curlcmd:
-        sin, sout = procutil.popen2(b'%s -d @- %s'
-                                    % (curlcmd, procutil.shellquote(url)))
+        sin, sout = procutil.popen2(
+            b'%s -d @- %s' % (curlcmd, procutil.shellquote(url))
+        )
         sin.write(data)
         sin.close()
         body = sout.read()
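
The change to ``callconduit`` above is behavioral, not just formatting: the
API token now travels inside a ``__conduit__`` envelope within a single
JSON-encoded ``params`` form field, instead of being sent as a flat
``api.token`` parameter. A rough stdlib sketch of the new request body
(upstream encodes bytes via ``templatefilters.json`` and
``urlencodenested``)::

    import json
    from urllib.parse import urlencode

    def build_conduit_body(params, token):
        payload = dict(params)
        payload['__conduit__'] = {'token': token}  # token rides inside
        return urlencode({
            'params': json.dumps(payload),
            'output': 'json',
            '__conduit__': 1,
        })

    body = build_conduit_body({'constraints': {'callsigns': ['HG']}},
                              'cli-example')
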
@@ -248,16 +303,21 @@
             body = rsp.read()
     ui.debug(b'Conduit Response: %s\n' % body)
     parsed = pycompat.rapply(
-        lambda x: encoding.unitolocal(x) if isinstance(x, pycompat.unicode)
+        lambda x: encoding.unitolocal(x)
+        if isinstance(x, pycompat.unicode)
         else x,
-        json.loads(body)
+        # json.loads only accepts bytes from py3.6+
+        json.loads(encoding.unifromlocal(body)),
     )
     if parsed.get(b'error_code'):
-        msg = (_(b'Conduit Error (%s): %s')
-               % (parsed[b'error_code'], parsed[b'error_info']))
+        msg = _(b'Conduit Error (%s): %s') % (
+            parsed[b'error_code'],
+            parsed[b'error_info'],
+        )
         raise error.Abort(msg)
     return parsed[b'result']
 
+
 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'), optionalrepo=True)
 def debugcallconduit(ui, repo, name):
     """call Conduit API
@@ -268,18 +328,21 @@
     # json.loads only accepts bytes from 3.6+
     rawparams = encoding.unifromlocal(ui.fin.read())
     # json.loads only returns unicode strings
-    params = pycompat.rapply(lambda x:
-        encoding.unitolocal(x) if isinstance(x, pycompat.unicode) else x,
-        json.loads(rawparams)
+    params = pycompat.rapply(
+        lambda x: encoding.unitolocal(x)
+        if isinstance(x, pycompat.unicode)
+        else x,
+        json.loads(rawparams),
     )
     # json.dumps only accepts unicode strings
-    result = pycompat.rapply(lambda x:
-        encoding.unifromlocal(x) if isinstance(x, bytes) else x,
-        callconduit(ui, name, params)
+    result = pycompat.rapply(
+        lambda x: encoding.unifromlocal(x) if isinstance(x, bytes) else x,
+        callconduit(ui, name, params),
     )
     s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': '))
     ui.write(b'%s\n' % encoding.unitolocal(s))
 
+
 def getrepophid(repo):
     """given callsign, return repository PHID or None"""
     # developer config: phabricator.repophid
@@ -289,17 +352,23 @@
     callsign = repo.ui.config(b'phabricator', b'callsign')
     if not callsign:
         return None
-    query = callconduit(repo.ui, b'diffusion.repository.search',
-                        {b'constraints': {b'callsigns': [callsign]}})
+    query = callconduit(
+        repo.ui,
+        b'diffusion.repository.search',
+        {b'constraints': {b'callsigns': [callsign]}},
+    )
     if len(query[b'data']) == 0:
         return None
     repophid = query[b'data'][0][b'phid']
     repo.ui.setconfig(b'phabricator', b'repophid', repophid)
     return repophid
 
+
 _differentialrevisiontagre = re.compile(br'\AD([1-9][0-9]*)\Z')
 _differentialrevisiondescre = re.compile(
-    br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M)
+    br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M
+)
+
 
 def getoldnodedrevmap(repo, nodelist):
     """find previous nodes that has been sent to Phabricator
@@ -322,8 +391,8 @@
     unfi = repo.unfiltered()
     nodemap = unfi.changelog.nodemap
 
-    result = {} # {node: (oldnode?, lastdiff?, drev)}
-    toconfirm = {} # {node: (force, {precnode}, drev)}
+    result = {}  # {node: (oldnode?, lastdiff?, drev)}
+    toconfirm = {}  # {node: (force, {precnode}, drev)}
     for node in nodelist:
         ctx = unfi[node]
         # For tags like "D123", put them into "toconfirm" to verify later
@@ -345,13 +414,14 @@
     # Phabricator, and expect precursors overlap with it.
     if toconfirm:
         drevs = [drev for force, precs, drev in toconfirm.values()]
-        alldiffs = callconduit(unfi.ui, b'differential.querydiffs',
-                               {b'revisionIDs': drevs})
-        getnode = lambda d: bin(
-            getdiffmeta(d).get(b'node', b'')) or None
+        alldiffs = callconduit(
+            unfi.ui, b'differential.querydiffs', {b'revisionIDs': drevs}
+        )
+        getnode = lambda d: bin(getdiffmeta(d).get(b'node', b'')) or None
         for newnode, (force, precset, drev) in toconfirm.items():
-            diffs = [d for d in alldiffs.values()
-                     if int(d[b'revisionID']) == drev]
+            diffs = [
+                d for d in alldiffs.values() if int(d[b'revisionID']) == drev
+            ]
 
             # "precursors" as known by Phabricator
             phprecset = set(getnode(d) for d in diffs)
@@ -360,10 +430,22 @@
             # and force is not set (when commit message says nothing)
             if not force and not bool(phprecset & precset):
                 tagname = b'D%d' % drev
-                tags.tag(repo, tagname, nullid, message=None, user=None,
-                         date=None, local=True)
-                unfi.ui.warn(_(b'D%s: local tag removed - does not match '
-                               b'Differential history\n') % drev)
+                tags.tag(
+                    repo,
+                    tagname,
+                    nullid,
+                    message=None,
+                    user=None,
+                    date=None,
+                    local=True,
+                )
+                unfi.ui.warn(
+                    _(
+                        b'D%s: local tag removed - does not match '
+                        b'Differential history\n'
+                    )
+                    % drev
+                )
                 continue
 
             # Find the last node using Phabricator metadata, and make sure it
@@ -379,61 +461,444 @@
 
     return result
 
+
 def getdiff(ctx, diffopts):
     """plain-text diff without header (user, commit message, etc)"""
     output = util.stringio()
-    for chunk, _label in patch.diffui(ctx.repo(), ctx.p1().node(), ctx.node(),
-                                      None, opts=diffopts):
+    for chunk, _label in patch.diffui(
+        ctx.repo(), ctx.p1().node(), ctx.node(), None, opts=diffopts
+    ):
         output.write(chunk)
     return output.getvalue()
 
+
+class DiffChangeType(object):
+    ADD = 1
+    CHANGE = 2
+    DELETE = 3
+    MOVE_AWAY = 4
+    COPY_AWAY = 5
+    MOVE_HERE = 6
+    COPY_HERE = 7
+    MULTICOPY = 8
+
+
+class DiffFileType(object):
+    TEXT = 1
+    IMAGE = 2
+    BINARY = 3
+
+
+@attr.s
+class phabhunk(dict):
+    """Represents a Differential hunk, which is owned by a Differential change
+    """
+
+    oldOffset = attr.ib(default=0)  # camelcase-required
+    oldLength = attr.ib(default=0)  # camelcase-required
+    newOffset = attr.ib(default=0)  # camelcase-required
+    newLength = attr.ib(default=0)  # camelcase-required
+    corpus = attr.ib(default='')
+    # These get added to the phabchange's equivalents
+    addLines = attr.ib(default=0)  # camelcase-required
+    delLines = attr.ib(default=0)  # camelcase-required
+
+
+@attr.s
+class phabchange(object):
+    """Represents a Differential change, owns Differential hunks and owned by a
+    Differential diff.  Each one represents one file in a diff.
+    """
+
+    currentPath = attr.ib(default=None)  # camelcase-required
+    oldPath = attr.ib(default=None)  # camelcase-required
+    awayPaths = attr.ib(default=attr.Factory(list))  # camelcase-required
+    metadata = attr.ib(default=attr.Factory(dict))
+    oldProperties = attr.ib(default=attr.Factory(dict))  # camelcase-required
+    newProperties = attr.ib(default=attr.Factory(dict))  # camelcase-required
+    type = attr.ib(default=DiffChangeType.CHANGE)
+    fileType = attr.ib(default=DiffFileType.TEXT)  # camelcase-required
+    commitHash = attr.ib(default=None)  # camelcase-required
+    addLines = attr.ib(default=0)  # camelcase-required
+    delLines = attr.ib(default=0)  # camelcase-required
+    hunks = attr.ib(default=attr.Factory(list))
+
+    def copynewmetadatatoold(self):
+        for key in list(self.metadata.keys()):
+            newkey = key.replace(b'new:', b'old:')
+            self.metadata[newkey] = self.metadata[key]
+
+    def addoldmode(self, value):
+        self.oldProperties[b'unix:filemode'] = value
+
+    def addnewmode(self, value):
+        self.newProperties[b'unix:filemode'] = value
+
+    def addhunk(self, hunk):
+        if not isinstance(hunk, phabhunk):
+            raise error.Abort(b'phabchange.addhunk only takes phabhunks')
+        self.hunks.append(pycompat.byteskwargs(attr.asdict(hunk)))
+        # It's useful to include these stats since the Phab web UI shows them,
+        # and uses them to estimate how large a change a Revision is. Also used
+        # in email subjects for the [+++--] bit.
+        self.addLines += hunk.addLines
+        self.delLines += hunk.delLines
+
+
+@attr.s
+class phabdiff(object):
+    """Represents a Differential diff, owns Differential changes.  Corresponds
+    to a commit.
+    """
+
+    # Doesn't seem to be any reason to send this (output of uname -n)
+    sourceMachine = attr.ib(default=b'')  # camelcase-required
+    sourcePath = attr.ib(default=b'/')  # camelcase-required
+    sourceControlBaseRevision = attr.ib(default=b'0' * 40)  # camelcase-required
+    sourceControlPath = attr.ib(default=b'/')  # camelcase-required
+    sourceControlSystem = attr.ib(default=b'hg')  # camelcase-required
+    branch = attr.ib(default=b'default')
+    bookmark = attr.ib(default=None)
+    creationMethod = attr.ib(default=b'phabsend')  # camelcase-required
+    lintStatus = attr.ib(default=b'none')  # camelcase-required
+    unitStatus = attr.ib(default=b'none')  # camelcase-required
+    changes = attr.ib(default=attr.Factory(dict))
+    repositoryPHID = attr.ib(default=None)  # camelcase-required
+
+    def addchange(self, change):
+        if not isinstance(change, phabchange):
+            raise error.Abort(b'phabdiff.addchange only takes phabchanges')
+        self.changes[change.currentPath] = pycompat.byteskwargs(
+            attr.asdict(change)
+        )
+
+
+def maketext(pchange, ctx, fname):
+    """populate the phabchange for a text file"""
+    repo = ctx.repo()
+    fmatcher = match.exact([fname])
+    diffopts = mdiff.diffopts(git=True, context=32767)
+    _pfctx, _fctx, header, fhunks = next(
+        patch.diffhunks(repo, ctx.p1(), ctx, fmatcher, opts=diffopts)
+    )
+
+    for fhunk in fhunks:
+        (oldOffset, oldLength, newOffset, newLength), lines = fhunk
+        corpus = b''.join(lines[1:])
+        shunk = list(header)
+        shunk.extend(lines)
+        _mf, _mt, addLines, delLines, _hb = patch.diffstatsum(
+            patch.diffstatdata(util.iterlines(shunk))
+        )
+        pchange.addhunk(
+            phabhunk(
+                oldOffset,
+                oldLength,
+                newOffset,
+                newLength,
+                corpus,
+                addLines,
+                delLines,
+            )
+        )
+
+
+def uploadchunks(fctx, fphid):
+    """upload large binary files as separate chunks.
+    Phab requests chunking over 8MiB, and splits into 4MiB chunks
+    """
+    ui = fctx.repo().ui
+    chunks = callconduit(ui, b'file.querychunks', {b'filePHID': fphid})
+    progress = ui.makeprogress(
+        _(b'uploading file chunks'), unit=_(b'chunks'), total=len(chunks)
+    )
+    for chunk in chunks:
+        progress.increment()
+        if chunk[b'complete']:
+            continue
+        bstart = int(chunk[b'byteStart'])
+        bend = int(chunk[b'byteEnd'])
+        callconduit(
+            ui,
+            b'file.uploadchunk',
+            {
+                b'filePHID': fphid,
+                b'byteStart': bstart,
+                b'data': base64.b64encode(fctx.data()[bstart:bend]),
+                b'dataEncoding': b'base64',
+            },
+        )
+    progress.complete()
+
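
A minimal standalone sketch of how the byteStart/byteEnd fields slice the
file data (the chunk list here is a hypothetical file.querychunks response,
and no conduit call is made)::

    import base64

    data = b'x' * (9 * 1024 * 1024)  # pretend 9MiB file, above the threshold
    chunks = [
        {b'byteStart': b'0', b'byteEnd': b'4194304', b'complete': False},
        {b'byteStart': b'4194304', b'byteEnd': b'8388608', b'complete': True},
        {b'byteStart': b'8388608', b'byteEnd': b'9437184', b'complete': False},
    ]
    for chunk in chunks:
        if chunk[b'complete']:
            continue  # the server already has this chunk
        bstart, bend = int(chunk[b'byteStart']), int(chunk[b'byteEnd'])
        payload = base64.b64encode(data[bstart:bend])
        # payload is what file.uploadchunk receives as b'data'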
+
+def uploadfile(fctx):
+    """upload binary files to Phabricator"""
+    repo = fctx.repo()
+    ui = repo.ui
+    fname = fctx.path()
+    size = fctx.size()
+    fhash = pycompat.bytestr(hashlib.sha256(fctx.data()).hexdigest())
+
+    # an allocate call is needed first to see if an upload is even required
+    # (Phab might already have the file) and to determine if chunking is needed
+    allocateparams = {
+        b'name': fname,
+        b'contentLength': size,
+        b'contentHash': fhash,
+    }
+    filealloc = callconduit(ui, b'file.allocate', allocateparams)
+    fphid = filealloc[b'filePHID']
+
+    if filealloc[b'upload']:
+        ui.write(_(b'uploading %s\n') % bytes(fctx))
+        if not fphid:
+            uploadparams = {
+                b'name': fname,
+                b'data_base64': base64.b64encode(fctx.data()),
+            }
+            fphid = callconduit(ui, b'file.upload', uploadparams)
+        else:
+            uploadchunks(fctx, fphid)
+    else:
+        ui.debug(b'server already has %s\n' % bytes(fctx))
+
+    if not fphid:
+        raise error.Abort(b'Upload of %s failed.' % bytes(fctx))
+
+    return fphid
+
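
The file.allocate response drives a three-way decision; a sketch with
hypothetical response dicts (the real call goes through callconduit)::

    def decide(filealloc):
        # mirrors the branches of uploadfile() above
        if not filealloc[b'upload']:
            return 'server already has the file'
        if not filealloc[b'filePHID']:
            return 'small file: single file.upload call'
        return 'large file: chunked upload via file.uploadchunk'

    assert (decide({b'upload': False, b'filePHID': b'PHID-FILE-x'})
            == 'server already has the file')
    assert (decide({b'upload': True, b'filePHID': None})
            == 'small file: single file.upload call')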
+
+def addoldbinary(pchange, fctx, originalfname):
+    """add the metadata for the previous version of a binary file to the
+    phabchange for the new version
+    """
+    oldfctx = fctx.p1()[originalfname]
+    if fctx.cmp(oldfctx):
+        # Files differ, add the old one
+        pchange.metadata[b'old:file:size'] = oldfctx.size()
+        mimeguess, _enc = mimetypes.guess_type(
+            encoding.unifromlocal(oldfctx.path())
+        )
+        if mimeguess:
+            pchange.metadata[b'old:file:mime-type'] = pycompat.bytestr(
+                mimeguess
+            )
+        fphid = uploadfile(oldfctx)
+        pchange.metadata[b'old:binary-phid'] = fphid
+    else:
+        # If it's left as IMAGE/BINARY, the web UI might try to display it
+        pchange.fileType = DiffFileType.TEXT
+        pchange.copynewmetadatatoold()
+
+
+def makebinary(pchange, fctx):
+    """populate the phabchange for a binary file"""
+    pchange.fileType = DiffFileType.BINARY
+    fphid = uploadfile(fctx)
+    pchange.metadata[b'new:binary-phid'] = fphid
+    pchange.metadata[b'new:file:size'] = fctx.size()
+    mimeguess, _enc = mimetypes.guess_type(encoding.unifromlocal(fctx.path()))
+    if mimeguess:
+        mimeguess = pycompat.bytestr(mimeguess)
+        pchange.metadata[b'new:file:mime-type'] = mimeguess
+        if mimeguess.startswith(b'image/'):
+            pchange.fileType = DiffFileType.IMAGE
+
+
+# Copied from mercurial/patch.py
+gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
+
+
+def notutf8(fctx):
+    """detect non-UTF-8 text files since Phabricator requires them to be marked
+    as binary
+    """
+    try:
+        fctx.data().decode('utf-8')
+        if fctx.parents():
+            fctx.p1().data().decode('utf-8')
+        return False
+    except UnicodeDecodeError:
+        fctx.repo().ui.write(
+            _(b'file %s detected as non-UTF-8, marked as binary\n')
+            % fctx.path()
+        )
+        return True
+
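
The detection itself is just an attempted decode; a standalone equivalent::

    def isutf8(data):
        # valid UTF-8 decodes cleanly; anything else raises
        try:
            data.decode('utf-8')
            return True
        except UnicodeDecodeError:
            return False

    assert isutf8(b'plain ascii')
    assert isutf8(b'\xc3\xa9')      # 'e' with an acute accent, as UTF-8
    assert not isutf8(b'\xc3\x28')  # invalid continuation byte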
+
+def addremoved(pdiff, ctx, removed):
+    """add removed files to the phabdiff. Shouldn't include moves"""
+    for fname in removed:
+        pchange = phabchange(
+            currentPath=fname, oldPath=fname, type=DiffChangeType.DELETE
+        )
+        pchange.addoldmode(gitmode[ctx.p1()[fname].flags()])
+        fctx = ctx.p1()[fname]
+        if not (fctx.isbinary() or notutf8(fctx)):
+            maketext(pchange, ctx, fname)
+
+        pdiff.addchange(pchange)
+
+
+def addmodified(pdiff, ctx, modified):
+    """add modified files to the phabdiff"""
+    for fname in modified:
+        fctx = ctx[fname]
+        pchange = phabchange(currentPath=fname, oldPath=fname)
+        filemode = gitmode[ctx[fname].flags()]
+        originalmode = gitmode[ctx.p1()[fname].flags()]
+        if filemode != originalmode:
+            pchange.addoldmode(originalmode)
+            pchange.addnewmode(filemode)
+
+        if fctx.isbinary() or notutf8(fctx):
+            makebinary(pchange, fctx)
+            addoldbinary(pchange, fctx, fname)
+        else:
+            maketext(pchange, ctx, fname)
+
+        pdiff.addchange(pchange)
+
+
+def addadded(pdiff, ctx, added, removed):
+    """add file adds to the phabdiff, both new files and copies/moves"""
+    # Keep track of files that have been recorded as moved/copied, so that if
+    # there are additional copies we can mark them (moves get removed from
+    # `removed`)
+    copiedchanges = {}
+    movedchanges = {}
+    for fname in added:
+        fctx = ctx[fname]
+        pchange = phabchange(currentPath=fname)
+
+        filemode = gitmode[ctx[fname].flags()]
+        renamed = fctx.renamed()
+
+        if renamed:
+            originalfname = renamed[0]
+            originalmode = gitmode[ctx.p1()[originalfname].flags()]
+            pchange.oldPath = originalfname
+
+            if originalfname in removed:
+                origpchange = phabchange(
+                    currentPath=originalfname,
+                    oldPath=originalfname,
+                    type=DiffChangeType.MOVE_AWAY,
+                    awayPaths=[fname],
+                )
+                movedchanges[originalfname] = origpchange
+                removed.remove(originalfname)
+                pchange.type = DiffChangeType.MOVE_HERE
+            elif originalfname in movedchanges:
+                movedchanges[originalfname].type = DiffChangeType.MULTICOPY
+                movedchanges[originalfname].awayPaths.append(fname)
+                pchange.type = DiffChangeType.COPY_HERE
+            else:  # pure copy
+                if originalfname not in copiedchanges:
+                    origpchange = phabchange(
+                        currentPath=originalfname, type=DiffChangeType.COPY_AWAY
+                    )
+                    copiedchanges[originalfname] = origpchange
+                else:
+                    origpchange = copiedchanges[originalfname]
+                origpchange.awayPaths.append(fname)
+                pchange.type = DiffChangeType.COPY_HERE
+
+            if filemode != originalmode:
+                pchange.addoldmode(originalmode)
+                pchange.addnewmode(filemode)
+        else:  # Brand-new file
+            pchange.addnewmode(gitmode[fctx.flags()])
+            pchange.type = DiffChangeType.ADD
+
+        if fctx.isbinary() or notutf8(fctx):
+            makebinary(pchange, fctx)
+            if renamed:
+                addoldbinary(pchange, fctx, originalfname)
+        else:
+            maketext(pchange, ctx, fname)
+
+        pdiff.addchange(pchange)
+
+    for _path, copiedchange in copiedchanges.items():
+        pdiff.addchange(copiedchange)
+    for _path, movedchange in movedchanges.items():
+        pdiff.addchange(movedchange)
+
+
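
A condensed walkthrough of the move/copy classification above, with the
DiffChangeType constants replaced by plain strings (hypothetical file names,
standalone)::

    def classify(origin, removed, movedchanges, copiedchanges):
        if origin is None:
            return 'ADD'  # brand-new file
        if origin in removed:
            movedchanges[origin] = 'MOVE_AWAY'
            removed.remove(origin)
            return 'MOVE_HERE'
        if origin in movedchanges:
            movedchanges[origin] = 'MULTICOPY'
            return 'COPY_HERE'
        copiedchanges.setdefault(origin, 'COPY_AWAY')
        return 'COPY_HERE'

    removed, moved, copied = {'old.txt'}, {}, {}
    # the first add tracing back to a removed file is a move...
    assert classify('old.txt', removed, moved, copied) == 'MOVE_HERE'
    # ...and a second add from the same origin upgrades it to MULTICOPY
    assert classify('old.txt', removed, moved, copied) == 'COPY_HERE'
    assert moved == {'old.txt': 'MULTICOPY'}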
 def creatediff(ctx):
     """create a Differential Diff"""
     repo = ctx.repo()
     repophid = getrepophid(repo)
-    # Create a "Differential Diff" via "differential.createrawdiff" API
-    params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
+    # Create a "Differential Diff" via "differential.creatediff" API
+    pdiff = phabdiff(
+        sourceControlBaseRevision=b'%s' % ctx.p1().hex(),
+        branch=b'%s' % ctx.branch(),
+    )
+    modified, added, removed, _d, _u, _i, _c = ctx.p1().status(ctx)
+    # addadded will remove moved files from removed, so addremoved won't get
+    # them
+    addadded(pdiff, ctx, added, removed)
+    addmodified(pdiff, ctx, modified)
+    addremoved(pdiff, ctx, removed)
     if repophid:
-        params[b'repositoryPHID'] = repophid
-    diff = callconduit(repo.ui, b'differential.createrawdiff', params)
+        pdiff.repositoryPHID = repophid
+    diff = callconduit(
+        repo.ui,
+        b'differential.creatediff',
+        pycompat.byteskwargs(attr.asdict(pdiff)),
+    )
     if not diff:
         raise error.Abort(_(b'cannot create diff for %s') % ctx)
     return diff
 
+
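
creatediff now serializes the whole phabdiff with attr.asdict; a toy
illustration of that flattening (attrs library, simplified str fields)::

    import attr

    @attr.s
    class diffsketch(object):
        branch = attr.ib(default='default')
        changes = attr.ib(default=attr.Factory(dict))

    d = diffsketch(branch='stable')
    d.changes['a.txt'] = {'type': 'ADD'}
    params = attr.asdict(d)
    assert params == {'branch': 'stable',
                      'changes': {'a.txt': {'type': 'ADD'}}}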
 def writediffproperties(ctx, diff):
     """write metadata to diff so patches could be applied losslessly"""
+    # creatediff returns with a diffid but query returns with an id
+    diffid = diff.get(b'diffid', diff.get(b'id'))
     params = {
-        b'diff_id': diff[b'id'],
+        b'diff_id': diffid,
         b'name': b'hg:meta',
-        b'data': templatefilters.json({
-            b'user': ctx.user(),
-            b'date': b'%d %d' % ctx.date(),
-            b'branch': ctx.branch(),
-            b'node': ctx.hex(),
-            b'parent': ctx.p1().hex(),
-        }),
+        b'data': templatefilters.json(
+            {
+                b'user': ctx.user(),
+                b'date': b'%d %d' % ctx.date(),
+                b'branch': ctx.branch(),
+                b'node': ctx.hex(),
+                b'parent': ctx.p1().hex(),
+            }
+        ),
     }
     callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
 
     params = {
-        b'diff_id': diff[b'id'],
+        b'diff_id': diffid,
         b'name': b'local:commits',
-        b'data': templatefilters.json({
-            ctx.hex(): {
-                b'author': stringutil.person(ctx.user()),
-                b'authorEmail': stringutil.email(ctx.user()),
-                b'time': int(ctx.date()[0]),
-                b'commit': ctx.hex(),
-                b'parents': [ctx.p1().hex()],
-                b'branch': ctx.branch(),
-            },
-        }),
+        b'data': templatefilters.json(
+            {
+                ctx.hex(): {
+                    b'author': stringutil.person(ctx.user()),
+                    b'authorEmail': stringutil.email(ctx.user()),
+                    b'time': int(ctx.date()[0]),
+                    b'commit': ctx.hex(),
+                    b'parents': [ctx.p1().hex()],
+                    b'branch': ctx.branch(),
+                },
+            }
+        ),
     }
     callconduit(ctx.repo().ui, b'differential.setdiffproperty', params)
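
Both properties end up as JSON blobs attached to the diff; roughly, with
hypothetical values (local:commits is keyed by the changeset hash instead)::

    import json

    hgmeta = {
        'user': 'Alice <alice@example.com>',
        'date': '1571667600 14400',
        'branch': 'default',
        'node': 'f' * 40,    # 40-hex-digit changeset hash
        'parent': '0' * 40,
    }
    payload = json.dumps(hgmeta)  # stored under the b'hg:meta' name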
 
-def createdifferentialrevision(ctx, revid=None, parentrevphid=None,
-                               oldnode=None, olddiff=None, actions=None,
-                               comment=None):
+
+def createdifferentialrevision(
+    ctx,
+    revid=None,
+    parentrevphid=None,
+    oldnode=None,
+    olddiff=None,
+    actions=None,
+    comment=None,
+):
     """create or update a Differential Revision
 
     If revid is None, create a new Differential Revision, otherwise update
@@ -448,7 +913,7 @@
     if oldnode:
         diffopts = mdiff.diffopts(git=True, context=32767)
         oldctx = repo.unfiltered()[oldnode]
-        neednewdiff = (getdiff(ctx, diffopts) != getdiff(oldctx, diffopts))
+        neednewdiff = getdiff(ctx, diffopts) != getdiff(oldctx, diffopts)
     else:
         neednewdiff = True
 
@@ -468,16 +933,18 @@
 
     # Set the parent Revision every time, so commit re-ordering is picked up
     if parentrevphid:
-        transactions.append({b'type': b'parents.set',
-                             b'value': [parentrevphid]})
+        transactions.append(
+            {b'type': b'parents.set', b'value': [parentrevphid]}
+        )
 
     if actions:
         transactions += actions
 
     # Parse commit message and update related fields.
     desc = ctx.description()
-    info = callconduit(repo.ui, b'differential.parsecommitmessage',
-                       {b'corpus': desc})
+    info = callconduit(
+        repo.ui, b'differential.parsecommitmessage', {b'corpus': desc}
+    )
     for k, v in info[b'fields'].items():
         if k in [b'title', b'summary', b'testPlan']:
             transactions.append({b'type': k, b'value': v})
@@ -493,6 +960,7 @@
 
     return revision, diff
 
+
 def userphids(repo, names):
     """convert user names to PHIDs"""
     names = [name.lower() for name in names]
@@ -504,20 +972,30 @@
     resolved = set(entry[b'fields'][b'username'].lower() for entry in data)
     unresolved = set(names) - resolved
     if unresolved:
-        raise error.Abort(_(b'unknown username: %s')
-                          % b' '.join(sorted(unresolved)))
+        raise error.Abort(
+            _(b'unknown username: %s') % b' '.join(sorted(unresolved))
+        )
     return [entry[b'phid'] for entry in data]
 
-@vcrcommand(b'phabsend',
-         [(b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
-          (b'', b'amend', True, _(b'update commit messages')),
-          (b'', b'reviewer', [], _(b'specify reviewers')),
-          (b'', b'blocker', [], _(b'specify blocking reviewers')),
-          (b'm', b'comment', b'',
-           _(b'add a comment to Revisions with new/updated Diffs')),
-          (b'', b'confirm', None, _(b'ask for confirmation before sending'))],
-         _(b'REV [OPTIONS]'),
-         helpcategory=command.CATEGORY_IMPORT_EXPORT)
+
+@vcrcommand(
+    b'phabsend',
+    [
+        (b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
+        (b'', b'amend', True, _(b'update commit messages')),
+        (b'', b'reviewer', [], _(b'specify reviewers')),
+        (b'', b'blocker', [], _(b'specify blocking reviewers')),
+        (
+            b'm',
+            b'comment',
+            b'',
+            _(b'add a comment to Revisions with new/updated Diffs'),
+        ),
+        (b'', b'confirm', None, _(b'ask for confirmation before sending')),
+    ],
+    _(b'REV [OPTIONS]'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT,
+)
 def phabsend(ui, repo, *revs, **opts):
     """upload changesets to Phabricator
 
@@ -571,14 +1049,14 @@
     if reviewers:
         phids.extend(userphids(repo, reviewers))
     if blockers:
-        phids.extend(map(
-            lambda phid: b'blocking(%s)' % phid, userphids(repo, blockers)
-        ))
+        phids.extend(
+            map(lambda phid: b'blocking(%s)' % phid, userphids(repo, blockers))
+        )
     if phids:
         actions.append({b'type': b'reviewers.add', b'value': phids})
 
-    drevids = [] # [int]
-    diffmap = {} # {newnode: diff}
+    drevids = []  # [int]
+    diffmap = {}  # {newnode: diff}
 
     # Send patches one by one so we know their Differential Revision PHIDs
     # and can provide the dependency relationships
@@ -592,8 +1070,14 @@
         if oldnode != ctx.node() or opts.get(b'amend'):
             # Create or update Differential Revision
             revision, diff = createdifferentialrevision(
-                ctx, revid, lastrevphid, oldnode, olddiff, actions,
-                opts.get(b'comment'))
+                ctx,
+                revid,
+                lastrevphid,
+                oldnode,
+                olddiff,
+                actions,
+                opts.get(b'comment'),
+            )
             diffmap[ctx.node()] = diff
             newrevid = int(revision[b'object'][b'id'])
             newrevphid = revision[b'object'][b'phid']
@@ -607,25 +1091,36 @@
             m = _differentialrevisiondescre.search(ctx.description())
             if not m or int(m.group(r'id')) != newrevid:
                 tagname = b'D%d' % newrevid
-                tags.tag(repo, tagname, ctx.node(), message=None, user=None,
-                         date=None, local=True)
+                tags.tag(
+                    repo,
+                    tagname,
+                    ctx.node(),
+                    message=None,
+                    user=None,
+                    date=None,
+                    local=True,
+                )
         else:
             # Nothing changed. But still set "newrevphid" so the next revision
             # can depend on this one, and "newrevid" for the summary line.
-            newrevphid = querydrev(repo, str(revid))[0][b'phid']
+            newrevphid = querydrev(repo, b'%d' % revid)[0][b'phid']
             newrevid = revid
             action = b'skipped'
 
         actiondesc = ui.label(
-            {b'created': _(b'created'),
-             b'skipped': _(b'skipped'),
-             b'updated': _(b'updated')}[action],
-            b'phabricator.action.%s' % action)
+            {
+                b'created': _(b'created'),
+                b'skipped': _(b'skipped'),
+                b'updated': _(b'updated'),
+            }[action],
+            b'phabricator.action.%s' % action,
+        )
         drevdesc = ui.label(b'D%d' % newrevid, b'phabricator.drev')
         nodedesc = ui.label(bytes(ctx), b'phabricator.node')
         desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc')
-        ui.write(_(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc,
-                                             desc))
+        ui.write(
+            _(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc, desc)
+        )
         drevids.append(newrevid)
         lastrevphid = newrevphid
 
@@ -635,7 +1130,7 @@
         drevs = callconduit(ui, b'differential.query', {b'ids': drevids})
         with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
             wnode = unfi[b'.'].node()
-            mapping = {} # {oldnode: [newnode]}
+            mapping = {}  # {oldnode: [newnode]}
             for i, rev in enumerate(revs):
                 old = unfi[rev]
                 drevid = drevids[i]
@@ -644,16 +1139,24 @@
                 # Make sure commit message contain "Differential Revision"
                 if old.description() != newdesc:
                     if old.phase() == phases.public:
-                        ui.warn(_("warning: not updating public commit %s\n")
-                                % scmutil.formatchangeid(old))
+                        ui.warn(
+                            _(b"warning: not updating public commit %s\n")
+                            % scmutil.formatchangeid(old)
+                        )
                         continue
                     parents = [
                         mapping.get(old.p1().node(), (old.p1(),))[0],
                         mapping.get(old.p2().node(), (old.p2(),))[0],
                     ]
                     new = context.metadataonlyctx(
-                        repo, old, parents=parents, text=newdesc,
-                        user=old.user(), date=old.date(), extra=old.extra())
+                        repo,
+                        old,
+                        parents=parents,
+                        text=newdesc,
+                        user=old.user(),
+                        date=old.date(),
+                        extra=old.extra(),
+                    )
 
                     newnode = new.commit()
 
@@ -664,21 +1167,38 @@
                     try:
                         writediffproperties(unfi[newnode], diffmap[old.node()])
                     except util.urlerr.urlerror:
-                        ui.warn(b'Failed to update metadata for D%s\n' % drevid)
+                        ui.warnnoi18n(
+                            b'Failed to update metadata for D%s\n' % drevid
+                        )
                 # Remove local tags since they're no longer necessary
                 tagname = b'D%d' % drevid
                 if tagname in repo.tags():
-                    tags.tag(repo, tagname, nullid, message=None, user=None,
-                             date=None, local=True)
+                    tags.tag(
+                        repo,
+                        tagname,
+                        nullid,
+                        message=None,
+                        user=None,
+                        date=None,
+                        local=True,
+                    )
             scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
             if wnode in mapping:
                 unfi.setparents(mapping[wnode][0])
 
+
 # Map from "hg:meta" keys to headers understood by "hg import". The order is
 # consistent with "hg export" output.
-_metanamemap = util.sortdict([(b'user', b'User'), (b'date', b'Date'),
-                              (b'branch', b'Branch'), (b'node', b'Node ID'),
-                              (b'parent', b'Parent ')])
+_metanamemap = util.sortdict(
+    [
+        (b'user', b'User'),
+        (b'date', b'Date'),
+        (b'branch', b'Branch'),
+        (b'node', b'Node ID'),
+        (b'parent', b'Parent '),
+    ]
+)
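
A sketch of how readpatch() below renders these headers from an "hg:meta"
dict (plain strings, hypothetical values)::

    metanamemap = [
        ('user', 'User'), ('date', 'Date'), ('branch', 'Branch'),
        ('node', 'Node ID'), ('parent', 'Parent '),
    ]
    meta = {'user': 'Alice', 'node': 'f' * 40}
    header = '# HG changeset patch\n' + ''.join(
        '# %s %s\n' % (disp, meta[k]) for k, disp in metanamemap if k in meta
    )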
+
 
 def _confirmbeforesend(repo, revs, oldmap):
     url, token = readurltoken(repo.ui)
@@ -692,62 +1212,81 @@
         else:
             drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')
 
-        ui.write(_(b'%s - %s: %s\n')
-                 % (drevdesc,
-                    ui.label(bytes(ctx), b'phabricator.node'),
-                    ui.label(desc, b'phabricator.desc')))
+        ui.write(
+            _(b'%s - %s: %s\n')
+            % (
+                drevdesc,
+                ui.label(bytes(ctx), b'phabricator.node'),
+                ui.label(desc, b'phabricator.desc'),
+            )
+        )
 
-    if ui.promptchoice(_(b'Send the above changes to %s (yn)?'
-                         b'$$ &Yes $$ &No') % url):
+    if ui.promptchoice(
+        _(b'Send the above changes to %s (yn)?$$ &Yes $$ &No') % url
+    ):
         return False
 
     return True
 
-_knownstatusnames = {b'accepted', b'needsreview', b'needsrevision', b'closed',
-                     b'abandoned'}
+
+_knownstatusnames = {
+    b'accepted',
+    b'needsreview',
+    b'needsrevision',
+    b'closed',
+    b'abandoned',
+}
+
 
 def _getstatusname(drev):
     """get normalized status name from a Differential Revision"""
     return drev[b'statusName'].replace(b' ', b'').lower()
 
+
 # Small language to specify differential revisions. Supported symbols: (), :X,
 # +, and -.
 
 _elements = {
     # token-type: binding-strength, primary, prefix, infix, suffix
-    b'(':      (12, None, (b'group', 1, b')'), None, None),
-    b':':      (8, None, (b'ancestors', 8), None, None),
-    b'&':      (5,  None, None, (b'and_', 5), None),
-    b'+':      (4,  None, None, (b'add', 4), None),
-    b'-':      (4,  None, None, (b'sub', 4), None),
-    b')':      (0,  None, None, None, None),
+    b'(': (12, None, (b'group', 1, b')'), None, None),
+    b':': (8, None, (b'ancestors', 8), None, None),
+    b'&': (5, None, None, (b'and_', 5), None),
+    b'+': (4, None, None, (b'add', 4), None),
+    b'-': (4, None, None, (b'sub', 4), None),
+    b')': (0, None, None, None, None),
     b'symbol': (0, b'symbol', None, None, None),
-    b'end':    (0, None, None, None, None),
+    b'end': (0, None, None, None, None),
 }
 
+
 def _tokenize(text):
-    view = memoryview(text) # zero-copy slice
+    view = memoryview(text)  # zero-copy slice
     special = b'():+-& '
     pos = 0
     length = len(text)
     while pos < length:
-        symbol = b''.join(itertools.takewhile(lambda ch: ch not in special,
-                                              pycompat.iterbytestr(view[pos:])))
+        symbol = b''.join(
+            itertools.takewhile(
+                lambda ch: ch not in special, pycompat.iterbytestr(view[pos:])
+            )
+        )
         if symbol:
             yield (b'symbol', symbol, pos)
             pos += len(symbol)
-        else: # special char, ignore space
-            if text[pos] != b' ':
-                yield (text[pos], None, pos)
+        else:  # special char, ignore space
+            if text[pos : pos + 1] != b' ':
+                yield (text[pos : pos + 1], None, pos)
             pos += 1
     yield (b'end', None, pos)
 
+
 def _parse(text):
     tree, pos = parser.parser(_elements).parse(_tokenize(text))
     if pos != len(text):
         raise error.ParseError(b'invalid token', pos)
     return tree
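
A standalone str-based sketch of the tokenizer, showing how a spec such as
'D2+D3' breaks into tokens (the real _tokenize works on bytes and uses a
zero-copy memoryview)::

    import itertools

    def tokenize(text):
        special = '():+-& '
        pos = 0
        while pos < len(text):
            symbol = ''.join(
                itertools.takewhile(lambda ch: ch not in special, text[pos:])
            )
            if symbol:
                yield ('symbol', symbol, pos)
                pos += len(symbol)
            elif text[pos] != ' ':  # special char; spaces are skipped
                yield (text[pos], None, pos)
                pos += 1
            else:
                pos += 1
        yield ('end', None, pos)

    assert [t[0] for t in tokenize('D2+D3')] == ['symbol', '+', 'symbol', 'end']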
 
+
 def _parsedrev(symbol):
     """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
     if symbol.startswith(b'D') and symbol[1:].isdigit():
@@ -755,6 +1294,7 @@
     if symbol.isdigit():
         return int(symbol)
 
+
 def _prefetchdrevs(tree):
     """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
     drevs = set()
@@ -776,6 +1316,7 @@
             ancestordrevs.update(a)
     return drevs, ancestordrevs
 
+
 def querydrev(repo, spec):
     """return a list of "Differential Revision" dicts
 
@@ -818,6 +1359,7 @@
             "sourcePath": null
         }
     """
+
     def fetch(params):
         """params -> single drev or None"""
         key = (params.get(b'ids') or params.get(b'phids') or [None])[0]
@@ -829,8 +1371,9 @@
             prefetched[drev[b'phid']] = drev
             prefetched[int(drev[b'id'])] = drev
         if key not in prefetched:
-            raise error.Abort(_(b'cannot get Differential Revision %r')
-                              % params)
+            raise error.Abort(
+                _(b'cannot get Differential Revision %r') % params
+            )
         return prefetched[key]
 
     def getstack(topdrevids):
@@ -853,7 +1396,7 @@
         return smartset.baseset(result)
 
     # Initialize prefetch cache
-    prefetched = {} # {id or phid: drev}
+    prefetched = {}  # {id or phid: drev}
 
     tree = _parse(spec)
     drevs, ancestordrevs = _prefetchdrevs(tree)
@@ -877,8 +1420,11 @@
             if drev:
                 return smartset.baseset([drev])
             elif tree[1] in _knownstatusnames:
-                drevs = [r for r in validids
-                         if _getstatusname(prefetched[r]) == tree[1]]
+                drevs = [
+                    r
+                    for r in validids
+                    if _getstatusname(prefetched[r]) == tree[1]
+                ]
                 return smartset.baseset(drevs)
             else:
                 raise error.Abort(_(b'unknown symbol: %s') % tree[1])
@@ -894,6 +1440,7 @@
 
     return [prefetched[r] for r in walk(tree)]
 
+
 def getdescfromdrev(drev):
     """get description (commit message) from "Differential Revision"
 
@@ -908,6 +1455,7 @@
     uri = b'Differential Revision: %s' % drev[b'uri']
     return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))
 
+
 def getdiffmeta(diff):
     """get commit metadata (date, node, user, p1) from a diff object
 
@@ -952,8 +1500,10 @@
             commit = sorted(props[b'local:commits'].values())[0]
             meta = {}
             if b'author' in commit and b'authorEmail' in commit:
-                meta[b'user'] = b'%s <%s>' % (commit[b'author'],
-                                              commit[b'authorEmail'])
+                meta[b'user'] = b'%s <%s>' % (
+                    commit[b'author'],
+                    commit[b'authorEmail'],
+                )
             if b'time' in commit:
                 meta[b'date'] = b'%d 0' % int(commit[b'time'])
             if b'branch' in commit:
@@ -973,6 +1523,7 @@
         meta[b'parent'] = diff[b'sourceControlBaseRevision']
     return meta
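
A condensed sketch of the precedence: local:commits (written by newer
phabsend) wins, hg:meta fills in otherwise, and sourceControlBaseRevision is
the final fallback for the parent (hypothetical property dicts)::

    def extractmeta(props, basenode):
        meta = {}
        commit = next(iter(props.get('local:commits', {}).values()), None)
        if commit:
            meta['user'] = '%s <%s>' % (commit['author'],
                                        commit['authorEmail'])
            meta['date'] = '%d 0' % int(commit['time'])
        else:
            meta.update(props.get('hg:meta', {}))
        if 'parent' not in meta:
            meta['parent'] = basenode
        return meta

    m = extractmeta({'hg:meta': {'user': 'Alice'}}, '0' * 40)
    assert m == {'user': 'Alice', 'parent': '0' * 40}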
 
+
 def readpatch(repo, drevs, write):
     """generate plain-text patch readable by 'hg import'
 
@@ -988,8 +1539,9 @@
         repo.ui.note(_(b'reading D%s\n') % drev[b'id'])
 
         diffid = max(int(v) for v in drev[b'diffs'])
-        body = callconduit(repo.ui, b'differential.getrawdiff',
-                           {b'diffID': diffid})
+        body = callconduit(
+            repo.ui, b'differential.getrawdiff', {b'diffID': diffid}
+        )
         desc = getdescfromdrev(drev)
         header = b'# HG changeset patch\n'
 
@@ -1004,10 +1556,13 @@
         content = b'%s%s\n%s' % (header, desc, body)
         write(content)
 
-@vcrcommand(b'phabread',
-         [(b'', b'stack', False, _(b'read dependencies'))],
-         _(b'DREVSPEC [OPTIONS]'),
-         helpcategory=command.CATEGORY_IMPORT_EXPORT)
+
+@vcrcommand(
+    b'phabread',
+    [(b'', b'stack', False, _(b'read dependencies'))],
+    _(b'DREVSPEC [OPTIONS]'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT,
+)
 def phabread(ui, repo, spec, **opts):
     """print patches from Phabricator suitable for importing
 
@@ -1033,14 +1588,19 @@
     drevs = querydrev(repo, spec)
     readpatch(repo, drevs, ui.write)
 
-@vcrcommand(b'phabupdate',
-         [(b'', b'accept', False, _(b'accept revisions')),
-          (b'', b'reject', False, _(b'reject revisions')),
-          (b'', b'abandon', False, _(b'abandon revisions')),
-          (b'', b'reclaim', False, _(b'reclaim revisions')),
-          (b'm', b'comment', b'', _(b'comment on the last revision')),
-          ], _(b'DREVSPEC [OPTIONS]'),
-          helpcategory=command.CATEGORY_IMPORT_EXPORT)
+
+@vcrcommand(
+    b'phabupdate',
+    [
+        (b'', b'accept', False, _(b'accept revisions')),
+        (b'', b'reject', False, _(b'reject revisions')),
+        (b'', b'abandon', False, _(b'abandon revisions')),
+        (b'', b'reclaim', False, _(b'reclaim revisions')),
+        (b'm', b'comment', b'', _(b'comment on the last revision')),
+    ],
+    _(b'DREVSPEC [OPTIONS]'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT,
+)
 def phabupdate(ui, repo, spec, **opts):
     """update Differential Revision in batch
 
@@ -1060,13 +1620,14 @@
         if i + 1 == len(drevs) and opts.get(b'comment'):
             actions.append({b'type': b'comment', b'value': opts[b'comment']})
         if actions:
-            params = {b'objectIdentifier': drev[b'phid'],
-                      b'transactions': actions}
+            params = {
+                b'objectIdentifier': drev[b'phid'],
+                b'transactions': actions,
+            }
             callconduit(ui, b'differential.revision.edit', params)
 
-templatekeyword = registrar.templatekeyword()
 
-@templatekeyword(b'phabreview', requires={b'ctx'})
+@eh.templatekeyword(b'phabreview', requires={b'ctx'})
 def template_review(context, mapping):
     """:phabreview: Object describing the review for this changeset.
     Has attributes `url` and `id`.
@@ -1074,10 +1635,9 @@
     ctx = context.resource(mapping, b'ctx')
     m = _differentialrevisiondescre.search(ctx.description())
     if m:
-        return templateutil.hybriddict({
-            b'url': m.group(r'url'),
-            b'id': b"D%s" % m.group(r'id'),
-        })
+        return templateutil.hybriddict(
+            {b'url': m.group(r'url'), b'id': b"D%s" % m.group(r'id')}
+        )
     else:
         tags = ctx.repo().nodetags(ctx.node())
         for t in tags:
@@ -1087,8 +1647,5 @@
                     url += b'/'
                 url += t
 
-                return templateutil.hybriddict({
-                    b'url': url,
-                    b'id': t,
-                })
+                return templateutil.hybriddict({b'url': url, b'id': t})
     return None
--- a/hgext/purge.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/purge.py	Mon Oct 21 11:09:48 2019 -0400
@@ -40,19 +40,31 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
-@command('purge|clean',
-    [('a', 'abort-on-err', None, _('abort if an error occurs')),
-    ('',  'all', None, _('purge ignored files too')),
-    ('',  'dirs', None, _('purge empty directories')),
-    ('',  'files', None, _('purge files')),
-    ('p', 'print', None, _('print filenames instead of deleting them')),
-    ('0', 'print0', None, _('end filenames with NUL, for use with xargs'
-                            ' (implies -p/--print)')),
-    ] + cmdutil.walkopts,
-    _('hg purge [OPTION]... [DIR]...'),
-    helpcategory=command.CATEGORY_MAINTENANCE)
+@command(
+    b'purge|clean',
+    [
+        (b'a', b'abort-on-err', None, _(b'abort if an error occurs')),
+        (b'', b'all', None, _(b'purge ignored files too')),
+        (b'', b'dirs', None, _(b'purge empty directories')),
+        (b'', b'files', None, _(b'purge files')),
+        (b'p', b'print', None, _(b'print filenames instead of deleting them')),
+        (
+            b'0',
+            b'print0',
+            None,
+            _(
+                b'end filenames with NUL, for use with xargs'
+                b' (implies -p/--print)'
+            ),
+        ),
+    ]
+    + cmdutil.walkopts,
+    _(b'hg purge [OPTION]... [DIR]...'),
+    helpcategory=command.CATEGORY_MAINTENANCE,
+)
 def purge(ui, repo, *dirs, **opts):
     '''removes files not tracked by Mercurial
 
@@ -85,14 +97,14 @@
     '''
     opts = pycompat.byteskwargs(opts)
 
-    act = not opts.get('print')
-    eol = '\n'
-    if opts.get('print0'):
-        eol = '\0'
-        act = False # --print0 implies --print
+    act = not opts.get(b'print')
+    eol = b'\n'
+    if opts.get(b'print0'):
+        eol = b'\0'
+        act = False  # --print0 implies --print
 
-    removefiles = opts.get('files')
-    removedirs = opts.get('dirs')
+    removefiles = opts.get(b'files')
+    removedirs = opts.get(b'dirs')
 
     if not removefiles and not removedirs:
         removefiles = True
@@ -101,11 +113,15 @@
     match = scmutil.match(repo[None], dirs, opts)
 
     paths = mergemod.purge(
-        repo, match, ignored=opts.get('all', False),
-        removeemptydirs=removedirs, removefiles=removefiles,
-        abortonerror=opts.get('abort_on_err'),
-        noop=not act)
+        repo,
+        match,
+        ignored=opts.get(b'all', False),
+        removeemptydirs=removedirs,
+        removefiles=removefiles,
+        abortonerror=opts.get(b'abort_on_err'),
+        noop=not act,
+    )
 
     for path in paths:
         if not act:
-            ui.write('%s%s' % (path, eol))
+            ui.write(b'%s%s' % (path, eol))
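
A minimal sketch of the flag interplay above, standalone with str keys::

    def outputmode(opts):
        # --print0 implies --print: either flag disables actual deletion
        act = not opts.get('print')
        eol = '\n'
        if opts.get('print0'):
            eol = '\0'
            act = False
        return act, eol

    assert outputmode({'print0': True}) == (False, '\0')
    assert outputmode({}) == (True, '\n')
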
--- a/hgext/rebase.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/rebase.py	Mon Oct 21 11:09:48 2019 -0400
@@ -24,6 +24,7 @@
     nullrev,
     short,
 )
+from mercurial.pycompat import open
 from mercurial import (
     bookmarks,
     cmdutil,
@@ -56,11 +57,11 @@
 
 # Indicates that a revision needs to be rebased
 revtodo = -1
-revtodostr = '-1'
+revtodostr = b'-1'
 
 # legacy revstates no longer needed in current code
 # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
-legacystates = {'-2', '-3', '-4', '-5'}
+legacystates = {b'-2', b'-3', b'-4', b'-5'}
 
 cmdtable = {}
 command = registrar.command(cmdtable)
@@ -68,32 +69,43 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
 def _nothingtorebase():
     return 1
 
+
 def _savegraft(ctx, extra):
-    s = ctx.extra().get('source', None)
+    s = ctx.extra().get(b'source', None)
     if s is not None:
-        extra['source'] = s
-    s = ctx.extra().get('intermediate-source', None)
+        extra[b'source'] = s
+    s = ctx.extra().get(b'intermediate-source', None)
     if s is not None:
-        extra['intermediate-source'] = s
+        extra[b'intermediate-source'] = s
+
 
 def _savebranch(ctx, extra):
-    extra['branch'] = ctx.branch()
+    extra[b'branch'] = ctx.branch()
+
 
 def _destrebase(repo, sourceset, destspace=None):
     """small wrapper around destmerge to pass the right extra args
 
     Please wrap destutil.destmerge instead."""
-    return destutil.destmerge(repo, action='rebase', sourceset=sourceset,
-                              onheadcheck=False, destspace=destspace)
+    return destutil.destmerge(
+        repo,
+        action=b'rebase',
+        sourceset=sourceset,
+        onheadcheck=False,
+        destspace=destspace,
+    )
+
 
 revsetpredicate = registrar.revsetpredicate()
 
-@revsetpredicate('_destrebase')
+
+@revsetpredicate(b'_destrebase')
 def _revsetdestrebase(repo, subset, x):
     # ``_rebasedefaultdest()``
 
@@ -106,13 +118,14 @@
         sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
     return subset & smartset.baseset([_destrebase(repo, sourceset)])
 
-@revsetpredicate('_destautoorphanrebase')
+
+@revsetpredicate(b'_destautoorphanrebase')
 def _revsetdestautoorphanrebase(repo, subset, x):
     # ``_destautoorphanrebase()``
 
     # automatic rebase destination for a single orphan revision.
     unfi = repo.unfiltered()
-    obsoleted = unfi.revs('obsolete()')
+    obsoleted = unfi.revs(b'obsolete()')
 
     src = revset.getset(repo, subset, x).first()
 
@@ -122,27 +135,34 @@
     dests = destutil.orphanpossibledestination(repo, src)
     if len(dests) > 1:
         raise error.Abort(
-            _("ambiguous automatic rebase: %r could end up on any of %r") % (
-                src, dests))
+            _(b"ambiguous automatic rebase: %r could end up on any of %r")
+            % (src, dests)
+        )
     # We have zero or one destination, so we can just return here.
     return smartset.baseset(dests)
 
+
 def _ctxdesc(ctx):
     """short description for a context"""
-    desc = '%d:%s "%s"' % (ctx.rev(), ctx,
-                           ctx.description().split('\n', 1)[0])
+    desc = b'%d:%s "%s"' % (
+        ctx.rev(),
+        ctx,
+        ctx.description().split(b'\n', 1)[0],
+    )
     repo = ctx.repo()
     names = []
-    for nsname, ns in repo.names.iteritems():
-        if nsname == 'branches':
+    for nsname, ns in pycompat.iteritems(repo.names):
+        if nsname == b'branches':
             continue
         names.extend(ns.names(repo, ctx.node()))
     if names:
-        desc += ' (%s)' % ' '.join(names)
+        desc += b' (%s)' % b' '.join(names)
     return desc
 
+
 class rebaseruntime(object):
     """This class is a container for rebase runtime state"""
+
     def __init__(self, repo, ui, inmemory=False, opts=None):
         if opts is None:
             opts = {}
@@ -170,22 +190,22 @@
         self.destmap = {}
         self.skipped = set()
 
-        self.collapsef = opts.get('collapse', False)
+        self.collapsef = opts.get(b'collapse', False)
         self.collapsemsg = cmdutil.logmessage(ui, opts)
-        self.date = opts.get('date', None)
+        self.date = opts.get(b'date', None)
 
-        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
+        e = opts.get(b'extrafn')  # internal, used by e.g. hgsubversion
         self.extrafns = [_savegraft]
         if e:
             self.extrafns = [e]
 
-        self.backupf = ui.configbool('rewrite', 'backup-bundle')
-        self.keepf = opts.get('keep', False)
-        self.keepbranchesf = opts.get('keepbranches', False)
+        self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
+        self.keepf = opts.get(b'keep', False)
+        self.keepbranchesf = opts.get(b'keepbranches', False)
         self.obsoletenotrebased = {}
         self.obsoletewithoutsuccessorindestination = set()
         self.inmemory = inmemory
-        self.stateobj = statemod.cmdstate(repo, 'rebasestate')
+        self.stateobj = statemod.cmdstate(repo, b'rebasestate')
 
     @property
     def repo(self):
@@ -197,87 +217,97 @@
     def storestatus(self, tr=None):
         """Store the current status to allow recovery"""
         if tr:
-            tr.addfilegenerator('rebasestate', ('rebasestate',),
-                                self._writestatus, location='plain')
+            tr.addfilegenerator(
+                b'rebasestate',
+                (b'rebasestate',),
+                self._writestatus,
+                location=b'plain',
+            )
         else:
-            with self.repo.vfs("rebasestate", "w") as f:
+            with self.repo.vfs(b"rebasestate", b"w") as f:
                 self._writestatus(f)
 
     def _writestatus(self, f):
         repo = self.repo
         assert repo.filtername is None
-        f.write(repo[self.originalwd].hex() + '\n')
+        f.write(repo[self.originalwd].hex() + b'\n')
         # was "dest". we now write dest per src root below.
-        f.write('\n')
-        f.write(repo[self.external].hex() + '\n')
-        f.write('%d\n' % int(self.collapsef))
-        f.write('%d\n' % int(self.keepf))
-        f.write('%d\n' % int(self.keepbranchesf))
-        f.write('%s\n' % (self.activebookmark or ''))
+        f.write(b'\n')
+        f.write(repo[self.external].hex() + b'\n')
+        f.write(b'%d\n' % int(self.collapsef))
+        f.write(b'%d\n' % int(self.keepf))
+        f.write(b'%d\n' % int(self.keepbranchesf))
+        f.write(b'%s\n' % (self.activebookmark or b''))
         destmap = self.destmap
-        for d, v in self.state.iteritems():
+        for d, v in pycompat.iteritems(self.state):
             oldrev = repo[d].hex()
             if v >= 0:
                 newrev = repo[v].hex()
             else:
-                newrev = "%d" % v
+                newrev = b"%d" % v
             destnode = repo[destmap[d]].hex()
-            f.write("%s:%s:%s\n" % (oldrev, newrev, destnode))
-        repo.ui.debug('rebase status stored\n')
+            f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
+        repo.ui.debug(b'rebase status stored\n')
 
     def restorestatus(self):
         """Restore a previously stored status"""
         if not self.stateobj.exists():
-            cmdutil.wrongtooltocontinue(self.repo, _('rebase'))
+            cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))
 
         data = self._read()
-        self.repo.ui.debug('rebase status resumed\n')
+        self.repo.ui.debug(b'rebase status resumed\n')
 
-        self.originalwd = data['originalwd']
-        self.destmap = data['destmap']
-        self.state = data['state']
-        self.skipped = data['skipped']
-        self.collapsef = data['collapse']
-        self.keepf = data['keep']
-        self.keepbranchesf = data['keepbranches']
-        self.external = data['external']
-        self.activebookmark = data['activebookmark']
+        self.originalwd = data[b'originalwd']
+        self.destmap = data[b'destmap']
+        self.state = data[b'state']
+        self.skipped = data[b'skipped']
+        self.collapsef = data[b'collapse']
+        self.keepf = data[b'keep']
+        self.keepbranchesf = data[b'keepbranches']
+        self.external = data[b'external']
+        self.activebookmark = data[b'activebookmark']
 
     def _read(self):
         self.prepared = True
         repo = self.repo
         assert repo.filtername is None
-        data = {'keepbranches': None, 'collapse': None, 'activebookmark': None,
-                'external': nullrev, 'keep': None, 'originalwd': None}
+        data = {
+            b'keepbranches': None,
+            b'collapse': None,
+            b'activebookmark': None,
+            b'external': nullrev,
+            b'keep': None,
+            b'originalwd': None,
+        }
         legacydest = None
         state = {}
         destmap = {}
 
         if True:
-            f = repo.vfs("rebasestate")
+            f = repo.vfs(b"rebasestate")
             for i, l in enumerate(f.read().splitlines()):
                 if i == 0:
-                    data['originalwd'] = repo[l].rev()
+                    data[b'originalwd'] = repo[l].rev()
                 elif i == 1:
                     # this line should be empty in newer versions, but legacy
                     # clients may still use it
                     if l:
                         legacydest = repo[l].rev()
                 elif i == 2:
-                    data['external'] = repo[l].rev()
+                    data[b'external'] = repo[l].rev()
                 elif i == 3:
-                    data['collapse'] = bool(int(l))
+                    data[b'collapse'] = bool(int(l))
                 elif i == 4:
-                    data['keep'] = bool(int(l))
+                    data[b'keep'] = bool(int(l))
                 elif i == 5:
-                    data['keepbranches'] = bool(int(l))
-                elif i == 6 and not (len(l) == 81 and ':' in l):
+                    data[b'keepbranches'] = bool(int(l))
+                elif i == 6 and not (len(l) == 81 and b':' in l):
                     # line 6 is a recent addition, so for backwards
                     # compatibility check that the line doesn't look like the
                     # oldrev:newrev lines
-                    data['activebookmark'] = l
+                    data[b'activebookmark'] = l
                 else:
-                    args = l.split(':')
+                    args = l.split(b':')
                     oldrev = repo[args[0]].rev()
                     newrev = args[1]
                     if newrev in legacystates:
@@ -293,22 +323,24 @@
                     else:
                         state[oldrev] = repo[newrev].rev()
 
-        if data['keepbranches'] is None:
-            raise error.Abort(_('.hg/rebasestate is incomplete'))
+        if data[b'keepbranches'] is None:
+            raise error.Abort(_(b'.hg/rebasestate is incomplete'))
 
-        data['destmap'] = destmap
-        data['state'] = state
+        data[b'destmap'] = destmap
+        data[b'state'] = state
         skipped = set()
         # recompute the set of skipped revs
-        if not data['collapse']:
+        if not data[b'collapse']:
             seen = set(destmap.values())
             for old, new in sorted(state.items()):
                 if new != revtodo and new in seen:
                     skipped.add(old)
                 seen.add(new)
-        data['skipped'] = skipped
-        repo.ui.debug('computed skipped revs: %s\n' %
-                        (' '.join('%d' % r for r in sorted(skipped)) or ''))
+        data[b'skipped'] = skipped
+        repo.ui.debug(
+            b'computed skipped revs: %s\n'
+            % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
+        )
 
         return data
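
The on-disk .hg/rebasestate file that _writestatus() and _read() round-trip
is line-oriented; a hypothetical instance, parsed standalone::

    statelines = [
        'a' * 40,      # line 0: original working directory node
        '',            # line 1: legacy single-dest field, now left empty
        '0' * 40,      # line 2: external parent node
        '0',           # line 3: collapse flag
        '0',           # line 4: keep flag
        '0',           # line 5: keepbranches flag
        'mybookmark',  # line 6: active bookmark (a recent addition)
        # remaining lines: oldrev:newrev:destnode, 40 hex digits each
        '%s:%s:%s' % ('b' * 40, '-1', 'c' * 40),
    ]
    oldrev, newrev, destnode = statelines[7].split(':')
    assert newrev == '-1'  # revtodo: this source still needs rebasing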
 
@@ -319,13 +351,14 @@
         destmap:        {srcrev: destrev} destination revisions
         """
         self.obsoletenotrebased = {}
-        if not self.ui.configbool('experimental', 'rebaseskipobsolete'):
+        if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
             return
         obsoleteset = set(obsoleterevs)
-        (self.obsoletenotrebased,
-         self.obsoletewithoutsuccessorindestination,
-         obsoleteextinctsuccessors) = _computeobsoletenotrebased(
-             self.repo, obsoleteset, destmap)
+        (
+            self.obsoletenotrebased,
+            self.obsoletewithoutsuccessorindestination,
+            obsoleteextinctsuccessors,
+        ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
         skippedset = set(self.obsoletenotrebased)
         skippedset.update(self.obsoletewithoutsuccessorindestination)
         skippedset.update(obsoleteextinctsuccessors)
@@ -339,12 +372,16 @@
             if isabort:
                 clearstatus(self.repo)
                 clearcollapsemsg(self.repo)
-                self.repo.ui.warn(_('rebase aborted (no revision is removed,'
-                                    ' only broken state is cleared)\n'))
+                self.repo.ui.warn(
+                    _(
+                        b'rebase aborted (no revision is removed,'
+                        b' only broken state is cleared)\n'
+                    )
+                )
                 return 0
             else:
-                msg = _('cannot continue inconsistent rebase')
-                hint = _('use "hg rebase --abort" to clear broken state')
+                msg = _(b'cannot continue inconsistent rebase')
+                hint = _(b'use "hg rebase --abort" to clear broken state')
                 raise error.Abort(msg, hint=hint)
 
         if isabort:
@@ -357,56 +394,66 @@
 
         rebaseset = destmap.keys()
         allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
-        if (not (self.keepf or allowunstable)
-              and self.repo.revs('first(children(%ld) - %ld)',
-                                 rebaseset, rebaseset)):
+        if not (self.keepf or allowunstable) and self.repo.revs(
+            b'first(children(%ld) - %ld)', rebaseset, rebaseset
+        ):
             raise error.Abort(
-                _("can't remove original changesets with"
-                  " unrebased descendants"),
-                hint=_('use --keep to keep original changesets'))
+                _(
+                    b"can't remove original changesets with"
+                    b" unrebased descendants"
+                ),
+                hint=_(b'use --keep to keep original changesets'),
+            )
 
         result = buildstate(self.repo, destmap, self.collapsef)
 
         if not result:
             # Empty state built, nothing to rebase
-            self.ui.status(_('nothing to rebase\n'))
+            self.ui.status(_(b'nothing to rebase\n'))
             return _nothingtorebase()
 
-        for root in self.repo.set('roots(%ld)', rebaseset):
+        for root in self.repo.set(b'roots(%ld)', rebaseset):
             if not self.keepf and not root.mutable():
-                raise error.Abort(_("can't rebase public changeset %s")
-                                  % root,
-                                  hint=_("see 'hg help phases' for details"))
+                raise error.Abort(
+                    _(b"can't rebase public changeset %s") % root,
+                    hint=_(b"see 'hg help phases' for details"),
+                )
 
         (self.originalwd, self.destmap, self.state) = result
         if self.collapsef:
             dests = set(self.destmap.values())
             if len(dests) != 1:
                 raise error.Abort(
-                    _('--collapse does not work with multiple destinations'))
+                    _(b'--collapse does not work with multiple destinations')
+                )
             destrev = next(iter(dests))
-            destancestors = self.repo.changelog.ancestors([destrev],
-                                                          inclusive=True)
+            destancestors = self.repo.changelog.ancestors(
+                [destrev], inclusive=True
+            )
             self.external = externalparent(self.repo, self.state, destancestors)
 
         for destrev in sorted(set(destmap.values())):
             dest = self.repo[destrev]
             if dest.closesbranch() and not self.keepbranchesf:
-                self.ui.status(_('reopening closed branch head %s\n') % dest)
+                self.ui.status(_(b'reopening closed branch head %s\n') % dest)
 
         self.prepared = True
 
     def _assignworkingcopy(self):
         if self.inmemory:
             from mercurial.context import overlayworkingctx
+
             self.wctx = overlayworkingctx(self.repo)
-            self.repo.ui.debug("rebasing in-memory\n")
+            self.repo.ui.debug(b"rebasing in-memory\n")
         else:
             self.wctx = self.repo[None]
-            self.repo.ui.debug("rebasing on disk\n")
-        self.repo.ui.log("rebase",
-                         "using in-memory rebase: %r\n", self.inmemory,
-                         rebase_imm_used=self.inmemory)
+            self.repo.ui.debug(b"rebasing on disk\n")
+        self.repo.ui.log(
+            b"rebase",
+            b"using in-memory rebase: %r\n",
+            self.inmemory,
+            rebase_imm_used=self.inmemory,
+        )
 
     def _performrebase(self, tr):
         self._assignworkingcopy()
@@ -421,8 +468,9 @@
                 for rev in self.state:
                     branches.add(repo[rev].branch())
                     if len(branches) > 1:
-                        raise error.Abort(_('cannot collapse multiple named '
-                            'branches'))
+                        raise error.Abort(
+                            _(b'cannot collapse multiple named branches')
+                        )
 
         # Calculate self.obsoletenotrebased
         obsrevs = _filterobsoleterevs(self.repo, self.state)
@@ -441,25 +489,29 @@
             # commits.
             self.storestatus(tr)
 
-        cands = [k for k, v in self.state.iteritems() if v == revtodo]
-        p = repo.ui.makeprogress(_("rebasing"), unit=_('changesets'),
-                                 total=len(cands))
+        cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
+        p = repo.ui.makeprogress(
+            _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
+        )
+
         def progress(ctx):
-            p.increment(item=("%d:%s" % (ctx.rev(), ctx)))
+            p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))
+
         allowdivergence = self.ui.configbool(
-            'experimental', 'evolution.allowdivergence')
+            b'experimental', b'evolution.allowdivergence'
+        )
         for subset in sortsource(self.destmap):
-            sortedrevs = self.repo.revs('sort(%ld, -topo)', subset)
+            sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
             if not allowdivergence:
                 sortedrevs -= self.repo.revs(
-                    'descendants(%ld) and not %ld',
+                    b'descendants(%ld) and not %ld',
                     self.obsoletewithoutsuccessorindestination,
                     self.obsoletewithoutsuccessorindestination,
                 )
             for rev in sortedrevs:
                 self._rebasenode(tr, rev, allowdivergence, progress)
         p.complete()
-        ui.note(_('rebase merging completed\n'))
+        ui.note(_(b'rebase merging completed\n'))
 
     def _concludenode(self, rev, p1, p2, editor, commitmsg=None):
         '''Commit the wd changes with parents p1 and p2.
@@ -473,31 +525,39 @@
         date = self.date
         if date is None:
             date = ctx.date()
-        extra = {'rebase_source': ctx.hex()}
+        extra = {b'rebase_source': ctx.hex()}
         for c in self.extrafns:
             c(ctx, extra)
         keepbranch = self.keepbranchesf and repo[p1].branch() != ctx.branch()
         destphase = max(ctx.phase(), phases.draft)
-        overrides = {('phases', 'new-commit'): destphase}
+        overrides = {(b'phases', b'new-commit'): destphase}
         if keepbranch:
-            overrides[('ui', 'allowemptycommit')] = True
-        with repo.ui.configoverride(overrides, 'rebase'):
+            overrides[(b'ui', b'allowemptycommit')] = True
+        with repo.ui.configoverride(overrides, b'rebase'):
             if self.inmemory:
-                newnode = commitmemorynode(repo, p1, p2,
+                newnode = commitmemorynode(
+                    repo,
+                    p1,
+                    p2,
                     wctx=self.wctx,
                     extra=extra,
                     commitmsg=commitmsg,
                     editor=editor,
                     user=ctx.user(),
-                    date=date)
+                    date=date,
+                )
                 mergemod.mergestate.clean(repo)
             else:
-                newnode = commitnode(repo, p1, p2,
+                newnode = commitnode(
+                    repo,
+                    p1,
+                    p2,
                     extra=extra,
                     commitmsg=commitmsg,
                     editor=editor,
                     user=ctx.user(),
-                    date=date)
+                    date=date,
+                )
 
             if newnode is None:
                 # If it ended up being a no-op commit, then the normal
@@ -512,55 +572,79 @@
         ctx = repo[rev]
         desc = _ctxdesc(ctx)
         if self.state[rev] == rev:
-            ui.status(_('already rebased %s\n') % desc)
-        elif (not allowdivergence
-              and rev in self.obsoletewithoutsuccessorindestination):
-            msg = _('note: not rebasing %s and its descendants as '
-                    'this would cause divergence\n') % desc
+            ui.status(_(b'already rebased %s\n') % desc)
+        elif (
+            not allowdivergence
+            and rev in self.obsoletewithoutsuccessorindestination
+        ):
+            msg = (
+                _(
+                    b'note: not rebasing %s and its descendants as '
+                    b'this would cause divergence\n'
+                )
+                % desc
+            )
             repo.ui.status(msg)
             self.skipped.add(rev)
         elif rev in self.obsoletenotrebased:
             succ = self.obsoletenotrebased[rev]
             if succ is None:
-                msg = _('note: not rebasing %s, it has no '
-                        'successor\n') % desc
+                msg = _(b'note: not rebasing %s, it has no successor\n') % desc
             else:
                 succdesc = _ctxdesc(repo[succ])
-                msg = (_('note: not rebasing %s, already in '
-                         'destination as %s\n') % (desc, succdesc))
+                msg = _(
+                    b'note: not rebasing %s, already in destination as %s\n'
+                ) % (desc, succdesc)
             repo.ui.status(msg)
             # Make clearrebased aware state[rev] is not a true successor
             self.skipped.add(rev)
             # Record rev as moved to its desired destination in self.state.
             # This helps bookmark and working parent movement.
-            dest = max(adjustdest(repo, rev, self.destmap, self.state,
-                                  self.skipped))
+            dest = max(
+                adjustdest(repo, rev, self.destmap, self.state, self.skipped)
+            )
             self.state[rev] = dest
         elif self.state[rev] == revtodo:
-            ui.status(_('rebasing %s\n') % desc)
+            ui.status(_(b'rebasing %s\n') % desc)
             progressfn(ctx)
-            p1, p2, base = defineparents(repo, rev, self.destmap,
-                                         self.state, self.skipped,
-                                         self.obsoletenotrebased)
+            p1, p2, base = defineparents(
+                repo,
+                rev,
+                self.destmap,
+                self.state,
+                self.skipped,
+                self.obsoletenotrebased,
+            )
             if not self.inmemory and len(repo[None].parents()) == 2:
-                repo.ui.debug('resuming interrupted rebase\n')
+                repo.ui.debug(b'resuming interrupted rebase\n')
             else:
-                overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
-                with ui.configoverride(overrides, 'rebase'):
-                    stats = rebasenode(repo, rev, p1, base, self.collapsef,
-                                       dest, wctx=self.wctx)
+                overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+                with ui.configoverride(overrides, b'rebase'):
+                    stats = rebasenode(
+                        repo,
+                        rev,
+                        p1,
+                        base,
+                        self.collapsef,
+                        dest,
+                        wctx=self.wctx,
+                    )
                     if stats.unresolvedcount > 0:
                         if self.inmemory:
                             raise error.InMemoryMergeConflictsError()
                         else:
                             raise error.InterventionRequired(
-                                _('unresolved conflicts (see hg '
-                                  'resolve, then hg rebase --continue)'))
+                                _(
+                                    b'unresolved conflicts (see hg '
+                                    b'resolve, then hg rebase --continue)'
+                                )
+                            )
             if not self.collapsef:
                 merging = p2 != nullrev
-                editform = cmdutil.mergeeditform(merging, 'rebase')
-                editor = cmdutil.getcommiteditor(editform=editform,
-                                                 **pycompat.strkwargs(opts))
+                editform = cmdutil.mergeeditform(merging, b'rebase')
+                editor = cmdutil.getcommiteditor(
+                    editform=editform, **pycompat.strkwargs(opts)
+                )
                 newnode = self._concludenode(rev, p1, p2, editor)
             else:
                 # Skip commit if we are collapsing
@@ -572,17 +656,23 @@
             # Update the state
             if newnode is not None:
                 self.state[rev] = repo[newnode].rev()
-                ui.debug('rebased as %s\n' % short(newnode))
+                ui.debug(b'rebased as %s\n' % short(newnode))
             else:
                 if not self.collapsef:
-                    ui.warn(_('note: not rebasing %s, its destination already '
-                              'has all its changes\n') % desc)
+                    ui.warn(
+                        _(
+                            b'note: not rebasing %s, its destination already '
+                            b'has all its changes\n'
+                        )
+                        % desc
+                    )
                     self.skipped.add(rev)
                 self.state[rev] = p1
-                ui.debug('next revision set to %d\n' % p1)
+                ui.debug(b'next revision set to %d\n' % p1)
         else:
-            ui.status(_('already rebased %s as %s\n') %
-                      (desc, repo[self.state[rev]]))
+            ui.status(
+                _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
+            )
         if not tr:
             # When not using single transaction, store state after each
             # commit is completely done. On InterventionRequired, we thus
@@ -592,36 +682,41 @@
 
     def _finishrebase(self):
         repo, ui, opts = self.repo, self.ui, self.opts
-        fm = ui.formatter('rebase', opts)
+        fm = ui.formatter(b'rebase', opts)
         fm.startitem()
         if self.collapsef:
-            p1, p2, _base = defineparents(repo, min(self.state), self.destmap,
-                                          self.state, self.skipped,
-                                          self.obsoletenotrebased)
-            editopt = opts.get('edit')
-            editform = 'rebase.collapse'
+            p1, p2, _base = defineparents(
+                repo,
+                min(self.state),
+                self.destmap,
+                self.state,
+                self.skipped,
+                self.obsoletenotrebased,
+            )
+            editopt = opts.get(b'edit')
+            editform = b'rebase.collapse'
             if self.collapsemsg:
                 commitmsg = self.collapsemsg
             else:
-                commitmsg = 'Collapsed revision'
+                commitmsg = b'Collapsed revision'
                 for rebased in sorted(self.state):
                     if rebased not in self.skipped:
-                        commitmsg += '\n* %s' % repo[rebased].description()
+                        commitmsg += b'\n* %s' % repo[rebased].description()
                 editopt = True
             editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
             revtoreuse = max(self.state)
 
-            newnode = self._concludenode(revtoreuse, p1, self.external,
-                                         editor, commitmsg=commitmsg)
+            newnode = self._concludenode(
+                revtoreuse, p1, self.external, editor, commitmsg=commitmsg
+            )
 
             if newnode is not None:
                 newrev = repo[newnode].rev()
                 for oldrev in self.state:
                     self.state[oldrev] = newrev
 
-        if 'qtip' in repo.tags():
-            updatemq(repo, self.state, self.skipped,
-                     **pycompat.strkwargs(opts))
+        if b'qtip' in repo.tags():
+            updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))
 
         # restore original working directory
         # (we do this before stripping)
@@ -630,28 +725,40 @@
             # original directory is a parent of rebase set root or ignored
             newwd = self.originalwd
         if newwd not in [c.rev() for c in repo[None].parents()]:
-            ui.note(_("update back to initial working directory parent\n"))
+            ui.note(_(b"update back to initial working directory parent\n"))
             hg.updaterepo(repo, newwd, overwrite=False)
 
         collapsedas = None
         if self.collapsef and not self.keepf:
             collapsedas = newnode
-        clearrebased(ui, repo, self.destmap, self.state, self.skipped,
-                     collapsedas, self.keepf, fm=fm, backup=self.backupf)
+        clearrebased(
+            ui,
+            repo,
+            self.destmap,
+            self.state,
+            self.skipped,
+            collapsedas,
+            self.keepf,
+            fm=fm,
+            backup=self.backupf,
+        )
 
         clearstatus(repo)
         clearcollapsemsg(repo)
 
-        ui.note(_("rebase completed\n"))
-        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
+        ui.note(_(b"rebase completed\n"))
+        util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
         if self.skipped:
             skippedlen = len(self.skipped)
-            ui.note(_("%d revisions have been skipped\n") % skippedlen)
+            ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
         fm.end()
 
-        if (self.activebookmark and self.activebookmark in repo._bookmarks and
-            repo['.'].node() == repo._bookmarks[self.activebookmark]):
-                bookmarks.activate(repo, self.activebookmark)
+        if (
+            self.activebookmark
+            and self.activebookmark in repo._bookmarks
+            and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
+        ):
+            bookmarks.activate(repo, self.activebookmark)
 
     def _abort(self, backup=True, suppwarns=False):
         '''Restore the repository to its original state.'''
@@ -662,39 +769,50 @@
             # rebase, their values within the state mapping will be the dest
             # rev id. The rebased list must must not contain the dest rev
             # (issue4896)
-            rebased = [s for r, s in self.state.items()
-                       if s >= 0 and s != r and s != self.destmap[r]]
+            rebased = [
+                s
+                for r, s in self.state.items()
+                if s >= 0 and s != r and s != self.destmap[r]
+            ]
             immutable = [d for d in rebased if not repo[d].mutable()]
             cleanup = True
             if immutable:
-                repo.ui.warn(_("warning: can't clean up public changesets %s\n")
-                             % ', '.join(bytes(repo[r]) for r in immutable),
-                             hint=_("see 'hg help phases' for details"))
+                repo.ui.warn(
+                    _(b"warning: can't clean up public changesets %s\n")
+                    % b', '.join(bytes(repo[r]) for r in immutable),
+                    hint=_(b"see 'hg help phases' for details"),
+                )
                 cleanup = False
 
             descendants = set()
             if rebased:
                 descendants = set(repo.changelog.descendants(rebased))
             if descendants - set(rebased):
-                repo.ui.warn(_("warning: new changesets detected on "
-                               "destination branch, can't strip\n"))
+                repo.ui.warn(
+                    _(
+                        b"warning: new changesets detected on "
+                        b"destination branch, can't strip\n"
+                    )
+                )
                 cleanup = False
 
             if cleanup:
                 shouldupdate = False
                 if rebased:
                     strippoints = [
-                        c.node() for c in repo.set('roots(%ld)', rebased)]
+                        c.node() for c in repo.set(b'roots(%ld)', rebased)
+                    ]
 
                 updateifonnodes = set(rebased)
                 updateifonnodes.update(self.destmap.values())
                 updateifonnodes.add(self.originalwd)
-                shouldupdate = repo['.'].rev() in updateifonnodes
+                shouldupdate = repo[b'.'].rev() in updateifonnodes
 
                 # Update away from the rebase if necessary
                 if shouldupdate or needupdate(repo, self.state):
-                    mergemod.update(repo, self.originalwd, branchmerge=False,
-                                    force=True)
+                    mergemod.update(
+                        repo, self.originalwd, branchmerge=False, force=True
+                    )
 
                 # Strip from the first rebased revision
                 if rebased:
@@ -707,39 +825,75 @@
             clearstatus(repo)
             clearcollapsemsg(repo)
             if not suppwarns:
-                repo.ui.warn(_('rebase aborted\n'))
+                repo.ui.warn(_(b'rebase aborted\n'))
         return 0
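
The registration rework below reflows the rebase command's option table. Each option is a five-tuple of short flag, long flag, default, help text, and value placeholder, all bytes now. For reference, a minimal sketch of an extension registering a command with the same tuple shape; the command name and option here are hypothetical:

    from mercurial import pycompat, registrar
    from mercurial.i18n import _

    cmdtable = {}
    command = registrar.command(cmdtable)

    @command(
        b'hello',  # hypothetical command name
        [(b'n', b'name', b'', _(b'name to greet'), _(b'NAME'))],
        _(b'hg hello [-n NAME]'),
    )
    def hello(ui, repo, **opts):
        # kwargs arrive str-keyed on Python 3; convert back to bytes
        opts = pycompat.byteskwargs(opts)
        ui.write(b'hello %s\n' % (opts.get(b'name') or b'world'))
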
 
-@command('rebase',
-    [('s', 'source', '',
-     _('rebase the specified changeset and descendants'), _('REV')),
-    ('b', 'base', '',
-     _('rebase everything from branching point of specified changeset'),
-     _('REV')),
-    ('r', 'rev', [],
-     _('rebase these revisions'),
-     _('REV')),
-    ('d', 'dest', '',
-     _('rebase onto the specified changeset'), _('REV')),
-    ('', 'collapse', False, _('collapse the rebased changesets')),
-    ('m', 'message', '',
-     _('use text as collapse commit message'), _('TEXT')),
-    ('e', 'edit', False, _('invoke editor on commit messages')),
-    ('l', 'logfile', '',
-     _('read collapse commit message from file'), _('FILE')),
-    ('k', 'keep', False, _('keep original changesets')),
-    ('', 'keepbranches', False, _('keep original branch names')),
-    ('D', 'detach', False, _('(DEPRECATED)')),
-    ('i', 'interactive', False, _('(DEPRECATED)')),
-    ('t', 'tool', '', _('specify merge tool')),
-    ('', 'stop', False, _('stop interrupted rebase')),
-    ('c', 'continue', False, _('continue an interrupted rebase')),
-    ('a', 'abort', False, _('abort an interrupted rebase')),
-    ('', 'auto-orphans', '', _('automatically rebase orphan revisions '
-                               'in the specified revset (EXPERIMENTAL)')),
-     ] + cmdutil.dryrunopts + cmdutil.formatteropts + cmdutil.confirmopts,
-    _('[-s REV | -b REV] [-d REV] [OPTION]'),
-    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
+
+@command(
+    b'rebase',
+    [
+        (
+            b's',
+            b'source',
+            b'',
+            _(b'rebase the specified changeset and descendants'),
+            _(b'REV'),
+        ),
+        (
+            b'b',
+            b'base',
+            b'',
+            _(b'rebase everything from branching point of specified changeset'),
+            _(b'REV'),
+        ),
+        (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
+        (
+            b'd',
+            b'dest',
+            b'',
+            _(b'rebase onto the specified changeset'),
+            _(b'REV'),
+        ),
+        (b'', b'collapse', False, _(b'collapse the rebased changesets')),
+        (
+            b'm',
+            b'message',
+            b'',
+            _(b'use text as collapse commit message'),
+            _(b'TEXT'),
+        ),
+        (b'e', b'edit', False, _(b'invoke editor on commit messages')),
+        (
+            b'l',
+            b'logfile',
+            b'',
+            _(b'read collapse commit message from file'),
+            _(b'FILE'),
+        ),
+        (b'k', b'keep', False, _(b'keep original changesets')),
+        (b'', b'keepbranches', False, _(b'keep original branch names')),
+        (b'D', b'detach', False, _(b'(DEPRECATED)')),
+        (b'i', b'interactive', False, _(b'(DEPRECATED)')),
+        (b't', b'tool', b'', _(b'specify merge tool')),
+        (b'', b'stop', False, _(b'stop interrupted rebase')),
+        (b'c', b'continue', False, _(b'continue an interrupted rebase')),
+        (b'a', b'abort', False, _(b'abort an interrupted rebase')),
+        (
+            b'',
+            b'auto-orphans',
+            b'',
+            _(
+                b'automatically rebase orphan revisions '
+                b'in the specified revset (EXPERIMENTAL)'
+            ),
+        ),
+    ]
+    + cmdutil.dryrunopts
+    + cmdutil.formatteropts
+    + cmdutil.confirmopts,
+    _(b'[-s REV | -b REV] [-d REV] [OPTION]'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+)
 def rebase(ui, repo, **opts):
     """move changeset (and descendants) to a different branch
 
@@ -864,20 +1018,21 @@
 
     """
     opts = pycompat.byteskwargs(opts)
-    inmemory = ui.configbool('rebase', 'experimental.inmemory')
-    dryrun = opts.get('dry_run')
-    confirm = opts.get('confirm')
-    selactions = [k for k in ['abort', 'stop', 'continue'] if opts.get(k)]
+    inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
+    dryrun = opts.get(b'dry_run')
+    confirm = opts.get(b'confirm')
+    selactions = [k for k in [b'abort', b'stop', b'continue'] if opts.get(k)]
     if len(selactions) > 1:
-        raise error.Abort(_('cannot use --%s with --%s')
-                          % tuple(selactions[:2]))
+        raise error.Abort(
+            _(b'cannot use --%s with --%s') % tuple(selactions[:2])
+        )
     action = selactions[0] if selactions else None
     if dryrun and action:
-        raise error.Abort(_('cannot specify both --dry-run and --%s') % action)
+        raise error.Abort(_(b'cannot specify both --dry-run and --%s') % action)
     if confirm and action:
-        raise error.Abort(_('cannot specify both --confirm and --%s') % action)
+        raise error.Abort(_(b'cannot specify both --confirm and --%s') % action)
     if dryrun and confirm:
-        raise error.Abort(_('cannot specify both --confirm and --dry-run'))
+        raise error.Abort(_(b'cannot specify both --confirm and --dry-run'))
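
pycompat.byteskwargs above (and strkwargs elsewhere in the patch) bridge the Python 3 rule that **kwargs keys must be str against Mercurial's bytes-keyed internals. Roughly, and glossing over the exact codec handling in mercurial.pycompat:

    def byteskwargs(dic):
        # str-keyed kwargs -> bytes keys for internal use (sketch)
        return {k.encode('latin-1'): v for k, v in dic.items()}

    def strkwargs(dic):
        # bytes-keyed opts -> str keys, so they can be splatted as **kwargs
        return {k.decode('latin-1'): v for k, v in dic.items()}
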
 
     if action or repo.currenttransaction() is not None:
         # in-memory rebase is not compatible with resuming rebases.
@@ -885,30 +1040,37 @@
         # fail the entire transaction.)
         inmemory = False
 
-    if opts.get('auto_orphans'):
+    if opts.get(b'auto_orphans'):
         for key in opts:
-            if key != 'auto_orphans' and opts.get(key):
-                raise error.Abort(_('--auto-orphans is incompatible with %s') %
-                                  ('--' + key))
-        userrevs = list(repo.revs(opts.get('auto_orphans')))
-        opts['rev'] = [revsetlang.formatspec('%ld and orphan()', userrevs)]
-        opts['dest'] = '_destautoorphanrebase(SRC)'
+            if key != b'auto_orphans' and opts.get(key):
+                raise error.Abort(
+                    _(b'--auto-orphans is incompatible with %s') % (b'--' + key)
+                )
+        userrevs = list(repo.revs(opts.get(b'auto_orphans')))
+        opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
+        opts[b'dest'] = b'_destautoorphanrebase(SRC)'
 
     if dryrun or confirm:
         return _dryrunrebase(ui, repo, action, opts)
-    elif action == 'stop':
+    elif action == b'stop':
         rbsrt = rebaseruntime(repo, ui)
         with repo.wlock(), repo.lock():
             rbsrt.restorestatus()
             if rbsrt.collapsef:
-                raise error.Abort(_("cannot stop in --collapse session"))
+                raise error.Abort(_(b"cannot stop in --collapse session"))
             allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
             if not (rbsrt.keepf or allowunstable):
-                raise error.Abort(_("cannot remove original changesets with"
-                                    " unrebased descendants"),
-                    hint=_('either enable obsmarkers to allow unstable '
-                           'revisions or use --keep to keep original '
-                           'changesets'))
+                raise error.Abort(
+                    _(
+                        b"cannot remove original changesets with"
+                        b" unrebased descendants"
+                    ),
+                    hint=_(
+                        b'either enable obsmarkers to allow unstable '
+                        b'revisions or use --keep to keep original '
+                        b'changesets'
+                    ),
+                )
             if needupdate(repo, rbsrt.state):
                 # update to the current working revision
                 # to clear interrupted merge
@@ -919,12 +1081,16 @@
         try:
             # in-memory merge doesn't support conflicts, so if we hit any, abort
             # and re-run as an on-disk merge.
-            overrides = {('rebase', 'singletransaction'): True}
-            with ui.configoverride(overrides, 'rebase'):
+            overrides = {(b'rebase', b'singletransaction'): True}
+            with ui.configoverride(overrides, b'rebase'):
                 return _dorebase(ui, repo, action, opts, inmemory=inmemory)
         except error.InMemoryMergeConflictsError:
-            ui.warn(_('hit merge conflicts; re-running rebase without in-memory'
-                      ' merge\n'))
+            ui.warn(
+                _(
+                    b'hit merge conflicts; re-running rebase without in-memory'
+                    b' merge\n'
+                )
+            )
             # TODO: Make in-memory merge not use the on-disk merge state, so
             # we don't have to clean it here
             mergemod.mergestate.clean(repo)
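
ui.configoverride, used above with a {(section, name): value} mapping and a source label, scopes configuration changes to the with block. A small sketch of the calling pattern, assuming a default-loaded ui object:

    from mercurial import ui as uimod

    ui = uimod.ui.load()
    overrides = {(b'rebase', b'singletransaction'): True}
    with ui.configoverride(overrides, b'rebase'):
        assert ui.configbool(b'rebase', b'singletransaction')
    # on exit the previous value is restored
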
@@ -934,98 +1100,131 @@
     else:
         return _dorebase(ui, repo, action, opts)
 
+
 def _dryrunrebase(ui, repo, action, opts):
     rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
-    confirm = opts.get('confirm')
+    confirm = opts.get(b'confirm')
     if confirm:
-        ui.status(_('starting in-memory rebase\n'))
+        ui.status(_(b'starting in-memory rebase\n'))
     else:
-        ui.status(_('starting dry-run rebase; repository will not be '
-                    'changed\n'))
+        ui.status(
+            _(b'starting dry-run rebase; repository will not be changed\n')
+        )
     with repo.wlock(), repo.lock():
         needsabort = True
         try:
-            overrides = {('rebase', 'singletransaction'): True}
-            with ui.configoverride(overrides, 'rebase'):
-                _origrebase(ui, repo, action, opts, rbsrt, inmemory=True,
-                            leaveunfinished=True)
+            overrides = {(b'rebase', b'singletransaction'): True}
+            with ui.configoverride(overrides, b'rebase'):
+                _origrebase(
+                    ui,
+                    repo,
+                    action,
+                    opts,
+                    rbsrt,
+                    inmemory=True,
+                    leaveunfinished=True,
+                )
         except error.InMemoryMergeConflictsError:
-            ui.status(_('hit a merge conflict\n'))
+            ui.status(_(b'hit a merge conflict\n'))
             return 1
         except error.Abort:
             needsabort = False
             raise
         else:
             if confirm:
-                ui.status(_('rebase completed successfully\n'))
-                if not ui.promptchoice(_(b'apply changes (yn)?'
-                                         b'$$ &Yes $$ &No')):
+                ui.status(_(b'rebase completed successfully\n'))
+                if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
                     # finish unfinished rebase
                     rbsrt._finishrebase()
                 else:
-                    rbsrt._prepareabortorcontinue(isabort=True, backup=False,
-                                                  suppwarns=True)
+                    rbsrt._prepareabortorcontinue(
+                        isabort=True, backup=False, suppwarns=True
+                    )
                 needsabort = False
             else:
-                ui.status(_('dry-run rebase completed successfully; run without'
-                            ' -n/--dry-run to perform this rebase\n'))
+                ui.status(
+                    _(
+                        b'dry-run rebase completed successfully; run without'
+                        b' -n/--dry-run to perform this rebase\n'
+                    )
+                )
             return 0
         finally:
             if needsabort:
                 # no need to store backup in case of dryrun
-                rbsrt._prepareabortorcontinue(isabort=True, backup=False,
-                                              suppwarns=True)
+                rbsrt._prepareabortorcontinue(
+                    isabort=True, backup=False, suppwarns=True
+                )
+
 
 def _dorebase(ui, repo, action, opts, inmemory=False):
     rbsrt = rebaseruntime(repo, ui, inmemory, opts)
     return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)
 
-def _origrebase(ui, repo, action, opts, rbsrt, inmemory=False,
-                leaveunfinished=False):
-    assert action != 'stop'
+
+def _origrebase(
+    ui, repo, action, opts, rbsrt, inmemory=False, leaveunfinished=False
+):
+    assert action != b'stop'
     with repo.wlock(), repo.lock():
         # Validate input and define rebasing points
-        destf = opts.get('dest', None)
-        srcf = opts.get('source', None)
-        basef = opts.get('base', None)
-        revf = opts.get('rev', [])
+        destf = opts.get(b'dest', None)
+        srcf = opts.get(b'source', None)
+        basef = opts.get(b'base', None)
+        revf = opts.get(b'rev', [])
         # search default destination in this space
         # used in the 'hg pull --rebase' case, see issue 5214.
-        destspace = opts.get('_destspace')
-        if opts.get('interactive'):
+        destspace = opts.get(b'_destspace')
+        if opts.get(b'interactive'):
             try:
-                if extensions.find('histedit'):
-                    enablehistedit = ''
+                if extensions.find(b'histedit'):
+                    enablehistedit = b''
             except KeyError:
-                enablehistedit = " --config extensions.histedit="
-            help = "hg%s help -e histedit" % enablehistedit
-            msg = _("interactive history editing is supported by the "
-                    "'histedit' extension (see \"%s\")") % help
+                enablehistedit = b" --config extensions.histedit="
+            help = b"hg%s help -e histedit" % enablehistedit
+            msg = (
+                _(
+                    b"interactive history editing is supported by the "
+                    b"'histedit' extension (see \"%s\")"
+                )
+                % help
+            )
             raise error.Abort(msg)
 
         if rbsrt.collapsemsg and not rbsrt.collapsef:
-            raise error.Abort(
-                _('message can only be specified with collapse'))
+            raise error.Abort(_(b'message can only be specified with collapse'))
 
         if action:
             if rbsrt.collapsef:
                 raise error.Abort(
-                    _('cannot use collapse with continue or abort'))
+                    _(b'cannot use collapse with continue or abort')
+                )
             if srcf or basef or destf:
                 raise error.Abort(
-                    _('abort and continue do not allow specifying revisions'))
-            if action == 'abort' and opts.get('tool', False):
-                ui.warn(_('tool option will be ignored\n'))
-            if action == 'continue':
+                    _(b'abort and continue do not allow specifying revisions')
+                )
+            if action == b'abort' and opts.get(b'tool', False):
+                ui.warn(_(b'tool option will be ignored\n'))
+            if action == b'continue':
                 ms = mergemod.mergestate.read(repo)
                 mergeutil.checkunresolved(ms)
 
-            retcode = rbsrt._prepareabortorcontinue(isabort=(action == 'abort'))
+            retcode = rbsrt._prepareabortorcontinue(
+                isabort=(action == b'abort')
+            )
             if retcode is not None:
                 return retcode
         else:
-            destmap = _definedestmap(ui, repo, inmemory, destf, srcf, basef,
-                                     revf, destspace=destspace)
+            destmap = _definedestmap(
+                ui,
+                repo,
+                inmemory,
+                destf,
+                srcf,
+                basef,
+                revf,
+                destspace=destspace,
+            )
             retcode = rbsrt._preparenewrebase(destmap)
             if retcode is not None:
                 return retcode
@@ -1033,9 +1232,9 @@
 
         tr = None
 
-        singletr = ui.configbool('rebase', 'singletransaction')
+        singletr = ui.configbool(b'rebase', b'singletransaction')
         if singletr:
-            tr = repo.transaction('rebase')
+            tr = repo.transaction(b'rebase')
 
         # If `rebase.singletransaction` is enabled, wrap the entire operation in
         # one transaction here. Otherwise, transactions are obtained when
@@ -1045,14 +1244,23 @@
             # rebasing in-memory (it's not needed).
             dsguard = None
             if singletr and not inmemory:
-                dsguard = dirstateguard.dirstateguard(repo, 'rebase')
+                dsguard = dirstateguard.dirstateguard(repo, b'rebase')
             with util.acceptintervention(dsguard):
                 rbsrt._performrebase(tr)
                 if not leaveunfinished:
                     rbsrt._finishrebase()
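
util.acceptintervention, paired with the dirstateguard above, closes (rather than aborts) the guard when InterventionRequired escapes, so work handed back to the user for conflict resolution is kept. A sketch of the helper's shape, assuming the behavior its name implies; the real mercurial.util version may differ in detail:

    import contextlib

    from mercurial import error

    @contextlib.contextmanager
    def acceptintervention(tr=None):
        # tr is any transaction-like object with close()/release()
        if not tr:
            yield
            return
        try:
            yield
            tr.close()
        except error.InterventionRequired:
            tr.close()  # keep the changes; the user takes over
            raise
        finally:
            tr.release()
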
 
-def _definedestmap(ui, repo, inmemory, destf=None, srcf=None, basef=None,
-                   revf=None, destspace=None):
+
+def _definedestmap(
+    ui,
+    repo,
+    inmemory,
+    destf=None,
+    srcf=None,
+    basef=None,
+    revf=None,
+    destspace=None,
+):
     """use revisions argument to define destmap {srcrev: destrev}"""
     if revf is None:
         revf = []
@@ -1060,39 +1268,42 @@
     # destspace is here to work around issues with `hg pull --rebase` see
     # issue5214 for details
     if srcf and basef:
-        raise error.Abort(_('cannot specify both a source and a base'))
+        raise error.Abort(_(b'cannot specify both a source and a base'))
     if revf and basef:
-        raise error.Abort(_('cannot specify both a revision and a base'))
+        raise error.Abort(_(b'cannot specify both a revision and a base'))
     if revf and srcf:
-        raise error.Abort(_('cannot specify both a revision and a source'))
+        raise error.Abort(_(b'cannot specify both a revision and a source'))
 
     if not inmemory:
         cmdutil.checkunfinished(repo)
         cmdutil.bailifchanged(repo)
 
-    if ui.configbool('commands', 'rebase.requiredest') and not destf:
-        raise error.Abort(_('you must specify a destination'),
-                          hint=_('use: hg rebase -d REV'))
+    if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
+        raise error.Abort(
+            _(b'you must specify a destination'),
+            hint=_(b'use: hg rebase -d REV'),
+        )
 
     dest = None
 
     if revf:
         rebaseset = scmutil.revrange(repo, revf)
         if not rebaseset:
-            ui.status(_('empty "rev" revision set - nothing to rebase\n'))
+            ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
             return None
     elif srcf:
         src = scmutil.revrange(repo, [srcf])
         if not src:
-            ui.status(_('empty "source" revision set - nothing to rebase\n'))
+            ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
             return None
-        rebaseset = repo.revs('(%ld)::', src)
+        rebaseset = repo.revs(b'(%ld)::', src)
         assert rebaseset
     else:
-        base = scmutil.revrange(repo, [basef or '.'])
+        base = scmutil.revrange(repo, [basef or b'.'])
         if not base:
-            ui.status(_('empty "base" revision set - '
-                        "can't compute rebase set\n"))
+            ui.status(
+                _(b'empty "base" revision set - ' b"can't compute rebase set\n")
+            )
             return None
         if destf:
             # --base does not support multiple destinations
@@ -1101,19 +1312,19 @@
             dest = repo[_destrebase(repo, base, destspace=destspace)]
             destf = bytes(dest)
 
-        roots = [] # selected children of branching points
-        bpbase = {} # {branchingpoint: [origbase]}
-        for b in base: # group bases by branching points
-            bp = repo.revs('ancestor(%d, %d)', b, dest.rev()).first()
+        roots = []  # selected children of branching points
+        bpbase = {}  # {branchingpoint: [origbase]}
+        for b in base:  # group bases by branching points
+            bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first()
             bpbase[bp] = bpbase.get(bp, []) + [b]
         if None in bpbase:
             # emulate the old behavior, showing "nothing to rebase" (a better
             # behavior may be abort with "cannot find branching point" error)
             bpbase.clear()
-        for bp, bs in bpbase.iteritems(): # calculate roots
-            roots += list(repo.revs('children(%d) & ancestors(%ld)', bp, bs))
+        for bp, bs in pycompat.iteritems(bpbase):  # calculate roots
+            roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs))
 
-        rebaseset = repo.revs('%ld::', roots)
+        rebaseset = repo.revs(b'%ld::', roots)
 
         if not rebaseset:
             # transform to list because smartsets are not comparable to
@@ -1121,30 +1332,53 @@
             # smartset.
             if list(base) == [dest.rev()]:
                 if basef:
-                    ui.status(_('nothing to rebase - %s is both "base"'
-                                ' and destination\n') % dest)
+                    ui.status(
+                        _(
+                            b'nothing to rebase - %s is both "base"'
+                            b' and destination\n'
+                        )
+                        % dest
+                    )
                 else:
-                    ui.status(_('nothing to rebase - working directory '
-                                'parent is also destination\n'))
-            elif not repo.revs('%ld - ::%d', base, dest.rev()):
+                    ui.status(
+                        _(
+                            b'nothing to rebase - working directory '
+                            b'parent is also destination\n'
+                        )
+                    )
+            elif not repo.revs(b'%ld - ::%d', base, dest.rev()):
                 if basef:
-                    ui.status(_('nothing to rebase - "base" %s is '
-                                'already an ancestor of destination '
-                                '%s\n') %
-                              ('+'.join(bytes(repo[r]) for r in base),
-                               dest))
+                    ui.status(
+                        _(
+                            b'nothing to rebase - "base" %s is '
+                            b'already an ancestor of destination '
+                            b'%s\n'
+                        )
+                        % (b'+'.join(bytes(repo[r]) for r in base), dest)
+                    )
                 else:
-                    ui.status(_('nothing to rebase - working '
-                                'directory parent is already an '
-                                'ancestor of destination %s\n') % dest)
-            else: # can it happen?
-                ui.status(_('nothing to rebase from %s to %s\n') %
-                          ('+'.join(bytes(repo[r]) for r in base), dest))
+                    ui.status(
+                        _(
+                            b'nothing to rebase - working '
+                            b'directory parent is already an '
+                            b'ancestor of destination %s\n'
+                        )
+                        % dest
+                    )
+            else:  # can it happen?
+                ui.status(
+                    _(b'nothing to rebase from %s to %s\n')
+                    % (b'+'.join(bytes(repo[r]) for r in base), dest)
+                )
             return None
 
-    rebasingwcp = repo['.'].rev() in rebaseset
-    ui.log("rebase", "rebasing working copy parent: %r\n", rebasingwcp,
-           rebase_rebasing_wcp=rebasingwcp)
+    rebasingwcp = repo[b'.'].rev() in rebaseset
+    ui.log(
+        b"rebase",
+        b"rebasing working copy parent: %r\n",
+        rebasingwcp,
+        rebase_rebasing_wcp=rebasingwcp,
+    )
     if inmemory and rebasingwcp:
         # Check these since we did not before.
         cmdutil.checkunfinished(repo)
@@ -1154,8 +1388,8 @@
         dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
         destf = bytes(dest)
 
-    allsrc = revsetlang.formatspec('%ld', rebaseset)
-    alias = {'ALLSRC': allsrc}
+    allsrc = revsetlang.formatspec(b'%ld', rebaseset)
+    alias = {b'ALLSRC': allsrc}
 
     if dest is None:
         try:
@@ -1165,7 +1399,7 @@
             # multi-dest path: resolve dest for each SRC separately
             destmap = {}
             for r in rebaseset:
-                alias['SRC'] = revsetlang.formatspec('%d', r)
+                alias[b'SRC'] = revsetlang.formatspec(b'%d', r)
                 # use repo.anyrevs instead of scmutil.revsingle because we
                 # don't want to abort if destset is empty.
                 destset = repo.anyrevs([destf], user=True, localalias=alias)
@@ -1173,22 +1407,24 @@
                 if size == 1:
                     destmap[r] = destset.first()
                 elif size == 0:
-                    ui.note(_('skipping %s - empty destination\n') % repo[r])
+                    ui.note(_(b'skipping %s - empty destination\n') % repo[r])
                 else:
-                    raise error.Abort(_('rebase destination for %s is not '
-                                        'unique') % repo[r])
+                    raise error.Abort(
+                        _(b'rebase destination for %s is not unique') % repo[r]
+                    )
 
     if dest is not None:
         # single-dest case: assign dest to each rev in rebaseset
         destrev = dest.rev()
-        destmap = {r: destrev for r in rebaseset} # {srcrev: destrev}
+        destmap = {r: destrev for r in rebaseset}  # {srcrev: destrev}
 
     if not destmap:
-        ui.status(_('nothing to rebase - empty destination\n'))
+        ui.status(_(b'nothing to rebase - empty destination\n'))
         return None
 
     return destmap
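
The revset calls throughout _definedestmap rely on printf-style placeholders that formatspec quotes safely into the revset language: %d for a single revision number, %ld for a list of them, %s for a string. For example:

    from mercurial import revsetlang

    # embeds the list without any manual quoting or joining; the exact
    # expansion is an internal detail of the revset machinery
    spec = revsetlang.formatspec(b'%ld and orphan()', [2, 5, 7])
    print(spec)
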
 
+
 def externalparent(repo, state, destancestors):
     """Return the revision that should be used as the second parent
     when the revisions in state is collapsed on top of destancestors.
@@ -1200,77 +1436,97 @@
         if rev == source:
             continue
         for p in repo[rev].parents():
-            if (p.rev() not in state
-                        and p.rev() not in destancestors):
+            if p.rev() not in state and p.rev() not in destancestors:
                 parents.add(p.rev())
     if not parents:
         return nullrev
     if len(parents) == 1:
         return parents.pop()
-    raise error.Abort(_('unable to collapse on top of %d, there is more '
-                       'than one external parent: %s') %
-                     (max(destancestors),
-                      ', '.join("%d" % p for p in sorted(parents))))
+    raise error.Abort(
+        _(
+            b'unable to collapse on top of %d, there is more '
+            b'than one external parent: %s'
+        )
+        % (max(destancestors), b', '.join(b"%d" % p for p in sorted(parents)))
+    )
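
externalparent above allows at most one parent from outside the collapsed set and the destination's ancestors; two or more abort the collapse. A toy illustration of the rule, with hypothetical revision numbers:

    state = {2, 3}                   # revisions being collapsed
    destancestors = {0, 1}           # ancestors of the destination
    parentsof = {2: [1], 3: [2, 5]}  # toy parent table

    external = {
        p
        for rev in state
        for p in parentsof[rev]
        if p not in state and p not in destancestors
    }
    assert external == {5}  # exactly one: usable as the second parent
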
+
 
 def commitmemorynode(repo, p1, p2, wctx, editor, extra, user, date, commitmsg):
     '''Commit the memory changes with parents p1 and p2.
     Return node of committed revision.'''
     # Replicates the empty check in ``repo.commit``.
-    if wctx.isempty() and not repo.ui.configbool('ui', 'allowemptycommit'):
+    if wctx.isempty() and not repo.ui.configbool(b'ui', b'allowemptycommit'):
         return None
 
     # By convention, ``extra['branch']`` (set by extrafn) clobbers
     # ``branch`` (used when passing ``--keepbranches``).
     branch = repo[p1].branch()
-    if 'branch' in extra:
-        branch = extra['branch']
+    if b'branch' in extra:
+        branch = extra[b'branch']
 
-    memctx = wctx.tomemctx(commitmsg, parents=(p1, p2), date=date,
-        extra=extra, user=user, branch=branch, editor=editor)
+    memctx = wctx.tomemctx(
+        commitmsg,
+        parents=(p1, p2),
+        date=date,
+        extra=extra,
+        user=user,
+        branch=branch,
+        editor=editor,
+    )
     commitres = repo.commitctx(memctx)
-    wctx.clean() # Might be reused
+    wctx.clean()  # Might be reused
     return commitres
 
+
 def commitnode(repo, p1, p2, editor, extra, user, date, commitmsg):
     '''Commit the wd changes with parents p1 and p2.
     Return node of committed revision.'''
     dsguard = util.nullcontextmanager()
-    if not repo.ui.configbool('rebase', 'singletransaction'):
-        dsguard = dirstateguard.dirstateguard(repo, 'rebase')
+    if not repo.ui.configbool(b'rebase', b'singletransaction'):
+        dsguard = dirstateguard.dirstateguard(repo, b'rebase')
     with dsguard:
         repo.setparents(repo[p1].node(), repo[p2].node())
 
         # Commit might fail if unresolved files exist
-        newnode = repo.commit(text=commitmsg, user=user, date=date,
-                              extra=extra, editor=editor)
+        newnode = repo.commit(
+            text=commitmsg, user=user, date=date, extra=extra, editor=editor
+        )
 
         repo.dirstate.setbranch(repo[newnode].branch())
         return newnode
 
+
 def rebasenode(repo, rev, p1, base, collapse, dest, wctx):
     'Rebase a single revision rev on top of p1 using base as merge ancestor'
     # Merge phase
     # Update to destination and merge it with local
     if wctx.isinmemory():
         wctx.setbase(repo[p1])
     else:
-        if repo['.'].rev() != p1:
-            repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
+        if repo[b'.'].rev() != p1:
+            repo.ui.debug(b" update to %d:%s\n" % (p1, repo[p1]))
             mergemod.update(repo, p1, branchmerge=False, force=True)
         else:
-            repo.ui.debug(" already in destination\n")
+            repo.ui.debug(b" already in destination\n")
         # This is, alas, necessary to invalidate workingctx's manifest cache,
         # as well as other data we litter on it in other places.
         wctx = repo[None]
         repo.dirstate.write(repo.currenttransaction())
-    repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
+    repo.ui.debug(b" merge against %d:%s\n" % (rev, repo[rev]))
     if base is not None:
-        repo.ui.debug("   detach base %d:%s\n" % (base, repo[base]))
+        repo.ui.debug(b"   detach base %d:%s\n" % (base, repo[base]))
     # When collapsing in-place, the parent is the common ancestor, we
     # have to allow merging with it.
-    stats = mergemod.update(repo, rev, branchmerge=True, force=True,
-                            ancestor=base, mergeancestor=collapse,
-                            labels=['dest', 'source'], wc=wctx)
+    stats = mergemod.update(
+        repo,
+        rev,
+        branchmerge=True,
+        force=True,
+        ancestor=base,
+        mergeancestor=collapse,
+        labels=[b'dest', b'source'],
+        wc=wctx,
+    )
     if collapse:
         copies.duplicatecopies(repo, wctx, rev, dest)
     else:
@@ -1283,6 +1539,7 @@
         copies.duplicatecopies(repo, wctx, rev, p1rev, skiprev=dest)
     return stats
 
+
 def adjustdest(repo, rev, destmap, state, skipped):
     r"""adjust rebase destination given the current rebase state
 
@@ -1337,14 +1594,17 @@
     """
     # pick already rebased revs with same dest from state as interesting source
     dest = destmap[rev]
-    source = [s for s, d in state.items()
-              if d > 0 and destmap[s] == dest and s not in skipped]
+    source = [
+        s
+        for s, d in state.items()
+        if d > 0 and destmap[s] == dest and s not in skipped
+    ]
 
     result = []
     for prev in repo.changelog.parentrevs(rev):
         adjusted = dest
         if prev != nullrev:
-            candidate = repo.revs('max(%ld and (::%d))', source, prev).first()
+            candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first()
             if candidate is not None:
                 adjusted = state[candidate]
         if adjusted == dest and dest in state:
@@ -1352,10 +1612,12 @@
             if adjusted == revtodo:
                 # sortsource should produce an order that makes this impossible
                 raise error.ProgrammingError(
-                    'rev %d should be rebased already at this time' % dest)
+                    b'rev %d should be rebased already at this time' % dest
+                )
         result.append(adjusted)
     return result
 
+
 def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
     """
     Abort if rebase will create divergence or rebase is noop because of markers
@@ -1365,18 +1627,18 @@
     successors in destination or no non-obsolete successor.
     """
     # Obsolete node with successors not in dest leads to divergence
-    divergenceok = ui.configbool('experimental',
-                                 'evolution.allowdivergence')
+    divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
     divergencebasecandidates = rebaseobsrevs - rebaseobsskipped
 
     if divergencebasecandidates and not divergenceok:
-        divhashes = (bytes(repo[r])
-                     for r in divergencebasecandidates)
-        msg = _("this rebase will cause "
-                "divergences from: %s")
-        h = _("to force the rebase please set "
-              "experimental.evolution.allowdivergence=True")
-        raise error.Abort(msg % (",".join(divhashes),), hint=h)
+        divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
+        msg = _(b"this rebase will cause divergences from: %s")
+        h = _(
+            b"to force the rebase please set "
+            b"experimental.evolution.allowdivergence=True"
+        )
+        raise error.Abort(msg % (b",".join(divhashes),), hint=h)
+
 
 def successorrevs(unfi, rev):
     """yield revision numbers for successors of rev"""
@@ -1386,6 +1648,7 @@
         if s in nodemap:
             yield nodemap[s]
 
+
 def defineparents(repo, rev, destmap, state, skipped, obsskipped):
     """Return new parents and optionally a merge base for rev being rebased
 
@@ -1407,10 +1670,10 @@
     isancestor = cl.isancestorrev
 
     dest = destmap[rev]
-    oldps = repo.changelog.parentrevs(rev) # old parents
-    newps = [nullrev, nullrev] # new parents
+    oldps = repo.changelog.parentrevs(rev)  # old parents
+    newps = [nullrev, nullrev]  # new parents
     dests = adjustdest(repo, rev, destmap, state, skipped)
-    bases = list(oldps) # merge base candidates, initially just old parents
+    bases = list(oldps)  # merge base candidates, initially just old parents
 
     if all(r == nullrev for r in oldps[1:]):
         # For non-merge changeset, just move p to adjusted dest as requested.
@@ -1439,7 +1702,7 @@
         # The loop tries to be not rely on the fact that a Mercurial node has
         # at most 2 parents.
         for i, p in enumerate(oldps):
-            np = p # new parent
+            np = p  # new parent
             if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
                 np = dests[i]
             elif p in state and state[p] > 0:
@@ -1465,9 +1728,9 @@
             for j, x in enumerate(newps[:i]):
                 if x == nullrev:
                     continue
-                if isancestor(np, x): # CASE-1
+                if isancestor(np, x):  # CASE-1
                     np = nullrev
-                elif isancestor(x, np): # CASE-2
+                elif isancestor(x, np):  # CASE-2
                     newps[j] = np
                     np = nullrev
                     # New parents forming an ancestor relationship does not
@@ -1492,15 +1755,19 @@
         #    /|    # None of A and B will be changed to D and rebase fails.
         #   A B D
         if set(newps) == set(oldps) and dest not in newps:
-            raise error.Abort(_('cannot rebase %d:%s without '
-                                'moving at least one of its parents')
-                              % (rev, repo[rev]))
+            raise error.Abort(
+                _(
+                    b'cannot rebase %d:%s without '
+                    b'moving at least one of its parents'
+                )
+                % (rev, repo[rev])
+            )
 
     # Source should not be ancestor of dest. The check here guarantees it's
     # impossible. With multi-dest, the initial check does not cover complex
     # cases since we don't have abstractions to dry-run rebase cheaply.
     if any(p != nullrev and isancestor(rev, p) for p in newps):
-        raise error.Abort(_('source is ancestor of destination'))
+        raise error.Abort(_(b'source is ancestor of destination'))
 
     # "rebasenode" updates to new p1, use the corresponding merge base.
     if bases[0] != nullrev:
@@ -1524,28 +1791,37 @@
     # better than the default (ancestor(F, Z) == null). Therefore still
     # pick one (so choose p1 above).
     if sum(1 for b in bases if b != nullrev) > 1:
-        unwanted = [None, None] # unwanted[i]: unwanted revs if choose bases[i]
+        unwanted = [None, None]  # unwanted[i]: unwanted revs if choose bases[i]
         for i, base in enumerate(bases):
             if base == nullrev:
                 continue
             # Revisions in the side (not chosen as merge base) branch that
             # might contain "surprising" contents
-            siderevs = list(repo.revs('((%ld-%d) %% (%d+%d))',
-                                      bases, base, base, dest))
+            siderevs = list(
+                repo.revs(b'((%ld-%d) %% (%d+%d))', bases, base, base, dest)
+            )
 
             # If those revisions are covered by rebaseset, the result is good.
             # A merge in rebaseset would be considered to cover its ancestors.
             if siderevs:
-                rebaseset = [r for r, d in state.items()
-                             if d > 0 and r not in obsskipped]
-                merges = [r for r in rebaseset
-                          if cl.parentrevs(r)[1] != nullrev]
-                unwanted[i] = list(repo.revs('%ld - (::%ld) - %ld',
-                                             siderevs, merges, rebaseset))
+                rebaseset = [
+                    r for r, d in state.items() if d > 0 and r not in obsskipped
+                ]
+                merges = [
+                    r for r in rebaseset if cl.parentrevs(r)[1] != nullrev
+                ]
+                unwanted[i] = list(
+                    repo.revs(
+                        b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset
+                    )
+                )
 
         # Choose a merge base that has a minimal number of unwanted revs.
-        l, i = min((len(revs), i)
-                   for i, revs in enumerate(unwanted) if revs is not None)
+        l, i = min(
+            (len(revs), i)
+            for i, revs in enumerate(unwanted)
+            if revs is not None
+        )
         base = bases[i]
 
         # newps[0] should match merge base if possible. Currently, if newps[i]
@@ -1558,27 +1834,34 @@
         # The merge will include unwanted revisions. Abort now. Revisit this if
         # we have a more advanced merge algorithm that handles multiple bases.
         if l > 0:
-            unwanteddesc = _(' or ').join(
-                (', '.join('%d:%s' % (r, repo[r]) for r in revs)
-                 for revs in unwanted if revs is not None))
+            unwanteddesc = _(b' or ').join(
+                (
+                    b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
+                    for revs in unwanted
+                    if revs is not None
+                )
+            )
             raise error.Abort(
-                _('rebasing %d:%s will include unwanted changes from %s')
-                % (rev, repo[rev], unwanteddesc))
+                _(b'rebasing %d:%s will include unwanted changes from %s')
+                % (rev, repo[rev], unwanteddesc)
+            )
 
-    repo.ui.debug(" future parents are %d and %d\n" % tuple(newps))
+    repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))
 
     return newps[0], newps[1], base
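
A detail worth noting in the reflowed selection above: min over (len(revs), i) tuples ranks merge-base candidates by how many unwanted revisions each would pull in, with the index breaking ties in favor of the earlier candidate, i.e. p1. In isolation:

    # unwanted[i]: unwanted revs if bases[i] is chosen; None = unpopulated
    unwanted = [[10, 11], [12], None]
    l, i = min(
        (len(revs), i) for i, revs in enumerate(unwanted) if revs is not None
    )
    assert (l, i) == (1, 1)  # fewest unwanted revs wins; ties prefer lower i
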
 
+
 def isagitpatch(repo, patchname):
     'Return true if the given patch is in git format'
     mqpatch = os.path.join(repo.mq.path, patchname)
-    for line in patch.linereader(open(mqpatch, 'rb')):
-        if line.startswith('diff --git'):
+    for line in patch.linereader(open(mqpatch, b'rb')):
+        if line.startswith(b'diff --git'):
             return True
     return False
 
+
 def updatemq(repo, state, skipped, **opts):
     'Update rebased mq patches - finalize and then import them'
     mqrebase = {}
     mq = repo.mq
     original_series = mq.fullseries[:]
@@ -1587,8 +1870,10 @@
     for p in mq.applied:
         rev = repo[p.node].rev()
         if rev in state:
-            repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
-                                        (rev, p.name))
+            repo.ui.debug(
+                b'revision %d is an mq patch (%s), finalize it.\n'
+                % (rev, p.name)
+            )
             mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
         else:
             # Applied but not rebased, not sure this should happen
@@ -1601,10 +1886,17 @@
         for rev in sorted(mqrebase, reverse=True):
             if rev not in skipped:
                 name, isgit = mqrebase[rev]
-                repo.ui.note(_('updating mq patch %s to %d:%s\n') %
-                             (name, state[rev], repo[state[rev]]))
-                mq.qimport(repo, (), patchname=name, git=isgit,
-                                rev=["%d" % state[rev]])
+                repo.ui.note(
+                    _(b'updating mq patch %s to %d:%s\n')
+                    % (name, state[rev], repo[state[rev]])
+                )
+                mq.qimport(
+                    repo,
+                    (),
+                    patchname=name,
+                    git=isgit,
+                    rev=[b"%d" % state[rev]],
+                )
             else:
                 # Rebased and skipped
                 skippedpatches.add(mqrebase[rev][0])
@@ -1612,27 +1904,33 @@
         # Patches were either applied and rebased and imported in
         # order, applied and removed or unapplied. Discard the removed
         # ones while preserving the original series order and guards.
-        newseries = [s for s in original_series
-                     if mq.guard_re.split(s, 1)[0] not in skippedpatches]
+        newseries = [
+            s
+            for s in original_series
+            if mq.guard_re.split(s, 1)[0] not in skippedpatches
+        ]
         mq.fullseries[:] = newseries
         mq.seriesdirty = True
         mq.savedirty()
 
+
 def storecollapsemsg(repo, collapsemsg):
     'Store the collapse message to allow recovery'
-    collapsemsg = collapsemsg or ''
-    f = repo.vfs("last-message.txt", "w")
-    f.write("%s\n" % collapsemsg)
+    collapsemsg = collapsemsg or b''
+    f = repo.vfs(b"last-message.txt", b"w")
+    f.write(b"%s\n" % collapsemsg)
     f.close()
 
+
 def clearcollapsemsg(repo):
-    'Remove collapse message file'
-    repo.vfs.unlinkpath("last-message.txt", ignoremissing=True)
+    b'Remove collapse message file'
+    repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)
+
 
 def restorecollapsemsg(repo, isabort):
-    'Restore previously stored collapse message'
+    b'Restore previously stored collapse message'
     try:
-        f = repo.vfs("last-message.txt")
+        f = repo.vfs(b"last-message.txt")
         collapsemsg = f.readline().strip()
         f.close()
     except IOError as err:
@@ -1640,18 +1938,20 @@
             raise
         if isabort:
             # Oh well, just abort like normal
-            collapsemsg = ''
+            collapsemsg = b''
         else:
-            raise error.Abort(_('missing .hg/last-message.txt for rebase'))
+            raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
     return collapsemsg
 
+
 def clearstatus(repo):
-    'Remove the status files'
+    b'Remove the status files'
     # Make sure the active transaction won't write the state file
     tr = repo.currenttransaction()
     if tr:
-        tr.removefilegenerator('rebasestate')
-    repo.vfs.unlinkpath("rebasestate", ignoremissing=True)
+        tr.removefilegenerator(b'rebasestate')
+    repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)
+
 
 def needupdate(repo, state):
     '''check whether we should `update --clean` away from a merge, or if
@@ -1663,13 +1963,15 @@
         return False
 
     # We should be standing on the first as-of-yet unrebased commit.
-    firstunrebased = min([old for old, new in state.iteritems()
-                          if new == nullrev])
+    firstunrebased = min(
+        [old for old, new in pycompat.iteritems(state) if new == nullrev]
+    )
     if firstunrebased in parents:
         return True
 
     return False
 
+
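``pycompat.iteritems``, used in ``needupdate`` above and throughout this
series, is the py2/py3 shim that replaces the removed ``dict.iteritems``.
Roughly (a sketch; ``mercurial/pycompat.py`` is the real source)::

   import sys

   if sys.version_info[0] >= 3:
       def iteritems(mapping):
           # A view works fine everywhere iteritems() was used.
           return mapping.items()
   else:
       def iteritems(mapping):
           return mapping.iteritems()
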
 def sortsource(destmap):
     """yield source revisions in an order that we only rebase things once
 
@@ -1691,10 +1993,11 @@
             if destmap[r] not in srcset:
                 result.append(r)
         if not result:
-            raise error.Abort(_('source and destination form a cycle'))
+            raise error.Abort(_(b'source and destination form a cycle'))
         srcset -= set(result)
         yield result
 
+
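As a standalone illustration of the batching contract in ``sortsource``, here
is a minimal sketch over a plain dict of ints (``error.Abort`` is swapped for
``ValueError``, and each batch is sorted for determinism in this sketch)::

   def sortsource(destmap):
       # Emit a source only once its destination is no longer pending,
       # so every revision is rebased exactly once.
       srcset = set(destmap)
       while srcset:
           batch = [r for r in sorted(srcset) if destmap[r] not in srcset]
           if not batch:
               raise ValueError('source and destination form a cycle')
           srcset -= set(batch)
           yield batch

   # 2 waits for 1, which rebases onto 0 (already outside the set).
   assert list(sortsource({1: 0, 2: 1})) == [[1], [2]]
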
 def buildstate(repo, destmap, collapse):
     '''Define which revisions are going to be rebased and where
 
@@ -1702,38 +2005,40 @@
     destmap: {srcrev: destrev}
     '''
     rebaseset = destmap.keys()
-    originalwd = repo['.'].rev()
+    originalwd = repo[b'.'].rev()
 
     # This check isn't strictly necessary, since mq detects commits over an
     # applied patch. But it prevents messing up the working directory when
     # a partially completed rebase is blocked by mq.
-    if 'qtip' in repo.tags():
+    if b'qtip' in repo.tags():
         mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
         if set(destmap.values()) & mqapplied:
-            raise error.Abort(_('cannot rebase onto an applied mq patch'))
+            raise error.Abort(_(b'cannot rebase onto an applied mq patch'))
 
     # Get "cycle" error early by exhausting the generator.
-    sortedsrc = list(sortsource(destmap)) # a list of sorted revs
+    sortedsrc = list(sortsource(destmap))  # a list of sorted revs
     if not sortedsrc:
-        raise error.Abort(_('no matching revisions'))
+        raise error.Abort(_(b'no matching revisions'))
 
     # Only check the first batch of revisions to rebase, i.e. those that do
     # not depend on others in the rebase set. The "source is ancestor of
     # destination" check is therefore skipped here for the second (and
     # following) batches; we rely on "defineparents" to do that check.
-    roots = list(repo.set('roots(%ld)', sortedsrc[0]))
+    roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
     if not roots:
-        raise error.Abort(_('no matching revisions'))
+        raise error.Abort(_(b'no matching revisions'))
+
     def revof(r):
         return r.rev()
+
     roots = sorted(roots, key=revof)
     state = dict.fromkeys(rebaseset, revtodo)
-    emptyrebase = (len(sortedsrc) == 1)
+    emptyrebase = len(sortedsrc) == 1
     for root in roots:
         dest = repo[destmap[root.rev()]]
         commonbase = root.ancestor(dest)
         if commonbase == root:
-            raise error.Abort(_('source is ancestor of destination'))
+            raise error.Abort(_(b'source is ancestor of destination'))
         if commonbase == dest:
             wctx = repo[None]
             if dest == wctx.p1():
@@ -1745,11 +2050,11 @@
                 # mark the revision as done by setting its new revision
                 # equal to its old (current) revisions
                 state[root.rev()] = root.rev()
-                repo.ui.debug('source is a child of destination\n')
+                repo.ui.debug(b'source is a child of destination\n')
                 continue
 
         emptyrebase = False
-        repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root))
+        repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
     if emptyrebase:
         return None
     for rev in sorted(state):
@@ -1759,8 +2064,18 @@
             state[rev] = rev
     return originalwd, destmap, state
 
-def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None,
-                 keepf=False, fm=None, backup=True):
+
+def clearrebased(
+    ui,
+    repo,
+    destmap,
+    state,
+    skipped,
+    collapsedas=None,
+    keepf=False,
+    fm=None,
+    backup=True,
+):
     """dispose of rebased revision at the end of the rebase
 
     If `collapsedas` is not None, the rebase was a collapse whose result if the
@@ -1783,53 +2098,64 @@
             oldnode = tonode(rev)
             newnode = collapsedas or tonode(newrev)
             moves[oldnode] = newnode
-            if not keepf:
-                succs = None
-                if rev in skipped:
-                    if stripcleanup or not repo[rev].obsolete():
-                        succs = ()
-                elif collapsedas:
-                    collapsednodes.append(oldnode)
-                else:
-                    succs = (newnode,)
-                if succs is not None:
-                    replacements[(oldnode,)] = succs
+            succs = None
+            if rev in skipped:
+                if stripcleanup or not repo[rev].obsolete():
+                    succs = ()
+            elif collapsedas:
+                collapsednodes.append(oldnode)
+            else:
+                succs = (newnode,)
+            if succs is not None:
+                replacements[(oldnode,)] = succs
     if collapsednodes:
         replacements[tuple(collapsednodes)] = (collapsedas,)
-    scmutil.cleanupnodes(repo, replacements, 'rebase', moves, backup=backup)
     if fm:
         hf = fm.hexfunc
         fl = fm.formatlist
         fd = fm.formatdict
         changes = {}
-        for oldns, newn in replacements.iteritems():
+        for oldns, newn in pycompat.iteritems(replacements):
             for oldn in oldns:
-                changes[hf(oldn)] = fl([hf(n) for n in newn], name='node')
-        nodechanges = fd(changes, key="oldnode", value="newnodes")
+                changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
+        nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
         fm.data(nodechanges=nodechanges)
+    if keepf:
+        replacements = {}
+    scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)
+
 
 def pullrebase(orig, ui, repo, *args, **opts):
-    'Call rebase after pull if the latter has been invoked with --rebase'
+    b'Call rebase after pull if the latter has been invoked with --rebase'
     if opts.get(r'rebase'):
-        if ui.configbool('commands', 'rebase.requiredest'):
-            msg = _('rebase destination required by configuration')
-            hint = _('use hg pull followed by hg rebase -d DEST')
+        if ui.configbool(b'commands', b'rebase.requiredest'):
+            msg = _(b'rebase destination required by configuration')
+            hint = _(b'use hg pull followed by hg rebase -d DEST')
             raise error.Abort(msg, hint=hint)
 
         with repo.wlock(), repo.lock():
             if opts.get(r'update'):
                 del opts[r'update']
-                ui.debug('--update and --rebase are not compatible, ignoring '
-                         'the update flag\n')
+                ui.debug(
+                    b'--update and --rebase are not compatible, ignoring '
+                    b'the update flag\n'
+                )
 
             cmdutil.checkunfinished(repo, skipmerge=True)
-            cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
-                'please commit or shelve your changes first'))
+            cmdutil.bailifchanged(
+                repo,
+                hint=_(
+                    b'cannot pull with rebase: '
+                    b'please commit or shelve your changes first'
+                ),
+            )
 
             revsprepull = len(repo)
             origpostincoming = commands.postincoming
+
             def _dummy(*args, **kwargs):
                 pass
+
             commands.postincoming = _dummy
             try:
                 ret = orig(ui, repo, *args, **opts)
@@ -1853,24 +2179,26 @@
                 except error.NoMergeDestAbort:
                     # we can maybe update instead
                     rev, _a, _b = destutil.destupdate(repo)
-                    if rev == repo['.'].rev():
-                        ui.status(_('nothing to rebase\n'))
+                    if rev == repo[b'.'].rev():
+                        ui.status(_(b'nothing to rebase\n'))
                     else:
-                        ui.status(_('nothing to rebase - updating instead\n'))
+                        ui.status(_(b'nothing to rebase - updating instead\n'))
                         # not passing argument to get the bare update behavior
                         # with warning and trumpets
                         commands.update(ui, repo)
     else:
         if opts.get(r'tool'):
-            raise error.Abort(_('--tool can only be used with --rebase'))
+            raise error.Abort(_(b'--tool can only be used with --rebase'))
         ret = orig(ui, repo, *args, **opts)
 
     return ret
 
+
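The ``commands.rebase.requiredest`` guard near the top of ``pullrebase``
corresponds to this hgrc setting (illustrative)::

   [commands]
   rebase.requiredest = true
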
 def _filterobsoleterevs(repo, revs):
     """returns a set of the obsolete revisions in revs"""
     return set(r for r in revs if repo[r].obsolete())
 
+
 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
     """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
 
@@ -1890,7 +2218,7 @@
     assert repo.filtername is None
     cl = repo.changelog
     nodemap = cl.nodemap
-    extinctrevs = set(repo.revs('extinct()'))
+    extinctrevs = set(repo.revs(b'extinct()'))
     for srcrev in rebaseobsrevs:
         srcnode = cl.node(srcrev)
         # XXX: more advanced APIs are required to handle split correctly
@@ -1923,11 +2251,13 @@
         obsoleteextinctsuccessors,
     )
 
+
 def abortrebase(ui, repo):
     with repo.wlock(), repo.lock():
         rbsrt = rebaseruntime(repo, ui)
         rbsrt._prepareabortorcontinue(isabort=True)
 
+
 def continuerebase(ui, repo):
     with repo.wlock(), repo.lock():
         rbsrt = rebaseruntime(repo, ui)
@@ -1939,8 +2269,9 @@
         rbsrt._performrebase(None)
         rbsrt._finishrebase()
 
+
 def summaryhook(ui, repo):
-    if not repo.vfs.exists('rebasestate'):
+    if not repo.vfs.exists(b'rebasestate'):
         return
     try:
         rbsrt = rebaseruntime(repo, ui, {})
@@ -1948,24 +2279,34 @@
         state = rbsrt.state
     except error.RepoLookupError:
         # i18n: column positioning for "hg summary"
-        msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
+        msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
         ui.write(msg)
         return
-    numrebased = len([i for i in state.itervalues() if i >= 0])
+    numrebased = len([i for i in pycompat.itervalues(state) if i >= 0])
     # i18n: column positioning for "hg summary"
-    ui.write(_('rebase: %s, %s (rebase --continue)\n') %
-             (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
-              ui.label(_('%d remaining'), 'rebase.remaining') %
-              (len(state) - numrebased)))
+    ui.write(
+        _(b'rebase: %s, %s (rebase --continue)\n')
+        % (
+            ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased,
+            ui.label(_(b'%d remaining'), b'rebase.remaining')
+            % (len(state) - numrebased),
+        )
+    )
+
 
 def uisetup(ui):
-    #Replace pull with a decorator to provide --rebase option
-    entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
-    entry[1].append(('', 'rebase', None,
-                     _("rebase working directory to branch head")))
-    entry[1].append(('t', 'tool', '',
-                     _("specify merge tool for rebase")))
-    cmdutil.summaryhooks.add('rebase', summaryhook)
-    statemod.addunfinished('rebase', fname='rebasestate', stopflag=True,
-                            continueflag=True, abortfunc=abortrebase,
-                            continuefunc=continuerebase)
+    # Replace pull with a decorator to provide --rebase option
+    entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
+    entry[1].append(
+        (b'', b'rebase', None, _(b"rebase working directory to branch head"))
+    )
+    entry[1].append((b't', b'tool', b'', _(b"specify merge tool for rebase")))
+    cmdutil.summaryhooks.add(b'rebase', summaryhook)
+    statemod.addunfinished(
+        b'rebase',
+        fname=b'rebasestate',
+        stopflag=True,
+        continueflag=True,
+        abortfunc=abortrebase,
+        continuefunc=continuerebase,
+    )
--- a/hgext/record.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/record.py	Mon Oct 21 11:09:48 2019 -0400
@@ -27,15 +27,17 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 
-@command("record",
-         # same options as commit + white space diff options
-        [c for c in commands.table['commit|ci'][1][:]
-            if c[1] != "interactive"] + cmdutil.diffwsopts,
-          _('hg record [OPTION]... [FILE]...'),
-        helpcategory=command.CATEGORY_COMMITTING)
+@command(
+    b"record",
+    # same options as commit + white space diff options
+    [c for c in commands.table[b'commit|ci'][1][:] if c[1] != b"interactive"]
+    + cmdutil.diffwsopts,
+    _(b'hg record [OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_COMMITTING,
+)
 def record(ui, repo, *pats, **opts):
     '''interactively select changes to commit
 
@@ -66,19 +68,21 @@
     This command is not available when committing a merge.'''
 
     if not ui.interactive():
-        raise error.Abort(_('running non-interactively, use %s instead') %
-                         'commit')
+        raise error.Abort(
+            _(b'running non-interactively, use %s instead') % b'commit'
+        )
 
     opts[r"interactive"] = True
-    overrides = {('experimental', 'crecord'): False}
-    with ui.configoverride(overrides, 'record'):
+    overrides = {(b'experimental', b'crecord'): False}
+    with ui.configoverride(overrides, b'record'):
         return commands.commit(ui, repo, *pats, **opts)
 
+
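In effect, ``record`` is ``commit --interactive`` with the curses interface
(crecord) forced off; a typical invocation (illustrative)::

   $ hg record -m "fix off-by-one" file.c
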
 def qrefresh(origfn, ui, repo, *pats, **opts):
     if not opts[r'interactive']:
         return origfn(ui, repo, *pats, **opts)
 
-    mq = extensions.find('mq')
+    mq = extensions.find(b'mq')
 
     def committomq(ui, repo, *pats, **opts):
         # At this point the working copy contains only changes that
@@ -88,28 +92,33 @@
         mq.refresh(ui, repo, **opts)
 
     # backup all changed files
-    cmdutil.dorecord(ui, repo, committomq, None, True,
-                    cmdutil.recordfilter, *pats, **opts)
+    cmdutil.dorecord(
+        ui, repo, committomq, None, True, cmdutil.recordfilter, *pats, **opts
+    )
+
 
 # This command registration is replaced during uisetup().
-@command('qrecord',
+@command(
+    b'qrecord',
     [],
-    _('hg qrecord [OPTION]... PATCH [FILE]...'),
+    _(b'hg qrecord [OPTION]... PATCH [FILE]...'),
     helpcategory=command.CATEGORY_COMMITTING,
-    inferrepo=True)
+    inferrepo=True,
+)
 def qrecord(ui, repo, patch, *pats, **opts):
     '''interactively record a new patch
 
     See :hg:`help qnew` & :hg:`help record` for more information and
     usage.
     '''
-    return _qrecord('qnew', ui, repo, patch, *pats, **opts)
+    return _qrecord(b'qnew', ui, repo, patch, *pats, **opts)
+
 
 def _qrecord(cmdsuggest, ui, repo, patch, *pats, **opts):
     try:
-        mq = extensions.find('mq')
+        mq = extensions.find(b'mq')
     except KeyError:
-        raise error.Abort(_("'mq' extension not loaded"))
+        raise error.Abort(_(b"'mq' extension not loaded"))
 
     repo.mq.checkpatchname(patch)
 
@@ -117,11 +126,20 @@
         opts[r'checkname'] = False
         mq.new(ui, repo, patch, *pats, **opts)
 
-    overrides = {('experimental', 'crecord'): False}
-    with ui.configoverride(overrides, 'record'):
+    overrides = {(b'experimental', b'crecord'): False}
+    with ui.configoverride(overrides, b'record'):
         cmdutil.checkunfinished(repo)
-        cmdutil.dorecord(ui, repo, committomq, cmdsuggest, False,
-                         cmdutil.recordfilter, *pats, **opts)
+        cmdutil.dorecord(
+            ui,
+            repo,
+            committomq,
+            cmdsuggest,
+            False,
+            cmdutil.recordfilter,
+            *pats,
+            **opts
+        )
+
 
 def qnew(origfn, ui, repo, patch, *args, **opts):
     if opts[r'interactive']:
@@ -131,21 +149,27 @@
 
 def uisetup(ui):
     try:
-        mq = extensions.find('mq')
+        mq = extensions.find(b'mq')
     except KeyError:
         return
 
-    cmdtable["qrecord"] = (
+    cmdtable[b"qrecord"] = (
         qrecord,
         # same options as qnew, but copy them so we don't get
         # -i/--interactive for qrecord and add white space diff options
-        mq.cmdtable['qnew'][1][:] + cmdutil.diffwsopts,
-        _('hg qrecord [OPTION]... PATCH [FILE]...'))
+        mq.cmdtable[b'qnew'][1][:] + cmdutil.diffwsopts,
+        _(b'hg qrecord [OPTION]... PATCH [FILE]...'),
+    )
 
-    _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
-    _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
-             _("interactively select changes to refresh"))
+    _wrapcmd(b'qnew', mq.cmdtable, qnew, _(b"interactively record a new patch"))
+    _wrapcmd(
+        b'qrefresh',
+        mq.cmdtable,
+        qrefresh,
+        _(b"interactively select changes to refresh"),
+    )
+
 
 def _wrapcmd(cmd, table, wrapfn, msg):
     entry = extensions.wrapcommand(table, cmd, wrapfn)
-    entry[1].append(('i', 'interactive', None, msg))
+    entry[1].append((b'i', b'interactive', None, msg))
--- a/hgext/releasenotes.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/releasenotes.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,6 +18,7 @@
 import re
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     config,
     error,
@@ -28,15 +29,14 @@
     scmutil,
     util,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 cmdtable = {}
 command = registrar.command(cmdtable)
 
 try:
     import fuzzywuzzy.fuzz as fuzz
+
     fuzz.token_set_ratio
 except ImportError:
     fuzz = None
@@ -45,20 +45,21 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 DEFAULT_SECTIONS = [
-    ('feature', _('New Features')),
-    ('bc', _('Backwards Compatibility Changes')),
-    ('fix', _('Bug Fixes')),
-    ('perf', _('Performance Improvements')),
-    ('api', _('API Changes')),
+    (b'feature', _(b'New Features')),
+    (b'bc', _(b'Backwards Compatibility Changes')),
+    (b'fix', _(b'Bug Fixes')),
+    (b'perf', _(b'Performance Improvements')),
+    (b'api', _(b'API Changes')),
 ]
 
 RE_DIRECTIVE = re.compile(br'^\.\. ([a-zA-Z0-9_]+)::\s*([^$]+)?$')
 RE_ISSUE = br'\bissue ?[0-9]{4,6}(?![0-9])\b'
 
-BULLET_SECTION = _('Other Changes')
+BULLET_SECTION = _(b'Other Changes')
+
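For context, ``RE_DIRECTIVE`` recognizes RST admonition lines in commit
messages; a quick standalone check of what the two groups capture (example
input only)::

   import re

   RE_DIRECTIVE = re.compile(br'^\.\. ([a-zA-Z0-9_]+)::\s*([^$]+)?$')

   m = RE_DIRECTIVE.match(b'.. fix:: avoid crash on empty pull')
   assert m.group(1) == b'fix'
   assert m.group(2) == b'avoid crash on empty pull'
   assert RE_DIRECTIVE.match(b'.. perf::').group(2) is None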
 
 class parsedreleasenotes(object):
     def __init__(self):
@@ -103,23 +104,29 @@
         This is used to combine multiple sources of release notes together.
         """
         if not fuzz:
-            ui.warn(_("module 'fuzzywuzzy' not found, merging of similar "
-                      "releasenotes is disabled\n"))
+            ui.warn(
+                _(
+                    b"module 'fuzzywuzzy' not found, merging of similar "
+                    b"releasenotes is disabled\n"
+                )
+            )
 
         for section in other:
-            existingnotes = (
-                converttitled(self.titledforsection(section)) +
-                convertnontitled(self.nontitledforsection(section)))
+            existingnotes = converttitled(
+                self.titledforsection(section)
+            ) + convertnontitled(self.nontitledforsection(section))
             for title, paragraphs in other.titledforsection(section):
                 if self.hastitledinsection(section, title):
                     # TODO prompt for resolution if different and running in
                     # interactive mode.
-                    ui.write(_('%s already exists in %s section; ignoring\n') %
-                             (title, section))
+                    ui.write(
+                        _(b'%s already exists in %s section; ignoring\n')
+                        % (title, section)
+                    )
                     continue
 
                 incoming_str = converttitled([(title, paragraphs)])[0]
-                if section == 'fix':
+                if section == b'fix':
                     issue = getissuenum(incoming_str)
                     if issue:
                         if findissue(ui, existingnotes, issue):
@@ -135,7 +142,7 @@
                     continue
 
                 incoming_str = convertnontitled([paragraphs])[0]
-                if section == 'fix':
+                if section == b'fix':
                     issue = getissuenum(incoming_str)
                     if issue:
                         if findissue(ui, existingnotes, issue):
@@ -146,6 +153,7 @@
 
                 self.addnontitleditem(section, paragraphs)
 
+
 class releasenotessections(object):
     def __init__(self, ui, repo=None):
         if repo:
@@ -153,7 +161,7 @@
             custom_sections = getcustomadmonitions(repo)
             if custom_sections:
                 sections.update(custom_sections)
-            self._sections = list(sections.iteritems())
+            self._sections = list(pycompat.iteritems(sections))
         else:
             self._sections = list(DEFAULT_SECTIONS)
 
@@ -170,6 +178,7 @@
 
         return None
 
+
 def converttitled(titledparagraphs):
     """
     Convert titled paragraphs to strings
@@ -179,9 +188,10 @@
         lines = []
         for para in paragraphs:
             lines.extend(para)
-        string_list.append(' '.join(lines))
+        string_list.append(b' '.join(lines))
     return string_list
 
+
 def convertnontitled(nontitledparagraphs):
     """
     Convert non-titled bullets to strings
@@ -191,9 +201,10 @@
         lines = []
         for para in paragraphs:
             lines.extend(para)
-        string_list.append(' '.join(lines))
+        string_list.append(b' '.join(lines))
     return string_list
 
+
 def getissuenum(incoming_str):
     """
     Returns issue number from the incoming string if it exists
@@ -203,16 +214,18 @@
         issue = issue.group()
     return issue
 
+
 def findissue(ui, existing, issue):
     """
     Returns true if issue number already exists in notes.
     """
     if any(issue in s for s in existing):
-        ui.write(_('"%s" already exists in notes; ignoring\n') % issue)
+        ui.write(_(b'"%s" already exists in notes; ignoring\n') % issue)
         return True
     else:
         return False
 
+
 def similar(ui, existing, incoming_str):
     """
     Returns true if similar note found in existing notes.
@@ -220,14 +233,17 @@
     if len(incoming_str.split()) > 10:
         merge = similaritycheck(incoming_str, existing)
         if not merge:
-            ui.write(_('"%s" already exists in notes file; ignoring\n')
-                     % incoming_str)
+            ui.write(
+                _(b'"%s" already exists in notes file; ignoring\n')
+                % incoming_str
+            )
             return True
         else:
             return False
     else:
         return False
 
+
 def similaritycheck(incoming_str, existingnotes):
     """
     Returns False when the note fragment can be merged into the existing notes.
@@ -244,8 +260,9 @@
             break
     return merge
 
+
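The similarity gate relies on the optional ``fuzzywuzzy`` package; a sketch of
the 75-point threshold ``similaritycheck`` applies (requires the package;
strings are invented)::

   import fuzzywuzzy.fuzz as fuzz

   incoming = 'fix a crash when pulling from an empty repository'
   existing = 'fix crash pulling from empty repository'
   score = fuzz.token_set_ratio(incoming, existing)
   # similaritycheck() returns False as soon as any score exceeds 75.
   merge = score <= 75
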
 def getcustomadmonitions(repo):
-    ctx = repo['.']
+    ctx = repo[b'.']
     p = config.config()
 
     def read(f, sections=None, remap=None):
@@ -253,12 +270,14 @@
             data = ctx[f].data()
             p.parse(f, data, sections, remap, read)
         else:
-            raise error.Abort(_(".hgreleasenotes file \'%s\' not found") %
-                              repo.pathto(f))
+            raise error.Abort(
+                _(b".hgreleasenotes file \'%s\' not found") % repo.pathto(f)
+            )
 
-    if '.hgreleasenotes' in ctx:
-        read('.hgreleasenotes')
-    return p['sections']
+    if b'.hgreleasenotes' in ctx:
+        read(b'.hgreleasenotes')
+    return p[b'sections']
+
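``getcustomadmonitions`` reads extra sections from a tracked
``.hgreleasenotes`` file; the expected shape (section name and title
invented)::

   [sections]
   cleanup = Code Cleanups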
 
 def checkadmonitions(ui, repo, directives, revs):
     """
@@ -280,21 +299,26 @@
             if admonition.group(1) in directives:
                 continue
             else:
-                ui.write(_("Invalid admonition '%s' present in changeset %s"
-                           "\n") % (admonition.group(1), ctx.hex()[:12]))
-                sim = lambda x: difflib.SequenceMatcher(None,
-                    admonition.group(1), x).ratio()
+                ui.write(
+                    _(b"Invalid admonition '%s' present in changeset %s\n")
+                    % (admonition.group(1), ctx.hex()[:12])
+                )
+                sim = lambda x: difflib.SequenceMatcher(
+                    None, admonition.group(1), x
+                ).ratio()
 
                 similar = [s for s in directives if sim(s) > 0.6]
                 if len(similar) == 1:
-                    ui.write(_("(did you mean %s?)\n") % similar[0])
+                    ui.write(_(b"(did you mean %s?)\n") % similar[0])
                 elif similar:
-                    ss = ", ".join(sorted(similar))
-                    ui.write(_("(did you mean one of %s?)\n") % ss)
+                    ss = b", ".join(sorted(similar))
+                    ui.write(_(b"(did you mean one of %s?)\n") % ss)
+
 
 def _getadmonitionlist(ui, sections):
     for section in sections:
-        ui.write("%s: %s\n" % (section[0], section[1]))
+        ui.write(b"%s: %s\n" % (section[0], section[1]))
+
 
 def parsenotesfromrevisions(repo, directives, revs):
     notes = parsedreleasenotes()
@@ -302,19 +326,25 @@
     for rev in revs:
         ctx = repo[rev]
 
-        blocks, pruned = minirst.parse(ctx.description(),
-                                       admonitions=directives)
+        blocks, pruned = minirst.parse(
+            ctx.description(), admonitions=directives
+        )
 
         for i, block in enumerate(blocks):
-            if block['type'] != 'admonition':
+            if block[b'type'] != b'admonition':
                 continue
 
-            directive = block['admonitiontitle']
-            title = block['lines'][0].strip() if block['lines'] else None
+            directive = block[b'admonitiontitle']
+            title = block[b'lines'][0].strip() if block[b'lines'] else None
 
             if i + 1 == len(blocks):
-                raise error.Abort(_('changeset %s: release notes directive %s '
-                        'lacks content') % (ctx, directive))
+                raise error.Abort(
+                    _(
+                        b'changeset %s: release notes directive %s '
+                        b'lacks content'
+                    )
+                    % (ctx, directive)
+                )
 
             # Now search ahead and find all paragraphs attached to this
             # admonition.
@@ -323,25 +353,32 @@
                 pblock = blocks[j]
 
                 # Margin blocks may appear between paragraphs. Ignore them.
-                if pblock['type'] == 'margin':
+                if pblock[b'type'] == b'margin':
                     continue
 
-                if pblock['type'] == 'admonition':
+                if pblock[b'type'] == b'admonition':
                     break
 
-                if pblock['type'] != 'paragraph':
-                    repo.ui.warn(_('changeset %s: unexpected block in release '
-                        'notes directive %s\n') % (ctx, directive))
+                if pblock[b'type'] != b'paragraph':
+                    repo.ui.warn(
+                        _(
+                            b'changeset %s: unexpected block in release '
+                            b'notes directive %s\n'
+                        )
+                        % (ctx, directive)
+                    )
 
-                if pblock['indent'] > 0:
-                    paragraphs.append(pblock['lines'])
+                if pblock[b'indent'] > 0:
+                    paragraphs.append(pblock[b'lines'])
                 else:
                     break
 
             # TODO consider using title as paragraph for more concise notes.
             if not paragraphs:
-                repo.ui.warn(_("error parsing releasenotes for revision: "
-                               "'%s'\n") % node.hex(ctx.node()))
+                repo.ui.warn(
+                    _(b"error parsing releasenotes for revision: '%s'\n")
+                    % node.hex(ctx.node())
+                )
             if title:
                 notes.addtitleditem(directive, title, paragraphs)
             else:
@@ -349,6 +386,7 @@
 
     return notes
 
+
 def parsereleasenotesfile(sections, text):
     """Parse text content containing generated release notes."""
     notes = parsedreleasenotes()
@@ -361,49 +399,52 @@
         for i in range(offset + 1, len(blocks)):
             block = blocks[i]
 
-            if block['type'] == 'margin':
+            if block[b'type'] == b'margin':
                 continue
-            elif block['type'] == 'section':
+            elif block[b'type'] == b'section':
                 break
-            elif block['type'] == 'bullet':
-                if block['indent'] != 0:
-                    raise error.Abort(_('indented bullet lists not supported'))
+            elif block[b'type'] == b'bullet':
+                if block[b'indent'] != 0:
+                    raise error.Abort(_(b'indented bullet lists not supported'))
                 if title:
-                    lines = [l[1:].strip() for l in block['lines']]
+                    lines = [l[1:].strip() for l in block[b'lines']]
                     notefragment.append(lines)
                     continue
                 else:
-                    lines = [[l[1:].strip() for l in block['lines']]]
+                    lines = [[l[1:].strip() for l in block[b'lines']]]
 
-                    for block in blocks[i + 1:]:
-                        if block['type'] in ('bullet', 'section'):
+                    for block in blocks[i + 1 :]:
+                        if block[b'type'] in (b'bullet', b'section'):
                             break
-                        if block['type'] == 'paragraph':
-                            lines.append(block['lines'])
+                        if block[b'type'] == b'paragraph':
+                            lines.append(block[b'lines'])
                     notefragment.append(lines)
                     continue
-            elif block['type'] != 'paragraph':
-                raise error.Abort(_('unexpected block type in release notes: '
-                                    '%s') % block['type'])
+            elif block[b'type'] != b'paragraph':
+                raise error.Abort(
+                    _(b'unexpected block type in release notes: %s')
+                    % block[b'type']
+                )
             if title:
-                notefragment.append(block['lines'])
+                notefragment.append(block[b'lines'])
 
         return notefragment
 
     currentsection = None
     for i, block in enumerate(blocks):
-        if block['type'] != 'section':
+        if block[b'type'] != b'section':
             continue
 
-        title = block['lines'][0]
+        title = block[b'lines'][0]
 
         # TODO the parsing around paragraphs and bullet points needs some
         # work.
-        if block['underline'] == '=':  # main section
+        if block[b'underline'] == b'=':  # main section
             name = sections.sectionfromtitle(title)
             if not name:
-                raise error.Abort(_('unknown release notes section: %s') %
-                                  title)
+                raise error.Abort(
+                    _(b'unknown release notes section: %s') % title
+                )
 
             currentsection = name
             bullet_points = gatherparagraphsbullets(i)
@@ -411,7 +452,7 @@
                 for para in bullet_points:
                     notes.addnontitleditem(currentsection, para)
 
-        elif block['underline'] == '-':  # sub-section
+        elif block[b'underline'] == b'-':  # sub-section
             if title == BULLET_SECTION:
                 bullet_points = gatherparagraphsbullets(i)
                 for para in bullet_points:
@@ -420,10 +461,11 @@
                 paragraphs = gatherparagraphsbullets(i, True)
                 notes.addtitleditem(currentsection, title, paragraphs)
         else:
-            raise error.Abort(_('unsupported section type for %s') % title)
+            raise error.Abort(_(b'unsupported section type for %s') % title)
 
     return notes
 
+
 def serializenotes(sections, notes):
     """Serialize release notes from parsed fragments and notes.
 
@@ -437,22 +479,23 @@
             continue
 
         lines.append(sectiontitle)
-        lines.append('=' * len(sectiontitle))
-        lines.append('')
+        lines.append(b'=' * len(sectiontitle))
+        lines.append(b'')
 
         # First pass to emit sub-sections.
         for title, paragraphs in notes.titledforsection(sectionname):
             lines.append(title)
-            lines.append('-' * len(title))
-            lines.append('')
+            lines.append(b'-' * len(title))
+            lines.append(b'')
 
             for i, para in enumerate(paragraphs):
                 if i:
-                    lines.append('')
-                lines.extend(stringutil.wrap(' '.join(para),
-                                             width=78).splitlines())
+                    lines.append(b'')
+                lines.extend(
+                    stringutil.wrap(b' '.join(para), width=78).splitlines()
+                )
 
-            lines.append('')
+            lines.append(b'')
 
         # Second pass to emit bullet list items.
 
@@ -464,37 +507,66 @@
         if notes.titledforsection(sectionname) and nontitled:
             # TODO make configurable.
             lines.append(BULLET_SECTION)
-            lines.append('-' * len(BULLET_SECTION))
-            lines.append('')
+            lines.append(b'-' * len(BULLET_SECTION))
+            lines.append(b'')
 
         for paragraphs in nontitled:
-            lines.extend(stringutil.wrap(' '.join(paragraphs[0]),
-                                         width=78,
-                                         initindent='* ',
-                                         hangindent='  ').splitlines())
+            lines.extend(
+                stringutil.wrap(
+                    b' '.join(paragraphs[0]),
+                    width=78,
+                    initindent=b'* ',
+                    hangindent=b'  ',
+                ).splitlines()
+            )
 
             for para in paragraphs[1:]:
-                lines.append('')
-                lines.extend(stringutil.wrap(' '.join(para),
-                                             width=78,
-                                             initindent='  ',
-                                             hangindent='  ').splitlines())
+                lines.append(b'')
+                lines.extend(
+                    stringutil.wrap(
+                        b' '.join(para),
+                        width=78,
+                        initindent=b'  ',
+                        hangindent=b'  ',
+                    ).splitlines()
+                )
 
-            lines.append('')
+            lines.append(b'')
 
     if lines and lines[-1]:
-        lines.append('')
+        lines.append(b'')
 
-    return '\n'.join(lines)
+    return b'\n'.join(lines)
+
 
-@command('releasenotes',
-    [('r', 'rev', '', _('revisions to process for release notes'), _('REV')),
-    ('c', 'check', False, _('checks for validity of admonitions (if any)'),
-        _('REV')),
-    ('l', 'list', False, _('list the available admonitions with their title'),
-        None)],
-    _('hg releasenotes [-r REV] [-c] FILE'),
-    helpcategory=command.CATEGORY_CHANGE_NAVIGATION)
+@command(
+    b'releasenotes',
+    [
+        (
+            b'r',
+            b'rev',
+            b'',
+            _(b'revisions to process for release notes'),
+            _(b'REV'),
+        ),
+        (
+            b'c',
+            b'check',
+            False,
+            _(b'checks for validity of admonitions (if any)'),
+            _(b'REV'),
+        ),
+        (
+            b'l',
+            b'list',
+            False,
+            _(b'list the available admonitions with their title'),
+            None,
+        ),
+    ],
+    _(b'hg releasenotes [-r REV] [-c] FILE'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
+)
 def releasenotes(ui, repo, file_=None, **opts):
     """parse release notes from commit messages into an output file
 
@@ -581,29 +653,29 @@
     opts = pycompat.byteskwargs(opts)
     sections = releasenotessections(ui, repo)
 
-    listflag = opts.get('list')
+    listflag = opts.get(b'list')
 
-    if listflag and opts.get('rev'):
-        raise error.Abort(_('cannot use both \'--list\' and \'--rev\''))
-    if listflag and opts.get('check'):
-        raise error.Abort(_('cannot use both \'--list\' and \'--check\''))
+    if listflag and opts.get(b'rev'):
+        raise error.Abort(_(b'cannot use both \'--list\' and \'--rev\''))
+    if listflag and opts.get(b'check'):
+        raise error.Abort(_(b'cannot use both \'--list\' and \'--check\''))
 
     if listflag:
         return _getadmonitionlist(ui, sections)
 
-    rev = opts.get('rev')
-    revs = scmutil.revrange(repo, [rev or 'not public()'])
-    if opts.get('check'):
+    rev = opts.get(b'rev')
+    revs = scmutil.revrange(repo, [rev or b'not public()'])
+    if opts.get(b'check'):
         return checkadmonitions(ui, repo, sections.names(), revs)
 
     incoming = parsenotesfromrevisions(repo, sections.names(), revs)
 
     if file_ is None:
-        ui.pager('releasenotes')
+        ui.pager(b'releasenotes')
         return ui.write(serializenotes(sections, incoming))
 
     try:
-        with open(file_, 'rb') as fh:
+        with open(file_, b'rb') as fh:
             notes = parsereleasenotesfile(sections, fh.read())
     except IOError as e:
         if e.errno != errno.ENOENT:
@@ -613,16 +685,17 @@
 
     notes.merge(ui, incoming)
 
-    with open(file_, 'wb') as fh:
+    with open(file_, b'wb') as fh:
         fh.write(serializenotes(sections, notes))
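
Tying the option handling together: a bare invocation defaults to the
``not public()`` revset, so these two are equivalent (path illustrative)::

   $ hg releasenotes relnotes.rst
   $ hg releasenotes -r 'not public()' relnotes.rst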
 
-@command('debugparsereleasenotes', norepo=True)
+
+@command(b'debugparsereleasenotes', norepo=True)
 def debugparsereleasenotes(ui, path, repo=None):
     """parse release notes and print resulting data structure"""
-    if path == '-':
+    if path == b'-':
         text = pycompat.stdin.read()
     else:
-        with open(path, 'rb') as fh:
+        with open(path, b'rb') as fh:
             text = fh.read()
 
     sections = releasenotessections(ui, repo)
@@ -630,13 +703,13 @@
     notes = parsereleasenotesfile(sections, text)
 
     for section in notes:
-        ui.write(_('section: %s\n') % section)
+        ui.write(_(b'section: %s\n') % section)
         for title, paragraphs in notes.titledforsection(section):
-            ui.write(_('  subsection: %s\n') % title)
+            ui.write(_(b'  subsection: %s\n') % title)
             for para in paragraphs:
-                ui.write(_('    paragraph: %s\n') % ' '.join(para))
+                ui.write(_(b'    paragraph: %s\n') % b' '.join(para))
 
         for paragraphs in notes.nontitledforsection(section):
-            ui.write(_('  bullet point:\n'))
+            ui.write(_(b'  bullet point:\n'))
             for para in paragraphs:
-                ui.write(_('    paragraph: %s\n') % ' '.join(para))
+                ui.write(_(b'    paragraph: %s\n') % b' '.join(para))
--- a/hgext/relink.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/relink.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,15 +12,14 @@
 import stat
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     error,
     hg,
     registrar,
     util,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 cmdtable = {}
 command = registrar.command(cmdtable)
@@ -28,9 +27,12 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
-@command('relink', [], _('[ORIGIN]'), helpcategory=command.CATEGORY_MAINTENANCE)
+@command(
+    b'relink', [], _(b'[ORIGIN]'), helpcategory=command.CATEGORY_MAINTENANCE
+)
 def relink(ui, repo, origin=None, **opts):
     """recreate hardlinks between two repositories
 
@@ -56,29 +58,33 @@
     command is running. (Both repositories will be locked against
     writes.)
     """
-    if (not util.safehasattr(util, 'samefile') or
-        not util.safehasattr(util, 'samedevice')):
-        raise error.Abort(_('hardlinks are not supported on this system'))
-    src = hg.repository(repo.baseui, ui.expandpath(origin or 'default-relink',
-                                          origin or 'default'))
-    ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
+    if not util.safehasattr(util, b'samefile') or not util.safehasattr(
+        util, b'samedevice'
+    ):
+        raise error.Abort(_(b'hardlinks are not supported on this system'))
+    src = hg.repository(
+        repo.baseui,
+        ui.expandpath(origin or b'default-relink', origin or b'default'),
+    )
+    ui.status(_(b'relinking %s to %s\n') % (src.store.path, repo.store.path))
     if repo.root == src.root:
-        ui.status(_('there is nothing to relink\n'))
+        ui.status(_(b'there is nothing to relink\n'))
         return
 
     if not util.samedevice(src.store.path, repo.store.path):
         # No point in continuing
-        raise error.Abort(_('source and destination are on different devices'))
+        raise error.Abort(_(b'source and destination are on different devices'))
 
     with repo.lock(), src.lock():
         candidates = sorted(collect(src, ui))
         targets = prune(candidates, src.store.path, repo.store.path, ui)
         do_relink(src.store.path, repo.store.path, targets, ui)
 
+
 def collect(src, ui):
     seplen = len(os.path.sep)
     candidates = []
-    live = len(src['tip'].manifest())
+    live = len(src[b'tip'].manifest())
     # Your average repository has some files which were deleted before
     # the tip revision. We account for that by assuming that there are
     # 3 tracked files for every 2 live files as of the tip version of
@@ -87,15 +93,17 @@
     # mozilla-central as of 2010-06-10 had a ratio of just over 7:5.
     total = live * 3 // 2
     src = src.store.path
-    progress = ui.makeprogress(_('collecting'), unit=_('files'), total=total)
+    progress = ui.makeprogress(_(b'collecting'), unit=_(b'files'), total=total)
     pos = 0
-    ui.status(_("tip has %d files, estimated total number of files: %d\n")
-              % (live, total))
+    ui.status(
+        _(b"tip has %d files, estimated total number of files: %d\n")
+        % (live, total)
+    )
     for dirpath, dirnames, filenames in os.walk(src):
         dirnames.sort()
-        relpath = dirpath[len(src) + seplen:]
+        relpath = dirpath[len(src) + seplen :]
         for filename in sorted(filenames):
-            if filename[-2:] not in ('.d', '.i'):
+            if filename[-2:] not in (b'.d', b'.i'):
                 continue
             st = os.stat(os.path.join(dirpath, filename))
             if not stat.S_ISREG(st.st_mode):
@@ -105,9 +113,10 @@
             progress.update(pos, item=filename)
 
     progress.complete()
-    ui.status(_('collected %d candidate storage files\n') % len(candidates))
+    ui.status(_(b'collected %d candidate storage files\n') % len(candidates))
     return candidates
 
+
 def prune(candidates, src, dst, ui):
     def linkfilter(src, dst, st):
         try:
@@ -120,14 +129,16 @@
         if not util.samedevice(src, dst):
             # No point in continuing
             raise error.Abort(
-                _('source and destination are on different devices'))
+                _(b'source and destination are on different devices')
+            )
         if st.st_size != ts.st_size:
             return False
         return st
 
     targets = []
-    progress = ui.makeprogress(_('pruning'), unit=_('files'),
-                               total=len(candidates))
+    progress = ui.makeprogress(
+        _(b'pruning'), unit=_(b'files'), total=len(candidates)
+    )
     pos = 0
     for fn, st in candidates:
         pos += 1
@@ -135,18 +146,21 @@
         tgt = os.path.join(dst, fn)
         ts = linkfilter(srcpath, tgt, st)
         if not ts:
-            ui.debug('not linkable: %s\n' % fn)
+            ui.debug(b'not linkable: %s\n' % fn)
             continue
         targets.append((fn, ts.st_size))
         progress.update(pos, item=fn)
 
     progress.complete()
-    ui.status(_('pruned down to %d probably relinkable files\n') % len(targets))
+    ui.status(
+        _(b'pruned down to %d probably relinkable files\n') % len(targets)
+    )
     return targets
 
+
 def do_relink(src, dst, files, ui):
     def relinkfile(src, dst):
-        bak = dst + '.bak'
+        bak = dst + b'.bak'
         os.rename(dst, bak)
         try:
             util.oslink(src, dst)
@@ -159,16 +173,17 @@
     relinked = 0
     savedbytes = 0
 
-    progress = ui.makeprogress(_('relinking'), unit=_('files'),
-                               total=len(files))
+    progress = ui.makeprogress(
+        _(b'relinking'), unit=_(b'files'), total=len(files)
+    )
     pos = 0
     for f, sz in files:
         pos += 1
         source = os.path.join(src, f)
         tgt = os.path.join(dst, f)
         # Binary mode, so that read() works correctly, especially on Windows
-        sfp = open(source, 'rb')
-        dfp = open(tgt, 'rb')
+        sfp = open(source, b'rb')
+        dfp = open(tgt, b'rb')
         sin = sfp.read(CHUNKLEN)
         while sin:
             din = dfp.read(CHUNKLEN)
@@ -178,7 +193,7 @@
         sfp.close()
         dfp.close()
         if sin:
-            ui.debug('not linkable: %s\n' % f)
+            ui.debug(b'not linkable: %s\n' % f)
             continue
         try:
             relinkfile(source, tgt)
@@ -186,9 +201,11 @@
             relinked += 1
             savedbytes += sz
         except OSError as inst:
-            ui.warn('%s: %s\n' % (tgt, stringutil.forcebytestr(inst)))
+            ui.warn(b'%s: %s\n' % (tgt, stringutil.forcebytestr(inst)))
 
     progress.complete()
 
-    ui.status(_('relinked %d files (%s reclaimed)\n') %
-              (relinked, util.bytecount(savedbytes)))
+    ui.status(
+        _(b'relinked %d files (%s reclaimed)\n')
+        % (relinked, util.bytecount(savedbytes))
+    )
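``do_relink`` hardlinks a file only when its contents match byte for byte,
comparing chunkwise as above. A standalone sketch of that comparison
(``CHUNKLEN`` is defined elsewhere in relink.py; the value here is an
assumption)::

   CHUNKLEN = 65536  # assumed; the extension defines its own constant

   def samecontent(path1, path2):
       # Binary mode so read() is byte-exact, especially on Windows.
       with open(path1, 'rb') as f1, open(path2, 'rb') as f2:
           while True:
               chunk = f1.read(CHUNKLEN)
               if chunk != f2.read(CHUNKLEN):
                   return False
               if not chunk:
                   return True
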
--- a/hgext/remotefilelog/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -132,6 +132,7 @@
 
 from mercurial.node import hex
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     changegroup,
     changelog,
@@ -185,146 +186,167 @@
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('remotefilelog', 'debug', default=False)
+configitem(b'remotefilelog', b'debug', default=False)
 
-configitem('remotefilelog', 'reponame', default='')
-configitem('remotefilelog', 'cachepath', default=None)
-configitem('remotefilelog', 'cachegroup', default=None)
-configitem('remotefilelog', 'cacheprocess', default=None)
-configitem('remotefilelog', 'cacheprocess.includepath', default=None)
-configitem("remotefilelog", "cachelimit", default="1000 GB")
+configitem(b'remotefilelog', b'reponame', default=b'')
+configitem(b'remotefilelog', b'cachepath', default=None)
+configitem(b'remotefilelog', b'cachegroup', default=None)
+configitem(b'remotefilelog', b'cacheprocess', default=None)
+configitem(b'remotefilelog', b'cacheprocess.includepath', default=None)
+configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB")
 
-configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
-           alias=[('remotefilelog', 'fallbackrepo')])
+configitem(
+    b'remotefilelog',
+    b'fallbackpath',
+    default=configitems.dynamicdefault,
+    alias=[(b'remotefilelog', b'fallbackrepo')],
+)
 
-configitem('remotefilelog', 'validatecachelog', default=None)
-configitem('remotefilelog', 'validatecache', default='on')
-configitem('remotefilelog', 'server', default=None)
-configitem('remotefilelog', 'servercachepath', default=None)
-configitem("remotefilelog", "serverexpiration", default=30)
-configitem('remotefilelog', 'backgroundrepack', default=False)
-configitem('remotefilelog', 'bgprefetchrevs', default=None)
-configitem('remotefilelog', 'pullprefetch', default=None)
-configitem('remotefilelog', 'backgroundprefetch', default=False)
-configitem('remotefilelog', 'prefetchdelay', default=120)
-configitem('remotefilelog', 'prefetchdays', default=14)
+configitem(b'remotefilelog', b'validatecachelog', default=None)
+configitem(b'remotefilelog', b'validatecache', default=b'on')
+configitem(b'remotefilelog', b'server', default=None)
+configitem(b'remotefilelog', b'servercachepath', default=None)
+configitem(b"remotefilelog", b"serverexpiration", default=30)
+configitem(b'remotefilelog', b'backgroundrepack', default=False)
+configitem(b'remotefilelog', b'bgprefetchrevs', default=None)
+configitem(b'remotefilelog', b'pullprefetch', default=None)
+configitem(b'remotefilelog', b'backgroundprefetch', default=False)
+configitem(b'remotefilelog', b'prefetchdelay', default=120)
+configitem(b'remotefilelog', b'prefetchdays', default=14)
 
-configitem('remotefilelog', 'getfilesstep', default=10000)
-configitem('remotefilelog', 'getfilestype', default='optimistic')
-configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
-configitem('remotefilelog', 'fetchwarning', default='')
+configitem(b'remotefilelog', b'getfilesstep', default=10000)
+configitem(b'remotefilelog', b'getfilestype', default=b'optimistic')
+configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault)
+configitem(b'remotefilelog', b'fetchwarning', default=b'')
+
+configitem(b'remotefilelog', b'includepattern', default=None)
+configitem(b'remotefilelog', b'excludepattern', default=None)
 
-configitem('remotefilelog', 'includepattern', default=None)
-configitem('remotefilelog', 'excludepattern', default=None)
+configitem(b'remotefilelog', b'gcrepack', default=False)
+configitem(b'remotefilelog', b'repackonhggc', default=False)
+configitem(b'repack', b'chainorphansbysize', default=True, experimental=True)
 
-configitem('remotefilelog', 'gcrepack', default=False)
-configitem('remotefilelog', 'repackonhggc', default=False)
-configitem('repack', 'chainorphansbysize', default=True)
+configitem(b'packs', b'maxpacksize', default=0)
+configitem(b'packs', b'maxchainlen', default=1000)
 
-configitem('packs', 'maxpacksize', default=0)
-configitem('packs', 'maxchainlen', default=1000)
+configitem(b'devel', b'remotefilelog.ensurestart', default=False)
 
 #  default TTL limit is 30 days
 _defaultlimit = 60 * 60 * 24 * 30
-configitem('remotefilelog', 'nodettl', default=_defaultlimit)
+configitem(b'remotefilelog', b'nodettl', default=_defaultlimit)
 
-configitem('remotefilelog', 'data.gencountlimit', default=2),
-configitem('remotefilelog', 'data.generations',
-           default=['1GB', '100MB', '1MB'])
-configitem('remotefilelog', 'data.maxrepackpacks', default=50)
-configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
-configitem('remotefilelog', 'data.repacksizelimit', default='100MB')
+configitem(b'remotefilelog', b'data.gencountlimit', default=2)
+configitem(
+    b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB']
+)
+configitem(b'remotefilelog', b'data.maxrepackpacks', default=50)
+configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB')
+configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB')
 
-configitem('remotefilelog', 'history.gencountlimit', default=2),
-configitem('remotefilelog', 'history.generations', default=['100MB'])
-configitem('remotefilelog', 'history.maxrepackpacks', default=50)
-configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
-configitem('remotefilelog', 'history.repacksizelimit', default='100MB')
+configitem(b'remotefilelog', b'history.gencountlimit', default=2)
+configitem(b'remotefilelog', b'history.generations', default=[b'100MB'])
+configitem(b'remotefilelog', b'history.maxrepackpacks', default=50)
+configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB')
+configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB')
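
These registrations correspond to ``[remotefilelog]`` settings in hgrc; a
minimal client configuration might look like (values illustrative)::

   [extensions]
   remotefilelog =

   [remotefilelog]
   cachepath = /var/cache/hgcache
   reponame = myrepo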
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 repoclass = localrepo.localrepository
 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
 
 isenabled = shallowutil.isenabled
 
+
 def uisetup(ui):
     """Wraps user facing Mercurial commands to swap them out with shallow
     versions.
     """
     hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
 
-    entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
-    entry[1].append(('', 'shallow', None,
-                     _("create a shallow clone which uses remote file "
-                       "history")))
+    entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow)
+    entry[1].append(
+        (
+            b'',
+            b'shallow',
+            None,
+            _(b"create a shallow clone which uses remote file history"),
+        )
+    )
 
-    extensions.wrapcommand(commands.table, 'debugindex',
-        debugcommands.debugindex)
-    extensions.wrapcommand(commands.table, 'debugindexdot',
-        debugcommands.debugindexdot)
-    extensions.wrapcommand(commands.table, 'log', log)
-    extensions.wrapcommand(commands.table, 'pull', pull)
+    extensions.wrapcommand(
+        commands.table, b'debugindex', debugcommands.debugindex
+    )
+    extensions.wrapcommand(
+        commands.table, b'debugindexdot', debugcommands.debugindexdot
+    )
+    extensions.wrapcommand(commands.table, b'log', log)
+    extensions.wrapcommand(commands.table, b'pull', pull)
 
     # Prevent 'hg manifest --all'
     def _manifest(orig, ui, repo, *args, **opts):
-        if (isenabled(repo) and opts.get(r'all')):
-            raise error.Abort(_("--all is not supported in a shallow repo"))
+        if isenabled(repo) and opts.get(r'all'):
+            raise error.Abort(_(b"--all is not supported in a shallow repo"))
 
         return orig(ui, repo, *args, **opts)
-    extensions.wrapcommand(commands.table, "manifest", _manifest)
+
+    extensions.wrapcommand(commands.table, b"manifest", _manifest)
 
     # Wrap remotefilelog with lfs code
     def _lfsloaded(loaded=False):
         lfsmod = None
         try:
-            lfsmod = extensions.find('lfs')
+            lfsmod = extensions.find(b'lfs')
         except KeyError:
             pass
         if lfsmod:
             lfsmod.wrapfilelog(remotefilelog.remotefilelog)
             fileserverclient._lfsmod = lfsmod
-    extensions.afterloaded('lfs', _lfsloaded)
+
+    extensions.afterloaded(b'lfs', _lfsloaded)
 
     # debugdata needs remotefilelog.len to work
-    extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
+    extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow)
 
     changegroup.cgpacker = shallowbundle.shallowcg1packer
 
-    extensions.wrapfunction(changegroup, '_addchangegroupfiles',
-                            shallowbundle.addchangegroupfiles)
+    extensions.wrapfunction(
+        changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles
+    )
     extensions.wrapfunction(
-        changegroup, 'makechangegroup', shallowbundle.makechangegroup)
-    extensions.wrapfunction(localrepo, 'makestore', storewrapper)
-    extensions.wrapfunction(exchange, 'pull', exchangepull)
-    extensions.wrapfunction(merge, 'applyupdates', applyupdates)
-    extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
-    extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
-    extensions.wrapfunction(scmutil, '_findrenames', findrenames)
-    extensions.wrapfunction(copies, '_computeforwardmissing',
-                            computeforwardmissing)
-    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
-    extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
-    extensions.wrapfunction(context.changectx, 'filectx', filectx)
-    extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
-    extensions.wrapfunction(patch, 'trydiff', trydiff)
-    extensions.wrapfunction(hg, 'verify', _verify)
-    scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
+        changegroup, b'makechangegroup', shallowbundle.makechangegroup
+    )
+    extensions.wrapfunction(localrepo, b'makestore', storewrapper)
+    extensions.wrapfunction(exchange, b'pull', exchangepull)
+    extensions.wrapfunction(merge, b'applyupdates', applyupdates)
+    extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles)
+    extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup)
+    extensions.wrapfunction(scmutil, b'_findrenames', findrenames)
+    extensions.wrapfunction(
+        copies, b'_computeforwardmissing', computeforwardmissing
+    )
+    extensions.wrapfunction(dispatch, b'runcommand', runcommand)
+    extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets)
+    extensions.wrapfunction(context.changectx, b'filectx', filectx)
+    extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx)
+    extensions.wrapfunction(patch, b'trydiff', trydiff)
+    extensions.wrapfunction(hg, b'verify', _verify)
+    scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook)
 
     # disappointing hacks below
-    extensions.wrapfunction(scmutil, 'getrenamedfn', getrenamedfn)
-    extensions.wrapfunction(revset, 'filelog', filelogrevset)
-    revset.symbols['filelog'] = revset.filelog
-    extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
+    extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn)
+    extensions.wrapfunction(revset, b'filelog', filelogrevset)
+    revset.symbols[b'filelog'] = revset.filelog
+    extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs)
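
Every wrapper registered above follows the same convention: Mercurial
passes the original callable as the first argument, so the wrapper can
run code before and after delegating. A minimal sketch of the generic
pattern, with an illustrative wrapper (not remotefilelog's actual pull
wrapper):

from mercurial import exchange, extensions

def loggingpull(orig, repo, remote, *args, **kwargs):
    repo.ui.debug(b'pull starting\n')   # pre-work
    result = orig(repo, remote, *args, **kwargs)
    # post-work would go here (e.g. kicking off a prefetch)
    return result

def uisetup(ui):
    extensions.wrapfunction(exchange, b'pull', loggingpull)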
 
 
 def cloneshallow(orig, ui, repo, *args, **opts):
     if opts.get(r'shallow'):
         repos = []
+
         def pull_shallow(orig, self, *args, **kwargs):
             if not isenabled(self):
                 repos.append(self.unfiltered())
@@ -334,8 +356,10 @@
                 # setupclient fixed the class on the repo itself
                 # but we also need to fix it on the repoview
                 if isinstance(self, repoview.repoview):
-                    self.__class__.__bases__ = (self.__class__.__bases__[0],
-                                                self.unfiltered().__class__)
+                    self.__class__.__bases__ = (
+                        self.__class__.__bases__[0],
+                        self.unfiltered().__class__,
+                    )
                 self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                 self._writerequirements()
 
@@ -344,7 +368,8 @@
                 return exchangepull(orig, self, *args, **kwargs)
             else:
                 return orig(self, *args, **kwargs)
-        extensions.wrapfunction(exchange, 'pull', pull_shallow)
+
+        extensions.wrapfunction(exchange, b'pull', pull_shallow)
 
         # Wrap the stream logic to add requirements and to pass include/exclude
         # patterns around.
@@ -356,44 +381,55 @@
                 if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                     opts = {}
                     if repo.includepattern:
-                        opts[r'includepattern'] = '\0'.join(repo.includepattern)
+                        opts[r'includepattern'] = b'\0'.join(
+                            repo.includepattern
+                        )
                     if repo.excludepattern:
-                        opts[r'excludepattern'] = '\0'.join(repo.excludepattern)
-                    return remote._callstream('stream_out_shallow', **opts)
+                        opts[r'excludepattern'] = b'\0'.join(
+                            repo.excludepattern
+                        )
+                    return remote._callstream(b'stream_out_shallow', **opts)
                 else:
                     return orig()
-            extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
+
+            extensions.wrapfunction(remote, b'stream_out', stream_out_shallow)
+
         def stream_wrap(orig, op):
             setup_streamout(op.repo, op.remote)
             return orig(op)
+
         extensions.wrapfunction(
-            streamclone, 'maybeperformlegacystreamclone', stream_wrap)
+            streamclone, b'maybeperformlegacystreamclone', stream_wrap
+        )
 
         def canperformstreamclone(orig, pullop, bundle2=False):
             # remotefilelog is currently incompatible with the
             # bundle2 flavor of streamclones, so force us to use
             # v1 instead.
-            if 'v2' in pullop.remotebundle2caps.get('stream', []):
-                pullop.remotebundle2caps['stream'] = [
-                    c for c in pullop.remotebundle2caps['stream']
-                    if c != 'v2']
+            if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
+                pullop.remotebundle2caps[b'stream'] = [
+                    c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2'
+                ]
             if bundle2:
                 return False, None
             supported, requirements = orig(pullop, bundle2=bundle2)
             if requirements is not None:
                 requirements.add(constants.SHALLOWREPO_REQUIREMENT)
             return supported, requirements
+
         extensions.wrapfunction(
-            streamclone, 'canperformstreamclone', canperformstreamclone)
+            streamclone, b'canperformstreamclone', canperformstreamclone
+        )
 
     try:
         orig(ui, repo, *args, **opts)
     finally:
         if opts.get(r'shallow'):
             for r in repos:
-                if util.safehasattr(r, 'fileservice'):
+                if util.safehasattr(r, b'fileservice'):
                     r.fileservice.close()
 
+
 def debugdatashallow(orig, *args, **kwds):
     oldlen = remotefilelog.remotefilelog.__len__
     try:
@@ -402,19 +438,20 @@
     finally:
         remotefilelog.remotefilelog.__len__ = oldlen
 
+
 def reposetup(ui, repo):
     if not repo.local():
         return
 
     # put here intentionally because it doesn't work in uisetup
-    ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
-    ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)
+    ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch)
+    ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch)
 
-    isserverenabled = ui.configbool('remotefilelog', 'server')
+    isserverenabled = ui.configbool(b'remotefilelog', b'server')
     isshallowclient = isenabled(repo)
 
     if isserverenabled and isshallowclient:
-        raise RuntimeError("Cannot be both a server and shallow client.")
+        raise RuntimeError(b"Cannot be both a server and shallow client.")
 
     if isshallowclient:
         setupclient(ui, repo)
@@ -422,6 +459,7 @@
     if isserverenabled:
         remotefilelogserver.setupserver(ui, repo)
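
Passing a callable to ui.setconfig, as above, registers an in-process
Python hook without touching any hgrc. A self-contained sketch with an
assumed hook name and the usual python-hook signature:

def myprefetchhook(ui, repo, hooktype=None, **kwargs):
    # a real hook would kick off background work here
    ui.status(b'working copy parent changed\n')

def reposetup(ui, repo):
    if repo.local():
        ui.setconfig(b'hooks', b'update.myprefetch', myprefetchhook)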
 
+
 def setupclient(ui, repo):
     if not isinstance(repo, localrepo.localrepository):
         return
@@ -434,6 +472,7 @@
     shallowrepo.wraprepo(repo)
     repo.store = shallowstore.wrapstore(repo.store)
 
+
 def storewrapper(orig, requirements, path, vfstype):
     s = orig(requirements, path, vfstype)
     if constants.SHALLOWREPO_REQUIREMENT in requirements:
@@ -441,37 +480,41 @@
 
     return s
 
+
 # prefetch files before update
-def applyupdates(orig, repo, actions, wctx, mctx, overwrite, wantfiledata,
-                 labels=None):
+def applyupdates(
+    orig, repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
+):
     if isenabled(repo):
         manifest = mctx.manifest()
         files = []
-        for f, args, msg in actions['g']:
+        for f, args, msg in actions[b'g']:
             files.append((f, hex(manifest[f])))
         # batch fetch the needed files from the server
         repo.fileservice.prefetch(files)
-    return orig(repo, actions, wctx, mctx, overwrite, wantfiledata,
-                labels=labels)
+    return orig(
+        repo, actions, wctx, mctx, overwrite, wantfiledata, labels=labels
+    )
+
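
The wrappers in this stretch all use one batching idiom: collect
(path, hex node) pairs first, then issue a single
repo.fileservice.prefetch() call instead of one fetch per file. A toy
sketch of the collection step (the helper name is illustrative):

from mercurial.node import hex

def collectprefetch(manifest, paths):
    """Return (path, hexnode) pairs for paths present in the manifest."""
    return [(p, hex(manifest[p])) for p in paths if p in manifest]

# e.g.: repo.fileservice.prefetch(collectprefetch(mctx.manifest(), wanted))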
 
 # Prefetch merge checkunknownfiles
-def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
-    *args, **kwargs):
+def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs):
     if isenabled(repo):
         files = []
         sparsematch = repo.maybesparsematch(mctx.rev())
-        for f, (m, actionargs, msg) in actions.iteritems():
+        for f, (m, actionargs, msg) in pycompat.iteritems(actions):
             if sparsematch and not sparsematch(f):
                 continue
-            if m in ('c', 'dc', 'cm'):
+            if m in (b'c', b'dc', b'cm'):
                 files.append((f, hex(mctx.filenode(f))))
-            elif m == 'dg':
+            elif m == b'dg':
                 f2 = actionargs[0]
                 files.append((f2, hex(mctx.filenode(f2))))
         # batch fetch the needed files from the server
         repo.fileservice.prefetch(files)
     return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
 
+
 # Prefetch files before status attempts to look at their size and contents
 def checklookup(orig, self, files):
     repo = self._repo
@@ -485,11 +528,12 @@
         repo.fileservice.prefetch(prefetchfiles)
     return orig(self, files)
 
+
 # Prefetch the logic that compares added and removed files for renames
 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
     if isenabled(repo):
         files = []
-        pmf = repo['.'].manifest()
+        pmf = repo[b'.'].manifest()
         for f in removed:
             if f in pmf:
                 files.append((f, hex(pmf[f])))
@@ -497,6 +541,7 @@
         repo.fileservice.prefetch(files)
     return orig(repo, matcher, added, removed, *args, **kwargs)
 
+
 # prefetch files before pathcopies check
 def computeforwardmissing(orig, a, b, match=None):
     missing = orig(a, b, match=match)
@@ -518,6 +563,7 @@
         repo.fileservice.prefetch(files)
     return missing
 
+
 # close cache miss server connection after the command has finished
 def runcommand(orig, lui, repo, *args, **kwargs):
     fileservice = None
@@ -532,31 +578,48 @@
         if fileservice:
             fileservice.close()
 
+
 # prevent strip from stripping remotefilelogs
 def _collectbrokencsets(orig, repo, files, striprev):
     if isenabled(repo):
         files = list([f for f in files if not repo.shallowmatch(f)])
     return orig(repo, files, striprev)
 
+
 # changectx wrappers
 def filectx(orig, self, path, fileid=None, filelog=None):
     if fileid is None:
         fileid = self.filenode(path)
-    if (isenabled(self._repo) and self._repo.shallowmatch(path)):
-        return remotefilectx.remotefilectx(self._repo, path, fileid=fileid,
-                                           changectx=self, filelog=filelog)
+    if isenabled(self._repo) and self._repo.shallowmatch(path):
+        return remotefilectx.remotefilectx(
+            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
+        )
     return orig(self, path, fileid=fileid, filelog=filelog)
 
+
 def workingfilectx(orig, self, path, filelog=None):
-    if (isenabled(self._repo) and self._repo.shallowmatch(path)):
-        return remotefilectx.remoteworkingfilectx(self._repo, path,
-                                                  workingctx=self,
-                                                  filelog=filelog)
+    if isenabled(self._repo) and self._repo.shallowmatch(path):
+        return remotefilectx.remoteworkingfilectx(
+            self._repo, path, workingctx=self, filelog=filelog
+        )
     return orig(self, path, filelog=filelog)
 
+
 # prefetch required revisions before a diff
-def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
-    copy, getfilectx, *args, **kwargs):
+def trydiff(
+    orig,
+    repo,
+    revs,
+    ctx1,
+    ctx2,
+    modified,
+    added,
+    removed,
+    copy,
+    getfilectx,
+    *args,
+    **kwargs
+):
     if isenabled(repo):
         prefetch = []
         mf1 = ctx1.manifest()
@@ -573,8 +636,20 @@
 
         repo.fileservice.prefetch(prefetch)
 
-    return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
-                getfilectx, *args, **kwargs)
+    return orig(
+        repo,
+        revs,
+        ctx1,
+        ctx2,
+        modified,
+        added,
+        removed,
+        copy,
+        getfilectx,
+        *args,
+        **kwargs
+    )
+
 
 # Prevent verify from processing files
 # a stub for mercurial.hg.verify()
@@ -587,6 +662,8 @@
 
 
 clientonetime = False
+
+
 def onetimeclientsetup(ui):
     global clientonetime
     if clientonetime:
@@ -598,18 +675,53 @@
     # This violates Mercurial's filelog->manifest->changelog write order,
     # but is generally fine for client repos.
     pendingfilecommits = []
-    def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
-                       flags, cachedelta=None, _metatuple=None):
+
+    def addrawrevision(
+        orig,
+        self,
+        rawtext,
+        transaction,
+        link,
+        p1,
+        p2,
+        node,
+        flags,
+        cachedelta=None,
+        _metatuple=None,
+    ):
         if isinstance(link, int):
             pendingfilecommits.append(
-                (self, rawtext, transaction, link, p1, p2, node, flags,
-                 cachedelta, _metatuple))
+                (
+                    self,
+                    rawtext,
+                    transaction,
+                    link,
+                    p1,
+                    p2,
+                    node,
+                    flags,
+                    cachedelta,
+                    _metatuple,
+                )
+            )
             return node
         else:
-            return orig(self, rawtext, transaction, link, p1, p2, node, flags,
-                        cachedelta, _metatuple=_metatuple)
+            return orig(
+                self,
+                rawtext,
+                transaction,
+                link,
+                p1,
+                p2,
+                node,
+                flags,
+                cachedelta,
+                _metatuple=_metatuple,
+            )
+
     extensions.wrapfunction(
-        remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)
+        remotefilelog.remotefilelog, b'addrawrevision', addrawrevision
+    )
 
     def changelogadd(orig, self, *args):
         oldlen = len(self)
@@ -623,17 +735,21 @@
                     log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                 else:
                     raise error.ProgrammingError(
-                        'pending multiple integer revisions are not supported')
+                        b'pending multiple integer revisions are not supported'
+                    )
         else:
             # "link" is actually wrong here (it is set to len(changelog))
             # if changelog remains unchanged, skip writing file revisions
             # but still do a sanity check about pending multiple revisions
             if len(set(x[3] for x in pendingfilecommits)) > 1:
                 raise error.ProgrammingError(
-                    'pending multiple integer revisions are not supported')
+                    b'pending multiple integer revisions are not supported'
+                )
         del pendingfilecommits[:]
         return node
-    extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
+
+    extensions.wrapfunction(changelog.changelog, b'add', changelogadd)
+
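
Stripped of Mercurial internals, the buffering above is: writes that
arrive with a provisional integer linkrev are queued, then flushed once
the changelog add supplies the real node. A standalone sketch (all
names illustrative):

pending = []

def addfilerev(data, link, write):
    if isinstance(link, int):   # linkrev not yet resolved to a node
        pending.append(data)
        return
    write(data, link)

def onchangelogadd(node, write):
    for data in pending:        # flush, now linked to the known node
        write(data, node)
    del pending[:]

out = []
addfilerev(b'blob', 0, out.append)   # deferred: link is an int
onchangelogadd(b'deadbeef', lambda d, n: out.append((d, n)))
assert out == [(b'blob', b'deadbeef')]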
 
 def getrenamedfn(orig, repo, endrev=None):
     if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
@@ -663,6 +779,7 @@
 
     return getrenamed
 
+
 def walkfilerevs(orig, repo, match, follow, revs, fncache):
     if not isenabled(repo):
         return orig(repo, match, follow, revs, fncache)
@@ -670,16 +787,17 @@
     # remotefilelogs can't be walked in rev order, so throw.
     # The caller will see the exception and walk the commit tree instead.
     if not follow:
-        raise cmdutil.FileWalkError("Cannot walk via filelog")
+        raise cmdutil.FileWalkError(b"Cannot walk via filelog")
 
     wanted = set()
     minrev, maxrev = min(revs), max(revs)
 
-    pctx = repo['.']
+    pctx = repo[b'.']
     for filename in match.files():
         if filename not in pctx:
-            raise error.Abort(_('cannot follow file not in parent '
-                               'revision: "%s"') % filename)
+            raise error.Abort(
+                _(b'cannot follow file not in parent revision: "%s"') % filename
+            )
         fctx = pctx[filename]
 
         linkrev = fctx.linkrev()
@@ -695,6 +813,7 @@
 
     return wanted
 
+
 def filelogrevset(orig, repo, subset, x):
     """``filelog(pattern)``
     Changesets connected to the specified filelog.
@@ -708,9 +827,10 @@
         return orig(repo, subset, x)
 
     # i18n: "filelog" is a keyword
-    pat = revset.getstring(x, _("filelog requires a pattern"))
-    m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
-                       ctx=repo[None])
+    pat = revset.getstring(x, _(b"filelog requires a pattern"))
+    m = match.match(
+        repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None]
+    )
     s = set()
 
     if not match.patkind(pat):
@@ -733,7 +853,8 @@
 
     return smartset.baseset([r for r in subset if r in s])
 
-@command('gc', [], _('hg gc [REPO...]'), norepo=True)
+
+@command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True)
 def gc(ui, *args, **opts):
     '''garbage collect the client and server filelog caches
     '''
@@ -746,7 +867,7 @@
 
     # get repo client and server cache
     repopaths = []
-    pwd = ui.environ.get('PWD')
+    pwd = ui.environ.get(b'PWD')
     if pwd:
         repopaths.append(pwd)
 
@@ -771,14 +892,15 @@
     for repo in repos:
         remotefilelogserver.gcserver(ui, repo._repo)
 
+
 def gcclient(ui, cachepath):
     # get list of repos that use this cache
-    repospath = os.path.join(cachepath, 'repos')
+    repospath = os.path.join(cachepath, b'repos')
     if not os.path.exists(repospath):
-        ui.warn(_("no known cache at %s\n") % cachepath)
+        ui.warn(_(b"no known cache at %s\n") % cachepath)
         return
 
-    reposfile = open(repospath, 'rb')
+    reposfile = open(repospath, b'rb')
     repos = {r[:-1] for r in reposfile.readlines()}
     reposfile.close()
 
@@ -790,15 +912,16 @@
     filesrepacked = False
 
     count = 0
-    progress = ui.makeprogress(_("analyzing repositories"), unit="repos",
-                               total=len(repos))
+    progress = ui.makeprogress(
+        _(b"analyzing repositories"), unit=b"repos", total=len(repos)
+    )
     for path in repos:
         progress.update(count)
         count += 1
         try:
             path = ui.expandpath(os.path.normpath(path))
         except TypeError as e:
-            ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
+            ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e))
             traceback.print_exc()
             continue
         try:
@@ -815,15 +938,17 @@
         if not isenabled(repo):
             continue
 
-        if not util.safehasattr(repo, 'name'):
-            ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
+        if not util.safehasattr(repo, b'name'):
+            ui.warn(
+                _(b"repo %s is a misconfigured remotefilelog repo\n") % path
+            )
             continue
 
         # If garbage collection on repack and repack on hg gc are enabled
         # then loose files are repacked and garbage collected.
         # Otherwise regular garbage collection is performed.
-        repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
-        gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
+        repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc')
+        gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack')
         if repackonhggc and gcrepack:
             try:
                 repackmod.incrementalrepack(repo)
@@ -841,6 +966,7 @@
         # Compute a keepset which is not garbage collected
         def keyfn(fname, fnode):
             return fileserverclient.getcachekey(reponame, fname, hex(fnode))
+
         keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
 
     progress.complete()
@@ -848,8 +974,8 @@
     # write list of valid repos back
     oldumask = os.umask(0o002)
     try:
-        reposfile = open(repospath, 'wb')
-        reposfile.writelines([("%s\n" % r) for r in validrepos])
+        reposfile = open(repospath, b'wb')
+        reposfile.writelines([(b"%s\n" % r) for r in validrepos])
         reposfile.close()
     finally:
         os.umask(oldumask)
@@ -858,7 +984,8 @@
     if sharedcache is not None:
         sharedcache.gc(keepkeys)
     elif not filesrepacked:
-        ui.warn(_("warning: no valid repos in repofile\n"))
+        ui.warn(_(b"warning: no valid repos in repofile\n"))
+
 
 def log(orig, ui, repo, *pats, **opts):
     if not isenabled(repo):
@@ -876,7 +1003,7 @@
         # If this is a non-follow log without any revs specified, recommend that
         # the user add -f to speed it up.
         if not follow and not revs:
-            match = scmutil.match(repo['.'], pats, pycompat.byteskwargs(opts))
+            match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts))
             isfile = not match.anypats()
             if isfile:
                 for file in match.files():
@@ -885,31 +1012,37 @@
                         break
 
             if isfile:
-                ui.warn(_("warning: file log can be slow on large repos - " +
-                          "use -f to speed it up\n"))
+                ui.warn(
+                    _(
+                        b"warning: file log can be slow on large repos - "
+                        + b"use -f to speed it up\n"
+                    )
+                )
 
     return orig(ui, repo, *pats, **opts)
 
+
 def revdatelimit(ui, revset):
     """Update revset so that only changesets no older than 'prefetchdays' days
     are included. The default value is set to 14 days. If 'prefetchdays' is
     set to zero or a negative value, the date restriction is not applied.
     """
-    days = ui.configint('remotefilelog', 'prefetchdays')
+    days = ui.configint(b'remotefilelog', b'prefetchdays')
     if days > 0:
-        revset = '(%s) & date(-%s)' % (revset, days)
+        revset = b'(%s) & date(-%s)' % (revset, days)
     return revset
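
A worked example of the rewrite above: with the default prefetchdays of
14, a configured revset such as draft() becomes (draft()) & date(-14),
so only changesets from the last two weeks are prefetched (%d is used
below for the integer):

days = 14
revset = b'draft()'
limited = b'(%s) & date(-%d)' % (revset, days)
assert limited == b'(draft()) & date(-14)'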
 
+
 def readytofetch(repo):
     """Check that enough time has passed since the last background prefetch.
     This only relates to prefetches after operations that change the working
     copy parent. Default delay between background prefetches is 2 minutes.
     """
-    timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
-    fname = repo.vfs.join('lastprefetch')
+    timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay')
+    fname = repo.vfs.join(b'lastprefetch')
 
     ready = False
-    with open(fname, 'a'):
+    with open(fname, b'a'):
         # the with construct above is used to avoid race conditions
         modtime = os.path.getmtime(fname)
         if (time.time() - modtime) > timeout:
@@ -918,71 +1051,84 @@
 
     return ready
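
The mtime trick above doubles as a timestamp and a lock-free guard: the
append-mode open creates the marker if missing without clobbering it. A
standalone sketch with an illustrative marker path and the two-minute
default:

import os
import time

def readytorun(marker, timeout=120):
    with open(marker, 'a'):     # create if missing; avoids a stat race
        if time.time() - os.path.getmtime(marker) > timeout:
            os.utime(marker, None)   # reset the clock for the next run
            return True
    return False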
 
+
 def wcpprefetch(ui, repo, **kwargs):
     """Prefetches, in the background, the revisions specified by the
     bgprefetchrevs revset. Also does a background repack if the
     backgroundrepack flag is set in the config.
     """
     shallow = isenabled(repo)
-    bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
+    bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs')
     isready = readytofetch(repo)
 
     if not (shallow and bgprefetchrevs and isready):
         return
 
-    bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
+    bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
     # update a revset with a date limit
     bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
 
     def anon():
-        if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
+        if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch:
             return
         repo.ranprefetch = True
         repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
 
     repo._afterlock(anon)
 
+
 def pull(orig, ui, repo, *pats, **opts):
     result = orig(ui, repo, *pats, **opts)
 
     if isenabled(repo):
         # prefetch if it's configured
-        prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
-        bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
-        bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')
+        prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch')
+        bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
+        bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch')
+        ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart')
 
         if prefetchrevset:
-            ui.status(_("prefetching file contents\n"))
+            ui.status(_(b"prefetching file contents\n"))
             revs = scmutil.revrange(repo, [prefetchrevset])
-            base = repo['.'].rev()
+            base = repo[b'.'].rev()
             if bgprefetch:
-                repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
+                repo.backgroundprefetch(
+                    prefetchrevset, repack=bgrepack, ensurestart=ensurestart
+                )
             else:
                 repo.prefetch(revs, base=base)
                 if bgrepack:
-                    repackmod.backgroundrepack(repo, incremental=True)
+                    repackmod.backgroundrepack(
+                        repo, incremental=True, ensurestart=ensurestart
+                    )
         elif bgrepack:
-            repackmod.backgroundrepack(repo, incremental=True)
+            repackmod.backgroundrepack(
+                repo, incremental=True, ensurestart=ensurestart
+            )
 
     return result
 
+
 def exchangepull(orig, repo, remote, *args, **kwargs):
     # Hook into the callstream/getbundle to insert bundle capabilities
     # during a pull.
-    def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
-                       **kwargs):
+    def localgetbundle(
+        orig, source, heads=None, common=None, bundlecaps=None, **kwargs
+    ):
         if not bundlecaps:
             bundlecaps = set()
         bundlecaps.add(constants.BUNDLE2_CAPABLITY)
-        return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
-                    **kwargs)
+        return orig(
+            source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs
+        )
 
-    if util.safehasattr(remote, '_callstream'):
+    if util.safehasattr(remote, b'_callstream'):
         remote._localrepo = repo
-    elif util.safehasattr(remote, 'getbundle'):
-        extensions.wrapfunction(remote, 'getbundle', localgetbundle)
+    elif util.safehasattr(remote, b'getbundle'):
+        extensions.wrapfunction(remote, b'getbundle', localgetbundle)
 
     return orig(repo, remote, *args, **kwargs)
 
+
 def _fileprefetchhook(repo, revs, match):
     if isenabled(repo):
         allfiles = []
@@ -997,76 +1143,99 @@
                     allfiles.append((path, hex(mf[path])))
         repo.fileservice.prefetch(allfiles)
 
-@command('debugremotefilelog', [
-    ('d', 'decompress', None, _('decompress the filelog first')),
-    ], _('hg debugremotefilelog <path>'), norepo=True)
+
+@command(
+    b'debugremotefilelog',
+    [(b'd', b'decompress', None, _(b'decompress the filelog first')),],
+    _(b'hg debugremotefilelog <path>'),
+    norepo=True,
+)
 def debugremotefilelog(ui, path, **opts):
     return debugcommands.debugremotefilelog(ui, path, **opts)
 
-@command('verifyremotefilelog', [
-    ('d', 'decompress', None, _('decompress the filelogs first')),
-    ], _('hg verifyremotefilelogs <directory>'), norepo=True)
+
+@command(
+    b'verifyremotefilelog',
+    [(b'd', b'decompress', None, _(b'decompress the filelogs first')),],
+    _(b'hg verifyremotefilelogs <directory>'),
+    norepo=True,
+)
 def verifyremotefilelog(ui, path, **opts):
     return debugcommands.verifyremotefilelog(ui, path, **opts)
 
-@command('debugdatapack', [
-    ('', 'long', None, _('print the long hashes')),
-    ('', 'node', '', _('dump the contents of node'), 'NODE'),
-    ], _('hg debugdatapack <paths>'), norepo=True)
+
+@command(
+    b'debugdatapack',
+    [
+        (b'', b'long', None, _(b'print the long hashes')),
+        (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'),
+    ],
+    _(b'hg debugdatapack <paths>'),
+    norepo=True,
+)
 def debugdatapack(ui, *paths, **opts):
     return debugcommands.debugdatapack(ui, *paths, **opts)
 
-@command('debughistorypack', [
-    ], _('hg debughistorypack <path>'), norepo=True)
+
+@command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True)
 def debughistorypack(ui, path, **opts):
     return debugcommands.debughistorypack(ui, path)
 
-@command('debugkeepset', [
-    ], _('hg debugkeepset'))
+
+@command(b'debugkeepset', [], _(b'hg debugkeepset'))
 def debugkeepset(ui, repo, **opts):
     # The command is used to measure keepset computation time
     def keyfn(fname, fnode):
         return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
+
     repackmod.keepset(repo, keyfn)
     return
 
-@command('debugwaitonrepack', [
-    ], _('hg debugwaitonrepack'))
+
+@command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack'))
 def debugwaitonrepack(ui, repo, **opts):
     return debugcommands.debugwaitonrepack(repo)
 
-@command('debugwaitonprefetch', [
-    ], _('hg debugwaitonprefetch'))
+
+@command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch'))
 def debugwaitonprefetch(ui, repo, **opts):
     return debugcommands.debugwaitonprefetch(repo)
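
The debug* commands here all use the same registrar-based registration.
A minimal self-contained sketch (command name and body illustrative):

from mercurial import registrar

cmdtable = {}
command = registrar.command(cmdtable)

@command(b'countrevs', [], b'hg countrevs')
def countrevs(ui, repo, **opts):
    ui.write(b'%d\n' % len(repo))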
 
+
 def resolveprefetchopts(ui, opts):
-    if not opts.get('rev'):
-        revset = ['.', 'draft()']
+    if not opts.get(b'rev'):
+        revset = [b'.', b'draft()']
 
-        prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
+        prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None)
         if prefetchrevset:
-            revset.append('(%s)' % prefetchrevset)
-        bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
+            revset.append(b'(%s)' % prefetchrevset)
+        bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None)
         if bgprefetchrevs:
-            revset.append('(%s)' % bgprefetchrevs)
-        revset = '+'.join(revset)
+            revset.append(b'(%s)' % bgprefetchrevs)
+        revset = b'+'.join(revset)
 
         # update a revset with a date limit
         revset = revdatelimit(ui, revset)
 
-        opts['rev'] = [revset]
+        opts[b'rev'] = [revset]
 
-    if not opts.get('base'):
-        opts['base'] = None
+    if not opts.get(b'base'):
+        opts[b'base'] = None
 
     return opts
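
Concretely, the default above unions '.' with draft() and any
configured prefetch revsets using '+', then date-limits the result. A
worked example with an illustrative pullprefetch value:

revset = [b'.', b'draft()']
revset.append(b'(%s)' % b'parents(tip)')   # e.g. a pullprefetch setting
combined = b'+'.join(revset)
assert combined == b'.+draft()+(parents(tip))'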
 
-@command('prefetch', [
-    ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
-    ('', 'repack', False, _('run repack after prefetch')),
-    ('b', 'base', '', _("rev that is assumed to already be local")),
-    ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
+
+@command(
+    b'prefetch',
+    [
+        (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')),
+        (b'', b'repack', False, _(b'run repack after prefetch')),
+        (b'b', b'base', b'', _(b"rev that is assumed to already be local")),
+    ]
+    + commands.walkopts,
+    _(b'hg prefetch [OPTIONS] [FILE...]'),
+    helpcategory=command.CATEGORY_MAINTENANCE,
+)
 def prefetch(ui, repo, *pats, **opts):
     """prefetch file revisions from the server
 
@@ -1079,28 +1248,48 @@
     """
     opts = pycompat.byteskwargs(opts)
     if not isenabled(repo):
-        raise error.Abort(_("repo is not shallow"))
+        raise error.Abort(_(b"repo is not shallow"))
 
     opts = resolveprefetchopts(ui, opts)
-    revs = scmutil.revrange(repo, opts.get('rev'))
-    repo.prefetch(revs, opts.get('base'), pats, opts)
+    revs = scmutil.revrange(repo, opts.get(b'rev'))
+    repo.prefetch(revs, opts.get(b'base'), pats, opts)
+
+    ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart')
 
     # Run repack in background
-    if opts.get('repack'):
-        repackmod.backgroundrepack(repo, incremental=True)
+    if opts.get(b'repack'):
+        repackmod.backgroundrepack(
+            repo, incremental=True, ensurestart=ensurestart
+        )
+
 
-@command('repack', [
-     ('', 'background', None, _('run in a background process'), None),
-     ('', 'incremental', None, _('do an incremental repack'), None),
-     ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
-    ], _('hg repack [OPTIONS]'))
+@command(
+    b'repack',
+    [
+        (b'', b'background', None, _(b'run in a background process'), None),
+        (b'', b'incremental', None, _(b'do an incremental repack'), None),
+        (
+            b'',
+            b'packsonly',
+            None,
+            _(b'only repack packs (skip loose objects)'),
+            None,
+        ),
+    ],
+    _(b'hg repack [OPTIONS]'),
+)
 def repack_(ui, repo, *pats, **opts):
     if opts.get(r'background'):
-        repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'),
-                                   packsonly=opts.get(r'packsonly', False))
+        ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart')
+        repackmod.backgroundrepack(
+            repo,
+            incremental=opts.get(r'incremental'),
+            packsonly=opts.get(r'packsonly', False),
+            ensurestart=ensurestart,
+        )
         return
 
-    options = {'packsonly': opts.get(r'packsonly')}
+    options = {b'packsonly': opts.get(r'packsonly')}
 
     try:
         if opts.get(r'incremental'):
@@ -1110,4 +1299,4 @@
     except repackmod.RepackAlreadyRunning as ex:
         # Don't propagate the exception if the repack is already in
         # progress, since we want the command to exit 0.
-        repo.ui.warn('%s\n' % ex)
+        repo.ui.warn(b'%s\n' % ex)
--- a/hgext/remotefilelog/basepack.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/basepack.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,6 +9,10 @@
 import time
 
 from mercurial.i18n import _
+from mercurial.pycompat import (
+    getattr,
+    open,
+)
 from mercurial import (
     node as nodemod,
     policy,
@@ -45,7 +49,7 @@
 # bisect) with (8 step fanout scan + 1 step bisect)
 # 5 step bisect = log(2^16 / 8 / 255)  # fanout
 # 10 step fanout scan = 2^16 / (2^16 / 8)  # fanout space divided by entries
-SMALLFANOUTCUTOFF = 2**16 // 8
+SMALLFANOUTCUTOFF = 2 ** 16 // 8
 
 # The amount of time to wait between checking for new packs. This prevents an
 # exception when data is moved to a new pack after the process has already
@@ -56,9 +60,10 @@
     # With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening.
     # The 'e' flag will be ignored on older versions of glibc.
     # Python 3 can't handle the 'e' flag.
-    PACKOPENMODE = 'rbe'
+    PACKOPENMODE = b'rbe'
 else:
-    PACKOPENMODE = 'rb'
+    PACKOPENMODE = b'rb'
+
 
 class _cachebackedpacks(object):
     def __init__(self, packs, cachesize):
@@ -105,6 +110,7 @@
         # Data not found in any pack.
         self._lastpack = None
 
+
 class basepackstore(object):
     # Default cache size limit for the pack files.
     DEFAULTCACHESIZE = 100
@@ -130,7 +136,7 @@
                 # Someone could have removed the file since we retrieved the
                 # list of paths.
                 if getattr(ex, 'errno', None) != errno.ENOENT:
-                    ui.warn(_('unable to load pack %s: %s\n') % (filepath, ex))
+                    ui.warn(_(b'unable to load pack %s: %s\n') % (filepath, ex))
                 continue
             packs.append(pack)
 
@@ -161,11 +167,14 @@
                 # (the index file and the pack file), we can yield once we see
                 # it twice.
                 if id:
-                    sizes[id] += stat.st_size # Sum both files' sizes together
+                    sizes[id] += stat.st_size  # Sum both files' sizes together
                     mtimes[id].append(stat.st_mtime)
                     if id in ids:
-                        yield (os.path.join(self.path, id), max(mtimes[id]),
-                            sizes[id])
+                        yield (
+                            os.path.join(self.path, id),
+                            max(mtimes[id]),
+                            sizes[id],
+                        )
                     else:
                         ids.add(id)
         except OSError as ex:
@@ -205,8 +214,8 @@
         """Returns metrics on the state of this store."""
         size, count = self.gettotalsizeandcount()
         return {
-            'numpacks': count,
-            'totalpacksize': size,
+            b'numpacks': count,
+            b'totalpacksize': size,
         }
 
     def getpack(self, path):
@@ -259,6 +268,7 @@
 
         return newpacks
 
+
 class versionmixin(object):
     # Mix-in for classes with multiple supported versions
     VERSION = None
@@ -270,14 +280,15 @@
                 # only affect this instance
                 self.VERSION = version
             elif self.VERSION != version:
-                raise RuntimeError('inconsistent version: %d' % version)
+                raise RuntimeError(b'inconsistent version: %d' % version)
         else:
-            raise RuntimeError('unsupported version: %d' % version)
+            raise RuntimeError(b'unsupported version: %d' % version)
+
 
 class basepack(versionmixin):
     # The maximum amount we should read via mmap before remmaping so the old
     # pages can be released (100MB)
-    MAXPAGEDIN = 100 * 1024**2
+    MAXPAGEDIN = 100 * 1024 ** 2
 
     SUPPORTED_VERSIONS = [2]
 
@@ -291,12 +302,12 @@
 
         self._index = None
         self._data = None
-        self.freememory() # initialize the mmap
+        self.freememory()  # initialize the mmap
 
-        version = struct.unpack('!B', self._data[:PACKVERSIONSIZE])[0]
+        version = struct.unpack(b'!B', self._data[:PACKVERSIONSIZE])[0]
         self._checkversion(version)
 
-        version, config = struct.unpack('!BB', self._index[:INDEXVERSIONSIZE])
+        version, config = struct.unpack(b'!BB', self._index[:INDEXVERSIONSIZE])
         self._checkversion(version)
 
         if 0b10000000 & config:
@@ -307,18 +318,19 @@
     @util.propertycache
     def _fanouttable(self):
         params = self.params
-        rawfanout = self._index[FANOUTSTART:FANOUTSTART + params.fanoutsize]
+        rawfanout = self._index[FANOUTSTART : FANOUTSTART + params.fanoutsize]
         fanouttable = []
         for i in pycompat.xrange(0, params.fanoutcount):
             loc = i * 4
-            fanoutentry = struct.unpack('!I', rawfanout[loc:loc + 4])[0]
+            fanoutentry = struct.unpack(b'!I', rawfanout[loc : loc + 4])[0]
             fanouttable.append(fanoutentry)
         return fanouttable
 
     @util.propertycache
     def _indexend(self):
-        nodecount = struct.unpack_from('!Q', self._index,
-                                       self.params.indexstart - 8)[0]
+        nodecount = struct.unpack_from(
+            b'!Q', self._index, self.params.indexstart - 8
+        )[0]
         return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH
 
     def freememory(self):
@@ -335,8 +347,9 @@
         # TODO: use an opener/vfs to access these paths
         with open(self.indexpath, PACKOPENMODE) as indexfp:
             # memory-map the file, size 0 means whole file
-            self._index = mmap.mmap(indexfp.fileno(), 0,
-                                    access=mmap.ACCESS_READ)
+            self._index = mmap.mmap(
+                indexfp.fileno(), 0, access=mmap.ACCESS_READ
+            )
         with open(self.packpath, PACKOPENMODE) as datafp:
             self._data = mmap.mmap(datafp.fileno(), 0, access=mmap.ACCESS_READ)
 
@@ -358,12 +371,12 @@
     def iterentries(self):
         raise NotImplementedError()
 
+
 class mutablebasepack(versionmixin):
-
     def __init__(self, ui, packdir, version=2):
         self._checkversion(version)
         # TODO(augie): make this configurable
-        self._compressor = 'GZ'
+        self._compressor = b'GZ'
         opener = vfsmod.vfs(packdir)
         opener.createmode = 0o444
         self.opener = opener
@@ -372,9 +385,11 @@
 
         shallowutil.mkstickygroupdir(ui, packdir)
         self.packfp, self.packpath = opener.mkstemp(
-            suffix=self.PACKSUFFIX + '-tmp')
+            suffix=self.PACKSUFFIX + b'-tmp'
+        )
         self.idxfp, self.idxpath = opener.mkstemp(
-            suffix=self.INDEXSUFFIX + '-tmp')
+            suffix=self.INDEXSUFFIX + b'-tmp'
+        )
         self.packfp = os.fdopen(self.packfp, r'wb+')
         self.idxfp = os.fdopen(self.idxfp, r'wb+')
         self.sha = hashlib.sha1()
@@ -389,7 +404,7 @@
         # Write header
         # TODO: make it extensible (ex: allow specifying compression algorithm,
         # a flexible key/value header, delta algorithm, fanout size, etc)
-        versionbuf = struct.pack('!B', self.VERSION) # unsigned 1 byte int
+        versionbuf = struct.pack(b'!B', self.VERSION)  # unsigned 1 byte int
         self.writeraw(versionbuf)
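
The single version byte written here is what basepack.__init__ reads
back from the start of the pack file; a round-trip sketch of that
one-byte header:

import struct

VERSION = 2
buf = struct.pack(b'!B', VERSION)             # write side (above)
(version,) = struct.unpack(b'!B', buf[:1])    # read side (basepack)
assert version == VERSION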
 
     def __enter__(self):
@@ -474,19 +489,20 @@
             count += 1
 
             # Must use [0] on the unpack result since it's always a tuple.
-            fanoutkey = struct.unpack(params.fanoutstruct,
-                                      node[:params.fanoutprefix])[0]
+            fanoutkey = struct.unpack(
+                params.fanoutstruct, node[: params.fanoutprefix]
+            )[0]
             if fanouttable[fanoutkey] == EMPTYFANOUT:
                 fanouttable[fanoutkey] = location
 
-        rawfanouttable = ''
+        rawfanouttable = b''
         last = 0
         for offset in fanouttable:
             offset = offset if offset != EMPTYFANOUT else last
             last = offset
-            rawfanouttable += struct.pack('!I', offset)
+            rawfanouttable += struct.pack(b'!I', offset)
 
-        rawentrieslength = struct.pack('!Q', len(self.entries))
+        rawentrieslength = struct.pack(b'!Q', len(self.entries))
 
         # The index offset is its location in the file. So after the 2 byte
         # header and the fanouttable.
@@ -509,11 +525,17 @@
         config = 0
         if indexparams.fanoutprefix == LARGEFANOUTPREFIX:
             config = 0b10000000
-        self.idxfp.write(struct.pack('!BB', self.VERSION, config))
+        self.idxfp.write(struct.pack(b'!BB', self.VERSION, config))
+
 
 class indexparams(object):
-    __slots__ = (r'fanoutprefix', r'fanoutstruct', r'fanoutcount',
-                 r'fanoutsize', r'indexstart')
+    __slots__ = (
+        r'fanoutprefix',
+        r'fanoutstruct',
+        r'fanoutcount',
+        r'fanoutsize',
+        r'indexstart',
+    )
 
     def __init__(self, prefixsize, version):
         self.fanoutprefix = prefixsize
@@ -522,14 +544,14 @@
         # converts the node prefix into an integer location in the fanout
         # table).
         if prefixsize == SMALLFANOUTPREFIX:
-            self.fanoutstruct = '!B'
+            self.fanoutstruct = b'!B'
         elif prefixsize == LARGEFANOUTPREFIX:
-            self.fanoutstruct = '!H'
+            self.fanoutstruct = b'!H'
         else:
-            raise ValueError("invalid fanout prefix size: %s" % prefixsize)
+            raise ValueError(b"invalid fanout prefix size: %s" % prefixsize)
 
         # The number of fanout table entries
-        self.fanoutcount = 2**(prefixsize * 8)
+        self.fanoutcount = 2 ** (prefixsize * 8)
 
         # The total bytes used by the fanout table
         self.fanoutsize = self.fanoutcount * 4
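
A worked example of these parameters: a one-byte fanout prefix yields
256 four-byte ('!I') entries, i.e. a 1 KiB table, while a two-byte
prefix yields 65536 entries, i.e. 256 KiB:

for prefixsize in (1, 2):
    fanoutcount = 2 ** (prefixsize * 8)   # entries the prefix can address
    fanoutsize = fanoutcount * 4          # four bytes per '!I' entry
    print(prefixsize, fanoutcount, fanoutsize)
# 1 256 1024
# 2 65536 262144
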
--- a/hgext/remotefilelog/basestore.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/basestore.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,6 +9,7 @@
 
 from mercurial.i18n import _
 from mercurial.node import bin, hex
+from mercurial.pycompat import open
 from mercurial import (
     error,
     pycompat,
@@ -19,6 +20,7 @@
     shallowutil,
 )
 
+
 class basestore(object):
     def __init__(self, repo, path, reponame, shared=False):
         """Creates a remotefilelog store object for the given repo name.
@@ -37,13 +39,15 @@
         self._shared = shared
         self._uid = os.getuid() if not pycompat.iswindows else None
 
-        self._validatecachelog = self.ui.config("remotefilelog",
-                                                "validatecachelog")
-        self._validatecache = self.ui.config("remotefilelog", "validatecache",
-                                             'on')
-        if self._validatecache not in ('on', 'strict', 'off'):
-            self._validatecache = 'on'
-        if self._validatecache == 'off':
+        self._validatecachelog = self.ui.config(
+            b"remotefilelog", b"validatecachelog"
+        )
+        self._validatecache = self.ui.config(
+            b"remotefilelog", b"validatecache", b'on'
+        )
+        if self._validatecache not in (b'on', b'strict', b'off'):
+            self._validatecache = b'on'
+        if self._validatecache == b'off':
             self._validatecache = False
 
         if shared:
@@ -54,8 +58,11 @@
         for name, node in keys:
             filepath = self._getfilepath(name, node)
             exists = os.path.exists(filepath)
-            if (exists and self._validatecache == 'strict' and
-                not self._validatekey(filepath, 'contains')):
+            if (
+                exists
+                and self._validatecache == b'strict'
+                and not self._validatekey(filepath, b'contains')
+            ):
                 exists = False
             if not exists:
                 missing.append((name, node))
@@ -77,8 +84,9 @@
         ui = self.ui
         entries = ledger.sources.get(self, [])
         count = 0
-        progress = ui.makeprogress(_("cleaning up"), unit="files",
-                                   total=len(entries))
+        progress = ui.makeprogress(
+            _(b"cleaning up"), unit=b"files", total=len(entries)
+        )
         for entry in entries:
             if entry.gced or (entry.datarepacked and entry.historyrepacked):
                 progress.update(count)
@@ -114,7 +122,7 @@
                     pass
 
             elif stat.S_ISREG(mode):
-                if name.endswith('_old'):
+                if name.endswith(b'_old'):
                     oldfiles.add(name[:-4])
                 else:
                     otherfiles.add(name)
@@ -123,7 +131,7 @@
         # corresponding file without the suffix '_old'. See addremotefilelognode
         # method for the generation/purpose of files with '_old' suffix.
         for filename in oldfiles - otherfiles:
-            filepath = os.path.join(rootdir, filename + '_old')
+            filepath = os.path.join(rootdir, filename + b'_old')
             util.tryunlink(filepath)
 
     def _getfiles(self):
@@ -140,7 +148,7 @@
 
         filenamemap = self._resolvefilenames(existing.keys())
 
-        for filename, sha in filenamemap.iteritems():
+        for filename, sha in pycompat.iteritems(filenamemap):
             yield (filename, existing[sha])
 
     def _resolvefilenames(self, hashes):
@@ -157,7 +165,7 @@
         missingfilename = set(hashes)
 
         # Start with a full manifest, since it'll cover the majority of files
-        for filename in self.repo['tip'].manifest():
+        for filename in self.repo[b'tip'].manifest():
             sha = hashlib.sha1(filename).digest()
             if sha in missingfilename:
                 filenames[filename] = sha
@@ -178,8 +186,11 @@
         return filenames
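
Since store keys are sha1(filename), recovering names means hashing
candidate filenames and matching the digests against the wanted set, as
the loop above does against the manifest. A standalone sketch:

import hashlib

def resolvenames(candidates, wanted):
    """Map each candidate filename to its sha1 digest if it is wanted."""
    wanted = set(wanted)
    result = {}
    for name in candidates:
        digest = hashlib.sha1(name).digest()
        if digest in wanted:
            result[name] = digest
    return result

assert list(resolvenames([b'a.txt', b'b.txt'],
                         {hashlib.sha1(b'a.txt').digest()})) == [b'a.txt']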
 
     def _getrepocachepath(self):
-        return os.path.join(
-            self._path, self._reponame) if self._shared else self._path
+        return (
+            os.path.join(self._path, self._reponame)
+            if self._shared
+            else self._path
+        )
 
     def _listkeys(self):
         """List all the remotefilelog keys that exist in the store.
@@ -214,13 +225,14 @@
             data = shallowutil.readfile(filepath)
             if self._validatecache and not self._validatedata(data, filepath):
                 if self._validatecachelog:
-                    with open(self._validatecachelog, 'a+') as f:
-                        f.write("corrupt %s during read\n" % filepath)
-                os.rename(filepath, filepath + ".corrupt")
-                raise KeyError("corrupt local cache file %s" % filepath)
+                    with open(self._validatecachelog, b'a+') as f:
+                        f.write(b"corrupt %s during read\n" % filepath)
+                os.rename(filepath, filepath + b".corrupt")
+                raise KeyError(b"corrupt local cache file %s" % filepath)
         except IOError:
-            raise KeyError("no file found at %s for %s:%s" % (filepath, name,
-                                                              hex(node)))
+            raise KeyError(
+                b"no file found at %s for %s:%s" % (filepath, name, hex(node))
+            )
 
         return data
 
@@ -232,7 +244,7 @@
             # if this node already exists, save the old version for
             # recovery/debugging purposes.
             if os.path.exists(filepath):
-                newfilename = filepath + '_old'
+                newfilename = filepath + b'_old'
                 # newfilename can be read-only and shutil.copy will fail.
                 # Delete newfilename to avoid it
                 if os.path.exists(newfilename):
@@ -243,9 +255,10 @@
             shallowutil.writefile(filepath, data, readonly=True)
 
             if self._validatecache:
-                if not self._validatekey(filepath, 'write'):
-                    raise error.Abort(_("local cache write was corrupted %s") %
-                                      filepath)
+                if not self._validatekey(filepath, b'write'):
+                    raise error.Abort(
+                        _(b"local cache write was corrupted %s") % filepath
+                    )
         finally:
             os.umask(oldumask)
 
@@ -255,26 +268,26 @@
         collection, since it allows us to inspect the repos to see what nodes
         they want to be kept alive in the store.
         """
-        repospath = os.path.join(self._path, "repos")
-        with open(repospath, 'ab') as reposfile:
-            reposfile.write(os.path.dirname(path) + "\n")
+        repospath = os.path.join(self._path, b"repos")
+        with open(repospath, b'ab') as reposfile:
+            reposfile.write(os.path.dirname(path) + b"\n")
 
         repospathstat = os.stat(repospath)
         if repospathstat.st_uid == self._uid:
             os.chmod(repospath, 0o0664)
 
     def _validatekey(self, path, action):
-        with open(path, 'rb') as f:
+        with open(path, b'rb') as f:
             data = f.read()
 
         if self._validatedata(data, path):
             return True
 
         if self._validatecachelog:
-            with open(self._validatecachelog, 'ab+') as f:
-                f.write("corrupt %s during %s\n" % (path, action))
+            with open(self._validatecachelog, b'ab+') as f:
+                f.write(b"corrupt %s during %s\n" % (path, action))
 
-        os.rename(path, path + ".corrupt")
+        os.rename(path, path + b".corrupt")
         return False
 
     def _validatedata(self, data, path):
@@ -288,7 +301,7 @@
 
                 # extract the node from the metadata
                 offset += size
-                datanode = data[offset:offset + 20]
+                datanode = data[offset : offset + 20]
 
                 # and compare against the path
                 if os.path.basename(path) == hex(datanode):
@@ -314,16 +327,17 @@
         # keep files newer than a day even if they aren't needed
         limit = time.time() - (60 * 60 * 24)
 
-        progress = ui.makeprogress(_("removing unnecessary files"),
-                                   unit="files")
+        progress = ui.makeprogress(
+            _(b"removing unnecessary files"), unit=b"files"
+        )
         progress.update(0)
         for root, dirs, files in os.walk(cachepath):
             for file in files:
-                if file == 'repos':
+                if file == b'repos':
                     continue
 
                 # Don't delete pack files
-                if '/packs/' in root:
+                if b'/packs/' in root:
                     continue
 
                 progress.update(count)
@@ -336,7 +350,9 @@
                     # errno.ENOENT = no such file or directory
                     if e.errno != errno.ENOENT:
                         raise
-                    msg = _("warning: file %s was removed by another process\n")
+                    msg = _(
+                        b"warning: file %s was removed by another process\n"
+                    )
                     ui.warn(msg % path)
                     continue
 
@@ -352,19 +368,22 @@
                         # errno.ENOENT = no such file or directory
                         if e.errno != errno.ENOENT:
                             raise
-                        msg = _("warning: file %s was removed by another "
-                                "process\n")
+                        msg = _(
+                            b"warning: file %s was removed by another "
+                            b"process\n"
+                        )
                         ui.warn(msg % path)
                         continue
                     removed += 1
         progress.complete()
 
         # remove oldest files until under limit
-        limit = ui.configbytes("remotefilelog", "cachelimit")
+        limit = ui.configbytes(b"remotefilelog", b"cachelimit")
         if size > limit:
             excess = size - limit
-            progress = ui.makeprogress(_("enforcing cache limit"), unit="bytes",
-                                       total=excess)
+            progress = ui.makeprogress(
+                _(b"enforcing cache limit"), unit=b"bytes", total=excess
+            )
             removedexcess = 0
             while queue and size > limit and size > 0:
                 progress.update(removedexcess)
@@ -375,17 +394,25 @@
                     # errno.ENOENT = no such file or directory
                     if e.errno != errno.ENOENT:
                         raise
-                    msg = _("warning: file %s was removed by another process\n")
+                    msg = _(
+                        b"warning: file %s was removed by another process\n"
+                    )
                     ui.warn(msg % oldpath)
                 size -= oldpathstat.st_size
                 removed += 1
                 removedexcess += oldpathstat.st_size
             progress.complete()
 
-        ui.status(_("finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n")
-                  % (removed, count,
-                     float(originalsize) / 1024.0 / 1024.0 / 1024.0,
-                     float(size) / 1024.0 / 1024.0 / 1024.0))
+        ui.status(
+            _(b"finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n")
+            % (
+                removed,
+                count,
+                float(originalsize) / 1024.0 / 1024.0 / 1024.0,
+                float(size) / 1024.0 / 1024.0 / 1024.0,
+            )
+        )
+
 
 class baseunionstore(object):
     def __init__(self, *args, **kwargs):
@@ -400,20 +427,21 @@
 
     def markforrefresh(self):
         for store in self.stores:
-            if util.safehasattr(store, 'markforrefresh'):
+            if util.safehasattr(store, b'markforrefresh'):
                 store.markforrefresh()
 
     @staticmethod
     def retriable(fn):
         def noop(*args):
             pass
+
         def wrapped(self, *args, **kwargs):
             retrylog = self.retrylog or noop
             funcname = fn.__name__
             i = 0
             while i < self.numattempts:
                 if i > 0:
-                    retrylog('re-attempting (n=%d) %s\n' % (i, funcname))
+                    retrylog(b're-attempting (n=%d) %s\n' % (i, funcname))
                     self.markforrefresh()
                 i += 1
                 try:
@@ -421,7 +449,10 @@
                 except KeyError:
                     if i == self.numattempts:
                         # retries exhausted
-                        retrylog('retries exhausted in %s, raising KeyError\n' %
-                                 pycompat.sysbytes(funcname))
+                        retrylog(
+                            b'retries exhausted in %s, raising KeyError\n'
+                            % pycompat.sysbytes(funcname)
+                        )
                         raise
+
         return wrapped
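
The ``retriable`` decorator above retries store methods that raise
``KeyError``, asking the stores to refresh between attempts and logging each
retry. A minimal standalone sketch of the same pattern, with a hypothetical
refresh hook and a fixed attempt count instead of the extension's
configuration::

   import functools

   def retriable(numattempts=3):
       # Retry a method that signals a transient miss with KeyError,
       # refreshing state between tries (hypothetical standalone variant).
       def decorator(fn):
           @functools.wraps(fn)
           def wrapped(self, *args, **kwargs):
               for attempt in range(numattempts):
                   if attempt > 0:
                       self.markforrefresh()  # assumed refresh hook
                   try:
                       return fn(self, *args, **kwargs)
                   except KeyError:
                       if attempt == numattempts - 1:
                           raise  # retries exhausted
           return wrapped
       return decorator
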
--- a/hgext/remotefilelog/connectionpool.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/connectionpool.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,12 +10,14 @@
 from mercurial import (
     extensions,
     hg,
+    pycompat,
     sshpeer,
     util,
 )
 
 _sshv1peer = sshpeer.sshv1peer
 
+
 class connectionpool(object):
     def __init__(self, repo):
         self._repo = repo
@@ -40,6 +42,7 @@
                 pass
 
         if conn is None:
+
             def _cleanup(orig):
                 # close pipee first so peer.cleanup reading it won't deadlock,
                 # if there are other processes with pipeo open (i.e. us).
@@ -50,18 +53,19 @@
 
             peer = hg.peer(self._repo.ui, {}, path)
             if util.safehasattr(peer, 'cleanup'):
-                extensions.wrapfunction(peer, 'cleanup', _cleanup)
+                extensions.wrapfunction(peer, b'cleanup', _cleanup)
 
             conn = connection(pathpool, peer)
 
         return conn
 
     def close(self):
-        for pathpool in self._pool.itervalues():
+        for pathpool in pycompat.itervalues(self._pool):
             for conn in pathpool:
                 conn.close()
             del pathpool[:]
 
+
 class connection(object):
     def __init__(self, pool, peer):
         self._pool = pool
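
The change from ``self._pool.itervalues()`` to
``pycompat.itervalues(self._pool)`` keeps the code importable on Python 3,
where ``dict.itervalues()`` no longer exists. A sketch of what such a
compatibility shim looks like (the real helper lives in
``mercurial.pycompat``)::

   import sys

   if sys.version_info[0] >= 3:
       def itervalues(d):
           # On Python 3, values() is already a lazy view.
           return iter(d.values())
   else:
       def itervalues(d):
           # On Python 2, itervalues() avoids building a list.
           return d.itervalues()
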
--- a/hgext/remotefilelog/constants.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/constants.py	Mon Oct 21 11:09:48 2019 -0400
@@ -4,38 +4,40 @@
 
 from mercurial.i18n import _
 
-NETWORK_CAP_LEGACY_SSH_GETFILES = 'exp-remotefilelog-ssh-getfiles-1'
+NETWORK_CAP_LEGACY_SSH_GETFILES = b'exp-remotefilelog-ssh-getfiles-1'
 
-SHALLOWREPO_REQUIREMENT = "exp-remotefilelog-repo-req-1"
+SHALLOWREPO_REQUIREMENT = b"exp-remotefilelog-repo-req-1"
 
-BUNDLE2_CAPABLITY = "exp-remotefilelog-b2cap-1"
+BUNDLE2_CAPABLITY = b"exp-remotefilelog-b2cap-1"
 
-FILENAMESTRUCT = '!H'
+FILENAMESTRUCT = b'!H'
 FILENAMESIZE = struct.calcsize(FILENAMESTRUCT)
 
 NODESIZE = 20
-PACKREQUESTCOUNTSTRUCT = '!I'
+PACKREQUESTCOUNTSTRUCT = b'!I'
 
-NODECOUNTSTRUCT = '!I'
+NODECOUNTSTRUCT = b'!I'
 NODECOUNTSIZE = struct.calcsize(NODECOUNTSTRUCT)
 
-PATHCOUNTSTRUCT = '!I'
+PATHCOUNTSTRUCT = b'!I'
 PATHCOUNTSIZE = struct.calcsize(PATHCOUNTSTRUCT)
 
-FILEPACK_CATEGORY=""
-TREEPACK_CATEGORY="manifests"
+FILEPACK_CATEGORY = b""
+TREEPACK_CATEGORY = b"manifests"
 
 ALL_CATEGORIES = [FILEPACK_CATEGORY, TREEPACK_CATEGORY]
 
 # revision metadata keys. must be a single character.
-METAKEYFLAG = 'f'  # revlog flag
-METAKEYSIZE = 's'  # full rawtext size
+METAKEYFLAG = b'f'  # revlog flag
+METAKEYSIZE = b's'  # full rawtext size
+
 
 def getunits(category):
     if category == FILEPACK_CATEGORY:
-        return _("files")
+        return _(b"files")
     if category == TREEPACK_CATEGORY:
-        return _("trees")
+        return _(b"trees")
+
 
 # Repack options passed to ``markledger``.
-OPTION_PACKSONLY = 'packsonly'
+OPTION_PACKSONLY = b'packsonly'
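
Each wire-format constant above pairs a struct format with a size computed by
``struct.calcsize``, so parsers never hard-code byte counts. The formats use
``!`` (network byte order) with fixed standard sizes, for example::

   import struct

   # b'!H' is a big-endian unsigned 16-bit int: 2 bytes (FILENAMESIZE).
   assert struct.calcsize(b'!H') == 2
   # b'!I' is a big-endian unsigned 32-bit int: 4 bytes (NODECOUNTSIZE).
   assert struct.calcsize(b'!I') == 4

   # Round-tripping a filename length prefix:
   encoded = struct.pack(b'!H', 11)
   (length,) = struct.unpack(b'!H', encoded)
   assert length == 11
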
--- a/hgext/remotefilelog/contentstore.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/contentstore.py	Mon Oct 21 11:09:48 2019 -0400
@@ -3,6 +3,7 @@
 import threading
 
 from mercurial.node import hex, nullid
+from mercurial.pycompat import getattr
 from mercurial import (
     mdiff,
     pycompat,
@@ -14,9 +15,11 @@
     shallowutil,
 )
 
+
 class ChainIndicies(object):
     """A static class for easy reference to the delta chain indicies.
     """
+
     # The filename of this revision delta
     NAME = 0
     # The mercurial file node for this revision delta
@@ -31,6 +34,7 @@
     # The actual delta or full text data.
     DATA = 4
 
+
 class unioncontentstore(basestore.baseunionstore):
     def __init__(self, *args, **kwargs):
         super(unioncontentstore, self).__init__(*args, **kwargs)
@@ -132,8 +136,9 @@
         raise KeyError((name, hex(node)))
 
     def add(self, name, node, data):
-        raise RuntimeError("cannot add content only to remotefilelog "
-                           "contentstore")
+        raise RuntimeError(
+            b"cannot add content only to remotefilelog contentstore"
+        )
 
     def getmissing(self, keys):
         missing = keys
@@ -146,12 +151,13 @@
         if self.writestore:
             self.writestore.addremotefilelognode(name, node, data)
         else:
-            raise RuntimeError("no writable store configured")
+            raise RuntimeError(b"no writable store configured")
 
     def markledger(self, ledger, options=None):
         for store in self.stores:
             store.markledger(ledger, options)
 
+
 class remotefilelogcontentstore(basestore.basestore):
     def __init__(self, *args, **kwargs):
         super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
@@ -162,7 +168,7 @@
         data = self._getdata(name, node)
 
         offset, size, flags = shallowutil.parsesizeflags(data)
-        content = data[offset:offset + size]
+        content = data[offset : offset + size]
 
         ancestormap = shallowutil.ancestormap(data)
         p1, p2, linknode, copyfrom = ancestormap[node]
@@ -202,22 +208,23 @@
         return self._threaddata.metacache[1]
 
     def add(self, name, node, data):
-        raise RuntimeError("cannot add content only to remotefilelog "
-                           "contentstore")
+        raise RuntimeError(
+            b"cannot add content only to remotefilelog contentstore"
+        )
 
     def _sanitizemetacache(self):
         metacache = getattr(self._threaddata, 'metacache', None)
         if metacache is None:
-            self._threaddata.metacache = (None, None) # (node, meta)
+            self._threaddata.metacache = (None, None)  # (node, meta)
 
     def _updatemetacache(self, node, size, flags):
         self._sanitizemetacache()
         if node == self._threaddata.metacache[0]:
             return
-        meta = {constants.METAKEYFLAG: flags,
-                constants.METAKEYSIZE: size}
+        meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size}
         self._threaddata.metacache = (node, meta)
 
+
 class remotecontentstore(object):
     def __init__(self, ui, fileservice, shared):
         self._fileservice = fileservice
@@ -225,8 +232,9 @@
         self._shared = shared
 
     def get(self, name, node):
-        self._fileservice.prefetch([(name, hex(node))], force=True,
-                                   fetchdata=True)
+        self._fileservice.prefetch(
+            [(name, hex(node))], force=True, fetchdata=True
+        )
         return self._shared.get(name, node)
 
     def getdelta(self, name, node):
@@ -242,12 +250,13 @@
         return [(name, node, None, nullid, revision)]
 
     def getmeta(self, name, node):
-        self._fileservice.prefetch([(name, hex(node))], force=True,
-                                   fetchdata=True)
+        self._fileservice.prefetch(
+            [(name, hex(node))], force=True, fetchdata=True
+        )
         return self._shared.getmeta(name, node)
 
     def add(self, name, node, data):
-        raise RuntimeError("cannot add to a remote store")
+        raise RuntimeError(b"cannot add to a remote store")
 
     def getmissing(self, keys):
         return keys
@@ -255,16 +264,17 @@
     def markledger(self, ledger, options=None):
         pass
 
+
 class manifestrevlogstore(object):
     def __init__(self, repo):
         self._store = repo.store
         self._svfs = repo.svfs
         self._revlogs = dict()
-        self._cl = revlog.revlog(self._svfs, '00changelog.i')
+        self._cl = revlog.revlog(self._svfs, b'00changelog.i')
         self._repackstartlinkrev = 0
 
     def get(self, name, node):
-        return self._revlog(name).revision(node, raw=True)
+        return self._revlog(name).rawdata(node)
 
     def getdelta(self, name, node):
         revision = self.get(name, node)
@@ -277,8 +287,10 @@
     def getmeta(self, name, node):
         rl = self._revlog(name)
         rev = rl.rev(node)
-        return {constants.METAKEYFLAG: rl.flags(rev),
-                constants.METAKEYSIZE: rl.rawsize(rev)}
+        return {
+            constants.METAKEYFLAG: rl.flags(rev),
+            constants.METAKEYSIZE: rl.rawsize(rev),
+        }
 
     def getancestors(self, name, node, known=None):
         if known is None:
@@ -300,7 +312,7 @@
                 missing.add(p2)
 
             linknode = self._cl.node(rl.linkrev(ancrev))
-            ancestors[rl.node(ancrev)] = (p1, p2, linknode, '')
+            ancestors[rl.node(ancrev)] = (p1, p2, linknode, b'')
             if not missing:
                 break
         return ancestors
@@ -313,14 +325,14 @@
         return (parents[0], parents[1], cl.node(linkrev), None)
 
     def add(self, *args):
-        raise RuntimeError("cannot add to a revlog store")
+        raise RuntimeError(b"cannot add to a revlog store")
 
     def _revlog(self, name):
         rl = self._revlogs.get(name)
         if rl is None:
-            revlogname = '00manifesttree.i'
-            if name != '':
-                revlogname = 'meta/%s/00manifest.i' % name
+            revlogname = b'00manifesttree.i'
+            if name != b'':
+                revlogname = b'meta/%s/00manifest.i' % name
             rl = revlog.revlog(self._svfs, revlogname)
             self._revlogs[name] = rl
         return rl
@@ -341,8 +353,8 @@
     def markledger(self, ledger, options=None):
         if options and options.get(constants.OPTION_PACKSONLY):
             return
-        treename = ''
-        rl = revlog.revlog(self._svfs, '00manifesttree.i')
+        treename = b''
+        rl = revlog.revlog(self._svfs, b'00manifesttree.i')
         startlinkrev = self._repackstartlinkrev
         endlinkrev = self._repackendlinkrev
         for rev in pycompat.xrange(len(rl) - 1, -1, -1):
@@ -356,10 +368,10 @@
             ledger.markhistoryentry(self, treename, node)
 
         for path, encoded, size in self._store.datafiles():
-            if path[:5] != 'meta/' or path[-2:] != '.i':
+            if path[:5] != b'meta/' or path[-2:] != b'.i':
                 continue
 
-            treename = path[5:-len('/00manifest.i')]
+            treename = path[5 : -len(b'/00manifest.i')]
 
             rl = revlog.revlog(self._svfs, path)
             for rev in pycompat.xrange(len(rl) - 1, -1, -1):
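
``_revlog`` above maps a tree name to its revlog path (the empty name is the
root tree), and ``markledger`` inverts that mapping while scanning store
files. A sketch of the two directions, assuming only the naming scheme
visible in this hunk::

   def treerevlogpath(name):
       # The empty name denotes the root tree manifest.
       if name == b'':
           return b'00manifesttree.i'
       return b'meta/%s/00manifest.i' % name

   def treenamefrompath(path):
       # Inverse mapping for store paths like b'meta/foo/00manifest.i'.
       assert path[:5] == b'meta/' and path[-2:] == b'.i'
       return path[5 : -len(b'/00manifest.i')]

   assert treenamefrompath(treerevlogpath(b'foo/bar')) == b'foo/bar'
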
--- a/hgext/remotefilelog/datapack.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/datapack.py	Mon Oct 21 11:09:48 2019 -0400
@@ -21,8 +21,9 @@
 FULLTEXTINDEXMARK = -1
 NOBASEINDEXMARK = -2
 
-INDEXSUFFIX = '.dataidx'
-PACKSUFFIX = '.datapack'
+INDEXSUFFIX = b'.dataidx'
+PACKSUFFIX = b'.datapack'
+
 
 class datapackstore(basepack.basepackstore):
     INDEXSUFFIX = INDEXSUFFIX
@@ -35,7 +36,7 @@
         return datapack(path)
 
     def get(self, name, node):
-        raise RuntimeError("must use getdeltachain with datapackstore")
+        raise RuntimeError(b"must use getdeltachain with datapackstore")
 
     def getmeta(self, name, node):
         for pack in self.packs:
@@ -83,7 +84,8 @@
         raise KeyError((name, hex(node)))
 
     def add(self, name, node, data):
-        raise RuntimeError("cannot add to datapackstore")
+        raise RuntimeError(b"cannot add to datapackstore")
+
 
 class datapack(basepack.basepack):
     INDEXSUFFIX = INDEXSUFFIX
@@ -91,7 +93,7 @@
 
     # Format is <node><delta offset><pack data offset><pack data size>
     # See the mutabledatapack doccomment for more details.
-    INDEXFORMAT = '!20siQQ'
+    INDEXFORMAT = b'!20siQQ'
     INDEXENTRYLENGTH = 40
 
     SUPPORTED_VERSIONS = [2]
@@ -106,8 +108,9 @@
         return missing
 
     def get(self, name, node):
-        raise RuntimeError("must use getdeltachain with datapack (%s:%s)"
-                           % (name, hex(node)))
+        raise RuntimeError(
+            b"must use getdeltachain with datapack (%s:%s)" % (name, hex(node))
+        )
 
     def getmeta(self, name, node):
         value = self._find(node)
@@ -115,18 +118,18 @@
             raise KeyError((name, hex(node)))
 
         node, deltabaseoffset, offset, size = value
-        rawentry = self._data[offset:offset + size]
+        rawentry = self._data[offset : offset + size]
 
         # see docstring of mutabledatapack for the format
         offset = 0
-        offset += struct.unpack_from('!H', rawentry, offset)[0] + 2 # filename
-        offset += 40 # node, deltabase node
-        offset += struct.unpack_from('!Q', rawentry, offset)[0] + 8 # delta
+        offset += struct.unpack_from(b'!H', rawentry, offset)[0] + 2  # filename
+        offset += 40  # node, deltabase node
+        offset += struct.unpack_from(b'!Q', rawentry, offset)[0] + 8  # delta
 
-        metalen = struct.unpack_from('!I', rawentry, offset)[0]
+        metalen = struct.unpack_from(b'!I', rawentry, offset)[0]
         offset += 4
 
-        meta = shallowutil.parsepackmeta(rawentry[offset:offset + metalen])
+        meta = shallowutil.parsepackmeta(rawentry[offset : offset + metalen])
 
         return meta
 
@@ -155,11 +158,14 @@
         chain = [value]
         deltabaseoffset = value[1]
         entrylen = self.INDEXENTRYLENGTH
-        while (deltabaseoffset != FULLTEXTINDEXMARK
-               and deltabaseoffset != NOBASEINDEXMARK):
+        while (
+            deltabaseoffset != FULLTEXTINDEXMARK
+            and deltabaseoffset != NOBASEINDEXMARK
+        ):
             loc = params.indexstart + deltabaseoffset
-            value = struct.unpack(self.INDEXFORMAT,
-                                  self._index[loc:loc + entrylen])
+            value = struct.unpack(
+                self.INDEXFORMAT, self._index[loc : loc + entrylen]
+            )
             deltabaseoffset = value[1]
             chain.append(value)
 
@@ -175,33 +181,33 @@
         return deltachain
 
     def _readentry(self, offset, size, getmeta=False):
-        rawentry = self._data[offset:offset + size]
+        rawentry = self._data[offset : offset + size]
         self._pagedin += len(rawentry)
 
         # <2 byte len> + <filename>
         lengthsize = 2
-        filenamelen = struct.unpack('!H', rawentry[:2])[0]
-        filename = rawentry[lengthsize:lengthsize + filenamelen]
+        filenamelen = struct.unpack(b'!H', rawentry[:2])[0]
+        filename = rawentry[lengthsize : lengthsize + filenamelen]
 
         # <20 byte node> + <20 byte deltabase>
         nodestart = lengthsize + filenamelen
         deltabasestart = nodestart + NODELENGTH
         node = rawentry[nodestart:deltabasestart]
-        deltabasenode = rawentry[deltabasestart:deltabasestart + NODELENGTH]
+        deltabasenode = rawentry[deltabasestart : deltabasestart + NODELENGTH]
 
         # <8 byte len> + <delta>
         deltastart = deltabasestart + NODELENGTH
-        rawdeltalen = rawentry[deltastart:deltastart + 8]
-        deltalen = struct.unpack('!Q', rawdeltalen)[0]
+        rawdeltalen = rawentry[deltastart : deltastart + 8]
+        deltalen = struct.unpack(b'!Q', rawdeltalen)[0]
 
-        delta = rawentry[deltastart + 8:deltastart + 8 + deltalen]
+        delta = rawentry[deltastart + 8 : deltastart + 8 + deltalen]
         delta = self._decompress(delta)
 
         if getmeta:
             metastart = deltastart + 8 + deltalen
-            metalen = struct.unpack_from('!I', rawentry, metastart)[0]
+            metalen = struct.unpack_from(b'!I', rawentry, metastart)[0]
 
-            rawmeta = rawentry[metastart + 4:metastart + 4 + metalen]
+            rawmeta = rawentry[metastart + 4 : metastart + 4 + metalen]
             meta = shallowutil.parsepackmeta(rawmeta)
             return filename, node, deltabasenode, delta, meta
         else:
@@ -211,12 +217,13 @@
         return zlib.decompress(data)
 
     def add(self, name, node, data):
-        raise RuntimeError("cannot add to datapack (%s:%s)" % (name, node))
+        raise RuntimeError(b"cannot add to datapack (%s:%s)" % (name, node))
 
     def _find(self, node):
         params = self.params
-        fanoutkey = struct.unpack(params.fanoutstruct,
-                                  node[:params.fanoutprefix])[0]
+        fanoutkey = struct.unpack(
+            params.fanoutstruct, node[: params.fanoutprefix]
+        )[0]
         fanout = self._fanouttable
 
         start = fanout[fanoutkey] + params.indexstart
@@ -233,20 +240,20 @@
 
         # Bisect between start and end to find node
         index = self._index
-        startnode = index[start:start + NODELENGTH]
-        endnode = index[end:end + NODELENGTH]
+        startnode = index[start : start + NODELENGTH]
+        endnode = index[end : end + NODELENGTH]
         entrylen = self.INDEXENTRYLENGTH
         if startnode == node:
-            entry = index[start:start + entrylen]
+            entry = index[start : start + entrylen]
         elif endnode == node:
-            entry = index[end:end + entrylen]
+            entry = index[end : end + entrylen]
         else:
             while start < end - entrylen:
                 mid = start + (end - start) // 2
                 mid = mid - ((mid - params.indexstart) % entrylen)
-                midnode = index[mid:mid + NODELENGTH]
+                midnode = index[mid : mid + NODELENGTH]
                 if midnode == node:
-                    entry = index[mid:mid + entrylen]
+                    entry = index[mid : mid + entrylen]
                     break
                 if node > midnode:
                     start = mid
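
The loop above bisects the index directly in its byte buffer: entries are
fixed-width, so the midpoint is realigned to an entry boundary before
comparing node prefixes. A self-contained sketch of the same fixed-width
bisection, using hypothetical 4-byte keys in 8-byte entries rather than
20-byte nodes::

   def bisectfixed(index, key, entrylen, keylen):
       # index is a bytes buffer of sorted fixed-width entries, each
       # beginning with a keylen-byte key; returns the entry or None.
       start, end = 0, len(index) - entrylen
       while start <= end:
           mid = start + (end - start) // 2
           mid -= mid % entrylen  # realign to an entry boundary
           midkey = index[mid : mid + keylen]
           if midkey == key:
               return index[mid : mid + entrylen]
           if key > midkey:
               start = mid + entrylen
           else:
               end = mid - entrylen
       return None

   idx = b''.join(sorted([b'aaaaXXXX', b'bbbbYYYY', b'ccccZZZZ']))
   assert bisectfixed(idx, b'bbbb', 8, 4) == b'bbbbYYYY'
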
@@ -264,8 +271,9 @@
     def cleanup(self, ledger):
         entries = ledger.sources.get(self, [])
         allkeys = set(self)
-        repackedkeys = set((e.filename, e.node) for e in entries if
-                           e.datarepacked or e.gced)
+        repackedkeys = set(
+            (e.filename, e.node) for e in entries if e.datarepacked or e.gced
+        )
 
         if len(allkeys - repackedkeys) == 0:
             if self.path not in ledger.created:
@@ -284,31 +292,32 @@
             oldoffset = offset
 
             # <2 byte len> + <filename>
-            filenamelen = struct.unpack('!H', data[offset:offset + 2])[0]
+            filenamelen = struct.unpack(b'!H', data[offset : offset + 2])[0]
             offset += 2
-            filename = data[offset:offset + filenamelen]
+            filename = data[offset : offset + filenamelen]
             offset += filenamelen
 
             # <20 byte node>
-            node = data[offset:offset + constants.NODESIZE]
+            node = data[offset : offset + constants.NODESIZE]
             offset += constants.NODESIZE
             # <20 byte deltabase>
-            deltabase = data[offset:offset + constants.NODESIZE]
+            deltabase = data[offset : offset + constants.NODESIZE]
             offset += constants.NODESIZE
 
             # <8 byte len> + <delta>
-            rawdeltalen = data[offset:offset + 8]
-            deltalen = struct.unpack('!Q', rawdeltalen)[0]
+            rawdeltalen = data[offset : offset + 8]
+            deltalen = struct.unpack(b'!Q', rawdeltalen)[0]
             offset += 8
 
             # TODO(augie): we should store a header that is the
             # uncompressed size.
-            uncompressedlen = len(self._decompress(
-                data[offset:offset + deltalen]))
+            uncompressedlen = len(
+                self._decompress(data[offset : offset + deltalen])
+            )
             offset += deltalen
 
             # <4 byte len> + <metadata-list>
-            metalen = struct.unpack_from('!I', data, offset)[0]
+            metalen = struct.unpack_from(b'!I', data, offset)[0]
             offset += 4 + metalen
 
             yield (filename, node, deltabase, uncompressedlen)
@@ -318,6 +327,7 @@
             if self.freememory():
                 data = self._data
 
+
 class mutabledatapack(basepack.mutablebasepack):
     """A class for constructing and serializing a datapack file and index.
 
@@ -388,6 +398,7 @@
 
     [1]: new in version 1.
     """
+
     INDEXSUFFIX = INDEXSUFFIX
     PACKSUFFIX = PACKSUFFIX
 
@@ -403,10 +414,10 @@
 
     def add(self, name, node, deltabasenode, delta, metadata=None):
         # metadata is a dict, ex. {METAKEYFLAG: flag}
-        if len(name) > 2**16:
-            raise RuntimeError(_("name too long %s") % name)
+        if len(name) > 2 ** 16:
+            raise RuntimeError(_(b"name too long %s") % name)
         if len(node) != 20:
-            raise RuntimeError(_("node should be 20 bytes %s") % node)
+            raise RuntimeError(_(b"node should be 20 bytes %s") % node)
 
         if node in self.entries:
             # The revision has already been added
@@ -415,18 +426,20 @@
         # TODO: allow configurable compression
         delta = self._compress(delta)
 
-        rawdata = ''.join((
-            struct.pack('!H', len(name)), # unsigned 2 byte int
-            name,
-            node,
-            deltabasenode,
-            struct.pack('!Q', len(delta)), # unsigned 8 byte int
-            delta,
-        ))
+        rawdata = b''.join(
+            (
+                struct.pack(b'!H', len(name)),  # unsigned 2 byte int
+                name,
+                node,
+                deltabasenode,
+                struct.pack(b'!Q', len(delta)),  # unsigned 8 byte int
+                delta,
+            )
+        )
 
         # v1 support metadata
         rawmeta = shallowutil.buildpackmeta(metadata)
-        rawdata += struct.pack('!I', len(rawmeta)) # unsigned 4 byte
+        rawdata += struct.pack(b'!I', len(rawmeta))  # unsigned 4 byte
         rawdata += rawmeta
 
         offset = self.packfp.tell()
@@ -438,10 +451,11 @@
         self.writeraw(rawdata)
 
     def createindex(self, nodelocations, indexoffset):
-        entries = sorted((n, db, o, s) for n, (db, o, s)
-                         in self.entries.iteritems())
+        entries = sorted(
+            (n, db, o, s) for n, (db, o, s) in pycompat.iteritems(self.entries)
+        )
 
-        rawindex = ''
+        rawindex = b''
         fmt = self.INDEXFORMAT
         for node, deltabase, offset, size in entries:
             if deltabase == nullid:
@@ -449,8 +463,9 @@
             else:
                 # Instead of storing the deltabase node in the index, let's
                 # store a pointer directly to the index entry for the deltabase.
-                deltabaselocation = nodelocations.get(deltabase,
-                                                      NOBASEINDEXMARK)
+                deltabaselocation = nodelocations.get(
+                    deltabase, NOBASEINDEXMARK
+                )
 
             entry = struct.pack(fmt, node, deltabaselocation, offset, size)
             rawindex += entry
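
``mutabledatapack.add`` above frames each entry as a length-prefixed
filename, two 20-byte nodes, a length-prefixed (compressed) delta, and a
length-prefixed metadata blob. A hedged round-trip sketch of just the byte
framing, with compression and the metadata segment omitted::

   import struct

   def packentry(name, node, deltabasenode, delta):
       # Framing only: <!H len><name><node><deltabase><!Q len><delta>.
       return b''.join((
           struct.pack(b'!H', len(name)), name,
           node, deltabasenode,
           struct.pack(b'!Q', len(delta)), delta,
       ))

   def unpackentry(raw):
       (namelen,) = struct.unpack_from(b'!H', raw, 0)
       name = raw[2 : 2 + namelen]
       off = 2 + namelen
       node = raw[off : off + 20]
       deltabase = raw[off + 20 : off + 40]
       off += 40
       (deltalen,) = struct.unpack_from(b'!Q', raw, off)
       delta = raw[off + 8 : off + 8 + deltalen]
       return name, node, deltabase, delta

   raw = packentry(b'a.txt', b'\x11' * 20, b'\x00' * 20, b'payload')
   assert unpackentry(raw) == (b'a.txt', b'\x11' * 20, b'\x00' * 20,
                               b'payload')
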
--- a/hgext/remotefilelog/debugcommands.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/debugcommands.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,9 +12,11 @@
 
 from mercurial.node import bin, hex, nullid, short
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     error,
     filelog,
+    lock as lockmod,
     node as nodemod,
     pycompat,
     revlog,
@@ -22,58 +24,64 @@
 from . import (
     constants,
     datapack,
-    extutil,
     fileserverclient,
     historypack,
     repack,
     shallowutil,
 )
 
+
 def debugremotefilelog(ui, path, **opts):
     decompress = opts.get(r'decompress')
 
     size, firstnode, mapping = parsefileblob(path, decompress)
 
-    ui.status(_("size: %d bytes\n") % (size))
-    ui.status(_("path: %s \n") % (path))
-    ui.status(_("key: %s \n") % (short(firstnode)))
-    ui.status(_("\n"))
-    ui.status(_("%12s => %12s %13s %13s %12s\n") %
-              ("node", "p1", "p2", "linknode", "copyfrom"))
+    ui.status(_(b"size: %d bytes\n") % size)
+    ui.status(_(b"path: %s \n") % path)
+    ui.status(_(b"key: %s \n") % (short(firstnode)))
+    ui.status(_(b"\n"))
+    ui.status(
+        _(b"%12s => %12s %13s %13s %12s\n")
+        % (b"node", b"p1", b"p2", b"linknode", b"copyfrom")
+    )
 
     queue = [firstnode]
     while queue:
         node = queue.pop(0)
         p1, p2, linknode, copyfrom = mapping[node]
-        ui.status(_("%s => %s  %s  %s  %s\n") %
-            (short(node), short(p1), short(p2), short(linknode), copyfrom))
+        ui.status(
+            _(b"%s => %s  %s  %s  %s\n")
+            % (short(node), short(p1), short(p2), short(linknode), copyfrom)
+        )
         if p1 != nullid:
             queue.append(p1)
         if p2 != nullid:
             queue.append(p2)
 
+
 def buildtemprevlog(repo, file):
     # get filename key
     filekey = nodemod.hex(hashlib.sha1(file).digest())
-    filedir = os.path.join(repo.path, 'store/data', filekey)
+    filedir = os.path.join(repo.path, b'store/data', filekey)
 
     # sort all entries based on linkrev
     fctxs = []
     for filenode in os.listdir(filedir):
-        if '_old' not in filenode:
+        if b'_old' not in filenode:
             fctxs.append(repo.filectx(file, fileid=bin(filenode)))
 
     fctxs = sorted(fctxs, key=lambda x: x.linkrev())
 
     # add to revlog
-    temppath = repo.sjoin('data/temprevlog.i')
+    temppath = repo.sjoin(b'data/temprevlog.i')
     if os.path.exists(temppath):
         os.remove(temppath)
-    r = filelog.filelog(repo.svfs, 'temprevlog')
+    r = filelog.filelog(repo.svfs, b'temprevlog')
 
     class faket(object):
         def add(self, a, b, c):
             pass
+
     t = faket()
     for fctx in fctxs:
         if fctx.node() not in repo:
@@ -82,42 +90,53 @@
         p = fctx.filelog().parents(fctx.filenode())
         meta = {}
         if fctx.renamed():
-            meta['copy'] = fctx.renamed()[0]
-            meta['copyrev'] = hex(fctx.renamed()[1])
+            meta[b'copy'] = fctx.renamed()[0]
+            meta[b'copyrev'] = hex(fctx.renamed()[1])
 
         r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1])
 
     return r
 
+
 def debugindex(orig, ui, repo, file_=None, **opts):
     """dump the contents of an index file"""
-    if (opts.get(r'changelog') or
-        opts.get(r'manifest') or
-        opts.get(r'dir') or
-        not shallowutil.isenabled(repo) or
-        not repo.shallowmatch(file_)):
+    if (
+        opts.get(r'changelog')
+        or opts.get(r'manifest')
+        or opts.get(r'dir')
+        or not shallowutil.isenabled(repo)
+        or not repo.shallowmatch(file_)
+    ):
         return orig(ui, repo, file_, **opts)
 
     r = buildtemprevlog(repo, file_)
 
     # debugindex like normal
-    format = opts.get('format', 0)
+    format = opts.get(b'format', 0)
     if format not in (0, 1):
-        raise error.Abort(_("unknown format %d") % format)
+        raise error.Abort(_(b"unknown format %d") % format)
 
     generaldelta = r.version & revlog.FLAG_GENERALDELTA
     if generaldelta:
-        basehdr = ' delta'
+        basehdr = b' delta'
     else:
-        basehdr = '  base'
+        basehdr = b'  base'
 
     if format == 0:
-        ui.write(("   rev    offset  length " + basehdr + " linkrev"
-                  " nodeid       p1           p2\n"))
+        ui.write(
+            (
+                b"   rev    offset  length " + basehdr + b" linkrev"
+                b" nodeid       p1           p2\n"
+            )
+        )
     elif format == 1:
-        ui.write(("   rev flag   offset   length"
-                  "     size " + basehdr + "   link     p1     p2"
-                  "       nodeid\n"))
+        ui.write(
+            (
+                b"   rev flag   offset   length"
+                b"     size " + basehdr + b"   link     p1     p2"
+                b"       nodeid\n"
+            )
+        )
 
     for i in r:
         node = r.node(i)
@@ -130,14 +149,37 @@
                 pp = r.parents(node)
             except Exception:
                 pp = [nullid, nullid]
-            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
-                    i, r.start(i), r.length(i), base, r.linkrev(i),
-                    short(node), short(pp[0]), short(pp[1])))
+            ui.write(
+                b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n"
+                % (
+                    i,
+                    r.start(i),
+                    r.length(i),
+                    base,
+                    r.linkrev(i),
+                    short(node),
+                    short(pp[0]),
+                    short(pp[1]),
+                )
+            )
         elif format == 1:
             pr = r.parentrevs(i)
-            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
-                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
-                    base, r.linkrev(i), pr[0], pr[1], short(node)))
+            ui.write(
+                b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n"
+                % (
+                    i,
+                    r.flags(i),
+                    r.start(i),
+                    r.length(i),
+                    r.rawsize(i),
+                    base,
+                    r.linkrev(i),
+                    pr[0],
+                    pr[1],
+                    short(node),
+                )
+            )
+
 
 def debugindexdot(orig, ui, repo, file_):
     """dump an index DAG as a graphviz dot file"""
@@ -146,37 +188,42 @@
 
     r = buildtemprevlog(repo, os.path.basename(file_)[:-2])
 
-    ui.write(("digraph G {\n"))
+    ui.writenoi18n(b"digraph G {\n")
     for i in r:
         node = r.node(i)
         pp = r.parents(node)
-        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
+        ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
         if pp[1] != nullid:
-            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
-    ui.write("}\n")
+            ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
+    ui.write(b"}\n")
+
 
 def verifyremotefilelog(ui, path, **opts):
     decompress = opts.get(r'decompress')
 
     for root, dirs, files in os.walk(path):
         for file in files:
-            if file == "repos":
+            if file == b"repos":
                 continue
             filepath = os.path.join(root, file)
             size, firstnode, mapping = parsefileblob(filepath, decompress)
-            for p1, p2, linknode, copyfrom in mapping.itervalues():
+            for p1, p2, linknode, copyfrom in pycompat.itervalues(mapping):
                 if linknode == nullid:
                     actualpath = os.path.relpath(root, path)
-                    key = fileserverclient.getcachekey("reponame", actualpath,
-                                                       file)
-                    ui.status("%s %s\n" % (key, os.path.relpath(filepath,
-                                                                path)))
+                    key = fileserverclient.getcachekey(
+                        b"reponame", actualpath, file
+                    )
+                    ui.status(
+                        b"%s %s\n" % (key, os.path.relpath(filepath, path))
+                    )
+
 
 def _decompressblob(raw):
     return zlib.decompress(raw)
 
+
 def parsefileblob(path, decompress):
-    f = open(path, "rb")
+    f = open(path, b"rb")
     try:
         raw = f.read()
     finally:
@@ -192,31 +239,32 @@
 
     mapping = {}
     while start < len(raw):
-        divider = raw.index('\0', start + 80)
+        divider = raw.index(b'\0', start + 80)
 
-        currentnode = raw[start:(start + 20)]
+        currentnode = raw[start : (start + 20)]
         if not firstnode:
             firstnode = currentnode
 
-        p1 = raw[(start + 20):(start + 40)]
-        p2 = raw[(start + 40):(start + 60)]
-        linknode = raw[(start + 60):(start + 80)]
-        copyfrom = raw[(start + 80):divider]
+        p1 = raw[(start + 20) : (start + 40)]
+        p2 = raw[(start + 40) : (start + 60)]
+        linknode = raw[(start + 60) : (start + 80)]
+        copyfrom = raw[(start + 80) : divider]
 
         mapping[currentnode] = (p1, p2, linknode, copyfrom)
         start = divider + 1
 
     return size, firstnode, mapping
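
``parsefileblob`` above walks the blob's ancestor records: each record is
four 20-byte nodes (node, p1, p2, linknode) followed by a NUL-terminated
copy-from path, which is why the NUL is searched for only past the first 80
bytes. A sketch that builds one record and slices it the same way, assuming
only the layout read here::

   def packancestor(node, p1, p2, linknode, copyfrom=b''):
       # Four 20-byte nodes, then the copy source, NUL-terminated.
       return node + p1 + p2 + linknode + copyfrom + b'\0'

   raw = packancestor(b'\x01' * 20, b'\x02' * 20, b'\x00' * 20,
                      b'\x03' * 20, b'old/name')
   divider = raw.index(b'\0', 80)
   assert raw[0:20] == b'\x01' * 20       # node
   assert raw[60:80] == b'\x03' * 20      # linknode
   assert raw[80:divider] == b'old/name'  # copyfrom
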
 
+
 def debugdatapack(ui, *paths, **opts):
     for path in paths:
-        if '.data' in path:
-            path = path[:path.index('.data')]
-        ui.write("%s:\n" % path)
+        if b'.data' in path:
+            path = path[: path.index(b'.data')]
+        ui.write(b"%s:\n" % path)
         dpack = datapack.datapack(path)
         node = opts.get(r'node')
         if node:
-            deltachain = dpack.getdeltachain('', bin(node))
+            deltachain = dpack.getdeltachain(b'', bin(node))
             dumpdeltachain(ui, deltachain, **opts)
             return
 
@@ -230,22 +278,27 @@
         lastfilename = None
         totaldeltasize = 0
         totalblobsize = 0
+
         def printtotals():
             if lastfilename is not None:
-                ui.write("\n")
+                ui.write(b"\n")
             if not totaldeltasize or not totalblobsize:
                 return
             difference = totalblobsize - totaldeltasize
-            deltastr = "%0.1f%% %s" % (
+            deltastr = b"%0.1f%% %s" % (
                 (100.0 * abs(difference) / totalblobsize),
-                ("smaller" if difference > 0 else "bigger"))
+                (b"smaller" if difference > 0 else b"bigger"),
+            )
 
-            ui.write(("Total:%s%s  %s (%s)\n") % (
-                "".ljust(2 * hashlen - len("Total:")),
-                ('%d' % totaldeltasize).ljust(12),
-                ('%d' % totalblobsize).ljust(9),
-                deltastr
-            ))
+            ui.writenoi18n(
+                b"Total:%s%s  %s (%s)\n"
+                % (
+                    b"".ljust(2 * hashlen - len(b"Total:")),
+                    (b'%d' % totaldeltasize).ljust(12),
+                    (b'%d' % totalblobsize).ljust(9),
+                    deltastr,
+                )
+            )
 
         bases = {}
         nodes = set()
@@ -253,18 +306,22 @@
         for filename, node, deltabase, deltalen in dpack.iterentries():
             bases[node] = deltabase
             if node in nodes:
-                ui.write(("Bad entry: %s appears twice\n" % short(node)))
+                ui.write((b"Bad entry: %s appears twice\n" % short(node)))
                 failures += 1
             nodes.add(node)
             if filename != lastfilename:
                 printtotals()
-                name = '(empty name)' if filename == '' else filename
-                ui.write("%s:\n" % name)
-                ui.write("%s%s%s%s\n" % (
-                    "Node".ljust(hashlen),
-                    "Delta Base".ljust(hashlen),
-                    "Delta Length".ljust(14),
-                    "Blob Size".ljust(9)))
+                name = b'(empty name)' if filename == b'' else filename
+                ui.write(b"%s:\n" % name)
+                ui.write(
+                    b"%s%s%s%s\n"
+                    % (
+                        b"Node".ljust(hashlen),
+                        b"Delta Base".ljust(hashlen),
+                        b"Delta Length".ljust(14),
+                        b"Blob Size".ljust(9),
+                    )
+                )
                 lastfilename = filename
                 totalblobsize = 0
                 totaldeltasize = 0
@@ -276,21 +333,26 @@
                 totaldeltasize += deltalen
                 totalblobsize += blobsize
             else:
-                blobsize = "(missing)"
-            ui.write("%s  %s  %s%s\n" % (
-                hashformatter(node),
-                hashformatter(deltabase),
-                ('%d' % deltalen).ljust(14),
-                pycompat.bytestr(blobsize)))
+                blobsize = b"(missing)"
+            ui.write(
+                b"%s  %s  %s%s\n"
+                % (
+                    hashformatter(node),
+                    hashformatter(deltabase),
+                    (b'%d' % deltalen).ljust(14),
+                    pycompat.bytestr(blobsize),
+                )
+            )
 
         if filename is not None:
             printtotals()
 
         failures += _sanitycheck(ui, set(nodes), bases)
         if failures > 1:
-            ui.warn(("%d failures\n" % failures))
+            ui.warn((b"%d failures\n" % failures))
             return 1
 
+
 def _sanitycheck(ui, nodes, bases):
     """
     Does some basic sanity checking on a packfile with ``nodes`` and ``bases`` (a
@@ -307,14 +369,22 @@
 
         while deltabase != nullid:
             if deltabase not in nodes:
-                ui.warn(("Bad entry: %s has an unknown deltabase (%s)\n" %
-                        (short(node), short(deltabase))))
+                ui.warn(
+                    (
+                        b"Bad entry: %s has an unknown deltabase (%s)\n"
+                        % (short(node), short(deltabase))
+                    )
+                )
                 failures += 1
                 break
 
             if deltabase in seen:
-                ui.warn(("Bad entry: %s has a cycle (at %s)\n" %
-                        (short(node), short(deltabase))))
+                ui.warn(
+                    (
+                        b"Bad entry: %s has a cycle (at %s)\n"
+                        % (short(node), short(deltabase))
+                    )
+                )
                 failures += 1
                 break
 
@@ -326,6 +396,7 @@
         bases[node] = nullid
     return failures
 
+
 def dumpdeltachain(ui, deltachain, **opts):
     hashformatter = hex
     hashlen = 40
@@ -333,46 +404,74 @@
     lastfilename = None
     for filename, node, filename, deltabasenode, delta in deltachain:
         if filename != lastfilename:
-            ui.write("\n%s\n" % filename)
+            ui.write(b"\n%s\n" % filename)
             lastfilename = filename
-        ui.write("%s  %s  %s  %s\n" % (
-            "Node".ljust(hashlen),
-            "Delta Base".ljust(hashlen),
-            "Delta SHA1".ljust(hashlen),
-            "Delta Length".ljust(6),
-        ))
+        ui.write(
+            b"%s  %s  %s  %s\n"
+            % (
+                b"Node".ljust(hashlen),
+                b"Delta Base".ljust(hashlen),
+                b"Delta SHA1".ljust(hashlen),
+                b"Delta Length".ljust(6),
+            )
+        )
 
-        ui.write("%s  %s  %s  %d\n" % (
-            hashformatter(node),
-            hashformatter(deltabasenode),
-            nodemod.hex(hashlib.sha1(delta).digest()),
-            len(delta)))
+        ui.write(
+            b"%s  %s  %s  %d\n"
+            % (
+                hashformatter(node),
+                hashformatter(deltabasenode),
+                nodemod.hex(hashlib.sha1(delta).digest()),
+                len(delta),
+            )
+        )
+
 
 def debughistorypack(ui, path):
-    if '.hist' in path:
-        path = path[:path.index('.hist')]
+    if b'.hist' in path:
+        path = path[: path.index(b'.hist')]
     hpack = historypack.historypack(path)
 
     lastfilename = None
     for entry in hpack.iterentries():
         filename, node, p1node, p2node, linknode, copyfrom = entry
         if filename != lastfilename:
-            ui.write("\n%s\n" % filename)
-            ui.write("%s%s%s%s%s\n" % (
-                "Node".ljust(14),
-                "P1 Node".ljust(14),
-                "P2 Node".ljust(14),
-                "Link Node".ljust(14),
-                "Copy From"))
+            ui.write(b"\n%s\n" % filename)
+            ui.write(
+                b"%s%s%s%s%s\n"
+                % (
+                    b"Node".ljust(14),
+                    b"P1 Node".ljust(14),
+                    b"P2 Node".ljust(14),
+                    b"Link Node".ljust(14),
+                    b"Copy From",
+                )
+            )
             lastfilename = filename
-        ui.write("%s  %s  %s  %s  %s\n" % (short(node), short(p1node),
-            short(p2node), short(linknode), copyfrom))
+        ui.write(
+            b"%s  %s  %s  %s  %s\n"
+            % (
+                short(node),
+                short(p1node),
+                short(p2node),
+                short(linknode),
+                copyfrom,
+            )
+        )
+
 
 def debugwaitonrepack(repo):
-    with extutil.flock(repack.repacklockvfs(repo).join('repacklock'), ''):
+    with lockmod.lock(repack.repacklockvfs(repo), b"repacklock", timeout=-1):
         return
 
+
 def debugwaitonprefetch(repo):
-    with repo._lock(repo.svfs, "prefetchlock", True, None,
-                         None, _('prefetching in %s') % repo.origroot):
+    with repo._lock(
+        repo.svfs,
+        b"prefetchlock",
+        True,
+        None,
+        None,
+        _(b'prefetching in %s') % repo.origroot,
+    ):
         pass
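
``_sanitycheck`` above walks every node's deltabase chain and flags bases
missing from the pack as well as chains that loop. A compact sketch of the
same check, assuming a plain dict from node to deltabase with a null node as
the chain terminator (each bad chain is counted once per starting node)::

   NULLID = b'\0' * 20

   def sanitycheck(nodes, bases):
       # bases maps node -> deltabase; returns the number of bad chains.
       failures = 0
       for node in nodes:
           seen = set()
           current = node
           while bases.get(current, NULLID) != NULLID:
               base = bases[current]
               if base not in nodes:
                   failures += 1  # unknown deltabase
                   break
               if base in seen:
                   failures += 1  # cycle
                   break
               seen.add(base)
               current = base
       return failures

   assert sanitycheck({b'a', b'b'}, {b'a': b'b', b'b': NULLID}) == 0
   # Both b'a' and b'b' start chains that hit the a<->b cycle.
   assert sanitycheck({b'a', b'b'}, {b'a': b'b', b'b': b'a'}) == 2
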
--- a/hgext/remotefilelog/extutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-# extutil.py - useful utility methods for extensions
-#
-# Copyright 2016 Facebook
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-import contextlib
-import errno
-import os
-import time
-
-from mercurial import (
-    error,
-    lock as lockmod,
-    util,
-    vfs as vfsmod,
-)
-
-@contextlib.contextmanager
-def flock(lockpath, description, timeout=-1):
-    """A flock based lock object. Currently it is always non-blocking.
-
-    Note that since it is flock based, you can accidentally take it multiple
-    times within one process and the first one to be released will release all
-    of them. So the caller needs to be careful to not create more than one
-    instance per lock.
-    """
-
-    # best effort lightweight lock
-    try:
-        import fcntl
-        fcntl.flock
-    except ImportError:
-        # fallback to Mercurial lock
-        vfs = vfsmod.vfs(os.path.dirname(lockpath))
-        with lockmod.lock(vfs, os.path.basename(lockpath), timeout=timeout):
-            yield
-        return
-    # make sure lock file exists
-    util.makedirs(os.path.dirname(lockpath))
-    with open(lockpath, 'a'):
-        pass
-    lockfd = os.open(lockpath, os.O_RDONLY, 0o664)
-    start = time.time()
-    while True:
-        try:
-            fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
-            break
-        except IOError as ex:
-            if ex.errno == errno.EAGAIN:
-                if timeout != -1 and time.time() - start > timeout:
-                    raise error.LockHeld(errno.EAGAIN, lockpath, description,
-                                         '')
-                else:
-                    time.sleep(0.05)
-                    continue
-            raise
-
-    try:
-        yield
-    finally:
-        fcntl.flock(lockfd, fcntl.LOCK_UN)
-        os.close(lockfd)
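
With this helper gone, the extension's remaining lock users (such as
``debugwaitonrepack``, above) go through ``lockmod.lock``, avoiding the
caveat in the deleted docstring that one process can accidentally take an
flock twice. Outside Mercurial the removed behaviour can be approximated in
a few lines; a POSIX-only sketch, not the deleted helper::

   import contextlib
   import fcntl
   import os

   @contextlib.contextmanager
   def flock(lockpath):
       # Blocking exclusive flock, released on context exit or process
       # death; unlike the deleted helper, this variant never times out.
       fd = os.open(lockpath, os.O_RDONLY | os.O_CREAT, 0o664)
       try:
           fcntl.flock(fd, fcntl.LOCK_EX)
           yield
       finally:
           fcntl.flock(fd, fcntl.LOCK_UN)
           os.close(fd)
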
--- a/hgext/remotefilelog/fileserverclient.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/fileserverclient.py	Mon Oct 21 11:09:48 2019 -0400
@@ -43,52 +43,61 @@
 
 _lfsmod = None
 
+
 def getcachekey(reponame, file, id):
     pathhash = node.hex(hashlib.sha1(file).digest())
     return os.path.join(reponame, pathhash[:2], pathhash[2:], id)
 
+
 def getlocalkey(file, id):
     pathhash = node.hex(hashlib.sha1(file).digest())
     return os.path.join(pathhash, id)
 
+
 def peersetup(ui, peer):
-
     class remotefilepeer(peer.__class__):
         @wireprotov1peer.batchable
         def x_rfl_getfile(self, file, node):
-            if not self.capable('x_rfl_getfile'):
+            if not self.capable(b'x_rfl_getfile'):
                 raise error.Abort(
-                    'configured remotefile server does not support getfile')
+                    b'configured remotefile server does not support getfile'
+                )
             f = wireprotov1peer.future()
-            yield {'file': file, 'node': node}, f
-            code, data = f.value.split('\0', 1)
+            yield {b'file': file, b'node': node}, f
+            code, data = f.value.split(b'\0', 1)
             if int(code):
                 raise error.LookupError(file, node, data)
             yield data
 
         @wireprotov1peer.batchable
         def x_rfl_getflogheads(self, path):
-            if not self.capable('x_rfl_getflogheads'):
-                raise error.Abort('configured remotefile server does not '
-                                  'support getflogheads')
+            if not self.capable(b'x_rfl_getflogheads'):
+                raise error.Abort(
+                    b'configured remotefile server does not '
+                    b'support getflogheads'
+                )
             f = wireprotov1peer.future()
-            yield {'path': path}, f
-            heads = f.value.split('\n') if f.value else []
+            yield {b'path': path}, f
+            heads = f.value.split(b'\n') if f.value else []
             yield heads
 
         def _updatecallstreamopts(self, command, opts):
-            if command != 'getbundle':
+            if command != b'getbundle':
                 return
-            if (constants.NETWORK_CAP_LEGACY_SSH_GETFILES
-                not in self.capabilities()):
+            if (
+                constants.NETWORK_CAP_LEGACY_SSH_GETFILES
+                not in self.capabilities()
+            ):
                 return
             if not util.safehasattr(self, '_localrepo'):
                 return
-            if (constants.SHALLOWREPO_REQUIREMENT
-                not in self._localrepo.requirements):
+            if (
+                constants.SHALLOWREPO_REQUIREMENT
+                not in self._localrepo.requirements
+            ):
                 return
 
-            bundlecaps = opts.get('bundlecaps')
+            bundlecaps = opts.get(b'bundlecaps')
             if bundlecaps:
                 bundlecaps = [bundlecaps]
             else:
@@ -103,19 +112,20 @@
             # do this more cleanly.
             bundlecaps.append(constants.BUNDLE2_CAPABLITY)
             if self._localrepo.includepattern:
-                patterns = '\0'.join(self._localrepo.includepattern)
-                includecap = "includepattern=" + patterns
+                patterns = b'\0'.join(self._localrepo.includepattern)
+                includecap = b"includepattern=" + patterns
                 bundlecaps.append(includecap)
             if self._localrepo.excludepattern:
-                patterns = '\0'.join(self._localrepo.excludepattern)
-                excludecap = "excludepattern=" + patterns
+                patterns = b'\0'.join(self._localrepo.excludepattern)
+                excludecap = b"excludepattern=" + patterns
                 bundlecaps.append(excludecap)
-            opts['bundlecaps'] = ','.join(bundlecaps)
+            opts[b'bundlecaps'] = b','.join(bundlecaps)
 
         def _sendrequest(self, command, args, **opts):
             self._updatecallstreamopts(command, args)
-            return super(remotefilepeer, self)._sendrequest(command, args,
-                                                            **opts)
+            return super(remotefilepeer, self)._sendrequest(
+                command, args, **opts
+            )
 
         def _callstream(self, command, **opts):
             supertype = super(remotefilepeer, self)
@@ -125,11 +135,13 @@
 
     peer.__class__ = remotefilepeer
 
+
 class cacheconnection(object):
     """The connection for communicating with the remote cache. Performs
     gets and sets by communicating with an external process that has the
     cache-specific implementation.
     """
+
     def __init__(self):
         self.pipeo = self.pipei = self.pipee = None
         self.subprocess = None
@@ -137,9 +149,10 @@
 
     def connect(self, cachecommand):
         if self.pipeo:
-            raise error.Abort(_("cache connection already open"))
-        self.pipei, self.pipeo, self.pipee, self.subprocess = (
-            procutil.popen4(cachecommand))
+            raise error.Abort(_(b"cache connection already open"))
+        self.pipei, self.pipeo, self.pipee, self.subprocess = procutil.popen4(
+            cachecommand
+        )
         self.connected = True
 
     def close(self):
@@ -148,9 +161,10 @@
                 pipe.close()
             except Exception:
                 pass
+
         if self.connected:
             try:
-                self.pipei.write("exit\n")
+                self.pipei.write(b"exit\n")
             except Exception:
                 pass
             tryclose(self.pipei)
@@ -190,8 +204,10 @@
 
         return result
 
+
 def _getfilesbatch(
-        remote, receivemissing, progresstick, missed, idmap, batchsize):
+    remote, receivemissing, progresstick, missed, idmap, batchsize
+):
     # Over http(s), iterbatch is a streamy method and we can start
     # looking at results early. This means we send one (potentially
     # large) request, but then we show nice progress as we process
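
The comment above is the heart of the batch path: as the next hunk shows,
every request is issued through one command executor so it travels as a
single round trip, and the returned futures are then resolved in order while
responses stream back. A hedged usage sketch of that pattern (peer, command
name, and keys as used in this hunk; the return value is left as the raw
command result)::

   def fetchbatch(remote, wants):
       # wants: list of (file, hex node) pairs; returns node -> result.
       results = {}
       with remote.commandexecutor() as e:
           # Issue every request first so they go out as one batch...
           futures = [
               e.callcommand(b'x_rfl_getfile', {b'file': f, b'node': n})
               for f, n in wants
           ]
           # ...then consume the results in order as they stream in.
           for (f, n), fut in zip(wants, futures):
               results[n] = fut.result()
       return results
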
@@ -205,22 +221,25 @@
     with remote.commandexecutor() as e:
         futures = []
         for m in missed:
-            futures.append(e.callcommand('x_rfl_getfile', {
-                'file': idmap[m],
-                'node': m[-40:]
-            }))
+            futures.append(
+                e.callcommand(
+                    b'x_rfl_getfile', {b'file': idmap[m], b'node': m[-40:]}
+                )
+            )
 
         for i, m in enumerate(missed):
             r = futures[i].result()
             futures[i] = None  # release memory
             file_ = idmap[m]
             node = m[-40:]
-            receivemissing(io.BytesIO('%d\n%s' % (len(r), r)), file_, node)
+            receivemissing(io.BytesIO(b'%d\n%s' % (len(r), r)), file_, node)
             progresstick()
 
+
 def _getfiles_optimistic(
-    remote, receivemissing, progresstick, missed, idmap, step):
-    remote._callstream("x_rfl_getfiles")
+    remote, receivemissing, progresstick, missed, idmap, step
+):
+    remote._callstream(b"x_rfl_getfiles")
     i = 0
     pipeo = remote._pipeo
     pipei = remote._pipei
@@ -233,7 +252,7 @@
             # issue new request
             versionid = missingid[-40:]
             file = idmap[missingid]
-            sshrequest = "%s%s\n" % (versionid, file)
+            sshrequest = b"%s%s\n" % (versionid, file)
             pipeo.write(sshrequest)
         pipeo.flush()
 
@@ -245,12 +264,14 @@
             progresstick()
 
     # End the command
-    pipeo.write('\n')
+    pipeo.write(b'\n')
     pipeo.flush()
 
+
 def _getfiles_threaded(
-    remote, receivemissing, progresstick, missed, idmap, step):
-    remote._callstream("getfiles")
+    remote, receivemissing, progresstick, missed, idmap, step
+):
+    remote._callstream(b"getfiles")
     pipeo = remote._pipeo
     pipei = remote._pipei
 
@@ -258,9 +279,10 @@
         for missingid in missed:
             versionid = missingid[-40:]
             file = idmap[missingid]
-            sshrequest = "%s%s\n" % (versionid, file)
+            sshrequest = b"%s%s\n" % (versionid, file)
             pipeo.write(sshrequest)
         pipeo.flush()
+
     writerthread = threading.Thread(target=writer)
     writerthread.daemon = True
     writerthread.start()
@@ -273,26 +295,29 @@
 
     writerthread.join()
     # End the command
-    pipeo.write('\n')
+    pipeo.write(b'\n')
     pipeo.flush()
 
+
 class fileserverclient(object):
     """A client for requesting files from the remote file server.
     """
+
     def __init__(self, repo):
         ui = repo.ui
         self.repo = repo
         self.ui = ui
-        self.cacheprocess = ui.config("remotefilelog", "cacheprocess")
+        self.cacheprocess = ui.config(b"remotefilelog", b"cacheprocess")
         if self.cacheprocess:
             self.cacheprocess = util.expandpath(self.cacheprocess)
 
         # This option causes remotefilelog to pass the full file path to the
         # cacheprocess instead of a hashed key.
         self.cacheprocesspasspath = ui.configbool(
-            "remotefilelog", "cacheprocess.includepath")
+            b"remotefilelog", b"cacheprocess.includepath"
+        )
 
-        self.debugoutput = ui.configbool("remotefilelog", "debug")
+        self.debugoutput = ui.configbool(b"remotefilelog", b"debug")
 
         self.remotecache = cacheconnection()
 
@@ -318,19 +343,19 @@
 
         repo = self.repo
         total = len(fileids)
-        request = "get\n%d\n" % total
+        request = b"get\n%d\n" % total
         idmap = {}
         reponame = repo.name
         for file, id in fileids:
             fullid = getcachekey(reponame, file, id)
             if self.cacheprocesspasspath:
-                request += file + '\0'
-            request += fullid + "\n"
+                request += file + b'\0'
+            request += fullid + b"\n"
             idmap[fullid] = file
 
         cache.request(request)
 
-        progress = self.ui.makeprogress(_('downloading'), total=total)
+        progress = self.ui.makeprogress(_(b'downloading'), total=total)
         progress.update(0)
 
         missed = []
@@ -341,14 +366,18 @@
                 for missingid in idmap:
                     if not missingid in missedset:
                         missed.append(missingid)
-                self.ui.warn(_("warning: cache connection closed early - " +
-                    "falling back to server\n"))
+                self.ui.warn(
+                    _(
+                        b"warning: cache connection closed early - "
+                        + b"falling back to server\n"
+                    )
+                )
                 break
-            if missingid == "0":
+            if missingid == b"0":
                 break
-            if missingid.startswith("_hits_"):
+            if missingid.startswith(b"_hits_"):
                 # receive progress reports
-                parts = missingid.split("_")
+                parts = missingid.split(b"_")
                 progress.increment(int(parts[2]))
                 continue
 
@@ -359,8 +388,14 @@
 
         fromcache = total - len(missed)
         progress.update(fromcache, total=total)
-        self.ui.log("remotefilelog", "remote cache hit rate is %r of %r\n",
-                    fromcache, total, hit=fromcache, total=total)
+        self.ui.log(
+            b"remotefilelog",
+            b"remote cache hit rate is %r of %r\n",
+            fromcache,
+            total,
+            hit=fromcache,
+            total=total,
+        )
 
         oldumask = os.umask(0o002)
         try:
@@ -375,51 +410,74 @@
                     with self._connect() as conn:
                         remote = conn.peer
                         if remote.capable(
-                                constants.NETWORK_CAP_LEGACY_SSH_GETFILES):
+                            constants.NETWORK_CAP_LEGACY_SSH_GETFILES
+                        ):
                             if not isinstance(remote, _sshv1peer):
-                                raise error.Abort('remotefilelog requires ssh '
-                                                  'servers')
-                            step = self.ui.configint('remotefilelog',
-                                                     'getfilesstep')
-                            getfilestype = self.ui.config('remotefilelog',
-                                                          'getfilestype')
-                            if getfilestype == 'threaded':
+                                raise error.Abort(
+                                    b'remotefilelog requires ssh servers'
+                                )
+                            step = self.ui.configint(
+                                b'remotefilelog', b'getfilesstep'
+                            )
+                            getfilestype = self.ui.config(
+                                b'remotefilelog', b'getfilestype'
+                            )
+                            if getfilestype == b'threaded':
                                 _getfiles = _getfiles_threaded
                             else:
                                 _getfiles = _getfiles_optimistic
-                            _getfiles(remote, self.receivemissing,
-                                      progress.increment, missed, idmap, step)
-                        elif remote.capable("x_rfl_getfile"):
-                            if remote.capable('batch'):
+                            _getfiles(
+                                remote,
+                                self.receivemissing,
+                                progress.increment,
+                                missed,
+                                idmap,
+                                step,
+                            )
+                        elif remote.capable(b"x_rfl_getfile"):
+                            if remote.capable(b'batch'):
                                 batchdefault = 100
                             else:
                                 batchdefault = 10
                             batchsize = self.ui.configint(
-                                'remotefilelog', 'batchsize', batchdefault)
+                                b'remotefilelog', b'batchsize', batchdefault
+                            )
                             self.ui.debug(
                                 b'requesting %d files from '
-                                b'remotefilelog server...\n' % len(missed))
+                                b'remotefilelog server...\n' % len(missed)
+                            )
                             _getfilesbatch(
-                                remote, self.receivemissing, progress.increment,
-                                missed, idmap, batchsize)
+                                remote,
+                                self.receivemissing,
+                                progress.increment,
+                                missed,
+                                idmap,
+                                batchsize,
+                            )
                         else:
-                            raise error.Abort("configured remotefilelog server"
-                                             " does not support remotefilelog")
+                            raise error.Abort(
+                                b"configured remotefilelog server"
+                                b" does not support remotefilelog"
+                            )
 
-                    self.ui.log("remotefilefetchlog",
-                                "Success\n",
-                                fetched_files = progress.pos - fromcache,
-                                total_to_fetch = total - fromcache)
+                    self.ui.log(
+                        b"remotefilefetchlog",
+                        b"Success\n",
+                        fetched_files=progress.pos - fromcache,
+                        total_to_fetch=total - fromcache,
+                    )
                 except Exception:
-                    self.ui.log("remotefilefetchlog",
-                                "Fail\n",
-                                fetched_files = progress.pos - fromcache,
-                                total_to_fetch = total - fromcache)
+                    self.ui.log(
+                        b"remotefilefetchlog",
+                        b"Fail\n",
+                        fetched_files=progress.pos - fromcache,
+                        total_to_fetch=total - fromcache,
+                    )
                     raise
                 finally:
                     self.ui.verbose = verbose
                 # send to memcache
-                request = "set\n%d\n%s\n" % (len(missed), "\n".join(missed))
+                request = b"set\n%d\n%s\n" % (len(missed), b"\n".join(missed))
                 cache.request(request)
 
             progress.complete()
@@ -432,21 +490,25 @@
     def receivemissing(self, pipe, filename, node):
         line = pipe.readline()[:-1]
         if not line:
-            raise error.ResponseError(_("error downloading file contents:"),
-                                      _("connection closed early"))
+            raise error.ResponseError(
+                _(b"error downloading file contents:"),
+                _(b"connection closed early"),
+            )
         size = int(line)
         data = pipe.read(size)
         if len(data) != size:
-            raise error.ResponseError(_("error downloading file contents:"),
-                                      _("only received %s of %s bytes")
-                                      % (len(data), size))
+            raise error.ResponseError(
+                _(b"error downloading file contents:"),
+                _(b"only received %s of %s bytes") % (len(data), size),
+            )
 
-        self.writedata.addremotefilelognode(filename, bin(node),
-                                             zlib.decompress(data))
+        self.writedata.addremotefilelognode(
+            filename, bin(node), zlib.decompress(data)
+        )
 
     def connect(self):
         if self.cacheprocess:
-            cmd = "%s %s" % (self.cacheprocess, self.writedata._path)
+            cmd = b"%s %s" % (self.cacheprocess, self.writedata._path)
             self.remotecache.connect(cmd)
         else:
             # If no cache process is specified, we fake one that always
@@ -462,11 +524,11 @@
                     pass
 
                 def request(self, value, flush=True):
-                    lines = value.split("\n")
-                    if lines[0] != "get":
+                    lines = value.split(b"\n")
+                    if lines[0] != b"get":
                         return
                     self.missingids = lines[2:-1]
-                    self.missingids.append('0')
+                    self.missingids.append(b'0')
 
                 def receiveline(self):
                     if len(self.missingids) > 0:
@@ -477,26 +539,33 @@
 
     def close(self):
         if fetches:
-            msg = ("%d files fetched over %d fetches - " +
-                   "(%d misses, %0.2f%% hit ratio) over %0.2fs\n") % (
-                       fetched,
-                       fetches,
-                       fetchmisses,
-                       float(fetched - fetchmisses) / float(fetched) * 100.0,
-                       fetchcost)
+            msg = (
+                b"%d files fetched over %d fetches - "
+                + b"(%d misses, %0.2f%% hit ratio) over %0.2fs\n"
+            ) % (
+                fetched,
+                fetches,
+                fetchmisses,
+                float(fetched - fetchmisses) / float(fetched) * 100.0,
+                fetchcost,
+            )
             if self.debugoutput:
                 self.ui.warn(msg)
-            self.ui.log("remotefilelog.prefetch", msg.replace("%", "%%"),
+            self.ui.log(
+                b"remotefilelog.prefetch",
+                msg.replace(b"%", b"%%"),
                 remotefilelogfetched=fetched,
                 remotefilelogfetches=fetches,
                 remotefilelogfetchmisses=fetchmisses,
-                remotefilelogfetchtime=fetchcost * 1000)
+                remotefilelogfetchtime=fetchcost * 1000,
+            )
 
         if self.remotecache.connected:
             self.remotecache.close()
 
-    def prefetch(self, fileids, force=False, fetchdata=True,
-                 fetchhistory=False):
+    def prefetch(
+        self, fileids, force=False, fetchdata=True, fetchhistory=False
+    ):
         """downloads the given file versions to the cache
         """
         repo = self.repo
@@ -506,8 +575,11 @@
             # - we don't use .hgtags
             # - workingctx produces ids with length 42,
             #   which we skip since they aren't in any cache
-            if (file == '.hgtags' or len(id) == 42
-                or not repo.shallowmatch(file)):
+            if (
+                file == b'.hgtags'
+                or len(id) == 42
+                or not repo.shallowmatch(file)
+            ):
                 continue
 
             idstocheck.append((file, bin(id)))
@@ -517,7 +589,8 @@
         if force:
             datastore = contentstore.unioncontentstore(*repo.shareddatastores)
             historystore = metadatastore.unionmetadatastore(
-                *repo.sharedhistorystores)
+                *repo.sharedhistorystores
+            )
 
         missingids = set()
         if fetchdata:
@@ -531,9 +604,12 @@
         if nullids:
             missingids = [(f, id) for f, id in missingids if id != nullid]
             repo.ui.develwarn(
-                ('remotefilelog not fetching %d null revs'
-                 ' - this is likely hiding bugs' % nullids),
-                config='remotefilelog-ext')
+                (
+                    b'remotefilelog not fetching %d null revs'
+                    b' - this is likely hiding bugs' % nullids
+                ),
+                config=b'remotefilelog-ext',
+            )
         if missingids:
             global fetches, fetched, fetchcost
             fetches += 1
@@ -542,24 +618,27 @@
             # let's log that information for debugging.
             if fetches >= 15 and fetches < 18:
                 if fetches == 15:
-                    fetchwarning = self.ui.config('remotefilelog',
-                                                  'fetchwarning')
+                    fetchwarning = self.ui.config(
+                        b'remotefilelog', b'fetchwarning'
+                    )
                     if fetchwarning:
-                        self.ui.warn(fetchwarning + '\n')
+                        self.ui.warn(fetchwarning + b'\n')
                 self.logstacktrace()
             missingids = [(file, hex(id)) for file, id in sorted(missingids)]
             fetched += len(missingids)
             start = time.time()
             missingids = self.request(missingids)
             if missingids:
-                raise error.Abort(_("unable to download %d files") %
-                                  len(missingids))
+                raise error.Abort(
+                    _(b"unable to download %d files") % len(missingids)
+                )
             fetchcost += time.time() - start
             self._lfsprefetch(fileids)
 
     def _lfsprefetch(self, fileids):
         if not _lfsmod or not util.safehasattr(
-                self.repo.svfs, 'lfslocalblobstore'):
+            self.repo.svfs, b'lfslocalblobstore'
+        ):
             return
         if not _lfsmod.wrapper.candownload(self.repo):
             return
@@ -569,7 +648,7 @@
             node = bin(id)
             rlog = self.repo.file(file)
             if rlog.flags(node) & revlog.REVIDX_EXTSTORED:
-                text = rlog.revision(node, raw=True)
+                text = rlog.rawdata(node)
                 p = _lfsmod.pointer.deserialize(text)
                 oid = p.oid()
                 if not store.has(oid):
@@ -580,5 +659,9 @@
 
     def logstacktrace(self):
         import traceback
-        self.ui.log('remotefilelog', 'excess remotefilelog fetching:\n%s\n',
-                    ''.join(traceback.format_stack()))
+
+        self.ui.log(
+            b'remotefilelog',
+            b'excess remotefilelog fetching:\n%s\n',
+            b''.join(traceback.format_stack()),
+        )
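
The fileserverclient hunks above byteify the cache-process protocol without
changing its framing: a ``get`` request is ``b"get\n<count>\n"`` followed by
one cache key per line, and a key combines the repo name, a two-level split
of the sha1 of the file path, and the revision id. A minimal Python 3 sketch
of that framing; ``buildcacherequest`` is an illustrative name and
``getcachekey`` is paraphrased from the extension::

    import hashlib
    import os

    def getcachekey(reponame, file, id):
        # Paraphrase of the key scheme used above: repo name, a two-level
        # split of the sha1 of the (bytes) path, then the hex revision id.
        pathhash = hashlib.sha1(file).hexdigest().encode('ascii')
        return os.path.join(reponame, pathhash[:2], pathhash[2:], id)

    def buildcacherequest(reponame, fileids, includepath=False):
        # Mirrors the byteified assembly in request(): b"get\n<count>\n",
        # then one key per line, each optionally preceded by the
        # NUL-terminated path when cacheprocess.includepath is set.
        request = b"get\n%d\n" % len(fileids)
        idmap = {}
        for file, id in fileids:
            fullid = getcachekey(reponame, file, id)
            if includepath:
                request += file + b'\0'
            request += fullid + b"\n"
            idmap[fullid] = file
        return request, idmap

Fed to the fake in-process cache defined above, such a request is answered
with every key reported missing, followed by the ``0`` terminator.
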
--- a/hgext/remotefilelog/historypack.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/historypack.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,21 +15,21 @@
 )
 
 # (filename hash, offset, size)
-INDEXFORMAT2 = '!20sQQII'
+INDEXFORMAT2 = b'!20sQQII'
 INDEXENTRYLENGTH2 = struct.calcsize(INDEXFORMAT2)
 NODELENGTH = 20
 
-NODEINDEXFORMAT = '!20sQ'
+NODEINDEXFORMAT = b'!20sQ'
 NODEINDEXENTRYLENGTH = struct.calcsize(NODEINDEXFORMAT)
 
 # (node, p1, p2, linknode)
-PACKFORMAT = "!20s20s20s20sH"
+PACKFORMAT = b"!20s20s20s20sH"
 PACKENTRYLENGTH = 82
 
 ENTRYCOUNTSIZE = 4
 
-INDEXSUFFIX = '.histidx'
-PACKSUFFIX = '.histpack'
+INDEXSUFFIX = b'.histidx'
+PACKSUFFIX = b'.histpack'
 
 ANC_NODE = 0
 ANC_P1NODE = 1
@@ -37,6 +37,7 @@
 ANC_LINKNODE = 3
 ANC_COPYFROM = 4
 
+
 class historypackstore(basepack.basepackstore):
     INDEXSUFFIX = INDEXSUFFIX
     PACKSUFFIX = PACKSUFFIX
@@ -75,8 +76,10 @@
         raise KeyError((name, node))
 
     def add(self, filename, node, p1, p2, linknode, copyfrom):
-        raise RuntimeError("cannot add to historypackstore (%s:%s)"
-                           % (filename, hex(node)))
+        raise RuntimeError(
+            b"cannot add to historypackstore (%s:%s)" % (filename, hex(node))
+        )
+
 
 class historypack(basepack.basepack):
     INDEXSUFFIX = INDEXSUFFIX
@@ -153,17 +156,20 @@
 
     def _readentry(self, offset):
         data = self._data
-        entry = struct.unpack(PACKFORMAT, data[offset:offset + PACKENTRYLENGTH])
+        entry = struct.unpack(
+            PACKFORMAT, data[offset : offset + PACKENTRYLENGTH]
+        )
         copyfrom = None
         copyfromlen = entry[ANC_COPYFROM]
         if copyfromlen != 0:
             offset += PACKENTRYLENGTH
-            copyfrom = data[offset:offset + copyfromlen]
+            copyfrom = data[offset : offset + copyfromlen]
         return entry, copyfrom
 
     def add(self, filename, node, p1, p2, linknode, copyfrom):
-        raise RuntimeError("cannot add to historypack (%s:%s)" %
-                           (filename, hex(node)))
+        raise RuntimeError(
+            b"cannot add to historypack (%s:%s)" % (filename, hex(node))
+        )
 
     def _findnode(self, name, node):
         if self.VERSION == 0:
@@ -174,9 +180,12 @@
         else:
             section = self._findsection(name)
             nodeindexoffset, nodeindexsize = section[3:]
-            entry = self._bisect(node, nodeindexoffset,
-                                 nodeindexoffset + nodeindexsize,
-                                 NODEINDEXENTRYLENGTH)
+            entry = self._bisect(
+                node,
+                nodeindexoffset,
+                nodeindexoffset + nodeindexsize,
+                NODEINDEXENTRYLENGTH,
+            )
             if entry is not None:
                 node, offset = struct.unpack(NODEINDEXFORMAT, entry)
                 entry, copyfrom = self._readentry(offset)
@@ -184,13 +193,14 @@
                 # with the copyfrom string.
                 return entry[:4] + (copyfrom,)
 
-        raise KeyError("unable to find history for %s:%s" % (name, hex(node)))
+        raise KeyError(b"unable to find history for %s:%s" % (name, hex(node)))
 
     def _findsection(self, name):
         params = self.params
         namehash = hashlib.sha1(name).digest()
-        fanoutkey = struct.unpack(params.fanoutstruct,
-                                  namehash[:params.fanoutprefix])[0]
+        fanoutkey = struct.unpack(
+            params.fanoutstruct, namehash[: params.fanoutprefix]
+        )[0]
         fanout = self._fanouttable
 
         start = fanout[fanoutkey] + params.indexstart
@@ -209,54 +219,65 @@
 
         rawentry = struct.unpack(self.INDEXFORMAT, entry)
         x, offset, size, nodeindexoffset, nodeindexsize = rawentry
-        rawnamelen = self._index[nodeindexoffset:nodeindexoffset +
-                                                 constants.FILENAMESIZE]
-        actualnamelen = struct.unpack('!H', rawnamelen)[0]
+        rawnamelen = self._index[
+            nodeindexoffset : nodeindexoffset + constants.FILENAMESIZE
+        ]
+        actualnamelen = struct.unpack(b'!H', rawnamelen)[0]
         nodeindexoffset += constants.FILENAMESIZE
-        actualname = self._index[nodeindexoffset:nodeindexoffset +
-                                                 actualnamelen]
+        actualname = self._index[
+            nodeindexoffset : nodeindexoffset + actualnamelen
+        ]
         if actualname != name:
-            raise KeyError("found file name %s when looking for %s" %
-                           (actualname, name))
+            raise KeyError(
+                b"found file name %s when looking for %s" % (actualname, name)
+            )
         nodeindexoffset += actualnamelen
 
-        filenamelength = struct.unpack('!H', self._data[offset:offset +
-                                                    constants.FILENAMESIZE])[0]
+        filenamelength = struct.unpack(
+            b'!H', self._data[offset : offset + constants.FILENAMESIZE]
+        )[0]
         offset += constants.FILENAMESIZE
 
-        actualname = self._data[offset:offset + filenamelength]
+        actualname = self._data[offset : offset + filenamelength]
         offset += filenamelength
 
         if name != actualname:
-            raise KeyError("found file name %s when looking for %s" %
-                           (actualname, name))
+            raise KeyError(
+                b"found file name %s when looking for %s" % (actualname, name)
+            )
 
         # Skip entry list size
         offset += ENTRYCOUNTSIZE
 
         nodelistoffset = offset
-        nodelistsize = (size - constants.FILENAMESIZE - filenamelength -
-                        ENTRYCOUNTSIZE)
-        return (name, nodelistoffset, nodelistsize,
-                nodeindexoffset, nodeindexsize)
+        nodelistsize = (
+            size - constants.FILENAMESIZE - filenamelength - ENTRYCOUNTSIZE
+        )
+        return (
+            name,
+            nodelistoffset,
+            nodelistsize,
+            nodeindexoffset,
+            nodeindexsize,
+        )
 
     def _bisect(self, node, start, end, entrylen):
         # Bisect between start and end to find node
         origstart = start
-        startnode = self._index[start:start + NODELENGTH]
-        endnode = self._index[end:end + NODELENGTH]
+        startnode = self._index[start : start + NODELENGTH]
+        endnode = self._index[end : end + NODELENGTH]
 
         if startnode == node:
-            return self._index[start:start + entrylen]
+            return self._index[start : start + entrylen]
         elif endnode == node:
-            return self._index[end:end + entrylen]
+            return self._index[end : end + entrylen]
         else:
             while start < end - entrylen:
                 mid = start + (end - start) // 2
                 mid = mid - ((mid - origstart) % entrylen)
-                midnode = self._index[mid:mid + NODELENGTH]
+                midnode = self._index[mid : mid + NODELENGTH]
                 if midnode == node:
-                    return self._index[mid:mid + entrylen]
+                    return self._index[mid : mid + entrylen]
                 if node > midnode:
                     start = mid
                 elif node < midnode:
@@ -270,8 +291,9 @@
     def cleanup(self, ledger):
         entries = ledger.sources.get(self, [])
         allkeys = set(self)
-        repackedkeys = set((e.filename, e.node) for e in entries if
-                           e.historyrepacked)
+        repackedkeys = set(
+            (e.filename, e.node) for e in entries if e.historyrepacked
+        )
 
         if len(allkeys - repackedkeys) == 0:
             if self.path not in ledger.created:
@@ -288,32 +310,42 @@
         while offset < self.datasize:
             data = self._data
             # <2 byte len> + <filename>
-            filenamelen = struct.unpack('!H', data[offset:offset +
-                                                   constants.FILENAMESIZE])[0]
+            filenamelen = struct.unpack(
+                b'!H', data[offset : offset + constants.FILENAMESIZE]
+            )[0]
             offset += constants.FILENAMESIZE
-            filename = data[offset:offset + filenamelen]
+            filename = data[offset : offset + filenamelen]
             offset += filenamelen
 
-            revcount = struct.unpack('!I', data[offset:offset +
-                                                ENTRYCOUNTSIZE])[0]
+            revcount = struct.unpack(
+                b'!I', data[offset : offset + ENTRYCOUNTSIZE]
+            )[0]
             offset += ENTRYCOUNTSIZE
 
             for i in pycompat.xrange(revcount):
-                entry = struct.unpack(PACKFORMAT, data[offset:offset +
-                                                              PACKENTRYLENGTH])
+                entry = struct.unpack(
+                    PACKFORMAT, data[offset : offset + PACKENTRYLENGTH]
+                )
                 offset += PACKENTRYLENGTH
 
-                copyfrom = data[offset:offset + entry[ANC_COPYFROM]]
+                copyfrom = data[offset : offset + entry[ANC_COPYFROM]]
                 offset += entry[ANC_COPYFROM]
 
-                yield (filename, entry[ANC_NODE], entry[ANC_P1NODE],
-                        entry[ANC_P2NODE], entry[ANC_LINKNODE], copyfrom)
+                yield (
+                    filename,
+                    entry[ANC_NODE],
+                    entry[ANC_P1NODE],
+                    entry[ANC_P2NODE],
+                    entry[ANC_LINKNODE],
+                    copyfrom,
+                )
 
                 self._pagedin += PACKENTRYLENGTH
 
             # If we've read a lot of data from the mmap, free some memory.
             self.freememory()
 
+
 class mutablehistorypack(basepack.mutablebasepack):
     """A class for constructing and serializing a histpack file and index.
 
@@ -389,6 +421,7 @@
 
     [1]: new in version 1.
     """
+
     INDEXSUFFIX = INDEXSUFFIX
     PACKSUFFIX = PACKSUFFIX
 
@@ -407,12 +440,11 @@
         self.NODEINDEXENTRYLENGTH = NODEINDEXENTRYLENGTH
 
     def add(self, filename, node, p1, p2, linknode, copyfrom):
-        copyfrom = copyfrom or ''
-        copyfromlen = struct.pack('!H', len(copyfrom))
-        self.fileentries.setdefault(filename, []).append((node, p1, p2,
-                                                          linknode,
-                                                          copyfromlen,
-                                                          copyfrom))
+        copyfrom = copyfrom or b''
+        copyfromlen = struct.pack(b'!H', len(copyfrom))
+        self.fileentries.setdefault(filename, []).append(
+            (node, p1, p2, linknode, copyfromlen, copyfrom)
+        )
 
     def _write(self):
         for filename in sorted(self.fileentries):
@@ -421,6 +453,7 @@
 
             # Write the file section content
             entrymap = dict((e[0], e) for e in entries)
+
             def parentfunc(node):
                 x, p1, p2, x, x, x = entrymap[node]
                 parents = []
@@ -430,16 +463,21 @@
                     parents.append(p2)
                 return parents
 
-            sortednodes = list(reversed(shallowutil.sortnodes(
-                (e[0] for e in entries),
-                parentfunc)))
+            sortednodes = list(
+                reversed(
+                    shallowutil.sortnodes((e[0] for e in entries), parentfunc)
+                )
+            )
 
             # Write the file section header
-            self.writeraw("%s%s%s" % (
-                struct.pack('!H', len(filename)),
-                filename,
-                struct.pack('!I', len(sortednodes)),
-            ))
+            self.writeraw(
+                b"%s%s%s"
+                % (
+                    struct.pack(b'!H', len(filename)),
+                    filename,
+                    struct.pack(b'!I', len(sortednodes)),
+                )
+            )
 
             sectionlen = constants.FILENAMESIZE + len(filename) + 4
 
@@ -450,11 +488,11 @@
             offset = sectionstart + sectionlen
             for node in sortednodes:
                 locations[node] = offset
-                raw = '%s%s%s%s%s%s' % entrymap[node]
+                raw = b'%s%s%s%s%s%s' % entrymap[node]
                 rawstrings.append(raw)
                 offset += len(raw)
 
-            rawdata = ''.join(rawstrings)
+            rawdata = b''.join(rawstrings)
             sectionlen += len(rawdata)
 
             self.writeraw(rawdata)
@@ -478,14 +516,20 @@
         nodeindexformat = self.NODEINDEXFORMAT
         nodeindexlength = self.NODEINDEXENTRYLENGTH
 
-        files = ((hashlib.sha1(filename).digest(), filename, offset, size)
-                for filename, (offset, size) in self.files.iteritems())
+        files = (
+            (hashlib.sha1(filename).digest(), filename, offset, size)
+            for filename, (offset, size) in pycompat.iteritems(self.files)
+        )
         files = sorted(files)
 
         # node index is after file index size, file index, and node index size
-        indexlensize = struct.calcsize('!Q')
-        nodeindexoffset = (indexoffset + indexlensize +
-                           (len(files) * fileindexlength) + indexlensize)
+        indexlensize = struct.calcsize(b'!Q')
+        nodeindexoffset = (
+            indexoffset
+            + indexlensize
+            + (len(files) * fileindexlength)
+            + indexlensize
+        )
 
         fileindexentries = []
         nodeindexentries = []
@@ -496,22 +540,33 @@
 
             nodeindexsize = len(nodelocations) * nodeindexlength
 
-            rawentry = struct.pack(fileindexformat, namehash, offset, size,
-                                   nodeindexoffset, nodeindexsize)
+            rawentry = struct.pack(
+                fileindexformat,
+                namehash,
+                offset,
+                size,
+                nodeindexoffset,
+                nodeindexsize,
+            )
             # Node index
-            nodeindexentries.append(struct.pack(constants.FILENAMESTRUCT,
-                                                len(filename)) + filename)
+            nodeindexentries.append(
+                struct.pack(constants.FILENAMESTRUCT, len(filename)) + filename
+            )
             nodeindexoffset += constants.FILENAMESIZE + len(filename)
 
-            for node, location in sorted(nodelocations.iteritems()):
-                nodeindexentries.append(struct.pack(nodeindexformat, node,
-                                                    location))
+            for node, location in sorted(pycompat.iteritems(nodelocations)):
+                nodeindexentries.append(
+                    struct.pack(nodeindexformat, node, location)
+                )
                 nodecount += 1
 
             nodeindexoffset += len(nodelocations) * nodeindexlength
 
             fileindexentries.append(rawentry)
 
-        nodecountraw = struct.pack('!Q', nodecount)
-        return (''.join(fileindexentries) + nodecountraw +
-                ''.join(nodeindexentries))
+        nodecountraw = struct.pack(b'!Q', nodecount)
+        return (
+            b''.join(fileindexentries)
+            + nodecountraw
+            + b''.join(nodeindexentries)
+        )
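
The constants at the top of historypack.py pin down the on-disk history
entry layout: four 20-byte nodes plus a two-byte copyfrom length, hence
``PACKENTRYLENGTH = 82``, with the variable-length copyfrom string trailing
the fixed part. A round-trip sketch of that layout; ``packentry`` and
``unpackentry`` are illustrative names, not part of the extension::

    import struct

    PACKFORMAT = b"!20s20s20s20sH"  # node, p1, p2, linknode, len(copyfrom)
    PACKENTRYLENGTH = struct.calcsize(PACKFORMAT)  # 82 bytes

    def packentry(node, p1, p2, linknode, copyfrom=b''):
        # A fixed 82-byte record followed by the copyfrom string, the
        # byte layout that _readentry() above decodes.
        header = struct.pack(PACKFORMAT, node, p1, p2, linknode, len(copyfrom))
        return header + copyfrom

    def unpackentry(data, offset=0):
        entry = struct.unpack(PACKFORMAT, data[offset:offset + PACKENTRYLENGTH])
        offset += PACKENTRYLENGTH
        copyfrom = data[offset:offset + entry[4]] or None  # None when empty
        return entry[:4] + (copyfrom,), offset + entry[4]
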
--- a/hgext/remotefilelog/metadatastore.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/metadatastore.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,6 +6,7 @@
     shallowutil,
 )
 
+
 class unionmetadatastore(basestore.baseunionstore):
     def __init__(self, *args, **kwargs):
         super(unionmetadatastore, self).__init__(*args, **kwargs)
@@ -32,6 +33,7 @@
             return []
 
         ancestors = {}
+
         def traverse(curname, curnode):
             # TODO: this algorithm has the potential to traverse parts of
             # history twice. Ex: with A->B->C->F and A->B->D->F, both D and C
@@ -59,8 +61,9 @@
         while missing:
             curname, curnode = missing.pop()
             try:
-                ancestors.update(self._getpartialancestors(curname, curnode,
-                                                           known=known))
+                ancestors.update(
+                    self._getpartialancestors(curname, curnode, known=known)
+                )
                 newmissing = traverse(curname, curnode)
                 missing.extend(newmissing)
             except KeyError:
@@ -95,8 +98,9 @@
         raise KeyError((name, hex(node)))
 
     def add(self, name, node, data):
-        raise RuntimeError("cannot add content only to remotefilelog "
-                           "contentstore")
+        raise RuntimeError(
+            b"cannot add content only to remotefilelog contentstore"
+        )
 
     def getmissing(self, keys):
         missing = keys
@@ -113,6 +117,7 @@
         metrics = [s.getmetrics() for s in self.stores]
         return shallowutil.sumdicts(*metrics)
 
+
 class remotefilelogmetadatastore(basestore.basestore):
     def getancestors(self, name, node, known=None):
         """Returns as many ancestors as we're aware of.
@@ -130,8 +135,10 @@
         return self.getancestors(name, node)[node]
 
     def add(self, name, node, parents, linknode):
-        raise RuntimeError("cannot add metadata only to remotefilelog "
-                           "metadatastore")
+        raise RuntimeError(
+            b"cannot add metadata only to remotefilelog metadatastore"
+        )
+
 
 class remotemetadatastore(object):
     def __init__(self, ui, fileservice, shared):
@@ -139,15 +146,16 @@
         self._shared = shared
 
     def getancestors(self, name, node, known=None):
-        self._fileservice.prefetch([(name, hex(node))], force=True,
-                                   fetchdata=False, fetchhistory=True)
+        self._fileservice.prefetch(
+            [(name, hex(node))], force=True, fetchdata=False, fetchhistory=True
+        )
         return self._shared.getancestors(name, node, known=known)
 
     def getnodeinfo(self, name, node):
         return self.getancestors(name, node)[node]
 
     def add(self, name, node, data):
-        raise RuntimeError("cannot add to a remote store")
+        raise RuntimeError(b"cannot add to a remote store")
 
     def getmissing(self, keys):
         return keys
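
``getancestors`` in the stores above returns mappings shaped
``{node: (p1, p2, linknode, copyfrom)}``, and the union store grows one by
walking parents until local history runs out. A condensed sketch of that
walk, assuming a ``getentry(node)`` callable standing in for the store
plumbing::

    NULLID = b'\0' * 20  # mercurial.node.nullid

    def collectancestors(getentry, startnode):
        # getentry(node) -> (p1, p2, linknode, copyfrom); raises KeyError
        # when a node's history is not available, as in the stores above.
        ancestors = {}
        queue = [startnode]
        while queue:
            node = queue.pop()
            if node in ancestors:
                continue
            try:
                entry = getentry(node)
            except KeyError:
                continue  # the real code records this and may refetch
            ancestors[node] = entry
            for parent in entry[:2]:
                if parent != NULLID:
                    queue.append(parent)
        return ancestors
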
--- a/hgext/remotefilelog/remotefilectx.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/remotefilectx.py	Mon Oct 21 11:09:48 2019 -0400
@@ -22,15 +22,25 @@
 propertycache = util.propertycache
 FASTLOG_TIMEOUT_IN_SECS = 0.5
 
+
 class remotefilectx(context.filectx):
-    def __init__(self, repo, path, changeid=None, fileid=None,
-                 filelog=None, changectx=None, ancestormap=None):
+    def __init__(
+        self,
+        repo,
+        path,
+        changeid=None,
+        fileid=None,
+        filelog=None,
+        changectx=None,
+        ancestormap=None,
+    ):
         if fileid == nullrev:
             fileid = nullid
         if fileid and len(fileid) == 40:
             fileid = bin(fileid)
-        super(remotefilectx, self).__init__(repo, path, changeid,
-            fileid, filelog, changectx)
+        super(remotefilectx, self).__init__(
+            repo, path, changeid, fileid, filelog, changectx
+        )
         self._ancestormap = ancestormap
 
     def size(self):
@@ -45,8 +55,9 @@
         elif r'_descendantrev' in self.__dict__:
             # this file context was created from a revision with a known
             # descendant, we can (lazily) correct for linkrev aliases
-            linknode = self._adjustlinknode(self._path, self._filelog,
-                                            self._filenode, self._descendantrev)
+            linknode = self._adjustlinknode(
+                self._path, self._filelog, self._filenode, self._descendantrev
+            )
             return self._repo.unfiltered().changelog.rev(linknode)
         else:
             return self.linkrev()
@@ -54,8 +65,13 @@
     def filectx(self, fileid, changeid=None):
         '''opens an arbitrary revision of the file without
         opening a new filelog'''
-        return remotefilectx(self._repo, self._path, fileid=fileid,
-                             filelog=self._filelog, changeid=changeid)
+        return remotefilectx(
+            self._repo,
+            self._path,
+            fileid=fileid,
+            filelog=self._filelog,
+            changeid=changeid,
+        )
 
     def linkrev(self):
         return self._linkrev
@@ -79,8 +95,10 @@
 
         for rev in range(len(cl) - 1, 0, -1):
             node = cl.node(rev)
-            data = cl.read(node) # get changeset data (we avoid object creation)
-            if path in data[3]: # checking the 'files' field.
+            data = cl.read(
+                node
+            )  # get changeset data (we avoid object creation)
+            if path in data[3]:  # checking the 'files' field.
                 # The file has been touched, check if the hash is what we're
                 # looking for.
                 if fileid == mfl[data[0]].readfast().get(path):
@@ -104,9 +122,13 @@
         noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
         if noctx or self.rev() == lkr:
             return lkr
-        linknode = self._adjustlinknode(self._path, self._filelog,
-                                        self._filenode, self.rev(),
-                                        inclusive=True)
+        linknode = self._adjustlinknode(
+            self._path,
+            self._filelog,
+            self._filenode,
+            self.rev(),
+            inclusive=True,
+        )
         return self._repo.changelog.rev(linknode)
 
     def renamed(self):
@@ -155,16 +177,18 @@
         if p1 != nullid:
             path = copyfrom or self._path
             flog = repo.file(path)
-            p1ctx = remotefilectx(repo, path, fileid=p1, filelog=flog,
-                                  ancestormap=ancestormap)
+            p1ctx = remotefilectx(
+                repo, path, fileid=p1, filelog=flog, ancestormap=ancestormap
+            )
             p1ctx._descendantrev = self.rev()
             results.append(p1ctx)
 
         if p2 != nullid:
             path = self._path
             flog = repo.file(path)
-            p2ctx = remotefilectx(repo, path, fileid=p2, filelog=flog,
-                                  ancestormap=ancestormap)
+            p2ctx = remotefilectx(
+                repo, path, fileid=p2, filelog=flog, ancestormap=ancestormap
+            )
             p2ctx._descendantrev = self.rev()
             results.append(p2ctx)
 
@@ -172,7 +196,7 @@
 
     def _nodefromancrev(self, ancrev, cl, mfl, path, fnode):
         """returns the node for <path> in <ancrev> if content matches <fnode>"""
-        ancctx = cl.read(ancrev) # This avoids object creation.
+        ancctx = cl.read(ancrev)  # This avoids object creation.
         manifestnode, files = ancctx[0], ancctx[3]
         # If the file was touched in this ancestor, and the content is similar
         # to the one we are searching for.
@@ -214,7 +238,7 @@
         if srcrev is None:
             # wctx case, used by workingfilectx during mergecopy
             revs = [p.rev() for p in self._repo[None].parents()]
-            inclusive = True # we skipped the real (revless) source
+            inclusive = True  # we skipped the real (revless) source
         else:
             revs = [srcrev]
 
@@ -222,14 +246,14 @@
             return linknode
 
         commonlogkwargs = {
-            r'revs': ' '.join([hex(cl.node(rev)) for rev in revs]),
+            r'revs': b' '.join([hex(cl.node(rev)) for rev in revs]),
             r'fnode': hex(fnode),
             r'filepath': path,
             r'user': shallowutil.getusername(repo.ui),
             r'reponame': shallowutil.getreponame(repo.ui),
         }
 
-        repo.ui.log('linkrevfixup', 'adjusting linknode\n', **commonlogkwargs)
+        repo.ui.log(b'linkrevfixup', b'adjusting linknode\n', **commonlogkwargs)
 
         pc = repo._phasecache
         seenpublic = False
@@ -247,16 +271,16 @@
                 # TODO: there used to be a codepath to fetch linknodes
                 # from a server as a fast path, but it appeared to
                 # depend on an API FB added to their phabricator.
-                lnode = self._forceprefetch(repo, path, fnode, revs,
-                                            commonlogkwargs)
+                lnode = self._forceprefetch(
+                    repo, path, fnode, revs, commonlogkwargs
+                )
                 if lnode:
                     return lnode
                 seenpublic = True
 
         return linknode
 
-    def _forceprefetch(self, repo, path, fnode, revs,
-                       commonlogkwargs):
+    def _forceprefetch(self, repo, path, fnode, revs, commonlogkwargs):
         # This next part is super non-obvious, so big comment block time!
         #
         # It is possible to get extremely bad performance here when a fairly
@@ -298,7 +322,7 @@
         # the slow path is used too much. One promising possibility is using
         # obsolescence markers to find a more-likely-correct linkrev.
 
-        logmsg = ''
+        logmsg = b''
         start = time.time()
         try:
             repo.fileservice.prefetch([(path, hex(fnode))], force=True)
@@ -307,19 +331,23 @@
             # we need to rebuild the ancestor map to recompute the
             # linknodes.
             self._ancestormap = None
-            linknode = self.ancestormap()[fnode][2] # 2 is linknode
+            linknode = self.ancestormap()[fnode][2]  # 2 is linknode
             if self._verifylinknode(revs, linknode):
-                logmsg = 'remotefilelog prefetching succeeded'
+                logmsg = b'remotefilelog prefetching succeeded'
                 return linknode
-            logmsg = 'remotefilelog prefetching not found'
+            logmsg = b'remotefilelog prefetching not found'
             return None
         except Exception as e:
-            logmsg = 'remotefilelog prefetching failed (%s)' % e
+            logmsg = b'remotefilelog prefetching failed (%s)' % e
             return None
         finally:
             elapsed = time.time() - start
-            repo.ui.log('linkrevfixup', logmsg + '\n', elapsed=elapsed * 1000,
-                        **commonlogkwargs)
+            repo.ui.log(
+                b'linkrevfixup',
+                logmsg + b'\n',
+                elapsed=elapsed * 1000,
+                **commonlogkwargs
+            )
 
     def _verifylinknode(self, revs, linknode):
         """
@@ -370,7 +398,7 @@
 
         # Sort by linkrev
         # The copy tracing algorithm depends on these coming out in order
-        ancestors = sorted(ancestors, reverse=True, key=lambda x:x.linkrev())
+        ancestors = sorted(ancestors, reverse=True, key=lambda x: x.linkrev())
 
         for ancestor in ancestors:
             yield ancestor
@@ -404,8 +432,7 @@
         result = ancestor.genericancestor(a, b, parents)
         if result:
             f, n = result
-            r = remotefilectx(self._repo, f, fileid=n,
-                                 ancestormap=amap)
+            r = remotefilectx(self._repo, f, fileid=n, ancestormap=amap)
             return r
 
         return None
@@ -417,11 +444,14 @@
             # use introrev so prefetchskip can be accurately tested
             introrev = self.introrev()
             if self.rev() != introrev:
-                introctx = remotefilectx(self._repo, self._path,
-                                         changeid=introrev,
-                                         fileid=self._filenode,
-                                         filelog=self._filelog,
-                                         ancestormap=self._ancestormap)
+                introctx = remotefilectx(
+                    self._repo,
+                    self._path,
+                    changeid=introrev,
+                    fileid=self._filenode,
+                    filelog=self._filelog,
+                    ancestormap=self._ancestormap,
+                )
 
         # like self.ancestors, but append to "fetch" and skip visiting parents
         # of nodes in "prefetchskip".
@@ -442,8 +472,10 @@
                     seen.add(parent.node())
                     queue.append(parent)
 
-        self._repo.ui.debug('remotefilelog: prefetching %d files '
-                            'for annotate\n' % len(fetch))
+        self._repo.ui.debug(
+            b'remotefilelog: prefetching %d files '
+            b'for annotate\n' % len(fetch)
+        )
         if fetch:
             self._repo.fileservice.prefetch(fetch)
         return super(remotefilectx, self).annotate(*args, **kwargs)
@@ -452,11 +484,13 @@
     def children(self):
         return []
 
+
 class remoteworkingfilectx(context.workingfilectx, remotefilectx):
     def __init__(self, repo, path, filelog=None, workingctx=None):
         self._ancestormap = None
-        super(remoteworkingfilectx, self).__init__(repo, path, filelog,
-                                                   workingctx)
+        super(remoteworkingfilectx, self).__init__(
+            repo, path, filelog, workingctx
+        )
 
     def parents(self):
         return remotefilectx.parents(self)
@@ -485,7 +519,7 @@
                 p2ctx = self._repo.filectx(p2[0], fileid=p2[1])
                 m.update(p2ctx.filelog().ancestormap(p2[1]))
 
-            copyfrom = ''
+            copyfrom = b''
             if renamed:
                 copyfrom = renamed[0]
             m[None] = (p1[1], p2[1], nullid, copyfrom)
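
The remotefilectx hunks above index ancestormap entries positionally
(``# 2 is linknode``): each value is ``(p1, p2, linknode, copyfrom)``, and
``parents()`` moves p1 to the copy source path when a rename is recorded.
A small sketch of that lookup; ``parentsfromancestormap`` is a hypothetical
helper::

    NULLID = b'\0' * 20  # mercurial.node.nullid

    def parentsfromancestormap(ancestormap, fnode, path):
        # ancestormap: {filenode: (p1, p2, linknode, copyfrom)}.  As in
        # parents() above, a recorded rename makes p1 live at the old path.
        p1, p2, linknode, copyfrom = ancestormap[fnode]
        results = []
        if p1 != NULLID:
            results.append((copyfrom or path, p1))
        if p2 != NULLID:
            results.append((path, p2))
        return results
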
--- a/hgext/remotefilelog/remotefilelog.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/remotefilelog.py	Mon Oct 21 11:09:48 2019 -0400
@@ -21,9 +21,12 @@
     ancestor,
     error,
     mdiff,
+    pycompat,
     revlog,
+    util,
 )
 from mercurial.utils import storageutil
+from mercurial.revlogutils import flagutil
 
 from . import (
     constants,
@@ -31,6 +34,7 @@
     shallowutil,
 )
 
+
 class remotefilelognodemap(object):
     def __init__(self, filename, store):
         self._filename = filename
@@ -45,9 +49,11 @@
             raise KeyError(node)
         return node
 
+
 class remotefilelog(object):
 
     _generaldelta = True
+    _flagserrorclass = error.RevlogError
 
     def __init__(self, opener, path, repo):
         self.opener = opener
@@ -57,34 +63,38 @@
 
         self.version = 1
 
+        self._flagprocessors = dict(flagutil.flagprocessors)
+
     def read(self, node):
         """returns the file contents at this node"""
         t = self.revision(node)
-        if not t.startswith('\1\n'):
+        if not t.startswith(b'\1\n'):
             return t
-        s = t.index('\1\n', 2)
-        return t[s + 2:]
+        s = t.index(b'\1\n', 2)
+        return t[s + 2 :]
 
     def add(self, text, meta, transaction, linknode, p1=None, p2=None):
         # hash with the metadata, like in vanilla filelogs
-        hashtext = shallowutil.createrevlogtext(text, meta.get('copy'),
-                                                meta.get('copyrev'))
+        hashtext = shallowutil.createrevlogtext(
+            text, meta.get(b'copy'), meta.get(b'copyrev')
+        )
         node = storageutil.hashrevisionsha1(hashtext, p1, p2)
-        return self.addrevision(hashtext, transaction, linknode, p1, p2,
-                                node=node)
+        return self.addrevision(
+            hashtext, transaction, linknode, p1, p2, node=node
+        )
 
     def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
         # text passed to "_createfileblob" does not include filelog metadata
         header = shallowutil.buildfileblobheader(len(text), flags)
-        data = "%s\0%s" % (header, text)
+        data = b"%s\0%s" % (header, text)
 
         realp1 = p1
-        copyfrom = ""
-        if meta and 'copy' in meta:
-            copyfrom = meta['copy']
-            realp1 = bin(meta['copyrev'])
+        copyfrom = b""
+        if meta and b'copy' in meta:
+            copyfrom = meta[b'copy']
+            realp1 = bin(meta[b'copyrev'])
 
-        data += "%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)
+        data += b"%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)
 
         visited = set()
 
@@ -103,16 +113,21 @@
             queue.append(p2)
             visited.add(p2)
 
-        ancestortext = ""
+        ancestortext = b""
 
         # add the ancestors in topological order
         while queue:
             c = queue.pop(0)
             pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]
 
-            pacopyfrom = pacopyfrom or ''
-            ancestortext += "%s%s%s%s%s\0" % (
-                c, pa1, pa2, ancestorlinknode, pacopyfrom)
+            pacopyfrom = pacopyfrom or b''
+            ancestortext += b"%s%s%s%s%s\0" % (
+                c,
+                pa1,
+                pa2,
+                ancestorlinknode,
+                pacopyfrom,
+            )
 
             if pa1 != nullid and pa1 not in visited:
                 queue.append(pa1)
@@ -125,20 +140,52 @@
 
         return data
 
-    def addrevision(self, text, transaction, linknode, p1, p2, cachedelta=None,
-                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS):
+    def addrevision(
+        self,
+        text,
+        transaction,
+        linknode,
+        p1,
+        p2,
+        cachedelta=None,
+        node=None,
+        flags=revlog.REVIDX_DEFAULT_FLAGS,
+        sidedata=None,
+    ):
         # text passed to "addrevision" includes hg filelog metadata header
         if node is None:
             node = storageutil.hashrevisionsha1(text, p1, p2)
+        if sidedata is None:
+            sidedata = {}
 
         meta, metaoffset = storageutil.parsemeta(text)
-        rawtext, validatehash = self._processflags(text, flags, 'write')
-        return self.addrawrevision(rawtext, transaction, linknode, p1, p2,
-                                   node, flags, cachedelta,
-                                   _metatuple=(meta, metaoffset))
+        rawtext, validatehash = flagutil.processflagswrite(
+            self, text, flags, sidedata=sidedata
+        )
+        return self.addrawrevision(
+            rawtext,
+            transaction,
+            linknode,
+            p1,
+            p2,
+            node,
+            flags,
+            cachedelta,
+            _metatuple=(meta, metaoffset),
+        )
 
-    def addrawrevision(self, rawtext, transaction, linknode, p1, p2, node,
-                       flags, cachedelta=None, _metatuple=None):
+    def addrawrevision(
+        self,
+        rawtext,
+        transaction,
+        linknode,
+        p1,
+        p2,
+        node,
+        flags,
+        cachedelta=None,
+        _metatuple=None,
+    ):
         if _metatuple:
             # _metatuple: used by "addrevision" internally by remotefilelog
             # meta was parsed confidently
@@ -164,8 +211,9 @@
             blobtext = rawtext[metaoffset:]
         else:
             blobtext = rawtext
-        data = self._createfileblob(blobtext, meta, flags, p1, p2, node,
-                                    linknode)
+        data = self._createfileblob(
+            blobtext, meta, flags, p1, p2, node, linknode
+        )
         self.repo.contentstore.addremotefilelognode(self.filename, node, data)
 
         return node
@@ -202,14 +250,14 @@
     __bool__ = __nonzero__
 
     def __len__(self):
-        if self.filename == '.hgtags':
+        if self.filename == b'.hgtags':
             # The length of .hgtags is used to fast path tag checking.
             # remotefilelog doesn't support .hgtags since the entire .hgtags
             # history is needed.  Use the excludepattern setting to make
             # .hgtags a normal filelog.
             return 0
 
-        raise RuntimeError("len not supported")
+        raise RuntimeError(b"len not supported")
 
     def empty(self):
         return False
@@ -217,7 +265,8 @@
     def flags(self, node):
         if isinstance(node, int):
             raise error.ProgrammingError(
-                'remotefilelog does not accept integer rev for flags')
+                b'remotefilelog does not accept integer rev for flags'
+            )
         store = self.repo.contentstore
         return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
 
@@ -245,9 +294,15 @@
     def linkrev(self, node):
         return self.repo.unfiltered().changelog.rev(self.linknode(node))
 
-    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
-                      assumehaveparentrevisions=False, deltaprevious=False,
-                      deltamode=None):
+    def emitrevisions(
+        self,
+        nodes,
+        nodesorder=None,
+        revisiondata=False,
+        assumehaveparentrevisions=False,
+        deltaprevious=False,
+        deltamode=None,
+    ):
         # we don't use any of these parameters here
         del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
         del deltamode
@@ -262,7 +317,7 @@
                 revision = None
                 delta = self.revdiff(basenode, node)
             else:
-                revision = self.revision(node, raw=True)
+                revision = self.rawdata(node)
                 delta = None
             yield revlog.revlogrevisiondelta(
                 node=node,
@@ -274,18 +329,18 @@
                 baserevisionsize=None,
                 revision=revision,
                 delta=delta,
-                )
+            )
 
     def revdiff(self, node1, node2):
-        return mdiff.textdiff(self.revision(node1, raw=True),
-                              self.revision(node2, raw=True))
+        return mdiff.textdiff(self.rawdata(node1), self.rawdata(node2))
 
     def lookup(self, node):
         if len(node) == 40:
             node = bin(node)
         if len(node) != 20:
-            raise error.LookupError(node, self.filename,
-                                    _('invalid lookup input'))
+            raise error.LookupError(
+                node, self.filename, _(b'invalid lookup input')
+            )
 
         return node
 
@@ -297,9 +352,21 @@
         # This is a hack.
         if isinstance(rev, int):
             raise error.ProgrammingError(
-                'remotefilelog does not convert integer rev to node')
+                b'remotefilelog does not convert integer rev to node'
+            )
         return rev
 
+    def _processflags(self, text, flags, operation, raw=False):
+        """deprecated entry point to access flag processors"""
+        msg = b'_processflag(...) use the specialized variant'
+        util.nouideprecwarn(msg, b'5.2', stacklevel=2)
+        if raw:
+            return text, flagutil.processflagsraw(self, text, flags)
+        elif operation == b'read':
+            return flagutil.processflagsread(self, text, flags)
+        else:  # write operation
+            return flagutil.processflagswrite(self, text, flags)
+
     def revision(self, node, raw=False):
         """returns the revlog contents at this node.
         this includes the meta data traditionally included in file revlogs.
@@ -307,10 +374,11 @@
         hg clients.
         """
         if node == nullid:
-            return ""
+            return b""
         if len(node) != 20:
-            raise error.LookupError(node, self.filename,
-                                    _('invalid revision input'))
+            raise error.LookupError(
+                node, self.filename, _(b'invalid revision input')
+            )
         if node == wdirid or node in wdirfilenodeids:
             raise error.WdirUnsupported
 
@@ -321,37 +389,18 @@
         flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
         if flags == 0:
             return rawtext
-        text, verifyhash = self._processflags(rawtext, flags, 'read')
-        return text
+        return flagutil.processflagsread(self, rawtext, flags)[0]
 
-    def _processflags(self, text, flags, operation, raw=False):
-        # mostly copied from hg/mercurial/revlog.py
-        validatehash = True
-        orderedflags = revlog.REVIDX_FLAGS_ORDER
-        if operation == 'write':
-            orderedflags = reversed(orderedflags)
-        for flag in orderedflags:
-            if flag & flags:
-                vhash = True
-                if flag not in revlog._flagprocessors:
-                    message = _("missing processor for flag '%#x'") % (flag)
-                    raise revlog.RevlogError(message)
-                readfunc, writefunc, rawfunc = revlog._flagprocessors[flag]
-                if raw:
-                    vhash = rawfunc(self, text)
-                elif operation == 'read':
-                    text, vhash = readfunc(self, text)
-                elif operation == 'write':
-                    text, vhash = writefunc(self, text)
-                validatehash = validatehash and vhash
-        return text, validatehash
+    def rawdata(self, node):
+        return self.revision(node, raw=True)
 
     def _read(self, id):
         """reads the raw file blob from disk, cache, or server"""
         fileservice = self.repo.fileservice
         localcache = fileservice.localcache
-        cachekey = fileserverclient.getcachekey(self.repo.name, self.filename,
-                                                id)
+        cachekey = fileserverclient.getcachekey(
+            self.repo.name, self.filename, id
+        )
         try:
             return localcache.read(cachekey)
         except KeyError:
@@ -370,7 +419,7 @@
         except KeyError:
             pass
 
-        raise error.LookupError(id, self.filename, _('no node'))
+        raise error.LookupError(id, self.filename, _(b'no node'))
 
     def ancestormap(self, node):
         return self.repo.metadatastore.getancestors(self.filename, node)
@@ -380,7 +429,7 @@
             return nullid
 
         revmap, parentfunc = self._buildrevgraph(a, b)
-        nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
+        nodemap = dict(((v, k) for (k, v) in pycompat.iteritems(revmap)))
 
         ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
         if ancs:
@@ -395,7 +444,7 @@
             return nullid
 
         revmap, parentfunc = self._buildrevgraph(a, b)
-        nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
+        nodemap = dict(((v, k) for (k, v) in pycompat.iteritems(revmap)))
 
         ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
         return map(nodemap.__getitem__, ancs)
@@ -411,7 +460,7 @@
         parentsmap = collections.defaultdict(list)
         allparents = set()
         for mapping in (amap, bmap):
-            for node, pdata in mapping.iteritems():
+            for node, pdata in pycompat.iteritems(mapping):
                 parents = parentsmap[node]
                 p1, p2, linknode, copyfrom = pdata
                 # Don't follow renames (copyfrom).
@@ -426,8 +475,9 @@
         # Breadth first traversal to build linkrev graph
         parentrevs = collections.defaultdict(list)
         revmap = {}
-        queue = collections.deque(((None, n) for n in parentsmap
-                 if n not in allparents))
+        queue = collections.deque(
+            ((None, n) for n in parentsmap if n not in allparents)
+        )
         while queue:
             prevrev, current = queue.pop()
             if current in revmap:
--- a/hgext/remotefilelog/remotefilelogserver.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/remotefilelogserver.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,6 +14,7 @@
 
 from mercurial.i18n import _
 from mercurial.node import bin, hex, nullid
+from mercurial.pycompat import open
 from mercurial import (
     changegroup,
     changelog,
@@ -21,6 +22,7 @@
     error,
     extensions,
     match,
+    pycompat,
     store,
     streamclone,
     util,
@@ -28,45 +30,53 @@
     wireprototypes,
     wireprotov1server,
 )
-from .  import (
+from . import (
     constants,
     shallowutil,
 )
 
 _sshv1server = wireprotoserver.sshv1protocolhandler
 
+
 def setupserver(ui, repo):
     """Sets up a normal Mercurial repo so it can serve files to shallow repos.
     """
     onetimesetup(ui)
 
     # don't send files to shallow clients during pulls
-    def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source,
-                      *args, **kwargs):
+    def generatefiles(
+        orig, self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
+    ):
         caps = self._bundlecaps or []
         if constants.BUNDLE2_CAPABLITY in caps:
             # only send files that don't match the specified patterns
             includepattern = None
             excludepattern = None
-            for cap in (self._bundlecaps or []):
-                if cap.startswith("includepattern="):
-                    includepattern = cap[len("includepattern="):].split('\0')
-                elif cap.startswith("excludepattern="):
-                    excludepattern = cap[len("excludepattern="):].split('\0')
+            for cap in self._bundlecaps or []:
+                if cap.startswith(b"includepattern="):
+                    includepattern = cap[len(b"includepattern=") :].split(b'\0')
+                elif cap.startswith(b"excludepattern="):
+                    excludepattern = cap[len(b"excludepattern=") :].split(b'\0')
 
             m = match.always()
             if includepattern or excludepattern:
-                m = match.match(repo.root, '', None,
-                    includepattern, excludepattern)
+                m = match.match(
+                    repo.root, b'', None, includepattern, excludepattern
+                )
 
             changedfiles = list([f for f in changedfiles if not m(f)])
-        return orig(self, changedfiles, linknodes, commonrevs, source,
-                    *args, **kwargs)
+        return orig(
+            self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
+        )
 
     extensions.wrapfunction(
-        changegroup.cgpacker, 'generatefiles', generatefiles)
+        changegroup.cgpacker, b'generatefiles', generatefiles
+    )
+
 
 onetime = False
+
+
 def onetimesetup(ui):
     """Configures the wireprotocol for both clients and servers.
     """
@@ -77,27 +87,31 @@
 
     # support file content requests
     wireprotov1server.wireprotocommand(
-        'x_rfl_getflogheads', 'path', permission='pull')(getflogheads)
+        b'x_rfl_getflogheads', b'path', permission=b'pull'
+    )(getflogheads)
     wireprotov1server.wireprotocommand(
-        'x_rfl_getfiles', '', permission='pull')(getfiles)
+        b'x_rfl_getfiles', b'', permission=b'pull'
+    )(getfiles)
     wireprotov1server.wireprotocommand(
-        'x_rfl_getfile', 'file node', permission='pull')(getfile)
+        b'x_rfl_getfile', b'file node', permission=b'pull'
+    )(getfile)
 
     class streamstate(object):
         match = None
         shallowremote = False
         noflatmf = False
+
     state = streamstate()
 
     def stream_out_shallow(repo, proto, other):
         includepattern = None
         excludepattern = None
-        raw = other.get('includepattern')
+        raw = other.get(b'includepattern')
         if raw:
-            includepattern = raw.split('\0')
-        raw = other.get('excludepattern')
+            includepattern = raw.split(b'\0')
+        raw = other.get(b'excludepattern')
         if raw:
-            excludepattern = raw.split('\0')
+            excludepattern = raw.split(b'\0')
 
         oldshallow = state.shallowremote
         oldmatch = state.match
@@ -105,28 +119,34 @@
         try:
             state.shallowremote = True
             state.match = match.always()
-            state.noflatmf = other.get('noflatmanifest') == 'True'
+            state.noflatmf = other.get(b'noflatmanifest') == b'True'
             if includepattern or excludepattern:
-                state.match = match.match(repo.root, '', None,
-                    includepattern, excludepattern)
+                state.match = match.match(
+                    repo.root, b'', None, includepattern, excludepattern
+                )
             streamres = wireprotov1server.stream(repo, proto)
 
             # Force the first value to execute, so the file list is computed
             # within the try/finally scope
             first = next(streamres.gen)
             second = next(streamres.gen)
+
             def gen():
                 yield first
                 yield second
                 for value in streamres.gen:
                     yield value
+
             return wireprototypes.streamres(gen())
         finally:
             state.shallowremote = oldshallow
             state.match = oldmatch
             state.noflatmf = oldnoflatmf
 
-    wireprotov1server.commands['stream_out_shallow'] = (stream_out_shallow, '*')
+    wireprotov1server.commands[b'stream_out_shallow'] = (
+        stream_out_shallow,
+        b'*',
+    )
 
     # don't clone filelogs to shallow clients
     def _walkstreamfiles(orig, repo, matcher=None):
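`stream_out_shallow` above deliberately pulls the first two values of the stream generator inside the try/finally, so the file list is computed while the shallow state is still installed, then re-yields them ahead of the untouched remainder. The trick in isolation (illustrative helper, not remotefilelog API):

def eagerprefix(gen, n=2):
    # Force the first n values now; yield them (and the rest) later.
    head = [next(gen) for _ in range(n)]
    def wrapped():
        for value in head:
            yield value
        for value in gen:
            yield value
    return wrapped()

assert list(eagerprefix(iter([1, 2, 3, 4]))) == [1, 2, 3, 4]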
@@ -135,22 +155,25 @@
             if shallowutil.isenabled(repo):
                 striplen = len(repo.store.path) + 1
                 readdir = repo.store.rawvfs.readdir
-                visit = [os.path.join(repo.store.path, 'data')]
+                visit = [os.path.join(repo.store.path, b'data')]
                 while visit:
                     p = visit.pop()
                     for f, kind, st in readdir(p, stat=True):
-                        fp = p + '/' + f
+                        fp = p + b'/' + f
                         if kind == stat.S_IFREG:
-                            if not fp.endswith('.i') and not fp.endswith('.d'):
+                            if not fp.endswith(b'.i') and not fp.endswith(
+                                b'.d'
+                            ):
                                 n = util.pconvert(fp[striplen:])
                                 yield (store.decodedir(n), n, st.st_size)
                         if kind == stat.S_IFDIR:
                             visit.append(fp)
 
-            if 'treemanifest' in repo.requirements:
+            if b'treemanifest' in repo.requirements:
                 for (u, e, s) in repo.store.datafiles():
-                    if (u.startswith('meta/') and
-                        (u.endswith('.i') or u.endswith('.d'))):
+                    if u.startswith(b'meta/') and (
+                        u.endswith(b'.i') or u.endswith(b'.d')
+                    ):
                         yield (u, e, s)
 
             # Return .d and .i files that do not match the shallow pattern
@@ -162,7 +185,7 @@
                         yield (u, e, s)
 
             for x in repo.store.topfiles():
-                if state.noflatmf and x[0][:11] == '00manifest.':
+                if state.noflatmf and x[0][:11] == b'00manifest.':
                     continue
                 yield x
 
@@ -170,44 +193,49 @@
             # don't allow cloning from a shallow repo to a full repo
             # since it would require fetching every version of every
             # file in order to create the revlogs.
-            raise error.Abort(_("Cannot clone from a shallow repo "
-                                "to a full repo."))
+            raise error.Abort(
+                _(b"Cannot clone from a shallow repo to a full repo.")
+            )
         else:
             for x in orig(repo, matcher):
                 yield x
 
-    extensions.wrapfunction(streamclone, '_walkstreamfiles', _walkstreamfiles)
+    extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles)
 
     # expose remotefilelog capabilities
     def _capabilities(orig, repo, proto):
         caps = orig(repo, proto)
-        if (shallowutil.isenabled(repo) or ui.configbool('remotefilelog',
-                                                         'server')):
+        if shallowutil.isenabled(repo) or ui.configbool(
+            b'remotefilelog', b'server'
+        ):
             if isinstance(proto, _sshv1server):
                 # legacy getfiles method which only works over ssh
                 caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES)
-            caps.append('x_rfl_getflogheads')
-            caps.append('x_rfl_getfile')
+            caps.append(b'x_rfl_getflogheads')
+            caps.append(b'x_rfl_getfile')
         return caps
-    extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)
+
+    extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)
 
     def _adjustlinkrev(orig, self, *args, **kwargs):
         # When generating file blobs, taking the real path is too slow on large
         # repos, so force it to just return the linkrev directly.
         repo = self._repo
-        if util.safehasattr(repo, 'forcelinkrev') and repo.forcelinkrev:
+        if util.safehasattr(repo, b'forcelinkrev') and repo.forcelinkrev:
             return self._filelog.linkrev(self._filelog.rev(self._filenode))
         return orig(self, *args, **kwargs)
 
     extensions.wrapfunction(
-        context.basefilectx, '_adjustlinkrev', _adjustlinkrev)
+        context.basefilectx, b'_adjustlinkrev', _adjustlinkrev
+    )
 
     def _iscmd(orig, cmd):
-        if cmd == 'x_rfl_getfiles':
+        if cmd == b'x_rfl_getfiles':
             return False
         return orig(cmd)
 
-    extensions.wrapfunction(wireprotoserver, 'iscmd', _iscmd)
+    extensions.wrapfunction(wireprotoserver, b'iscmd', _iscmd)
+
 
 def _loadfileblob(repo, cachepath, path, node):
     filecachepath = os.path.join(cachepath, path, hex(node))
@@ -234,7 +262,7 @@
 
             f = None
             try:
-                f = util.atomictempfile(filecachepath, "wb")
+                f = util.atomictempfile(filecachepath, b"wb")
                 f.write(text)
             except (IOError, OSError):
                 # Don't abort if the user only has permission to read,
@@ -246,16 +274,18 @@
         finally:
             os.umask(oldumask)
     else:
-        with open(filecachepath, "rb") as f:
+        with open(filecachepath, b"rb") as f:
             text = f.read()
     return text
 
+
 def getflogheads(repo, proto, path):
     """A server api for requesting a filelog's heads
     """
     flog = repo.file(path)
     heads = flog.heads()
-    return '\n'.join((hex(head) for head in heads if head != nullid))
+    return b'\n'.join((hex(head) for head in heads if head != nullid))
+
 
 def getfile(repo, proto, file, node):
     """A server api for requesting a particular version of a file. Can be used
@@ -267,29 +297,30 @@
     createfileblob for its content.
     """
     if shallowutil.isenabled(repo):
-        return '1\0' + _('cannot fetch remote files from shallow repo')
-    cachepath = repo.ui.config("remotefilelog", "servercachepath")
+        return b'1\0' + _(b'cannot fetch remote files from shallow repo')
+    cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
     if not cachepath:
-        cachepath = os.path.join(repo.path, "remotefilelogcache")
+        cachepath = os.path.join(repo.path, b"remotefilelogcache")
     node = bin(node.strip())
     if node == nullid:
-        return '0\0'
-    return '0\0' + _loadfileblob(repo, cachepath, file, node)
+        return b'0\0'
+    return b'0\0' + _loadfileblob(repo, cachepath, file, node)
+
 
 def getfiles(repo, proto):
     """A server api for requesting particular versions of particular files.
     """
     if shallowutil.isenabled(repo):
-        raise error.Abort(_('cannot fetch remote files from shallow repo'))
+        raise error.Abort(_(b'cannot fetch remote files from shallow repo'))
     if not isinstance(proto, _sshv1server):
-        raise error.Abort(_('cannot fetch remote files over non-ssh protocol'))
+        raise error.Abort(_(b'cannot fetch remote files over non-ssh protocol'))
 
     def streamer():
         fin = proto._fin
 
-        cachepath = repo.ui.config("remotefilelog", "servercachepath")
+        cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
         if not cachepath:
-            cachepath = os.path.join(repo.path, "remotefilelogcache")
+            cachepath = os.path.join(repo.path, b"remotefilelogcache")
 
         while True:
             request = fin.readline()[:-1]
@@ -298,20 +329,22 @@
 
             node = bin(request[:40])
             if node == nullid:
-                yield '0\n'
+                yield b'0\n'
                 continue
 
             path = request[40:]
 
             text = _loadfileblob(repo, cachepath, path, node)
 
-            yield '%d\n%s' % (len(text), text)
+            yield b'%d\n%s' % (len(text), text)
 
             # it would be better to only flush after processing a whole batch
             # but currently we don't know if there are more requests coming
             proto._fout.flush()
+
     return wireprototypes.streamres(streamer())
 
+
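The `getfiles` streamer above uses a simple line-oriented framing: each request line is 40 hex characters of node followed by the file path; the reply is `0\n` for the null node, otherwise the blob length, a newline, and the blob. One request/reply turn as a sketch (`handleline` and `loadblob` are hypothetical names):

from binascii import unhexlify

NULLID = b'\0' * 20

def handleline(line, loadblob):
    node = unhexlify(line[:40])
    path = line[40:]
    if node == NULLID:
        return b'0\n'
    text = loadblob(path, node)
    return b'%d\n%s' % (len(text), text)

assert handleline(b'00' * 20 + b'foo', None) == b'0\n'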
 def createfileblob(filectx):
     """
     format:
@@ -335,7 +368,7 @@
         text = filectx.data()
     else:
         # lfs, read raw revision data
-        text = flog.revision(frev, raw=True)
+        text = flog.rawdata(frev)
 
     repo = filectx._repo
 
@@ -345,7 +378,7 @@
         repo.forcelinkrev = True
         ancestors.extend([f for f in filectx.ancestors()])
 
-        ancestortext = ""
+        ancestortext = b""
         for ancestorctx in ancestors:
             parents = ancestorctx.parents()
             p1 = nullid
@@ -355,40 +388,45 @@
             if len(parents) > 1:
                 p2 = parents[1].filenode()
 
-            copyname = ""
+            copyname = b""
             rename = ancestorctx.renamed()
             if rename:
                 copyname = rename[0]
             linknode = ancestorctx.node()
-            ancestortext += "%s%s%s%s%s\0" % (
-                ancestorctx.filenode(), p1, p2, linknode,
-                copyname)
+            ancestortext += b"%s%s%s%s%s\0" % (
+                ancestorctx.filenode(),
+                p1,
+                p2,
+                linknode,
+                copyname,
+            )
     finally:
         repo.forcelinkrev = False
 
     header = shallowutil.buildfileblobheader(len(text), revlogflags)
 
-    return "%s\0%s%s" % (header, text, ancestortext)
+    return b"%s\0%s%s" % (header, text, ancestortext)
+
 
 def gcserver(ui, repo):
-    if not repo.ui.configbool("remotefilelog", "server"):
+    if not repo.ui.configbool(b"remotefilelog", b"server"):
         return
 
     neededfiles = set()
-    heads = repo.revs("heads(tip~25000:) - null")
+    heads = repo.revs(b"heads(tip~25000:) - null")
 
-    cachepath = repo.vfs.join("remotefilelogcache")
+    cachepath = repo.vfs.join(b"remotefilelogcache")
     for head in heads:
         mf = repo[head].manifest()
-        for filename, filenode in mf.iteritems():
+        for filename, filenode in pycompat.iteritems(mf):
             filecachepath = os.path.join(cachepath, filename, hex(filenode))
             neededfiles.add(filecachepath)
 
     # delete unneeded older files
-    days = repo.ui.configint("remotefilelog", "serverexpiration")
+    days = repo.ui.configint(b"remotefilelog", b"serverexpiration")
     expiration = time.time() - (days * 24 * 60 * 60)
 
-    progress = ui.makeprogress(_("removing old server cache"), unit="files")
+    progress = ui.makeprogress(_(b"removing old server cache"), unit=b"files")
     progress.update(0)
     for root, dirs, files in os.walk(cachepath):
         for file in files:
--- a/hgext/remotefilelog/repack.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/repack.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,6 +11,7 @@
 from mercurial import (
     encoding,
     error,
+    lock as lockmod,
     mdiff,
     policy,
     pycompat,
@@ -23,7 +24,6 @@
     constants,
     contentstore,
     datapack,
-    extutil,
     historypack,
     metadatastore,
     shallowutil,
@@ -31,36 +31,46 @@
 
 osutil = policy.importmod(r'osutil')
 
+
 class RepackAlreadyRunning(error.Abort):
     pass
 
-def backgroundrepack(repo, incremental=True, packsonly=False):
-    cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'repack']
-    msg = _("(running background repack)\n")
+
+def backgroundrepack(
+    repo, incremental=True, packsonly=False, ensurestart=False
+):
+    cmd = [procutil.hgexecutable(), b'-R', repo.origroot, b'repack']
+    msg = _(b"(running background repack)\n")
     if incremental:
-        cmd.append('--incremental')
-        msg = _("(running background incremental repack)\n")
+        cmd.append(b'--incremental')
+        msg = _(b"(running background incremental repack)\n")
     if packsonly:
-        cmd.append('--packsonly')
+        cmd.append(b'--packsonly')
     repo.ui.warn(msg)
     # We know this command will find a binary, so don't block on it starting.
-    procutil.runbgcommand(cmd, encoding.environ, ensurestart=False)
+    procutil.runbgcommand(cmd, encoding.environ, ensurestart=ensurestart)
+
 
 def fullrepack(repo, options=None):
     """If ``packsonly`` is True, stores creating only loose objects are skipped.
     """
     if util.safehasattr(repo, 'shareddatastores'):
-        datasource = contentstore.unioncontentstore(
-            *repo.shareddatastores)
+        datasource = contentstore.unioncontentstore(*repo.shareddatastores)
         historysource = metadatastore.unionmetadatastore(
-            *repo.sharedhistorystores,
-            allowincomplete=True)
+            *repo.sharedhistorystores, allowincomplete=True
+        )
 
         packpath = shallowutil.getcachepackpath(
+            repo, constants.FILEPACK_CATEGORY
+        )
+        _runrepack(
             repo,
-            constants.FILEPACK_CATEGORY)
-        _runrepack(repo, datasource, historysource, packpath,
-                   constants.FILEPACK_CATEGORY, options=options)
+            datasource,
+            historysource,
+            packpath,
+            constants.FILEPACK_CATEGORY,
+            options=options,
+        )
 
     if util.safehasattr(repo.manifestlog, 'datastore'):
         localdata, shareddata = _getmanifeststores(repo)
@@ -70,20 +80,33 @@
         # Repack the shared manifest store
         datasource = contentstore.unioncontentstore(*sdstores)
         historysource = metadatastore.unionmetadatastore(
-                        *shstores,
-                        allowincomplete=True)
-        _runrepack(repo, datasource, historysource, spackpath,
-                   constants.TREEPACK_CATEGORY, options=options)
+            *shstores, allowincomplete=True
+        )
+        _runrepack(
+            repo,
+            datasource,
+            historysource,
+            spackpath,
+            constants.TREEPACK_CATEGORY,
+            options=options,
+        )
 
         # Repack the local manifest store
         datasource = contentstore.unioncontentstore(
-                        *ldstores,
-                        allowincomplete=True)
+            *ldstores, allowincomplete=True
+        )
         historysource = metadatastore.unionmetadatastore(
-                        *lhstores,
-                        allowincomplete=True)
-        _runrepack(repo, datasource, historysource, lpackpath,
-                   constants.TREEPACK_CATEGORY, options=options)
+            *lhstores, allowincomplete=True
+        )
+        _runrepack(
+            repo,
+            datasource,
+            historysource,
+            lpackpath,
+            constants.TREEPACK_CATEGORY,
+            options=options,
+        )
+
 
 def incrementalrepack(repo, options=None):
     """This repacks the repo by looking at the distribution of pack files in the
@@ -91,14 +114,16 @@
     """
     if util.safehasattr(repo, 'shareddatastores'):
         packpath = shallowutil.getcachepackpath(
+            repo, constants.FILEPACK_CATEGORY
+        )
+        _incrementalrepack(
             repo,
-            constants.FILEPACK_CATEGORY)
-        _incrementalrepack(repo,
-                           repo.shareddatastores,
-                           repo.sharedhistorystores,
-                           packpath,
-                           constants.FILEPACK_CATEGORY,
-                           options=options)
+            repo.shareddatastores,
+            repo.sharedhistorystores,
+            packpath,
+            constants.FILEPACK_CATEGORY,
+            options=options,
+        )
 
     if util.safehasattr(repo.manifestlog, 'datastore'):
         localdata, shareddata = _getmanifeststores(repo)
@@ -106,21 +131,26 @@
         spackpath, sdstores, shstores = shareddata
 
         # Repack the shared manifest store
-        _incrementalrepack(repo,
-                           sdstores,
-                           shstores,
-                           spackpath,
-                           constants.TREEPACK_CATEGORY,
-                           options=options)
+        _incrementalrepack(
+            repo,
+            sdstores,
+            shstores,
+            spackpath,
+            constants.TREEPACK_CATEGORY,
+            options=options,
+        )
 
         # Repack the local manifest store
-        _incrementalrepack(repo,
-                           ldstores,
-                           lhstores,
-                           lpackpath,
-                           constants.TREEPACK_CATEGORY,
-                           allowincompletedata=True,
-                           options=options)
+        _incrementalrepack(
+            repo,
+            ldstores,
+            lhstores,
+            lpackpath,
+            constants.TREEPACK_CATEGORY,
+            allowincompletedata=True,
+            options=options,
+        )
+
 
 def _getmanifeststores(repo):
     shareddatastores = repo.manifestlog.shareddatastores
@@ -128,123 +158,165 @@
     sharedhistorystores = repo.manifestlog.sharedhistorystores
     localhistorystores = repo.manifestlog.localhistorystores
 
-    sharedpackpath = shallowutil.getcachepackpath(repo,
-                                            constants.TREEPACK_CATEGORY)
-    localpackpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base,
-                                            constants.TREEPACK_CATEGORY)
+    sharedpackpath = shallowutil.getcachepackpath(
+        repo, constants.TREEPACK_CATEGORY
+    )
+    localpackpath = shallowutil.getlocalpackpath(
+        repo.svfs.vfs.base, constants.TREEPACK_CATEGORY
+    )
 
-    return ((localpackpath, localdatastores, localhistorystores),
-            (sharedpackpath, shareddatastores, sharedhistorystores))
+    return (
+        (localpackpath, localdatastores, localhistorystores),
+        (sharedpackpath, shareddatastores, sharedhistorystores),
+    )
+
 
 def _topacks(packpath, files, constructor):
     paths = list(os.path.join(packpath, p) for p in files)
     packs = list(constructor(p) for p in paths)
     return packs
 
+
 def _deletebigpacks(repo, folder, files):
     """Deletes packfiles that are bigger than ``packs.maxpacksize``.
 
     Returns ``files`` with the removed files omitted."""
-    maxsize = repo.ui.configbytes("packs", "maxpacksize")
+    maxsize = repo.ui.configbytes(b"packs", b"maxpacksize")
     if maxsize <= 0:
         return files
 
     # This only considers datapacks today, but we could broaden it to include
     # historypacks.
-    VALIDEXTS = [".datapack", ".dataidx"]
+    VALIDEXTS = [b".datapack", b".dataidx"]
 
     # Either an oversize index or datapack will trigger cleanup of the whole
     # pack:
-    oversized = {os.path.splitext(path)[0] for path, ftype, stat in files
-        if (stat.st_size > maxsize and (os.path.splitext(path)[1]
-                                        in VALIDEXTS))}
+    oversized = {
+        os.path.splitext(path)[0]
+        for path, ftype, stat in files
+        if (stat.st_size > maxsize and (os.path.splitext(path)[1] in VALIDEXTS))
+    }
 
     for rootfname in oversized:
         rootpath = os.path.join(folder, rootfname)
         for ext in VALIDEXTS:
             path = rootpath + ext
-            repo.ui.debug('removing oversize packfile %s (%s)\n' %
-                          (path, util.bytecount(os.stat(path).st_size)))
+            repo.ui.debug(
+                b'removing oversize packfile %s (%s)\n'
+                % (path, util.bytecount(os.stat(path).st_size))
+            )
             os.unlink(path)
     return [row for row in files if os.path.basename(row[0]) not in oversized]
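`_deletebigpacks` above condemns a whole pack when either its `.datapack` or its `.dataidx` exceeds `packs.maxpacksize`, since both files share a basename. The selection step in isolation (`oversizedroots` is a hypothetical name):

import os

VALIDEXTS = [b".datapack", b".dataidx"]

def oversizedroots(files, maxsize):
    # files: iterable of (path, kind, stat) triples, as returned by
    # osutil.listdir(..., stat=True) in the code above.
    return {
        os.path.splitext(path)[0]
        for path, kind, stat in files
        if stat.st_size > maxsize
        and os.path.splitext(path)[1] in VALIDEXTS
    }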
 
-def _incrementalrepack(repo, datastore, historystore, packpath, category,
-        allowincompletedata=False, options=None):
+
+def _incrementalrepack(
+    repo,
+    datastore,
+    historystore,
+    packpath,
+    category,
+    allowincompletedata=False,
+    options=None,
+):
     shallowutil.mkstickygroupdir(repo.ui, packpath)
 
     files = osutil.listdir(packpath, stat=True)
     files = _deletebigpacks(repo, packpath, files)
-    datapacks = _topacks(packpath,
-        _computeincrementaldatapack(repo.ui, files),
-        datapack.datapack)
-    datapacks.extend(s for s in datastore
-                     if not isinstance(s, datapack.datapackstore))
+    datapacks = _topacks(
+        packpath, _computeincrementaldatapack(repo.ui, files), datapack.datapack
+    )
+    datapacks.extend(
+        s for s in datastore if not isinstance(s, datapack.datapackstore)
+    )
 
-    historypacks = _topacks(packpath,
+    historypacks = _topacks(
+        packpath,
         _computeincrementalhistorypack(repo.ui, files),
-        historypack.historypack)
-    historypacks.extend(s for s in historystore
-                        if not isinstance(s, historypack.historypackstore))
+        historypack.historypack,
+    )
+    historypacks.extend(
+        s
+        for s in historystore
+        if not isinstance(s, historypack.historypackstore)
+    )
 
     # ``allhistory{files,packs}`` contains all known history packs, even ones we
     # don't plan to repack. They are used during the datapack repack to ensure
     # good ordering of nodes.
-    allhistoryfiles = _allpackfileswithsuffix(files, historypack.PACKSUFFIX,
-                            historypack.INDEXSUFFIX)
-    allhistorypacks = _topacks(packpath,
+    allhistoryfiles = _allpackfileswithsuffix(
+        files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX
+    )
+    allhistorypacks = _topacks(
+        packpath,
         (f for f, mode, stat in allhistoryfiles),
-        historypack.historypack)
-    allhistorypacks.extend(s for s in historystore
-                        if not isinstance(s, historypack.historypackstore))
-    _runrepack(repo,
-               contentstore.unioncontentstore(
-                   *datapacks,
-                   allowincomplete=allowincompletedata),
-               metadatastore.unionmetadatastore(
-                   *historypacks,
-                   allowincomplete=True),
-               packpath, category,
-               fullhistory=metadatastore.unionmetadatastore(
-                   *allhistorypacks,
-                   allowincomplete=True),
-                options=options)
+        historypack.historypack,
+    )
+    allhistorypacks.extend(
+        s
+        for s in historystore
+        if not isinstance(s, historypack.historypackstore)
+    )
+    _runrepack(
+        repo,
+        contentstore.unioncontentstore(
+            *datapacks, allowincomplete=allowincompletedata
+        ),
+        metadatastore.unionmetadatastore(*historypacks, allowincomplete=True),
+        packpath,
+        category,
+        fullhistory=metadatastore.unionmetadatastore(
+            *allhistorypacks, allowincomplete=True
+        ),
+        options=options,
+    )
+
 
 def _computeincrementaldatapack(ui, files):
     opts = {
-        'gencountlimit' : ui.configint(
-            'remotefilelog', 'data.gencountlimit'),
-        'generations' : ui.configlist(
-            'remotefilelog', 'data.generations'),
-        'maxrepackpacks' : ui.configint(
-            'remotefilelog', 'data.maxrepackpacks'),
-        'repackmaxpacksize' : ui.configbytes(
-            'remotefilelog', 'data.repackmaxpacksize'),
-        'repacksizelimit' : ui.configbytes(
-            'remotefilelog', 'data.repacksizelimit'),
+        b'gencountlimit': ui.configint(b'remotefilelog', b'data.gencountlimit'),
+        b'generations': ui.configlist(b'remotefilelog', b'data.generations'),
+        b'maxrepackpacks': ui.configint(
+            b'remotefilelog', b'data.maxrepackpacks'
+        ),
+        b'repackmaxpacksize': ui.configbytes(
+            b'remotefilelog', b'data.repackmaxpacksize'
+        ),
+        b'repacksizelimit': ui.configbytes(
+            b'remotefilelog', b'data.repacksizelimit'
+        ),
     }
 
     packfiles = _allpackfileswithsuffix(
-        files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX)
+        files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX
+    )
     return _computeincrementalpack(packfiles, opts)
 
+
 def _computeincrementalhistorypack(ui, files):
     opts = {
-        'gencountlimit' : ui.configint(
-            'remotefilelog', 'history.gencountlimit'),
-        'generations' : ui.configlist(
-            'remotefilelog', 'history.generations', ['100MB']),
-        'maxrepackpacks' : ui.configint(
-            'remotefilelog', 'history.maxrepackpacks'),
-        'repackmaxpacksize' : ui.configbytes(
-            'remotefilelog', 'history.repackmaxpacksize', '400MB'),
-        'repacksizelimit' : ui.configbytes(
-            'remotefilelog', 'history.repacksizelimit'),
+        b'gencountlimit': ui.configint(
+            b'remotefilelog', b'history.gencountlimit'
+        ),
+        b'generations': ui.configlist(
+            b'remotefilelog', b'history.generations', [b'100MB']
+        ),
+        b'maxrepackpacks': ui.configint(
+            b'remotefilelog', b'history.maxrepackpacks'
+        ),
+        b'repackmaxpacksize': ui.configbytes(
+            b'remotefilelog', b'history.repackmaxpacksize', b'400MB'
+        ),
+        b'repacksizelimit': ui.configbytes(
+            b'remotefilelog', b'history.repacksizelimit'
+        ),
     }
 
     packfiles = _allpackfileswithsuffix(
-        files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX)
+        files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX
+    )
     return _computeincrementalpack(packfiles, opts)
 
+
 def _allpackfileswithsuffix(files, packsuffix, indexsuffix):
     result = []
     fileset = set(fn for fn, mode, stat in files)
@@ -252,7 +324,7 @@
         if not filename.endswith(packsuffix):
             continue
 
-        prefix = filename[:-len(packsuffix)]
+        prefix = filename[: -len(packsuffix)]
 
         # Don't process a pack if it doesn't have an index.
         if (prefix + indexsuffix) not in fileset:
@@ -261,6 +333,7 @@
 
     return result
 
+
 def _computeincrementalpack(files, opts):
     """Given a set of pack files along with the configuration options, this
     function computes the list of files that should be packed as part of an
@@ -271,8 +344,9 @@
     over time).
     """
 
-    limits = list(sorted((util.sizetoint(s) for s in opts['generations']),
-                                reverse=True))
+    limits = list(
+        sorted((util.sizetoint(s) for s in opts[b'generations']), reverse=True)
+    )
     limits.append(0)
 
     # Group the packs by generation (i.e. by size)
@@ -283,7 +357,7 @@
     sizes = {}
     for prefix, mode, stat in files:
         size = stat.st_size
-        if size > opts['repackmaxpacksize']:
+        if size > opts[b'repackmaxpacksize']:
             continue
 
         sizes[prefix] = size
@@ -300,25 +374,31 @@
     # Find the largest generation with more than gencountlimit packs
     genpacks = []
     for i, limit in enumerate(limits):
-        if len(generations[i]) > opts['gencountlimit']:
+        if len(generations[i]) > opts[b'gencountlimit']:
             # Sort to be smallest last, for easy popping later
-            genpacks.extend(sorted(generations[i], reverse=True,
-                                   key=lambda x: sizes[x]))
+            genpacks.extend(
+                sorted(generations[i], reverse=True, key=lambda x: sizes[x])
+            )
             break
 
     # Take as many packs from the generation as we can
     chosenpacks = genpacks[-3:]
     genpacks = genpacks[:-3]
     repacksize = sum(sizes[n] for n in chosenpacks)
-    while (repacksize < opts['repacksizelimit'] and genpacks and
-           len(chosenpacks) < opts['maxrepackpacks']):
+    while (
+        repacksize < opts[b'repacksizelimit']
+        and genpacks
+        and len(chosenpacks) < opts[b'maxrepackpacks']
+    ):
         chosenpacks.append(genpacks.pop())
         repacksize += sizes[chosenpacks[-1]]
 
     return chosenpacks
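`_computeincrementalpack` above buckets packs into size generations, finds the largest generation with more than `gencountlimit` members, and then greedily grows the chosen set within the size and count budgets. A compact model of that selection with plain ints in place of config reads (`choosepacks` is a hypothetical name, and the bucketing is inferred from the surrounding comments):

def choosepacks(sizes, genlimits, gencountlimit, repacksizelimit,
                maxrepackpacks):
    # sizes: {packname: size in bytes}; genlimits: generation
    # boundaries, e.g. [100 * 2**20, 10 * 2**20].
    limits = sorted(genlimits, reverse=True)
    limits.append(0)
    buckets = [[] for _ in limits]
    for name, size in sizes.items():
        for i, limit in enumerate(limits):
            if size >= limit:
                buckets[i].append(name)
                break
    genpacks = []
    for bucket in buckets:
        if len(bucket) > gencountlimit:
            # Sort smallest-last so pop() yields the smallest pack.
            genpacks = sorted(bucket, key=lambda n: sizes[n], reverse=True)
            break
    chosen = genpacks[-3:]
    genpacks = genpacks[:-3]
    total = sum(sizes[n] for n in chosen)
    while (total < repacksizelimit and genpacks
           and len(chosen) < maxrepackpacks):
        chosen.append(genpacks.pop())
        total += sizes[chosen[-1]]
    return chosen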
 
-def _runrepack(repo, data, history, packpath, category, fullhistory=None,
-               options=None):
+
+def _runrepack(
+    repo, data, history, packpath, category, fullhistory=None, options=None
+):
     shallowutil.mkstickygroupdir(repo.ui, packpath)
 
     def isold(repo, filename, node):
@@ -328,24 +408,37 @@
         filectx = repo.filectx(filename, fileid=node)
         filetime = repo[filectx.linkrev()].date()
 
-        ttl = repo.ui.configint('remotefilelog', 'nodettl')
+        ttl = repo.ui.configint(b'remotefilelog', b'nodettl')
 
         limit = time.time() - ttl
         return filetime[0] < limit
 
-    garbagecollect = repo.ui.configbool('remotefilelog', 'gcrepack')
+    garbagecollect = repo.ui.configbool(b'remotefilelog', b'gcrepack')
     if not fullhistory:
         fullhistory = history
-    packer = repacker(repo, data, history, fullhistory, category,
-                      gc=garbagecollect, isold=isold, options=options)
+    packer = repacker(
+        repo,
+        data,
+        history,
+        fullhistory,
+        category,
+        gc=garbagecollect,
+        isold=isold,
+        options=options,
+    )
 
     with datapack.mutabledatapack(repo.ui, packpath) as dpack:
         with historypack.mutablehistorypack(repo.ui, packpath) as hpack:
             try:
                 packer.run(dpack, hpack)
             except error.LockHeld:
-                raise RepackAlreadyRunning(_("skipping repack - another repack "
-                                             "is already running"))
+                raise RepackAlreadyRunning(
+                    _(
+                        b"skipping repack - another repack "
+                        b"is already running"
+                    )
+                )
+
 
 def keepset(repo, keyfn, lastkeepkeys=None):
     """Computes a keepset which is not garbage collected.
@@ -363,16 +456,16 @@
     # 2. Draft commits
     # 3. Parents of draft commits
     # 4. Pullprefetch and bgprefetchrevs revsets if specified
-    revs = ['.', 'draft()', 'parents(draft())']
-    prefetchrevs = repo.ui.config('remotefilelog', 'pullprefetch', None)
+    revs = [b'.', b'draft()', b'parents(draft())']
+    prefetchrevs = repo.ui.config(b'remotefilelog', b'pullprefetch', None)
     if prefetchrevs:
-        revs.append('(%s)' % prefetchrevs)
-    prefetchrevs = repo.ui.config('remotefilelog', 'bgprefetchrevs', None)
+        revs.append(b'(%s)' % prefetchrevs)
+    prefetchrevs = repo.ui.config(b'remotefilelog', b'bgprefetchrevs', None)
     if prefetchrevs:
-        revs.append('(%s)' % prefetchrevs)
-    revs = '+'.join(revs)
+        revs.append(b'(%s)' % prefetchrevs)
+    revs = b'+'.join(revs)
 
-    revs = ['sort((%s), "topo")' % revs]
+    revs = [b'sort((%s), "topo")' % revs]
     keep = scmutil.revrange(repo, revs)
 
     processed = set()
@@ -398,22 +491,33 @@
         if type(m) is dict:
             # m is a result of diff of two manifests and is a dictionary that
             # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple
-            for filename, diff in m.iteritems():
+            for filename, diff in pycompat.iteritems(m):
                 if diff[0][0] is not None:
                     keepkeys.add(keyfn(filename, diff[0][0]))
         else:
             # m is a manifest object
-            for filename, filenode in m.iteritems():
+            for filename, filenode in pycompat.iteritems(m):
                 keepkeys.add(keyfn(filename, filenode))
 
     return keepkeys
 
+
 class repacker(object):
     """Class for orchestrating the repack of data and history information into a
     new format.
     """
-    def __init__(self, repo, data, history, fullhistory, category, gc=False,
-                 isold=None, options=None):
+
+    def __init__(
+        self,
+        repo,
+        data,
+        history,
+        fullhistory,
+        category,
+        gc=False,
+        isold=None,
+        options=None,
+    ):
         self.repo = repo
         self.data = data
         self.history = history
@@ -423,17 +527,18 @@
         self.options = options
         if self.garbagecollect:
             if not isold:
-                raise ValueError("Function 'isold' is not properly specified")
+                raise ValueError(b"Function 'isold' is not properly specified")
             # use (filename, node) tuple as a keepset key
-            self.keepkeys = keepset(repo, lambda f, n : (f, n))
+            self.keepkeys = keepset(repo, lambda f, n: (f, n))
             self.isold = isold
 
     def run(self, targetdata, targethistory):
         ledger = repackledger()
 
-        with extutil.flock(repacklockvfs(self.repo).join("repacklock"),
-                           _('repacking %s') % self.repo.origroot, timeout=0):
-            self.repo.hook('prerepack')
+        with lockmod.lock(
+            repacklockvfs(self.repo), b"repacklock", desc=None, timeout=0
+        ):
+            self.repo.hook(b'prerepack')
 
             # Populate ledger from source
             self.data.markledger(ledger, options=self.options)
@@ -472,8 +577,10 @@
         orphans = sorted(orphans)
         orphans = list(sorted(orphans, key=getsize, reverse=True))
         if ui.debugflag:
-            ui.debug("%s: orphan chain: %s\n" % (filename,
-                ", ".join([short(s) for s in orphans])))
+            ui.debug(
+                b"%s: orphan chain: %s\n"
+                % (filename, b", ".join([short(s) for s in orphans]))
+            )
 
         # Create one contiguous chain and reassign deltabases.
         for i, node in enumerate(orphans):
@@ -488,31 +595,36 @@
 
     def repackdata(self, ledger, target):
         ui = self.repo.ui
-        maxchainlen = ui.configint('packs', 'maxchainlen', 1000)
+        maxchainlen = ui.configint(b'packs', b'maxchainlen', 1000)
 
         byfile = {}
-        for entry in ledger.entries.itervalues():
+        for entry in pycompat.itervalues(ledger.entries):
             if entry.datasource:
                 byfile.setdefault(entry.filename, {})[entry.node] = entry
 
         count = 0
-        repackprogress = ui.makeprogress(_("repacking data"), unit=self.unit,
-                                            total=len(byfile))
-        for filename, entries in sorted(byfile.iteritems()):
+        repackprogress = ui.makeprogress(
+            _(b"repacking data"), unit=self.unit, total=len(byfile)
+        )
+        for filename, entries in sorted(pycompat.iteritems(byfile)):
             repackprogress.update(count)
 
             ancestors = {}
             nodes = list(node for node in entries)
             nohistory = []
-            buildprogress = ui.makeprogress(_("building history"), unit='nodes',
-                                            total=len(nodes))
+            buildprogress = ui.makeprogress(
+                _(b"building history"), unit=b'nodes', total=len(nodes)
+            )
             for i, node in enumerate(nodes):
                 if node in ancestors:
                     continue
                 buildprogress.update(i)
                 try:
-                    ancestors.update(self.fullhistory.getancestors(filename,
-                        node, known=ancestors))
+                    ancestors.update(
+                        self.fullhistory.getancestors(
+                            filename, node, known=ancestors
+                        )
+                    )
                 except KeyError:
                     # Since we're packing data entries, we may not have the
                     # corresponding history entries for them. It's not a big
@@ -523,14 +635,16 @@
             # Order the nodes children first, so we can produce reverse deltas
             orderednodes = list(reversed(self._toposort(ancestors)))
             if len(nohistory) > 0:
-                ui.debug('repackdata: %d nodes without history\n' %
-                         len(nohistory))
+                ui.debug(
+                    b'repackdata: %d nodes without history\n' % len(nohistory)
+                )
             orderednodes.extend(sorted(nohistory))
 
             # Filter orderednodes to just the nodes we want to serialize (it
             # currently also has the edge nodes' ancestors).
-            orderednodes = list(filter(lambda node: node in nodes,
-                                orderednodes))
+            orderednodes = list(
+                filter(lambda node: node in nodes, orderednodes)
+            )
 
             # Garbage collect old nodes:
             if self.garbagecollect:
@@ -538,8 +652,9 @@
                 for node in orderednodes:
                     # If the node is old and is not in the keepset, we skip it,
                     # and mark as garbage collected
-                    if ((filename, node) not in self.keepkeys and
-                        self.isold(self.repo, filename, node)):
+                    if (filename, node) not in self.keepkeys and self.isold(
+                        self.repo, filename, node
+                    ):
                         entries[node].gced = True
                         continue
                     neworderednodes.append(node)
@@ -550,9 +665,9 @@
             nobase = set()
             referenced = set()
             nodes = set(nodes)
-            processprogress = ui.makeprogress(_("processing nodes"),
-                                              unit='nodes',
-                                              total=len(orderednodes))
+            processprogress = ui.makeprogress(
+                _(b"processing nodes"), unit=b'nodes', total=len(orderednodes)
+            )
             for i, node in enumerate(orderednodes):
                 processprogress.update(i)
                 # Find delta base
@@ -590,10 +705,11 @@
                             deltabases[p2] = (node, chainlen + 1)
 
             # experimental config: repack.chainorphansbysize
-            if ui.configbool('repack', 'chainorphansbysize'):
+            if ui.configbool(b'repack', b'chainorphansbysize'):
                 orphans = nobase - referenced
-                orderednodes = self._chainorphans(ui, filename, orderednodes,
-                    orphans, deltabases)
+                orderednodes = self._chainorphans(
+                    ui, filename, orderednodes, orphans, deltabases
+                )
 
             # Compute deltas and write to the pack
             for i, node in enumerate(orderednodes):
@@ -606,8 +722,11 @@
                     deltaentry = self.data.getdelta(filename, node)
                     delta, deltabasename, origdeltabase, meta = deltaentry
                     size = meta.get(constants.METAKEYSIZE)
-                    if (deltabasename != filename or origdeltabase != deltabase
-                        or size is None):
+                    if (
+                        deltabasename != filename
+                        or origdeltabase != deltabase
+                        or size is None
+                    ):
                         deltabasetext = self.data.get(filename, deltabase)
                         original = self.data.get(filename, node)
                         size = len(original)
@@ -634,21 +753,23 @@
         ui = self.repo.ui
 
         byfile = {}
-        for entry in ledger.entries.itervalues():
+        for entry in pycompat.itervalues(ledger.entries):
             if entry.historysource:
                 byfile.setdefault(entry.filename, {})[entry.node] = entry
 
-        progress = ui.makeprogress(_("repacking history"), unit=self.unit,
-                                   total=len(byfile))
-        for filename, entries in sorted(byfile.iteritems()):
+        progress = ui.makeprogress(
+            _(b"repacking history"), unit=self.unit, total=len(byfile)
+        )
+        for filename, entries in sorted(pycompat.iteritems(byfile)):
             ancestors = {}
             nodes = list(node for node in entries)
 
             for node in nodes:
                 if node in ancestors:
                     continue
-                ancestors.update(self.history.getancestors(filename, node,
-                                                           known=ancestors))
+                ancestors.update(
+                    self.history.getancestors(filename, node, known=ancestors)
+                )
 
             # Order the nodes children first
             orderednodes = reversed(self._toposort(ancestors))
@@ -701,11 +822,13 @@
         sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc)
         return sortednodes
 
+
 class repackledger(object):
     """Storage for all the bookkeeping that happens during a repack. It contains
     the list of revisions being repacked, what happened to each revision, and
     which source store contained which revision originally (for later cleanup).
     """
+
     def __init__(self):
         self.entries = {}
         self.sources = {}
@@ -747,11 +870,21 @@
     def addcreated(self, value):
         self.created.add(value)
 
+
 class repackentry(object):
     """Simple class representing a single revision entry in the repackledger.
     """
-    __slots__ = (r'filename', r'node', r'datasource', r'historysource',
-                 r'datarepacked', r'historyrepacked', r'gced')
+
+    __slots__ = (
+        r'filename',
+        r'node',
+        r'datasource',
+        r'historysource',
+        r'datarepacked',
+        r'historyrepacked',
+        r'gced',
+    )
+
     def __init__(self, filename, node):
         self.filename = filename
         self.node = node
@@ -766,13 +899,14 @@
         # If garbage collected
         self.gced = False
 
+
 def repacklockvfs(repo):
     if util.safehasattr(repo, 'name'):
         # Lock in the shared cache so repacks across multiple copies of the same
         # repo are coordinated.
         sharedcachepath = shallowutil.getcachepackpath(
-            repo,
-            constants.FILEPACK_CATEGORY)
+            repo, constants.FILEPACK_CATEGORY
+        )
         return vfs.vfs(sharedcachepath)
     else:
         return repo.svfs
--- a/hgext/remotefilelog/shallowbundle.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/shallowbundle.py	Mon Oct 21 11:09:48 2019 -0400
@@ -26,10 +26,10 @@
 LocalFiles = 1
 AllFiles = 2
 
+
 def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
     if not isinstance(rlog, remotefilelog.remotefilelog):
-        for c in super(cls, self).group(nodelist, rlog, lookup,
-                                        units=units):
+        for c in super(cls, self).group(nodelist, rlog, lookup, units=units):
             yield c
         return
 
@@ -52,17 +52,20 @@
 
     yield self.close()
 
+
 class shallowcg1packer(changegroup.cgpacker):
     def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
         if shallowutil.isenabled(self._repo):
             fastpathlinkrev = False
 
-        return super(shallowcg1packer, self).generate(commonrevs, clnodes,
-            fastpathlinkrev, source)
+        return super(shallowcg1packer, self).generate(
+            commonrevs, clnodes, fastpathlinkrev, source
+        )
 
     def group(self, nodelist, rlog, lookup, units=None, reorder=None):
-        return shallowgroup(shallowcg1packer, self, nodelist, rlog, lookup,
-                            units=units)
+        return shallowgroup(
+            shallowcg1packer, self, nodelist, rlog, lookup, units=units
+        )
 
     def generatefiles(self, changedfiles, *args):
         try:
@@ -76,42 +79,45 @@
                 # bundlerepo is heavily tied to revlogs. Instead require that
                 # the user use unbundle instead.
                 # Force load the filelog data.
-                bundlerepo.bundlerepository.file(repo, 'foo')
+                bundlerepo.bundlerepository.file(repo, b'foo')
                 if repo._cgfilespos:
-                    raise error.Abort("cannot pull from full bundles",
-                                      hint="use `hg unbundle` instead")
+                    raise error.Abort(
+                        b"cannot pull from full bundles",
+                        hint=b"use `hg unbundle` instead",
+                    )
                 return []
             filestosend = self.shouldaddfilegroups(source)
             if filestosend == NoFiles:
-                changedfiles = list([f for f in changedfiles
-                                     if not repo.shallowmatch(f)])
+                changedfiles = list(
+                    [f for f in changedfiles if not repo.shallowmatch(f)]
+                )
 
-        return super(shallowcg1packer, self).generatefiles(
-            changedfiles, *args)
+        return super(shallowcg1packer, self).generatefiles(changedfiles, *args)
 
     def shouldaddfilegroups(self, source):
         repo = self._repo
         if not shallowutil.isenabled(repo):
             return AllFiles
 
-        if source == "push" or source == "bundle":
+        if source == b"push" or source == b"bundle":
             return AllFiles
 
         caps = self._bundlecaps or []
-        if source == "serve" or source == "pull":
+        if source == b"serve" or source == b"pull":
             if constants.BUNDLE2_CAPABLITY in caps:
                 return LocalFiles
             else:
                 # Serving to a full repo requires us to serve everything
-                repo.ui.warn(_("pulling from a shallow repo\n"))
+                repo.ui.warn(_(b"pulling from a shallow repo\n"))
                 return AllFiles
 
         return NoFiles
 
     def prune(self, rlog, missing, commonrevs):
         if not isinstance(rlog, remotefilelog.remotefilelog):
-            return super(shallowcg1packer, self).prune(rlog, missing,
-                commonrevs)
+            return super(shallowcg1packer, self).prune(
+                rlog, missing, commonrevs
+            )
 
         repo = self._repo
         results = []
@@ -122,9 +128,9 @@
         return results
 
     def nodechunk(self, revlog, node, prevnode, linknode):
-        prefix = ''
+        prefix = b''
         if prevnode == nullid:
-            delta = revlog.revision(node, raw=True)
+            delta = revlog.rawdata(node)
             prefix = mdiff.trivialdiffheader(len(delta))
         else:
             # Actually uses remotefilelog.revdiff which works on nodes, not revs
@@ -138,6 +144,7 @@
         yield meta
         yield delta
 
+
 def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
     if not shallowutil.isenabled(repo):
         return orig(repo, outgoing, version, source, *args, **kwargs)
@@ -145,28 +152,30 @@
     original = repo.shallowmatch
     try:
         # if serving, only send files the clients has patterns for
-        if source == 'serve':
+        if source == b'serve':
             bundlecaps = kwargs.get(r'bundlecaps')
             includepattern = None
             excludepattern = None
-            for cap in (bundlecaps or []):
-                if cap.startswith("includepattern="):
-                    raw = cap[len("includepattern="):]
+            for cap in bundlecaps or []:
+                if cap.startswith(b"includepattern="):
+                    raw = cap[len(b"includepattern=") :]
                     if raw:
-                        includepattern = raw.split('\0')
-                elif cap.startswith("excludepattern="):
-                    raw = cap[len("excludepattern="):]
+                        includepattern = raw.split(b'\0')
+                elif cap.startswith(b"excludepattern="):
+                    raw = cap[len(b"excludepattern=") :]
                     if raw:
-                        excludepattern = raw.split('\0')
+                        excludepattern = raw.split(b'\0')
             if includepattern or excludepattern:
-                repo.shallowmatch = match.match(repo.root, '', None,
-                    includepattern, excludepattern)
+                repo.shallowmatch = match.match(
+                    repo.root, b'', None, includepattern, excludepattern
+                )
             else:
                 repo.shallowmatch = match.always()
         return orig(repo, outgoing, version, source, *args, **kwargs)
     finally:
         repo.shallowmatch = original
 
+
 def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
     if not shallowutil.isenabled(repo):
         return orig(repo, source, revmap, trp, expectedfiles, *args)
@@ -183,13 +192,13 @@
     # files in topological order.
 
     # read all the file chunks but don't add them
-    progress = repo.ui.makeprogress(_('files'), total=expectedfiles)
+    progress = repo.ui.makeprogress(_(b'files'), total=expectedfiles)
     while True:
         chunkdata = source.filelogheader()
         if not chunkdata:
             break
-        f = chunkdata["filename"]
-        repo.ui.debug("adding %s revisions\n" % f)
+        f = chunkdata[b"filename"]
+        repo.ui.debug(b"adding %s revisions\n" % f)
         progress.increment()
 
         if not repo.shallowmatch(f):
@@ -215,9 +224,10 @@
                 visited.add(f)
 
         if chain is None:
-            raise error.Abort(_("received file revlog group is empty"))
+            raise error.Abort(_(b"received file revlog group is empty"))
 
     processed = set()
+
     def available(f, node, depf, depnode):
         if depnode != nullid and (depf, depnode) not in processed:
             if not (depf, depnode) in revisiondatas:
@@ -256,7 +266,7 @@
 
         skipcount += 1
         if skipcount > len(queue) + 1:
-            raise error.Abort(_("circular node dependency"))
+            raise error.Abort(_(b"circular node dependency"))
 
         fl = repo.file(f)
 
@@ -267,15 +277,15 @@
         if not available(f, node, f, deltabase):
             continue
 
-        base = fl.revision(deltabase, raw=True)
+        base = fl.rawdata(deltabase)
         text = mdiff.patch(base, delta)
         if not isinstance(text, bytes):
             text = bytes(text)
 
         meta, text = shallowutil.parsemeta(text)
-        if 'copy' in meta:
-            copyfrom = meta['copy']
-            copynode = bin(meta['copyrev'])
+        if b'copy' in meta:
+            copyfrom = meta[b'copy']
+            copynode = bin(meta[b'copyrev'])
             if not available(f, node, copyfrom, copynode):
                 continue
 
--- a/hgext/remotefilelog/shallowrepo.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/shallowrepo.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,6 +15,7 @@
     error,
     localrepo,
     match,
+    pycompat,
     scmutil,
     sparse,
     util,
@@ -37,25 +38,30 @@
 # them.
 def makelocalstores(repo):
     """In-repo stores, like .hg/store/data; can not be discarded."""
-    localpath = os.path.join(repo.svfs.vfs.base, 'data')
+    localpath = os.path.join(repo.svfs.vfs.base, b'data')
     if not os.path.exists(localpath):
         os.makedirs(localpath)
 
     # Instantiate local data stores
     localcontent = contentstore.remotefilelogcontentstore(
-        repo, localpath, repo.name, shared=False)
+        repo, localpath, repo.name, shared=False
+    )
     localmetadata = metadatastore.remotefilelogmetadatastore(
-        repo, localpath, repo.name, shared=False)
+        repo, localpath, repo.name, shared=False
+    )
     return localcontent, localmetadata
 
+
 def makecachestores(repo):
     """Typically machine-wide, cache of remote data; can be discarded."""
     # Instantiate shared cache stores
     cachepath = shallowutil.getcachepath(repo.ui)
     cachecontent = contentstore.remotefilelogcontentstore(
-        repo, cachepath, repo.name, shared=True)
+        repo, cachepath, repo.name, shared=True
+    )
     cachemetadata = metadatastore.remotefilelogmetadatastore(
-        repo, cachepath, repo.name, shared=True)
+        repo, cachepath, repo.name, shared=True
+    )
 
     repo.sharedstore = cachecontent
     repo.shareddatastores.append(cachecontent)
@@ -63,30 +69,35 @@
 
     return cachecontent, cachemetadata
 
+
 def makeremotestores(repo, cachecontent, cachemetadata):
     """These stores fetch data from a remote server."""
     # Instantiate remote stores
     repo.fileservice = fileserverclient.fileserverclient(repo)
     remotecontent = contentstore.remotecontentstore(
-        repo.ui, repo.fileservice, cachecontent)
+        repo.ui, repo.fileservice, cachecontent
+    )
     remotemetadata = metadatastore.remotemetadatastore(
-        repo.ui, repo.fileservice, cachemetadata)
+        repo.ui, repo.fileservice, cachemetadata
+    )
     return remotecontent, remotemetadata
 
+
 def makepackstores(repo):
     """Packs are more efficient (to read from) cache stores."""
     # Instantiate pack stores
-    packpath = shallowutil.getcachepackpath(repo,
-                                            constants.FILEPACK_CATEGORY)
+    packpath = shallowutil.getcachepackpath(repo, constants.FILEPACK_CATEGORY)
     packcontentstore = datapack.datapackstore(repo.ui, packpath)
     packmetadatastore = historypack.historypackstore(repo.ui, packpath)
 
     repo.shareddatastores.append(packcontentstore)
     repo.sharedhistorystores.append(packmetadatastore)
-    shallowutil.reportpackmetrics(repo.ui, 'filestore', packcontentstore,
-        packmetadatastore)
+    shallowutil.reportpackmetrics(
+        repo.ui, b'filestore', packcontentstore, packmetadatastore
+    )
     return packcontentstore, packmetadatastore
 
+
 def makeunionstores(repo):
     """Union stores iterate the other stores and return the first result."""
     repo.shareddatastores = []
@@ -95,37 +106,57 @@
     packcontentstore, packmetadatastore = makepackstores(repo)
     cachecontent, cachemetadata = makecachestores(repo)
     localcontent, localmetadata = makelocalstores(repo)
-    remotecontent, remotemetadata = makeremotestores(repo, cachecontent,
-                                                     cachemetadata)
+    remotecontent, remotemetadata = makeremotestores(
+        repo, cachecontent, cachemetadata
+    )
 
     # Instantiate union stores
     repo.contentstore = contentstore.unioncontentstore(
-        packcontentstore, cachecontent,
-        localcontent, remotecontent, writestore=localcontent)
+        packcontentstore,
+        cachecontent,
+        localcontent,
+        remotecontent,
+        writestore=localcontent,
+    )
     repo.metadatastore = metadatastore.unionmetadatastore(
-        packmetadatastore, cachemetadata, localmetadata, remotemetadata,
-        writestore=localmetadata)
+        packmetadatastore,
+        cachemetadata,
+        localmetadata,
+        remotemetadata,
+        writestore=localmetadata,
+    )
 
     fileservicedatawrite = cachecontent
     fileservicehistorywrite = cachemetadata
-    repo.fileservice.setstore(repo.contentstore, repo.metadatastore,
-                              fileservicedatawrite, fileservicehistorywrite)
-    shallowutil.reportpackmetrics(repo.ui, 'filestore',
-        packcontentstore, packmetadatastore)
+    repo.fileservice.setstore(
+        repo.contentstore,
+        repo.metadatastore,
+        fileservicedatawrite,
+        fileservicehistorywrite,
+    )
+    shallowutil.reportpackmetrics(
+        repo.ui, b'filestore', packcontentstore, packmetadatastore
+    )
+
 
 def wraprepo(repo):
     class shallowrepository(repo.__class__):
         @util.propertycache
         def name(self):
-            return self.ui.config('remotefilelog', 'reponame')
+            return self.ui.config(b'remotefilelog', b'reponame')
 
         @util.propertycache
         def fallbackpath(self):
-            path = repo.ui.config("remotefilelog", "fallbackpath",
-                                  repo.ui.config('paths', 'default'))
+            path = repo.ui.config(
+                b"remotefilelog",
+                b"fallbackpath",
+                repo.ui.config(b'paths', b'default'),
+            )
             if not path:
-                raise error.Abort("no remotefilelog server "
-                    "configured - is your .hg/hgrc trusted?")
+                raise error.Abort(
+                    b"no remotefilelog server "
+                    b"configured - is your .hg/hgrc trusted?"
+                )
 
             return path
 
@@ -145,7 +176,7 @@
             return ret
 
         def file(self, f):
-            if f[0] == '/':
+            if f[0] == b'/':
                 f = f[1:]
 
             if self.shallowmatch(f):
@@ -157,8 +188,9 @@
             if self.shallowmatch(path):
                 return remotefilectx.remotefilectx(self, path, *args, **kwargs)
             else:
-                return super(shallowrepository, self).filectx(path, *args,
-                                                              **kwargs)
+                return super(shallowrepository, self).filectx(
+                    path, *args, **kwargs
+                )
 
         @localrepo.unfilteredmethod
         def commitctx(self, ctx, error=False, origctx=None):
@@ -178,29 +210,44 @@
                     if fparent1 != nullid:
                         files.append((f, hex(fparent1)))
                 self.fileservice.prefetch(files)
-            return super(shallowrepository, self).commitctx(ctx,
-                                                            error=error,
-                                                            origctx=origctx)
+            return super(shallowrepository, self).commitctx(
+                ctx, error=error, origctx=origctx
+            )
 
-        def backgroundprefetch(self, revs, base=None, repack=False, pats=None,
-                               opts=None):
+        def backgroundprefetch(
+            self,
+            revs,
+            base=None,
+            repack=False,
+            pats=None,
+            opts=None,
+            ensurestart=False,
+        ):
             """Runs prefetch in background with optional repack
             """
-            cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'prefetch']
+            cmd = [procutil.hgexecutable(), b'-R', repo.origroot, b'prefetch']
             if repack:
-                cmd.append('--repack')
+                cmd.append(b'--repack')
             if revs:
-                cmd += ['-r', revs]
+                cmd += [b'-r', revs]
             # We know this command will find a binary, so don't block
             # on it starting unless the caller asked us to.
-            procutil.runbgcommand(cmd, encoding.environ, ensurestart=False)
+            procutil.runbgcommand(
+                cmd, encoding.environ, ensurestart=ensurestart
+            )
 
         def prefetch(self, revs, base=None, pats=None, opts=None):
             """Prefetches all the necessary file revisions for the given revs
             Optionally runs repack in background
             """
-            with repo._lock(repo.svfs, 'prefetchlock', True, None, None,
-                            _('prefetching in %s') % repo.origroot):
+            with repo._lock(
+                repo.svfs,
+                b'prefetchlock',
+                True,
+                None,
+                None,
+                _(b'prefetching in %s') % repo.origroot,
+            ):
                 self._prefetch(revs, base, pats, opts)
 
         def _prefetch(self, revs, base=None, pats=None, opts=None):
@@ -209,20 +256,23 @@
                 # If we know a rev is on the server, we should fetch the server
                 # version of those files, since our local file versions might
                 # become obsolete if the local commits are stripped.
-                localrevs = repo.revs('outgoing(%s)', fallbackpath)
+                localrevs = repo.revs(b'outgoing(%s)', fallbackpath)
                 if base is not None and base != nullrev:
-                    serverbase = list(repo.revs('first(reverse(::%s) - %ld)',
-                                                base, localrevs))
+                    serverbase = list(
+                        repo.revs(
+                            b'first(reverse(::%s) - %ld)', base, localrevs
+                        )
+                    )
                     if serverbase:
                         base = serverbase[0]
             else:
                 localrevs = repo
 
             mfl = repo.manifestlog
-            mfrevlog = mfl.getstorage('')
+            mfrevlog = mfl.getstorage(b'')
             if base is not None:
                 mfdict = mfl[repo[base].manifestnode()].read()
-                skip = set(mfdict.iteritems())
+                skip = set(pycompat.iteritems(mfdict))
             else:
                 skip = set()
 
@@ -233,7 +283,7 @@
             visited = set()
             visited.add(nullrev)
             revcount = len(revs)
-            progress = self.ui.makeprogress(_('prefetching'), total=revcount)
+            progress = self.ui.makeprogress(_(b'prefetching'), total=revcount)
             progress.update(0)
             for rev in sorted(revs):
                 ctx = repo[rev]
@@ -252,7 +302,7 @@
                 else:
                     mfdict = mfl[mfnode].read()
 
-                diff = mfdict.iteritems()
+                diff = pycompat.iteritems(mfdict)
                 if pats:
                     diff = (pf for pf in diff if m(pf[0]))
                 if sparsematch:
@@ -289,13 +339,16 @@
 
     makeunionstores(repo)
 
-    repo.includepattern = repo.ui.configlist("remotefilelog", "includepattern",
-                                             None)
-    repo.excludepattern = repo.ui.configlist("remotefilelog", "excludepattern",
-                                             None)
+    repo.includepattern = repo.ui.configlist(
+        b"remotefilelog", b"includepattern", None
+    )
+    repo.excludepattern = repo.ui.configlist(
+        b"remotefilelog", b"excludepattern", None
+    )
     if not util.safehasattr(repo, 'connectionpool'):
         repo.connectionpool = connectionpool.connectionpool(repo)
 
     if repo.includepattern or repo.excludepattern:
-        repo.shallowmatch = match.match(repo.root, '', None,
-            repo.includepattern, repo.excludepattern)
+        repo.shallowmatch = match.match(
+            repo.root, b'', None, repo.includepattern, repo.excludepattern
+        )
--- a/hgext/remotefilelog/shallowstore.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/shallowstore.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,6 +6,7 @@
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 
+
 def wrapstore(store):
     class shallowstore(store.__class__):
         def __contains__(self, path):
--- a/hgext/remotefilelog/shallowutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/shallowutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,6 +15,7 @@
 import tempfile
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     error,
     node,
@@ -31,59 +32,69 @@
 if not pycompat.iswindows:
     import grp
 
+
 def isenabled(repo):
     """returns whether the repository is remotefilelog enabled or not"""
     return constants.SHALLOWREPO_REQUIREMENT in repo.requirements
 
+
 def getcachekey(reponame, file, id):
     pathhash = node.hex(hashlib.sha1(file).digest())
     return os.path.join(reponame, pathhash[:2], pathhash[2:], id)
 
+
 def getlocalkey(file, id):
     pathhash = node.hex(hashlib.sha1(file).digest())
     return os.path.join(pathhash, id)
 
+
 def getcachepath(ui, allowempty=False):
-    cachepath = ui.config("remotefilelog", "cachepath")
+    cachepath = ui.config(b"remotefilelog", b"cachepath")
     if not cachepath:
         if allowempty:
             return None
         else:
-            raise error.Abort(_("could not find config option "
-                                "remotefilelog.cachepath"))
+            raise error.Abort(
+                _(b"could not find config option remotefilelog.cachepath")
+            )
     return util.expandpath(cachepath)
 
+
 def getcachepackpath(repo, category):
     cachepath = getcachepath(repo.ui)
     if category != constants.FILEPACK_CATEGORY:
-        return os.path.join(cachepath, repo.name, 'packs', category)
+        return os.path.join(cachepath, repo.name, b'packs', category)
     else:
-        return os.path.join(cachepath, repo.name, 'packs')
+        return os.path.join(cachepath, repo.name, b'packs')
+
 
 def getlocalpackpath(base, category):
-    return os.path.join(base, 'packs', category)
+    return os.path.join(base, b'packs', category)
+
 
 def createrevlogtext(text, copyfrom=None, copyrev=None):
     """returns a string that matches the revlog contents in a
     traditional revlog
     """
     meta = {}
-    if copyfrom or text.startswith('\1\n'):
+    if copyfrom or text.startswith(b'\1\n'):
         if copyfrom:
-            meta['copy'] = copyfrom
-            meta['copyrev'] = copyrev
+            meta[b'copy'] = copyfrom
+            meta[b'copyrev'] = copyrev
         text = storageutil.packmeta(meta, text)
 
     return text
 
+
 def parsemeta(text):
     """parse mercurial filelog metadata"""
     meta, size = storageutil.parsemeta(text)
-    if text.startswith('\1\n'):
-        s = text.index('\1\n', 2)
-        text = text[s + 2:]
+    if text.startswith(b'\1\n'):
+        s = text.index(b'\1\n', 2)
+        text = text[s + 2 :]
     return meta or {}, text
 
+
 def sumdicts(*dicts):
     """Adds all the values of *dicts together into one dictionary. This assumes
     the values in *dicts are all summable.
@@ -92,21 +103,24 @@
     """
     result = collections.defaultdict(lambda: 0)
     for dict in dicts:
-        for k, v in dict.iteritems():
+        for k, v in pycompat.iteritems(dict):
             result[k] += v
     return result
 
+
 def prefixkeys(dict, prefix):
     """Returns ``dict`` with ``prefix`` prepended to all its keys."""
     result = {}
-    for k, v in dict.iteritems():
+    for k, v in pycompat.iteritems(dict):
         result[prefix + k] = v
     return result
 
+
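
The two helpers above feed reportpackmetrics() below. As a minimal
standalone sketch of their contract (plain Python 3, simplified from
the originals)::

    import collections

    def sumdicts(*dicts):
        result = collections.defaultdict(int)
        for d in dicts:
            for k, v in d.items():
                result[k] += v
        return result

    def prefixkeys(d, prefix):
        return dict((prefix + k, v) for k, v in d.items())

    stats = sumdicts({b'hits': 3}, {b'hits': 1, b'misses': 2})
    print(prefixkeys(stats, b'filestore_'))
    # {b'filestore_hits': 4, b'filestore_misses': 2}
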
 def reportpackmetrics(ui, prefix, *stores):
     dicts = [s.getmetrics() for s in stores]
-    dict = prefixkeys(sumdicts(*dicts), prefix + '_')
-    ui.log(prefix + "_packsizes", "\n", **pycompat.strkwargs(dict))
+    dict = prefixkeys(sumdicts(*dicts), prefix + b'_')
+    ui.log(prefix + b"_packsizes", b"\n", **pycompat.strkwargs(dict))
+
 
 def _parsepackmeta(metabuf):
     """parse datapack meta, bytes (<metadata-list>) -> dict
@@ -121,19 +135,20 @@
     offset = 0
     buflen = len(metabuf)
     while buflen - offset >= 3:
-        key = metabuf[offset:offset + 1]
+        key = metabuf[offset : offset + 1]
         offset += 1
-        metalen = struct.unpack_from('!H', metabuf, offset)[0]
+        metalen = struct.unpack_from(b'!H', metabuf, offset)[0]
         offset += 2
         if offset + metalen > buflen:
-            raise ValueError('corrupted metadata: incomplete buffer')
-        value = metabuf[offset:offset + metalen]
+            raise ValueError(b'corrupted metadata: incomplete buffer')
+        value = metabuf[offset : offset + metalen]
         metadict[key] = value
         offset += metalen
     if offset != buflen:
-        raise ValueError('corrupted metadata: redundant data')
+        raise ValueError(b'corrupted metadata: redundant data')
     return metadict
 
+
 def _buildpackmeta(metadict):
     """reverse of _parsepackmeta, dict -> bytes (<metadata-list>)
 
@@ -144,25 +159,28 @@
     raise ProgrammingError when metadata key is illegal, or ValueError if
     length limit is exceeded
     """
-    metabuf = ''
-    for k, v in sorted((metadict or {}).iteritems()):
+    metabuf = b''
+    for k, v in sorted(pycompat.iteritems((metadict or {}))):
         if len(k) != 1:
-            raise error.ProgrammingError('packmeta: illegal key: %s' % k)
-        if len(v) > 0xfffe:
-            raise ValueError('metadata value is too long: 0x%x > 0xfffe'
-                             % len(v))
+            raise error.ProgrammingError(b'packmeta: illegal key: %s' % k)
+        if len(v) > 0xFFFE:
+            raise ValueError(
+                b'metadata value is too long: 0x%x > 0xfffe' % len(v)
+            )
         metabuf += k
-        metabuf += struct.pack('!H', len(v))
+        metabuf += struct.pack(b'!H', len(v))
         metabuf += v
     # len(metabuf) is guaranteed representable in 4 bytes, because there are
     # only 256 keys, and for each value, len(value) <= 0xfffe.
     return metabuf
 
+
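
The <metadata-list> wire format handled by the two functions above is a
flat sequence of records: a one-byte key, a two-byte big-endian length,
then the value. A self-contained roundtrip sketch (simplified; the real
helpers also reject trailing bytes and illegal keys)::

    import struct

    def buildmeta(metadict):
        # one record per key: 1-byte key, 2-byte big-endian length, value
        buf = b''
        for k, v in sorted(metadict.items()):
            assert len(k) == 1 and len(v) <= 0xFFFE
            buf += k + struct.pack('!H', len(v)) + v
        return buf

    def parsemeta(buf):
        meta, offset = {}, 0
        while len(buf) - offset >= 3:
            key = buf[offset:offset + 1]
            (vlen,) = struct.unpack_from('!H', buf, offset + 1)
            meta[key] = buf[offset + 3:offset + 3 + vlen]
            offset += 3 + vlen
        return meta

    assert parsemeta(buildmeta({b's': b'1234', b'f': b'\x00'})) == \
        {b's': b'1234', b'f': b'\x00'}
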
 _metaitemtypes = {
     constants.METAKEYFLAG: (int, pycompat.long),
     constants.METAKEYSIZE: (int, pycompat.long),
 }
 
+
 def buildpackmeta(metadict):
     """like _buildpackmeta, but typechecks metadict and normalize it.
 
@@ -170,10 +188,10 @@
     and METAKEYFLAG will be dropped if its value is 0.
     """
     newmeta = {}
-    for k, v in (metadict or {}).iteritems():
+    for k, v in pycompat.iteritems(metadict or {}):
         expectedtype = _metaitemtypes.get(k, (bytes,))
         if not isinstance(v, expectedtype):
-            raise error.ProgrammingError('packmeta: wrong type of key %s' % k)
+            raise error.ProgrammingError(b'packmeta: wrong type of key %s' % k)
         # normalize int to binary buffer
         if int in expectedtype:
             # optimization: remove flag if it's 0 to save space
@@ -183,6 +201,7 @@
         newmeta[k] = v
     return _buildpackmeta(newmeta)
 
+
 def parsepackmeta(metabuf):
     """like _parsepackmeta, but convert fields to desired types automatically.
 
@@ -190,19 +209,21 @@
     integers.
     """
     metadict = _parsepackmeta(metabuf)
-    for k, v in metadict.iteritems():
+    for k, v in pycompat.iteritems(metadict):
         if k in _metaitemtypes and int in _metaitemtypes[k]:
             metadict[k] = bin2int(v)
     return metadict
 
+
 def int2bin(n):
     """convert a non-negative integer to raw binary buffer"""
     buf = bytearray()
     while n > 0:
-        buf.insert(0, n & 0xff)
+        buf.insert(0, n & 0xFF)
         n >>= 8
     return bytes(buf)
 
+
 def bin2int(buf):
     """the reverse of int2bin, convert a binary buffer to an integer"""
     x = 0
@@ -211,6 +232,7 @@
         x |= b
     return x
 
+
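
int2bin() and bin2int() implement a minimal big-endian, variable-length
integer encoding. Restated for Python 3, where iterating bytes yields
integers::

    def int2bin(n):
        buf = bytearray()
        while n > 0:
            buf.insert(0, n & 0xFF)   # prepend the low byte
            n >>= 8
        return bytes(buf)

    def bin2int(buf):
        x = 0
        for b in buf:
            x = (x << 8) | b
        return x

    assert bin2int(int2bin(0x1234)) == 0x1234
    assert int2bin(0) == b''          # zero encodes as the empty buffer
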
 def parsesizeflags(raw):
     """given a remotefilelog blob, return (headersize, rawtextsize, flags)
 
@@ -220,19 +242,20 @@
     flags = revlog.REVIDX_DEFAULT_FLAGS
     size = None
     try:
-        index = raw.index('\0')
+        index = raw.index(b'\0')
         header = raw[:index]
-        if header.startswith('v'):
+        if header.startswith(b'v'):
             # v1 and above, header starts with 'v'
-            if header.startswith('v1\n'):
-                for s in header.split('\n'):
+            if header.startswith(b'v1\n'):
+                for s in header.split(b'\n'):
                     if s.startswith(constants.METAKEYSIZE):
-                        size = int(s[len(constants.METAKEYSIZE):])
+                        size = int(s[len(constants.METAKEYSIZE) :])
                     elif s.startswith(constants.METAKEYFLAG):
-                        flags = int(s[len(constants.METAKEYFLAG):])
+                        flags = int(s[len(constants.METAKEYFLAG) :])
             else:
-                raise RuntimeError('unsupported remotefilelog header: %s'
-                                   % header)
+                raise RuntimeError(
+                    b'unsupported remotefilelog header: %s' % header
+                )
         else:
             # v0, str(int(size)) is the header
             size = int(header)
@@ -242,6 +265,7 @@
         raise RuntimeError(r"unexpected remotefilelog header: no size found")
     return index + 1, size, flags
 
+
 def buildfileblobheader(size, flags, version=None):
     """return the header of a remotefilelog blob.
 
@@ -254,56 +278,63 @@
     if version is None:
         version = int(bool(flags))
     if version == 1:
-        header = ('v1\n%s%d\n%s%d'
-                  % (constants.METAKEYSIZE, size,
-                     constants.METAKEYFLAG, flags))
+        header = b'v1\n%s%d\n%s%d' % (
+            constants.METAKEYSIZE,
+            size,
+            constants.METAKEYFLAG,
+            flags,
+        )
     elif version == 0:
         if flags:
-            raise error.ProgrammingError('fileblob v0 does not support flag')
-        header = '%d' % size
+            raise error.ProgrammingError(b'fileblob v0 does not support flag')
+        header = b'%d' % size
     else:
-        raise error.ProgrammingError('unknown fileblob version %d' % version)
+        raise error.ProgrammingError(b'unknown fileblob version %d' % version)
     return header
 
+
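
Together, parsesizeflags() and buildfileblobheader() define the
remotefilelog blob header: everything up to the first NUL byte, either a
bare decimal size (v0) or 'v1' plus size/flag fields. A sketch of the v0
path (the METAKEYSIZE/METAKEYFLAG byte values here are assumptions; the
real ones live in remotefilelog's constants module)::

    METAKEYSIZE, METAKEYFLAG = b's', b'f'   # hypothetical key bytes

    def buildheader(size, flags):
        if flags:
            return b'v1\n%s%d\n%s%d' % (METAKEYSIZE, size,
                                        METAKEYFLAG, flags)
        return b'%d' % size                 # v0: header is just str(size)

    blob = buildheader(11, 0) + b'\0' + b'hello world'
    header, raw = blob.split(b'\0', 1)
    assert int(header) == len(raw)
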
 def ancestormap(raw):
     offset, size, flags = parsesizeflags(raw)
     start = offset + size
 
     mapping = {}
     while start < len(raw):
-        divider = raw.index('\0', start + 80)
+        divider = raw.index(b'\0', start + 80)
 
-        currentnode = raw[start:(start + 20)]
-        p1 = raw[(start + 20):(start + 40)]
-        p2 = raw[(start + 40):(start + 60)]
-        linknode = raw[(start + 60):(start + 80)]
-        copyfrom = raw[(start + 80):divider]
+        currentnode = raw[start : (start + 20)]
+        p1 = raw[(start + 20) : (start + 40)]
+        p2 = raw[(start + 40) : (start + 60)]
+        linknode = raw[(start + 60) : (start + 80)]
+        copyfrom = raw[(start + 80) : divider]
 
         mapping[currentnode] = (p1, p2, linknode, copyfrom)
         start = divider + 1
 
     return mapping
 
+
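
Each ancestry record parsed above is fixed-width except for the copy
source: four 20-byte nodes (node, p1, p2, linknode), a variable-length
copyfrom path, and a NUL terminator, which is why the parser searches
for b'\0' starting at offset start + 80. In miniature::

    nullid = b'\0' * 20

    def packancestor(node, p1, p2, linknode, copyfrom=b''):
        assert all(len(n) == 20 for n in (node, p1, p2, linknode))
        return node + p1 + p2 + linknode + copyfrom + b'\0'

    rec = packancestor(b'n' * 20, b'p' * 20, nullid, b'l' * 20,
                       b'old/path')
    divider = rec.index(b'\0', 80)    # NULs inside p2 are skipped
    assert rec[80:divider] == b'old/path'
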
 def readfile(path):
-    f = open(path, 'rb')
+    f = open(path, b'rb')
     try:
         result = f.read()
 
         # we should never have empty files
         if not result:
             os.remove(path)
-            raise IOError("empty file: %s" % path)
+            raise IOError(b"empty file: %s" % path)
 
         return result
     finally:
         f.close()
 
+
 def unlinkfile(filepath):
     if pycompat.iswindows:
         # On Windows, os.unlink cannot delete readonly files
         os.chmod(filepath, stat.S_IWUSR)
     os.unlink(filepath)
 
+
 def renamefile(source, destination):
     if pycompat.iswindows:
         # On Windows, os.rename cannot rename readonly files
@@ -315,6 +346,7 @@
 
     os.rename(source, destination)
 
+
 def writefile(path, content, readonly=False):
     dirname, filename = os.path.split(path)
     if not os.path.exists(dirname):
@@ -324,11 +356,11 @@
             if ex.errno != errno.EEXIST:
                 raise
 
-    fd, temp = tempfile.mkstemp(prefix='.%s-' % filename, dir=dirname)
+    fd, temp = tempfile.mkstemp(prefix=b'.%s-' % filename, dir=dirname)
     os.close(fd)
 
     try:
-        f = util.posixfile(temp, 'wb')
+        f = util.posixfile(temp, b'wb')
         f.write(content)
         f.close()
 
@@ -352,6 +384,7 @@
             pass
         raise
 
+
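
writefile() follows the classic write-temp-then-rename pattern so that
readers never observe a half-written file. A minimal POSIX sketch of the
same idea (the real function also takes a readonly flag and does extra
cleanup)::

    import os
    import tempfile

    def atomicwrite(path, content):
        dirname, filename = os.path.split(path)
        # the temp file must live in the same directory so the rename
        # stays on one filesystem and is therefore atomic on POSIX
        fd, temp = tempfile.mkstemp(prefix='.%s-' % filename,
                                    dir=dirname or '.')
        try:
            with os.fdopen(fd, 'wb') as f:
                f.write(content)
            os.rename(temp, path)
        except Exception:
            os.unlink(temp)
            raise
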
 def sortnodes(nodes, parentfunc):
     """Topologically sorts the nodes, using the parentfunc to find
     the parents of nodes."""
@@ -388,36 +421,43 @@
 
     return results
 
+
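
The body of sortnodes() is elided by the hunk above; its contract is a
parents-first topological ordering driven by a caller-supplied
parentfunc. A recursive sketch of that contract only (the elided body
has its own ordering rules)::

    def sortnodes(nodes, parentfunc):
        nodes, result, seen = set(nodes), [], set()

        def visit(n):
            if n in seen:
                return
            seen.add(n)
            for p in parentfunc(n):
                if p in nodes:        # ignore parents outside the set
                    visit(p)
            result.append(n)          # all parents are emitted first

        for n in sorted(nodes):
            visit(n)
        return result

    parents = {b'a': [], b'b': [b'a'], b'c': [b'a'], b'd': [b'b', b'c']}
    assert sortnodes(parents, parents.get) == [b'a', b'b', b'c', b'd']
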
 def readexactly(stream, n):
     '''read n bytes from stream.read and abort if fewer were available'''
     s = stream.read(n)
     if len(s) < n:
-        raise error.Abort(_("stream ended unexpectedly"
-                           " (got %d bytes, expected %d)")
-                          % (len(s), n))
+        raise error.Abort(
+            _(b"stream ended unexpectedly (got %d bytes, expected %d)")
+            % (len(s), n)
+        )
     return s
 
+
 def readunpack(stream, fmt):
     data = readexactly(stream, struct.calcsize(fmt))
     return struct.unpack(fmt, data)
 
+
 def readpath(stream):
     rawlen = readexactly(stream, constants.FILENAMESIZE)
     pathlen = struct.unpack(constants.FILENAMESTRUCT, rawlen)[0]
     return readexactly(stream, pathlen)
 
+
 def readnodelist(stream):
     rawlen = readexactly(stream, constants.NODECOUNTSIZE)
     nodecount = struct.unpack(constants.NODECOUNTSTRUCT, rawlen)[0]
     for i in pycompat.xrange(nodecount):
         yield readexactly(stream, constants.NODESIZE)
 
+
 def readpathlist(stream):
     rawlen = readexactly(stream, constants.PATHCOUNTSIZE)
     pathcount = struct.unpack(constants.PATHCOUNTSTRUCT, rawlen)[0]
     for i in pycompat.xrange(pathcount):
         yield readpath(stream)
 
+
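
readexactly() is the safety primitive under all of these framed readers:
every length field is read in full or the operation aborts. A standalone
sketch of the path framing (the '!H' length format is an assumption
standing in for constants.FILENAMESTRUCT)::

    import io
    import struct

    FILENAMESTRUCT = '!H'             # assumed: 2-byte big-endian length
    FILENAMESIZE = struct.calcsize(FILENAMESTRUCT)

    def readexactly(stream, n):
        s = stream.read(n)
        if len(s) < n:
            raise EOFError('got %d bytes, expected %d' % (len(s), n))
        return s

    def readpath(stream):
        rawlen = readexactly(stream, FILENAMESIZE)
        (pathlen,) = struct.unpack(FILENAMESTRUCT, rawlen)
        return readexactly(stream, pathlen)

    wire = struct.pack(FILENAMESTRUCT, 5) + b'a/b.c'
    assert readpath(io.BytesIO(wire)) == b'a/b.c'
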
 def getgid(groupname):
     try:
         gid = grp.getgrnam(pycompat.fsdecode(groupname)).gr_gid
@@ -425,6 +465,7 @@
     except KeyError:
         return None
 
+
 def setstickygroupdir(path, gid, warn=None):
     if gid is None:
         return
@@ -433,17 +474,18 @@
         os.chmod(path, 0o2775)
     except (IOError, OSError) as ex:
         if warn:
-            warn(_('unable to chown/chmod on %s: %s\n') % (path, ex))
+            warn(_(b'unable to chown/chmod on %s: %s\n') % (path, ex))
+
 
 def mkstickygroupdir(ui, path):
     """Creates the given directory (if it doesn't exist) and give it a
     particular group with setgid enabled."""
     gid = None
-    groupname = ui.config("remotefilelog", "cachegroup")
+    groupname = ui.config(b"remotefilelog", b"cachegroup")
     if groupname:
         gid = getgid(groupname)
         if gid is None:
-            ui.warn(_('unable to resolve group name: %s\n') % groupname)
+            ui.warn(_(b'unable to resolve group name: %s\n') % groupname)
 
     # we use a single stat syscall to test the existence and mode / group bit
     st = None
@@ -479,14 +521,16 @@
     finally:
         os.umask(oldumask)
 
+
 def getusername(ui):
     try:
         return stringutil.shortuser(ui.username())
     except Exception:
-        return 'unknown'
+        return b'unknown'
+
 
 def getreponame(ui):
-    reponame = ui.config('paths', 'default')
+    reponame = ui.config(b'paths', b'default')
     if reponame:
         return os.path.basename(reponame)
-    return "unknown"
+    return b"unknown"
--- a/hgext/remotefilelog/shallowverifier.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotefilelog/shallowverifier.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,9 +9,11 @@
 from mercurial.i18n import _
 from mercurial import verify
 
+
 class shallowverifier(verify.verifier):
     def _verifyfiles(self, filenodes, filelinkrevs):
         """Skips files verification since repo's not guaranteed to have them"""
         self.repo.ui.status(
-            _("skipping filelog check since remotefilelog is used\n"))
+            _(b"skipping filelog check since remotefilelog is used\n")
+        )
         return 0, 0
--- a/hgext/remotenames.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/remotenames.py	Mon Oct 21 11:09:48 2019 -0400
@@ -28,9 +28,7 @@
 
 from mercurial.i18n import _
 
-from mercurial.node import (
-    bin,
-)
+from mercurial.node import bin
 from mercurial import (
     bookmarks,
     error,
@@ -45,38 +43,39 @@
     util,
 )
 
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 if pycompat.ispy3:
     import collections.abc
+
     mutablemapping = collections.abc.MutableMapping
 else:
     import collections
+
     mutablemapping = collections.MutableMapping
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 templatekeyword = registrar.templatekeyword()
 revsetpredicate = registrar.revsetpredicate()
 
-configitem('remotenames', 'bookmarks',
-    default=True,
+configitem(
+    b'remotenames', b'bookmarks', default=True,
 )
-configitem('remotenames', 'branches',
-    default=True,
+configitem(
+    b'remotenames', b'branches', default=True,
 )
-configitem('remotenames', 'hoistedpeer',
-    default='default',
+configitem(
+    b'remotenames', b'hoistedpeer', default=b'default',
 )
 
+
 class lazyremotenamedict(mutablemapping):
     """
     Read-only dict-like class to lazily resolve remotename entries
@@ -88,10 +87,11 @@
     is in self.potentialentries we resolve it and store the result in
     self.cache. The one case where we cannot be lazy is when we are
     asked for all the entries (keys).
     """
+
     def __init__(self, kind, repo):
         self.cache = {}
         self.potentialentries = {}
-        self._kind = kind # bookmarks or branches
+        self._kind = kind  # bookmarks or branches
         self._repo = repo
         self.loaded = False
 
@@ -99,9 +99,10 @@
         """ Read the remotenames file, store entries matching selected kind """
         self.loaded = True
         repo = self._repo
-        for node, rpath, rname in logexchange.readremotenamefile(repo,
-                                                                self._kind):
-            name = rpath + '/' + rname
+        for node, rpath, rname in logexchange.readremotenamefile(
+            repo, self._kind
+        ):
+            name = rpath + b'/' + rname
             self.potentialentries[name] = (node, rpath, name)
 
     def _resolvedata(self, potentialentry):
@@ -117,7 +118,7 @@
         except LookupError:
             return None
         # Skip closed branches
-        if (self._kind == 'branches' and repo[binnode].closesbranch()):
+        if self._kind == b'branches' and repo[binnode].closesbranch():
             return None
         return [binnode]
 
@@ -164,11 +165,12 @@
         if not self.loaded:
             self._load()
 
-        for k, vtup in self.potentialentries.iteritems():
+        for k, vtup in pycompat.iteritems(self.potentialentries):
             yield (k, [bin(vtup[0])])
 
     items = iteritems
 
+
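
The pattern in lazyremotenamedict generalizes: keep cheap, unresolved
entries around and pay the resolution cost per key on first access. A
minimal read-only sketch of the same shape (hypothetical names; the real
class also drops unresolvable entries and special-cases listing all
keys)::

    import collections.abc

    class lazydict(collections.abc.Mapping):
        def __init__(self, potentialentries, resolve):
            self._potential = potentialentries   # name -> raw entry
            self._resolve = resolve              # raw entry -> value
            self._cache = {}

        def __getitem__(self, key):
            if key not in self._cache:           # resolve once, then cache
                self._cache[key] = self._resolve(self._potential[key])
            return self._cache[key]

        def __iter__(self):
            return iter(self._potential)

        def __len__(self):
            return len(self._potential)
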
 class remotenames(object):
     """
     This class encapsulates all the remotenames state. It also contains
@@ -183,8 +185,8 @@
 
     def clearnames(self):
         """ Clear all remote names state """
-        self.bookmarks = lazyremotenamedict("bookmarks", self._repo)
-        self.branches = lazyremotenamedict("branches", self._repo)
+        self.bookmarks = lazyremotenamedict(b"bookmarks", self._repo)
+        self.branches = lazyremotenamedict(b"branches", self._repo)
         self._invalidatecache()
 
     def _invalidatecache(self):
@@ -200,7 +202,7 @@
         if not self._nodetobmarks:
             bmarktonodes = self.bmarktonodes()
             self._nodetobmarks = {}
-            for name, node in bmarktonodes.iteritems():
+            for name, node in pycompat.iteritems(bmarktonodes):
                 self._nodetobmarks.setdefault(node[0], []).append(name)
         return self._nodetobmarks
 
@@ -211,7 +213,7 @@
         if not self._nodetobranch:
             branchtonodes = self.branchtonodes()
             self._nodetobranch = {}
-            for name, nodes in branchtonodes.iteritems():
+            for name, nodes in pycompat.iteritems(branchtonodes):
                 for node in nodes:
                     self._nodetobranch.setdefault(node, []).append(name)
         return self._nodetobranch
@@ -220,10 +222,10 @@
         if not self._hoisttonodes:
             marktonodes = self.bmarktonodes()
             self._hoisttonodes = {}
-            hoist += '/'
-            for name, node in marktonodes.iteritems():
+            hoist += b'/'
+            for name, node in pycompat.iteritems(marktonodes):
                 if name.startswith(hoist):
-                    name = name[len(hoist):]
+                    name = name[len(hoist) :]
                     self._hoisttonodes[name] = node
         return self._hoisttonodes
 
@@ -231,17 +233,18 @@
         if not self._nodetohoists:
             marktonodes = self.bmarktonodes()
             self._nodetohoists = {}
-            hoist += '/'
-            for name, node in marktonodes.iteritems():
+            hoist += b'/'
+            for name, node in pycompat.iteritems(marktonodes):
                 if name.startswith(hoist):
-                    name = name[len(hoist):]
+                    name = name[len(hoist) :]
                     self._nodetohoists.setdefault(node[0], []).append(name)
         return self._nodetohoists
 
+
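
Hoisting, as implemented in hoisttonodes() above, just strips the
configured peer's 'path/' prefix so its bookmarks are addressable by
bare name. In miniature::

    marks = {b'default/stable': b'n1', b'default/dev': b'n2',
             b'upstream/stable': b'n3'}
    hoist = b'default/'
    hoisted = {name[len(hoist):]: node
               for name, node in marks.items() if name.startswith(hoist)}
    assert hoisted == {b'stable': b'n1', b'dev': b'n2'}
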
 def wrapprintbookmarks(orig, ui, repo, fm, bmarks):
-    if 'remotebookmarks' not in repo.names:
+    if b'remotebookmarks' not in repo.names:
         return
-    ns = repo.names['remotebookmarks']
+    ns = repo.names[b'remotebookmarks']
 
     for name in ns.listnames(repo):
         nodes = ns.nodes(repo, name)
@@ -249,17 +252,19 @@
             continue
         node = nodes[0]
 
-        bmarks[name] = (node, ' ', '')
+        bmarks[name] = (node, b' ', b'')
 
     return orig(ui, repo, fm, bmarks)
 
+
 def extsetup(ui):
-    extensions.wrapfunction(bookmarks, '_printbookmarks', wrapprintbookmarks)
+    extensions.wrapfunction(bookmarks, b'_printbookmarks', wrapprintbookmarks)
+
 
 def reposetup(ui, repo):
 
     # set the config option to store remotenames
-    repo.ui.setconfig('experimental', 'remotenames', True, 'remotenames-ext')
+    repo.ui.setconfig(b'experimental', b'remotenames', True, b'remotenames-ext')
 
     if not repo.local():
         return
@@ -267,96 +272,122 @@
     repo._remotenames = remotenames(repo)
     ns = namespaces.namespace
 
-    if ui.configbool('remotenames', 'bookmarks'):
+    if ui.configbool(b'remotenames', b'bookmarks'):
         remotebookmarkns = ns(
-            'remotebookmarks',
-            templatename='remotebookmarks',
-            colorname='remotebookmark',
-            logfmt='remote bookmark:  %s\n',
+            b'remotebookmarks',
+            templatename=b'remotebookmarks',
+            colorname=b'remotebookmark',
+            logfmt=b'remote bookmark:  %s\n',
             listnames=lambda repo: repo._remotenames.bmarktonodes().keys(),
-            namemap=lambda repo, name:
-                repo._remotenames.bmarktonodes().get(name, []),
-            nodemap=lambda repo, node:
-                repo._remotenames.nodetobmarks().get(node, []))
+            namemap=lambda repo, name: repo._remotenames.bmarktonodes().get(
+                name, []
+            ),
+            nodemap=lambda repo, node: repo._remotenames.nodetobmarks().get(
+                node, []
+            ),
+        )
         repo.names.addnamespace(remotebookmarkns)
 
         # hoisting only works if there are remote bookmarks
-        hoist = ui.config('remotenames', 'hoistedpeer')
+        hoist = ui.config(b'remotenames', b'hoistedpeer')
         if hoist:
             hoistednamens = ns(
-                'hoistednames',
-                templatename='hoistednames',
-                colorname='hoistedname',
-                logfmt='hoisted name:  %s\n',
-                listnames = lambda repo:
-                    repo._remotenames.hoisttonodes(hoist).keys(),
-                namemap = lambda repo, name:
-                    repo._remotenames.hoisttonodes(hoist).get(name, []),
-                nodemap = lambda repo, node:
-                    repo._remotenames.nodetohoists(hoist).get(node, []))
+                b'hoistednames',
+                templatename=b'hoistednames',
+                colorname=b'hoistedname',
+                logfmt=b'hoisted name:  %s\n',
+                listnames=lambda repo: repo._remotenames.hoisttonodes(
+                    hoist
+                ).keys(),
+                namemap=lambda repo, name: repo._remotenames.hoisttonodes(
+                    hoist
+                ).get(name, []),
+                nodemap=lambda repo, node: repo._remotenames.nodetohoists(
+                    hoist
+                ).get(node, []),
+            )
             repo.names.addnamespace(hoistednamens)
 
-    if ui.configbool('remotenames', 'branches'):
+    if ui.configbool(b'remotenames', b'branches'):
         remotebranchns = ns(
-            'remotebranches',
-            templatename='remotebranches',
-            colorname='remotebranch',
-            logfmt='remote branch:  %s\n',
-            listnames = lambda repo: repo._remotenames.branchtonodes().keys(),
-            namemap = lambda repo, name:
-                repo._remotenames.branchtonodes().get(name, []),
-            nodemap = lambda repo, node:
-                repo._remotenames.nodetobranch().get(node, []))
+            b'remotebranches',
+            templatename=b'remotebranches',
+            colorname=b'remotebranch',
+            logfmt=b'remote branch:  %s\n',
+            listnames=lambda repo: repo._remotenames.branchtonodes().keys(),
+            namemap=lambda repo, name: repo._remotenames.branchtonodes().get(
+                name, []
+            ),
+            nodemap=lambda repo, node: repo._remotenames.nodetobranch().get(
+                node, []
+            ),
+        )
         repo.names.addnamespace(remotebranchns)
 
-@templatekeyword('remotenames', requires={'repo', 'ctx'})
+
+@templatekeyword(b'remotenames', requires={b'repo', b'ctx'})
 def remotenameskw(context, mapping):
     """List of strings. Remote names associated with the changeset."""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
 
     remotenames = []
-    if 'remotebookmarks' in repo.names:
-        remotenames = repo.names['remotebookmarks'].names(repo, ctx.node())
+    if b'remotebookmarks' in repo.names:
+        remotenames = repo.names[b'remotebookmarks'].names(repo, ctx.node())
+
+    if b'remotebranches' in repo.names:
+        remotenames += repo.names[b'remotebranches'].names(repo, ctx.node())
 
-    if 'remotebranches' in repo.names:
-        remotenames += repo.names['remotebranches'].names(repo, ctx.node())
+    return templateutil.compatlist(
+        context, mapping, b'remotename', remotenames, plural=b'remotenames'
+    )
 
-    return templateutil.compatlist(context, mapping, 'remotename', remotenames,
-                                   plural='remotenames')
 
-@templatekeyword('remotebookmarks', requires={'repo', 'ctx'})
+@templatekeyword(b'remotebookmarks', requires={b'repo', b'ctx'})
 def remotebookmarkskw(context, mapping):
     """List of strings. Remote bookmarks associated with the changeset."""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
 
     remotebmarks = []
-    if 'remotebookmarks' in repo.names:
-        remotebmarks = repo.names['remotebookmarks'].names(repo, ctx.node())
+    if b'remotebookmarks' in repo.names:
+        remotebmarks = repo.names[b'remotebookmarks'].names(repo, ctx.node())
 
-    return templateutil.compatlist(context, mapping, 'remotebookmark',
-                                   remotebmarks, plural='remotebookmarks')
+    return templateutil.compatlist(
+        context,
+        mapping,
+        b'remotebookmark',
+        remotebmarks,
+        plural=b'remotebookmarks',
+    )
 
-@templatekeyword('remotebranches', requires={'repo', 'ctx'})
+
+@templatekeyword(b'remotebranches', requires={b'repo', b'ctx'})
 def remotebrancheskw(context, mapping):
     """List of strings. Remote branches associated with the changeset."""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
 
     remotebranches = []
-    if 'remotebranches' in repo.names:
-        remotebranches = repo.names['remotebranches'].names(repo, ctx.node())
+    if b'remotebranches' in repo.names:
+        remotebranches = repo.names[b'remotebranches'].names(repo, ctx.node())
 
-    return templateutil.compatlist(context, mapping, 'remotebranch',
-                                   remotebranches, plural='remotebranches')
+    return templateutil.compatlist(
+        context,
+        mapping,
+        b'remotebranch',
+        remotebranches,
+        plural=b'remotebranches',
+    )
+
 
 def _revsetutil(repo, subset, x, rtypes):
     """utility function to return a set of revs based on the rtypes"""
-    args = revsetlang.getargs(x, 0, 1, _('only one argument accepted'))
+    args = revsetlang.getargs(x, 0, 1, _(b'only one argument accepted'))
     if args:
         kind, pattern, matcher = stringutil.stringmatcher(
-            revsetlang.getstring(args[0], _('argument must be a string')))
+            revsetlang.getstring(args[0], _(b'argument must be a string'))
+        )
     else:
         kind = pattern = None
         matcher = util.always
@@ -370,36 +401,40 @@
                 if not matcher(name):
                     continue
                 nodes.update(ns.nodes(repo, name))
-    if kind == 'literal' and not nodes:
-        raise error.RepoLookupError(_("remote name '%s' does not exist")
-                                    % pattern)
+    if kind == b'literal' and not nodes:
+        raise error.RepoLookupError(
+            _(b"remote name '%s' does not exist") % pattern
+        )
 
     revs = (cl.rev(n) for n in nodes if cl.hasnode(n))
     return subset & smartset.baseset(revs)
 
-@revsetpredicate('remotenames([name])')
+
+@revsetpredicate(b'remotenames([name])')
 def remotenamesrevset(repo, subset, x):
     """All changesets which have a remotename on them. If `name` is
     specified, only remotenames of matching remote paths are considered.
 
     Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
     """
-    return _revsetutil(repo, subset, x, ('remotebookmarks', 'remotebranches'))
+    return _revsetutil(repo, subset, x, (b'remotebookmarks', b'remotebranches'))
 
-@revsetpredicate('remotebranches([name])')
+
+@revsetpredicate(b'remotebranches([name])')
 def remotebranchesrevset(repo, subset, x):
     """All changesets which are branch heads on remotes. If `name` is
     specified, only remotenames of matching remote paths are considered.
 
     Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
     """
-    return _revsetutil(repo, subset, x, ('remotebranches',))
+    return _revsetutil(repo, subset, x, (b'remotebranches',))
 
-@revsetpredicate('remotebookmarks([name])')
+
+@revsetpredicate(b'remotebookmarks([name])')
 def remotebmarksrevset(repo, subset, x):
     """All changesets which have bookmarks on remotes. If `name` is
     specified, only remotenames of matching remote paths are considered.
 
     Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
     """
-    return _revsetutil(repo, subset, x, ('remotebookmarks',))
+    return _revsetutil(repo, subset, x, (b'remotebookmarks',))
--- a/hgext/schemes.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/schemes.py	Mon Oct 21 11:09:48 2019 -0400
@@ -61,10 +61,11 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 _partre = re.compile(br'\{(\d+)\}')
 
+
 class ShortRepository(object):
     def __init__(self, url, scheme, templater):
         self.scheme = scheme
@@ -76,60 +77,74 @@
             self.parts = 0
 
     def __repr__(self):
-        return '<ShortRepository: %s>' % self.scheme
+        return b'<ShortRepository: %s>' % self.scheme
 
     def instance(self, ui, url, create, intents=None, createopts=None):
         url = self.resolve(url)
-        return hg._peerlookup(url).instance(ui, url, create, intents=intents,
-                                            createopts=createopts)
+        return hg._peerlookup(url).instance(
+            ui, url, create, intents=intents, createopts=createopts
+        )
 
     def resolve(self, url):
         # Should this use the util.url class, or is manual parsing better?
         try:
-            url = url.split('://', 1)[1]
+            url = url.split(b'://', 1)[1]
         except IndexError:
-            raise error.Abort(_("no '://' in scheme url '%s'") % url)
-        parts = url.split('/', self.parts)
+            raise error.Abort(_(b"no '://' in scheme url '%s'") % url)
+        parts = url.split(b'/', self.parts)
         if len(parts) > self.parts:
             tail = parts[-1]
             parts = parts[:-1]
         else:
-            tail = ''
-        context = dict(('%d' % (i + 1), v) for i, v in enumerate(parts))
-        return ''.join(self.templater.process(self.url, context)) + tail
+            tail = b''
+        context = dict((b'%d' % (i + 1), v) for i, v in enumerate(parts))
+        return b''.join(self.templater.process(self.url, context)) + tail
+
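
resolve() splits the path into as many leading components as the
template has {N} placeholders and appends whatever is left. A simplified
standalone rendition using plain substitution instead of Mercurial's
templater::

    import re

    def expand(url, schemes):
        scheme, rest = url.split(b'://', 1)
        template = schemes[scheme]
        nparts = max((int(m) for m in re.findall(br'\{(\d+)\}', template)),
                     default=0)
        parts = rest.split(b'/', nparts)
        tail = parts.pop() if len(parts) > nparts else b''
        for i, part in enumerate(parts):
            template = template.replace(b'{%d}' % (i + 1), part)
        return template + tail

    schemes = {b'gcode': b'https://{1}.googlecode.com/hg/'}
    assert (expand(b'gcode://proj/some/file', schemes)
            == b'https://proj.googlecode.com/hg/some/file')
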
 
 def hasdriveletter(orig, path):
     if path:
         for scheme in schemes:
-            if path.startswith(scheme + ':'):
+            if path.startswith(scheme + b':'):
                 return False
     return orig(path)
 
+
 schemes = {
-    'py': 'http://hg.python.org/',
-    'bb': 'https://bitbucket.org/',
-    'bb+ssh': 'ssh://hg@bitbucket.org/',
-    'gcode': 'https://{1}.googlecode.com/hg/',
-    'kiln': 'https://{1}.kilnhg.com/Repo/'
-    }
+    b'py': b'http://hg.python.org/',
+    b'bb': b'https://bitbucket.org/',
+    b'bb+ssh': b'ssh://hg@bitbucket.org/',
+    b'gcode': b'https://{1}.googlecode.com/hg/',
+    b'kiln': b'https://{1}.kilnhg.com/Repo/',
+}
+
 
 def extsetup(ui):
-    schemes.update(dict(ui.configitems('schemes')))
+    schemes.update(dict(ui.configitems(b'schemes')))
     t = templater.engine(templater.parse)
     for scheme, url in schemes.items():
-        if (pycompat.iswindows and len(scheme) == 1 and scheme.isalpha()
-            and os.path.exists('%s:\\' % scheme)):
-            raise error.Abort(_('custom scheme %s:// conflicts with drive '
-                               'letter %s:\\\n') % (scheme, scheme.upper()))
+        if (
+            pycompat.iswindows
+            and len(scheme) == 1
+            and scheme.isalpha()
+            and os.path.exists(b'%s:\\' % scheme)
+        ):
+            raise error.Abort(
+                _(
+                    b'custom scheme %s:// conflicts with drive '
+                    b'letter %s:\\\n'
+                )
+                % (scheme, scheme.upper())
+            )
         hg.schemes[scheme] = ShortRepository(url, scheme, t)
 
-    extensions.wrapfunction(util, 'hasdriveletter', hasdriveletter)
+    extensions.wrapfunction(util, b'hasdriveletter', hasdriveletter)
 
-@command('debugexpandscheme', norepo=True)
+
+@command(b'debugexpandscheme', norepo=True)
 def expandscheme(ui, url, **opts):
     """given a repo path, provide the scheme-expanded path
     """
     repo = hg._peerlookup(url)
     if isinstance(repo, ShortRepository):
         url = repo.resolve(url)
-    ui.write(url + '\n')
+    ui.write(url + b'\n')
--- a/hgext/share.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/share.py	Mon Oct 21 11:09:48 2019 -0400
@@ -58,19 +58,23 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
-@command('share',
-    [('U', 'noupdate', None, _('do not create a working directory')),
-     ('B', 'bookmarks', None, _('also share bookmarks')),
-     ('', 'relative', None, _('point to source using a relative path '
-                              '(EXPERIMENTAL)')),
+@command(
+    b'share',
+    [
+        (b'U', b'noupdate', None, _(b'do not create a working directory')),
+        (b'B', b'bookmarks', None, _(b'also share bookmarks')),
+        (b'', b'relative', None, _(b'point to source using a relative path'),),
     ],
-    _('[-U] [-B] SOURCE [DEST]'),
+    _(b'[-U] [-B] SOURCE [DEST]'),
     helpcategory=command.CATEGORY_REPO_CREATION,
-    norepo=True)
-def share(ui, source, dest=None, noupdate=False, bookmarks=False,
-          relative=False):
+    norepo=True,
+)
+def share(
+    ui, source, dest=None, noupdate=False, bookmarks=False, relative=False
+):
     """create a new shared repository
 
     Initialize a new repository and working directory that shares its
@@ -88,11 +92,18 @@
        the broken clone to reset it to a changeset that still exists.
     """
 
-    hg.share(ui, source, dest=dest, update=not noupdate,
-             bookmarks=bookmarks, relative=relative)
+    hg.share(
+        ui,
+        source,
+        dest=dest,
+        update=not noupdate,
+        bookmarks=bookmarks,
+        relative=relative,
+    )
     return 0
 
-@command('unshare', [], '', helpcategory=command.CATEGORY_MAINTENANCE)
+
+@command(b'unshare', [], b'', helpcategory=command.CATEGORY_MAINTENANCE)
 def unshare(ui, repo):
     """convert a shared repository to a normal one
 
@@ -100,28 +111,31 @@
     """
 
     if not repo.shared():
-        raise error.Abort(_("this is not a shared repo"))
+        raise error.Abort(_(b"this is not a shared repo"))
 
     hg.unshare(ui, repo)
 
+
 # Wrap clone command to pass auto share options.
 def clone(orig, ui, source, *args, **opts):
-    pool = ui.config('share', 'pool')
+    pool = ui.config(b'share', b'pool')
     if pool:
         pool = util.expandpath(pool)
 
     opts[r'shareopts'] = {
-        'pool': pool,
-        'mode': ui.config('share', 'poolnaming'),
+        b'pool': pool,
+        b'mode': ui.config(b'share', b'poolnaming'),
     }
 
     return orig(ui, source, *args, **opts)
 
+
 def extsetup(ui):
-    extensions.wrapfunction(bookmarks, '_getbkfile', getbkfile)
-    extensions.wrapfunction(bookmarks.bmstore, '_recordchange', recordchange)
-    extensions.wrapfunction(bookmarks.bmstore, '_writerepo', writerepo)
-    extensions.wrapcommand(commands.table, 'clone', clone)
+    extensions.wrapfunction(bookmarks, b'_getbkfile', getbkfile)
+    extensions.wrapfunction(bookmarks.bmstore, b'_recordchange', recordchange)
+    extensions.wrapfunction(bookmarks.bmstore, b'_writerepo', writerepo)
+    extensions.wrapcommand(commands.table, b'clone', clone)
+
 
 def _hassharedbookmarks(repo):
     """Returns whether this repo has shared bookmarks"""
@@ -130,13 +144,14 @@
         # from/to the source repo.
         return False
     try:
-        shared = repo.vfs.read('shared').splitlines()
+        shared = repo.vfs.read(b'shared').splitlines()
     except IOError as inst:
         if inst.errno != errno.ENOENT:
             raise
         return False
     return hg.sharedbookmarks in shared
 
+
 def getbkfile(orig, repo):
     if _hassharedbookmarks(repo):
         srcrepo = hg.sharedreposource(repo)
@@ -144,8 +159,9 @@
             # just orig(srcrepo) doesn't work as expected, because
             # HG_PENDING refers repo.root.
             try:
-                fp, pending = txnutil.trypending(repo.root, repo.vfs,
-                                                 'bookmarks')
+                fp, pending = txnutil.trypending(
+                    repo.root, repo.vfs, b'bookmarks'
+                )
                 if pending:
                     # only in this case, bookmark information in repo
                     # is up-to-date.
@@ -165,6 +181,7 @@
             # See also https://www.mercurial-scm.org/wiki/SharedRepository
     return orig(repo)
 
+
 def recordchange(orig, self, tr):
     # Continue with write to local bookmarks file as usual
     orig(self, tr)
@@ -172,9 +189,10 @@
     if _hassharedbookmarks(self._repo):
         srcrepo = hg.sharedreposource(self._repo)
         if srcrepo is not None:
-            category = 'share-bookmarks'
+            category = b'share-bookmarks'
             tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))
 
+
 def writerepo(orig, self, repo):
     # First write local bookmarks file in case we ever unshare
     orig(self, repo)
--- a/hgext/show.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/show.py	Mon Oct 21 11:09:48 2019 -0400
@@ -28,9 +28,7 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
-from mercurial.node import (
-    nullrev,
-)
+from mercurial.node import nullrev
 from mercurial import (
     cmdutil,
     commands,
@@ -51,18 +49,19 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 cmdtable = {}
 command = registrar.command(cmdtable)
 
 revsetpredicate = registrar.revsetpredicate()
 
+
 class showcmdfunc(registrar._funcregistrarbase):
     """Register a function to be invoked for an `hg show <thing>`."""
 
     # Used by _formatdoc().
-    _docformat = '%s -- %s'
+    _docformat = b'%s -- %s'
 
     def _extrasetup(self, name, func, fmtopic=None, csettopic=None):
         """Called with decorator arguments to register a show view.
@@ -84,17 +83,23 @@
         func._fmtopic = fmtopic
         func._csettopic = csettopic
 
+
 showview = showcmdfunc()
 
-@command('show', [
-    # TODO: Switch this template flag to use cmdutil.formatteropts if
-    # 'hg show' becomes stable before --template/-T is stable. For now,
-    # we are putting it here without the '(EXPERIMENTAL)' flag because it
-    # is an important part of the 'hg show' user experience and the entire
-    # 'hg show' experience is experimental.
-    ('T', 'template', '', ('display with template'), _('TEMPLATE')),
-    ], _('VIEW'),
-    helpcategory=command.CATEGORY_CHANGE_NAVIGATION)
+
+@command(
+    b'show',
+    [
+        # TODO: Switch this template flag to use cmdutil.formatteropts if
+        # 'hg show' becomes stable before --template/-T is stable. For now,
+        # we are putting it here without the '(EXPERIMENTAL)' flag because it
+        # is an important part of the 'hg show' user experience and the entire
+        # 'hg show' experience is experimental.
+        (b'T', b'template', b'', b'display with template', _(b'TEMPLATE')),
+    ],
+    _(b'VIEW'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
+)
 def show(ui, repo, view=None, template=None):
     """show various repository information
 
@@ -114,48 +119,55 @@
     List of available views:
     """
     if ui.plain() and not template:
-        hint = _('invoke with -T/--template to control output format')
-        raise error.Abort(_('must specify a template in plain mode'), hint=hint)
+        hint = _(b'invoke with -T/--template to control output format')
+        raise error.Abort(
+            _(b'must specify a template in plain mode'), hint=hint
+        )
 
     views = showview._table
 
     if not view:
-        ui.pager('show')
+        ui.pager(b'show')
         # TODO consider using formatter here so available views can be
         # rendered to custom format.
-        ui.write(_('available views:\n'))
-        ui.write('\n')
+        ui.write(_(b'available views:\n'))
+        ui.write(b'\n')
 
         for name, func in sorted(views.items()):
-            ui.write(('%s\n') % pycompat.sysbytes(func.__doc__))
+            ui.write(b'%s\n' % pycompat.sysbytes(func.__doc__))
 
-        ui.write('\n')
-        raise error.Abort(_('no view requested'),
-                          hint=_('use "hg show VIEW" to choose a view'))
+        ui.write(b'\n')
+        raise error.Abort(
+            _(b'no view requested'),
+            hint=_(b'use "hg show VIEW" to choose a view'),
+        )
 
     # TODO use same logic as dispatch to perform prefix matching.
     if view not in views:
-        raise error.Abort(_('unknown view: %s') % view,
-                          hint=_('run "hg show" to see available views'))
+        raise error.Abort(
+            _(b'unknown view: %s') % view,
+            hint=_(b'run "hg show" to see available views'),
+        )
 
-    template = template or 'show'
+    template = template or b'show'
 
     fn = views[view]
-    ui.pager('show')
+    ui.pager(b'show')
 
     if fn._fmtopic:
-        fmtopic = 'show%s' % fn._fmtopic
-        with ui.formatter(fmtopic, {'template': template}) as fm:
+        fmtopic = b'show%s' % fn._fmtopic
+        with ui.formatter(fmtopic, {b'template': template}) as fm:
             return fn(ui, repo, fm)
     elif fn._csettopic:
-        ref = 'show%s' % fn._csettopic
+        ref = b'show%s' % fn._csettopic
         spec = formatter.lookuptemplate(ui, ref, template)
         displayer = logcmdutil.changesettemplater(ui, repo, spec, buffered=True)
         return fn(ui, repo, displayer)
     else:
         return fn(ui, repo)
 
-@showview('bookmarks', fmtopic='bookmarks')
+
+@showview(b'bookmarks', fmtopic=b'bookmarks')
 def showbookmarks(ui, repo, fm):
     """bookmarks and their associated changeset"""
     marks = repo._bookmarks
@@ -164,7 +176,7 @@
         # specify an empty output, but we shouldn't corrupt JSON while
         # waiting for this functionality.
         if not isinstance(fm, formatter.jsonformatter):
-            ui.write(_('(no bookmarks set)\n'))
+            ui.write(_(b'(no bookmarks set)\n'))
         return
 
     revs = [repo[node].rev() for node in marks.values()]
@@ -175,23 +187,32 @@
     for bm, node in sorted(marks.items()):
         fm.startitem()
         fm.context(ctx=repo[node])
-        fm.write('bookmark', '%s', bm)
-        fm.write('node', fm.hexfunc(node), fm.hexfunc(node))
-        fm.data(active=bm == active,
-                longestbookmarklen=longestname,
-                nodelen=nodelen)
+        fm.write(b'bookmark', b'%s', bm)
+        fm.write(b'node', fm.hexfunc(node), fm.hexfunc(node))
+        fm.data(
+            active=bm == active, longestbookmarklen=longestname, nodelen=nodelen
+        )
 
-@showview('stack', csettopic='stack')
+
+@showview(b'stack', csettopic=b'stack')
 def showstack(ui, repo, displayer):
     """current line of work"""
-    wdirctx = repo['.']
+    wdirctx = repo[b'.']
     if wdirctx.rev() == nullrev:
-        raise error.Abort(_('stack view only available when there is a '
-                            'working directory'))
+        raise error.Abort(
+            _(
+                b'stack view only available when there is a '
+                b'working directory'
+            )
+        )
 
     if wdirctx.phase() == phases.public:
-        ui.write(_('(empty stack; working directory parent is a published '
-                   'changeset)\n'))
+        ui.write(
+            _(
+                b'(empty stack; working directory parent is a published '
+                b'changeset)\n'
+            )
+        )
         return
 
     # TODO extract "find stack" into a function to facilitate
@@ -204,7 +225,7 @@
         baserev = wdirctx.rev()
         stackrevs = {wdirctx.rev()}
     else:
-        stackrevs = set(repo.revs('%d::.', baserev))
+        stackrevs = set(repo.revs(b'%d::.', baserev))
 
     ctx = repo[baserev]
     if ctx.p1().rev() != nullrev:
@@ -238,8 +259,11 @@
     # merge or rebase targets.
     if basectx:
         # TODO make this customizable?
-        newheads = set(repo.revs('heads(%d::) - %ld - not public()',
-                                 basectx.rev(), stackrevs))
+        newheads = set(
+            repo.revs(
+                b'heads(%d::) - %ld - not public()', basectx.rev(), stackrevs
+            )
+        )
     else:
         newheads = set()
 
@@ -247,7 +271,7 @@
     nodelen = longestshortest(repo, allrevs)
 
     try:
-        cmdutil.findcmd('rebase', commands.table)
+        cmdutil.findcmd(b'rebase', commands.table)
         haverebase = True
     except (error.AmbiguousCommand, error.UnknownCommand):
         haverebase = False
@@ -258,10 +282,12 @@
     # TODO use proper graph symbols from graphmod
 
     tres = formatter.templateresources(ui, repo)
-    shortesttmpl = formatter.maketemplater(ui, '{shortest(node, %d)}' % nodelen,
-                                           resources=tres)
+    shortesttmpl = formatter.maketemplater(
+        ui, b'{shortest(node, %d)}' % nodelen, resources=tres
+    )
+
     def shortest(ctx):
-        return shortesttmpl.renderdefault({'ctx': ctx, 'node': ctx.hex()})
+        return shortesttmpl.renderdefault({b'ctx': ctx, b'node': ctx.hex()})
 
     # We write out new heads to aid in DAG awareness and to help with decision
     # making on how the stack should be reconciled with commits made since the
@@ -278,61 +304,68 @@
 
         sourcectx = repo[stackrevs[-1]]
 
-        sortedheads = sorted(newheads, key=lambda x: revdistance[x],
-                             reverse=True)
+        sortedheads = sorted(
+            newheads, key=lambda x: revdistance[x], reverse=True
+        )
 
         for i, rev in enumerate(sortedheads):
             ctx = repo[rev]
 
             if i:
-                ui.write(': ')
+                ui.write(b': ')
             else:
-                ui.write('  ')
+                ui.write(b'  ')
 
-            ui.write(('o  '))
+            ui.writenoi18n(b'o  ')
             displayer.show(ctx, nodelen=nodelen)
             displayer.flush(ctx)
-            ui.write('\n')
+            ui.write(b'\n')
 
             if i:
-                ui.write(':/')
+                ui.write(b':/')
             else:
-                ui.write(' /')
+                ui.write(b' /')
 
-            ui.write('    (')
-            ui.write(_('%d commits ahead') % revdistance[rev],
-                     label='stack.commitdistance')
+            ui.write(b'    (')
+            ui.write(
+                _(b'%d commits ahead') % revdistance[rev],
+                label=b'stack.commitdistance',
+            )
 
             if haverebase:
                 # TODO may be able to omit --source in some scenarios
-                ui.write('; ')
-                ui.write(('hg rebase --source %s --dest %s' % (
-                         shortest(sourcectx), shortest(ctx))),
-                         label='stack.rebasehint')
+                ui.write(b'; ')
+                ui.write(
+                    (
+                        b'hg rebase --source %s --dest %s'
+                        % (shortest(sourcectx), shortest(ctx))
+                    ),
+                    label=b'stack.rebasehint',
+                )
 
-            ui.write(')\n')
+            ui.write(b')\n')
 
-        ui.write(':\n:    ')
-        ui.write(_('(stack head)\n'), label='stack.label')
+        ui.write(b':\n:    ')
+        ui.write(_(b'(stack head)\n'), label=b'stack.label')
 
     if branchpointattip:
-        ui.write(' \\ /  ')
-        ui.write(_('(multiple children)\n'), label='stack.label')
-        ui.write('  |\n')
+        ui.write(b' \\ /  ')
+        ui.write(_(b'(multiple children)\n'), label=b'stack.label')
+        ui.write(b'  |\n')
 
     for rev in stackrevs:
         ctx = repo[rev]
-        symbol = '@' if rev == wdirctx.rev() else 'o'
+        symbol = b'@' if rev == wdirctx.rev() else b'o'
 
         if newheads:
-            ui.write(': ')
+            ui.write(b': ')
         else:
-            ui.write('  ')
+            ui.write(b'  ')
 
-        ui.write(symbol, '  ')
+        ui.write(symbol, b'  ')
         displayer.show(ctx, nodelen=nodelen)
         displayer.flush(ctx)
-        ui.write('\n')
+        ui.write(b'\n')
 
     # TODO display histedit hint?
 
@@ -340,24 +373,25 @@
         # Vertically and horizontally separate stack base from parent
         # to reinforce stack boundary.
         if newheads:
-            ui.write(':/   ')
+            ui.write(b':/   ')
         else:
-            ui.write(' /   ')
+            ui.write(b' /   ')
 
-        ui.write(_('(stack base)'), '\n', label='stack.label')
-        ui.write(('o  '))
+        ui.write(_(b'(stack base)'), b'\n', label=b'stack.label')
+        ui.writenoi18n(b'o  ')
 
         displayer.show(basectx, nodelen=nodelen)
         displayer.flush(basectx)
-        ui.write('\n')
+        ui.write(b'\n')
 
-@revsetpredicate('_underway([commitage[, headage]])')
+
+@revsetpredicate(b'_underway([commitage[, headage]])')
 def underwayrevset(repo, subset, x):
-    args = revset.getargsdict(x, 'underway', 'commitage headage')
-    if 'commitage' not in args:
-        args['commitage'] = None
-    if 'headage' not in args:
-        args['headage'] = None
+    args = revset.getargsdict(x, b'underway', b'commitage headage')
+    if b'commitage' not in args:
+        args[b'commitage'] = None
+    if b'headage' not in args:
+        args[b'headage'] = None
 
     # We assume callers of this revset add a topological sort on the
     # result. This means there is no benefit to making the revset lazy
@@ -370,68 +404,84 @@
     # to return. ``not public()`` will also pull in obsolete changesets if
     # there is a non-obsolete changeset with obsolete ancestors. This is
     # why we exclude obsolete changesets from this query.
-    rs = 'not public() and not obsolete()'
+    rs = b'not public() and not obsolete()'
     rsargs = []
-    if args['commitage']:
-        rs += ' and date(%s)'
-        rsargs.append(revsetlang.getstring(args['commitage'],
-                                           _('commitage requires a string')))
+    if args[b'commitage']:
+        rs += b' and date(%s)'
+        rsargs.append(
+            revsetlang.getstring(
+                args[b'commitage'], _(b'commitage requires a string')
+            )
+        )
 
     mutable = repo.revs(rs, *rsargs)
     relevant = revset.baseset(mutable)
 
     # Add parents of mutable changesets to provide context.
-    relevant += repo.revs('parents(%ld)', mutable)
+    relevant += repo.revs(b'parents(%ld)', mutable)
 
     # We also pull in (public) heads if they a) aren't closing a branch
     # b) are recent.
-    rs = 'head() and not closed()'
+    rs = b'head() and not closed()'
     rsargs = []
-    if args['headage']:
-        rs += ' and date(%s)'
-        rsargs.append(revsetlang.getstring(args['headage'],
-                                           _('headage requires a string')))
+    if args[b'headage']:
+        rs += b' and date(%s)'
+        rsargs.append(
+            revsetlang.getstring(
+                args[b'headage'], _(b'headage requires a string')
+            )
+        )
 
     relevant += repo.revs(rs, *rsargs)
 
     # Add working directory parent.
-    wdirrev = repo['.'].rev()
+    wdirrev = repo[b'.'].rev()
     if wdirrev != nullrev:
         relevant += revset.baseset({wdirrev})
 
     return subset & relevant
 
-@showview('work', csettopic='work')
+
+@showview(b'work', csettopic=b'work')
 def showwork(ui, repo, displayer):
     """changesets that aren't finished"""
     # TODO support date-based limiting when calling revset.
-    revs = repo.revs('sort(_underway(), topo)')
+    revs = repo.revs(b'sort(_underway(), topo)')
     nodelen = longestshortest(repo, revs)
 
     revdag = graphmod.dagwalker(repo, revs)
 
-    ui.setconfig('experimental', 'graphshorten', True)
-    logcmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges,
-                            props={'nodelen': nodelen})
+    ui.setconfig(b'experimental', b'graphshorten', True)
+    logcmdutil.displaygraph(
+        ui,
+        repo,
+        revdag,
+        displayer,
+        graphmod.asciiedges,
+        props={b'nodelen': nodelen},
+    )
+
 
 def extsetup(ui):
     # Alias `hg <prefix><view>` to `hg show <view>`.
-    for prefix in ui.configlist('commands', 'show.aliasprefix'):
+    for prefix in ui.configlist(b'commands', b'show.aliasprefix'):
         for view in showview._table:
-            name = '%s%s' % (prefix, view)
+            name = b'%s%s' % (prefix, view)
 
-            choice, allcommands = cmdutil.findpossible(name, commands.table,
-                                                       strict=True)
+            choice, allcommands = cmdutil.findpossible(
+                name, commands.table, strict=True
+            )
 
             # This alias is already a command name. Don't set it.
             if name in choice:
                 continue
 
             # Same for aliases.
-            if ui.config('alias', name, None):
+            if ui.config(b'alias', name, None):
                 continue
 
-            ui.setconfig('alias', name, 'show %s' % view, source='show')
+            ui.setconfig(b'alias', name, b'show %s' % view, source=b'show')
+
 
 def longestshortest(repo, revs, minlen=4):
     """Return the length of the longest shortest node to identify revisions.
@@ -448,8 +498,11 @@
     if not revs:
         return minlen
     cl = repo.changelog
-    return max(len(scmutil.shortesthexnodeidprefix(repo, cl.node(r), minlen))
-               for r in revs)
+    return max(
+        len(scmutil.shortesthexnodeidprefix(repo, cl.node(r), minlen))
+        for r in revs
+    )
+
 
 # Adjust the docstring of the show command so it shows all registered views.
 # This is a bit hacky because it runs at the end of module load. When moved
@@ -460,11 +513,18 @@
     longest = max(map(len, showview._table.keys()))
     entries = []
     for key in sorted(showview._table.keys()):
-        entries.append(r'    %s   %s' % (
-            pycompat.sysstr(key.ljust(longest)), showview._table[key]._origdoc))
+        entries.append(
+            r'    %s   %s'
+            % (
+                pycompat.sysstr(key.ljust(longest)),
+                showview._table[key]._origdoc,
+            )
+        )
 
-    cmdtable['show'][0].__doc__ = pycompat.sysstr('%s\n\n%s\n    ') % (
-        cmdtable['show'][0].__doc__.rstrip(),
-        pycompat.sysstr('\n\n').join(entries))
+    cmdtable[b'show'][0].__doc__ = pycompat.sysstr(b'%s\n\n%s\n    ') % (
+        cmdtable[b'show'][0].__doc__.rstrip(),
+        pycompat.sysstr(b'\n\n').join(entries),
+    )
+
 
 _updatedocstring()
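
For illustration, a minimal sketch of how one more view would plug into
the ``showview`` registrar rewritten above; the ``tags`` view name and
its body are hypothetical and not part of this changeset::

    @showview(b'tags', fmtopic=b'tags')
    def showtags(ui, repo, fm):
        """tags and their associated changesets"""
        # Dispatched by show() through ui.formatter(b'showtags', ...).
        for tag, node in sorted(repo.tags().items()):
            fm.startitem()
            fm.write(b'tag', b'%s', tag)
            fm.write(b'node', fm.hexfunc(node), fm.hexfunc(node))
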
--- a/hgext/sparse.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/sparse.py	Mon Oct 21 11:09:48 2019 -0400
@@ -74,6 +74,7 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
+from mercurial.pycompat import setattr
 from mercurial import (
     commands,
     dirstate,
@@ -92,11 +93,12 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 cmdtable = {}
 command = registrar.command(cmdtable)
 
+
 def extsetup(ui):
     sparse.enabled = True
 
@@ -105,6 +107,7 @@
     _setupadd(ui)
     _setupdirstate(ui)
 
+
 def replacefilecache(cls, propname, replacement):
     """Replace a filecache property with a new class. This allows changing the
     cache invalidation condition."""
@@ -118,24 +121,36 @@
         cls = cls.__bases__[0]
 
     if cls is object:
-        raise AttributeError(_("type '%s' has no property '%s'") % (origcls,
-                             propname))
+        raise AttributeError(
+            _(b"type '%s' has no property '%s'") % (origcls, propname)
+        )
+
 
 def _setuplog(ui):
-    entry = commands.table['log|history']
-    entry[1].append(('', 'sparse', None,
-        "limit to changesets affecting the sparse checkout"))
+    entry = commands.table[b'log|history']
+    entry[1].append(
+        (
+            b'',
+            b'sparse',
+            None,
+            b"limit to changesets affecting the sparse checkout",
+        )
+    )
 
     def _initialrevs(orig, repo, opts):
         revs = orig(repo, opts)
-        if opts.get('sparse'):
+        if opts.get(b'sparse'):
             sparsematch = sparse.matcher(repo)
+
             def ctxmatch(rev):
                 ctx = repo[rev]
                 return any(f for f in ctx.files() if sparsematch(f))
+
             revs = revs.filter(ctxmatch)
         return revs
-    extensions.wrapfunction(logcmdutil, '_initialrevs', _initialrevs)
+
+    extensions.wrapfunction(logcmdutil, b'_initialrevs', _initialrevs)
+
 
 def _clonesparsecmd(orig, ui, repo, *args, **opts):
     include_pat = opts.get(r'include')
@@ -153,32 +168,45 @@
         pat = enableprofile_pat
         enableprofile = True
     if sum([include, exclude, enableprofile]) > 1:
-        raise error.Abort(_("too many flags specified."))
+        raise error.Abort(_(b"too many flags specified."))
     # if --narrow is passed, it means they are includes and excludes for narrow
     # clone
     if not narrow_pat and (include or exclude or enableprofile):
+
         def clonesparse(orig, self, node, overwrite, *args, **kwargs):
-            sparse.updateconfig(self.unfiltered(), pat, {}, include=include,
-                                exclude=exclude, enableprofile=enableprofile,
-                                usereporootpaths=True)
+            sparse.updateconfig(
+                self.unfiltered(),
+                pat,
+                {},
+                include=include,
+                exclude=exclude,
+                enableprofile=enableprofile,
+                usereporootpaths=True,
+            )
             return orig(self, node, overwrite, *args, **kwargs)
-        extensions.wrapfunction(hg, 'updaterepo', clonesparse)
+
+        extensions.wrapfunction(hg, b'updaterepo', clonesparse)
     return orig(ui, repo, *args, **opts)
 
+
 def _setupclone(ui):
-    entry = commands.table['clone']
-    entry[1].append(('', 'enable-profile', [],
-                    'enable a sparse profile'))
-    entry[1].append(('', 'include', [],
-                    'include sparse pattern'))
-    entry[1].append(('', 'exclude', [],
-                    'exclude sparse pattern'))
-    extensions.wrapcommand(commands.table, 'clone', _clonesparsecmd)
+    entry = commands.table[b'clone']
+    entry[1].append((b'', b'enable-profile', [], b'enable a sparse profile'))
+    entry[1].append((b'', b'include', [], b'include sparse pattern'))
+    entry[1].append((b'', b'exclude', [], b'exclude sparse pattern'))
+    extensions.wrapcommand(commands.table, b'clone', _clonesparsecmd)
+
 
 def _setupadd(ui):
-    entry = commands.table['add']
-    entry[1].append(('s', 'sparse', None,
-                    'also include directories of added files in sparse config'))
+    entry = commands.table[b'add']
+    entry[1].append(
+        (
+            b's',
+            b'sparse',
+            None,
+            b'also include directories of added files in sparse config',
+        )
+    )
 
     def _add(orig, ui, repo, *pats, **opts):
         if opts.get(r'sparse'):
@@ -189,7 +217,8 @@
             sparse.updateconfig(repo, list(dirs), opts, include=True)
         return orig(ui, repo, *pats, **opts)
 
-    extensions.wrapcommand(commands.table, 'add', _add)
+    extensions.wrapcommand(commands.table, b'add', _add)
+
 
 def _setupdirstate(ui):
     """Modify the dirstate to prevent stat'ing excluded files,
@@ -204,7 +233,7 @@
         match = matchmod.intersectmatchers(match, sm)
         return orig(self, match, subrepos, unknown, ignored, full)
 
-    extensions.wrapfunction(dirstate.dirstate, 'walk', walk)
+    extensions.wrapfunction(dirstate.dirstate, b'walk', walk)
 
     # dirstate.rebuild should not add non-matching files
     def _rebuild(orig, self, parent, allfiles, changedfiles=None):
@@ -221,39 +250,70 @@
                 changedfiles = dirstatefilestoremove.union(changedfiles)
 
         return orig(self, parent, allfiles, changedfiles)
-    extensions.wrapfunction(dirstate.dirstate, 'rebuild', _rebuild)
+
+    extensions.wrapfunction(dirstate.dirstate, b'rebuild', _rebuild)
 
     # Prevent adding files that are outside the sparse checkout
-    editfuncs = ['normal', 'add', 'normallookup', 'copy', 'remove', 'merge']
-    hint = _('include file with `hg debugsparse --include <pattern>` or use ' +
-             '`hg add -s <file>` to include file directory while adding')
+    editfuncs = [
+        b'normal',
+        b'add',
+        b'normallookup',
+        b'copy',
+        b'remove',
+        b'merge',
+    ]
+    hint = _(
+        b'include file with `hg debugsparse --include <pattern>` or use '
+        + b'`hg add -s <file>` to include file directory while adding'
+    )
     for func in editfuncs:
+
         def _wrapper(orig, self, *args, **kwargs):
             sparsematch = self._sparsematcher
             if not sparsematch.always():
                 for f in args:
-                    if (f is not None and not sparsematch(f) and
-                        f not in self):
-                        raise error.Abort(_("cannot add '%s' - it is outside "
-                                            "the sparse checkout") % f,
-                                          hint=hint)
+                    if f is not None and not sparsematch(f) and f not in self:
+                        raise error.Abort(
+                            _(
+                                b"cannot add '%s' - it is outside "
+                                b"the sparse checkout"
+                            )
+                            % f,
+                            hint=hint,
+                        )
             return orig(self, *args, **kwargs)
+
         extensions.wrapfunction(dirstate.dirstate, func, _wrapper)
 
-@command('debugsparse', [
-    ('I', 'include', False, _('include files in the sparse checkout')),
-    ('X', 'exclude', False, _('exclude files in the sparse checkout')),
-    ('d', 'delete', False, _('delete an include/exclude rule')),
-    ('f', 'force', False, _('allow changing rules even with pending changes')),
-    ('', 'enable-profile', False, _('enables the specified profile')),
-    ('', 'disable-profile', False, _('disables the specified profile')),
-    ('', 'import-rules', False, _('imports rules from a file')),
-    ('', 'clear-rules', False, _('clears local include/exclude rules')),
-    ('', 'refresh', False, _('updates the working after sparseness changes')),
-    ('', 'reset', False, _('makes the repo full again')),
-    ] + commands.templateopts,
-    _('[--OPTION] PATTERN...'),
-    helpbasic=True)
+
+@command(
+    b'debugsparse',
+    [
+        (b'I', b'include', False, _(b'include files in the sparse checkout')),
+        (b'X', b'exclude', False, _(b'exclude files in the sparse checkout')),
+        (b'd', b'delete', False, _(b'delete an include/exclude rule')),
+        (
+            b'f',
+            b'force',
+            False,
+            _(b'allow changing rules even with pending changes'),
+        ),
+        (b'', b'enable-profile', False, _(b'enables the specified profile')),
+        (b'', b'disable-profile', False, _(b'disables the specified profile')),
+        (b'', b'import-rules', False, _(b'imports rules from a file')),
+        (b'', b'clear-rules', False, _(b'clears local include/exclude rules')),
+        (
+            b'',
+            b'refresh',
+            False,
+            _(b'updates the working directory after sparseness changes'),
+        ),
+        (b'', b'reset', False, _(b'makes the repo full again')),
+    ]
+    + commands.templateopts,
+    _(b'[--OPTION] PATTERN...'),
+    helpbasic=True,
+)
 def debugsparse(ui, repo, *pats, **opts):
     """make the current checkout sparse, or edit the existing checkout
 
@@ -296,38 +356,63 @@
     Returns 0 if editing the sparse checkout succeeds.
     """
     opts = pycompat.byteskwargs(opts)
-    include = opts.get('include')
-    exclude = opts.get('exclude')
-    force = opts.get('force')
-    enableprofile = opts.get('enable_profile')
-    disableprofile = opts.get('disable_profile')
-    importrules = opts.get('import_rules')
-    clearrules = opts.get('clear_rules')
-    delete = opts.get('delete')
-    refresh = opts.get('refresh')
-    reset = opts.get('reset')
-    count = sum([include, exclude, enableprofile, disableprofile, delete,
-                 importrules, refresh, clearrules, reset])
+    include = opts.get(b'include')
+    exclude = opts.get(b'exclude')
+    force = opts.get(b'force')
+    enableprofile = opts.get(b'enable_profile')
+    disableprofile = opts.get(b'disable_profile')
+    importrules = opts.get(b'import_rules')
+    clearrules = opts.get(b'clear_rules')
+    delete = opts.get(b'delete')
+    refresh = opts.get(b'refresh')
+    reset = opts.get(b'reset')
+    count = sum(
+        [
+            include,
+            exclude,
+            enableprofile,
+            disableprofile,
+            delete,
+            importrules,
+            refresh,
+            clearrules,
+            reset,
+        ]
+    )
     if count > 1:
-        raise error.Abort(_("too many flags specified"))
+        raise error.Abort(_(b"too many flags specified"))
 
     if count == 0:
-        if repo.vfs.exists('sparse'):
-            ui.status(repo.vfs.read("sparse") + "\n")
+        if repo.vfs.exists(b'sparse'):
+            ui.status(repo.vfs.read(b"sparse") + b"\n")
             temporaryincludes = sparse.readtemporaryincludes(repo)
             if temporaryincludes:
-                ui.status(_("Temporarily Included Files (for merge/rebase):\n"))
-                ui.status(("\n".join(temporaryincludes) + "\n"))
+                ui.status(
+                    _(b"Temporarily Included Files (for merge/rebase):\n")
+                )
+                ui.status((b"\n".join(temporaryincludes) + b"\n"))
             return
         else:
-            raise error.Abort(_('the debugsparse command is only supported on'
-                                ' sparse repositories'))
+            raise error.Abort(
+                _(
+                    b'the debugsparse command is only supported on'
+                    b' sparse repositories'
+                )
+            )
 
     if include or exclude or delete or reset or enableprofile or disableprofile:
-        sparse.updateconfig(repo, pats, opts, include=include, exclude=exclude,
-                            reset=reset, delete=delete,
-                            enableprofile=enableprofile,
-                            disableprofile=disableprofile, force=force)
+        sparse.updateconfig(
+            repo,
+            pats,
+            opts,
+            include=include,
+            exclude=exclude,
+            reset=reset,
+            delete=delete,
+            enableprofile=enableprofile,
+            disableprofile=disableprofile,
+            force=force,
+        )
 
     if importrules:
         sparse.importfromfiles(repo, opts, pats, force=force)
@@ -340,9 +425,16 @@
             wlock = repo.wlock()
             fcounts = map(
                 len,
-                sparse.refreshwdir(repo, repo.status(), sparse.matcher(repo),
-                                   force=force))
-            sparse.printchanges(ui, opts, added=fcounts[0], dropped=fcounts[1],
-                                conflicting=fcounts[2])
+                sparse.refreshwdir(
+                    repo, repo.status(), sparse.matcher(repo), force=force
+                ),
+            )
+            sparse.printchanges(
+                ui,
+                opts,
+                added=fcounts[0],
+                dropped=fcounts[1],
+                conflicting=fcounts[2],
+            )
         finally:
             wlock.release()
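
The sparse extension relies throughout on the same wrapping idiom: look
up a command table entry, append flags, and wrap the command so extra
logic runs before deferring to ``orig``. A hypothetical sketch of the
bare pattern (the wrapped command and message are illustrative only)::

    from mercurial import commands, extensions

    def _statuswrapper(orig, ui, repo, *args, **opts):
        # Extra behavior runs first; the wrapped command does the rest.
        ui.status(b'sparse-aware status\n')
        return orig(ui, repo, *args, **opts)

    def extsetup(ui):
        extensions.wrapcommand(commands.table, b'status', _statuswrapper)
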
--- a/hgext/split.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/split.py	Mon Oct 21 11:09:48 2019 -0400
@@ -31,9 +31,7 @@
 )
 
 # allow people to use split without explicitly enabling rebase extension
-from . import (
-    rebase,
-)
+from . import rebase
 
 cmdtable = {}
 command = registrar.command(cmdtable)
@@ -42,14 +40,20 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
-@command('split',
-    [('r', 'rev', '', _("revision to split"), _('REV')),
-     ('', 'rebase', True, _('rebase descendants after split')),
-    ] + cmdutil.commitopts2,
-    _('hg split [--no-rebase] [[-r] REV]'),
-    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, helpbasic=True)
+@command(
+    b'split',
+    [
+        (b'r', b'rev', b'', _(b"revision to split"), _(b'REV')),
+        (b'', b'rebase', True, _(b'rebase descendants after split')),
+    ]
+    + cmdutil.commitopts2,
+    _(b'hg split [--no-rebase] [[-r] REV]'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+    helpbasic=True,
+)
 def split(ui, repo, *revs, **opts):
     """split a changeset into smaller ones
 
@@ -63,21 +67,21 @@
     """
     opts = pycompat.byteskwargs(opts)
     revlist = []
-    if opts.get('rev'):
-        revlist.append(opts.get('rev'))
+    if opts.get(b'rev'):
+        revlist.append(opts.get(b'rev'))
     revlist.extend(revs)
-    with repo.wlock(), repo.lock(), repo.transaction('split') as tr:
-        revs = scmutil.revrange(repo, revlist or ['.'])
+    with repo.wlock(), repo.lock(), repo.transaction(b'split') as tr:
+        revs = scmutil.revrange(repo, revlist or [b'.'])
         if len(revs) > 1:
-            raise error.Abort(_('cannot split multiple revisions'))
+            raise error.Abort(_(b'cannot split multiple revisions'))
 
         rev = revs.first()
         ctx = repo[rev]
         if rev is None or ctx.node() == nullid:
-            ui.status(_('nothing to split\n'))
+            ui.status(_(b'nothing to split\n'))
             return 1
         if ctx.node() is None:
-            raise error.Abort(_('cannot split working directory'))
+            raise error.Abort(_(b'cannot split working directory'))
 
         # rewriteutil.precheck is not very useful here because:
         # 1. null check is done above and it's more friendly to return 1
@@ -87,27 +91,34 @@
         #
         # So only "public" check is useful and it's checked directly here.
         if ctx.phase() == phases.public:
-            raise error.Abort(_('cannot split public changeset'),
-                              hint=_("see 'hg help phases' for details"))
+            raise error.Abort(
+                _(b'cannot split public changeset'),
+                hint=_(b"see 'hg help phases' for details"),
+            )
 
-        descendants = list(repo.revs('(%d::) - (%d)', rev, rev))
+        descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
         alloworphaned = obsolete.isenabled(repo, obsolete.allowunstableopt)
-        if opts.get('rebase'):
+        if opts.get(b'rebase'):
             # Skip obsoleted descendants and their descendants so the rebase
             # won't cause conflicts for sure.
-            torebase = list(repo.revs('%ld - (%ld & obsolete())::',
-                                      descendants, descendants))
+            torebase = list(
+                repo.revs(
+                    b'%ld - (%ld & obsolete())::', descendants, descendants
+                )
+            )
             if not alloworphaned and len(torebase) != len(descendants):
-                raise error.Abort(_('split would leave orphaned changesets '
-                                    'behind'))
+                raise error.Abort(
+                    _(b'split would leave orphaned changesets behind')
+                )
         else:
             if not alloworphaned and descendants:
                 raise error.Abort(
-                    _('cannot split changeset with children without rebase'))
+                    _(b'cannot split changeset with children without rebase')
+                )
             torebase = ()
 
         if len(ctx.parents()) > 1:
-            raise error.Abort(_('cannot split a merge changeset'))
+            raise error.Abort(_(b'cannot split a merge changeset'))
 
         cmdutil.bailifchanged(repo)
 
@@ -116,7 +127,7 @@
         if bname and repo._bookmarks[bname] != ctx.node():
             bookmarks.deactivate(repo)
 
-        wnode = repo['.'].node()
+        wnode = repo[b'.'].node()
         top = None
         try:
             top = dosplit(ui, repo, tr, ctx, opts)
@@ -130,8 +141,9 @@
         if torebase and top:
             dorebase(ui, repo, torebase, top)
 
+
 def dosplit(ui, repo, tr, ctx, opts):
-    committed = [] # [ctx]
+    committed = []  # [ctx]
 
     # Set working parent to ctx.p1(), and keep working copy as ctx's content
     if ctx.node() != repo.dirstate.p1():
@@ -145,33 +157,48 @@
     # Main split loop
     while incomplete(repo):
         if committed:
-            header = (_('HG: Splitting %s. So far it has been split into:\n')
-                      % short(ctx.node()))
+            header = _(
+                b'HG: Splitting %s. So far it has been split into:\n'
+            ) % short(ctx.node())
             for c in committed:
-                firstline = c.description().split('\n', 1)[0]
-                header += _('HG: - %s: %s\n') % (short(c.node()), firstline)
-            header += _('HG: Write commit message for the next split '
-                        'changeset.\n')
+                firstline = c.description().split(b'\n', 1)[0]
+                header += _(b'HG: - %s: %s\n') % (short(c.node()), firstline)
+            header += _(
+                b'HG: Write commit message for the next split changeset.\n'
+            )
         else:
-            header = _('HG: Splitting %s. Write commit message for the '
-                       'first split changeset.\n') % short(ctx.node())
-        opts.update({
-            'edit': True,
-            'interactive': True,
-            'message': header + ctx.description(),
-        })
+            header = _(
+                b'HG: Splitting %s. Write commit message for the '
+                b'first split changeset.\n'
+            ) % short(ctx.node())
+        opts.update(
+            {
+                b'edit': True,
+                b'interactive': True,
+                b'message': header + ctx.description(),
+            }
+        )
         commands.commit(ui, repo, **pycompat.strkwargs(opts))
-        newctx = repo['.']
+        newctx = repo[b'.']
         committed.append(newctx)
 
     if not committed:
-        raise error.Abort(_('cannot split an empty revision'))
+        raise error.Abort(_(b'cannot split an empty revision'))
 
-    scmutil.cleanupnodes(repo, {ctx.node(): [c.node() for c in committed]},
-                         operation='split', fixphase=True)
+    scmutil.cleanupnodes(
+        repo,
+        {ctx.node(): [c.node() for c in committed]},
+        operation=b'split',
+        fixphase=True,
+    )
 
     return committed[-1]
 
+
 def dorebase(ui, repo, src, destctx):
-    rebase.rebase(ui, repo, rev=[revsetlang.formatspec('%ld', src)],
-                  dest=revsetlang.formatspec('%d', destctx.rev()))
+    rebase.rebase(
+        ui,
+        repo,
+        rev=[revsetlang.formatspec(b'%ld', src)],
+        dest=revsetlang.formatspec(b'%d', destctx.rev()),
+    )
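
Note how ``split()`` above round-trips its options between str and bytes
keys: ``pycompat.byteskwargs()`` on the way in, ``pycompat.strkwargs()``
on the way back out to another command's Python entry point. A minimal
sketch of that convention, with a hypothetical command body::

    from mercurial import commands, pycompat

    def mycmd(ui, repo, **opts):
        # Python 3 passes **kwargs with str keys; normalize to bytes
        # so the rest of the code can use b'...' keys uniformly.
        opts = pycompat.byteskwargs(opts)
        if opts.get(b'rev'):
            pass  # ... work with bytes-keyed options ...
        # Convert back to str keys before calling another command's API.
        commands.commit(ui, repo, **pycompat.strkwargs(opts))
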
--- a/hgext/sqlitestore.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/sqlitestore.py	Mon Oct 21 11:09:48 2019 -0400
@@ -57,9 +57,7 @@
     nullrev,
     short,
 )
-from mercurial.thirdparty import (
-    attr,
-)
+from mercurial.thirdparty import attr
 from mercurial import (
     ancestor,
     dagop,
@@ -70,17 +68,18 @@
     mdiff,
     pycompat,
     registrar,
-    repository,
     util,
     verify,
 )
-from mercurial.utils import (
-    interfaceutil,
-    storageutil,
+from mercurial.interfaces import (
+    repository,
+    util as interfaceutil,
 )
+from mercurial.utils import storageutil
 
 try:
     from mercurial import zstd
+
     zstd.__version__
 except ImportError:
     zstd = None
@@ -89,14 +88,18 @@
 configitem = registrar.configitem(configtable)
 
 # experimental config: storage.sqlite.compression
-configitem('storage', 'sqlite.compression',
-           default='zstd' if zstd else 'zlib')
+configitem(
+    b'storage',
+    b'sqlite.compression',
+    default=b'zstd' if zstd else b'zlib',
+    experimental=True,
+)
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 REQUIREMENT = b'exp-sqlite-001'
 REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
@@ -118,24 +121,19 @@
     # Deltas are stored as content-indexed blobs.
     # compression column holds COMPRESSION_* constant for how the
     # delta is encoded.
-
     r'CREATE TABLE delta ('
     r'    id INTEGER PRIMARY KEY, '
     r'    compression INTEGER NOT NULL, '
     r'    hash BLOB UNIQUE ON CONFLICT ABORT, '
     r'    delta BLOB NOT NULL '
     r')',
-
     # Tracked paths are denormalized to integers to avoid redundant
     # storage of the path name.
     r'CREATE TABLE filepath ('
     r'    id INTEGER PRIMARY KEY, '
     r'    path BLOB NOT NULL '
     r')',
-
-    r'CREATE UNIQUE INDEX filepath_path '
-    r'    ON filepath (path)',
-
+    r'CREATE UNIQUE INDEX filepath_path ' r'    ON filepath (path)',
     # We have a single table for all file revision data.
     # Each file revision is uniquely described by a (path, rev) and
     # (path, node).
@@ -159,13 +157,10 @@
     r'    deltabaseid INTEGER REFERENCES fileindex(id), '
     r'    node BLOB NOT NULL '
     r')',
-
     r'CREATE UNIQUE INDEX fileindex_pathrevnum '
     r'    ON fileindex (pathid, revnum)',
-
     r'CREATE UNIQUE INDEX fileindex_pathnode '
     r'    ON fileindex (pathid, node)',
-
     # Provide a view over all file data for convenience.
     r'CREATE VIEW filedata AS '
     r'SELECT '
@@ -182,12 +177,11 @@
     r'    fileindex.deltabaseid AS deltabaseid '
     r'FROM filepath, fileindex '
     r'WHERE fileindex.pathid=filepath.id',
-
     r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
 ]
 
-def resolvedeltachain(db, pathid, node, revisioncache,
-                      stoprids, zstddctx=None):
+
+def resolvedeltachain(db, pathid, node, revisioncache, stoprids, zstddctx=None):
     """Resolve a delta chain for a file node."""
 
     # TODO the "not in ({stops})" here is possibly slowing down the query
@@ -211,8 +205,10 @@
         r'SELECT deltachain.baseid, compression, delta '
         r'FROM deltachain, delta '
         r'WHERE delta.id=deltachain.deltaid'.format(
-            stops=r','.join([r'?'] * len(stoprids))),
-        tuple([pathid, node] + list(stoprids.keys())))
+            stops=r','.join([r'?'] * len(stoprids))
+        ),
+        tuple([pathid, node] + list(stoprids.keys())),
+    )
 
     deltas = []
     lastdeltabaseid = None
@@ -227,8 +223,9 @@
         elif compression == COMPRESSION_ZLIB:
             delta = zlib.decompress(delta)
         else:
-            raise SQLiteStoreError('unhandled compression type: %d' %
-                                   compression)
+            raise SQLiteStoreError(
+                b'unhandled compression type: %d' % compression
+            )
 
         deltas.append(delta)
 
@@ -248,20 +245,24 @@
 
     return fulltext
 
+
 def insertdelta(db, compression, hash, delta):
     try:
         return db.execute(
             r'INSERT INTO delta (compression, hash, delta) '
             r'VALUES (?, ?, ?)',
-            (compression, hash, delta)).lastrowid
+            (compression, hash, delta),
+        ).lastrowid
     except sqlite3.IntegrityError:
         return db.execute(
-            r'SELECT id FROM delta WHERE hash=?',
-            (hash,)).fetchone()[0]
+            r'SELECT id FROM delta WHERE hash=?', (hash,)
+        ).fetchone()[0]
+
 
 class SQLiteStoreError(error.StorageError):
     pass
 
+
 @attr.s
 class revisionentry(object):
     rid = attr.ib()
@@ -274,6 +275,7 @@
     linkrev = attr.ib()
     flags = attr.ib()
 
+
 @interfaceutil.implementer(repository.irevisiondelta)
 @attr.s(slots=True)
 class sqliterevisiondelta(object):
@@ -287,6 +289,7 @@
     delta = attr.ib()
     linknode = attr.ib(default=None)
 
+
 @interfaceutil.implementer(repository.iverifyproblem)
 @attr.s(frozen=True)
 class sqliteproblem(object):
@@ -294,6 +297,7 @@
     error = attr.ib(default=None)
     node = attr.ib(default=None)
 
+
 @interfaceutil.implementer(repository.ifilestorage)
 class sqlitefilestore(object):
     """Implements storage for an individual tracked path."""
@@ -315,7 +319,7 @@
 
         self._compengine = compression
 
-        if compression == 'zstd':
+        if compression == b'zstd':
             self._cctx = zstd.ZstdCompressor(level=3)
             self._dctx = zstd.ZstdDecompressor()
         else:
@@ -329,8 +333,11 @@
         self._nodetorev = {}
         self._revisions = {}
 
-        res = list(self._db.execute(
-            r'SELECT id FROM filepath WHERE path=?', (self._path,)))
+        res = list(
+            self._db.execute(
+                r'SELECT id FROM filepath WHERE path=?', (self._path,)
+            )
+        )
 
         if not res:
             self._pathid = None
@@ -343,14 +350,16 @@
             r'FROM fileindex '
             r'WHERE pathid=? '
             r'ORDER BY revnum ASC',
-            (self._pathid,))
+            (self._pathid,),
+        )
 
         for i, row in enumerate(res):
             rid, rev, node, p1rev, p2rev, linkrev, flags = row
 
             if i != rev:
-                raise SQLiteStoreError(_('sqlite database has inconsistent '
-                                         'revision numbers'))
+                raise SQLiteStoreError(
+                    _(b'sqlite database has inconsistent revision numbers')
+                )
 
             if p1rev == nullrev:
                 p1node = nullid
@@ -371,7 +380,8 @@
                 p1node=p1node,
                 p2node=p2node,
                 linkrev=linkrev,
-                flags=flags)
+                flags=flags,
+            )
 
             self._revtonode[rev] = node
             self._nodetorev[node] = rev
@@ -392,15 +402,16 @@
         return node in self._nodetorev
 
     def revs(self, start=0, stop=None):
-        return storageutil.iterrevs(len(self._revisions), start=start,
-                                    stop=stop)
+        return storageutil.iterrevs(
+            len(self._revisions), start=start, stop=stop
+        )
 
     def parents(self, node):
         if node == nullid:
             return nullid, nullid
 
         if node not in self._revisions:
-            raise error.LookupError(node, self._path, _('no node'))
+            raise error.LookupError(node, self._path, _(b'no node'))
 
         entry = self._revisions[node]
         return entry.p1node, entry.p2node
@@ -420,7 +431,7 @@
             return nullrev
 
         if node not in self._nodetorev:
-            raise error.LookupError(node, self._path, _('no node'))
+            raise error.LookupError(node, self._path, _(b'no node'))
 
         return self._nodetorev[node]
 
@@ -475,8 +486,9 @@
         startrev = self.rev(start) if start is not None else nullrev
         stoprevs = {self.rev(n) for n in stop or []}
 
-        revs = dagop.headrevssubset(self.revs, self.parentrevs,
-                                    startrev=startrev, stoprevs=stoprevs)
+        revs = dagop.headrevssubset(
+            self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
+        )
 
         return [self.node(rev) for rev in revs]
 
@@ -489,7 +501,8 @@
             r'  FROM filedata '
             r'  WHERE path=? AND (p1rev=? OR p2rev=?) '
             r'  ORDER BY revnum ASC',
-            (self._path, rev, rev))
+            (self._path, rev, rev),
+        )
 
         return [row[0] for row in res]
 
@@ -519,7 +532,7 @@
             node = self.node(node)
 
         if node not in self._nodetorev:
-            raise error.LookupError(node, self._path, _('no node'))
+            raise error.LookupError(node, self._path, _(b'no node'))
 
         if node in self._revisioncache:
             return self._revisioncache[node]
@@ -528,15 +541,19 @@
         # short-circuit delta chain traversal and decompression as soon as
         # we encounter a revision in the cache.
 
-        stoprids = {self._revisions[n].rid: n
-                    for n in self._revisioncache}
+        stoprids = {self._revisions[n].rid: n for n in self._revisioncache}
 
         if not stoprids:
             stoprids[-1] = None
 
-        fulltext = resolvedeltachain(self._db, self._pathid, node,
-                                     self._revisioncache, stoprids,
-                                     zstddctx=self._dctx)
+        fulltext = resolvedeltachain(
+            self._db,
+            self._pathid,
+            node,
+            self._revisioncache,
+            stoprids,
+            zstddctx=self._dctx,
+        )
 
         # Don't verify hashes if parent nodes were rewritten, as the hash
         # wouldn't verify.
@@ -549,6 +566,9 @@
 
         return fulltext
 
+    def rawdata(self, *args, **kwargs):
+        return self.revision(*args, **kwargs)
+
     def read(self, node):
         return storageutil.filtermetadata(self.revision(node))
 
@@ -558,12 +578,18 @@
     def cmp(self, node, fulltext):
         return not storageutil.filedataequivalent(self, node, fulltext)
 
-    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
-                      assumehaveparentrevisions=False,
-                      deltamode=repository.CG_DELTAMODE_STD):
-        if nodesorder not in ('nodes', 'storage', 'linear', None):
-            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
-                                         nodesorder)
+    def emitrevisions(
+        self,
+        nodes,
+        nodesorder=None,
+        revisiondata=False,
+        assumehaveparentrevisions=False,
+        deltamode=repository.CG_DELTAMODE_STD,
+    ):
+        if nodesorder not in (b'nodes', b'storage', b'linear', None):
+            raise error.ProgrammingError(
+                b'unhandled value for nodesorder: %s' % nodesorder
+            )
 
         nodes = [n for n in nodes if n != nullid]
 
@@ -575,23 +601,29 @@
             r'SELECT revnum, deltaid FROM fileindex '
             r'WHERE pathid=? '
             r'    AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
-            tuple([self._pathid] + nodes))
+            tuple([self._pathid] + nodes),
+        )
 
         deltabases = {}
 
         for rev, deltaid in res:
             res = self._db.execute(
                 r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
-                (self._pathid, deltaid))
+                (self._pathid, deltaid),
+            )
             deltabases[rev] = res.fetchone()[0]
 
         # TODO define revdifffn so we can use delta from storage.
         for delta in storageutil.emitrevisions(
-            self, nodes, nodesorder, sqliterevisiondelta,
+            self,
+            nodes,
+            nodesorder,
+            sqliterevisiondelta,
             deltaparentfn=deltabases.__getitem__,
             revisiondata=revisiondata,
             assumehaveparentrevisions=assumehaveparentrevisions,
-            deltamode=deltamode):
+            deltamode=deltamode,
+        ):
 
             yield delta
 
@@ -605,10 +637,19 @@
 
         return self.addrevision(filedata, transaction, linkrev, p1, p2)
 
-    def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
-                    flags=0, cachedelta=None):
+    def addrevision(
+        self,
+        revisiondata,
+        transaction,
+        linkrev,
+        p1,
+        p2,
+        node=None,
+        flags=0,
+        cachedelta=None,
+    ):
         if flags:
-            raise SQLiteStoreError(_('flags not supported on revisions'))
+            raise SQLiteStoreError(_(b'flags not supported on revisions'))
 
         validatehash = node is not None
         node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
@@ -619,14 +660,21 @@
         if node in self._nodetorev:
             return node
 
-        node = self._addrawrevision(node, revisiondata, transaction, linkrev,
-                                    p1, p2)
+        node = self._addrawrevision(
+            node, revisiondata, transaction, linkrev, p1, p2
+        )
 
         self._revisioncache[node] = revisiondata
         return node
 
-    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
-                 maybemissingparents=False):
+    def addgroup(
+        self,
+        deltas,
+        linkmapper,
+        transaction,
+        addrevisioncb=None,
+        maybemissingparents=False,
+    ):
         nodes = []
 
         for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
@@ -636,7 +684,7 @@
                 storeflags |= FLAG_CENSORED
 
             if wireflags & ~repository.REVISION_FLAG_CENSORED:
-                raise SQLiteStoreError('unhandled revision flag')
+                raise SQLiteStoreError(b'unhandled revision flag')
 
             if maybemissingparents:
                 if p1 != nullid and not self.hasnode(p1):
@@ -652,18 +700,16 @@
             # If base is censored, delta must be full replacement in a single
             # patch operation.
             if baserev != nullrev and self.iscensored(baserev):
-                hlen = struct.calcsize('>lll')
-                oldlen = len(self.revision(deltabase, raw=True,
-                                           _verifyhash=False))
+                hlen = struct.calcsize(b'>lll')
+                oldlen = len(self.rawdata(deltabase, _verifyhash=False))
                 newlen = len(delta) - hlen
 
                 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
-                    raise error.CensoredBaseError(self._path,
-                                                  deltabase)
+                    raise error.CensoredBaseError(self._path, deltabase)
 
-            if (not (storeflags & FLAG_CENSORED)
-                and storageutil.deltaiscensored(
-                    delta, baserev, lambda x: len(self.revision(x, raw=True)))):
+            if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored(
+                delta, baserev, lambda x: len(self.rawdata(x))
+            ):
                 storeflags |= FLAG_CENSORED
 
             linkrev = linkmapper(linknode)
@@ -680,9 +726,9 @@
                     entry.flags &= ~FLAG_MISSING_P1
 
                     self._db.execute(
-                        r'UPDATE fileindex SET p1rev=?, flags=? '
-                        r'WHERE id=?',
-                        (self._nodetorev[p1], entry.flags, entry.rid))
+                        r'UPDATE fileindex SET p1rev=?, flags=? ' r'WHERE id=?',
+                        (self._nodetorev[p1], entry.flags, entry.rid),
+                    )
 
                 if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
                     entry.p2node = p2
@@ -690,9 +736,9 @@
                     entry.flags &= ~FLAG_MISSING_P2
 
                     self._db.execute(
-                        r'UPDATE fileindex SET p2rev=?, flags=? '
-                        r'WHERE id=?',
-                        (self._nodetorev[p1], entry.flags, entry.rid))
+                        r'UPDATE fileindex SET p2rev=?, flags=? ' r'WHERE id=?',
+                        (self._nodetorev[p2], entry.flags, entry.rid),
+                    )
 
                 continue
 
@@ -703,8 +749,16 @@
                 text = None
                 storedelta = (deltabase, delta)
 
-            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
-                                 storedelta=storedelta, flags=storeflags)
+            self._addrawrevision(
+                node,
+                text,
+                transaction,
+                linkrev,
+                p1,
+                p2,
+                storedelta=storedelta,
+                flags=storeflags,
+            )
 
             if addrevisioncb:
                 addrevisioncb(self, node)
@@ -716,9 +770,10 @@
 
         # This restriction is cargo culted from revlogs and makes no sense for
         # SQLite, since columns can be resized at will.
-        if len(tombstone) > len(self.revision(censornode, raw=True)):
-            raise error.Abort(_('censor tombstone must be no longer than '
-                                'censored data'))
+        if len(tombstone) > len(self.rawdata(censornode)):
+            raise error.Abort(
+                _(b'censor tombstone must be no longer than censored data')
+            )
 
         # We need to replace the censored revision's data with the tombstone.
         # But replacing that data will have implications for delta chains that
@@ -733,36 +788,42 @@
         # Find the delta to be censored.
         censoreddeltaid = self._db.execute(
             r'SELECT deltaid FROM fileindex WHERE id=?',
-            (self._revisions[censornode].rid,)).fetchone()[0]
+            (self._revisions[censornode].rid,),
+        ).fetchone()[0]
 
         # Find all its delta chain children.
         # TODO once we support storing deltas for !files, we'll need to look
         # for those delta chains too.
-        rows = list(self._db.execute(
-            r'SELECT id, pathid, node FROM fileindex '
-            r'WHERE deltabaseid=? OR deltaid=?',
-            (censoreddeltaid, censoreddeltaid)))
+        rows = list(
+            self._db.execute(
+                r'SELECT id, pathid, node FROM fileindex '
+                r'WHERE deltabaseid=? OR deltaid=?',
+                (censoreddeltaid, censoreddeltaid),
+            )
+        )
 
         for row in rows:
             rid, pathid, node = row
 
-            fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
-                                         zstddctx=self._dctx)
+            fulltext = resolvedeltachain(
+                self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
+            )
 
             deltahash = hashlib.sha1(fulltext).digest()
 
-            if self._compengine == 'zstd':
+            if self._compengine == b'zstd':
                 deltablob = self._cctx.compress(fulltext)
                 compression = COMPRESSION_ZSTD
-            elif self._compengine == 'zlib':
+            elif self._compengine == b'zlib':
                 deltablob = zlib.compress(fulltext)
                 compression = COMPRESSION_ZLIB
-            elif self._compengine == 'none':
+            elif self._compengine == b'none':
                 deltablob = fulltext
                 compression = COMPRESSION_NONE
             else:
-                raise error.ProgrammingError('unhandled compression engine: %s'
-                                             % self._compengine)
+                raise error.ProgrammingError(
+                    b'unhandled compression engine: %s' % self._compengine
+                )
 
             if len(deltablob) >= len(fulltext):
                 deltablob = fulltext
@@ -772,13 +833,16 @@
 
             self._db.execute(
                 r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
-                r'WHERE id=?', (deltaid, rid))
+                r'WHERE id=?',
+                (deltaid, rid),
+            )
 
         # Now create the tombstone delta and replace the delta on the censored
         # node.
         deltahash = hashlib.sha1(tombstone).digest()
-        tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
-                                       deltahash, tombstone)
+        tombstonedeltaid = insertdelta(
+            self._db, COMPRESSION_NONE, deltahash, tombstone
+        )
 
         flags = self._revisions[censornode].flags
         flags |= FLAG_CENSORED
@@ -786,19 +850,22 @@
         self._db.execute(
             r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
             r'WHERE pathid=? AND node=?',
-            (flags, tombstonedeltaid, self._pathid, censornode))
+            (flags, tombstonedeltaid, self._pathid, censornode),
+        )
 
-        self._db.execute(
-            r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))
+        self._db.execute(r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))
 
         self._refreshindex()
         self._revisioncache.clear()
 
     def getstrippoint(self, minlink):
-        return storageutil.resolvestripinfo(minlink, len(self) - 1,
-                                            [self.rev(n) for n in self.heads()],
-                                            self.linkrev,
-                                            self.parentrevs)
+        return storageutil.resolvestripinfo(
+            minlink,
+            len(self) - 1,
+            [self.rev(n) for n in self.heads()],
+            self.linkrev,
+            self.parentrevs,
+        )
 
     def strip(self, minlink, transaction):
         if not len(self):
@@ -812,7 +879,8 @@
         for rev in self.revs(rev):
             self._db.execute(
                 r'DELETE FROM fileindex WHERE pathid=? AND node=?',
-                (self._pathid, self.node(rev)))
+                (self._pathid, self.node(rev)),
+            )
 
         # TODO how should we garbage collect data in delta table?
 
@@ -825,33 +893,39 @@
     def files(self):
         return []
 
-    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
-                    revisionscount=False, trackedsize=False,
-                    storedsize=False):
+    def storageinfo(
+        self,
+        exclusivefiles=False,
+        sharedfiles=False,
+        revisionscount=False,
+        trackedsize=False,
+        storedsize=False,
+    ):
         d = {}
 
         if exclusivefiles:
-            d['exclusivefiles'] = []
+            d[b'exclusivefiles'] = []
 
         if sharedfiles:
             # TODO list sqlite file(s) here.
-            d['sharedfiles'] = []
+            d[b'sharedfiles'] = []
 
         if revisionscount:
-            d['revisionscount'] = len(self)
+            d[b'revisionscount'] = len(self)
 
         if trackedsize:
-            d['trackedsize'] = sum(len(self.revision(node))
-                                       for node in self._nodetorev)
+            d[b'trackedsize'] = sum(
+                len(self.revision(node)) for node in self._nodetorev
+            )
 
         if storedsize:
             # TODO implement this?
-            d['storedsize'] = None
+            d[b'storedsize'] = None
 
         return d
 
     def verifyintegrity(self, state):
-        state['skipread'] = set()
+        state[b'skipread'] = set()
 
         for rev in self:
             node = self.node(rev)
@@ -860,10 +934,10 @@
                 self.revision(node)
             except Exception as e:
                 yield sqliteproblem(
-                    error=_('unpacking %s: %s') % (short(node), e),
-                    node=node)
+                    error=_(b'unpacking %s: %s') % (short(node), e), node=node
+                )
 
-                state['skipread'].add(node)
+                state[b'skipread'].add(node)
 
     # End of ifilestorage interface.
 
@@ -882,14 +956,23 @@
         if storageutil.iscensoredtext(fulltext):
             raise error.CensoredNodeError(self._path, node, fulltext)
 
-        raise SQLiteStoreError(_('integrity check failed on %s') %
-                               self._path)
+        raise SQLiteStoreError(_(b'integrity check failed on %s') % self._path)
 
-    def _addrawrevision(self, node, revisiondata, transaction, linkrev,
-                        p1, p2, storedelta=None, flags=0):
+    def _addrawrevision(
+        self,
+        node,
+        revisiondata,
+        transaction,
+        linkrev,
+        p1,
+        p2,
+        storedelta=None,
+        flags=0,
+    ):
         if self._pathid is None:
             res = self._db.execute(
-                r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
+                r'INSERT INTO filepath (path) VALUES (?)', (self._path,)
+            )
             self._pathid = res.lastrowid
 
         # For simplicity, always store a delta against p1.
@@ -908,8 +991,9 @@
             if deltabase == nullid:
                 delta = revisiondata
             else:
-                delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
-                                       revisiondata)
+                delta = mdiff.textdiff(
+                    self.revision(self.rev(deltabase)), revisiondata
+                )
 
         # File index stores a pointer to its delta and the parent delta.
         # The parent delta is stored via a pointer to the fileindex PK.
@@ -924,18 +1008,19 @@
         # first.
         deltahash = hashlib.sha1(delta).digest()
 
-        if self._compengine == 'zstd':
+        if self._compengine == b'zstd':
             deltablob = self._cctx.compress(delta)
             compression = COMPRESSION_ZSTD
-        elif self._compengine == 'zlib':
+        elif self._compengine == b'zlib':
             deltablob = zlib.compress(delta)
             compression = COMPRESSION_ZLIB
-        elif self._compengine == 'none':
+        elif self._compengine == b'none':
             deltablob = delta
             compression = COMPRESSION_NONE
         else:
-            raise error.ProgrammingError('unhandled compression engine: %s' %
-                                         self._compengine)
+            raise error.ProgrammingError(
+                b'unhandled compression engine: %s' % self._compengine
+            )
 
         # Don't store compressed data if it isn't practical.
         if len(deltablob) >= len(delta):
@@ -961,8 +1046,17 @@
             r'    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
             r'    deltaid, deltabaseid) '
             r'    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
-            (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
-             deltaid, baseid)
+            (
+                self._pathid,
+                rev,
+                node,
+                p1rev,
+                p2rev,
+                linkrev,
+                flags,
+                deltaid,
+                baseid,
+            ),
         ).lastrowid
 
         entry = revisionentry(
@@ -974,7 +1068,8 @@
             p1node=p1,
             p2node=p2,
             linkrev=linkrev,
-            flags=flags)
+            flags=flags,
+        )
 
         self._nodetorev[node] = rev
         self._revtonode[rev] = node
@@ -982,6 +1077,7 @@
 
         return node
 
+
 class sqliterepository(localrepo.localrepository):
     def cancopy(self):
         return False
@@ -999,7 +1095,7 @@
         def committransaction(_):
             self._dbconn.commit()
 
-        tr.addfinalize('sqlitestore', committransaction)
+        tr.addfinalize(b'sqlitestore', committransaction)
 
         return tr
 
@@ -1014,11 +1110,12 @@
             if self._db[0] == tid:
                 return self._db[1]
 
-        db = makedb(self.svfs.join('db.sqlite'))
+        db = makedb(self.svfs.join(b'db.sqlite'))
         self._db = (tid, db)
 
         return db
 
+
 def makedb(path):
     """Construct a database handle for a database at path."""
 
@@ -1038,12 +1135,13 @@
         pass
 
     else:
-        raise error.Abort(_('sqlite database has unrecognized version'))
+        raise error.Abort(_(b'sqlite database has unrecognized version'))
 
     db.execute(r'PRAGMA journal_mode=WAL')
 
     return db
 
+
 def featuresetup(ui, supported):
     supported.add(REQUIREMENT)
 
@@ -1055,77 +1153,97 @@
     supported.add(REQUIREMENT_SHALLOW_FILES)
     supported.add(repository.NARROW_REQUIREMENT)
 
+
 def newreporequirements(orig, ui, createopts):
-    if createopts['backend'] != 'sqlite':
+    if createopts[b'backend'] != b'sqlite':
         return orig(ui, createopts)
 
     # This restriction can be lifted once we have more confidence.
-    if 'sharedrepo' in createopts:
-        raise error.Abort(_('shared repositories not supported with SQLite '
-                            'store'))
+    if b'sharedrepo' in createopts:
+        raise error.Abort(
+            _(b'shared repositories not supported with SQLite store')
+        )
 
     # This filtering is out of an abundance of caution: we want to ensure
     # we honor creation options and we do that by annotating exactly the
     # creation options we recognize.
     known = {
-        'narrowfiles',
-        'backend',
-        'shallowfilestore',
+        b'narrowfiles',
+        b'backend',
+        b'shallowfilestore',
     }
 
     unsupported = set(createopts) - known
     if unsupported:
-        raise error.Abort(_('SQLite store does not support repo creation '
-                            'option: %s') % ', '.join(sorted(unsupported)))
+        raise error.Abort(
+            _(b'SQLite store does not support repo creation option: %s')
+            % b', '.join(sorted(unsupported))
+        )
 
     # Since we're a hybrid store that still relies on revlogs, we fall back
     # to using the revlogv1 backend's storage requirements then adding our
     # own requirement.
-    createopts['backend'] = 'revlogv1'
+    createopts[b'backend'] = b'revlogv1'
     requirements = orig(ui, createopts)
     requirements.add(REQUIREMENT)
 
-    compression = ui.config('storage', 'sqlite.compression')
+    compression = ui.config(b'storage', b'sqlite.compression')
 
-    if compression == 'zstd' and not zstd:
-        raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
-                            'zstandard compression not available to this '
-                            'Mercurial install'))
+    if compression == b'zstd' and not zstd:
+        raise error.Abort(
+            _(
+                b'storage.sqlite.compression set to "zstd" but '
+                b'zstandard compression not available to this '
+                b'Mercurial install'
+            )
+        )
 
-    if compression == 'zstd':
+    if compression == b'zstd':
         requirements.add(REQUIREMENT_ZSTD)
-    elif compression == 'zlib':
+    elif compression == b'zlib':
         requirements.add(REQUIREMENT_ZLIB)
-    elif compression == 'none':
+    elif compression == b'none':
         requirements.add(REQUIREMENT_NONE)
     else:
-        raise error.Abort(_('unknown compression engine defined in '
-                            'storage.sqlite.compression: %s') % compression)
+        raise error.Abort(
+            _(
+                b'unknown compression engine defined in '
+                b'storage.sqlite.compression: %s'
+            )
+            % compression
+        )
 
-    if createopts.get('shallowfilestore'):
+    if createopts.get(b'shallowfilestore'):
         requirements.add(REQUIREMENT_SHALLOW_FILES)
 
     return requirements
 
+
 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
 class sqlitefilestorage(object):
     """Repository file storage backed by SQLite."""
+
     def file(self, path):
         if path[0] == b'/':
             path = path[1:]
 
         if REQUIREMENT_ZSTD in self.requirements:
-            compression = 'zstd'
+            compression = b'zstd'
         elif REQUIREMENT_ZLIB in self.requirements:
-            compression = 'zlib'
+            compression = b'zlib'
         elif REQUIREMENT_NONE in self.requirements:
-            compression = 'none'
+            compression = b'none'
         else:
-            raise error.Abort(_('unable to determine what compression engine '
-                                'to use for SQLite storage'))
+            raise error.Abort(
+                _(
+                    b'unable to determine what compression engine '
+                    b'to use for SQLite storage'
+                )
+            )
 
         return sqlitefilestore(self._dbconn, path, compression)
 
+
 def makefilestorage(orig, requirements, features, **kwargs):
     """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
     if REQUIREMENT in requirements:
@@ -1136,16 +1254,22 @@
     else:
         return orig(requirements=requirements, features=features, **kwargs)
 
+
 def makemain(orig, ui, requirements, **kwargs):
     if REQUIREMENT in requirements:
         if REQUIREMENT_ZSTD in requirements and not zstd:
-            raise error.Abort(_('repository uses zstandard compression, which '
-                                'is not available to this Mercurial install'))
+            raise error.Abort(
+                _(
+                    b'repository uses zstandard compression, which '
+                    b'is not available to this Mercurial install'
+                )
+            )
 
         return sqliterepository
 
     return orig(requirements=requirements, **kwargs)
 
+
 def verifierinit(orig, self, *args, **kwargs):
     orig(self, *args, **kwargs)
 
@@ -1153,16 +1277,16 @@
     # advertised. So suppress these warnings.
     self.warnorphanstorefiles = False
 
+
 def extsetup(ui):
     localrepo.featuresetupfuncs.add(featuresetup)
-    extensions.wrapfunction(localrepo, 'newreporequirements',
-                            newreporequirements)
-    extensions.wrapfunction(localrepo, 'makefilestorage',
-                            makefilestorage)
-    extensions.wrapfunction(localrepo, 'makemain',
-                            makemain)
-    extensions.wrapfunction(verify.verifier, '__init__',
-                            verifierinit)
+    extensions.wrapfunction(
+        localrepo, b'newreporequirements', newreporequirements
+    )
+    extensions.wrapfunction(localrepo, b'makefilestorage', makefilestorage)
+    extensions.wrapfunction(localrepo, b'makemain', makemain)
+    extensions.wrapfunction(verify.verifier, b'__init__', verifierinit)
+
 
 def reposetup(ui, repo):
     if isinstance(repo, sqliterepository):
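
The compression handling in the sqlitestore hunks above (both on the censoring
path and in ``_addrawrevision``) follows one pattern: dispatch on the
configured engine, then keep the raw delta whenever compression fails to
shrink it. A minimal standalone sketch of that fallback logic, using only the
stdlib ``zlib`` and hypothetical marker constants standing in for the
``COMPRESSION_*`` values (the zstd branch is omitted since the ``zstandard``
module may not be installed)::

   import zlib

   # Hypothetical markers standing in for the COMPRESSION_* constants
   # used by the store above.
   COMPRESSION_ZLIB = 1
   COMPRESSION_NONE = 2

   def encodedelta(delta, engine=b'zlib'):
       """Compress a delta, keeping it raw when compression is not a win."""
       if engine == b'zlib':
           deltablob, compression = zlib.compress(delta), COMPRESSION_ZLIB
       elif engine == b'none':
           deltablob, compression = delta, COMPRESSION_NONE
       else:
           raise ValueError('unhandled compression engine: %r' % engine)

       # Don't store compressed data if it isn't practical.
       if len(deltablob) >= len(delta):
           deltablob, compression = delta, COMPRESSION_NONE

       return compression, deltablob
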
--- a/hgext/strip.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/strip.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,6 +6,7 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
+from mercurial.pycompat import getattr
 from mercurial import (
     bookmarks as bookmarksmod,
     cmdutil,
@@ -20,6 +21,7 @@
     scmutil,
     util,
 )
+
 nullid = nodemod.nullid
 release = lockmod.release
 
@@ -29,7 +31,8 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
 def checklocalchanges(repo, force=False):
     s = repo.status()
@@ -40,26 +43,40 @@
         cmdutil.checkunfinished(repo, skipmerge=True)
     return s
 
+
 def _findupdatetarget(repo, nodes):
     unode, p2 = repo.changelog.parents(nodes[0])
     currentbranch = repo[None].branch()
 
-    if (util.safehasattr(repo, 'mq') and p2 != nullid
-        and p2 in [x.node for x in repo.mq.applied]):
+    if (
+        util.safehasattr(repo, b'mq')
+        and p2 != nullid
+        and p2 in [x.node for x in repo.mq.applied]
+    ):
         unode = p2
     elif currentbranch != repo[unode].branch():
-        pwdir = 'parents(wdir())'
-        revset = 'max(((parents(%ln::%r) + %r) - %ln::%r) and branch(%s))'
-        branchtarget = repo.revs(revset, nodes, pwdir, pwdir, nodes, pwdir,
-                                 currentbranch)
+        pwdir = b'parents(wdir())'
+        revset = b'max(((parents(%ln::%r) + %r) - %ln::%r) and branch(%s))'
+        branchtarget = repo.revs(
+            revset, nodes, pwdir, pwdir, nodes, pwdir, currentbranch
+        )
         if branchtarget:
             cl = repo.changelog
             unode = cl.node(branchtarget.first())
 
     return unode
 
-def strip(ui, repo, revs, update=True, backup=True, force=None, bookmarks=None,
-          soft=False):
+
+def strip(
+    ui,
+    repo,
+    revs,
+    update=True,
+    backup=True,
+    force=None,
+    bookmarks=None,
+    soft=False,
+):
     with repo.wlock(), repo.lock():
 
         if update:
@@ -75,33 +92,63 @@
 
         repomarks = repo._bookmarks
         if bookmarks:
-            with repo.transaction('strip') as tr:
+            with repo.transaction(b'strip') as tr:
                 if repo._activebookmark in bookmarks:
                     bookmarksmod.deactivate(repo)
                 repomarks.applychanges(repo, tr, [(b, None) for b in bookmarks])
             for bookmark in sorted(bookmarks):
-                ui.write(_("bookmark '%s' deleted\n") % bookmark)
+                ui.write(_(b"bookmark '%s' deleted\n") % bookmark)
+
 
-@command("strip",
-         [
-          ('r', 'rev', [], _('strip specified revision (optional, '
-                               'can specify revisions without this '
-                               'option)'), _('REV')),
-          ('f', 'force', None, _('force removal of changesets, discard '
-                                 'uncommitted changes (no backup)')),
-          ('', 'no-backup', None, _('do not save backup bundle')),
-          ('', 'nobackup', None, _('do not save backup bundle '
-                                   '(DEPRECATED)')),
-          ('n', '', None, _('ignored  (DEPRECATED)')),
-          ('k', 'keep', None, _("do not modify working directory during "
-                                "strip")),
-          ('B', 'bookmark', [], _("remove revs only reachable from given"
-                                  " bookmark"), _('BOOKMARK')),
-          ('', 'soft', None,
-          _("simply drop changesets from visible history (EXPERIMENTAL)")),
-         ],
-          _('hg strip [-k] [-f] [-B bookmark] [-r] REV...'),
-          helpcategory=command.CATEGORY_MAINTENANCE)
+@command(
+    b"strip",
+    [
+        (
+            b'r',
+            b'rev',
+            [],
+            _(
+                b'strip specified revision (optional, '
+                b'can specify revisions without this '
+                b'option)'
+            ),
+            _(b'REV'),
+        ),
+        (
+            b'f',
+            b'force',
+            None,
+            _(
+                b'force removal of changesets, discard '
+                b'uncommitted changes (no backup)'
+            ),
+        ),
+        (b'', b'no-backup', None, _(b'do not save backup bundle')),
+        (b'', b'nobackup', None, _(b'do not save backup bundle (DEPRECATED)'),),
+        (b'n', b'', None, _(b'ignored  (DEPRECATED)')),
+        (
+            b'k',
+            b'keep',
+            None,
+            _(b"do not modify working directory during strip"),
+        ),
+        (
+            b'B',
+            b'bookmark',
+            [],
+            _(b"remove revs only reachable from given bookmark"),
+            _(b'BOOKMARK'),
+        ),
+        (
+            b'',
+            b'soft',
+            None,
+            _(b"simply drop changesets from visible history (EXPERIMENTAL)"),
+        ),
+    ],
+    _(b'hg strip [-k] [-f] [-B bookmark] [-r] REV...'),
+    helpcategory=command.CATEGORY_MAINTENANCE,
+)
 def stripcmd(ui, repo, *revs, **opts):
     """strip changesets and all their descendants from the repository
 
@@ -133,40 +180,42 @@
     """
     opts = pycompat.byteskwargs(opts)
     backup = True
-    if opts.get('no_backup') or opts.get('nobackup'):
+    if opts.get(b'no_backup') or opts.get(b'nobackup'):
         backup = False
 
     cl = repo.changelog
-    revs = list(revs) + opts.get('rev')
+    revs = list(revs) + opts.get(b'rev')
     revs = set(scmutil.revrange(repo, revs))
 
     with repo.wlock():
-        bookmarks = set(opts.get('bookmark'))
+        bookmarks = set(opts.get(b'bookmark'))
         if bookmarks:
             repomarks = repo._bookmarks
             if not bookmarks.issubset(repomarks):
-                raise error.Abort(_("bookmark '%s' not found") %
-                    ','.join(sorted(bookmarks - set(repomarks.keys()))))
+                raise error.Abort(
+                    _(b"bookmark '%s' not found")
+                    % b','.join(sorted(bookmarks - set(repomarks.keys())))
+                )
 
             # If the requested bookmark is not the only one pointing to
             # a revision, we have to only delete the bookmark and not strip
             # anything. revsets cannot detect that case.
             nodetobookmarks = {}
-            for mark, node in repomarks.iteritems():
+            for mark, node in pycompat.iteritems(repomarks):
                 nodetobookmarks.setdefault(node, []).append(mark)
             for marks in nodetobookmarks.values():
                 if bookmarks.issuperset(marks):
                     rsrevs = scmutil.bookmarkrevs(repo, marks[0])
                     revs.update(set(rsrevs))
             if not revs:
-                with repo.lock(), repo.transaction('bookmark') as tr:
+                with repo.lock(), repo.transaction(b'bookmark') as tr:
                     bmchanges = [(b, None) for b in bookmarks]
                     repomarks.applychanges(repo, tr, bmchanges)
                 for bookmark in sorted(bookmarks):
-                    ui.write(_("bookmark '%s' deleted\n") % bookmark)
+                    ui.write(_(b"bookmark '%s' deleted\n") % bookmark)
 
         if not revs:
-            raise error.Abort(_('empty revision set'))
+            raise error.Abort(_(b'empty revision set'))
 
         descendants = set(cl.descendants(revs))
         strippedrevs = revs.union(descendants)
@@ -174,8 +223,10 @@
 
         # if one of the wdir parents is stripped we'll need
         # to update away to an earlier revision
-        update = any(p != nullid and cl.rev(p) in strippedrevs
-                     for p in repo.dirstate.parents())
+        update = any(
+            p != nullid and cl.rev(p) in strippedrevs
+            for p in repo.dirstate.parents()
+        )
 
         rootnodes = set(cl.node(r) for r in roots)
 
@@ -183,7 +234,7 @@
         if q is not None and q.applied:
             # refresh queue state if we're about to strip
             # applied patches
-            if cl.rev(repo.lookup('qtip')) in strippedrevs:
+            if cl.rev(repo.lookup(b'qtip')) in strippedrevs:
                 q.applieddirty = True
                 start = 0
                 end = len(q.applied)
@@ -197,7 +248,7 @@
                 q.savedirty()
 
         revs = sorted(rootnodes)
-        if update and opts.get('keep'):
+        if update and opts.get(b'keep'):
             urev = _findupdatetarget(repo, revs)
             uctx = repo[urev]
 
@@ -211,20 +262,26 @@
 
             # reset files that only changed in the dirstate too
             dirstate = repo.dirstate
-            dirchanges = [f for f in dirstate if dirstate[f] != 'n']
+            dirchanges = [f for f in dirstate if dirstate[f] != b'n']
             changedfiles.extend(dirchanges)
 
             repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
             repo.dirstate.write(repo.currenttransaction())
 
             # clear resolve state
-            merge.mergestate.clean(repo, repo['.'].node())
+            merge.mergestate.clean(repo, repo[b'.'].node())
 
             update = False
 
-
-        strip(ui, repo, revs, backup=backup, update=update,
-              force=opts.get('force'), bookmarks=bookmarks,
-              soft=opts['soft'])
+        strip(
+            ui,
+            repo,
+            revs,
+            backup=backup,
+            update=update,
+            force=opts.get(b'force'),
+            bookmarks=bookmarks,
+            soft=opts[b'soft'],
+        )
 
     return 0
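
Most of the churn in ``stripcmd`` above is the bytes-literal conversion: on
Python 3, ``**opts`` arrives with ``str`` keys, so ``pycompat.byteskwargs(opts)``
re-keys the dict before the rest of the code looks anything up with ``b'...'``
keys. A rough sketch of what such a helper does (the real ``pycompat``
implementation differs in detail)::

   import sys

   def byteskwargs(opts):
       """Re-key a keyword-argument dict with bytes keys (sketch only)."""
       if sys.version_info[0] < 3:
           return opts  # keys are already bytes on Python 2
       return {k.encode('latin-1'): v for k, v in opts.items()}

   opts = byteskwargs({'no_backup': True, 'rev': []})
   assert opts[b'no_backup'] is True
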
--- a/hgext/transplant.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/transplant.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,6 +18,7 @@
 import os
 
 from mercurial.i18n import _
+from mercurial.pycompat import open
 from mercurial import (
     bundlerepo,
     cmdutil,
@@ -44,32 +45,36 @@
     stringutil,
 )
 
+
 class TransplantError(error.Abort):
     pass
 
+
 cmdtable = {}
 command = registrar.command(cmdtable)
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('transplant', 'filter',
-    default=None,
+configitem(
+    b'transplant', b'filter', default=None,
 )
-configitem('transplant', 'log',
-    default=None,
+configitem(
+    b'transplant', b'log', default=None,
 )
 
+
 class transplantentry(object):
     def __init__(self, lnode, rnode):
         self.lnode = lnode
         self.rnode = rnode
 
+
 class transplants(object):
     def __init__(self, path=None, transplantfile=None, opener=None):
         self.path = path
@@ -86,7 +91,7 @@
         abspath = os.path.join(self.path, self.transplantfile)
         if self.transplantfile and os.path.exists(abspath):
             for line in self.opener.read(self.transplantfile).splitlines():
-                lnode, rnode = map(revlog.bin, line.split(':'))
+                lnode, rnode = map(revlog.bin, line.split(b':'))
                 list = self.transplants.setdefault(rnode, [])
                 list.append(transplantentry(lnode, rnode))
 
@@ -94,11 +99,11 @@
         if self.dirty and self.transplantfile:
             if not os.path.isdir(self.path):
                 os.mkdir(self.path)
-            fp = self.opener(self.transplantfile, 'w')
-            for list in self.transplants.itervalues():
+            fp = self.opener(self.transplantfile, b'w')
+            for list in pycompat.itervalues(self.transplants):
                 for t in list:
                     l, r = map(nodemod.hex, (t.lnode, t.rnode))
-                    fp.write(l + ':' + r + '\n')
+                    fp.write(l + b':' + r + b'\n')
             fp.close()
         self.dirty = False
 
@@ -116,17 +121,22 @@
             del list[list.index(transplant)]
             self.dirty = True
 
+
 class transplanter(object):
     def __init__(self, ui, repo, opts):
         self.ui = ui
-        self.path = repo.vfs.join('transplant')
+        self.path = repo.vfs.join(b'transplant')
         self.opener = vfsmod.vfs(self.path)
-        self.transplants = transplants(self.path, 'transplants',
-                                       opener=self.opener)
+        self.transplants = transplants(
+            self.path, b'transplants', opener=self.opener
+        )
+
         def getcommiteditor():
-            editform = cmdutil.mergeeditform(repo[None], 'transplant')
-            return cmdutil.getcommiteditor(editform=editform,
-                                           **pycompat.strkwargs(opts))
+            editform = cmdutil.mergeeditform(repo[None], b'transplant')
+            return cmdutil.getcommiteditor(
+                editform=editform, **pycompat.strkwargs(opts)
+            )
+
         self.getcommiteditor = getcommiteditor
 
     def applied(self, repo, node, parent):
@@ -136,8 +146,9 @@
             parentrev = repo.changelog.rev(parent)
         if hasnode(repo, node):
             rev = repo.changelog.rev(node)
-            reachable = repo.changelog.ancestors([parentrev], rev,
-                                                 inclusive=True)
+            reachable = repo.changelog.ancestors(
+                [parentrev], rev, inclusive=True
+            )
             if rev in reachable:
                 return True
         for t in self.transplants.get(node):
@@ -146,8 +157,9 @@
                 self.transplants.remove(t)
                 return False
             lnoderev = repo.changelog.rev(t.lnode)
-            if lnoderev in repo.changelog.ancestors([parentrev], lnoderev,
-                                                    inclusive=True):
+            if lnoderev in repo.changelog.ancestors(
+                [parentrev], lnoderev, inclusive=True
+            ):
                 return True
         return False
 
@@ -164,18 +176,19 @@
         lock = tr = None
         try:
             lock = repo.lock()
-            tr = repo.transaction('transplant')
+            tr = repo.transaction(b'transplant')
             for rev in revs:
                 node = revmap[rev]
-                revstr = '%d:%s' % (rev, nodemod.short(node))
+                revstr = b'%d:%s' % (rev, nodemod.short(node))
 
                 if self.applied(repo, node, p1):
-                    self.ui.warn(_('skipping already applied revision %s\n') %
-                                 revstr)
+                    self.ui.warn(
+                        _(b'skipping already applied revision %s\n') % revstr
+                    )
                     continue
 
                 parents = source.changelog.parents(node)
-                if not (opts.get('filter') or opts.get('log')):
+                if not (opts.get(b'filter') or opts.get(b'log')):
                     # If the changeset parent is the same as the
                     # wdir's parent, just pull it.
                     if parents[0] == p1:
@@ -185,8 +198,9 @@
                     if pulls:
                         if source != repo:
                             exchange.pull(repo, source.peer(), heads=pulls)
-                        merge.update(repo, pulls[-1], branchmerge=False,
-                                     force=False)
+                        merge.update(
+                            repo, pulls[-1], branchmerge=False, force=False
+                        )
                         p1 = repo.dirstate.p1()
                         pulls = []
 
@@ -201,23 +215,26 @@
 
                 skipmerge = False
                 if parents[1] != revlog.nullid:
-                    if not opts.get('parent'):
-                        self.ui.note(_('skipping merge changeset %d:%s\n')
-                                     % (rev, nodemod.short(node)))
+                    if not opts.get(b'parent'):
+                        self.ui.note(
+                            _(b'skipping merge changeset %d:%s\n')
+                            % (rev, nodemod.short(node))
+                        )
                         skipmerge = True
                     else:
-                        parent = source.lookup(opts['parent'])
+                        parent = source.lookup(opts[b'parent'])
                         if parent not in parents:
-                            raise error.Abort(_('%s is not a parent of %s') %
-                                              (nodemod.short(parent),
-                                               nodemod.short(node)))
+                            raise error.Abort(
+                                _(b'%s is not a parent of %s')
+                                % (nodemod.short(parent), nodemod.short(node))
+                            )
                 else:
                     parent = parents[0]
 
                 if skipmerge:
                     patchfile = None
                 else:
-                    fd, patchfile = pycompat.mkstemp(prefix='hg-transplant-')
+                    fd, patchfile = pycompat.mkstemp(prefix=b'hg-transplant-')
                     fp = os.fdopen(fd, r'wb')
                     gen = patch.diff(source, parent, node, opts=diffopts)
                     for chunk in gen:
@@ -228,23 +245,30 @@
                 if patchfile or domerge:
                     try:
                         try:
-                            n = self.applyone(repo, node,
-                                              source.changelog.read(node),
-                                              patchfile, merge=domerge,
-                                              log=opts.get('log'),
-                                              filter=opts.get('filter'))
+                            n = self.applyone(
+                                repo,
+                                node,
+                                source.changelog.read(node),
+                                patchfile,
+                                merge=domerge,
+                                log=opts.get(b'log'),
+                                filter=opts.get(b'filter'),
+                            )
                         except TransplantError:
                             # Do not rollback, it is up to the user to
                             # fix the merge or cancel everything
                             tr.close()
                             raise
                         if n and domerge:
-                            self.ui.status(_('%s merged at %s\n') % (revstr,
-                                      nodemod.short(n)))
+                            self.ui.status(
+                                _(b'%s merged at %s\n')
+                                % (revstr, nodemod.short(n))
+                            )
                         elif n:
-                            self.ui.status(_('%s transplanted to %s\n')
-                                           % (nodemod.short(node),
-                                              nodemod.short(n)))
+                            self.ui.status(
+                                _(b'%s transplanted to %s\n')
+                                % (nodemod.short(node), nodemod.short(n))
+                            )
                     finally:
                         if patchfile:
                             os.unlink(patchfile)
@@ -263,64 +287,76 @@
     def filter(self, filter, node, changelog, patchfile):
         '''arbitrarily rewrite changeset before applying it'''
 
-        self.ui.status(_('filtering %s\n') % patchfile)
+        self.ui.status(_(b'filtering %s\n') % patchfile)
         user, date, msg = (changelog[1], changelog[2], changelog[4])
-        fd, headerfile = pycompat.mkstemp(prefix='hg-transplant-')
+        fd, headerfile = pycompat.mkstemp(prefix=b'hg-transplant-')
         fp = os.fdopen(fd, r'wb')
-        fp.write("# HG changeset patch\n")
-        fp.write("# User %s\n" % user)
-        fp.write("# Date %d %d\n" % date)
-        fp.write(msg + '\n')
+        fp.write(b"# HG changeset patch\n")
+        fp.write(b"# User %s\n" % user)
+        fp.write(b"# Date %d %d\n" % date)
+        fp.write(msg + b'\n')
         fp.close()
 
         try:
-            self.ui.system('%s %s %s' % (filter,
-                                         procutil.shellquote(headerfile),
-                                         procutil.shellquote(patchfile)),
-                           environ={'HGUSER': changelog[1],
-                                    'HGREVISION': nodemod.hex(node),
-                                    },
-                           onerr=error.Abort, errprefix=_('filter failed'),
-                           blockedtag='transplant_filter')
-            user, date, msg = self.parselog(open(headerfile, 'rb'))[1:4]
+            self.ui.system(
+                b'%s %s %s'
+                % (
+                    filter,
+                    procutil.shellquote(headerfile),
+                    procutil.shellquote(patchfile),
+                ),
+                environ={
+                    b'HGUSER': changelog[1],
+                    b'HGREVISION': nodemod.hex(node),
+                },
+                onerr=error.Abort,
+                errprefix=_(b'filter failed'),
+                blockedtag=b'transplant_filter',
+            )
+            user, date, msg = self.parselog(open(headerfile, b'rb'))[1:4]
         finally:
             os.unlink(headerfile)
 
         return (user, date, msg)
 
-    def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
-                 filter=None):
+    def applyone(
+        self, repo, node, cl, patchfile, merge=False, log=False, filter=None
+    ):
         '''apply the patch in patchfile to the repository as a transplant'''
         (manifest, user, (time, timezone), files, message) = cl[:5]
-        date = "%d %d" % (time, timezone)
-        extra = {'transplant_source': node}
+        date = b"%d %d" % (time, timezone)
+        extra = {b'transplant_source': node}
         if filter:
             (user, date, message) = self.filter(filter, node, cl, patchfile)
 
         if log:
             # we don't translate messages inserted into commits
-            message += '\n(transplanted from %s)' % nodemod.hex(node)
+            message += b'\n(transplanted from %s)' % nodemod.hex(node)
 
-        self.ui.status(_('applying %s\n') % nodemod.short(node))
-        self.ui.note('%s %s\n%s\n' % (user, date, message))
+        self.ui.status(_(b'applying %s\n') % nodemod.short(node))
+        self.ui.note(b'%s %s\n%s\n' % (user, date, message))
 
         if not patchfile and not merge:
-            raise error.Abort(_('can only omit patchfile if merging'))
+            raise error.Abort(_(b'can only omit patchfile if merging'))
         if patchfile:
             try:
                 files = set()
                 patch.patch(self.ui, repo, patchfile, files=files, eolmode=None)
                 files = list(files)
             except Exception as inst:
-                seriespath = os.path.join(self.path, 'series')
+                seriespath = os.path.join(self.path, b'series')
                 if os.path.exists(seriespath):
                     os.unlink(seriespath)
                 p1 = repo.dirstate.p1()
                 p2 = node
                 self.log(user, date, message, p1, p2, merge=merge)
-                self.ui.write(stringutil.forcebytestr(inst) + '\n')
-                raise TransplantError(_('fix up the working directory and run '
-                                        'hg transplant --continue'))
+                self.ui.write(stringutil.forcebytestr(inst) + b'\n')
+                raise TransplantError(
+                    _(
+                        b'fix up the working directory and run '
+                        b'hg transplant --continue'
+                    )
+                )
         else:
             files = None
         if merge:
@@ -330,11 +366,18 @@
         else:
             m = match.exact(files)
 
-        n = repo.commit(message, user, date, extra=extra, match=m,
-                        editor=self.getcommiteditor())
+        n = repo.commit(
+            message,
+            user,
+            date,
+            extra=extra,
+            match=m,
+            editor=self.getcommiteditor(),
+        )
         if not n:
-            self.ui.warn(_('skipping emptied changeset %s\n') %
-                           nodemod.short(node))
+            self.ui.warn(
+                _(b'skipping emptied changeset %s\n') % nodemod.short(node)
+            )
             return None
         if not merge:
             self.transplants.set(n, node)
@@ -342,20 +385,23 @@
         return n
 
     def canresume(self):
-        return os.path.exists(os.path.join(self.path, 'journal'))
+        return os.path.exists(os.path.join(self.path, b'journal'))
 
     def resume(self, repo, source, opts):
         '''recover last transaction and apply remaining changesets'''
-        if os.path.exists(os.path.join(self.path, 'journal')):
+        if os.path.exists(os.path.join(self.path, b'journal')):
             n, node = self.recover(repo, source, opts)
             if n:
-                self.ui.status(_('%s transplanted as %s\n') %
-                                 (nodemod.short(node),
-                                  nodemod.short(n)))
+                self.ui.status(
+                    _(b'%s transplanted as %s\n')
+                    % (nodemod.short(node), nodemod.short(n))
+                )
             else:
-                self.ui.status(_('%s skipped due to empty diff\n')
-                               % (nodemod.short(node),))
-        seriespath = os.path.join(self.path, 'series')
+                self.ui.status(
+                    _(b'%s skipped due to empty diff\n')
+                    % (nodemod.short(node),)
+                )
+        seriespath = os.path.join(self.path, b'series')
         if not os.path.exists(seriespath):
             self.transplants.write()
             return
@@ -373,33 +419,41 @@
         merge = False
 
         if not user or not date or not message or not parents[0]:
-            raise error.Abort(_('transplant log file is corrupt'))
+            raise error.Abort(_(b'transplant log file is corrupt'))
 
         parent = parents[0]
         if len(parents) > 1:
-            if opts.get('parent'):
-                parent = source.lookup(opts['parent'])
+            if opts.get(b'parent'):
+                parent = source.lookup(opts[b'parent'])
                 if parent not in parents:
-                    raise error.Abort(_('%s is not a parent of %s') %
-                                      (nodemod.short(parent),
-                                       nodemod.short(node)))
+                    raise error.Abort(
+                        _(b'%s is not a parent of %s')
+                        % (nodemod.short(parent), nodemod.short(node))
+                    )
             else:
                 merge = True
 
-        extra = {'transplant_source': node}
+        extra = {b'transplant_source': node}
         try:
             p1 = repo.dirstate.p1()
             if p1 != parent:
-                raise error.Abort(_('working directory not at transplant '
-                                   'parent %s') % nodemod.hex(parent))
+                raise error.Abort(
+                    _(b'working directory not at transplant parent %s')
+                    % nodemod.hex(parent)
+                )
             if merge:
                 repo.setparents(p1, parents[1])
             modified, added, removed, deleted = repo.status()[:4]
             if merge or modified or added or removed or deleted:
-                n = repo.commit(message, user, date, extra=extra,
-                                editor=self.getcommiteditor())
+                n = repo.commit(
+                    message,
+                    user,
+                    date,
+                    extra=extra,
+                    editor=self.getcommiteditor(),
+                )
                 if not n:
-                    raise error.Abort(_('commit failed'))
+                    raise error.Abort(_(b'commit failed'))
                 if not merge:
                     self.transplants.set(n, node)
             else:
@@ -412,12 +466,24 @@
             # this is kept only to reduce changes in a patch.
             pass
 
+    def stop(self, ui, repo):
+        """logic to stop an interrupted transplant"""
+        if self.canresume():
+            startctx = repo[b'.']
+            hg.updaterepo(repo, startctx.node(), overwrite=True)
+            ui.status(_(b"stopped the interrupted transplant\n"))
+            ui.status(
+                _(b"working directory is now at %s\n") % startctx.hex()[:12]
+            )
+            self.unlog()
+            return 0
+
     def readseries(self):
         nodes = []
         merges = []
         cur = nodes
-        for line in self.opener.read('series').splitlines():
-            if line.startswith('# Merges'):
+        for line in self.opener.read(b'series').splitlines():
+            if line.startswith(b'# Merges'):
                 cur = merges
                 continue
             cur.append(revlog.bin(line))
@@ -430,13 +496,13 @@
 
         if not os.path.isdir(self.path):
             os.mkdir(self.path)
-        series = self.opener('series', 'w')
+        series = self.opener(b'series', b'w')
         for rev in sorted(revmap):
-            series.write(nodemod.hex(revmap[rev]) + '\n')
+            series.write(nodemod.hex(revmap[rev]) + b'\n')
         if merges:
-            series.write('# Merges\n')
+            series.write(b'# Merges\n')
             for m in merges:
-                series.write(nodemod.hex(m) + '\n')
+                series.write(nodemod.hex(m) + b'\n')
         series.close()
 
     def parselog(self, fp):
@@ -449,42 +515,44 @@
         for line in fp.read().splitlines():
             if inmsg:
                 message.append(line)
-            elif line.startswith('# User '):
+            elif line.startswith(b'# User '):
                 user = line[7:]
-            elif line.startswith('# Date '):
+            elif line.startswith(b'# Date '):
                 date = line[7:]
-            elif line.startswith('# Node ID '):
+            elif line.startswith(b'# Node ID '):
                 node = revlog.bin(line[10:])
-            elif line.startswith('# Parent '):
+            elif line.startswith(b'# Parent '):
                 parents.append(revlog.bin(line[9:]))
-            elif not line.startswith('# '):
+            elif not line.startswith(b'# '):
                 inmsg = True
                 message.append(line)
         if None in (user, date):
-            raise error.Abort(_("filter corrupted changeset (no user or date)"))
-        return (node, user, date, '\n'.join(message), parents)
+            raise error.Abort(
+                _(b"filter corrupted changeset (no user or date)")
+            )
+        return (node, user, date, b'\n'.join(message), parents)
 
     def log(self, user, date, message, p1, p2, merge=False):
         '''journal changelog metadata for later recover'''
 
         if not os.path.isdir(self.path):
             os.mkdir(self.path)
-        fp = self.opener('journal', 'w')
-        fp.write('# User %s\n' % user)
-        fp.write('# Date %s\n' % date)
-        fp.write('# Node ID %s\n' % nodemod.hex(p2))
-        fp.write('# Parent ' + nodemod.hex(p1) + '\n')
+        fp = self.opener(b'journal', b'w')
+        fp.write(b'# User %s\n' % user)
+        fp.write(b'# Date %s\n' % date)
+        fp.write(b'# Node ID %s\n' % nodemod.hex(p2))
+        fp.write(b'# Parent ' + nodemod.hex(p1) + b'\n')
         if merge:
-            fp.write('# Parent ' + nodemod.hex(p2) + '\n')
-        fp.write(message.rstrip() + '\n')
+            fp.write(b'# Parent ' + nodemod.hex(p2) + b'\n')
+        fp.write(message.rstrip() + b'\n')
         fp.close()
 
     def readlog(self):
-        return self.parselog(self.opener('journal'))
+        return self.parselog(self.opener(b'journal'))
 
     def unlog(self):
         '''remove changelog journal'''
-        absdst = os.path.join(self.path, 'journal')
+        absdst = os.path.join(self.path, b'journal')
         if os.path.exists(absdst):
             os.unlink(absdst)
 
@@ -495,77 +563,120 @@
             if source.changelog.parents(node)[1] != revlog.nullid:
                 return False
             extra = source.changelog.read(node)[5]
-            cnode = extra.get('transplant_source')
+            cnode = extra.get(b'transplant_source')
             if cnode and self.applied(repo, cnode, root):
                 return False
             return True
 
         return matchfn
 
+
 def hasnode(repo, node):
     try:
         return repo.changelog.rev(node) is not None
     except error.StorageError:
         return False
 
+
 def browserevs(ui, repo, nodes, opts):
     '''interactively transplant changesets'''
     displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
     transplants = []
     merges = []
-    prompt = _('apply changeset? [ynmpcq?]:'
-               '$$ &yes, transplant this changeset'
-               '$$ &no, skip this changeset'
-               '$$ &merge at this changeset'
-               '$$ show &patch'
-               '$$ &commit selected changesets'
-               '$$ &quit and cancel transplant'
-               '$$ &? (show this help)')
+    prompt = _(
+        b'apply changeset? [ynmpcq?]:'
+        b'$$ &yes, transplant this changeset'
+        b'$$ &no, skip this changeset'
+        b'$$ &merge at this changeset'
+        b'$$ show &patch'
+        b'$$ &commit selected changesets'
+        b'$$ &quit and cancel transplant'
+        b'$$ &? (show this help)'
+    )
     for node in nodes:
         displayer.show(repo[node])
         action = None
         while not action:
             choice = ui.promptchoice(prompt)
-            action = 'ynmpcq?'[choice:choice + 1]
-            if action == '?':
+            action = b'ynmpcq?'[choice : choice + 1]
+            if action == b'?':
                 for c, t in ui.extractchoices(prompt)[1]:
-                    ui.write('%s: %s\n' % (c, t))
+                    ui.write(b'%s: %s\n' % (c, t))
                 action = None
-            elif action == 'p':
+            elif action == b'p':
                 parent = repo.changelog.parents(node)[0]
                 for chunk in patch.diff(repo, parent, node):
                     ui.write(chunk)
                 action = None
-        if action == 'y':
+        if action == b'y':
             transplants.append(node)
-        elif action == 'm':
+        elif action == b'm':
             merges.append(node)
-        elif action == 'c':
+        elif action == b'c':
             break
-        elif action == 'q':
+        elif action == b'q':
             transplants = ()
             merges = ()
             break
     displayer.close()
     return (transplants, merges)
 
-@command('transplant',
-    [('s', 'source', '', _('transplant changesets from REPO'), _('REPO')),
-    ('b', 'branch', [], _('use this source changeset as head'), _('REV')),
-    ('a', 'all', None, _('pull all changesets up to the --branch revisions')),
-    ('p', 'prune', [], _('skip over REV'), _('REV')),
-    ('m', 'merge', [], _('merge at REV'), _('REV')),
-    ('', 'parent', '',
-     _('parent to choose when transplanting merge'), _('REV')),
-    ('e', 'edit', False, _('invoke editor on commit messages')),
-    ('', 'log', None, _('append transplant info to log message')),
-    ('c', 'continue', None, _('continue last transplant session '
-                              'after fixing conflicts')),
-    ('', 'filter', '',
-     _('filter changesets through command'), _('CMD'))],
-    _('hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] '
-      '[-m REV] [REV]...'),
-    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
+
+@command(
+    b'transplant',
+    [
+        (
+            b's',
+            b'source',
+            b'',
+            _(b'transplant changesets from REPO'),
+            _(b'REPO'),
+        ),
+        (
+            b'b',
+            b'branch',
+            [],
+            _(b'use this source changeset as head'),
+            _(b'REV'),
+        ),
+        (
+            b'a',
+            b'all',
+            None,
+            _(b'pull all changesets up to the --branch revisions'),
+        ),
+        (b'p', b'prune', [], _(b'skip over REV'), _(b'REV')),
+        (b'm', b'merge', [], _(b'merge at REV'), _(b'REV')),
+        (
+            b'',
+            b'parent',
+            b'',
+            _(b'parent to choose when transplanting merge'),
+            _(b'REV'),
+        ),
+        (b'e', b'edit', False, _(b'invoke editor on commit messages')),
+        (b'', b'log', None, _(b'append transplant info to log message')),
+        (b'', b'stop', False, _(b'stop interrupted transplant')),
+        (
+            b'c',
+            b'continue',
+            None,
+            _(b'continue last transplant session after fixing conflicts'),
+        ),
+        (
+            b'',
+            b'filter',
+            b'',
+            _(b'filter changesets through command'),
+            _(b'CMD'),
+        ),
+    ],
+    _(
+        b'hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] '
+        b'[-m REV] [REV]...'
+    ),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+)
 def transplant(ui, repo, *revs, **opts):
     '''transplant changesets from another branch
 
@@ -620,6 +731,7 @@
     with repo.wlock():
         return _dotransplant(ui, repo, *revs, **opts)
 
+
 def _dotransplant(ui, repo, *revs, **opts):
     def incwalk(repo, csets, match=util.always):
         for node in csets:
@@ -641,85 +753,115 @@
                 yield node
 
     def checkopts(opts, revs):
-        if opts.get('continue'):
-            if opts.get('branch') or opts.get('all') or opts.get('merge'):
-                raise error.Abort(_('--continue is incompatible with '
-                                   '--branch, --all and --merge'))
+        if opts.get(b'continue'):
+            if opts.get(b'branch') or opts.get(b'all') or opts.get(b'merge'):
+                raise error.Abort(
+                    _(
+                        b'--continue is incompatible with '
+                        b'--branch, --all and --merge'
+                    )
+                )
+            return
+        if opts.get(b'stop'):
+            if opts.get(b'branch') or opts.get(b'all') or opts.get(b'merge'):
+                raise error.Abort(
+                    _(
+                        b'--stop is incompatible with '
+                        b'--branch, --all and --merge'
+                    )
+                )
             return
-        if not (opts.get('source') or revs or
-                opts.get('merge') or opts.get('branch')):
-            raise error.Abort(_('no source URL, branch revision, or revision '
-                               'list provided'))
-        if opts.get('all'):
-            if not opts.get('branch'):
-                raise error.Abort(_('--all requires a branch revision'))
+        if not (
+            opts.get(b'source')
+            or revs
+            or opts.get(b'merge')
+            or opts.get(b'branch')
+        ):
+            raise error.Abort(
+                _(
+                    b'no source URL, branch revision, or revision '
+                    b'list provided'
+                )
+            )
+        if opts.get(b'all'):
+            if not opts.get(b'branch'):
+                raise error.Abort(_(b'--all requires a branch revision'))
             if revs:
-                raise error.Abort(_('--all is incompatible with a '
-                                   'revision list'))
+                raise error.Abort(
+                    _(b'--all is incompatible with a revision list')
+                )
 
     opts = pycompat.byteskwargs(opts)
     checkopts(opts, revs)
 
-    if not opts.get('log'):
+    if not opts.get(b'log'):
         # deprecated config: transplant.log
-        opts['log'] = ui.config('transplant', 'log')
-    if not opts.get('filter'):
+        opts[b'log'] = ui.config(b'transplant', b'log')
+    if not opts.get(b'filter'):
         # deprecated config: transplant.filter
-        opts['filter'] = ui.config('transplant', 'filter')
+        opts[b'filter'] = ui.config(b'transplant', b'filter')
 
     tp = transplanter(ui, repo, opts)
 
     p1 = repo.dirstate.p1()
     if len(repo) > 0 and p1 == revlog.nullid:
-        raise error.Abort(_('no revision checked out'))
-    if opts.get('continue'):
+        raise error.Abort(_(b'no revision checked out'))
+    if opts.get(b'continue'):
         if not tp.canresume():
-            raise error.Abort(_('no transplant to continue'))
+            raise error.Abort(_(b'no transplant to continue'))
+    elif opts.get(b'stop'):
+        if not tp.canresume():
+            raise error.Abort(_(b'no interrupted transplant found'))
+        return tp.stop(ui, repo)
     else:
         cmdutil.checkunfinished(repo)
         cmdutil.bailifchanged(repo)
 
-    sourcerepo = opts.get('source')
+    sourcerepo = opts.get(b'source')
     if sourcerepo:
         peer = hg.peer(repo, opts, ui.expandpath(sourcerepo))
-        heads = pycompat.maplist(peer.lookup, opts.get('branch', ()))
+        heads = pycompat.maplist(peer.lookup, opts.get(b'branch', ()))
         target = set(heads)
         for r in revs:
             try:
                 target.add(peer.lookup(r))
             except error.RepoError:
                 pass
-        source, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, peer,
-                                    onlyheads=sorted(target), force=True)
+        source, csets, cleanupfn = bundlerepo.getremotechanges(
+            ui, repo, peer, onlyheads=sorted(target), force=True
+        )
     else:
         source = repo
-        heads = pycompat.maplist(source.lookup, opts.get('branch', ()))
+        heads = pycompat.maplist(source.lookup, opts.get(b'branch', ()))
         cleanupfn = None
 
     try:
-        if opts.get('continue'):
+        if opts.get(b'continue'):
             tp.resume(repo, source, opts)
             return
 
         tf = tp.transplantfilter(repo, source, p1)
-        if opts.get('prune'):
-            prune = set(source[r].node()
-                        for r in scmutil.revrange(source, opts.get('prune')))
+        if opts.get(b'prune'):
+            prune = set(
+                source[r].node()
+                for r in scmutil.revrange(source, opts.get(b'prune'))
+            )
             matchfn = lambda x: tf(x) and x not in prune
         else:
             matchfn = tf
-        merges = pycompat.maplist(source.lookup, opts.get('merge', ()))
+        merges = pycompat.maplist(source.lookup, opts.get(b'merge', ()))
         revmap = {}
         if revs:
             for r in scmutil.revrange(source, revs):
                 revmap[int(r)] = source[r].node()
-        elif opts.get('all') or not merges:
+        elif opts.get(b'all') or not merges:
             if source != repo:
                 alltransplants = incwalk(source, csets, match=matchfn)
             else:
-                alltransplants = transplantwalk(source, p1, heads,
-                                                match=matchfn)
-            if opts.get('all'):
+                alltransplants = transplantwalk(
+                    source, p1, heads, match=matchfn
+                )
+            if opts.get(b'all'):
                 revs = alltransplants
             else:
                 revs, newmerges = browserevs(ui, source, alltransplants, opts)
@@ -734,9 +876,19 @@
         if cleanupfn:
             cleanupfn()
 
+
+def continuecmd(ui, repo):
+    """logic to resume an interrupted transplant using
+    'hg continue'"""
+    with repo.wlock():
+        tp = transplanter(ui, repo, {})
+        return tp.resume(repo, repo, {})
+
+
 revsetpredicate = registrar.revsetpredicate()
 
-@revsetpredicate('transplanted([set])')
+
+@revsetpredicate(b'transplanted([set])')
 def revsettransplanted(repo, subset, x):
     """Transplanted changesets in set, or all transplanted changesets.
     """
@@ -744,26 +896,36 @@
         s = revset.getset(repo, subset, x)
     else:
         s = subset
-    return smartset.baseset([r for r in s if
-        repo[r].extra().get('transplant_source')])
+    return smartset.baseset(
+        [r for r in s if repo[r].extra().get(b'transplant_source')]
+    )
+
 
 templatekeyword = registrar.templatekeyword()
 
-@templatekeyword('transplanted', requires={'ctx'})
+
+@templatekeyword(b'transplanted', requires={b'ctx'})
 def kwtransplanted(context, mapping):
     """String. The node identifier of the transplanted
     changeset if any."""
-    ctx = context.resource(mapping, 'ctx')
-    n = ctx.extra().get('transplant_source')
-    return n and nodemod.hex(n) or ''
+    ctx = context.resource(mapping, b'ctx')
+    n = ctx.extra().get(b'transplant_source')
+    return n and nodemod.hex(n) or b''
+
 
 def extsetup(ui):
-    statemod.addunfinished (
-        'transplant', fname='transplant/journal', clearable=True,
-        statushint=_('To continue:    hg transplant --continue\n'
-                     'To abort:       hg update'),
-        cmdhint=_("use 'hg transplant --continue' or 'hg update' to abort")
+    statemod.addunfinished(
+        b'transplant',
+        fname=b'transplant/journal',
+        clearable=True,
+        continuefunc=continuecmd,
+        statushint=_(
+            b'To continue:    hg transplant --continue\n'
+            b'To stop:        hg transplant --stop'
+        ),
+        cmdhint=_(b"use 'hg transplant --continue' or 'hg transplant --stop'"),
     )
 
+
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = [revsettransplanted, kwtransplanted]
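
The transplant hunks above are not only reformatting: statemod.addunfinished()
now receives continuefunc, which is what lets 'hg continue' resume an
interrupted transplant, and the status hints gain a '--stop' variant backed by
the new tp.stop() path. As a rough sketch of the same registration pattern
(the extension name, journal path and resume logic below are hypothetical and
not part of this changeset), a third-party extension could make its own
operation interruptible like this:

    from mercurial import state as statemod
    from mercurial.i18n import _

    def continuefunc(ui, repo):
        # Hypothetical resume hook: a real extension would reread its
        # journal from .hg/myfixup/journal and finish the operation.
        ui.status(_(b'resuming myfixup\n'))
        return 0

    def extsetup(ui):
        statemod.addunfinished(
            b'myfixup',                  # operation name shown to the user
            fname=b'myfixup/journal',    # state file under .hg/
            clearable=True,              # 'hg update' may clear the state
            continuefunc=continuefunc,   # enables 'hg continue'
            statushint=_(b'To continue:    hg myfixup --continue'),
            cmdhint=_(b"use 'hg myfixup --continue'"),
        )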
--- a/hgext/uncommit.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/uncommit.py	Mon Oct 21 11:09:48 2019 -0400
@@ -42,20 +42,23 @@
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('experimental', 'uncommitondirtywdir',
-    default=False,
+configitem(
+    b'experimental', b'uncommitondirtywdir', default=False,
 )
-configitem('experimental', 'uncommit.keep',
-    default=False,
+configitem(
+    b'experimental', b'uncommit.keep', default=False,
 )
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
+
 
-def _commitfiltered(repo, ctx, match, keepcommit):
+def _commitfiltered(
+    repo, ctx, match, keepcommit, message=None, user=None, date=None
+):
     """Recommit ctx with changed files not in match. Return the new
     node identifier, or None if nothing changed.
     """
@@ -72,41 +75,70 @@
     if not keepcommit:
         return ctx.p1().node()
 
-    files = (initialfiles - exclude)
+    files = initialfiles - exclude
     # Filter copies
     copied = copiesmod.pathcopies(base, ctx)
-    copied = dict((dst, src) for dst, src in copied.iteritems()
-                  if dst in files)
+    copied = dict(
+        (dst, src) for dst, src in pycompat.iteritems(copied) if dst in files
+    )
+
     def filectxfn(repo, memctx, path, contentctx=ctx, redirect=()):
         if path not in contentctx:
             return None
         fctx = contentctx[path]
-        mctx = context.memfilectx(repo, memctx, fctx.path(), fctx.data(),
-                                  fctx.islink(),
-                                  fctx.isexec(),
-                                  copysource=copied.get(path))
+        mctx = context.memfilectx(
+            repo,
+            memctx,
+            fctx.path(),
+            fctx.data(),
+            fctx.islink(),
+            fctx.isexec(),
+            copysource=copied.get(path),
+        )
         return mctx
 
     if not files:
-        repo.ui.status(_("note: keeping empty commit\n"))
+        repo.ui.status(_(b"note: keeping empty commit\n"))
+
+    if message is None:
+        message = ctx.description()
+    if not user:
+        user = ctx.user()
+    if not date:
+        date = ctx.date()
 
-    new = context.memctx(repo,
-                         parents=[base.node(), node.nullid],
-                         text=ctx.description(),
-                         files=files,
-                         filectxfn=filectxfn,
-                         user=ctx.user(),
-                         date=ctx.date(),
-                         extra=ctx.extra())
+    new = context.memctx(
+        repo,
+        parents=[base.node(), node.nullid],
+        text=message,
+        files=files,
+        filectxfn=filectxfn,
+        user=user,
+        date=date,
+        extra=ctx.extra(),
+    )
     return repo.commitctx(new)
 
-@command('uncommit',
-    [('', 'keep', None, _('allow an empty commit after uncommiting')),
-     ('', 'allow-dirty-working-copy', False,
-    _('allow uncommit with outstanding changes'))
-    ] + commands.walkopts,
-    _('[OPTION]... [FILE]...'),
-    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
+
+@command(
+    b'uncommit',
+    [
+        (b'', b'keep', None, _(b'allow an empty commit after uncommitting')),
+        (
+            b'',
+            b'allow-dirty-working-copy',
+            False,
+            _(b'allow uncommit with outstanding changes'),
+        ),
+        (b'n', b'note', b'', _(b'store a note on uncommit'), _(b'TEXT')),
+    ]
+    + commands.walkopts
+    + commands.commitopts
+    + commands.commitopts2
+    + commands.commitopts3,
+    _(b'[OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+)
 def uncommit(ui, repo, *pats, **opts):
     """uncommit part or all of a local changeset
 
@@ -120,19 +152,25 @@
     """
     opts = pycompat.byteskwargs(opts)
 
+    cmdutil.checknotesize(ui, opts)
+    cmdutil.resolvecommitoptions(ui, opts)
+
     with repo.wlock(), repo.lock():
 
         m, a, r, d = repo.status()[:4]
         isdirtypath = any(set(m + a + r + d) & set(pats))
-        allowdirtywcopy = (opts['allow_dirty_working_copy'] or
-                    repo.ui.configbool('experimental', 'uncommitondirtywdir'))
+        allowdirtywcopy = opts[
+            b'allow_dirty_working_copy'
+        ] or repo.ui.configbool(b'experimental', b'uncommitondirtywdir')
         if not allowdirtywcopy and (not pats or isdirtypath):
-            cmdutil.bailifchanged(repo, hint=_('requires '
-                                '--allow-dirty-working-copy to uncommit'))
-        old = repo['.']
-        rewriteutil.precheck(repo, [old.rev()], 'uncommit')
+            cmdutil.bailifchanged(
+                repo,
+                hint=_(b'requires --allow-dirty-working-copy to uncommit'),
+            )
+        old = repo[b'.']
+        rewriteutil.precheck(repo, [old.rev()], b'uncommit')
         if len(old.parents()) > 1:
-            raise error.Abort(_("cannot uncommit merge changeset"))
+            raise error.Abort(_(b"cannot uncommit merge changeset"))
 
         match = scmutil.match(old, pats, opts)
 
@@ -151,26 +189,43 @@
 
             for f in sorted(badfiles):
                 if f in s.clean:
-                    hint = _(b"file was not changed in working directory "
-                             b"parent")
+                    hint = _(
+                        b"file was not changed in working directory parent"
+                    )
                 elif repo.wvfs.exists(f):
                     hint = _(b"file was untracked in working directory parent")
                 else:
                     hint = _(b"file does not exist")
 
-                raise error.Abort(_(b'cannot uncommit "%s"')
-                                  % scmutil.getuipathfn(repo)(f), hint=hint)
+                raise error.Abort(
+                    _(b'cannot uncommit "%s"') % scmutil.getuipathfn(repo)(f),
+                    hint=hint,
+                )
 
-        with repo.transaction('uncommit'):
+        with repo.transaction(b'uncommit'):
+            if not (opts[b'message'] or opts[b'logfile']):
+                opts[b'message'] = old.description()
+            message = cmdutil.logmessage(ui, opts)
+
             keepcommit = pats
             if not keepcommit:
-                if opts.get('keep') is not None:
-                    keepcommit = opts.get('keep')
+                if opts.get(b'keep') is not None:
+                    keepcommit = opts.get(b'keep')
                 else:
-                    keepcommit = ui.configbool('experimental', 'uncommit.keep')
-            newid = _commitfiltered(repo, old, match, keepcommit)
+                    keepcommit = ui.configbool(
+                        b'experimental', b'uncommit.keep'
+                    )
+            newid = _commitfiltered(
+                repo,
+                old,
+                match,
+                keepcommit,
+                message=message,
+                user=opts.get(b'user'),
+                date=opts.get(b'date'),
+            )
             if newid is None:
-                ui.status(_("nothing to uncommit\n"))
+                ui.status(_(b"nothing to uncommit\n"))
                 return 1
 
             mapping = {}
@@ -184,15 +239,21 @@
             with repo.dirstate.parentchange():
                 scmutil.movedirstate(repo, repo[newid], match)
 
-            scmutil.cleanupnodes(repo, mapping, 'uncommit', fixphase=True)
+            scmutil.cleanupnodes(repo, mapping, b'uncommit', fixphase=True)
+
 
 def predecessormarkers(ctx):
     """yields the obsolete markers marking the given changeset as a successor"""
     for data in ctx.repo().obsstore.predecessors.get(ctx.node(), ()):
         yield obsutil.marker(ctx.repo(), data)
 
-@command('unamend', [], helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
-         helpbasic=True)
+
+@command(
+    b'unamend',
+    [],
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+    helpbasic=True,
+)
 def unamend(ui, repo, **opts):
     """undo the most recent amend operation on a current changeset
 
@@ -203,17 +264,17 @@
     """
 
     unfi = repo.unfiltered()
-    with repo.wlock(), repo.lock(), repo.transaction('unamend'):
+    with repo.wlock(), repo.lock(), repo.transaction(b'unamend'):
 
         # identify the commit from which to unamend
-        curctx = repo['.']
+        curctx = repo[b'.']
 
-        rewriteutil.precheck(repo, [curctx.rev()], 'unamend')
+        rewriteutil.precheck(repo, [curctx.rev()], b'unamend')
 
         # identify the commit to which to unamend
         markers = list(predecessormarkers(curctx))
         if len(markers) != 1:
-            e = _("changeset must have one predecessor, found %i predecessors")
+            e = _(b"changeset must have one predecessor, found %i predecessors")
             raise error.Abort(e % len(markers))
 
         prednode = markers[0].prednode()
@@ -222,7 +283,7 @@
         # add an extra so that we get a new hash
         # note: allowing unamend to undo an unamend is an intentional feature
         extras = predctx.extra()
-        extras['unamend_source'] = curctx.hex()
+        extras[b'unamend_source'] = curctx.hex()
 
         def filectxfn(repo, ctx_, path):
             try:
@@ -231,14 +292,16 @@
                 return None
 
         # Make a new commit same as predctx
-        newctx = context.memctx(repo,
-                                parents=(predctx.p1(), predctx.p2()),
-                                text=predctx.description(),
-                                files=predctx.files(),
-                                filectxfn=filectxfn,
-                                user=predctx.user(),
-                                date=predctx.date(),
-                                extra=extras)
+        newctx = context.memctx(
+            repo,
+            parents=(predctx.p1(), predctx.p2()),
+            text=predctx.description(),
+            files=predctx.files(),
+            filectxfn=filectxfn,
+            user=predctx.user(),
+            date=predctx.date(),
+            extra=extras,
+        )
         newprednode = repo.commitctx(newctx)
         newpredctx = repo[newprednode]
         dirstate = repo.dirstate
@@ -247,4 +310,4 @@
             scmutil.movedirstate(repo, newpredctx)
 
         mapping = {curctx.node(): (newprednode,)}
-        scmutil.cleanupnodes(repo, mapping, 'unamend', fixphase=True)
+        scmutil.cleanupnodes(repo, mapping, b'unamend', fixphase=True)
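
Beyond the mechanical byte-prefixing, the uncommit hunks above teach
_commitfiltered() to take message, user and date (falling back to the old
changeset's values when unset) and grow the command's option table with the
standard commit options plus -n/--note, validated through
cmdutil.checknotesize() and cmdutil.resolvecommitoptions(). An illustrative
session with the new flags (file names and messages are made up):

    $ hg uncommit file.py                        # move file.py back to wdir
    $ hg uncommit -m 'narrower change'           # uncommit all, reword
    $ hg uncommit --note 'split out for review'  # attach a note on uncommit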
--- a/hgext/win32mbcs.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/win32mbcs.py	Mon Oct 21 11:09:48 2019 -0400
@@ -50,6 +50,7 @@
 import sys
 
 from mercurial.i18n import _
+from mercurial.pycompat import getattr, setattr
 from mercurial import (
     encoding,
     error,
@@ -61,25 +62,26 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
 # Encoding.encoding may be updated by --encoding option.
 # Use a lambda to delay the resolution.
-configitem('win32mbcs', 'encoding',
-    default=lambda: encoding.encoding,
+configitem(
+    b'win32mbcs', b'encoding', default=lambda: encoding.encoding,
 )
 
-_encoding = None                                # see extsetup
+_encoding = None  # see extsetup
+
 
 def decode(arg):
     if isinstance(arg, str):
         uarg = arg.decode(_encoding)
         if arg == uarg.encode(_encoding):
             return uarg
-        raise UnicodeError("Not local encoding")
+        raise UnicodeError(b"Not local encoding")
     elif isinstance(arg, tuple):
         return tuple(map(decode, arg))
     elif isinstance(arg, list):
@@ -89,6 +91,7 @@
             arg[k] = decode(v)
     return arg
 
+
 def encode(arg):
     if isinstance(arg, pycompat.unicode):
         return arg.encode(_encoding)
@@ -101,13 +104,14 @@
             arg[k] = encode(v)
     return arg
 
+
 def appendsep(s):
     # ensure the path ends with os.sep, appending it if necessary.
     try:
         us = decode(s)
     except UnicodeError:
         us = s
-    if us and us[-1] not in ':/\\':
+    if us and us[-1] not in b':/\\':
         s += pycompat.ossep
     return s
 
@@ -123,8 +127,11 @@
         # return value.
         return enc(func(*dec(args), **dec(kwds)))
     except UnicodeError:
-        raise error.Abort(_("[win32mbcs] filename conversion failed with"
-                         " %s encoding\n") % (_encoding))
+        raise error.Abort(
+            _(b"[win32mbcs] filename conversion failed with %s encoding\n")
+            % _encoding
+        )
+
 
 def wrapper(func, args, kwds):
     return basewrapper(func, pycompat.unicode, encode, decode, args, kwds)
@@ -133,29 +140,34 @@
 def reversewrapper(func, args, kwds):
     return basewrapper(func, str, decode, encode, args, kwds)
 
+
 def wrapperforlistdir(func, args, kwds):
     # Ensure 'path' argument ends with os.sep to avoid
     # misinterpreting last 0x5c of MBCS 2nd byte as path separator.
     if args:
         args = list(args)
         args[0] = appendsep(args[0])
-    if 'path' in kwds:
-        kwds['path'] = appendsep(kwds['path'])
+    if b'path' in kwds:
+        kwds[b'path'] = appendsep(kwds[b'path'])
     return func(*args, **kwds)
 
+
 def wrapname(name, wrapper):
-    module, name = name.rsplit('.', 1)
+    module, name = name.rsplit(b'.', 1)
     module = sys.modules[module]
     func = getattr(module, name)
+
     def f(*args, **kwds):
         return wrapper(func, args, kwds)
+
     f.__name__ = func.__name__
     setattr(module, name, f)
 
+
 # List of functions to be wrapped.
 # NOTE: os.path.dirname() and os.path.basename() are safe because
 #       they use the result of os.path.split()
-funcs = '''os.path.join os.path.split os.path.splitext
+funcs = b'''os.path.join os.path.split os.path.splitext
  os.path.normpath os.makedirs mercurial.util.endswithsep
  mercurial.util.splitpath mercurial.util.fscasesensitive
  mercurial.util.fspath mercurial.util.pconvert mercurial.util.normpath
@@ -165,27 +177,29 @@
 # These functions are required to be called with local encoded strings
 # because they expect their arguments to be local encoded strings and
 # cause problems with unicode strings.
-rfuncs = '''mercurial.encoding.upper mercurial.encoding.lower
+rfuncs = b'''mercurial.encoding.upper mercurial.encoding.lower
  mercurial.util._filenamebytestr'''
 
 # List of Windows specific functions to be wrapped.
-winfuncs = '''os.path.splitunc'''
+winfuncs = b'''os.path.splitunc'''
 
 # codec and alias names of sjis and big5 to be faked.
-problematic_encodings = '''big5 big5-tw csbig5 big5hkscs big5-hkscs
+problematic_encodings = b'''big5 big5-tw csbig5 big5hkscs big5-hkscs
  hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
  sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
  shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213 950 cp950 ms950 '''
 
+
 def extsetup(ui):
     # TODO: decide use of config section for this extension
-    if ((not os.path.supports_unicode_filenames) and
-        (pycompat.sysplatform != 'cygwin')):
-        ui.warn(_("[win32mbcs] cannot activate on this platform.\n"))
+    if (not os.path.supports_unicode_filenames) and (
+        pycompat.sysplatform != b'cygwin'
+    ):
+        ui.warn(_(b"[win32mbcs] cannot activate on this platform.\n"))
         return
     # determine encoding for filename
     global _encoding
-    _encoding = ui.config('win32mbcs', 'encoding')
+    _encoding = ui.config(b'win32mbcs', b'encoding')
     # fake is only for relevant environment.
     if _encoding.lower() in problematic_encodings.split():
         for f in funcs.split():
@@ -193,14 +207,15 @@
         if pycompat.iswindows:
             for f in winfuncs.split():
                 wrapname(f, wrapper)
-        wrapname("mercurial.util.listdir", wrapperforlistdir)
-        wrapname("mercurial.windows.listdir", wrapperforlistdir)
+        wrapname(b"mercurial.util.listdir", wrapperforlistdir)
+        wrapname(b"mercurial.windows.listdir", wrapperforlistdir)
         # wrap functions to be called with local byte string arguments
         for f in rfuncs.split():
             wrapname(f, reversewrapper)
         # Check sys.argv manually instead of using ui.debug() because
         # command line options are not yet applied when
         # extensions.loadall() is called.
-        if '--debug' in sys.argv:
-            ui.write(("[win32mbcs] activated with encoding: %s\n")
-                     % _encoding)
+        if b'--debug' in sys.argv:
+            ui.writenoi18n(
+                b"[win32mbcs] activated with encoding: %s\n" % _encoding
+            )
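
The win32mbcs hunks above keep the extension's central trick intact: target
functions are replaced by dotted name at runtime, with wrapname() looking the
owning module up in sys.modules and swapping in a closure. A self-contained
sketch of that wrap-by-name pattern outside Mercurial (the logging wrapper is
illustrative only):

    import os.path
    import sys

    def wrapname(dotted, wrapper):
        # Split 'pkg.mod.func' into module path and attribute name.
        modname, attr = dotted.rsplit('.', 1)
        module = sys.modules[modname]
        func = getattr(module, attr)

        def f(*args, **kwds):
            return wrapper(func, args, kwds)

        f.__name__ = func.__name__
        setattr(module, attr, f)

    def loudwrapper(func, args, kwds):
        print('calling %s' % func.__name__)
        return func(*args, **kwds)

    wrapname('os.path.join', loudwrapper)
    print(os.path.join('a', 'b'))  # prints 'calling join', then the path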
--- a/hgext/win32text.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/win32text.py	Mon Oct 21 11:09:48 2019 -0400
@@ -45,35 +45,32 @@
 
 import re
 from mercurial.i18n import _
-from mercurial.node import (
-    short,
-)
+from mercurial.node import short
 from mercurial import (
     pycompat,
     registrar,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem('win32text', 'warn',
-    default=True,
+configitem(
+    b'win32text', b'warn', default=True,
 )
 
 # regexp for a single LF without a preceding CR.
-re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)
+re_single_lf = re.compile(b'(^|[^\r])\n', re.MULTILINE)
 
-newlinestr = {'\r\n': 'CRLF', '\r': 'CR'}
-filterstr = {'\r\n': 'clever', '\r': 'mac'}
+newlinestr = {b'\r\n': b'CRLF', b'\r': b'CR'}
+filterstr = {b'\r\n': b'clever', b'\r': b'mac'}
+
 
 def checknewline(s, newline, ui=None, repo=None, filename=None):
     # warn if already has 'newline' in repository.
@@ -81,57 +78,71 @@
     # see issue 302:
     #   https://bz.mercurial-scm.org/302
     if newline in s and ui and filename and repo:
-        ui.warn(_('WARNING: %s already has %s line endings\n'
-                  'and does not need EOL conversion by the win32text plugin.\n'
-                  'Before your next commit, please reconsider your '
-                  'encode/decode settings in \nMercurial.ini or %s.\n') %
-                (filename, newlinestr[newline], repo.vfs.join('hgrc')))
+        ui.warn(
+            _(
+                b'WARNING: %s already has %s line endings\n'
+                b'and does not need EOL conversion by the win32text plugin.\n'
+                b'Before your next commit, please reconsider your '
+                b'encode/decode settings in \nMercurial.ini or %s.\n'
+            )
+            % (filename, newlinestr[newline], repo.vfs.join(b'hgrc'))
+        )
+
 
 def dumbdecode(s, cmd, **kwargs):
-    checknewline(s, '\r\n', **kwargs)
+    checknewline(s, b'\r\n', **kwargs)
     # replace single LF to CRLF
-    return re_single_lf.sub('\\1\r\n', s)
+    return re_single_lf.sub(b'\\1\r\n', s)
+
 
 def dumbencode(s, cmd):
-    return s.replace('\r\n', '\n')
+    return s.replace(b'\r\n', b'\n')
+
 
 def macdumbdecode(s, cmd, **kwargs):
-    checknewline(s, '\r', **kwargs)
-    return s.replace('\n', '\r')
+    checknewline(s, b'\r', **kwargs)
+    return s.replace(b'\n', b'\r')
+
 
 def macdumbencode(s, cmd):
-    return s.replace('\r', '\n')
+    return s.replace(b'\r', b'\n')
+
 
 def cleverdecode(s, cmd, **kwargs):
     if not stringutil.binary(s):
         return dumbdecode(s, cmd, **kwargs)
     return s
 
+
 def cleverencode(s, cmd):
     if not stringutil.binary(s):
         return dumbencode(s, cmd)
     return s
 
+
 def macdecode(s, cmd, **kwargs):
     if not stringutil.binary(s):
         return macdumbdecode(s, cmd, **kwargs)
     return s
 
+
 def macencode(s, cmd):
     if not stringutil.binary(s):
         return macdumbencode(s, cmd)
     return s
 
+
 _filters = {
-    'dumbdecode:': dumbdecode,
-    'dumbencode:': dumbencode,
-    'cleverdecode:': cleverdecode,
-    'cleverencode:': cleverencode,
-    'macdumbdecode:': macdumbdecode,
-    'macdumbencode:': macdumbencode,
-    'macdecode:': macdecode,
-    'macencode:': macencode,
-    }
+    b'dumbdecode:': dumbdecode,
+    b'dumbencode:': dumbencode,
+    b'cleverdecode:': cleverdecode,
+    b'cleverencode:': cleverencode,
+    b'macdumbdecode:': macdumbdecode,
+    b'macdumbencode:': macdumbencode,
+    b'macdecode:': macdecode,
+    b'macencode:': macencode,
+}
+
 
 def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
     halt = False
@@ -141,9 +152,10 @@
     # newest version as canonical. this prevents us from blocking a
     # changegroup that contains an unacceptable commit followed later
     # by a commit that fixes the problem.
-    tip = repo['tip']
-    for rev in pycompat.xrange(repo.changelog.tiprev(),
-                               repo[node].rev() - 1, -1):
+    tip = repo[b'tip']
+    for rev in pycompat.xrange(
+        repo.changelog.tiprev(), repo[node].rev() - 1, -1
+    ):
         c = repo[rev]
         for f in c.files():
             if f in seen or f not in tip or f not in c:
@@ -152,44 +164,61 @@
             data = c[f].data()
             if not stringutil.binary(data) and newline in data:
                 if not halt:
-                    ui.warn(_('attempt to commit or push text file(s) '
-                              'using %s line endings\n') %
-                              newlinestr[newline])
-                ui.warn(_('in %s: %s\n') % (short(c.node()), f))
+                    ui.warn(
+                        _(
+                            b'attempt to commit or push text file(s) '
+                            b'using %s line endings\n'
+                        )
+                        % newlinestr[newline]
+                    )
+                ui.warn(_(b'in %s: %s\n') % (short(c.node()), f))
                 halt = True
-    if halt and hooktype == 'pretxnchangegroup':
+    if halt and hooktype == b'pretxnchangegroup':
         crlf = newlinestr[newline].lower()
         filter = filterstr[newline]
-        ui.warn(_('\nTo prevent this mistake in your local repository,\n'
-                  'add to Mercurial.ini or .hg/hgrc:\n'
-                  '\n'
-                  '[hooks]\n'
-                  'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
-                  '\n'
-                  'and also consider adding:\n'
-                  '\n'
-                  '[extensions]\n'
-                  'win32text =\n'
-                  '[encode]\n'
-                  '** = %sencode:\n'
-                  '[decode]\n'
-                  '** = %sdecode:\n') % (crlf, crlf, filter, filter))
+        ui.warn(
+            _(
+                b'\nTo prevent this mistake in your local repository,\n'
+                b'add to Mercurial.ini or .hg/hgrc:\n'
+                b'\n'
+                b'[hooks]\n'
+                b'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
+                b'\n'
+                b'and also consider adding:\n'
+                b'\n'
+                b'[extensions]\n'
+                b'win32text =\n'
+                b'[encode]\n'
+                b'** = %sencode:\n'
+                b'[decode]\n'
+                b'** = %sdecode:\n'
+            )
+            % (crlf, crlf, filter, filter)
+        )
     return halt
 
+
 def forbidcrlf(ui, repo, hooktype, node, **kwargs):
-    return forbidnewline(ui, repo, hooktype, node, '\r\n', **kwargs)
+    return forbidnewline(ui, repo, hooktype, node, b'\r\n', **kwargs)
+
 
 def forbidcr(ui, repo, hooktype, node, **kwargs):
-    return forbidnewline(ui, repo, hooktype, node, '\r', **kwargs)
+    return forbidnewline(ui, repo, hooktype, node, b'\r', **kwargs)
+
 
 def reposetup(ui, repo):
     if not repo.local():
         return
-    for name, fn in _filters.iteritems():
+    for name, fn in pycompat.iteritems(_filters):
         repo.adddatafilter(name, fn)
 
+
 def extsetup(ui):
     # deprecated config: win32text.warn
-    if ui.configbool('win32text', 'warn'):
-        ui.warn(_("win32text is deprecated: "
-                  "https://mercurial-scm.org/wiki/Win32TextExtension\n"))
+    if ui.configbool(b'win32text', b'warn'):
+        ui.warn(
+            _(
+                b"win32text is deprecated: "
+                b"https://mercurial-scm.org/wiki/Win32TextExtension\n"
+            )
+        )
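
The win32text filters above only rewrite a bare LF that is not already part
of a CRLF pair, which is exactly what the re_single_lf regexp encodes. A
standalone round-trip check of that behaviour (bytes literals as in the hunks
above):

    import re

    re_single_lf = re.compile(b'(^|[^\r])\n', re.MULTILINE)

    def dumbdecode(s):
        # LF -> CRLF on the way out, leaving existing CRLF pairs alone.
        return re_single_lf.sub(b'\\1\r\n', s)

    def dumbencode(s):
        # CRLF -> LF on the way back into the repository.
        return s.replace(b'\r\n', b'\n')

    sample = b'one\ntwo\r\nthree\n'
    assert dumbdecode(sample) == b'one\r\ntwo\r\nthree\r\n'
    assert dumbencode(dumbdecode(sample)) == b'one\ntwo\nthree\n'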
--- a/hgext/zeroconf/Zeroconf.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/zeroconf/Zeroconf.py	Mon Oct 21 11:09:48 2019 -0400
@@ -76,9 +76,9 @@
                  ensure names end in '.local.'
                  timeout on receiving socket for clean shutdown"""
 
-__author__ = "Paul Scott-Murphy"
-__email__ = "paul at scott dash murphy dot com"
-__version__ = "0.12"
+__author__ = b"Paul Scott-Murphy"
+__email__ = b"paul at scott dash murphy dot com"
+__version__ = b"0.12"
 
 import errno
 import itertools
@@ -91,11 +91,11 @@
 
 from mercurial import pycompat
 
-__all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
+__all__ = [b"Zeroconf", b"ServiceInfo", b"ServiceBrowser"]
 
 # hook for threads
 
-globals()['_GLOBAL_DONE'] = 0
+globals()[b'_GLOBAL_DONE'] = 0
 
 # Some timing constants
 
@@ -110,23 +110,23 @@
 _MDNS_ADDR = r'224.0.0.251'
 _MDNS_PORT = 5353
 _DNS_PORT = 53
-_DNS_TTL = 60 * 60 # one hour default TTL
+_DNS_TTL = 60 * 60  # one hour default TTL
 
-_MAX_MSG_TYPICAL = 1460 # unused
+_MAX_MSG_TYPICAL = 1460  # unused
 _MAX_MSG_ABSOLUTE = 8972
 
-_FLAGS_QR_MASK = 0x8000 # query response mask
-_FLAGS_QR_QUERY = 0x0000 # query
-_FLAGS_QR_RESPONSE = 0x8000 # response
+_FLAGS_QR_MASK = 0x8000  # query response mask
+_FLAGS_QR_QUERY = 0x0000  # query
+_FLAGS_QR_RESPONSE = 0x8000  # response
 
-_FLAGS_AA = 0x0400 # Authoritative answer
-_FLAGS_TC = 0x0200 # Truncated
-_FLAGS_RD = 0x0100 # Recursion desired
-_FLAGS_RA = 0x8000 # Recursion available
+_FLAGS_AA = 0x0400  # Authoritative answer
+_FLAGS_TC = 0x0200  # Truncated
+_FLAGS_RD = 0x0100  # Recursion desired
+_FLAGS_RA = 0x8000  # Recursion available
 
-_FLAGS_Z = 0x0040 # Zero
-_FLAGS_AD = 0x0020 # Authentic data
-_FLAGS_CD = 0x0010 # Checking disabled
+_FLAGS_Z = 0x0040  # Zero
+_FLAGS_AD = 0x0020  # Authentic data
+_FLAGS_CD = 0x0010  # Checking disabled
 
 _CLASS_IN = 1
 _CLASS_CS = 2
@@ -159,65 +159,80 @@
 
 # Mapping constants to names
 
-_CLASSES = { _CLASS_IN : "in",
-             _CLASS_CS : "cs",
-             _CLASS_CH : "ch",
-             _CLASS_HS : "hs",
-             _CLASS_NONE : "none",
-             _CLASS_ANY : "any" }
+_CLASSES = {
+    _CLASS_IN: b"in",
+    _CLASS_CS: b"cs",
+    _CLASS_CH: b"ch",
+    _CLASS_HS: b"hs",
+    _CLASS_NONE: b"none",
+    _CLASS_ANY: b"any",
+}
 
-_TYPES = { _TYPE_A : "a",
-           _TYPE_NS : "ns",
-           _TYPE_MD : "md",
-           _TYPE_MF : "mf",
-           _TYPE_CNAME : "cname",
-           _TYPE_SOA : "soa",
-           _TYPE_MB : "mb",
-           _TYPE_MG : "mg",
-           _TYPE_MR : "mr",
-           _TYPE_NULL : "null",
-           _TYPE_WKS : "wks",
-           _TYPE_PTR : "ptr",
-           _TYPE_HINFO : "hinfo",
-           _TYPE_MINFO : "minfo",
-           _TYPE_MX : "mx",
-           _TYPE_TXT : "txt",
-           _TYPE_AAAA : "quada",
-           _TYPE_SRV : "srv",
-           _TYPE_ANY : "any" }
+_TYPES = {
+    _TYPE_A: b"a",
+    _TYPE_NS: b"ns",
+    _TYPE_MD: b"md",
+    _TYPE_MF: b"mf",
+    _TYPE_CNAME: b"cname",
+    _TYPE_SOA: b"soa",
+    _TYPE_MB: b"mb",
+    _TYPE_MG: b"mg",
+    _TYPE_MR: b"mr",
+    _TYPE_NULL: b"null",
+    _TYPE_WKS: b"wks",
+    _TYPE_PTR: b"ptr",
+    _TYPE_HINFO: b"hinfo",
+    _TYPE_MINFO: b"minfo",
+    _TYPE_MX: b"mx",
+    _TYPE_TXT: b"txt",
+    _TYPE_AAAA: b"quada",
+    _TYPE_SRV: b"srv",
+    _TYPE_ANY: b"any",
+}
 
 # utility functions
 
+
 def currentTimeMillis():
     """Current system time in milliseconds"""
     return time.time() * 1000
 
+
 # Exceptions
 
+
 class NonLocalNameException(Exception):
     pass
 
+
 class NonUniqueNameException(Exception):
     pass
 
+
 class NamePartTooLongException(Exception):
     pass
 
+
 class AbstractMethodException(Exception):
     pass
 
+
 class BadTypeInNameException(Exception):
     pass
 
+
 class BadDomainName(Exception):
     def __init__(self, pos):
-        Exception.__init__(self, "at position %s" % pos)
+        Exception.__init__(self, b"at position %s" % pos)
+
 
 class BadDomainNameCircular(BadDomainName):
     pass
 
+
 # implementation classes
 
+
 class DNSEntry(object):
     """A DNS entry"""
 
@@ -231,8 +246,11 @@
     def __eq__(self, other):
         """Equality test on name, type, and class"""
         if isinstance(other, DNSEntry):
-            return (self.name == other.name and self.type == other.type and
-                    self.clazz == other.clazz)
+            return (
+                self.name == other.name
+                and self.type == other.type
+                and self.clazz == other.clazz
+            )
         return 0
 
     def __ne__(self, other):
@@ -244,49 +262,55 @@
         try:
             return _CLASSES[clazz]
         except KeyError:
-            return "?(%s)" % (clazz)
+            return b"?(%s)" % clazz
 
     def getType(self, type):
         """Type accessor"""
         try:
             return _TYPES[type]
         except KeyError:
-            return "?(%s)" % (type)
+            return b"?(%s)" % type
 
     def toString(self, hdr, other):
         """String representation with additional information"""
-        result = ("%s[%s,%s" %
-            (hdr, self.getType(self.type), self.getClazz(self.clazz)))
+        result = b"%s[%s,%s" % (
+            hdr,
+            self.getType(self.type),
+            self.getClazz(self.clazz),
+        )
         if self.unique:
-            result += "-unique,"
+            result += b"-unique,"
         else:
-            result += ","
+            result += b","
         result += self.name
         if other is not None:
-            result += ",%s]" % (other)
+            result += b",%s]" % other
         else:
-            result += "]"
+            result += b"]"
         return result
 
+
 class DNSQuestion(DNSEntry):
     """A DNS question entry"""
 
     def __init__(self, name, type, clazz):
         if pycompat.ispy3 and isinstance(name, str):
             name = name.encode('ascii')
-        if not name.endswith(".local."):
+        if not name.endswith(b".local."):
             raise NonLocalNameException(name)
         DNSEntry.__init__(self, name, type, clazz)
 
     def answeredBy(self, rec):
         """Returns true if the question is answered by the record"""
-        return (self.clazz == rec.clazz and
-                (self.type == rec.type or self.type == _TYPE_ANY) and
-                self.name == rec.name)
+        return (
+            self.clazz == rec.clazz
+            and (self.type == rec.type or self.type == _TYPE_ANY)
+            and self.name == rec.name
+        )
 
     def __repr__(self):
         """String representation"""
-        return DNSEntry.toString(self, "question", None)
+        return DNSEntry.toString(self, b"question", None)
 
 
 class DNSRecord(DNSEntry):
@@ -347,9 +371,13 @@
 
     def toString(self, other):
         """String representation with additional information"""
-        arg = ("%s/%s,%s" %
-            (self.ttl, self.getRemainingTTL(currentTimeMillis()), other))
-        return DNSEntry.toString(self, "record", arg)
+        arg = b"%s/%s,%s" % (
+            self.ttl,
+            self.getRemainingTTL(currentTimeMillis()),
+            other,
+        )
+        return DNSEntry.toString(self, b"record", arg)
+
 
 class DNSAddress(DNSRecord):
     """A DNS address record"""
@@ -375,6 +403,7 @@
         except Exception:
             return self.address
 
+
 class DNSHinfo(DNSRecord):
     """A DNS host information record"""
 
@@ -396,7 +425,8 @@
 
     def __repr__(self):
         """String representation"""
-        return self.cpu + " " + self.os
+        return self.cpu + b" " + self.os
+
 
 class DNSPointer(DNSRecord):
     """A DNS pointer record"""
@@ -419,6 +449,7 @@
         """String representation"""
         return self.toString(self.alias)
 
+
 class DNSText(DNSRecord):
     """A DNS text record"""
 
@@ -439,10 +470,11 @@
     def __repr__(self):
         """String representation"""
         if len(self.text) > 10:
-            return self.toString(self.text[:7] + "...")
+            return self.toString(self.text[:7] + b"...")
         else:
             return self.toString(self.text)
 
+
 class DNSService(DNSRecord):
     """A DNS service record"""
 
@@ -463,15 +495,18 @@
     def __eq__(self, other):
         """Tests equality on priority, weight, port and server"""
         if isinstance(other, DNSService):
-            return (self.priority == other.priority and
-                    self.weight == other.weight and
-                    self.port == other.port and
-                    self.server == other.server)
+            return (
+                self.priority == other.priority
+                and self.weight == other.weight
+                and self.port == other.port
+                and self.server == other.server
+            )
         return 0
 
     def __repr__(self):
         """String representation"""
-        return self.toString("%s:%s" % (self.server, self.port))
+        return self.toString(b"%s:%s" % (self.server, self.port))
+
 
 class DNSIncoming(object):
     """Object representation of an incoming DNS packet"""
@@ -493,10 +528,11 @@
 
     def readHeader(self):
         """Reads header portion of packet"""
-        format = '!HHHHHH'
+        format = b'!HHHHHH'
         length = struct.calcsize(format)
-        info = struct.unpack(format,
-                             self.data[self.offset:self.offset + length])
+        info = struct.unpack(
+            format, self.data[self.offset : self.offset + length]
+        )
         self.offset += length
 
         self.id = info[0]
@@ -508,12 +544,13 @@
 
     def readQuestions(self):
         """Reads questions section of packet"""
-        format = '!HH'
+        format = b'!HH'
         length = struct.calcsize(format)
         for i in range(0, self.numquestions):
             name = self.readName()
-            info = struct.unpack(format,
-                                 self.data[self.offset:self.offset + length])
+            info = struct.unpack(
+                format, self.data[self.offset : self.offset + length]
+            )
             self.offset += length
 
             try:
@@ -524,10 +561,11 @@
 
     def readInt(self):
         """Reads an integer from the packet"""
-        format = '!I'
+        format = b'!I'
         length = struct.calcsize(format)
-        info = struct.unpack(format,
-                             self.data[self.offset:self.offset + length])
+        info = struct.unpack(
+            format, self.data[self.offset : self.offset + length]
+        )
         self.offset += length
         return info[0]
 
@@ -539,56 +577,73 @@
 
     def readString(self, len):
         """Reads a string of a given length from the packet"""
-        format = '!%ds' % len
+        format = b'!%ds' % len
         length = struct.calcsize(format)
-        info = struct.unpack(format,
-                             self.data[self.offset:self.offset + length])
+        info = struct.unpack(
+            format, self.data[self.offset : self.offset + length]
+        )
         self.offset += length
         return info[0]
 
     def readUnsignedShort(self):
         """Reads an unsigned short from the packet"""
-        format = '!H'
+        format = b'!H'
         length = struct.calcsize(format)
-        info = struct.unpack(format,
-                             self.data[self.offset:self.offset + length])
+        info = struct.unpack(
+            format, self.data[self.offset : self.offset + length]
+        )
         self.offset += length
         return info[0]
 
     def readOthers(self):
         """Reads answers, authorities and additionals section of the packet"""
-        format = '!HHiH'
+        format = b'!HHiH'
         length = struct.calcsize(format)
         n = self.numanswers + self.numauthorities + self.numadditionals
         for i in range(0, n):
             domain = self.readName()
-            info = struct.unpack(format,
-                                 self.data[self.offset:self.offset + length])
+            info = struct.unpack(
+                format, self.data[self.offset : self.offset + length]
+            )
             self.offset += length
 
             rec = None
             if info[0] == _TYPE_A:
-                rec = DNSAddress(domain, info[0], info[1], info[2],
-                                 self.readString(4))
+                rec = DNSAddress(
+                    domain, info[0], info[1], info[2], self.readString(4)
+                )
             elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
-                rec = DNSPointer(domain, info[0], info[1], info[2],
-                                 self.readName())
+                rec = DNSPointer(
+                    domain, info[0], info[1], info[2], self.readName()
+                )
             elif info[0] == _TYPE_TXT:
-                rec = DNSText(domain, info[0], info[1], info[2],
-                              self.readString(info[3]))
+                rec = DNSText(
+                    domain, info[0], info[1], info[2], self.readString(info[3])
+                )
             elif info[0] == _TYPE_SRV:
-                rec = DNSService(domain, info[0], info[1], info[2],
-                                 self.readUnsignedShort(),
-                                 self.readUnsignedShort(),
-                                 self.readUnsignedShort(),
-                                 self.readName())
+                rec = DNSService(
+                    domain,
+                    info[0],
+                    info[1],
+                    info[2],
+                    self.readUnsignedShort(),
+                    self.readUnsignedShort(),
+                    self.readUnsignedShort(),
+                    self.readName(),
+                )
             elif info[0] == _TYPE_HINFO:
-                rec = DNSHinfo(domain, info[0], info[1], info[2],
-                               self.readCharacterString(),
-                               self.readCharacterString())
+                rec = DNSHinfo(
+                    domain,
+                    info[0],
+                    info[1],
+                    info[2],
+                    self.readCharacterString(),
+                    self.readCharacterString(),
+                )
             elif info[0] == _TYPE_AAAA:
-                rec = DNSAddress(domain, info[0], info[1], info[2],
-                                 self.readString(16))
+                rec = DNSAddress(
+                    domain, info[0], info[1], info[2], self.readString(16)
+                )
             else:
                 # Try to ignore types we don't know about
                 # this may mean the rest of the name is
@@ -596,8 +651,8 @@
                 # so this is left for debugging.  New types
                 # encountered need to be parsed properly.
                 #
-                #print "UNKNOWN TYPE = " + str(info[0])
-                #raise BadTypeInNameException
+                # print "UNKNOWN TYPE = " + str(info[0])
+                # raise BadTypeInNameException
                 self.offset += info[3]
 
             if rec is not None:
@@ -613,7 +668,7 @@
 
     def readUTF(self, offset, len):
         """Reads a UTF-8 string of a given length from the packet"""
-        return self.data[offset:offset + len].decode('utf-8')
+        return self.data[offset : offset + len].decode('utf-8')
 
     def readName(self):
         """Reads a domain name from the packet"""
@@ -623,7 +678,7 @@
         first = off
 
         while True:
-            len = ord(self.data[off:off + 1])
+            len = ord(self.data[off : off + 1])
             off += 1
             if len == 0:
                 break
@@ -634,7 +689,7 @@
             elif t == 0xC0:
                 if next < 0:
                     next = off + 1
-                off = ((len & 0x3F) << 8) | ord(self.data[off:off + 1])
+                off = ((len & 0x3F) << 8) | ord(self.data[off : off + 1])
                 if off >= first:
                     raise BadDomainNameCircular(off)
                 first = off
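
All of the DNSIncoming reading above and the DNSOutgoing writing below funnel
through struct with byte-string format codes. A self-contained sketch of the
same header layout that readHeader() unpacks (the sample packet at the end is
made up):

    import struct

    def read_dns_header(data):
        # Six big-endian unsigned shorts, the b'!HHHHHH' layout used above:
        # id, flags, then the four section counts.
        fmt = '!HHHHHH'
        length = struct.calcsize(fmt)
        (msgid, flags, numquestions, numanswers,
         numauthorities, numadditionals) = struct.unpack(fmt, data[:length])
        return {
            'id': msgid,
            'response': bool(flags & 0x8000),  # _FLAGS_QR_MASK above
            'questions': numquestions,
            'answers': numanswers,
            'authorities': numauthorities,
            'additionals': numadditionals,
        }

    # Made-up header: id=1, response flag set, one answer, nothing else.
    packet = struct.pack('!HHHHHH', 1, 0x8000, 0, 1, 0, 0)
    print(read_dns_header(packet))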
@@ -691,31 +746,31 @@
 
     def writeByte(self, value):
         """Writes a single byte to the packet"""
-        format = '!c'
+        format = b'!c'
         self.data.append(struct.pack(format, chr(value)))
         self.size += 1
 
     def insertShort(self, index, value):
         """Inserts an unsigned short in a certain position in the packet"""
-        format = '!H'
+        format = b'!H'
         self.data.insert(index, struct.pack(format, value))
         self.size += 2
 
     def writeShort(self, value):
         """Writes an unsigned short to the packet"""
-        format = '!H'
+        format = b'!H'
         self.data.append(struct.pack(format, value))
         self.size += 2
 
     def writeInt(self, value):
         """Writes an unsigned integer to the packet"""
-        format = '!I'
+        format = b'!I'
         self.data.append(struct.pack(format, int(value)))
         self.size += 4
 
     def writeString(self, value, length):
         """Writes a string to the packet"""
-        format = '!' + str(length) + 's'
+        format = b'!' + str(length) + b's'
         self.data.append(struct.pack(format, value))
         self.size += length
 
@@ -741,8 +796,8 @@
             # for future pointers to it.
             #
             self.names[name] = self.size
-            parts = name.split('.')
-            if parts[-1] == '':
+            parts = name.split(b'.')
+            if parts[-1] == b'':
                 parts = parts[:-1]
             for part in parts:
                 self.writeUTF(part)
@@ -780,8 +835,8 @@
         record.write(self)
         self.size -= 2
 
-        length = len(''.join(self.data[index:]))
-        self.insertShort(index, length) # Here is the short we adjusted for
+        length = len(b''.join(self.data[index:]))
+        self.insertShort(index, length)  # Here is the short we adjusted for
 
     def packet(self):
         """Returns a string containing the packet's bytes
@@ -808,7 +863,7 @@
                 self.insertShort(0, 0)
             else:
                 self.insertShort(0, self.id)
-        return ''.join(self.data)
+        return b''.join(self.data)
 
 
 class DNSCache(object):
@@ -878,13 +933,13 @@
     def __init__(self, zeroconf):
         threading.Thread.__init__(self)
         self.zeroconf = zeroconf
-        self.readers = {} # maps socket to reader
+        self.readers = {}  # maps socket to reader
         self.timeout = 5
         self.condition = threading.Condition()
         self.start()
 
     def run(self):
-        while not globals()['_GLOBAL_DONE']:
+        while not globals()[b'_GLOBAL_DONE']:
             rs = self.getReaders()
             if len(rs) == 0:
                 # No sockets to manage, but we wait for the timeout
@@ -900,7 +955,7 @@
                         try:
                             self.readers[sock].handle_read()
                         except Exception:
-                            if not globals()['_GLOBAL_DONE']:
+                            if not globals()[b'_GLOBAL_DONE']:
                                 traceback.print_exc()
                 except Exception:
                     pass
@@ -928,6 +983,7 @@
         self.condition.notify()
         self.condition.release()
 
+
 class Listener(object):
     """A Listener is used by this module to listen on the multicast
     group to which DNS messages are sent, allowing the implementation
@@ -979,7 +1035,7 @@
     def run(self):
         while True:
             self.zeroconf.wait(10 * 1000)
-            if globals()['_GLOBAL_DONE']:
+            if globals()[b'_GLOBAL_DONE']:
                 return
             now = currentTimeMillis()
             for record in self.zeroconf.cache.entries():
@@ -1008,8 +1064,9 @@
 
         self.done = 0
 
-        self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR,
-                                                    _CLASS_IN))
+        self.zeroconf.addListener(
+            self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN)
+        )
         self.start()
 
     def updateRecord(self, zeroconf, now, record):
@@ -1024,15 +1081,17 @@
                     oldrecord.resetTTL(record)
                 else:
                     del self.services[record.alias.lower()]
-                    callback = (lambda x:
-                        self.listener.removeService(x, self.type, record.alias))
+                    callback = lambda x: self.listener.removeService(
+                        x, self.type, record.alias
+                    )
                     self.list.append(callback)
                     return
             except Exception:
                 if not expired:
                     self.services[record.alias.lower()] = record
-                    callback = (lambda x:
-                        self.listener.addService(x, self.type, record.alias))
+                    callback = lambda x: self.listener.addService(
+                        x, self.type, record.alias
+                    )
                     self.list.append(callback)
 
             expires = record.getExpirationTime(75)
@@ -1049,7 +1108,7 @@
             now = currentTimeMillis()
             if len(self.list) == 0 and self.nexttime > now:
                 self.zeroconf.wait(self.nexttime - now)
-            if globals()['_GLOBAL_DONE'] or self.done:
+            if globals()[b'_GLOBAL_DONE'] or self.done:
                 return
             now = currentTimeMillis()
 
@@ -1073,8 +1132,17 @@
 class ServiceInfo(object):
     """Service information"""
 
-    def __init__(self, type, name, address=None, port=None, weight=0,
-                 priority=0, properties=None, server=None):
+    def __init__(
+        self,
+        type,
+        name,
+        address=None,
+        port=None,
+        weight=0,
+        priority=0,
+        properties=None,
+        server=None,
+    ):
         """Create a service description.
 
         type: fully qualified service type name
@@ -1106,24 +1174,25 @@
         if isinstance(properties, dict):
             self.properties = properties
             list = []
-            result = ''
+            result = b''
             for key in properties:
                 value = properties[key]
                 if value is None:
-                    suffix = ''
+                    suffix = b''
                 elif isinstance(value, str):
                     suffix = value
                 elif isinstance(value, int):
                     if value:
-                        suffix = 'true'
+                        suffix = b'true'
                     else:
-                        suffix = 'false'
+                        suffix = b'false'
                 else:
-                    suffix = ''
-                list.append('='.join((key, suffix)))
+                    suffix = b''
+                list.append(b'='.join((key, suffix)))
             for item in list:
-                result = ''.join((result, struct.pack('!c', chr(len(item))),
-                                  item))
+                result = b''.join(
+                    (result, struct.pack(b'!c', chr(len(item))), item)
+                )
             self.text = result
         else:
             self.text = properties
@@ -1139,21 +1208,21 @@
             while index < end:
                 length = ord(text[index])
                 index += 1
-                strs.append(text[index:index + length])
+                strs.append(text[index : index + length])
                 index += length
 
             for s in strs:
-                eindex = s.find('=')
+                eindex = s.find(b'=')
                 if eindex == -1:
                     # No equals sign at all
                     key = s
                     value = 0
                 else:
                     key = s[:eindex]
-                    value = s[eindex + 1:]
-                    if value == 'true':
+                    value = s[eindex + 1 :]
+                    if value == b'true':
                         value = 1
-                    elif value == 'false' or not value:
+                    elif value == b'false' or not value:
                         value = 0
 
                 # Only update non-existent properties
@@ -1171,8 +1240,8 @@
 
     def getName(self):
         """Name accessor"""
-        if self.type is not None and self.name.endswith("." + self.type):
-            return self.name[:len(self.name) - len(self.type) - 1]
+        if self.type is not None and self.name.endswith(b"." + self.type):
+            return self.name[: len(self.name) - len(self.type) - 1]
         return self.name
 
     def getAddress(self):
@@ -1207,7 +1276,7 @@
         """Updates service information from a DNS record"""
         if record is not None and not record.isExpired(now):
             if record.type == _TYPE_A:
-                #if record.name == self.name:
+                # if record.name == self.name:
                 if record.name == self.server:
                     self.address = record.address
             elif record.type == _TYPE_SRV:
@@ -1216,10 +1285,14 @@
                     self.port = record.port
                     self.weight = record.weight
                     self.priority = record.priority
-                    #self.address = None
-                    self.updateRecord(zeroconf, now,
-                                      zeroconf.cache.getByDetails(self.server,
-                                      _TYPE_A, _CLASS_IN))
+                    # self.address = None
+                    self.updateRecord(
+                        zeroconf,
+                        now,
+                        zeroconf.cache.getByDetails(
+                            self.server, _TYPE_A, _CLASS_IN
+                        ),
+                    )
             elif record.type == _TYPE_TXT:
                 if record.name == self.name:
                     self.setText(record.text)
@@ -1233,34 +1306,44 @@
         next = now + delay
         last = now + timeout
         try:
-            zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY,
-                                                   _CLASS_IN))
-            while (self.server is None or self.address is None or
-                   self.text is None):
+            zeroconf.addListener(
+                self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN)
+            )
+            while (
+                self.server is None or self.address is None or self.text is None
+            ):
                 if last <= now:
                     return 0
                 if next <= now:
                     out = DNSOutgoing(_FLAGS_QR_QUERY)
-                    out.addQuestion(DNSQuestion(self.name, _TYPE_SRV,
-                                                _CLASS_IN))
+                    out.addQuestion(
+                        DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN)
+                    )
                     out.addAnswerAtTime(
-                        zeroconf.cache.getByDetails(self.name,
-                                                    _TYPE_SRV,
-                                                    _CLASS_IN),
-                                        now)
-                    out.addQuestion(DNSQuestion(self.name, _TYPE_TXT,
-                                                _CLASS_IN))
+                        zeroconf.cache.getByDetails(
+                            self.name, _TYPE_SRV, _CLASS_IN
+                        ),
+                        now,
+                    )
+                    out.addQuestion(
+                        DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN)
+                    )
                     out.addAnswerAtTime(
-                        zeroconf.cache.getByDetails(self.name, _TYPE_TXT,
-                                                    _CLASS_IN),
-                                        now)
+                        zeroconf.cache.getByDetails(
+                            self.name, _TYPE_TXT, _CLASS_IN
+                        ),
+                        now,
+                    )
                     if self.server is not None:
                         out.addQuestion(
-                            DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
+                            DNSQuestion(self.server, _TYPE_A, _CLASS_IN)
+                        )
                         out.addAnswerAtTime(
-                            zeroconf.cache.getByDetails(self.server, _TYPE_A,
-                                                        _CLASS_IN),
-                                            now)
+                            zeroconf.cache.getByDetails(
+                                self.server, _TYPE_A, _CLASS_IN
+                            ),
+                            now,
+                        )
                     zeroconf.send(out)
                     next = now + delay
                     delay = delay * 2
@@ -1285,16 +1368,19 @@
 
     def __repr__(self):
         """String representation"""
-        result = ("service[%s,%s:%s," %
-            (self.name, socket.inet_ntoa(self.getAddress()), self.port))
+        result = b"service[%s,%s:%s," % (
+            self.name,
+            socket.inet_ntoa(self.getAddress()),
+            self.port,
+        )
         if self.text is None:
-            result += "None"
+            result += b"None"
         else:
             if len(self.text) < 20:
                 result += self.text
             else:
-                result += self.text[:17] + "..."
-        result += "]"
+                result += self.text[:17] + b"..."
+        result += b"]"
         return result
 
 
@@ -1303,15 +1389,16 @@
 
     Supports registration, unregistration, queries and browsing.
     """
+
     def __init__(self, bindaddress=None):
         """Creates an instance of the Zeroconf class, establishing
         multicast communications, listening and reaping threads."""
-        globals()['_GLOBAL_DONE'] = 0
+        globals()[b'_GLOBAL_DONE'] = 0
         if bindaddress is None:
             self.intf = socket.gethostbyname(socket.gethostname())
         else:
             self.intf = bindaddress
-        self.group = ('', _MDNS_PORT)
+        self.group = (b'', _MDNS_PORT)
         self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
         try:
             self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
@@ -1327,16 +1414,19 @@
             # work as expected.
             #
             pass
-        self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, "\xff")
-        self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, "\x01")
+        self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, b"\xff")
+        self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, b"\x01")
         try:
             self.socket.bind(self.group)
         except Exception:
             # Some versions of linux raise an exception even though
             # SO_REUSEADDR and SO_REUSEPORT have been set, so ignore it
             pass
-        self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,
-            socket.inet_aton(_MDNS_ADDR) + socket.inet_aton(r'0.0.0.0'))
+        self.socket.setsockopt(
+            socket.SOL_IP,
+            socket.IP_ADD_MEMBERSHIP,
+            socket.inet_aton(_MDNS_ADDR) + socket.inet_aton(r'0.0.0.0'),
+        )
 
         self.listeners = []
         self.browsers = []
@@ -1352,10 +1442,10 @@
         self.reaper = Reaper(self)
 
     def isLoopback(self):
-        return self.intf.startswith("127.0.0.1")
+        return self.intf.startswith(b"127.0.0.1")
 
     def isLinklocal(self):
-        return self.intf.startswith("169.254.")
+        return self.intf.startswith(b"169.254.")
 
     def wait(self, timeout):
         """Calling thread waits for a given number of milliseconds or
@@ -1413,20 +1503,32 @@
                 now = currentTimeMillis()
                 continue
             out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
-            out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR,
-                _CLASS_IN, ttl, info.name), 0)
+            out.addAnswerAtTime(
+                DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0
+            )
             out.addAnswerAtTime(
                 DNSService(
-                    info.name, _TYPE_SRV,
-                    _CLASS_IN, ttl, info.priority, info.weight, info.port,
-                    info.server),
-                0)
+                    info.name,
+                    _TYPE_SRV,
+                    _CLASS_IN,
+                    ttl,
+                    info.priority,
+                    info.weight,
+                    info.port,
+                    info.server,
+                ),
+                0,
+            )
             out.addAnswerAtTime(
-                DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text),
-                0)
+                DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0
+            )
             if info.address:
-                out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A,
-                    _CLASS_IN, ttl, info.address), 0)
+                out.addAnswerAtTime(
+                    DNSAddress(
+                        info.server, _TYPE_A, _CLASS_IN, ttl, info.address
+                    ),
+                    0,
+                )
             self.send(out)
             i += 1
             nexttime += _REGISTER_TIME
@@ -1451,17 +1553,31 @@
                 continue
             out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
             out.addAnswerAtTime(
-                DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
+                DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0
+            )
             out.addAnswerAtTime(
-                DNSService(info.name, _TYPE_SRV,
-                           _CLASS_IN, 0, info.priority, info.weight, info.port,
-                           info.name),
-                0)
-            out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT,
-                _CLASS_IN, 0, info.text), 0)
+                DNSService(
+                    info.name,
+                    _TYPE_SRV,
+                    _CLASS_IN,
+                    0,
+                    info.priority,
+                    info.weight,
+                    info.port,
+                    info.name,
+                ),
+                0,
+            )
+            out.addAnswerAtTime(
+                DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0
+            )
             if info.address:
-                out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A,
-                    _CLASS_IN, 0, info.address), 0)
+                out.addAnswerAtTime(
+                    DNSAddress(
+                        info.server, _TYPE_A, _CLASS_IN, 0, info.address
+                    ),
+                    0,
+                )
             self.send(out)
             i += 1
             nexttime += _UNREGISTER_TIME
@@ -1479,18 +1595,36 @@
                     continue
                 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                 for info in self.services.values():
-                    out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR,
-                        _CLASS_IN, 0, info.name), 0)
+                    out.addAnswerAtTime(
+                        DNSPointer(
+                            info.type, _TYPE_PTR, _CLASS_IN, 0, info.name
+                        ),
+                        0,
+                    )
                     out.addAnswerAtTime(
-                        DNSService(info.name, _TYPE_SRV,
-                                   _CLASS_IN, 0, info.priority, info.weight,
-                                   info.port, info.server),
-                        0)
-                    out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT,
-                        _CLASS_IN, 0, info.text), 0)
+                        DNSService(
+                            info.name,
+                            _TYPE_SRV,
+                            _CLASS_IN,
+                            0,
+                            info.priority,
+                            info.weight,
+                            info.port,
+                            info.server,
+                        ),
+                        0,
+                    )
+                    out.addAnswerAtTime(
+                        DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text),
+                        0,
+                    )
                     if info.address:
-                        out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A,
-                           _CLASS_IN, 0, info.address), 0)
+                        out.addAnswerAtTime(
+                            DNSAddress(
+                                info.server, _TYPE_A, _CLASS_IN, 0, info.address
+                            ),
+                            0,
+                        )
                 self.send(out)
                 i += 1
                 nexttime += _UNREGISTER_TIME
@@ -1503,11 +1637,18 @@
         i = 0
         while i < 3:
             for record in self.cache.entriesWithName(info.type):
-                if (record.type == _TYPE_PTR and not record.isExpired(now) and
-                    record.alias == info.name):
-                    if (info.name.find('.') < 0):
-                        info.name = ("%w.[%s:%d].%s" %
-                            (info.name, info.address, info.port, info.type))
+                if (
+                    record.type == _TYPE_PTR
+                    and not record.isExpired(now)
+                    and record.alias == info.name
+                ):
+                    if info.name.find(b'.') < 0:
+                        info.name = b"%w.[%s:%d].%s" % (
+                            info.name,
+                            info.address,
+                            info.port,
+                            info.type,
+                        )
                         self.checkService(info)
                         return
                     raise NonUniqueNameException
@@ -1518,8 +1659,9 @@
             out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
             self.debug = out
             out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
-            out.addAuthoritativeAnswer(DNSPointer(info.type, _TYPE_PTR,
-                _CLASS_IN, _DNS_TTL, info.name))
+            out.addAuthoritativeAnswer(
+                DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, info.name)
+            )
             self.send(out)
             i += 1
             nexttime += _CHECK_TIME
@@ -1584,21 +1726,34 @@
 
         for question in msg.questions:
             if question.type == _TYPE_PTR:
-                if question.name == "_services._dns-sd._udp.local.":
+                if question.name == b"_services._dns-sd._udp.local.":
                     for stype in self.servicetypes.keys():
                         if out is None:
                             out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
-                        out.addAnswer(msg,
-                                      DNSPointer(
-                                          "_services._dns-sd._udp.local.",
-                                           _TYPE_PTR, _CLASS_IN,
-                                           _DNS_TTL, stype))
+                        out.addAnswer(
+                            msg,
+                            DNSPointer(
+                                b"_services._dns-sd._udp.local.",
+                                _TYPE_PTR,
+                                _CLASS_IN,
+                                _DNS_TTL,
+                                stype,
+                            ),
+                        )
                 for service in self.services.values():
                     if question.name == service.type:
                         if out is None:
                             out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
-                        out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR,
-                            _CLASS_IN, _DNS_TTL, service.name))
+                        out.addAnswer(
+                            msg,
+                            DNSPointer(
+                                service.type,
+                                _TYPE_PTR,
+                                _CLASS_IN,
+                                _DNS_TTL,
+                                service.name,
+                            ),
+                        )
             else:
                 try:
                     if out is None:
@@ -1608,32 +1763,56 @@
                     if question.type == _TYPE_A or question.type == _TYPE_ANY:
                         for service in self.services.values():
                             if service.server == question.name.lower():
-                                out.addAnswer(msg,
-                                    DNSAddress(question.name, _TYPE_A,
-                                               _CLASS_IN | _CLASS_UNIQUE,
-                                               _DNS_TTL, service.address))
+                                out.addAnswer(
+                                    msg,
+                                    DNSAddress(
+                                        question.name,
+                                        _TYPE_A,
+                                        _CLASS_IN | _CLASS_UNIQUE,
+                                        _DNS_TTL,
+                                        service.address,
+                                    ),
+                                )
 
                     service = self.services.get(question.name.lower(), None)
                     if not service:
                         continue
 
-                    if (question.type == _TYPE_SRV or
-                        question.type == _TYPE_ANY):
-                        out.addAnswer(msg,
-                            DNSService(question.name, _TYPE_SRV,
-                                       _CLASS_IN | _CLASS_UNIQUE,
-                                       _DNS_TTL, service.priority,
-                                       service.weight, service.port,
-                                       service.server))
-                    if (question.type == _TYPE_TXT or
-                        question.type == _TYPE_ANY):
-                        out.addAnswer(msg, DNSText(question.name, _TYPE_TXT,
-                            _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text))
+                    if question.type == _TYPE_SRV or question.type == _TYPE_ANY:
+                        out.addAnswer(
+                            msg,
+                            DNSService(
+                                question.name,
+                                _TYPE_SRV,
+                                _CLASS_IN | _CLASS_UNIQUE,
+                                _DNS_TTL,
+                                service.priority,
+                                service.weight,
+                                service.port,
+                                service.server,
+                            ),
+                        )
+                    if question.type == _TYPE_TXT or question.type == _TYPE_ANY:
+                        out.addAnswer(
+                            msg,
+                            DNSText(
+                                question.name,
+                                _TYPE_TXT,
+                                _CLASS_IN | _CLASS_UNIQUE,
+                                _DNS_TTL,
+                                service.text,
+                            ),
+                        )
                     if question.type == _TYPE_SRV:
                         out.addAdditionalAnswer(
-                            DNSAddress(service.server, _TYPE_A,
-                                       _CLASS_IN | _CLASS_UNIQUE,
-                                       _DNS_TTL, service.address))
+                            DNSAddress(
+                                service.server,
+                                _TYPE_A,
+                                _CLASS_IN | _CLASS_UNIQUE,
+                                _DNS_TTL,
+                                service.address,
+                            )
+                        )
                 except Exception:
                     traceback.print_exc()
 
@@ -1644,7 +1823,7 @@
     def send(self, out, addr=_MDNS_ADDR, port=_MDNS_PORT):
         """Sends an outgoing packet."""
         # This is a quick test to see if we can parse the packets we generate
-        #temp = DNSIncoming(out.packet())
+        # temp = DNSIncoming(out.packet())
         try:
             self.socket.sendto(out.packet(), 0, (addr, port))
         except Exception:
@@ -1654,39 +1833,56 @@
     def close(self):
         """Ends the background threads, and prevent this instance from
         servicing further queries."""
-        if globals()['_GLOBAL_DONE'] == 0:
-            globals()['_GLOBAL_DONE'] = 1
+        if globals()[b'_GLOBAL_DONE'] == 0:
+            globals()[b'_GLOBAL_DONE'] = 1
             self.notifyAll()
             self.engine.notify()
             self.unregisterAllServices()
-            self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP,
-                socket.inet_aton(_MDNS_ADDR) + socket.inet_aton(r'0.0.0.0'))
+            self.socket.setsockopt(
+                socket.SOL_IP,
+                socket.IP_DROP_MEMBERSHIP,
+                socket.inet_aton(_MDNS_ADDR) + socket.inet_aton(r'0.0.0.0'),
+            )
             self.socket.close()
 
+
 # Test a few module features, including service registration, service
 # query (for Zoe), and service unregistration.
 
 if __name__ == '__main__':
-    print("Multicast DNS Service Discovery for Python, version", __version__)
+    print(b"Multicast DNS Service Discovery for Python, version", __version__)
     r = Zeroconf()
-    print("1. Testing registration of a service...")
-    desc = {'version':'0.10','a':'test value', 'b':'another value'}
-    info = ServiceInfo("_http._tcp.local.",
-                       "My Service Name._http._tcp.local.",
-        socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
-    print("   Registering service...")
+    print(b"1. Testing registration of a service...")
+    desc = {b'version': b'0.10', b'a': b'test value', b'b': b'another value'}
+    info = ServiceInfo(
+        b"_http._tcp.local.",
+        b"My Service Name._http._tcp.local.",
+        socket.inet_aton(b"127.0.0.1"),
+        1234,
+        0,
+        0,
+        desc,
+    )
+    print(b"   Registering service...")
     r.registerService(info)
-    print("   Registration done.")
-    print("2. Testing query of service information...")
-    print("   Getting ZOE service:",
-        str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local.")))
-    print("   Query done.")
-    print("3. Testing query of own service...")
-    print("   Getting self:",
-        str(r.getServiceInfo("_http._tcp.local.",
-                             "My Service Name._http._tcp.local.")))
-    print("   Query done.")
-    print("4. Testing unregister of service information...")
+    print(b"   Registration done.")
+    print(b"2. Testing query of service information...")
+    print(
+        b"   Getting ZOE service:",
+        str(r.getServiceInfo(b"_http._tcp.local.", b"ZOE._http._tcp.local.")),
+    )
+    print(b"   Query done.")
+    print(b"3. Testing query of own service...")
+    print(
+        b"   Getting self:",
+        str(
+            r.getServiceInfo(
+                b"_http._tcp.local.", b"My Service Name._http._tcp.local."
+            )
+        ),
+    )
+    print(b"   Query done.")
+    print(b"4. Testing unregister of service information...")
     r.unregisterService(info)
-    print("   Unregister done.")
+    print(b"   Unregister done.")
     r.close()
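
The byteified module above leans on Python 3's bytes interpolation
(PEP 461), which is stricter than str formatting: ``%d`` accepts
integers, but ``%s`` requires a bytes-like value, and printing a bytes
object shows its repr. A minimal sketch of those rules, runnable on
Python 3.5 or later::

   assert b"port %d" % 1234 == b"port 1234"   # %d accepts ints
   assert b"name %s" % b"zoe" == b"name zoe"  # %s needs bytes-like input
   try:
       b"port %s" % 1234                      # ints are rejected for %s
   except TypeError:
       pass
   print(b"done")                             # Python 3 prints: b'done'
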
--- a/hgext/zeroconf/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext/zeroconf/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -37,21 +37,20 @@
     pycompat,
     ui as uimod,
 )
-from mercurial.hgweb import (
-    server as servermod
-)
+from mercurial.hgweb import server as servermod
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # specify the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
 # publish
 
 server = None
 localip = None
 
+
 def getip():
     # finds external-facing interface without sending any packets (Linux)
     try:
@@ -83,6 +82,7 @@
 
     return dumbip
 
+
 def publish(name, desc, path, port):
     global server, localip
     if not server:
@@ -98,25 +98,32 @@
     name = r"%s-%s" % (hostname, name)
 
     # advertise to browsers
-    svc = Zeroconf.ServiceInfo('_http._tcp.local.',
-                               pycompat.bytestr(name + r'._http._tcp.local.'),
-                               server = host,
-                               port = port,
-                               properties = {'description': desc,
-                                             'path': "/" + path},
-                               address = localip, weight = 0, priority = 0)
+    svc = Zeroconf.ServiceInfo(
+        b'_http._tcp.local.',
+        pycompat.bytestr(name + r'._http._tcp.local.'),
+        server=host,
+        port=port,
+        properties={b'description': desc, b'path': b"/" + path},
+        address=localip,
+        weight=0,
+        priority=0,
+    )
     server.registerService(svc)
 
     # advertise to Mercurial clients
-    svc = Zeroconf.ServiceInfo('_hg._tcp.local.',
-                               pycompat.bytestr(name + r'._hg._tcp.local.'),
-                               server = host,
-                               port = port,
-                               properties = {'description': desc,
-                                             'path': "/" + path},
-                               address = localip, weight = 0, priority = 0)
+    svc = Zeroconf.ServiceInfo(
+        b'_hg._tcp.local.',
+        pycompat.bytestr(name + r'._hg._tcp.local.'),
+        server=host,
+        port=port,
+        properties={b'description': desc, b'path': b"/" + path},
+        address=localip,
+        weight=0,
+        priority=0,
+    )
     server.registerService(svc)
 
+
 def zc_create_server(create_server, ui, app):
     httpd = create_server(ui, app)
     port = httpd.port
@@ -127,67 +134,78 @@
         # single repo
         with app._obtainrepo() as repo:
             name = app.reponame or os.path.basename(repo.root)
-            path = repo.ui.config("web", "prefix", "").strip('/')
-            desc = repo.ui.config("web", "description")
+            path = repo.ui.config(b"web", b"prefix", b"").strip(b'/')
+            desc = repo.ui.config(b"web", b"description")
             if not desc:
                 desc = name
         publish(name, desc, path, port)
     else:
         # webdir
-        prefix = app.ui.config("web", "prefix", "").strip('/') + '/'
+        prefix = app.ui.config(b"web", b"prefix", b"").strip(b'/') + b'/'
         for repo, path in repos:
             u = app.ui.copy()
-            u.readconfig(os.path.join(path, '.hg', 'hgrc'))
+            u.readconfig(os.path.join(path, b'.hg', b'hgrc'))
             name = os.path.basename(repo)
-            path = (prefix + repo).strip('/')
-            desc = u.config('web', 'description')
+            path = (prefix + repo).strip(b'/')
+            desc = u.config(b'web', b'description')
             if not desc:
                 desc = name
             publish(name, desc, path, port)
     return httpd
 
+
 # listen
 
+
 class listener(object):
     def __init__(self):
         self.found = {}
+
     def removeService(self, server, type, name):
         if repr(name) in self.found:
             del self.found[repr(name)]
+
     def addService(self, server, type, name):
         self.found[repr(name)] = server.getServiceInfo(type, name)
 
+
 def getzcpaths():
     ip = getip()
     if ip.startswith(r'127.'):
         return
     server = Zeroconf.Zeroconf(ip)
     l = listener()
-    Zeroconf.ServiceBrowser(server, "_hg._tcp.local.", l)
+    Zeroconf.ServiceBrowser(server, b"_hg._tcp.local.", l)
     time.sleep(1)
     server.close()
     for value in l.found.values():
-        name = value.name[:value.name.index(b'.')]
-        url = r"http://%s:%s%s" % (socket.inet_ntoa(value.address), value.port,
-                                   value.properties.get(r"path", r"/"))
+        name = value.name[: value.name.index(b'.')]
+        url = r"http://%s:%s%s" % (
+            socket.inet_ntoa(value.address),
+            value.port,
+            value.properties.get(r"path", r"/"),
+        )
         yield b"zc-" + name, pycompat.bytestr(url)
 
+
 def config(orig, self, section, key, *args, **kwargs):
-    if section == "paths" and key.startswith("zc-"):
+    if section == b"paths" and key.startswith(b"zc-"):
         for name, path in getzcpaths():
             if name == key:
                 return path
     return orig(self, section, key, *args, **kwargs)
 
+
 def configitems(orig, self, section, *args, **kwargs):
     repos = orig(self, section, *args, **kwargs)
-    if section == "paths":
+    if section == b"paths":
         repos += getzcpaths()
     return repos
 
+
 def configsuboptions(orig, self, section, name, *args, **kwargs):
     opt, sub = orig(self, section, name, *args, **kwargs)
-    if section == "paths" and name.startswith("zc-"):
+    if section == b"paths" and name.startswith(b"zc-"):
         # We have to find the URL in the zeroconf paths.  We can't cons up any
         # suboptions, so we use any that we found in the original config.
         for zcname, zcurl in getzcpaths():
@@ -195,12 +213,14 @@
                 return zcurl, sub
     return opt, sub
 
+
 def defaultdest(orig, source):
     for name, path in getzcpaths():
         if path == source:
             return name.encode(encoding.encoding)
     return orig(source)
 
+
 def cleanupafterdispatch(orig, ui, options, cmd, cmdfunc):
     try:
         return orig(ui, options, cmd, cmdfunc)
@@ -211,10 +231,11 @@
         if server:
             server.close()
 
-extensions.wrapfunction(dispatch, '_runcommand', cleanupafterdispatch)
+
+extensions.wrapfunction(dispatch, b'_runcommand', cleanupafterdispatch)
 
-extensions.wrapfunction(uimod.ui, 'config', config)
-extensions.wrapfunction(uimod.ui, 'configitems', configitems)
-extensions.wrapfunction(uimod.ui, 'configsuboptions', configsuboptions)
-extensions.wrapfunction(hg, 'defaultdest', defaultdest)
-extensions.wrapfunction(servermod, 'create_server', zc_create_server)
+extensions.wrapfunction(uimod.ui, b'config', config)
+extensions.wrapfunction(uimod.ui, b'configitems', configitems)
+extensions.wrapfunction(uimod.ui, b'configsuboptions', configsuboptions)
+extensions.wrapfunction(hg, b'defaultdest', defaultdest)
+extensions.wrapfunction(servermod, b'create_server', zc_create_server)
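
Every hook above follows Mercurial's ``extensions.wrapfunction``
convention: the replacement receives the previous implementation as its
first argument and delegates to it. A minimal sketch of the same
pattern, with a hypothetical wrapper name::

   from mercurial import extensions, ui as uimod

   def myconfig(orig, self, section, key, *args, **kwargs):
       # the wrapped original is passed in as the first argument
       value = orig(self, section, key, *args, **kwargs)
       # ...post-process the value here...
       return value

   extensions.wrapfunction(uimod.ui, b'config', myconfig)
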
--- a/hgext3rd/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/hgext3rd/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,4 +1,5 @@
 # name space package to host third party extensions
 from __future__ import absolute_import
 import pkgutil
+
 __path__ = pkgutil.extend_path(__path__, __name__)
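
``pkgutil.extend_path`` turns ``hgext3rd`` into a namespace package, so
separately installed distributions can contribute modules to it. An
illustrative layout, assuming a hypothetical third-party extension
``foo``::

   site-packages/hgext3rd/__init__.py   # this file, shipped with Mercurial
   site-packages/hgext3rd/foo.py        # shipped by another distribution

   # both resolve through the shared namespace package:
   from hgext3rd import foo
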
--- a/i18n/check-translation.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/i18n/check-translation.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,12 +10,15 @@
 scanners = []
 checkers = []
 
+
 def scanner():
     def decorator(func):
         scanners.append(func)
         return func
+
     return decorator
 
+
 def levelchecker(level, msgidpat):
     def decorator(func):
         if msgidpat:
@@ -25,8 +28,10 @@
         checkers.append((func, level))
         func.match = match
         return func
+
     return decorator
 
+
 def match(checker, pe):
     """Examine whether POEntry "pe" is target of specified checker or not
     """
@@ -39,11 +44,14 @@
             return
     return True
 
+
 ####################
 
+
 def fatalchecker(msgidpat=None):
     return levelchecker('fatal', msgidpat)
 
+
 @fatalchecker(r'\$\$')
 def promptchoice(pe):
     """Check translation of the string given to "ui.promptchoice()"
@@ -70,7 +78,10 @@
     if [c for c, i in indices if len(c) == i + 1]:
         yield "msgstr has invalid '&' followed by none"
 
+
 deprecatedpe = None
+
+
 @scanner()
 def deprecatedsetup(pofile):
     pes = [p for p in pofile if p.msgid == '(DEPRECATED)' and p.msgstr]
@@ -78,6 +89,7 @@
         global deprecatedpe
         deprecatedpe = pes[0]
 
+
 @fatalchecker(r'\(DEPRECATED\)')
 def deprecated(pe):
     """Check for DEPRECATED
@@ -109,16 +121,20 @@
     ...     msgstr= 'something (DETACERPED, foo bar)')
     >>> match(deprecated, pe)
     """
-    if not ('(DEPRECATED)' in pe.msgstr or
-            (deprecatedpe and
-             deprecatedpe.msgstr in pe.msgstr)):
+    if not (
+        '(DEPRECATED)' in pe.msgstr
+        or (deprecatedpe and deprecatedpe.msgstr in pe.msgstr)
+    ):
         yield "msgstr inconsistently translated (DEPRECATED)"
 
+
 ####################
 
+
 def warningchecker(msgidpat=None):
     return levelchecker('warning', msgidpat)
 
+
 @warningchecker()
 def taildoublecolons(pe):
     """Check equality of tail '::'-ness between msgid and msgstr
@@ -141,6 +157,7 @@
     if pe.msgid.endswith('::') != pe.msgstr.endswith('::'):
         yield "tail '::'-ness differs between msgid and msgstr"
 
+
 @warningchecker()
 def indentation(pe):
     """Check equality of initial indentation between msgid and msgstr
@@ -159,13 +176,15 @@
     if idindent != strindent:
         yield "initial indentation width differs betweeen msgid and msgstr"
 
+
 ####################
 
+
 def check(pofile, fatal=True, warning=False):
-    targetlevel = { 'fatal': fatal, 'warning': warning }
-    targetcheckers = [(checker, level)
-                      for checker, level in checkers
-                      if targetlevel[level]]
+    targetlevel = {'fatal': fatal, 'warning': warning}
+    targetcheckers = [
+        (checker, level) for checker, level in checkers if targetlevel[level]
+    ]
     if not targetcheckers:
         return []
 
@@ -176,19 +195,22 @@
         errors = []
         for checker, level in targetcheckers:
             if match(checker, pe):
-                errors.extend((level, checker.__name__, error)
-                              for error in checker(pe))
+                errors.extend(
+                    (level, checker.__name__, error) for error in checker(pe)
+                )
         if errors:
             detected.append((pe, errors))
     return detected
 
+
 ########################################
 
 if __name__ == "__main__":
     import sys
     import optparse
 
-    optparser = optparse.OptionParser("""%prog [options] pofile ...
+    optparser = optparse.OptionParser(
+        """%prog [options] pofile ...
 
 This checks for Mercurial-specific translation problems in the specified
 '*.po' files.
@@ -207,32 +229,44 @@
     # no-foo-check
     msgid = "....."
     msgstr = "....."
-""")
-    optparser.add_option("", "--warning",
-                         help="show also warning level problems",
-                         action="store_true")
-    optparser.add_option("", "--doctest",
-                         help="run doctest of this tool, instead of check",
-                         action="store_true")
+"""
+    )
+    optparser.add_option(
+        "",
+        "--warning",
+        help="show also warning level problems",
+        action="store_true",
+    )
+    optparser.add_option(
+        "",
+        "--doctest",
+        help="run doctest of this tool, instead of check",
+        action="store_true",
+    )
     (options, args) = optparser.parse_args()
 
     if options.doctest:
         import os
+
         if 'TERM' in os.environ:
             del os.environ['TERM']
         import doctest
+
         failures, tests = doctest.testmod()
         sys.exit(failures and 1 or 0)
 
     detected = []
     warning = options.warning
     for f in args:
-        detected.extend((f, pe, errors)
-                        for pe, errors in check(polib.pofile(f),
-                                                warning=warning))
+        detected.extend(
+            (f, pe, errors)
+            for pe, errors in check(polib.pofile(f), warning=warning)
+        )
     if detected:
         for f, pe, errors in detected:
             for level, checker, error in errors:
-                sys.stderr.write('%s:%d:%s(%s): %s\n'
-                                 % (f, pe.linenum, level, checker, error))
+                sys.stderr.write(
+                    '%s:%d:%s(%s): %s\n'
+                    % (f, pe.linenum, level, checker, error)
+                )
         sys.exit(1)
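
Problems found by the checkers are written to stderr in the
``file:line:level(checker): message`` format assembled above. An
illustrative run, assuming a hypothetical ``ja.po`` whose entry at line
1234 trips the ``taildoublecolons`` warning::

   $ python i18n/check-translation.py --warning i18n/ja.po
   i18n/ja.po:1234:warning(taildoublecolons): tail '::'-ness differs between msgid and msgstr
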
--- a/i18n/polib.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/i18n/polib.py	Mon Oct 21 11:09:48 2019 -0400
@@ -17,8 +17,18 @@
 
 __author__ = 'David Jean Louis <izimobil@gmail.com>'
 __version__ = '1.0.7'
-__all__ = ['pofile', 'POFile', 'POEntry', 'mofile', 'MOFile', 'MOEntry',
-           'default_encoding', 'escape', 'unescape', 'detect_encoding', ]
+__all__ = [
+    'pofile',
+    'POFile',
+    'POEntry',
+    'mofile',
+    'MOFile',
+    'MOEntry',
+    'default_encoding',
+    'escape',
+    'unescape',
+    'detect_encoding',
+]
 
 import array
 import codecs
@@ -55,6 +65,7 @@
     def u(s):
         return unicode(s, "unicode_escape")
 
+
 else:
     PY3 = True
     text_type = str
@@ -64,6 +75,8 @@
 
     def u(s):
         return s
+
+
 # }}}
 # _pofile_or_mofile {{{
 
@@ -84,11 +97,13 @@
         f,
         encoding=enc,
         check_for_duplicates=kwargs.get('check_for_duplicates', False),
-        klass=kwargs.get('klass')
+        klass=kwargs.get('klass'),
     )
     instance = parser.parse()
     instance.wrapwidth = kwargs.get('wrapwidth', 78)
     return instance
+
+
 # }}}
 # _is_file {{{
 
@@ -107,6 +122,8 @@
         return os.path.exists(filename_or_contents)
     except (ValueError, UnicodeEncodeError):
         return False
+
+
 # }}}
 # function pofile() {{{
 
@@ -139,6 +156,8 @@
         instance).
     """
     return _pofile_or_mofile(pofile, 'pofile', **kwargs)
+
+
 # }}}
 # function mofile() {{{
 
@@ -172,6 +191,8 @@
         instance).
     """
     return _pofile_or_mofile(mofile, 'mofile', **kwargs)
+
+
 # }}}
 # function detect_encoding() {{{
 
@@ -229,6 +250,8 @@
                     return enc
         f.close()
     return default_encoding
+
+
 # }}}
 # function escape() {{{
 
@@ -238,11 +261,15 @@
     Escapes the characters ``\\\\``, ``\\t``, ``\\n``, ``\\r`` and ``"`` in
     the given string ``st`` and returns it.
     """
-    return st.replace('\\', r'\\')\
-             .replace('\t', r'\t')\
-             .replace('\r', r'\r')\
-             .replace('\n', r'\n')\
-             .replace('\"', r'\"')
+    return (
+        st.replace('\\', r'\\')
+        .replace('\t', r'\t')
+        .replace('\r', r'\r')
+        .replace('\n', r'\n')
+        .replace('\"', r'\"')
+    )
+
+
 # }}}
 # function unescape() {{{
 
@@ -252,6 +279,7 @@
     Unescapes the characters ``\\\\``, ``\\t``, ``\\n``, ``\\r`` and ``"`` in
     the given string ``st`` and returns it.
     """
+
     def unescape_repl(m):
         m = m.group(1)
         if m == 'n':
@@ -263,7 +291,10 @@
         if m == '\\':
             return '\\'
         return m  # handles escaped double quote
+
     return re.sub(r'\\(\\|n|t|r|")', unescape_repl, st)
+
+
 # }}}
 # class _BaseFile {{{
 
@@ -317,8 +348,9 @@
         Returns the unicode representation of the file.
         """
         ret = []
-        entries = [self.metadata_as_entry()] + \
-                  [e for e in self if not e.obsolete]
+        entries = [self.metadata_as_entry()] + [
+            e for e in self if not e.obsolete
+        ]
         for entry in entries:
             ret.append(entry.__unicode__(self.wrapwidth))
         for entry in self.obsolete_entries():
@@ -326,14 +358,17 @@
         ret = u('\n').join(ret)
 
         assert isinstance(ret, text_type)
-        #if type(ret) != text_type:
+        # if type(ret) != text_type:
         #    return unicode(ret, self.encoding)
         return ret
 
     if PY3:
+
         def __str__(self):
             return self.__unicode__()
+
     else:
+
         def __str__(self):
             """
             Returns the string representation of the file.
@@ -353,8 +388,10 @@
         ``entry``
             an instance of :class:`~polib._BaseEntry`.
         """
-        return self.find(entry.msgid, by='msgid', msgctxt=entry.msgctxt) \
+        return (
+            self.find(entry.msgid, by='msgid', msgctxt=entry.msgctxt)
             is not None
+        )
 
     def __eq__(self, other):
         return str(self) == str(other)
@@ -439,8 +476,9 @@
         if self.fpath is None and fpath:
             self.fpath = fpath
 
-    def find(self, st, by='msgid', include_obsolete_entries=False,
-             msgctxt=False):
+    def find(
+        self, st, by='msgid', include_obsolete_entries=False, msgctxt=False
+    ):
         """
         Find the entry which msgid (or property identified by the ``by``
         argument) matches the string ``st``.
@@ -490,7 +528,7 @@
             'Content-Type',
             'Content-Transfer-Encoding',
             'Language',
-            'Plural-Forms'
+            'Plural-Forms',
         ]
         ordered_data = []
         for data in data_order:
@@ -524,10 +562,11 @@
                 return -1
             else:
                 return 0
+
         # add metadata entry
         entries.sort(key=lambda o: o.msgctxt or o.msgid)
         mentry = self.metadata_as_entry()
-        #mentry.msgstr = mentry.msgstr.replace('\\n', '').lstrip()
+        # mentry.msgstr = mentry.msgstr.replace('\\n', '').lstrip()
         entries = [mentry] + entries
         entries_len = len(entries)
         ids, strs = b(''), b('')
@@ -578,8 +617,8 @@
             # start of value index
             7 * 4 + entries_len * 8,
             # size and offset of hash table, we don't use hash tables
-            0, keystart
-
+            0,
+            keystart,
         )
         if PY3 and sys.version_info.minor > 1:  # python 3.2 or superior
             output += array.array("i", offsets).tobytes()
@@ -597,6 +636,8 @@
         if isinstance(mixed, text_type):
             mixed = mixed.encode(self.encoding)
         return mixed
+
+
 # }}}
 # class POFile {{{
 
@@ -658,8 +699,11 @@
         """
         Convenience method that returns the list of untranslated entries.
         """
-        return [e for e in self if not e.translated() and not e.obsolete
-                and not 'fuzzy' in e.flags]
+        return [
+            e
+            for e in self
+            if not e.translated() and not e.obsolete and not 'fuzzy' in e.flags
+        ]
 
     def fuzzy_entries(self):
         """
@@ -703,6 +747,8 @@
         for entry in self:
             if entry.msgid not in refpot_msgids:
                 entry.obsolete = True
+
+
 # }}}
 # class MOFile {{{
 
@@ -713,8 +759,9 @@
     This class inherits the :class:`~polib._BaseFile` class and, by
     extension, the python ``list`` type.
     """
-    MAGIC = 0x950412de
-    MAGIC_SWAPPED = 0xde120495
+
+    MAGIC = 0x950412DE
+    MAGIC_SWAPPED = 0xDE120495
 
     def __init__(self, *args, **kwargs):
         """
@@ -776,6 +823,8 @@
         Convenience method to keep the same interface with POFile instances.
         """
         return []
+
+
 # }}}
 # class _BaseEntry {{{
 
@@ -831,14 +880,16 @@
         ret = []
         # write the msgctxt if any
         if self.msgctxt is not None:
-            ret += self._str_field("msgctxt", delflag, "", self.msgctxt,
-                                   wrapwidth)
+            ret += self._str_field(
+                "msgctxt", delflag, "", self.msgctxt, wrapwidth
+            )
         # write the msgid
         ret += self._str_field("msgid", delflag, "", self.msgid, wrapwidth)
         # write the msgid_plural if any
         if self.msgid_plural:
-            ret += self._str_field("msgid_plural", delflag, "",
-                                   self.msgid_plural, wrapwidth)
+            ret += self._str_field(
+                "msgid_plural", delflag, "", self.msgid_plural, wrapwidth
+            )
         if self.msgstr_plural:
             # write the msgstr_plural if any
             msgstrs = self.msgstr_plural
@@ -847,20 +898,25 @@
             for index in keys:
                 msgstr = msgstrs[index]
                 plural_index = '[%s]' % index
-                ret += self._str_field("msgstr", delflag, plural_index, msgstr,
-                                       wrapwidth)
+                ret += self._str_field(
+                    "msgstr", delflag, plural_index, msgstr, wrapwidth
+                )
         else:
             # otherwise write the msgstr
-            ret += self._str_field("msgstr", delflag, "", self.msgstr,
-                                   wrapwidth)
+            ret += self._str_field(
+                "msgstr", delflag, "", self.msgstr, wrapwidth
+            )
         ret.append('')
         ret = u('\n').join(ret)
         return ret
 
     if PY3:
+
         def __str__(self):
             return self.__unicode__()
+
     else:
+
         def __str__(self):
             """
             Returns the string representation of the entry.
@@ -870,8 +926,7 @@
     def __eq__(self, other):
         return str(self) == str(other)
 
-    def _str_field(self, fieldname, delflag, plural_index, field,
-                   wrapwidth=78):
+    def _str_field(self, fieldname, delflag, plural_index, field, wrapwidth=78):
         lines = field.splitlines(True)
         if len(lines) > 1:
             lines = [''] + lines  # start with initial empty line
@@ -888,23 +943,30 @@
             real_wrapwidth = wrapwidth - flength + specialchars_count
             if wrapwidth > 0 and len(field) > real_wrapwidth:
                 # Wrap the line but take field name into account
-                lines = [''] + [unescape(item) for item in wrap(
-                    escaped_field,
-                    wrapwidth - 2,  # 2 for quotes ""
-                    drop_whitespace=False,
-                    break_long_words=False
-                )]
+                lines = [''] + [
+                    unescape(item)
+                    for item in wrap(
+                        escaped_field,
+                        wrapwidth - 2,  # 2 for quotes ""
+                        drop_whitespace=False,
+                        break_long_words=False,
+                    )
+                ]
             else:
                 lines = [field]
         if fieldname.startswith('previous_'):
             # quick and dirty trick to get the real field name
             fieldname = fieldname[9:]
 
-        ret = ['%s%s%s "%s"' % (delflag, fieldname, plural_index,
-                                escape(lines.pop(0)))]
+        ret = [
+            '%s%s%s "%s"'
+            % (delflag, fieldname, plural_index, escape(lines.pop(0)))
+        ]
         for line in lines:
             ret.append('%s"%s"' % (delflag, escape(line)))
         return ret
+
+
 # }}}
 # class POEntry {{{
 
@@ -972,7 +1034,7 @@
                             wrapwidth,
                             initial_indent=c[1],
                             subsequent_indent=c[1],
-                            break_long_words=False
+                            break_long_words=False,
                         )
                     else:
                         ret.append('%s%s' % (c[1], comment))
@@ -991,13 +1053,16 @@
                 # what we want for filenames, so the dirty hack is to
                 # temporally replace hyphens with a char that a file cannot
                 # contain, like "*"
-                ret += [l.replace('*', '-') for l in wrap(
-                    filestr.replace('-', '*'),
-                    wrapwidth,
-                    initial_indent='#: ',
-                    subsequent_indent='#: ',
-                    break_long_words=False
-                )]
+                ret += [
+                    l.replace('*', '-')
+                    for l in wrap(
+                        filestr.replace('-', '*'),
+                        wrapwidth,
+                        initial_indent='#: ',
+                        subsequent_indent='#: ',
+                        break_long_words=False,
+                    )
+                ]
             else:
                 ret.append('#: ' + filestr)
 
@@ -1006,8 +1071,7 @@
             ret.append('#, %s' % ', '.join(self.flags))
 
         # previous context and previous msgid/msgid_plural
-        fields = ['previous_msgctxt', 'previous_msgid',
-                  'previous_msgid_plural']
+        fields = ['previous_msgctxt', 'previous_msgid', 'previous_msgid_plural']
         for f in fields:
             val = getattr(self, f)
             if val:
@@ -1017,7 +1081,7 @@
         ret = u('\n').join(ret)
 
         assert isinstance(ret, text_type)
-        #if type(ret) != types.UnicodeType:
+        # if type(ret) != types.UnicodeType:
         #    return unicode(ret, self.encoding)
         return ret
 
@@ -1131,6 +1195,8 @@
 
     def __hash__(self):
         return hash((self.msgid, self.msgstr))
+
+
 # }}}
 # class MOEntry {{{
 
@@ -1139,6 +1205,7 @@
     """
     Represents a mo file entry.
     """
+
     def __init__(self, *args, **kwargs):
         """
         Constructor, accepts the following keyword arguments,
@@ -1168,6 +1235,7 @@
     def __hash__(self):
         return hash((self.msgid, self.msgstr))
 
+
 # }}}
 # class _POFileParser {{{
 
@@ -1211,7 +1279,7 @@
         self.instance = klass(
             pofile=pofile,
             encoding=enc,
-            check_for_duplicates=kwargs.get('check_for_duplicates', False)
+            check_for_duplicates=kwargs.get('check_for_duplicates', False),
         )
         self.transitions = {}
         self.current_line = 0
@@ -1238,25 +1306,61 @@
         #     * MS: a msgstr
         #     * MX: a msgstr plural
         #     * MC: a msgid or msgstr continuation line
-        all = ['st', 'he', 'gc', 'oc', 'fl', 'ct', 'pc', 'pm', 'pp', 'tc',
-               'ms', 'mp', 'mx', 'mi']
+        all = [
+            'st',
+            'he',
+            'gc',
+            'oc',
+            'fl',
+            'ct',
+            'pc',
+            'pm',
+            'pp',
+            'tc',
+            'ms',
+            'mp',
+            'mx',
+            'mi',
+        ]
 
-        self.add('tc', ['st', 'he'],                                     'he')
-        self.add('tc', ['gc', 'oc', 'fl', 'tc', 'pc', 'pm', 'pp', 'ms',
-                        'mp', 'mx', 'mi'],                               'tc')
-        self.add('gc', all,                                              'gc')
-        self.add('oc', all,                                              'oc')
-        self.add('fl', all,                                              'fl')
-        self.add('pc', all,                                              'pc')
-        self.add('pm', all,                                              'pm')
-        self.add('pp', all,                                              'pp')
-        self.add('ct', ['st', 'he', 'gc', 'oc', 'fl', 'tc', 'pc', 'pm',
-                        'pp', 'ms', 'mx'],                               'ct')
-        self.add('mi', ['st', 'he', 'gc', 'oc', 'fl', 'ct', 'tc', 'pc',
-                 'pm', 'pp', 'ms', 'mx'],                                'mi')
-        self.add('mp', ['tc', 'gc', 'pc', 'pm', 'pp', 'mi'],             'mp')
-        self.add('ms', ['mi', 'mp', 'tc'],                               'ms')
-        self.add('mx', ['mi', 'mx', 'mp', 'tc'],                         'mx')
+        self.add('tc', ['st', 'he'], 'he')
+        self.add(
+            'tc',
+            ['gc', 'oc', 'fl', 'tc', 'pc', 'pm', 'pp', 'ms', 'mp', 'mx', 'mi'],
+            'tc',
+        )
+        self.add('gc', all, 'gc')
+        self.add('oc', all, 'oc')
+        self.add('fl', all, 'fl')
+        self.add('pc', all, 'pc')
+        self.add('pm', all, 'pm')
+        self.add('pp', all, 'pp')
+        self.add(
+            'ct',
+            ['st', 'he', 'gc', 'oc', 'fl', 'tc', 'pc', 'pm', 'pp', 'ms', 'mx'],
+            'ct',
+        )
+        self.add(
+            'mi',
+            [
+                'st',
+                'he',
+                'gc',
+                'oc',
+                'fl',
+                'ct',
+                'tc',
+                'pc',
+                'pm',
+                'pp',
+                'ms',
+                'mx',
+            ],
+            'mi',
+        )
+        self.add('mp', ['tc', 'gc', 'pc', 'pm', 'pp', 'mi'], 'mp')
+        self.add('ms', ['mi', 'mp', 'tc'], 'ms')
+        self.add('mx', ['mi', 'mx', 'mp', 'tc'], 'mx')
         self.add('mc', ['ct', 'mi', 'mp', 'ms', 'mx', 'pm', 'pp', 'pc'], 'mc')
 
     def parse(self):
@@ -1300,11 +1404,13 @@
             # Take care of keywords like
             # msgid, msgid_plural, msgctxt & msgstr.
             if tokens[0] in keywords and nb_tokens > 1:
-                line = line[len(tokens[0]):].lstrip()
+                line = line[len(tokens[0]) :].lstrip()
                 if re.search(r'([^\\]|^)"', line[1:-1]):
-                    raise IOError('Syntax error in po file %s (line %s): '
-                                  'unescaped double quote found' %
-                                  (self.instance.fpath, self.current_line))
+                    raise IOError(
+                        'Syntax error in po file %s (line %s): '
+                        'unescaped double quote found'
+                        % (self.instance.fpath, self.current_line)
+                    )
                 self.current_token = line
                 self.process(keywords[tokens[0]])
                 continue
@@ -1320,9 +1426,11 @@
             elif line[:1] == '"':
                 # we are on a continuation line
                 if re.search(r'([^\\]|^)"', line[1:-1]):
-                    raise IOError('Syntax error in po file %s (line %s): '
-                                  'unescaped double quote found' %
-                                  (self.instance.fpath, self.current_line))
+                    raise IOError(
+                        'Syntax error in po file %s (line %s): '
+                        'unescaped double quote found'
+                        % (self.instance.fpath, self.current_line)
+                    )
                 self.process('mc')
 
             elif line[:7] == 'msgstr[':
@@ -1349,8 +1457,10 @@
 
             elif tokens[0] == '#|':
                 if nb_tokens <= 1:
-                    raise IOError('Syntax error in po file %s (line %s)' %
-                                  (self.instance.fpath, self.current_line))
+                    raise IOError(
+                        'Syntax error in po file %s (line %s)'
+                        % (self.instance.fpath, self.current_line)
+                    )
 
                 # Remove the marker and any whitespace right after that.
                 line = line[2:].lstrip()
@@ -1363,30 +1473,38 @@
 
                 if nb_tokens == 2:
                     # Invalid continuation line.
-                    raise IOError('Syntax error in po file %s (line %s): '
-                                  'invalid continuation line' %
-                                  (self.instance.fpath, self.current_line))
+                    raise IOError(
+                        'Syntax error in po file %s (line %s): '
+                        'invalid continuation line'
+                        % (self.instance.fpath, self.current_line)
+                    )
 
                 # we are on a "previous translation" comment line,
                 if tokens[1] not in prev_keywords:
                     # Unknown keyword in previous translation comment.
-                    raise IOError('Syntax error in po file %s (line %s): '
-                                  'unknown keyword %s' %
-                                  (self.instance.fpath, self.current_line,
-                                   tokens[1]))
+                    raise IOError(
+                        'Syntax error in po file %s (line %s): '
+                        'unknown keyword %s'
+                        % (self.instance.fpath, self.current_line, tokens[1])
+                    )
 
                 # Remove the keyword and any whitespace
                 # between it and the starting quote.
-                line = line[len(tokens[1]):].lstrip()
+                line = line[len(tokens[1]) :].lstrip()
                 self.current_token = line
                 self.process(prev_keywords[tokens[1]])
 
             else:
-                raise IOError('Syntax error in po file %s (line %s)' %
-                              (self.instance.fpath, self.current_line))
+                raise IOError(
+                    'Syntax error in po file %s (line %s)'
+                    % (self.instance.fpath, self.current_line)
+                )
 
-        if self.current_entry and len(tokens) > 0 and \
-           not tokens[0].startswith('#'):
+        if (
+            self.current_entry
+            and len(tokens) > 0
+            and not tokens[0].startswith('#')
+        ):
             # since entries are added when another entry is found, we must add
             # the last entry here (only if there are lines). Trailing comments
             # are ignored
@@ -1449,8 +1567,9 @@
             if action():
                 self.current_state = state
         except Exception:
-            raise IOError('Syntax error in po file (line %s)' %
-                          self.current_line)
+            raise IOError(
+                'Syntax error in po file (line %s)' % self.current_line
+            )
 
     # state handlers
 
@@ -1507,8 +1626,9 @@
         if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
             self.current_entry = POEntry(linenum=self.current_line)
-        self.current_entry.flags += [c.strip() for c in
-                                     self.current_token[3:].split(',')]
+        self.current_entry.flags += [
+            c.strip() for c in self.current_token[3:].split(',')
+        ]
         return True
 
     def handle_pp(self):
@@ -1516,8 +1636,9 @@
         if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
             self.current_entry = POEntry(linenum=self.current_line)
-        self.current_entry.previous_msgid_plural = \
-            unescape(self.current_token[1:-1])
+        self.current_entry.previous_msgid_plural = unescape(
+            self.current_token[1:-1]
+        )
         return True
 
     def handle_pm(self):
@@ -1525,8 +1646,7 @@
         if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
             self.current_entry = POEntry(linenum=self.current_line)
-        self.current_entry.previous_msgid = \
-            unescape(self.current_token[1:-1])
+        self.current_entry.previous_msgid = unescape(self.current_token[1:-1])
         return True
 
     def handle_pc(self):
@@ -1534,8 +1654,7 @@
         if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
             self.current_entry = POEntry(linenum=self.current_line)
-        self.current_entry.previous_msgctxt = \
-            unescape(self.current_token[1:-1])
+        self.current_entry.previous_msgctxt = unescape(self.current_token[1:-1])
         return True
 
     def handle_ct(self):
@@ -1568,7 +1687,7 @@
     def handle_mx(self):
         """Handle a msgstr plural."""
         index = self.current_token[7]
-        value = self.current_token[self.current_token.find('"') + 1:-1]
+        value = self.current_token[self.current_token.find('"') + 1 : -1]
         self.current_entry.msgstr_plural[int(index)] = unescape(value)
         self.msgstr_index = int(index)
         return True
@@ -1594,6 +1713,8 @@
             self.current_entry.previous_msgctxt += token
         # don't change the current state
         return False
+
+
 # }}}
 # class _MOFileParser {{{
 
@@ -1628,7 +1749,7 @@
         self.instance = klass(
             fpath=mofile,
             encoding=kwargs.get('encoding', default_encoding),
-            check_for_duplicates=kwargs.get('check_for_duplicates', False)
+            check_for_duplicates=kwargs.get('check_for_duplicates', False),
         )
 
     def __del__(self):
@@ -1699,8 +1820,9 @@
                 entry = self._build_entry(
                     msgid=msgid_tokens[0],
                     msgid_plural=msgid_tokens[1],
-                    msgstr_plural=dict((k, v) for k, v in
-                                       enumerate(msgstr.split(b('\0'))))
+                    msgstr_plural=dict(
+                        (k, v) for k, v in enumerate(msgstr.split(b('\0')))
+                    ),
                 )
             else:
                 entry = self._build_entry(msgid=msgid, msgstr=msgstr)
@@ -1709,8 +1831,9 @@
         self.fhandle.close()
         return self.instance
 
-    def _build_entry(self, msgid, msgstr=None, msgid_plural=None,
-                     msgstr_plural=None):
+    def _build_entry(
+        self, msgid, msgstr=None, msgid_plural=None, msgstr_plural=None
+    ):
         msgctxt_msgid = msgid.split(b('\x04'))
         encoding = self.instance.encoding
         if len(msgctxt_msgid) > 1:
@@ -1740,6 +1863,8 @@
         if len(tup) == 1:
             return tup[0]
         return tup
+
+
 # }}}
 # class TextWrapper {{{
 
@@ -1749,6 +1874,7 @@
     Subclass of textwrap.TextWrapper that backports the
     drop_whitespace option.
     """
+
     def __init__(self, *args, **kwargs):
         drop_whitespace = kwargs.pop('drop_whitespace', True)
         textwrap.TextWrapper.__init__(self, *args, **kwargs)
@@ -1823,6 +1949,8 @@
                 lines.append(indent + ''.join(cur_line))
 
         return lines
+
+
 # }}}
 # function wrap() {{{
 
@@ -1835,4 +1963,5 @@
         return TextWrapper(width=width, **kwargs).wrap(text)
     return textwrap.wrap(text, width=width, **kwargs)
 
+
 # }}}
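
The ``_build_entry`` splitting above relies on two separator bytes from the
GNU gettext MO format: EOT (``\x04``) between msgctxt and msgid, and NUL
(``\x00``) between plural forms. A minimal standalone sketch of that split
(``split_mo_entry`` is illustrative, not polib's API):

    def split_mo_entry(msgid_field, msgstr_field):
        # context, when present, is prepended as b"msgctxt\x04msgid"
        msgctxt = None
        if b'\x04' in msgid_field:
            msgctxt, msgid_field = msgid_field.split(b'\x04', 1)
        # plural forms are NUL-joined: b"singular\x00plural"
        msgids = msgid_field.split(b'\x00')
        msgstrs = msgstr_field.split(b'\x00')
        return msgctxt, msgids, msgstrs

    # an entry with a context and two plural forms
    ctx, ids, strs = split_mo_entry(b'menu\x04file\x00files',
                                    b'fichier\x00fichiers')
    assert (ctx, ids) == (b'menu', [b'file', b'files'])
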
--- a/mercurial/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,308 +7,7 @@
 
 from __future__ import absolute_import
 
-import sys
-
 # Allow 'from mercurial import demandimport' to keep working.
 import hgdemandimport
+
 demandimport = hgdemandimport
-
-__all__ = []
-
-# Python 3 uses a custom module loader that transforms source code between
-# source file reading and compilation. This is done by registering a custom
-# finder that changes the spec for Mercurial modules to use a custom loader.
-if sys.version_info[0] >= 3:
-    import importlib
-    import importlib.abc
-    import io
-    import token
-    import tokenize
-
-    class hgpathentryfinder(importlib.abc.MetaPathFinder):
-        """A sys.meta_path finder that uses a custom module loader."""
-        def find_spec(self, fullname, path, target=None):
-            # Only handle Mercurial-related modules.
-            if not fullname.startswith(('mercurial.', 'hgext.')):
-                return None
-            # don't try to parse binary
-            if fullname.startswith('mercurial.cext.'):
-                return None
-            # third-party packages are expected to be dual-version clean
-            if fullname.startswith('mercurial.thirdparty'):
-                return None
-            # zstd is already dual-version clean, don't try and mangle it
-            if fullname.startswith('mercurial.zstd'):
-                return None
-            # rustext is built for the right python version,
-            # don't try and mangle it
-            if fullname.startswith('mercurial.rustext'):
-                return None
-            # pywatchman is already dual-version clean, don't try and mangle it
-            if fullname.startswith('hgext.fsmonitor.pywatchman'):
-                return None
-
-            # Try to find the module using other registered finders.
-            spec = None
-            for finder in sys.meta_path:
-                if finder == self:
-                    continue
-
-                # Originally the API was a `find_module` method, but it was
-                # renamed to `find_spec` in python 3.4, with a new `target`
-                # argument.
-                find_spec_method = getattr(finder, 'find_spec', None)
-                if find_spec_method:
-                    spec = find_spec_method(fullname, path, target=target)
-                else:
-                    spec = finder.find_module(fullname)
-                    if spec is not None:
-                        spec = importlib.util.spec_from_loader(fullname, spec)
-                if spec:
-                    break
-
-            # This is a Mercurial-related module but we couldn't find it
-            # using the previously-registered finders. This likely means
-            # the module doesn't exist.
-            if not spec:
-                return None
-
-            # TODO need to support loaders from alternate specs, like zip
-            # loaders.
-            loader = hgloader(spec.name, spec.origin)
-            # Can't use util.safehasattr here because that would require
-            # importing util, and we're in import code.
-            if hasattr(spec.loader, 'loader'): # hasattr-py3-only
-                # This is a nested loader (maybe a lazy loader?)
-                spec.loader.loader = loader
-            else:
-                spec.loader = loader
-            return spec
-
-    def replacetokens(tokens, fullname):
-        """Transform a stream of tokens from raw to Python 3.
-
-        It is called by the custom module loading machinery to rewrite
-        source/tokens between source decoding and compilation.
-
-        Returns a generator of possibly rewritten tokens.
-
-        The input token list may be mutated as part of processing. However,
-        its changes do not necessarily match the output token stream.
-
-        REMEMBER TO CHANGE ``BYTECODEHEADER`` WHEN CHANGING THIS FUNCTION
-        OR CACHED FILES WON'T GET INVALIDATED PROPERLY.
-        """
-        futureimpline = False
-
-        # The following utility functions access the tokens list and i index of
-        # the for i, t in enumerate(tokens) loop below
-        def _isop(j, *o):
-            """Assert that tokens[j] is an OP with one of the given values"""
-            try:
-                return tokens[j].type == token.OP and tokens[j].string in o
-            except IndexError:
-                return False
-
-        def _findargnofcall(n):
-            """Find arg n of a call expression (start at 0)
-
-            Returns the index of the first token of that argument, or None
-            if there are not that many arguments.
-
-            Assumes that token[i + 1] is '('.
-
-            """
-            nested = 0
-            for j in range(i + 2, len(tokens)):
-                if _isop(j, ')', ']', '}'):
-                    # end of call, tuple, subscription or dict / set
-                    nested -= 1
-                    if nested < 0:
-                        return None
-                elif n == 0:
-                    # this is the starting position of arg
-                    return j
-                elif _isop(j, '(', '[', '{'):
-                    nested += 1
-                elif _isop(j, ',') and nested == 0:
-                    n -= 1
-
-            return None
-
-        def _ensureunicode(j):
-            """Make sure the token at j is a unicode string
-
-            This rewrites a string token to include the unicode literal prefix
-            so the string transformer won't add the byte prefix.
-
-            Ignores tokens that are not strings. Assumes bounds checking has
-            already been done.
-
-            """
-            st = tokens[j]
-            if st.type == token.STRING and st.string.startswith(("'", '"')):
-                tokens[j] = st._replace(string='u%s' % st.string)
-
-        for i, t in enumerate(tokens):
-            # Convert most string literals to byte literals. String literals
-            # in Python 2 are bytes. String literals in Python 3 are unicode.
-            # Most strings in Mercurial are bytes and unicode strings are rare.
-            # Rather than rewrite all string literals to use ``b''`` to indicate
-            # byte strings, we apply this token transformer to insert the ``b``
-            # prefix nearly everywhere.
-            if t.type == token.STRING:
-                s = t.string
-
-                # Preserve docstrings as string literals. This is inconsistent
-                # with regular unprefixed strings. However, the
-                # "from __future__" parsing (which allows a module docstring to
-                # exist before it) doesn't properly handle the docstring if it
-                # is b''' prefixed, leading to a SyntaxError. We leave all
-                # docstrings as unprefixed to avoid this. This means Mercurial
-                # components touching docstrings need to handle unicode,
-                # unfortunately.
-                if s[0:3] in ("'''", '"""'):
-                    yield t
-                    continue
-
-                # If the first character isn't a quote, it is likely a string
-                # prefixing character (such as 'b', 'u', or 'r'). Ignore.
-                if s[0] not in ("'", '"'):
-                    yield t
-                    continue
-
-                # String literal. Prefix to make a b'' string.
-                yield t._replace(string='b%s' % t.string)
-                continue
-
-            # Insert compatibility imports at "from __future__ import" line.
-            # No '\n' should be added to preserve line numbers.
-            if (t.type == token.NAME and t.string == 'import' and
-                all(u.type == token.NAME for u in tokens[i - 2:i]) and
-                [u.string for u in tokens[i - 2:i]] == ['from', '__future__']):
-                futureimpline = True
-            if t.type == token.NEWLINE and futureimpline:
-                futureimpline = False
-                if fullname == 'mercurial.pycompat':
-                    yield t
-                    continue
-                r, c = t.start
-                l = (b'; from mercurial.pycompat import '
-                     b'delattr, getattr, hasattr, setattr, '
-                     b'open, unicode\n')
-                for u in tokenize.tokenize(io.BytesIO(l).readline):
-                    if u.type in (tokenize.ENCODING, token.ENDMARKER):
-                        continue
-                    yield u._replace(
-                        start=(r, c + u.start[1]), end=(r, c + u.end[1]))
-                continue
-
-            # This looks like a function call.
-            if t.type == token.NAME and _isop(i + 1, '('):
-                fn = t.string
-
-                # *attr() builtins don't accept byte strings as 2nd argument.
-                if (fn in ('getattr', 'setattr', 'hasattr', 'safehasattr') and
-                        not _isop(i - 1, '.')):
-                    arg1idx = _findargnofcall(1)
-                    if arg1idx is not None:
-                        _ensureunicode(arg1idx)
-
-                # .encode() and .decode() on str/bytes/unicode don't accept
-                # byte strings on Python 3.
-                elif fn in ('encode', 'decode') and _isop(i - 1, '.'):
-                    for argn in range(2):
-                        argidx = _findargnofcall(argn)
-                        if argidx is not None:
-                            _ensureunicode(argidx)
-
-                # This changes iteritems/itervalues to items/values, as they
-                # are not present in the Python 3 world.
-                elif (fn in ('iteritems', 'itervalues') and
-                      not (tokens[i - 1].type == token.NAME and
-                           tokens[i - 1].string == 'def')):
-                    yield t._replace(string=fn[4:])
-                    continue
-
-            # Emit unmodified token.
-            yield t
-
-    # Header to add to bytecode files. This MUST be changed when
-    # ``replacetoken`` or any mechanism that changes semantics of module
-    # loading is changed. Otherwise cached bytecode may get loaded without
-    # the new transformation mechanisms applied.
-    BYTECODEHEADER = b'HG\x00\x0c'
-
-    class hgloader(importlib.machinery.SourceFileLoader):
-        """Custom module loader that transforms source code.
-
-        When the source code is converted to a code object, we transform
-        certain patterns to be Python 3 compatible. This allows us to write code
-        that is natively Python 2 and compatible with Python 3 without
-        making the code excessively ugly.
-
-        We do this by transforming the token stream between parse and compile.
-
-        Implementing transformations invalidates caching assumptions made
-        by the built-in importer. The built-in importer stores a header on
-        saved bytecode files indicating the Python/bytecode version. If the
-        version changes, the cached bytecode is ignored. The Mercurial
-        transformations could change at any time. This means we need to check
-        that cached bytecode was generated with the current transformation
-        code or there could be a mismatch between cached bytecode and what
-        would be generated from this class.
-
-        We supplement the bytecode caching layer by wrapping ``get_data``
-        and ``set_data``. These functions are called when the
-        ``SourceFileLoader`` retrieves and saves bytecode cache files,
-        respectively. We simply add an additional header on the file. As
-        long as the version in this file is changed when semantics change,
-        cached bytecode should be invalidated when transformations change.
-
-        The added header has the form ``HG<VERSION>``. That is a literal
-        ``HG`` with 2 binary bytes indicating the transformation version.
-        """
-        def get_data(self, path):
-            data = super(hgloader, self).get_data(path)
-
-            if not path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
-                return data
-
-            # There should be a header indicating the Mercurial transformation
-            # version. If it doesn't exist or doesn't match the current version,
-            # we raise an OSError because that is what
-            # ``SourceFileLoader.get_code()`` expects when loading bytecode
-            # paths to indicate the cached file is "bad."
-            if data[0:2] != b'HG':
-                raise OSError('no hg header')
-            if data[0:4] != BYTECODEHEADER:
-                raise OSError('hg header version mismatch')
-
-            return data[4:]
-
-        def set_data(self, path, data, *args, **kwargs):
-            if path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
-                data = BYTECODEHEADER + data
-
-            return super(hgloader, self).set_data(path, data, *args, **kwargs)
-
-        def source_to_code(self, data, path):
-            """Perform token transformation before compilation."""
-            buf = io.BytesIO(data)
-            tokens = tokenize.tokenize(buf.readline)
-            data = tokenize.untokenize(replacetokens(list(tokens), self.name))
-            # Python's built-in importer strips frames from exceptions raised
-            # for this code. Unfortunately, that mechanism isn't extensible
-            # and our frame will be blamed for the import failure. There
-            # are extremely hacky ways to do frame stripping. We haven't
-            # implemented them because they are very ugly.
-            return super(hgloader, self).source_to_code(data, path)
-
-    # We automagically register our custom importer as a side-effect of
-    # loading. This is necessary to ensure that any entry points are able
-    # to import mercurial.* modules without having to perform this
-    # registration themselves.
-    if not any(isinstance(x, hgpathentryfinder) for x in sys.meta_path):
-        # meta_path is used before any implicit finders and before sys.path.
-        sys.meta_path.insert(0, hgpathentryfinder())
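
The block removed above was Mercurial's Python 3 source transformer: a meta
path finder/loader pair that byteified string literals at import time and
stamped bytecode caches with ``BYTECODEHEADER`` so stale transforms could not
be loaded. The core token rewrite can be reproduced with the stdlib alone; a
simplified sketch (unlike the removed ``replacetokens``, it does not spare
docstrings or patch ``*attr()`` calls):

    import io
    import token
    import tokenize

    def byteify(source_bytes):
        """Prefix unprefixed string literals with b'' via token rewriting."""
        out = []
        for tok in tokenize.tokenize(io.BytesIO(source_bytes).readline):
            if tok.type == token.STRING and tok.string[0] in "'\"":
                tok = tok._replace(string='b' + tok.string)
            out.append(tok)
        # untokenize tolerates the changed token lengths; spacing may shift
        return tokenize.untokenize(out)

    print(byteify(b"x = 'literal'").decode('utf-8'))
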
--- a/mercurial/ancestor.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/ancestor.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,6 +18,7 @@
 
 parsers = policy.importmod(r'parsers')
 
+
 def commonancestorsheads(pfunc, *nodes):
     """Returns a set with the heads of all common ancestors of all nodes,
     heads(::nodes[0] and ::nodes[1] and ...) .
@@ -74,6 +75,7 @@
                 seen[p] = sv
     return gca
 
+
 def ancestors(pfunc, *orignodes):
     """
     Returns the common ancestors of a and b that are furthest from a
@@ -81,6 +83,7 @@
 
     pfunc must return a list of parent vertices for a given vertex.
     """
+
     def deepest(nodes):
         interesting = {}
         count = max(nodes) + 1
@@ -143,12 +146,14 @@
         return gca
     return deepest(gca)
 
+
 class incrementalmissingancestors(object):
     '''persistent state used to calculate missing ancestors incrementally
 
     Although similar in spirit to lazyancestors below, this is a separate class
     because trying to support contains and missingancestors operations with the
     same internal data structures adds needless complexity.'''
+
     def __init__(self, pfunc, bases):
         self.bases = set(bases)
         if not self.bases:
@@ -266,6 +271,7 @@
         missing.reverse()
         return missing
 
+
 # Extracted from lazyancestors.__iter__ to avoid a reference cycle
 def _lazyancestorsiter(parentrevs, initrevs, stoprev, inclusive):
     seen = {nullrev}
@@ -310,6 +316,7 @@
             heappush(visit, -p2)
             see(p2)
 
+
 class lazyancestors(object):
     def __init__(self, pfunc, revs, stoprev=0, inclusive=False):
         """Create a new object generating ancestors for the given revs. Does
@@ -329,10 +336,9 @@
         self._inclusive = inclusive
 
         self._containsseen = set()
-        self._containsiter = _lazyancestorsiter(self._parentrevs,
-                                                self._initrevs,
-                                                self._stoprev,
-                                                self._inclusive)
+        self._containsiter = _lazyancestorsiter(
+            self._parentrevs, self._initrevs, self._stoprev, self._inclusive
+        )
 
     def __nonzero__(self):
         """False if the set is empty, True otherwise."""
@@ -355,8 +361,9 @@
 
         If inclusive is True, the source revisions are also yielded. The
         reverse revision number order is still enforced."""
-        return _lazyancestorsiter(self._parentrevs, self._initrevs,
-                                  self._stoprev, self._inclusive)
+        return _lazyancestorsiter(
+            self._parentrevs, self._initrevs, self._stoprev, self._inclusive
+        )
 
     def __contains__(self, target):
         """Test whether target is an ancestor of self._initrevs."""
@@ -387,8 +394,8 @@
             self._containsiter = None
             return False
 
+
 class rustlazyancestors(object):
-
     def __init__(self, index, revs, stoprev=0, inclusive=False):
         self._index = index
         self._stoprev = stoprev
@@ -400,7 +407,8 @@
         self._initrevs = initrevs = list(revs)
 
         self._containsiter = parsers.rustlazyancestors(
-            index, initrevs, stoprev, inclusive)
+            index, initrevs, stoprev, inclusive
+        )
 
     def __nonzero__(self):
         """False if the set is empty, True otherwise.
@@ -415,10 +423,9 @@
             return False
 
     def __iter__(self):
-        return parsers.rustlazyancestors(self._index,
-                                         self._initrevs,
-                                         self._stoprev,
-                                         self._inclusive)
+        return parsers.rustlazyancestors(
+            self._index, self._initrevs, self._stoprev, self._inclusive
+        )
 
     def __contains__(self, target):
         return target in self._containsiter
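
Both the Python and Rust iterators above walk ancestors in reverse revision
order by pushing negated revision numbers onto Python's min-heap, turning it
into a max-heap. A self-contained, simplified sketch of that traversal
(``lazy_ancestors`` and the toy graph are illustrative, not the module's API;
the real code also handles ``stoprev`` and inclusive init revisions more
carefully):

    from heapq import heappop, heappush

    def lazy_ancestors(parentrevs, initrevs, inclusive=False):
        # negate revisions so heappop yields the highest revision first
        visit = []
        seen = set(initrevs)
        for r in initrevs:
            heappush(visit, -r)
        while visit:
            current = -heappop(visit)
            if inclusive or current not in initrevs:
                yield current
            for p in parentrevs(current):
                if p >= 0 and p not in seen:  # p < 0 stands in for nullrev
                    seen.add(p)
                    heappush(visit, -p)

    # toy DAG: rev -> parents (-1 means null)
    graph = {0: [-1], 1: [0], 2: [0], 3: [1, 2]}
    print(list(lazy_ancestors(graph.get, [3])))  # [2, 1, 0]
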
--- a/mercurial/archival.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/archival.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,9 +16,8 @@
 import zlib
 
 from .i18n import _
-from .node import (
-    nullrev,
-)
+from .node import nullrev
+from .pycompat import open
 
 from . import (
     error,
@@ -29,11 +28,13 @@
     util,
     vfs as vfsmod,
 )
+
 stringio = util.stringio
 
 # from unzip source code:
 _UNX_IFREG = 0x8000
-_UNX_IFLNK = 0xa000
+_UNX_IFLNK = 0xA000
+
 
 def tidyprefix(dest, kind, prefix):
     '''choose prefix to use for names in archive.  make sure prefix is
@@ -43,44 +44,49 @@
         prefix = util.normpath(prefix)
     else:
         if not isinstance(dest, bytes):
-            raise ValueError('dest must be string if no prefix')
+            raise ValueError(b'dest must be string if no prefix')
         prefix = os.path.basename(dest)
         lower = prefix.lower()
         for sfx in exts.get(kind, []):
             if lower.endswith(sfx):
-                prefix = prefix[:-len(sfx)]
+                prefix = prefix[: -len(sfx)]
                 break
     lpfx = os.path.normpath(util.localpath(prefix))
     prefix = util.pconvert(lpfx)
-    if not prefix.endswith('/'):
-        prefix += '/'
+    if not prefix.endswith(b'/'):
+        prefix += b'/'
     # Drop the leading '.' path component if present, so Windows can read the
     # zip files (issue4634)
-    if prefix.startswith('./'):
+    if prefix.startswith(b'./'):
         prefix = prefix[2:]
-    if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
-        raise error.Abort(_('archive prefix contains illegal components'))
+    if prefix.startswith(b'../') or os.path.isabs(lpfx) or b'/../' in prefix:
+        raise error.Abort(_(b'archive prefix contains illegal components'))
     return prefix
 
+
 exts = {
-    'tar': ['.tar'],
-    'tbz2': ['.tbz2', '.tar.bz2'],
-    'tgz': ['.tgz', '.tar.gz'],
-    'zip': ['.zip'],
-    }
+    b'tar': [b'.tar'],
+    b'tbz2': [b'.tbz2', b'.tar.bz2'],
+    b'tgz': [b'.tgz', b'.tar.gz'],
+    b'zip': [b'.zip'],
+    b'txz': [b'.txz', b'.tar.xz'],
+}
+
 
 def guesskind(dest):
-    for kind, extensions in exts.iteritems():
+    for kind, extensions in pycompat.iteritems(exts):
         if any(dest.endswith(ext) for ext in extensions):
             return kind
     return None
 
+
 def _rootctx(repo):
     # repo[0] may be hidden
     for rev in repo:
         return repo[rev]
     return repo[nullrev]
 
+
 # {tags} on ctx includes local tags and 'tip', with no current way to limit
 # that to global tags.  Therefore, use {latesttag} as a substitute when
 # the distance is 0, since that will be the list of global tags on ctx.
@@ -93,39 +99,43 @@
                join(latesttag % "latesttag: {tag}", "\n"),
                "latesttagdistance: {latesttagdistance}",
                "changessincelatesttag: {changessincelatesttag}"))}
-'''[1:]  # drop leading '\n'
+'''[
+    1:
+]  # drop leading '\n'
+
 
 def buildmetadata(ctx):
     '''build content of .hg_archival.txt'''
     repo = ctx.repo()
 
     opts = {
-        'template': repo.ui.config('experimental', 'archivemetatemplate',
-                                   _defaultmetatemplate)
+        b'template': repo.ui.config(
+            b'experimental', b'archivemetatemplate', _defaultmetatemplate
+        )
     }
 
     out = util.stringio()
 
-    fm = formatter.formatter(repo.ui, out, 'archive', opts)
+    fm = formatter.formatter(repo.ui, out, b'archive', opts)
     fm.startitem()
     fm.context(ctx=ctx)
     fm.data(root=_rootctx(repo).hex())
 
     if ctx.rev() is None:
-        dirty = ''
+        dirty = b''
         if ctx.dirty(missing=True):
-            dirty = '+'
+            dirty = b'+'
         fm.data(dirty=dirty)
     fm.end()
 
     return out.getvalue()
 
+
 class tarit(object):
     '''write archive to tar file or stream.  can write uncompressed,
     or compressed with gzip, bzip2, or xz.'''
 
     class GzipFileWithTime(gzip.GzipFile):
-
         def __init__(self, *args, **kw):
             timestamp = None
             if r'timestamp' in kw:
@@ -137,45 +147,48 @@
             gzip.GzipFile.__init__(self, *args, **kw)
 
         def _write_gzip_header(self):
-            self.fileobj.write('\037\213')             # magic header
-            self.fileobj.write('\010')                 # compression method
+            self.fileobj.write(b'\037\213')  # magic header
+            self.fileobj.write(b'\010')  # compression method
             fname = self.name
-            if fname and fname.endswith('.gz'):
+            if fname and fname.endswith(b'.gz'):
                 fname = fname[:-3]
             flags = 0
             if fname:
                 flags = gzip.FNAME
             self.fileobj.write(pycompat.bytechr(flags))
             gzip.write32u(self.fileobj, int(self.timestamp))
-            self.fileobj.write('\002')
-            self.fileobj.write('\377')
+            self.fileobj.write(b'\002')
+            self.fileobj.write(b'\377')
             if fname:
-                self.fileobj.write(fname + '\000')
+                self.fileobj.write(fname + b'\000')
 
-    def __init__(self, dest, mtime, kind=''):
+    def __init__(self, dest, mtime, kind=b''):
         self.mtime = mtime
         self.fileobj = None
 
-        def taropen(mode, name='', fileobj=None):
-            if kind == 'gz':
+        def taropen(mode, name=b'', fileobj=None):
+            if kind == b'gz':
                 mode = mode[0:1]
                 if not fileobj:
-                    fileobj = open(name, mode + 'b')
-                gzfileobj = self.GzipFileWithTime(name,
-                                                  pycompat.sysstr(mode + 'b'),
-                                                  zlib.Z_BEST_COMPRESSION,
-                                                  fileobj, timestamp=mtime)
+                    fileobj = open(name, mode + b'b')
+                gzfileobj = self.GzipFileWithTime(
+                    name,
+                    pycompat.sysstr(mode + b'b'),
+                    zlib.Z_BEST_COMPRESSION,
+                    fileobj,
+                    timestamp=mtime,
+                )
                 self.fileobj = gzfileobj
                 return tarfile.TarFile.taropen(
-                    name, pycompat.sysstr(mode), gzfileobj)
+                    name, pycompat.sysstr(mode), gzfileobj
+                )
             else:
-                return tarfile.open(
-                    name, pycompat.sysstr(mode + kind), fileobj)
+                return tarfile.open(name, pycompat.sysstr(mode + kind), fileobj)
 
         if isinstance(dest, bytes):
-            self.z = taropen('w:', name=dest)
+            self.z = taropen(b'w:', name=dest)
         else:
-            self.z = taropen('w|', fileobj=dest)
+            self.z = taropen(b'w|', fileobj=dest)
 
     def addfile(self, name, mode, islink, data):
         name = pycompat.fsdecode(name)
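
For reference, the header that ``_write_gzip_header`` emits byte by byte
above follows RFC 1952 and can be packed in one struct call; a sketch with an
illustrative helper (flags limited to FNAME, XFL fixed at 2 and OS at 255,
exactly as written above):

    import struct

    def gzip_header(mtime, fname=None):
        flags = 0x08 if fname else 0  # FNAME: a file name field follows
        # magic, deflate method, flags, 32-bit mtime, XFL=2, OS=255
        hdr = struct.pack('<2sBBIBB',
                          b'\x1f\x8b', 8, flags, int(mtime), 2, 255)
        if fname:
            hdr += fname + b'\x00'  # NUL-terminated, '.gz' already stripped
        return hdr

    # pinning mtime is what keeps repeated archive runs byte-identical
    assert gzip_header(0, b'repo.tar')[:2] == b'\x1f\x8b'
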
@@ -198,6 +211,7 @@
         if self.fileobj:
             self.fileobj.close()
 
+
 class zipit(object):
     '''write archive to zip file or stream.  can write uncompressed,
     or compressed with deflate.'''
@@ -205,13 +219,13 @@
     def __init__(self, dest, mtime, compress=True):
         if isinstance(dest, bytes):
             dest = pycompat.fsdecode(dest)
-        self.z = zipfile.ZipFile(dest, r'w',
-                                 compress and zipfile.ZIP_DEFLATED or
-                                 zipfile.ZIP_STORED)
+        self.z = zipfile.ZipFile(
+            dest, r'w', compress and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED
+        )
 
         # Python's zipfile module emits deprecation warnings if we try
         # to store files with a date before 1980.
-        epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
+        epoch = 315532800  # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
         if mtime < epoch:
             mtime = epoch
 
@@ -232,16 +246,19 @@
         # add "extended-timestamp" extra block, because zip archives
         # without this will be extracted with unexpected timestamp,
         # if TZ is not configured as GMT
-        i.extra += struct.pack('<hhBl',
-                               0x5455,     # block type: "extended-timestamp"
-                               1 + 4,      # size of this block
-                               1,          # "modification time is present"
-                               int(self.mtime)) # last modification (UTC)
+        i.extra += struct.pack(
+            b'<hhBl',
+            0x5455,  # block type: "extended-timestamp"
+            1 + 4,  # size of this block
+            1,  # "modification time is present"
+            int(self.mtime),
+        )  # last modification (UTC)
         self.z.writestr(i, data)
 
     def done(self):
         self.z.close()
 
+
 class fileit(object):
     '''write archive as files in directory.'''
 
@@ -254,7 +271,7 @@
         if islink:
             self.opener.symlink(data, name)
             return
-        f = self.opener(name, "w", atomictemp=False)
+        f = self.opener(name, b"w", atomictemp=False)
         f.write(data)
         f.close()
         destfile = os.path.join(self.basedir, name)
@@ -265,17 +282,29 @@
     def done(self):
         pass
 
+
 archivers = {
-    'files': fileit,
-    'tar': tarit,
-    'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
-    'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
-    'uzip': lambda name, mtime: zipit(name, mtime, False),
-    'zip': zipit,
-    }
+    b'files': fileit,
+    b'tar': tarit,
+    b'tbz2': lambda name, mtime: tarit(name, mtime, b'bz2'),
+    b'tgz': lambda name, mtime: tarit(name, mtime, b'gz'),
+    b'txz': lambda name, mtime: tarit(name, mtime, b'xz'),
+    b'uzip': lambda name, mtime: zipit(name, mtime, False),
+    b'zip': zipit,
+}
+
 
-def archive(repo, dest, node, kind, decode=True, match=None,
-            prefix='', mtime=None, subrepos=False):
+def archive(
+    repo,
+    dest,
+    node,
+    kind,
+    decode=True,
+    match=None,
+    prefix=b'',
+    mtime=None,
+    subrepos=False,
+):
     '''create archive of repo as it was at node.
 
     dest can be name of directory, name of archive file, or file
@@ -295,9 +324,12 @@
     subrepos tells whether to include subrepos.
     '''
 
-    if kind == 'files':
+    if kind == b'txz' and not pycompat.ispy3:
+        raise error.Abort(_(b'xz compression is only available in Python 3'))
+
+    if kind == b'files':
         if prefix:
-            raise error.Abort(_('cannot give prefix when archiving to files'))
+            raise error.Abort(_(b'cannot give prefix when archiving to files'))
     else:
         prefix = tidyprefix(dest, kind, prefix)
 
@@ -308,7 +340,7 @@
         archiver.addfile(prefix + name, mode, islink, data)
 
     if kind not in archivers:
-        raise error.Abort(_("unknown archive type '%s'") % kind)
+        raise error.Abort(_(b"unknown archive type '%s'") % kind)
 
     ctx = repo[node]
     archiver = archivers[kind](dest, mtime or ctx.date()[0])
@@ -316,8 +348,8 @@
     if not match:
         match = scmutil.matchall(repo)
 
-    if repo.ui.configbool("ui", "archivemeta"):
-        name = '.hg_archival.txt'
+    if repo.ui.configbool(b"ui", b"archivemeta"):
+        name = b'.hg_archival.txt'
         if match(name):
             write(name, 0o644, False, lambda: buildmetadata(ctx))
 
@@ -325,14 +357,16 @@
     total = len(files)
     if total:
         files.sort()
-        scmutil.prefetchfiles(repo, [ctx.rev()],
-                              scmutil.matchfiles(repo, files))
-        progress = repo.ui.makeprogress(_('archiving'), unit=_('files'),
-                                        total=total)
+        scmutil.prefetchfiles(
+            repo, [ctx.rev()], scmutil.matchfiles(repo, files)
+        )
+        progress = repo.ui.makeprogress(
+            _(b'archiving'), unit=_(b'files'), total=total
+        )
         progress.update(0)
         for f in files:
             ff = ctx.flags(f)
-            write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, ctx[f].data)
+            write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, ctx[f].data)
             progress.increment(item=f)
         progress.complete()
 
@@ -340,11 +374,11 @@
         for subpath in sorted(ctx.substate):
             sub = ctx.workingsub(subpath)
             submatch = matchmod.subdirmatcher(subpath, match)
-            subprefix = prefix + subpath + '/'
+            subprefix = prefix + subpath + b'/'
             total += sub.archive(archiver, subprefix, submatch, decode)
 
     if total == 0:
-        raise error.Abort(_('no files match the archive pattern'))
+        raise error.Abort(_(b'no files match the archive pattern'))
 
     archiver.done()
     return total
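
The new ``txz`` kind above bottoms out in ``tarfile`` with mode ``w:xz`` (or
``w|xz`` for streams); since the ``lzma`` module ships only with Python 3,
``archive()`` aborts early on Python 2. A standalone sketch (``write_txz`` is
an illustrative helper, not Mercurial's API):

    import io
    import sys
    import tarfile

    def write_txz(name, files):
        """Write (name, data) pairs into an xz-compressed tarball."""
        if sys.version_info[0] < 3:
            raise RuntimeError('xz compression requires Python 3')
        with tarfile.open(name, 'w:xz') as tf:
            for fname, data in files:
                info = tarfile.TarInfo(fname)
                info.size = len(data)
                tf.addfile(info, io.BytesIO(data))

    write_txz('snapshot.tar.xz', [('hello.txt', b'hello\n')])
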
--- a/mercurial/bookmarks.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/bookmarks.py	Mon Oct 21 11:09:48 2019 -0400
@@ -17,6 +17,7 @@
     short,
     wdirid,
 )
+from .pycompat import getattr
 from . import (
     encoding,
     error,
@@ -31,16 +32,19 @@
 # until 3.5, bookmarks.current was the advertised name, not
 # bookmarks.active, so we must use both to avoid breaking old
 # custom styles
-activebookmarklabel = 'bookmarks.active bookmarks.current'
+activebookmarklabel = b'bookmarks.active bookmarks.current'
 
-BOOKMARKS_IN_STORE_REQUIREMENT = 'bookmarksinstore'
+BOOKMARKS_IN_STORE_REQUIREMENT = b'bookmarksinstore'
+
 
 def bookmarksinstore(repo):
     return BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements
 
+
 def bookmarksvfs(repo):
     return repo.svfs if bookmarksinstore(repo) else repo.vfs
 
+
 def _getbkfile(repo):
     """Hook so that extensions that mess with the store can hook bm storage.
 
@@ -48,9 +52,12 @@
     bookmarks or the committed ones. Other extensions (like share)
     may need to tweak this behavior further.
     """
-    fp, pending = txnutil.trypending(repo.root, bookmarksvfs(repo), 'bookmarks')
+    fp, pending = txnutil.trypending(
+        repo.root, bookmarksvfs(repo), b'bookmarks'
+    )
     return fp
 
+
 class bmstore(object):
     r"""Storage for bookmarks.
 
@@ -72,7 +79,7 @@
         self._clean = True
         self._aclean = True
         nm = repo.changelog.nodemap
-        tonode = bin # force local lookup
+        tonode = bin  # force local lookup
         try:
             with _getbkfile(repo) as bkfile:
                 for line in bkfile:
@@ -80,7 +87,7 @@
                     if not line:
                         continue
                     try:
-                        sha, refspec = line.split(' ', 1)
+                        sha, refspec = line.split(b' ', 1)
                         node = tonode(sha)
                         if node in nm:
                             refspec = encoding.tolocal(refspec)
@@ -99,11 +106,13 @@
                         # ValueError:
                         # - node in nm, for non-20-bytes entry
                         # - split(...), for string without ' '
-                        bookmarkspath = '.hg/bookmarks'
+                        bookmarkspath = b'.hg/bookmarks'
                         if bookmarksinstore(repo):
-                            bookmarkspath = '.hg/store/bookmarks'
-                        repo.ui.warn(_('malformed line in %s: %r\n')
-                                     % (bookmarkspath, pycompat.bytestr(line)))
+                            bookmarkspath = b'.hg/store/bookmarks'
+                        repo.ui.warn(
+                            _(b'malformed line in %s: %r\n')
+                            % (bookmarkspath, pycompat.bytestr(line))
+                        )
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
@@ -116,7 +125,7 @@
     @active.setter
     def active(self, mark):
         if mark is not None and mark not in self._refmap:
-            raise AssertionError('bookmark %s does not exist!' % mark)
+            raise AssertionError(b'bookmark %s does not exist!' % mark)
 
         self._active = mark
         self._aclean = False
@@ -128,7 +137,7 @@
         return iter(self._refmap)
 
     def iteritems(self):
-        return self._refmap.iteritems()
+        return pycompat.iteritems(self._refmap)
 
     def items(self):
         return self._refmap.items()
@@ -177,14 +186,10 @@
         """Return a sorted list of bookmarks pointing to the specified node"""
         return self._nodemap.get(node, [])
 
-    def changectx(self, mark):
-        node = self._refmap[mark]
-        return self._repo[node]
-
     def applychanges(self, repo, tr, changes):
         """Apply a list of changes to bookmarks
         """
-        bmchanges = tr.changes.get('bookmarks')
+        bmchanges = tr.changes.get(b'bookmarks')
         for name, node in changes:
             old = self._refmap.get(name)
             if node is None:
@@ -203,10 +208,11 @@
         """record that bookmarks have been changed in a transaction
 
         The transaction is then responsible for updating the file content."""
-        location = '' if bookmarksinstore(self._repo) else 'plain'
-        tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
-                            location=location)
-        tr.hookargs['bookmark_moved'] = '1'
+        location = b'' if bookmarksinstore(self._repo) else b'plain'
+        tr.addfilegenerator(
+            b'bookmarks', (b'bookmarks',), self._write, location=location
+        )
+        tr.hookargs[b'bookmark_moved'] = b'1'
 
     def _writerepo(self, repo):
         """Factored out for extensibility"""
@@ -222,7 +228,7 @@
             vfs = repo.vfs
             lock = repo.wlock()
         with lock:
-            with vfs('bookmarks', 'w', atomictemp=True, checkambig=True) as f:
+            with vfs(b'bookmarks', b'w', atomictemp=True, checkambig=True) as f:
                 self._write(f)
 
     def _writeactive(self):
@@ -230,25 +236,26 @@
             return
         with self._repo.wlock():
             if self._active is not None:
-                with self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
-                                   checkambig=True) as f:
+                with self._repo.vfs(
+                    b'bookmarks.current', b'w', atomictemp=True, checkambig=True
+                ) as f:
                     f.write(encoding.fromlocal(self._active))
             else:
-                self._repo.vfs.tryunlink('bookmarks.current')
+                self._repo.vfs.tryunlink(b'bookmarks.current')
         self._aclean = True
 
     def _write(self, fp):
-        for name, node in sorted(self._refmap.iteritems()):
-            fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
+        for name, node in sorted(pycompat.iteritems(self._refmap)):
+            fp.write(b"%s %s\n" % (hex(node), encoding.fromlocal(name)))
         self._clean = True
         self._repo.invalidatevolatilesets()
 
     def expandname(self, bname):
-        if bname == '.':
+        if bname == b'.':
             if self.active:
                 return self.active
             else:
-                raise error.RepoLookupError(_("no active bookmark"))
+                raise error.RepoLookupError(_(b"no active bookmark"))
         return bname
 
     def checkconflict(self, mark, force=False, target=None):
@@ -263,7 +270,7 @@
 
         If divergent bookmark are to be deleted, they will be returned as list.
         """
-        cur = self._repo['.'].node()
+        cur = self._repo[b'.'].node()
         if mark in self._refmap and not force:
             if target:
                 if self._refmap[mark] == target and target == cur:
@@ -271,9 +278,12 @@
                     return []
                 rev = self._repo[target].rev()
                 anc = self._repo.changelog.ancestors([rev])
-                bmctx = self.changectx(mark)
-                divs = [self._refmap[b] for b in self._refmap
-                        if b.split('@', 1)[0] == mark.split('@', 1)[0]]
+                bmctx = self._repo[self[mark]]
+                divs = [
+                    self._refmap[b]
+                    for b in self._refmap
+                    if b.split(b'@', 1)[0] == mark.split(b'@', 1)[0]
+                ]
 
                 # allow resolving a single divergent bookmark even if moving
                 # the bookmark across branches when a revision is specified
@@ -281,20 +291,26 @@
                 if bmctx.rev() not in anc and target in divs:
                     return divergent2delete(self._repo, [target], mark)
 
-                deletefrom = [b for b in divs
-                              if self._repo[b].rev() in anc or b == target]
+                deletefrom = [
+                    b for b in divs if self._repo[b].rev() in anc or b == target
+                ]
                 delbms = divergent2delete(self._repo, deletefrom, mark)
                 if validdest(self._repo, bmctx, self._repo[target]):
                     self._repo.ui.status(
-                        _("moving bookmark '%s' forward from %s\n") %
-                        (mark, short(bmctx.node())))
+                        _(b"moving bookmark '%s' forward from %s\n")
+                        % (mark, short(bmctx.node()))
+                    )
                     return delbms
-            raise error.Abort(_("bookmark '%s' already exists "
-                                "(use -f to force)") % mark)
-        if ((mark in self._repo.branchmap() or
-             mark == self._repo.dirstate.branch()) and not force):
             raise error.Abort(
-                _("a bookmark cannot have the name of an existing branch"))
+                _(b"bookmark '%s' already exists (use -f to force)") % mark
+            )
+        if (
+            mark in self._repo.branchmap()
+            or mark == self._repo.dirstate.branch()
+        ) and not force:
+            raise error.Abort(
+                _(b"a bookmark cannot have the name of an existing branch")
+            )
         if len(mark) > 3 and not force:
             try:
                 shadowhash = scmutil.isrevsymbol(self._repo, mark)
@@ -302,12 +318,16 @@
                 shadowhash = False
             if shadowhash:
                 self._repo.ui.warn(
-                    _("bookmark %s matches a changeset hash\n"
-                      "(did you leave a -r out of an 'hg bookmark' "
-                      "command?)\n")
-                    % mark)
+                    _(
+                        b"bookmark %s matches a changeset hash\n"
+                        b"(did you leave a -r out of an 'hg bookmark' "
+                        b"command?)\n"
+                    )
+                    % mark
+                )
         return []
 
+
 def _readactive(repo, marks):
     """
     Get the active bookmark. We can have an active bookmark that updates
@@ -316,12 +336,13 @@
     """
     # No readline() in osutil.posixfile, reading everything is
     # cheap.
-    content = repo.vfs.tryread('bookmarks.current')
-    mark = encoding.tolocal((content.splitlines() or [''])[0])
-    if mark == '' or mark not in marks:
+    content = repo.vfs.tryread(b'bookmarks.current')
+    mark = encoding.tolocal((content.splitlines() or [b''])[0])
+    if mark == b'' or mark not in marks:
         mark = None
     return mark
 
+
 def activate(repo, mark):
     """
     Set the given bookmark to be 'active', meaning that this bookmark will
@@ -331,6 +352,7 @@
     repo._bookmarks.active = mark
     repo._bookmarks._writeactive()
 
+
 def deactivate(repo):
     """
     Unset the active bookmark in this repository.
@@ -338,6 +360,7 @@
     repo._bookmarks.active = None
     repo._bookmarks._writeactive()
 
+
 def isactivewdirparent(repo):
     """
     Tell whether the 'active' bookmark (the one that follows new commits)
@@ -350,7 +373,8 @@
     mark = repo._activebookmark
     marks = repo._bookmarks
     parents = [p.node() for p in repo[None].parents()]
-    return (mark in marks and marks[mark] in parents)
+    return mark in marks and marks[mark] in parents
+
 
 def divergent2delete(repo, deletefrom, bm):
     """find divergent versions of bm on nodes in deletefrom.
@@ -358,9 +382,11 @@
     the list of bookmark to delete."""
     todelete = []
     marks = repo._bookmarks
-    divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
+    divergent = [
+        b for b in marks if b.split(b'@', 1)[0] == bm.split(b'@', 1)[0]
+    ]
     for mark in divergent:
-        if mark == '@' or '@' not in mark:
+        if mark == b'@' or b'@' not in mark:
             # can't be divergent by definition
             continue
         if mark and marks[mark] in deletefrom:
@@ -368,6 +394,7 @@
                 todelete.append(mark)
     return todelete
 
+
 def headsforactive(repo):
     """Given a repo with an active bookmark, return divergent bookmark nodes.
 
@@ -382,26 +409,29 @@
     """
     if not repo._activebookmark:
         raise ValueError(
-            'headsforactive() only makes sense with an active bookmark')
-    name = repo._activebookmark.split('@', 1)[0]
+            b'headsforactive() only makes sense with an active bookmark'
+        )
+    name = repo._activebookmark.split(b'@', 1)[0]
     heads = []
-    for mark, n in repo._bookmarks.iteritems():
-        if mark.split('@', 1)[0] == name:
+    for mark, n in pycompat.iteritems(repo._bookmarks):
+        if mark.split(b'@', 1)[0] == name:
             heads.append(n)
     return heads
 
+
 def calculateupdate(ui, repo):
     '''Return a tuple (activemark, movemarkfrom) indicating the active bookmark
     and where to move the active bookmark from, if needed.'''
     checkout, movemarkfrom = None, None
     activemark = repo._activebookmark
     if isactivewdirparent(repo):
-        movemarkfrom = repo['.'].node()
+        movemarkfrom = repo[b'.'].node()
     elif activemark:
-        ui.status(_("updating to active bookmark %s\n") % activemark)
+        ui.status(_(b"updating to active bookmark %s\n") % activemark)
         checkout = activemark
     return (checkout, movemarkfrom)
 
+
 def update(repo, parents, node):
     deletefrom = parents
     marks = repo._bookmarks
@@ -412,49 +442,55 @@
     bmchanges = []
     if marks[active] in parents:
         new = repo[node]
-        divs = [marks.changectx(b) for b in marks
-                if b.split('@', 1)[0] == active.split('@', 1)[0]]
+        divs = [
+            repo[marks[b]]
+            for b in marks
+            if b.split(b'@', 1)[0] == active.split(b'@', 1)[0]
+        ]
         anc = repo.changelog.ancestors([new.rev()])
         deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
-        if validdest(repo, marks.changectx(active), new):
+        if validdest(repo, repo[marks[active]], new):
             bmchanges.append((active, new.node()))
 
     for bm in divergent2delete(repo, deletefrom, active):
         bmchanges.append((bm, None))
 
     if bmchanges:
-        with repo.lock(), repo.transaction('bookmark') as tr:
+        with repo.lock(), repo.transaction(b'bookmark') as tr:
             marks.applychanges(repo, tr, bmchanges)
     return bool(bmchanges)
 
+
 def listbinbookmarks(repo):
     # We may try to list bookmarks on a repo type that does not
     # support it (e.g., statichttprepository).
     marks = getattr(repo, '_bookmarks', {})
 
     hasnode = repo.changelog.hasnode
-    for k, v in marks.iteritems():
+    for k, v in pycompat.iteritems(marks):
         # don't expose local divergent bookmarks
-        if hasnode(v) and ('@' not in k or k.endswith('@')):
+        if hasnode(v) and (b'@' not in k or k.endswith(b'@')):
             yield k, v
 
+
 def listbookmarks(repo):
     d = {}
     for book, node in listbinbookmarks(repo):
         d[book] = hex(node)
     return d
 
+
 def pushbookmark(repo, key, old, new):
     if bookmarksinstore(repo):
         wlock = util.nullcontextmanager()
     else:
         wlock = repo.wlock()
-    with wlock, repo.lock(), repo.transaction('bookmarks') as tr:
+    with wlock, repo.lock(), repo.transaction(b'bookmarks') as tr:
         marks = repo._bookmarks
-        existing = hex(marks.get(key, ''))
+        existing = hex(marks.get(key, b''))
         if existing != old and existing != new:
             return False
-        if new == '':
+        if new == b'':
             changes = [(key, None)]
         else:
             if new not in repo:
@@ -463,6 +499,7 @@
         marks.applychanges(repo, tr, changes)
         return True
 
+
 def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
     '''Compare bookmarks between srcmarks and dstmarks
 
@@ -480,8 +517,7 @@
 
     Each element of the lists in the result tuple is a tuple "(bookmark name,
     changeset ID on source side, changeset ID on destination
-    side)". Each changeset IDs are 40 hexadecimal digit string or
-    None.
+    side)". Each changeset ID is a binary node or None.
 
     Changeset IDs of tuples in the "addsrc", "adddst", "differ" or
     "invalid" lists may be unknown for repo.
@@ -539,6 +575,7 @@
 
     return results
 
+
 def _diverge(ui, b, path, localmarks, remotenode):
     '''Return appropriate diverged bookmark for specified ``path``
 
@@ -548,33 +585,36 @@
     This reuses an already existing one with an "@number" suffix, if it
     refers to ``remotenode``.
     '''
-    if b == '@':
-        b = ''
+    if b == b'@':
+        b = b''
     # try to use an @pathalias suffix
     # if an @pathalias already exists, we overwrite (update) it
-    if path.startswith("file:"):
+    if path.startswith(b"file:"):
         path = util.url(path).path
-    for p, u in ui.configitems("paths"):
-        if u.startswith("file:"):
+    for p, u in ui.configitems(b"paths"):
+        if u.startswith(b"file:"):
             u = util.url(u).path
         if path == u:
-            return '%s@%s' % (b, p)
+            return b'%s@%s' % (b, p)
 
     # assign a unique "@number" suffix newly
     for x in range(1, 100):
-        n = '%s@%d' % (b, x)
+        n = b'%s@%d' % (b, x)
         if n not in localmarks or localmarks[n] == remotenode:
             return n
 
     return None
 
+
 def unhexlifybookmarks(marks):
     binremotemarks = {}
     for name, node in marks.items():
         binremotemarks[name] = bin(node)
     return binremotemarks
 
-_binaryentry = struct.Struct('>20sH')
+
+_binaryentry = struct.Struct(b'>20sH')
+
 
 def binaryencode(bookmarks):
     """encode a '(bookmark, node)' iterable into a binary stream
@@ -591,11 +631,12 @@
     """
     binarydata = []
     for book, node in bookmarks:
-        if not node: # None or ''
+        if not node:  # None or ''
             node = wdirid
         binarydata.append(_binaryentry.pack(node, len(book)))
         binarydata.append(book)
-    return ''.join(binarydata)
+    return b''.join(binarydata)
+
 
 def binarydecode(stream):
     """decode a binary stream into an '(bookmark, node)' iterable
@@ -616,70 +657,91 @@
         entry = stream.read(entrysize)
         if len(entry) < entrysize:
             if entry:
-                raise error.Abort(_('bad bookmark stream'))
+                raise error.Abort(_(b'bad bookmark stream'))
             break
         node, length = _binaryentry.unpack(entry)
         bookmark = stream.read(length)
         if len(bookmark) < length:
             if entry:
-                raise error.Abort(_('bad bookmark stream'))
+                raise error.Abort(_(b'bad bookmark stream'))
         if node == wdirid:
             node = None
         books.append((bookmark, node))
     return books
 
+
 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
-    ui.debug("checking for updated bookmarks\n")
+    ui.debug(b"checking for updated bookmarks\n")
     localmarks = repo._bookmarks
-    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
+    (
+        addsrc,
+        adddst,
+        advsrc,
+        advdst,
+        diverge,
+        differ,
+        invalid,
+        same,
     ) = comparebookmarks(repo, remotemarks, localmarks)
 
     status = ui.status
     warn = ui.warn
-    if ui.configbool('ui', 'quietbookmarkmove'):
+    if ui.configbool(b'ui', b'quietbookmarkmove'):
         status = warn = ui.debug
 
     explicit = set(explicit)
     changed = []
     for b, scid, dcid in addsrc:
-        if scid in repo: # add remote bookmarks for changes we already have
-            changed.append((b, scid, status,
-                            _("adding remote bookmark %s\n") % (b)))
+        if scid in repo:  # add remote bookmarks for changes we already have
+            changed.append(
+                (b, scid, status, _(b"adding remote bookmark %s\n") % b)
+            )
         elif b in explicit:
             explicit.remove(b)
-            ui.warn(_("remote bookmark %s points to locally missing %s\n")
-                    % (b, hex(scid)[:12]))
+            ui.warn(
+                _(b"remote bookmark %s points to locally missing %s\n")
+                % (b, hex(scid)[:12])
+            )
 
     for b, scid, dcid in advsrc:
-        changed.append((b, scid, status,
-                        _("updating bookmark %s\n") % (b)))
+        changed.append((b, scid, status, _(b"updating bookmark %s\n") % b))
     # remove normal movement from explicit set
     explicit.difference_update(d[0] for d in changed)
 
     for b, scid, dcid in diverge:
         if b in explicit:
             explicit.discard(b)
-            changed.append((b, scid, status,
-                            _("importing bookmark %s\n") % (b)))
+            changed.append((b, scid, status, _(b"importing bookmark %s\n") % b))
         else:
             db = _diverge(ui, b, path, localmarks, scid)
             if db:
-                changed.append((db, scid, warn,
-                                _("divergent bookmark %s stored as %s\n") %
-                                (b, db)))
+                changed.append(
+                    (
+                        db,
+                        scid,
+                        warn,
+                        _(b"divergent bookmark %s stored as %s\n") % (b, db),
+                    )
+                )
             else:
-                warn(_("warning: failed to assign numbered name "
-                       "to divergent bookmark %s\n") % (b))
+                warn(
+                    _(
+                        b"warning: failed to assign numbered name "
+                        b"to divergent bookmark %s\n"
+                    )
+                    % b
+                )
     for b, scid, dcid in adddst + advdst:
         if b in explicit:
             explicit.discard(b)
-            changed.append((b, scid, status,
-                            _("importing bookmark %s\n") % (b)))
+            changed.append((b, scid, status, _(b"importing bookmark %s\n") % b))
     for b, scid, dcid in differ:
         if b in explicit:
             explicit.remove(b)
-            ui.warn(_("remote bookmark %s points to locally missing %s\n")
-                    % (b, hex(scid)[:12]))
+            ui.warn(
+                _(b"remote bookmark %s points to locally missing %s\n")
+                % (b, hex(scid)[:12])
+            )
 
     if changed:
         tr = trfunc()
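
The records consumed by ``binarydecode`` above are fixed-width headers
followed by the name: ``_binaryentry`` packs a 20-byte binary node and a
big-endian unsigned short holding the bookmark name's length. A round-trip
sketch (``encode_one`` is illustrative; the real encoder also substitutes
``wdirid`` for a missing node):

    import struct

    # same layout as _binaryentry above
    entry = struct.Struct('>20sH')

    def encode_one(book, node):
        return entry.pack(node, len(book)) + book

    record = encode_one(b'@', b'\x11' * 20)
    node, length = entry.unpack(record[:entry.size])
    assert record[entry.size:entry.size + length] == b'@'
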
@@ -689,15 +751,16 @@
             writer(msg)
         localmarks.applychanges(repo, tr, changes)
 
+
 def incoming(ui, repo, peer):
     '''Show bookmarks incoming from other to repo
     '''
-    ui.status(_("searching for changed bookmarks\n"))
+    ui.status(_(b"searching for changed bookmarks\n"))
 
     with peer.commandexecutor() as e:
-        remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
-            'namespace': 'bookmarks',
-        }).result())
+        remotemarks = unhexlifybookmarks(
+            e.callcommand(b'listkeys', {b'namespace': b'bookmarks',}).result()
+        )
 
     r = comparebookmarks(repo, remotemarks, repo._bookmarks)
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
@@ -708,26 +771,30 @@
     else:
         getid = lambda id: id[:12]
     if ui.verbose:
+
         def add(b, id, st):
-            incomings.append("   %-25s %s %s\n" % (b, getid(id), st))
+            incomings.append(b"   %-25s %s %s\n" % (b, getid(id), st))
+
     else:
+
         def add(b, id, st):
-            incomings.append("   %-25s %s\n" % (b, getid(id)))
+            incomings.append(b"   %-25s %s\n" % (b, getid(id)))
+
     for b, scid, dcid in addsrc:
         # i18n: "added" refers to a bookmark
-        add(b, hex(scid), _('added'))
+        add(b, hex(scid), _(b'added'))
     for b, scid, dcid in advsrc:
         # i18n: "advanced" refers to a bookmark
-        add(b, hex(scid), _('advanced'))
+        add(b, hex(scid), _(b'advanced'))
     for b, scid, dcid in diverge:
         # i18n: "diverged" refers to a bookmark
-        add(b, hex(scid), _('diverged'))
+        add(b, hex(scid), _(b'diverged'))
     for b, scid, dcid in differ:
         # i18n: "changed" refers to a bookmark
-        add(b, hex(scid), _('changed'))
+        add(b, hex(scid), _(b'changed'))
 
     if not incomings:
-        ui.status(_("no changed bookmarks found\n"))
+        ui.status(_(b"no changed bookmarks found\n"))
         return 1
 
     for s in sorted(incomings):
@@ -735,12 +802,13 @@
 
     return 0
 
+
 def outgoing(ui, repo, other):
     '''Show bookmarks outgoing from repo to other
     '''
-    ui.status(_("searching for changed bookmarks\n"))
+    ui.status(_(b"searching for changed bookmarks\n"))
 
-    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
+    remotemarks = unhexlifybookmarks(other.listkeys(b'bookmarks'))
     r = comparebookmarks(repo, repo._bookmarks, remotemarks)
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
 
@@ -750,29 +818,33 @@
     else:
         getid = lambda id: id[:12]
     if ui.verbose:
+
         def add(b, id, st):
-            outgoings.append("   %-25s %s %s\n" % (b, getid(id), st))
+            outgoings.append(b"   %-25s %s %s\n" % (b, getid(id), st))
+
     else:
+
         def add(b, id, st):
-            outgoings.append("   %-25s %s\n" % (b, getid(id)))
+            outgoings.append(b"   %-25s %s\n" % (b, getid(id)))
+
     for b, scid, dcid in addsrc:
         # i18n: "added refers to a bookmark
-        add(b, hex(scid), _('added'))
+        add(b, hex(scid), _(b'added'))
     for b, scid, dcid in adddst:
         # i18n: "deleted" refers to a bookmark
-        add(b, ' ' * 40, _('deleted'))
+        add(b, b' ' * 40, _(b'deleted'))
     for b, scid, dcid in advsrc:
         # i18n: "advanced" refers to a bookmark
-        add(b, hex(scid), _('advanced'))
+        add(b, hex(scid), _(b'advanced'))
     for b, scid, dcid in diverge:
         # i18n: "diverged" refers to a bookmark
-        add(b, hex(scid), _('diverged'))
+        add(b, hex(scid), _(b'diverged'))
     for b, scid, dcid in differ:
         # i18n: "changed" refers to a bookmark
-        add(b, hex(scid), _('changed'))
+        add(b, hex(scid), _(b'changed'))
 
     if not outgoings:
-        ui.status(_("no changed bookmarks found\n"))
+        ui.status(_(b"no changed bookmarks found\n"))
         return 1
 
     for s in sorted(outgoings):
@@ -780,20 +852,22 @@
 
     return 0
 
+
 def summary(repo, peer):
     '''Compare bookmarks between repo and other for "hg summary" output
 
     This returns "(# of incoming, # of outgoing)" tuple.
     '''
     with peer.commandexecutor() as e:
-        remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
-            'namespace': 'bookmarks',
-        }).result())
+        remotemarks = unhexlifybookmarks(
+            e.callcommand(b'listkeys', {b'namespace': b'bookmarks',}).result()
+        )
 
     r = comparebookmarks(repo, remotemarks, repo._bookmarks)
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
     return (len(addsrc), len(adddst))
 
+
 def validdest(repo, old, new):
     """Is the new bookmark destination a valid update from the old one"""
     repo = repo.unfiltered()
@@ -810,6 +884,7 @@
         # still an independent clause as it is lazier (and therefore faster)
         return old.isancestorof(new)
 
+
 def checkformat(repo, mark):
     """return a valid version of a potential bookmark name
 
@@ -817,11 +892,13 @@
     """
     mark = mark.strip()
     if not mark:
-        raise error.Abort(_("bookmark names cannot consist entirely of "
-                            "whitespace"))
-    scmutil.checknewlabel(repo, mark, 'bookmark')
+        raise error.Abort(
+            _(b"bookmark names cannot consist entirely of whitespace")
+        )
+    scmutil.checknewlabel(repo, mark, b'bookmark')
     return mark
 
+
 def delete(repo, tr, names):
     """remove a mark from the bookmark store
 
@@ -831,12 +908,13 @@
     changes = []
     for mark in names:
         if mark not in marks:
-            raise error.Abort(_("bookmark '%s' does not exist") % mark)
+            raise error.Abort(_(b"bookmark '%s' does not exist") % mark)
         if mark == repo._activebookmark:
             deactivate(repo)
         changes.append((mark, None))
     marks.applychanges(repo, tr, changes)
 
+
 def rename(repo, tr, old, new, force=False, inactive=False):
     """rename a bookmark from old to new
 
@@ -850,7 +928,7 @@
     marks = repo._bookmarks
     mark = checkformat(repo, new)
     if old not in marks:
-        raise error.Abort(_("bookmark '%s' does not exist") % old)
+        raise error.Abort(_(b"bookmark '%s' does not exist") % old)
     changes = []
     for bm in marks.checkconflict(mark, force):
         changes.append((bm, None))
@@ -859,6 +937,7 @@
     if repo._activebookmark == old and not inactive:
         activate(repo, mark)
 
+
 def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False):
     """add a list of bookmarks
 
@@ -871,14 +950,14 @@
     Raises an abort error if old is not in the bookmark store.
     """
     marks = repo._bookmarks
-    cur = repo['.'].node()
+    cur = repo[b'.'].node()
     newact = None
     changes = []
     hiddenrev = None
 
     # unhide revs if any
     if rev:
-        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
 
     for mark in names:
         mark = checkformat(repo, mark)
@@ -898,11 +977,11 @@
         changes.append((mark, tgt))
 
     if hiddenrev:
-        repo.ui.warn(_("bookmarking hidden changeset %s\n") % hiddenrev)
+        repo.ui.warn(_(b"bookmarking hidden changeset %s\n") % hiddenrev)
 
         if ctx.obsolete():
-            msg = obsutil._getfilteredreason(repo, "%s" % hiddenrev, ctx)
-            repo.ui.warn("(%s)\n" % msg)
+            msg = obsutil._getfilteredreason(repo, b"%s" % hiddenrev, ctx)
+            repo.ui.warn(b"(%s)\n" % msg)
 
     marks.applychanges(repo, tr, changes)
     if not inactive and cur == marks[newact] and not rev:
@@ -910,6 +989,7 @@
     elif cur != tgt and newact == repo._activebookmark:
         deactivate(repo)
 
+
 def _printbookmarks(ui, repo, fm, bmarks):
     """private method to print bookmarks
 
@@ -918,18 +998,25 @@
     """
     hexfn = fm.hexfunc
     if len(bmarks) == 0 and fm.isplain():
-        ui.status(_("no bookmarks set\n"))
-    for bmark, (n, prefix, label) in sorted(bmarks.iteritems()):
+        ui.status(_(b"no bookmarks set\n"))
+    for bmark, (n, prefix, label) in sorted(pycompat.iteritems(bmarks)):
         fm.startitem()
         fm.context(repo=repo)
         if not ui.quiet:
-            fm.plain(' %s ' % prefix, label=label)
-        fm.write('bookmark', '%s', bmark, label=label)
-        pad = " " * (25 - encoding.colwidth(bmark))
-        fm.condwrite(not ui.quiet, 'rev node', pad + ' %d:%s',
-                     repo.changelog.rev(n), hexfn(n), label=label)
+            fm.plain(b' %s ' % prefix, label=label)
+        fm.write(b'bookmark', b'%s', bmark, label=label)
+        pad = b" " * (25 - encoding.colwidth(bmark))
+        fm.condwrite(
+            not ui.quiet,
+            b'rev node',
+            pad + b' %d:%s',
+            repo.changelog.rev(n),
+            hexfn(n),
+            label=label,
+        )
         fm.data(active=(activebookmarklabel in label))
-        fm.plain('\n')
+        fm.plain(b'\n')
+
 
 def printbookmarks(ui, repo, fm, names=None):
     """print bookmarks by the given formatter
@@ -938,23 +1025,22 @@
     """
     marks = repo._bookmarks
     bmarks = {}
-    for bmark in (names or marks):
+    for bmark in names or marks:
         if bmark not in marks:
-            raise error.Abort(_("bookmark '%s' does not exist") % bmark)
+            raise error.Abort(_(b"bookmark '%s' does not exist") % bmark)
         active = repo._activebookmark
         if bmark == active:
-            prefix, label = '*', activebookmarklabel
+            prefix, label = b'*', activebookmarklabel
         else:
-            prefix, label = ' ', ''
+            prefix, label = b' ', b''
 
         bmarks[bmark] = (marks[bmark], prefix, label)
     _printbookmarks(ui, repo, fm, bmarks)
 
+
 def preparehookargs(name, old, new):
     if new is None:
-        new = ''
+        new = b''
     if old is None:
-        old = ''
-    return {'bookmark': name,
-            'node': hex(new),
-            'oldnode': hex(old)}
+        old = b''
+    return {b'bookmark': name, b'node': hex(new), b'oldnode': hex(old)}
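
The bookmarks.py hunks above are the tree-wide Python 3 porting pattern: literals
that reach the wire protocol or the ui layer become bytes, and dict iteration goes
through pycompat so one source tree runs on both interpreters. A minimal sketch of
that shim, assuming only the standard library (the real mercurial.pycompat module
carries much more)::

    import sys

    ispy3 = sys.version_info[0] >= 3

    if ispy3:
        def iteritems(d):
            # Python 3: items() is already a lazy view.
            return iter(d.items())

        def itervalues(d):
            return iter(d.values())
    else:
        def iteritems(d):
            # Python 2: iteritems() avoids building a list of pairs.
            return d.iteritems()

        def itervalues(d):
            return d.itervalues()

    # usage mirroring the converted call sites above:
    marks = {b'default': b'\x00' * 20}
    assert [(k, len(v)) for k, v in iteritems(marks)] == [(b'default', 20)]
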
--- a/mercurial/branchmap.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/branchmap.py	Mon Oct 21 11:09:48 2019 -0400
@@ -27,7 +27,7 @@
     stringutil,
 )
 
-subsettable = repoviewutil. subsettable
+subsettable = repoviewutil.subsettable
 
 calcsize = struct.calcsize
 pack_into = struct.pack_into
@@ -36,6 +36,7 @@
 
 class BranchMapCache(object):
     """mapping of filtered views of repo with their branchcache"""
+
     def __init__(self):
         self._per_filter = {}
 
@@ -90,7 +91,7 @@
         clbranchinfo = cl.branchinfo
         rbheads = []
         closed = []
-        for bheads in remotebranchmap.itervalues():
+        for bheads in pycompat.itervalues(remotebranchmap):
             rbheads += bheads
             for h in bheads:
                 r = clrev(h)
@@ -101,12 +102,15 @@
         if rbheads:
             rtiprev = max((int(clrev(node)) for node in rbheads))
             cache = branchcache(
-                remotebranchmap, repo[rtiprev].node(), rtiprev,
-                closednodes=closed)
+                remotebranchmap,
+                repo[rtiprev].node(),
+                rtiprev,
+                closednodes=closed,
+            )
 
             # Try to stick it as low as possible
             # filters above served are unlikely to be fetched from a clone
-            for candidate in ('base', 'immutable', 'served'):
+            for candidate in (b'base', b'immutable', b'served'):
                 rview = repo.filtered(candidate)
                 if cache.validfor(rview):
                     self._per_filter[candidate] = cache
@@ -116,16 +120,19 @@
     def clear(self):
         self._per_filter.clear()
 
+
 def _unknownnode(node):
     """ raises ValueError when branchcache found a node which does not exists
     """
     raise ValueError(r'node %s does not exist' % pycompat.sysstr(hex(node)))
 
+
 def _branchcachedesc(repo):
     if repo.filtername is not None:
-        return 'branch cache (%s)' % repo.filtername
+        return b'branch cache (%s)' % repo.filtername
     else:
-        return 'branch cache'
+        return b'branch cache'
+
 
 class branchcache(object):
     """A dict like object that hold branches heads cache.
@@ -149,8 +156,15 @@
     branch head closes a branch or not.
     """
 
-    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
-                 filteredhash=None, closednodes=None, hasnode=None):
+    def __init__(
+        self,
+        entries=(),
+        tipnode=nullid,
+        tiprev=nullrev,
+        filteredhash=None,
+        closednodes=None,
+        hasnode=None,
+    ):
         """ hasnode is a function which can be used to verify whether changelog
         has a given node or not. If it's not provided, we assume that every node
         we have exists in changelog """
@@ -214,7 +228,7 @@
         return key in self._entries
 
     def iteritems(self):
-        for k, v in self._entries.iteritems():
+        for k, v in pycompat.iteritems(self._entries):
             self._verifybranch(k)
             yield k, v
 
@@ -231,15 +245,19 @@
         try:
             f = repo.cachevfs(cls._filename(repo))
             lineiter = iter(f)
-            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
+            cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
             last, lrev = cachekey[:2]
             last, lrev = bin(last), int(lrev)
             filteredhash = None
             hasnode = repo.changelog.hasnode
             if len(cachekey) > 2:
                 filteredhash = bin(cachekey[2])
-            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
-                         hasnode=hasnode)
+            bcache = cls(
+                tipnode=last,
+                tiprev=lrev,
+                filteredhash=filteredhash,
+                hasnode=hasnode,
+            )
             if not bcache.validfor(repo):
                 # invalidate the cache
                 raise ValueError(r'tip differs')
@@ -249,9 +267,10 @@
 
         except Exception as inst:
             if repo.ui.debugflag:
-                msg = 'invalid %s: %s\n'
-                repo.ui.debug(msg % (_branchcachedesc(repo),
-                                     pycompat.bytestr(inst)))
+                msg = b'invalid %s: %s\n'
+                repo.ui.debug(
+                    msg % (_branchcachedesc(repo), pycompat.bytestr(inst))
+                )
             bcache = None
 
         finally:
@@ -264,24 +283,24 @@
         """ fully loads the branchcache by reading from the file using the line
         iterator passed"""
         for line in lineiter:
-            line = line.rstrip('\n')
+            line = line.rstrip(b'\n')
             if not line:
                 continue
-            node, state, label = line.split(" ", 2)
-            if state not in 'oc':
+            node, state, label = line.split(b" ", 2)
+            if state not in b'oc':
                 raise ValueError(r'invalid branch state')
             label = encoding.tolocal(label.strip())
             node = bin(node)
             self._entries.setdefault(label, []).append(node)
-            if state == 'c':
+            if state == b'c':
                 self._closednodes.add(node)
 
     @staticmethod
     def _filename(repo):
         """name of a branchcache file for a given repo or repoview"""
-        filename = "branch2"
+        filename = b"branch2"
         if repo.filtername:
-            filename = '%s-%s' % (filename, repo.filtername)
+            filename = b'%s-%s' % (filename, repo.filtername)
         return filename
 
     def validfor(self, repo):
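
The _load_heads and _filename changes above handle the on-disk "branch2" cache:
a first line of "<tip hex> <tip rev> [<filtered hash>]" followed by one
"<node hex> <o|c> <label>" line per branch head, with "c" flagging a closed head.
A standalone parser for the per-head lines, a sketch that substitutes binascii
for mercurial's node.bin and skips the encoding.tolocal conversion::

    from binascii import unhexlify

    def parseheadline(line):
        # "<40-char hex node> <state> <branch label>"; labels may contain spaces
        node, state, label = line.rstrip(b'\n').split(b' ', 2)
        if state not in (b'o', b'c'):
            raise ValueError('invalid branch state')
        return unhexlify(node), label, state == b'c'

    node, label, closed = parseheadline(b'00' * 20 + b' c default\n')
    assert closed and label == b'default' and len(node) == 20
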
@@ -290,9 +309,9 @@
         - False when cached tipnode is unknown or if we detect a strip.
         - True when cache is up to date or a subset of current repo."""
         try:
-            return ((self.tipnode == repo.changelog.node(self.tiprev))
-                    and (self.filteredhash ==
-                         scmutil.filteredhash(repo, self.tiprev)))
+            return (self.tipnode == repo.changelog.node(self.tiprev)) and (
+                self.filteredhash == scmutil.filteredhash(repo, self.tiprev)
+            )
         except IndexError:
             return False
 
@@ -325,44 +344,55 @@
         return heads
 
     def iterbranches(self):
-        for bn, heads in self.iteritems():
+        for bn, heads in pycompat.iteritems(self):
             yield (bn, heads) + self._branchtip(heads)
 
     def iterheads(self):
         """ returns all the heads """
         self._verifyall()
-        return self._entries.itervalues()
+        return pycompat.itervalues(self._entries)
 
     def copy(self):
         """return an deep copy of the branchcache object"""
         return type(self)(
-            self._entries, self.tipnode, self.tiprev, self.filteredhash,
-            self._closednodes)
+            self._entries,
+            self.tipnode,
+            self.tiprev,
+            self.filteredhash,
+            self._closednodes,
+        )
 
     def write(self, repo):
         try:
-            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
-            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
+            f = repo.cachevfs(self._filename(repo), b"w", atomictemp=True)
+            cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
-            f.write(" ".join(cachekey) + '\n')
+            f.write(b" ".join(cachekey) + b'\n')
             nodecount = 0
-            for label, nodes in sorted(self._entries.iteritems()):
+            for label, nodes in sorted(pycompat.iteritems(self._entries)):
                 label = encoding.fromlocal(label)
                 for node in nodes:
                     nodecount += 1
                     if node in self._closednodes:
-                        state = 'c'
+                        state = b'c'
                     else:
-                        state = 'o'
-                    f.write("%s %s %s\n" % (hex(node), state, label))
+                        state = b'o'
+                    f.write(b"%s %s %s\n" % (hex(node), state, label))
             f.close()
-            repo.ui.log('branchcache', 'wrote %s with %d labels and %d nodes\n',
-                        _branchcachedesc(repo), len(self._entries), nodecount)
+            repo.ui.log(
+                b'branchcache',
+                b'wrote %s with %d labels and %d nodes\n',
+                _branchcachedesc(repo),
+                len(self._entries),
+                nodecount,
+            )
         except (IOError, OSError, error.Abort) as inst:
             # Abort may be raised by read only opener, so log and continue
-            repo.ui.debug("couldn't write branch cache: %s\n" %
-                          stringutil.forcebytestr(inst))
+            repo.ui.debug(
+                b"couldn't write branch cache: %s\n"
+                % stringutil.forcebytestr(inst)
+            )
 
     def update(self, repo, revgen):
         """Given a branchhead cache, self, that may have extra nodes or be
@@ -390,7 +420,7 @@
         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
-        for branch, newheadrevs in newbranches.iteritems():
+        for branch, newheadrevs in pycompat.iteritems(newbranches):
             bheads = self._entries.setdefault(branch, [])
             bheadset = set(cl.rev(node) for node in bheads)
 
@@ -429,30 +459,36 @@
         self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
 
         duration = util.timer() - starttime
-        repo.ui.log('branchcache', 'updated %s in %.4f seconds\n',
-                    _branchcachedesc(repo), duration)
+        repo.ui.log(
+            b'branchcache',
+            b'updated %s in %.4f seconds\n',
+            _branchcachedesc(repo),
+            duration,
+        )
 
         self.write(repo)
 
 
 class remotebranchcache(branchcache):
     """Branchmap info for a remote connection, should not write locally"""
+
     def write(self, repo):
         pass
 
 
 # Revision branch info cache
 
-_rbcversion = '-v1'
-_rbcnames = 'rbc-names' + _rbcversion
-_rbcrevs = 'rbc-revs' + _rbcversion
+_rbcversion = b'-v1'
+_rbcnames = b'rbc-names' + _rbcversion
+_rbcrevs = b'rbc-revs' + _rbcversion
 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
-_rbcrecfmt = '>4sI'
+_rbcrecfmt = b'>4sI'
 _rbcrecsize = calcsize(_rbcrecfmt)
 _rbcnodelen = 4
-_rbcbranchidxmask = 0x7fffffff
+_rbcbranchidxmask = 0x7FFFFFFF
 _rbccloseflag = 0x80000000
 
+
 class revbranchcache(object):
     """Persistent cache, mapping from revision number to branch name and close.
     This is a low level cache, independent of filtering.
@@ -479,15 +515,16 @@
     def __init__(self, repo, readonly=True):
         assert repo.filtername is None
         self._repo = repo
-        self._names = [] # branch names in local encoding with static index
+        self._names = []  # branch names in local encoding with static index
         self._rbcrevs = bytearray()
-        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
+        self._rbcsnameslen = 0  # length of names read at _rbcsnameslen
         try:
             bndata = repo.cachevfs.read(_rbcnames)
-            self._rbcsnameslen = len(bndata) # for verification before writing
+            self._rbcsnameslen = len(bndata)  # for verification before writing
             if bndata:
-                self._names = [encoding.tolocal(bn)
-                               for bn in bndata.split('\0')]
+                self._names = [
+                    encoding.tolocal(bn) for bn in bndata.split(b'\0')
+                ]
         except (IOError, OSError):
             if readonly:
                 # don't try to use cache - fall back to the slow path
@@ -498,15 +535,18 @@
                 data = repo.cachevfs.read(_rbcrevs)
                 self._rbcrevs[:] = data
             except (IOError, OSError) as inst:
-                repo.ui.debug("couldn't read revision branch cache: %s\n" %
-                              stringutil.forcebytestr(inst))
+                repo.ui.debug(
+                    b"couldn't read revision branch cache: %s\n"
+                    % stringutil.forcebytestr(inst)
+                )
         # remember number of good records on disk
-        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
-                               len(repo.changelog))
+        self._rbcrevslen = min(
+            len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
+        )
         if self._rbcrevslen == 0:
             self._names = []
-        self._rbcnamescount = len(self._names) # number of names read at
-                                               # _rbcsnameslen
+        self._rbcnamescount = len(self._names)  # number of names read at
+        # _rbcsnameslen
 
     def _clear(self):
         self._rbcsnameslen = 0
@@ -514,7 +554,7 @@
         self._rbcnamescount = 0
         self._rbcrevslen = len(self._repo.changelog)
         self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
-        util.clearcachedproperty(self, '_namesreverse')
+        util.clearcachedproperty(self, b'_namesreverse')
 
     @util.propertycache
     def _namesreverse(self):
@@ -537,24 +577,29 @@
         # fast path: extract data from cache, use it if node is matching
         reponode = changelog.node(rev)[:_rbcnodelen]
         cachenode, branchidx = unpack_from(
-            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
+            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx
+        )
         close = bool(branchidx & _rbccloseflag)
         if close:
             branchidx &= _rbcbranchidxmask
-        if cachenode == '\0\0\0\0':
+        if cachenode == b'\0\0\0\0':
             pass
         elif cachenode == reponode:
             try:
                 return self._names[branchidx], close
             except IndexError:
                 # recover from invalid reference to unknown branch
-                self._repo.ui.debug("referenced branch names not found"
-                    " - rebuilding revision branch cache from scratch\n")
+                self._repo.ui.debug(
+                    b"referenced branch names not found"
+                    b" - rebuilding revision branch cache from scratch\n"
+                )
                 self._clear()
         else:
             # rev/node map has changed, invalidate the cache from here up
-            self._repo.ui.debug("history modification detected - truncating "
-                "revision branch cache to revision %d\n" % rev)
+            self._repo.ui.debug(
+                b"history modification detected - truncating "
+                b"revision branch cache to revision %d\n" % rev
+            )
             truncate = rbcrevidx + _rbcrecsize
             del self._rbcrevs[truncate:]
             self._rbcrevslen = min(self._rbcrevslen, truncate)
@@ -604,39 +649,42 @@
             return
         rbcrevidx = rev * _rbcrecsize
         if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
-            self._rbcrevs.extend('\0' *
-                                 (len(self._repo.changelog) * _rbcrecsize -
-                                  len(self._rbcrevs)))
+            self._rbcrevs.extend(
+                b'\0'
+                * (len(self._repo.changelog) * _rbcrecsize - len(self._rbcrevs))
+            )
         pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
         self._rbcrevslen = min(self._rbcrevslen, rev)
 
         tr = self._repo.currenttransaction()
         if tr:
-            tr.addfinalize('write-revbranchcache', self.write)
+            tr.addfinalize(b'write-revbranchcache', self.write)
 
     def write(self, tr=None):
         """Save branch cache if it is dirty."""
         repo = self._repo
         wlock = None
-        step = ''
+        step = b''
         try:
             # write the new names
             if self._rbcnamescount < len(self._names):
                 wlock = repo.wlock(wait=False)
-                step = ' names'
+                step = b' names'
                 self._writenames(repo)
 
             # write the new revs
             start = self._rbcrevslen * _rbcrecsize
             if start != len(self._rbcrevs):
-                step = ''
+                step = b''
                 if wlock is None:
                     wlock = repo.wlock(wait=False)
                 self._writerevs(repo, start)
 
         except (IOError, OSError, error.Abort, error.LockError) as inst:
-            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
-                          % (step, stringutil.forcebytestr(inst)))
+            repo.ui.debug(
+                b"couldn't write revision branch cache%s: %s\n"
+                % (step, stringutil.forcebytestr(inst))
+            )
         finally:
             if wlock is not None:
                 wlock.release()
@@ -644,20 +692,24 @@
     def _writenames(self, repo):
         """ write the new branch names to revbranchcache """
         if self._rbcnamescount != 0:
-            f = repo.cachevfs.open(_rbcnames, 'ab')
+            f = repo.cachevfs.open(_rbcnames, b'ab')
             if f.tell() == self._rbcsnameslen:
-                f.write('\0')
+                f.write(b'\0')
             else:
                 f.close()
-                repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
+                repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
                 self._rbcnamescount = 0
                 self._rbcrevslen = 0
         if self._rbcnamescount == 0:
             # before rewriting names, make sure references are removed
             repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
-            f = repo.cachevfs.open(_rbcnames, 'wb')
-        f.write('\0'.join(encoding.fromlocal(b)
-                          for b in self._names[self._rbcnamescount:]))
+            f = repo.cachevfs.open(_rbcnames, b'wb')
+        f.write(
+            b'\0'.join(
+                encoding.fromlocal(b)
+                for b in self._names[self._rbcnamescount :]
+            )
+        )
         self._rbcsnameslen = f.tell()
         f.close()
         self._rbcnamescount = len(self._names)
@@ -665,9 +717,11 @@
     def _writerevs(self, repo, start):
         """ write the new revs to revbranchcache """
         revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
-        with repo.cachevfs.open(_rbcrevs, 'ab') as f:
+        with repo.cachevfs.open(_rbcrevs, b'ab') as f:
             if f.tell() != start:
-                repo.ui.debug("truncating cache/%s to %d\n" % (_rbcrevs, start))
+                repo.ui.debug(
+                    b"truncating cache/%s to %d\n" % (_rbcrevs, start)
+                )
                 f.seek(start)
                 if f.tell() != start:
                     start = 0
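
The revision branch cache written by _writenames/_writerevs above uses a fixed
eight-byte record per revision: a four-byte node hash prefix plus a four-byte
branch-name index whose top bit marks a closed head (_rbcrecfmt, _rbccloseflag
and _rbcbranchidxmask earlier in the file). A self-contained round trip of that
record encoding, sketched apart from the cache class::

    import struct

    _rbcrecfmt = b'>4sI'          # 4-byte node prefix + unsigned 32-bit index
    _rbccloseflag = 0x80000000    # top bit set: this head closes its branch
    _rbcbranchidxmask = 0x7FFFFFFF

    def encoderec(nodeprefix, branchidx, close):
        if close:
            branchidx |= _rbccloseflag
        return struct.pack(_rbcrecfmt, nodeprefix, branchidx)

    def decoderec(record):
        nodeprefix, branchidx = struct.unpack(_rbcrecfmt, record)
        close = bool(branchidx & _rbccloseflag)
        return nodeprefix, branchidx & _rbcbranchidxmask, close

    rec = encoderec(b'\xde\xad\xbe\xef', 5, close=True)
    assert len(rec) == struct.calcsize(_rbcrecfmt) == 8
    assert decoderec(rec) == (b'\xde\xad\xbe\xef', 5, True)
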
--- a/mercurial/bundle2.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/bundle2.py	Mon Oct 21 11:09:48 2019 -0400
@@ -171,9 +171,7 @@
     url,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
 urlerr = util.urlerr
 urlreq = util.urlreq
@@ -181,42 +179,48 @@
 _pack = struct.pack
 _unpack = struct.unpack
 
-_fstreamparamsize = '>i'
-_fpartheadersize = '>i'
-_fparttypesize = '>B'
-_fpartid = '>I'
-_fpayloadsize = '>i'
-_fpartparamcount = '>BB'
+_fstreamparamsize = b'>i'
+_fpartheadersize = b'>i'
+_fparttypesize = b'>B'
+_fpartid = b'>I'
+_fpayloadsize = b'>i'
+_fpartparamcount = b'>BB'
 
 preferedchunksize = 32768
 
-_parttypeforbidden = re.compile('[^a-zA-Z0-9_:-]')
+_parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')
+
 
 def outdebug(ui, message):
     """debug regarding output stream (bundling)"""
-    if ui.configbool('devel', 'bundle2.debug'):
-        ui.debug('bundle2-output: %s\n' % message)
+    if ui.configbool(b'devel', b'bundle2.debug'):
+        ui.debug(b'bundle2-output: %s\n' % message)
+
 
 def indebug(ui, message):
     """debug on input stream (unbundling)"""
-    if ui.configbool('devel', 'bundle2.debug'):
-        ui.debug('bundle2-input: %s\n' % message)
+    if ui.configbool(b'devel', b'bundle2.debug'):
+        ui.debug(b'bundle2-input: %s\n' % message)
+
 
 def validateparttype(parttype):
     """raise ValueError if a parttype contains invalid character"""
     if _parttypeforbidden.search(parttype):
         raise ValueError(parttype)
 
+
 def _makefpartparamsizes(nbparams):
     """return a struct format to read part parameter sizes
 
     The number of parameters is variable so we need to build that format
     dynamically.
     """
-    return '>'+('BB'*nbparams)
+    return b'>' + (b'BB' * nbparams)
+
 
 parthandlermapping = {}
 
+
 def parthandler(parttype, params=()):
     """decorator that register a function as a bundle2 part handler
 
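
The struct formats converted in the hunk above define the bundle2 wire framing:
the four-byte magic, a signed 32-bit stream-parameter block size, then parts,
each introduced by a signed 32-bit header size with zero terminating the bundle.
The smallest well-formed stream those constants permit, sketched with nothing
but the formats shown::

    import struct

    _fstreamparamsize = b'>i'  # signed 32-bit size of the parameter block
    _fpartheadersize = b'>i'   # signed 32-bit part header size; 0 ends parts

    def emptybundle20():
        # magic + empty parameter block + end-of-parts marker
        return (b'HG20'
                + struct.pack(_fstreamparamsize, 0)
                + struct.pack(_fpartheadersize, 0))

    assert emptybundle20() == b'HG20' + b'\x00' * 8
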
@@ -228,14 +232,17 @@
             ...
     """
     validateparttype(parttype)
+
     def _decorator(func):
-        lparttype = parttype.lower() # enforce lower case matching.
+        lparttype = parttype.lower()  # enforce lower case matching.
         assert lparttype not in parthandlermapping
         parthandlermapping[lparttype] = func
         func.params = frozenset(params)
         return func
+
     return _decorator
 
+
 class unbundlerecords(object):
     """keep record of what happens during and unbundle
 
@@ -283,6 +290,7 @@
 
     __bool__ = __nonzero__
 
+
 class bundleoperation(object):
     """an object that represents a single bundling process
 
@@ -299,7 +307,7 @@
     * a way to construct a bundle response when applicable.
     """
 
-    def __init__(self, repo, transactiongetter, captureoutput=True, source=''):
+    def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
         self.repo = repo
         self.ui = repo.ui
         self.records = unbundlerecords()
@@ -328,13 +336,17 @@
 
     def addhookargs(self, hookargs):
         if self.hookargs is None:
-            raise error.ProgrammingError('attempted to add hookargs to '
-                                         'operation after transaction started')
+            raise error.ProgrammingError(
+                b'attempted to add hookargs to '
+                b'operation after transaction started'
+            )
         self.hookargs.update(hookargs)
 
+
 class TransactionUnavailable(RuntimeError):
     pass
 
+
 def _notransaction():
     """default method to get a transaction while processing a bundle
 
@@ -342,14 +354,15 @@
     to be created"""
     raise TransactionUnavailable()
 
+
 def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
     # transform me into unbundler.apply() as soon as the freeze is lifted
     if isinstance(unbundler, unbundle20):
-        tr.hookargs['bundle2'] = '1'
-        if source is not None and 'source' not in tr.hookargs:
-            tr.hookargs['source'] = source
-        if url is not None and 'url' not in tr.hookargs:
-            tr.hookargs['url'] = url
+        tr.hookargs[b'bundle2'] = b'1'
+        if source is not None and b'source' not in tr.hookargs:
+            tr.hookargs[b'source'] = source
+        if url is not None and b'url' not in tr.hookargs:
+            tr.hookargs[b'url'] = url
         return processbundle(repo, unbundler, lambda: tr, source=source)
     else:
         # the transactiongetter won't be used, but we might as well set it
@@ -357,6 +370,7 @@
         _processchangegroup(op, unbundler, tr, source, url, **kwargs)
         return op
 
+
 class partiterator(object):
     def __init__(self, repo, op, unbundler):
         self.repo = repo
@@ -368,13 +382,14 @@
 
     def __enter__(self):
         def func():
-            itr = enumerate(self.unbundler.iterparts())
+            itr = enumerate(self.unbundler.iterparts(), 1)
             for count, p in itr:
                 self.count = count
                 self.current = p
                 yield p
                 p.consume()
                 self.current = None
+
         self.iterator = func()
         return self.iterator
 
@@ -422,10 +437,12 @@
             if seekerror:
                 raise exc
 
-        self.repo.ui.debug('bundle2-input-bundle: %i parts total\n' %
-                           self.count)
-
-def processbundle(repo, unbundler, transactiongetter=None, op=None, source=''):
+        self.repo.ui.debug(
+            b'bundle2-input-bundle: %i parts total\n' % self.count
+        )
+
+
+def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
     """This function process a bundle, apply effect to/from a repo
 
     It iterates over each part then searches for and uses the proper handling
@@ -447,72 +464,75 @@
     # - exception catching
     unbundler.params
     if repo.ui.debugflag:
-        msg = ['bundle2-input-bundle:']
+        msg = [b'bundle2-input-bundle:']
         if unbundler.params:
-            msg.append(' %i params' % len(unbundler.params))
+            msg.append(b' %i params' % len(unbundler.params))
         if op._gettransaction is None or op._gettransaction is _notransaction:
-            msg.append(' no-transaction')
+            msg.append(b' no-transaction')
         else:
-            msg.append(' with-transaction')
-        msg.append('\n')
-        repo.ui.debug(''.join(msg))
+            msg.append(b' with-transaction')
+        msg.append(b'\n')
+        repo.ui.debug(b''.join(msg))
 
     processparts(repo, op, unbundler)
 
     return op
 
+
 def processparts(repo, op, unbundler):
     with partiterator(repo, op, unbundler) as parts:
         for part in parts:
             _processpart(op, part)
 
+
 def _processchangegroup(op, cg, tr, source, url, **kwargs):
     ret = cg.apply(op.repo, tr, source, url, **kwargs)
-    op.records.add('changegroup', {
-        'return': ret,
-    })
+    op.records.add(b'changegroup', {b'return': ret,})
     return ret
 
+
 def _gethandler(op, part):
-    status = 'unknown' # used by debug output
+    status = b'unknown'  # used by debug output
     try:
         handler = parthandlermapping.get(part.type)
         if handler is None:
-            status = 'unsupported-type'
+            status = b'unsupported-type'
             raise error.BundleUnknownFeatureError(parttype=part.type)
-        indebug(op.ui, 'found a handler for part %s' % part.type)
+        indebug(op.ui, b'found a handler for part %s' % part.type)
         unknownparams = part.mandatorykeys - handler.params
         if unknownparams:
             unknownparams = list(unknownparams)
             unknownparams.sort()
-            status = 'unsupported-params (%s)' % ', '.join(unknownparams)
-            raise error.BundleUnknownFeatureError(parttype=part.type,
-                                                  params=unknownparams)
-        status = 'supported'
+            status = b'unsupported-params (%s)' % b', '.join(unknownparams)
+            raise error.BundleUnknownFeatureError(
+                parttype=part.type, params=unknownparams
+            )
+        status = b'supported'
     except error.BundleUnknownFeatureError as exc:
-        if part.mandatory: # mandatory parts
+        if part.mandatory:  # mandatory parts
             raise
-        indebug(op.ui, 'ignoring unsupported advisory part %s' % exc)
-        return # skip to part processing
+        indebug(op.ui, b'ignoring unsupported advisory part %s' % exc)
+        return  # skip to part processing
     finally:
         if op.ui.debugflag:
-            msg = ['bundle2-input-part: "%s"' % part.type]
+            msg = [b'bundle2-input-part: "%s"' % part.type]
             if not part.mandatory:
-                msg.append(' (advisory)')
+                msg.append(b' (advisory)')
             nbmp = len(part.mandatorykeys)
             nbap = len(part.params) - nbmp
             if nbmp or nbap:
-                msg.append(' (params:')
+                msg.append(b' (params:')
                 if nbmp:
-                    msg.append(' %i mandatory' % nbmp)
+                    msg.append(b' %i mandatory' % nbmp)
                 if nbap:
-                    msg.append(' %i advisory' % nbmp)
-                msg.append(')')
-            msg.append(' %s\n' % status)
-            op.ui.debug(''.join(msg))
+                    msg.append(b' %i advisory' % nbmp)
+                msg.append(b')')
+            msg.append(b' %s\n' % status)
+            op.ui.debug(b''.join(msg))
 
     return handler
 
+
 def _processpart(op, part):
     """process a single part from a bundle
 
@@ -529,17 +549,18 @@
     output = None
     if op.captureoutput and op.reply is not None:
         op.ui.pushbuffer(error=True, subproc=True)
-        output = ''
+        output = b''
     try:
         handler(op, part)
     finally:
         if output is not None:
             output = op.ui.popbuffer()
         if output:
-            outpart = op.reply.newpart('output', data=output,
-                                       mandatory=False)
+            outpart = op.reply.newpart(b'output', data=output, mandatory=False)
             outpart.addparam(
-                'in-reply-to', pycompat.bytestr(part.id), mandatory=False)
+                b'in-reply-to', pycompat.bytestr(part.id), mandatory=False
+            )
+
 
 def decodecaps(blob):
     """decode a bundle2 caps bytes blob into a dictionary
@@ -554,16 +575,17 @@
     for line in blob.splitlines():
         if not line:
             continue
-        if '=' not in line:
+        if b'=' not in line:
             key, vals = line, ()
         else:
-            key, vals = line.split('=', 1)
-            vals = vals.split(',')
+            key, vals = line.split(b'=', 1)
+            vals = vals.split(b',')
         key = urlreq.unquote(key)
         vals = [urlreq.unquote(v) for v in vals]
         caps[key] = vals
     return caps
 
+
 def encodecaps(caps):
     """encode a bundle2 caps dictionary into a bytes blob"""
     chunks = []
@@ -572,22 +594,24 @@
         ca = urlreq.quote(ca)
         vals = [urlreq.quote(v) for v in vals]
         if vals:
-            ca = "%s=%s" % (ca, ','.join(vals))
+            ca = b"%s=%s" % (ca, b','.join(vals))
         chunks.append(ca)
-    return '\n'.join(chunks)
+    return b'\n'.join(chunks)
+
 
 bundletypes = {
-    "": ("", 'UN'),       # only when using unbundle on ssh and old http servers
-                          # since the unification ssh accepts a header but there
-                          # is no capability signaling it.
-    "HG20": (), # special-cased below
-    "HG10UN": ("HG10UN", 'UN'),
-    "HG10BZ": ("HG10", 'BZ'),
-    "HG10GZ": ("HG10GZ", 'GZ'),
+    b"": (b"", b'UN'),  # only when using unbundle on ssh and old http servers
+    # since the unification ssh accepts a header but there
+    # is no capability signaling it.
+    b"HG20": (),  # special-cased below
+    b"HG10UN": (b"HG10UN", b'UN'),
+    b"HG10BZ": (b"HG10", b'BZ'),
+    b"HG10GZ": (b"HG10GZ", b'GZ'),
 }
 
 # hgweb uses this list to communicate its preferred type
-bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
+bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN']
+
 
 class bundle20(object):
     """represent an outgoing bundle2 container
@@ -596,14 +620,14 @@
     populate it. Then call `getchunks` to retrieve all the binary chunks of
     data that compose the bundle2 container."""
 
-    _magicstring = 'HG20'
+    _magicstring = b'HG20'
 
     def __init__(self, ui, capabilities=()):
         self.ui = ui
         self._params = []
         self._parts = []
         self.capabilities = dict(capabilities)
-        self._compengine = util.compengines.forbundletype('UN')
+        self._compengine = util.compengines.forbundletype(b'UN')
         self._compopts = None
         # If compression is being handled by a consumer of the raw
         # data (e.g. the wire protocol), unsetting this flag tells
@@ -612,10 +636,10 @@
 
     def setcompression(self, alg, compopts=None):
         """setup core part compression to <alg>"""
-        if alg in (None, 'UN'):
+        if alg in (None, b'UN'):
             return
-        assert not any(n.lower() == 'compression' for n, v in self._params)
-        self.addparam('Compression', alg)
+        assert not any(n.lower() == b'compression' for n, v in self._params)
+        self.addparam(b'Compression', alg)
         self._compengine = util.compengines.forbundletype(alg)
         self._compopts = compopts
 
@@ -630,8 +654,9 @@
         if not name:
             raise error.ProgrammingError(b'empty parameter name')
         if name[0:1] not in pycompat.bytestr(string.ascii_letters):
-            raise error.ProgrammingError(b'non letter first character: %s'
-                                         % name)
+            raise error.ProgrammingError(
+                b'non letter first character: %s' % name
+            )
         self._params.append((name, value))
 
     def addpart(self, part):
@@ -639,7 +664,7 @@
 
         Parts contains the actual applicative payload."""
         assert part.id is None
-        part.id = len(self._parts) # very cheap counter
+        part.id = len(self._parts)  # very cheap counter
         self._parts.append(part)
 
     def newpart(self, typeid, *args, **kwargs):
@@ -658,20 +683,21 @@
     # methods used to generate the bundle2 stream
     def getchunks(self):
         if self.ui.debugflag:
-            msg = ['bundle2-output-bundle: "%s",' % self._magicstring]
+            msg = [b'bundle2-output-bundle: "%s",' % self._magicstring]
             if self._params:
-                msg.append(' (%i params)' % len(self._params))
-            msg.append(' %i parts total\n' % len(self._parts))
-            self.ui.debug(''.join(msg))
-        outdebug(self.ui, 'start emission of %s stream' % self._magicstring)
+                msg.append(b' (%i params)' % len(self._params))
+            msg.append(b' %i parts total\n' % len(self._parts))
+            self.ui.debug(b''.join(msg))
+        outdebug(self.ui, b'start emission of %s stream' % self._magicstring)
         yield self._magicstring
         param = self._paramchunk()
-        outdebug(self.ui, 'bundle parameter: %s' % param)
+        outdebug(self.ui, b'bundle parameter: %s' % param)
         yield _pack(_fstreamparamsize, len(param))
         if param:
             yield param
-        for chunk in self._compengine.compressstream(self._getcorechunk(),
-                                                     self._compopts):
+        for chunk in self._compengine.compressstream(
+            self._getcorechunk(), self._compopts
+        ):
             yield chunk
 
     def _paramchunk(self):
@@ -681,23 +707,22 @@
             par = urlreq.quote(par)
             if value is not None:
                 value = urlreq.quote(value)
-                par = '%s=%s' % (par, value)
+                par = b'%s=%s' % (par, value)
             blocks.append(par)
-        return ' '.join(blocks)
+        return b' '.join(blocks)
 
     def _getcorechunk(self):
         """yield chunk for the core part of the bundle
 
         (all but headers and parameters)"""
-        outdebug(self.ui, 'start of parts')
+        outdebug(self.ui, b'start of parts')
         for part in self._parts:
-            outdebug(self.ui, 'bundle part: "%s"' % part.type)
+            outdebug(self.ui, b'bundle part: "%s"' % part.type)
             for chunk in part.getchunks(ui=self.ui):
                 yield chunk
-        outdebug(self.ui, 'end of bundle')
+        outdebug(self.ui, b'end of bundle')
         yield _pack(_fpartheadersize, 0)
 
-
     def salvageoutput(self):
         """return a list with a copy of all output parts in the bundle
 
@@ -705,7 +730,7 @@
         server output"""
         salvaged = []
         for part in self._parts:
-            if part.type.startswith('output'):
+            if part.type.startswith(b'output'):
                 salvaged.append(part.copy())
         return salvaged
 
@@ -737,47 +762,51 @@
         Do not use it to implement higher-level logic or methods."""
         return changegroup.readexactly(self._fp, size)
 
+
 def getunbundler(ui, fp, magicstring=None):
     """return a valid unbundler object for a given magicstring"""
     if magicstring is None:
         magicstring = changegroup.readexactly(fp, 4)
     magic, version = magicstring[0:2], magicstring[2:4]
-    if magic != 'HG':
+    if magic != b'HG':
         ui.debug(
-            "error: invalid magic: %r (version %r), should be 'HG'\n"
-            % (magic, version))
-        raise error.Abort(_('not a Mercurial bundle'))
+            b"error: invalid magic: %r (version %r), should be 'HG'\n"
+            % (magic, version)
+        )
+        raise error.Abort(_(b'not a Mercurial bundle'))
     unbundlerclass = formatmap.get(version)
     if unbundlerclass is None:
-        raise error.Abort(_('unknown bundle version %s') % version)
+        raise error.Abort(_(b'unknown bundle version %s') % version)
     unbundler = unbundlerclass(ui, fp)
-    indebug(ui, 'start processing of %s stream' % magicstring)
+    indebug(ui, b'start processing of %s stream' % magicstring)
     return unbundler
 
+
 class unbundle20(unpackermixin):
     """interpret a bundle2 stream
 
     This class is fed with a binary stream and yields parts through its
     `iterparts` method."""
 
-    _magicstring = 'HG20'
+    _magicstring = b'HG20'
 
     def __init__(self, ui, fp):
         """If header is specified, we do not read it out of the stream."""
         self.ui = ui
-        self._compengine = util.compengines.forbundletype('UN')
+        self._compengine = util.compengines.forbundletype(b'UN')
         self._compressed = None
         super(unbundle20, self).__init__(fp)
 
     @util.propertycache
     def params(self):
         """dictionary of stream level parameters"""
-        indebug(self.ui, 'reading bundle2 stream parameters')
+        indebug(self.ui, b'reading bundle2 stream parameters')
         params = {}
         paramssize = self._unpack(_fstreamparamsize)[0]
         if paramssize < 0:
-            raise error.BundleValueError('negative bundle param size: %i'
-                                         % paramssize)
+            raise error.BundleValueError(
+                b'negative bundle param size: %i' % paramssize
+            )
         if paramssize:
             params = self._readexact(paramssize)
             params = self._processallparams(params)
@@ -786,8 +815,8 @@
     def _processallparams(self, paramsblock):
         """"""
         params = util.sortdict()
-        for p in paramsblock.split(' '):
-            p = p.split('=', 1)
+        for p in paramsblock.split(b' '):
+            p = p.split(b'=', 1)
             p = [urlreq.unquote(i) for i in p]
             if len(p) < 2:
                 p.append(None)
@@ -795,7 +824,6 @@
             params[p[0]] = p[1]
         return params
 
-
     def _processparam(self, name, value):
         """process a parameter, applying its effect if needed
 
@@ -814,7 +842,7 @@
             handler = b2streamparamsmap[name.lower()]
         except KeyError:
             if name[0:1].islower():
-                indebug(self.ui, "ignoring unknown parameter %s" % name)
+                indebug(self.ui, b"ignoring unknown parameter %s" % name)
             else:
                 raise error.BundleUnknownFeatureError(params=(name,))
         else:
@@ -829,22 +857,23 @@
         needed to move forward to get general delta enabled.
         """
         yield self._magicstring
-        assert 'params' not in vars(self)
+        assert b'params' not in vars(self)
         paramssize = self._unpack(_fstreamparamsize)[0]
         if paramssize < 0:
-            raise error.BundleValueError('negative bundle param size: %i'
-                                         % paramssize)
+            raise error.BundleValueError(
+                b'negative bundle param size: %i' % paramssize
+            )
         if paramssize:
             params = self._readexact(paramssize)
             self._processallparams(params)
             # The payload itself is decompressed below, so drop
             # the compression parameter passed down to compensate.
             outparams = []
-            for p in params.split(' '):
-                k, v = p.split('=', 1)
-                if k.lower() != 'compression':
+            for p in params.split(b' '):
+                k, v = p.split(b'=', 1)
+                if k.lower() != b'compression':
                     outparams.append(p)
-            outparams = ' '.join(outparams)
+            outparams = b' '.join(outparams)
             yield _pack(_fstreamparamsize, len(outparams))
             yield outparams
         else:
@@ -865,10 +894,9 @@
             if size == flaginterrupt:
                 continue
             elif size < 0:
-                raise error.BundleValueError('negative chunk size: %i')
+                raise error.BundleValueError(b'negative chunk size: %i' % size)
             yield self._readexact(size)
 
-
     def iterparts(self, seekable=False):
         """yield all parts contained in the stream"""
         cls = seekableunbundlepart if seekable else unbundlepart
@@ -876,7 +904,7 @@
         self.params
         # From there, payload need to be decompressed
         self._fp = self._compengine.decompressorreader(self._fp)
-        indebug(self.ui, 'start extraction of bundle2 parts')
+        indebug(self.ui, b'start extraction of bundle2 parts')
         headerblock = self._readpartheader()
         while headerblock is not None:
             part = cls(self.ui, headerblock, self._fp)
@@ -886,7 +914,7 @@
             part.consume()
 
             headerblock = self._readpartheader()
-        indebug(self.ui, 'end of bundle2 stream')
+        indebug(self.ui, b'end of bundle2 stream')
 
     def _readpartheader(self):
         """reads a part header size and return the bytes blob
@@ -894,15 +922,16 @@
         returns None if empty"""
         headersize = self._unpack(_fpartheadersize)[0]
         if headersize < 0:
-            raise error.BundleValueError('negative part header size: %i'
-                                         % headersize)
-        indebug(self.ui, 'part header size: %i' % headersize)
+            raise error.BundleValueError(
+                b'negative part header size: %i' % headersize
+            )
+        indebug(self.ui, b'part header size: %i' % headersize)
         if headersize:
             return self._readexact(headersize)
         return None
 
     def compressed(self):
-        self.params # load params
+        self.params  # load params
         return self._compressed
 
     def close(self):
@@ -910,28 +939,33 @@
         if util.safehasattr(self._fp, 'close'):
             return self._fp.close()
 
-formatmap = {'20': unbundle20}
+
+formatmap = {b'20': unbundle20}
 
 b2streamparamsmap = {}
 
+
 def b2streamparamhandler(name):
     """register a handler for a stream level parameter"""
+
     def decorator(func):
         assert name not in formatmap
         b2streamparamsmap[name] = func
         return func
+
     return decorator
 
-@b2streamparamhandler('compression')
+
+@b2streamparamhandler(b'compression')
 def processcompression(unbundler, param, value):
     """read compression parameter and install payload decompression"""
     if value not in util.compengines.supportedbundletypes:
-        raise error.BundleUnknownFeatureError(params=(param,),
-                                              values=(value,))
+        raise error.BundleUnknownFeatureError(params=(param,), values=(value,))
     unbundler._compengine = util.compengines.forbundletype(value)
     if value is not None:
         unbundler._compressed = True
 
+
 class bundlepart(object):
     """A bundle2 part contains application level payload
 
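
b2streamparamhandler above is the module's registration idiom: a decorator drops
the handler into a dispatch table keyed by parameter name, and _processparam
treats an unknown lowercase name as advisory (ignored) while an unknown
capitalized name raises. A stripped-down sketch of that dispatch; the
"demoparam" parameter is hypothetical, and the sketch asserts against its own
table where the real decorator checks formatmap::

    b2streamparamsmap = {}

    def b2streamparamhandler(name):
        def decorator(func):
            assert name not in b2streamparamsmap
            b2streamparamsmap[name] = func
            return func
        return decorator

    @b2streamparamhandler(b'demoparam')  # hypothetical parameter name
    def processdemo(unbundler, param, value):
        print('stream param %r = %r' % (param, value))

    def processparam(unbundler, name, value):
        handler = b2streamparamsmap.get(name.lower())
        if handler is None:
            if name[0:1].islower():
                return  # unknown advisory parameter: silently ignored
            raise ValueError('unsupported mandatory parameter: %r' % name)
        handler(unbundler, name, value)

    processparam(None, b'demoparam', b'1')   # dispatched to processdemo
    processparam(None, b'unknownone', None)  # advisory, ignored
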
@@ -948,8 +982,14 @@
     Neither data nor parameters can be modified after the generation has begun.
     """
 
-    def __init__(self, parttype, mandatoryparams=(), advisoryparams=(),
-                 data='', mandatory=True):
+    def __init__(
+        self,
+        parttype,
+        mandatoryparams=(),
+        advisoryparams=(),
+        data=b'',
+        mandatory=True,
+    ):
         validateparttype(parttype)
         self.id = None
         self.type = parttype
@@ -960,7 +1000,7 @@
         self._seenparams = set()
         for pname, __ in self._mandatoryparams + self._advisoryparams:
             if pname in self._seenparams:
-                raise error.ProgrammingError('duplicated params: %s' % pname)
+                raise error.ProgrammingError(b'duplicated params: %s' % pname)
             self._seenparams.add(pname)
         # status of the part's generation:
         # - None: not started,
@@ -970,9 +1010,14 @@
         self.mandatory = mandatory
 
     def __repr__(self):
-        cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
-        return ('<%s object at %x; id: %s; type: %s; mandatory: %s>'
-                % (cls, id(self), self.id, self.type, self.mandatory))
+        cls = b"%s.%s" % (self.__class__.__module__, self.__class__.__name__)
+        return b'<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
+            cls,
+            id(self),
+            self.id,
+            self.type,
+            self.mandatory,
+        )
 
     def copy(self):
         """return a copy of the part
@@ -980,8 +1025,13 @@
         The new part has the very same content but no partid assigned yet.
         Parts with generated data cannot be copied."""
         assert not util.safehasattr(self.data, 'next')
-        return self.__class__(self.type, self._mandatoryparams,
-                              self._advisoryparams, self._data, self.mandatory)
+        return self.__class__(
+            self.type,
+            self._mandatoryparams,
+            self._advisoryparams,
+            self._data,
+            self.mandatory,
+        )
 
     # methods used to defines the part content
     @property
@@ -991,7 +1041,7 @@
     @data.setter
     def data(self, data):
         if self._generated is not None:
-            raise error.ReadOnlyPartError('part is being generated')
+            raise error.ReadOnlyPartError(b'part is being generated')
         self._data = data
 
     @property
@@ -1004,7 +1054,7 @@
         # make it an immutable tuple to force people through ``addparam``
         return tuple(self._advisoryparams)
 
-    def addparam(self, name, value='', mandatory=True):
+    def addparam(self, name, value=b'', mandatory=True):
         """add a parameter to the part
 
         If 'mandatory' is set to True, the remote handler must claim support
@@ -1013,9 +1063,9 @@
         The 'name' and 'value' cannot exceed 255 bytes each.
         """
         if self._generated is not None:
-            raise error.ReadOnlyPartError('part is being generated')
+            raise error.ReadOnlyPartError(b'part is being generated')
         if name in self._seenparams:
-            raise ValueError('duplicated params: %s' % name)
+            raise ValueError(b'duplicated params: %s' % name)
         self._seenparams.add(name)
         params = self._advisoryparams
         if mandatory:
@@ -1025,42 +1075,45 @@
     # methods used to generates the bundle2 stream
     def getchunks(self, ui):
         if self._generated is not None:
-            raise error.ProgrammingError('part can only be consumed once')
+            raise error.ProgrammingError(b'part can only be consumed once')
         self._generated = False
 
         if ui.debugflag:
-            msg = ['bundle2-output-part: "%s"' % self.type]
+            msg = [b'bundle2-output-part: "%s"' % self.type]
             if not self.mandatory:
-                msg.append(' (advisory)')
+                msg.append(b' (advisory)')
             nbmp = len(self.mandatoryparams)
             nbap = len(self.advisoryparams)
             if nbmp or nbap:
-                msg.append(' (params:')
+                msg.append(b' (params:')
                 if nbmp:
-                    msg.append(' %i mandatory' % nbmp)
+                    msg.append(b' %i mandatory' % nbmp)
                 if nbap:
-                    msg.append(' %i advisory' % nbmp)
-                msg.append(')')
+                    msg.append(b' %i advisory' % nbmp)
+                msg.append(b')')
             if not self.data:
-                msg.append(' empty payload')
-            elif (util.safehasattr(self.data, 'next')
-                  or util.safehasattr(self.data, '__next__')):
-                msg.append(' streamed payload')
+                msg.append(b' empty payload')
+            elif util.safehasattr(self.data, 'next') or util.safehasattr(
+                self.data, b'__next__'
+            ):
+                msg.append(b' streamed payload')
             else:
-                msg.append(' %i bytes payload' % len(self.data))
-            msg.append('\n')
-            ui.debug(''.join(msg))
+                msg.append(b' %i bytes payload' % len(self.data))
+            msg.append(b'\n')
+            ui.debug(b''.join(msg))
 
         #### header
         if self.mandatory:
             parttype = self.type.upper()
         else:
             parttype = self.type.lower()
-        outdebug(ui, 'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
+        outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
         ## parttype
-        header = [_pack(_fparttypesize, len(parttype)),
-                  parttype, _pack(_fpartid, self.id),
-                 ]
+        header = [
+            _pack(_fparttypesize, len(parttype)),
+            parttype,
+            _pack(_fpartid, self.id),
+        ]
         ## parameters
         # count
         manpar = self.mandatoryparams
@@ -1085,44 +1138,48 @@
             header.append(value)
         ## finalize header
         try:
-            headerchunk = ''.join(header)
+            headerchunk = b''.join(header)
         except TypeError:
-            raise TypeError(r'Found a non-bytes trying to '
-                            r'build bundle part header: %r' % header)
-        outdebug(ui, 'header chunk size: %i' % len(headerchunk))
+            raise TypeError(
+                r'Found a non-bytes value while building '
+                r'a bundle part header: %r' % header
+            )
+        outdebug(ui, b'header chunk size: %i' % len(headerchunk))
         yield _pack(_fpartheadersize, len(headerchunk))
         yield headerchunk
         ## payload
         try:
             for chunk in self._payloadchunks():
-                outdebug(ui, 'payload chunk size: %i' % len(chunk))
+                outdebug(ui, b'payload chunk size: %i' % len(chunk))
                 yield _pack(_fpayloadsize, len(chunk))
                 yield chunk
         except GeneratorExit:
             # GeneratorExit means that nobody is listening for our
             # results anyway, so just bail quickly rather than trying
             # to produce an error part.
-            ui.debug('bundle2-generatorexit\n')
+            ui.debug(b'bundle2-generatorexit\n')
             raise
         except BaseException as exc:
             bexc = stringutil.forcebytestr(exc)
             # backup exception data for later
-            ui.debug('bundle2-input-stream-interrupt: encoding exception %s'
-                     % bexc)
+            ui.debug(
+                b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
+            )
             tb = sys.exc_info()[2]
-            msg = 'unexpected error: %s' % bexc
-            interpart = bundlepart('error:abort', [('message', msg)],
-                                   mandatory=False)
+            msg = b'unexpected error: %s' % bexc
+            interpart = bundlepart(
+                b'error:abort', [(b'message', msg)], mandatory=False
+            )
             interpart.id = 0
             yield _pack(_fpayloadsize, -1)
             for chunk in interpart.getchunks(ui=ui):
                 yield chunk
-            outdebug(ui, 'closing payload chunk')
+            outdebug(ui, b'closing payload chunk')
             # abort current part payload
             yield _pack(_fpayloadsize, 0)
             pycompat.raisewithtb(exc, tb)
         # end of payload
-        outdebug(ui, 'closing payload chunk')
+        outdebug(ui, b'closing payload chunk')
         yield _pack(_fpayloadsize, 0)
         self._generated = True
 
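
The framing getchunks() produces is simple enough to reproduce standalone: a
sized header, then sized payload chunks, then a zero-length chunk as
terminator. A minimal sketch, assuming the struct formats defined near the top
of this module ('>B' for the part type length, '>I' for the part id, '>i' for
the header and payload sizes); frame_part() is a hypothetical name::

   import struct

   # Assumed wire formats, mirroring the _fparttypesize/_fpartid/
   # _fpartheadersize/_fpayloadsize constants used by _pack() above.
   _fparttypesize = '>B'    # one byte: length of the part type name
   _fpartid = '>I'          # 32-bit unsigned part id
   _fpartheadersize = '>i'  # signed 32-bit header length
   _fpayloadsize = '>i'     # signed 32-bit payload chunk length

   def frame_part(parttype, partid, payload_chunks):
       """Yield the wire framing for one part: a sized header, sized
       payload chunks, then a zero-length chunk as terminator."""
       header = b''.join([
           struct.pack(_fparttypesize, len(parttype)),
           parttype,
           struct.pack(_fpartid, partid),
           b'\x00\x00',  # zero mandatory and zero advisory parameters
       ])
       yield struct.pack(_fpartheadersize, len(header))
       yield header
       for chunk in payload_chunks:
           yield struct.pack(_fpayloadsize, len(chunk))
           yield chunk
       yield struct.pack(_fpayloadsize, 0)  # end-of-payload marker
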
@@ -1132,8 +1189,9 @@
         Exists to handle the different ways data can be provided to a part."""
         # we only support fixed size data now.
         # This will be improved in the future.
-        if (util.safehasattr(self.data, 'next')
-            or util.safehasattr(self.data, '__next__')):
+        if util.safehasattr(self.data, 'next') or util.safehasattr(
+            self.data, '__next__'
+        ):
             buff = util.chunkbuffer(self.data)
             chunk = buff.read(preferedchunksize)
             while chunk:
@@ -1145,6 +1203,7 @@
 
 flaginterrupt = -1
 
+
 class interrupthandler(unpackermixin):
     """read one part and process it with restricted capability
 
@@ -1163,21 +1222,23 @@
         returns None if empty"""
         headersize = self._unpack(_fpartheadersize)[0]
         if headersize < 0:
-            raise error.BundleValueError('negative part header size: %i'
-                                         % headersize)
-        indebug(self.ui, 'part header size: %i\n' % headersize)
+            raise error.BundleValueError(
+                b'negative part header size: %i' % headersize
+            )
+        indebug(self.ui, b'part header size: %i\n' % headersize)
         if headersize:
             return self._readexact(headersize)
         return None
 
     def __call__(self):
 
-        self.ui.debug('bundle2-input-stream-interrupt:'
-                      ' opening out of band context\n')
-        indebug(self.ui, 'bundle2 stream interruption, looking for a part.')
+        self.ui.debug(
+            b'bundle2-input-stream-interrupt: opening out of band context\n'
+        )
+        indebug(self.ui, b'bundle2 stream interruption, looking for a part.')
         headerblock = self._readpartheader()
         if headerblock is None:
-            indebug(self.ui, 'no part found during interruption.')
+            indebug(self.ui, b'no part found during interruption.')
             return
         part = unbundlepart(self.ui, headerblock, self._fp)
         op = interruptoperation(self.ui)
@@ -1190,8 +1251,10 @@
         finally:
             if not hardabort:
                 part.consume()
-        self.ui.debug('bundle2-input-stream-interrupt:'
-                      ' closing out of band context\n')
+        self.ui.debug(
+            b'bundle2-input-stream-interrupt: closing out of band context\n'
+        )
+
 
 class interruptoperation(object):
     """A limited operation to be use by part handler during interruption
@@ -1206,10 +1269,11 @@
 
     @property
     def repo(self):
-        raise error.ProgrammingError('no repo access from stream interruption')
+        raise error.ProgrammingError(b'no repo access from stream interruption')
 
     def gettransaction(self):
-        raise TransactionUnavailable('no repo access from stream interruption')
+        raise TransactionUnavailable(b'no repo access from stream interruption')
+
 
 def decodepayloadchunks(ui, fh):
     """Reads bundle2 part payload data into chunks.
@@ -1217,7 +1281,7 @@
     Part payload data consists of framed chunks. This function takes
     a file handle and emits those chunks.
     """
-    dolog = ui.configbool('devel', 'bundle2.debug')
+    dolog = ui.configbool(b'devel', b'bundle2.debug')
     debug = ui.debug
 
     headerstruct = struct.Struct(_fpayloadsize)
@@ -1228,16 +1292,20 @@
     read = fh.read
 
     chunksize = unpack(readexactly(fh, headersize))[0]
-    indebug(ui, 'payload chunk size: %i' % chunksize)
+    indebug(ui, b'payload chunk size: %i' % chunksize)
 
     # changegroup.readexactly() is inlined below for performance.
     while chunksize:
         if chunksize >= 0:
             s = read(chunksize)
             if len(s) < chunksize:
-                raise error.Abort(_('stream ended unexpectedly '
-                                    ' (got %d bytes, expected %d)') %
-                                  (len(s), chunksize))
+                raise error.Abort(
+                    _(
+                        b'stream ended unexpectedly '
+                        b'(got %d bytes, expected %d)'
+                    )
+                    % (len(s), chunksize)
+                )
 
             yield s
         elif chunksize == flaginterrupt:
@@ -1246,27 +1314,31 @@
             interrupthandler(ui, fh)()
         else:
             raise error.BundleValueError(
-                'negative payload chunk size: %s' % chunksize)
+                b'negative payload chunk size: %s' % chunksize
+            )
 
         s = read(headersize)
         if len(s) < headersize:
-            raise error.Abort(_('stream ended unexpectedly '
-                                ' (got %d bytes, expected %d)') %
-                              (len(s), chunksize))
+            raise error.Abort(
+                _(b'stream ended unexpectedly (got %d bytes, expected %d)')
+                % (len(s), headersize)
+            )
 
         chunksize = unpack(s)[0]
 
         # indebug() inlined for performance.
         if dolog:
-            debug('bundle2-input: payload chunk size: %i\n' % chunksize)
+            debug(b'bundle2-input: payload chunk size: %i\n' % chunksize)
+
 
 class unbundlepart(unpackermixin):
     """a bundle part read from a bundle"""
 
     def __init__(self, ui, header, fp):
         super(unbundlepart, self).__init__(fp)
-        self._seekable = (util.safehasattr(fp, 'seek') and
-                          util.safehasattr(fp, 'tell'))
+        self._seekable = util.safehasattr(fp, 'seek') and util.safehasattr(
+            fp, 'tell'
+        )
         self.ui = ui
         # unbundle state attr
         self._headerdata = header
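
Reading back the chunk framing that decodepayloadchunks() parses fits in a
short generator. A minimal sketch, assuming a blocking file-like fh; interrupt
handling is stubbed out, and read_payload_chunks() is a hypothetical name::

   import struct

   FLAG_INTERRUPT = -1  # matches flaginterrupt above

   def read_payload_chunks(fh):
       """Parse the framing decodepayloadchunks() consumes: each frame
       is a signed 32-bit big-endian size; a positive value means that
       many data bytes follow, 0 ends the payload, and -1 announces an
       out-of-band interrupt part."""
       sizer = struct.Struct('>i')
       size = sizer.unpack(fh.read(sizer.size))[0]
       while size:
           if size >= 0:
               data = fh.read(size)
               if len(data) < size:
                   raise EOFError('stream ended unexpectedly')
               yield data
           elif size == FLAG_INTERRUPT:
               raise NotImplementedError('interrupt parts elided here')
           else:
               raise ValueError('negative payload chunk size: %d' % size)
           size = sizer.unpack(fh.read(sizer.size))[0]
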
@@ -1287,7 +1359,7 @@
     def _fromheader(self, size):
         """return the next <size> byte from the header"""
         offset = self._headeroffset
-        data = self._headerdata[offset:(offset + size)]
+        data = self._headerdata[offset : (offset + size)]
         self._headeroffset = offset + size
         return data
 
@@ -1302,7 +1374,7 @@
         """internal function to setup all logic related parameters"""
         # make it read only to prevent people touching it by mistake.
         self.mandatoryparams = tuple(mandatoryparams)
-        self.advisoryparams  = tuple(advisoryparams)
+        self.advisoryparams = tuple(advisoryparams)
         # user friendly UI
         self.params = util.sortdict(self.mandatoryparams)
         self.params.update(self.advisoryparams)
@@ -1312,16 +1384,16 @@
         """read the header and setup the object"""
         typesize = self._unpackheader(_fparttypesize)[0]
         self.type = self._fromheader(typesize)
-        indebug(self.ui, 'part type: "%s"' % self.type)
+        indebug(self.ui, b'part type: "%s"' % self.type)
         self.id = self._unpackheader(_fpartid)[0]
-        indebug(self.ui, 'part id: "%s"' % pycompat.bytestr(self.id))
+        indebug(self.ui, b'part id: "%s"' % pycompat.bytestr(self.id))
         # extract mandatory bit from type
-        self.mandatory = (self.type != self.type.lower())
+        self.mandatory = self.type != self.type.lower()
         self.type = self.type.lower()
         ## reading parameters
         # param count
         mancount, advcount = self._unpackheader(_fpartparamcount)
-        indebug(self.ui, 'part parameters: %i' % (mancount + advcount))
+        indebug(self.ui, b'part parameters: %i' % (mancount + advcount))
         # param size
         fparamsizes = _makefpartparamsizes(mancount + advcount)
         paramsizes = self._unpackheader(fparamsizes)
@@ -1372,11 +1444,13 @@
         self._pos += len(data)
         if size is None or len(data) < size:
             if not self.consumed and self._pos:
-                self.ui.debug('bundle2-input-part: total payload size %i\n'
-                              % self._pos)
+                self.ui.debug(
+                    b'bundle2-input-part: total payload size %i\n' % self._pos
+                )
             self.consumed = True
         return data
 
+
 class seekableunbundlepart(unbundlepart):
     """A bundle2 part in a bundle that is seekable.
 
@@ -1394,6 +1468,7 @@
     to the number of chunks within the payload (which almost certainly
     increases in proportion with the size of the part).
     """
+
     def __init__(self, ui, header, fp):
         # (payload, file) offsets for chunk starts.
         self._chunkindex = []
@@ -1403,11 +1478,12 @@
     def _payloadchunks(self, chunknum=0):
         '''seek to specified chunk and start yielding data'''
         if len(self._chunkindex) == 0:
-            assert chunknum == 0, 'Must start with chunk 0'
+            assert chunknum == 0, b'Must start with chunk 0'
             self._chunkindex.append((0, self._tellfp()))
         else:
             assert chunknum < len(self._chunkindex), (
-                   'Unknown chunk %d' % chunknum)
+                b'Unknown chunk %d' % chunknum
+            )
             self._seekfp(self._chunkindex[chunknum][1])
 
         pos = self._chunkindex[chunknum][0]
@@ -1427,7 +1503,7 @@
                 return chunk, 0
             elif ppos > pos:
                 return chunk - 1, pos - self._chunkindex[chunk - 1][0]
-        raise ValueError('Unknown chunk')
+        raise ValueError(b'Unknown chunk')
 
     def tell(self):
         return self._pos
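
The (payload offset, file offset) pairs collected in _chunkindex are what make
seeking affordable: locating a payload position is a scan over recorded chunk
starts. A toy illustration with hypothetical index data, mirroring
_findchunk() above::

   # Hypothetical index: chunks start at payload offsets 0, 4096, 8192.
   chunkindex = [(0, 100), (4096, 4204), (8192, 8312)]

   def find_chunk(pos):
       """Return (chunk number, offset within that chunk) for payload
       offset pos, mirroring seekableunbundlepart._findchunk()."""
       for chunk, (ppos, fpos) in enumerate(chunkindex):
           if ppos == pos:
               return chunk, 0
           elif ppos > pos:
               return chunk - 1, pos - chunkindex[chunk - 1][0]
       raise ValueError('Unknown chunk')

   assert find_chunk(5000) == (1, 904)  # 904 bytes into the second chunk

As in the original, an offset past the last indexed chunk start cannot be
resolved, which is why seek() first consumes the stream to complete the index.
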
@@ -1445,7 +1521,7 @@
                     chunk = self.read(32768)
             newpos = self._chunkindex[-1][0] - offset
         else:
-            raise ValueError('Unknown whence value: %r' % (whence,))
+            raise ValueError(b'Unknown whence value: %r' % (whence,))
 
         if newpos > self._chunkindex[-1][0] and not self.consumed:
             # Can't use self.consume() here because it advances self._pos.
@@ -1454,14 +1530,14 @@
                 chunk = self.read(32768)
 
         if not 0 <= newpos <= self._chunkindex[-1][0]:
-            raise ValueError('Offset out of range')
+            raise ValueError(b'Offset out of range')
 
         if self._pos != newpos:
             chunk, internaloffset = self._findchunk(newpos)
             self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
             adjust = self.read(internaloffset)
             if len(adjust) != internaloffset:
-                raise error.Abort(_('Seek failed\n'))
+                raise error.Abort(_(b'Seek failed\n'))
             self._pos = newpos
 
     def _seekfp(self, offset, whence=0):
@@ -1475,7 +1551,7 @@
         if self._seekable:
             return self._fp.seek(offset, whence)
         else:
-            raise NotImplementedError(_('File pointer is not seekable'))
+            raise NotImplementedError(_(b'File pointer is not seekable'))
 
     def _tellfp(self):
         """return the file offset, or None if file is not seekable
@@ -1495,21 +1571,23 @@
                     raise
         return None
 
+
 # These are only the static capabilities.
 # Check the 'getrepocaps' function for the rest.
-capabilities = {'HG20': (),
-                'bookmarks': (),
-                'error': ('abort', 'unsupportedcontent', 'pushraced',
-                          'pushkey'),
-                'listkeys': (),
-                'pushkey': (),
-                'digests': tuple(sorted(util.DIGESTS.keys())),
-                'remote-changegroup': ('http', 'https'),
-                'hgtagsfnodes': (),
-                'rev-branch-cache': (),
-                'phases': ('heads',),
-                'stream': ('v2',),
-               }
+capabilities = {
+    b'HG20': (),
+    b'bookmarks': (),
+    b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'),
+    b'listkeys': (),
+    b'pushkey': (),
+    b'digests': tuple(sorted(util.DIGESTS.keys())),
+    b'remote-changegroup': (b'http', b'https'),
+    b'hgtagsfnodes': (),
+    b'rev-branch-cache': (),
+    b'phases': (b'heads',),
+    b'stream': (b'v2',),
+}
+
 
 def getrepocaps(repo, allowpushback=False, role=None):
     """return the bundle2 capabilities for a given repo
@@ -1520,62 +1598,84 @@
     well as clients advertising their capabilities to servers as part of
     bundle2 requests. The ``role`` argument specifies which is which.
     """
-    if role not in ('client', 'server'):
-        raise error.ProgrammingError('role argument must be client or server')
+    if role not in (b'client', b'server'):
+        raise error.ProgrammingError(b'role argument must be client or server')
 
     caps = capabilities.copy()
-    caps['changegroup'] = tuple(sorted(
-        changegroup.supportedincomingversions(repo)))
+    caps[b'changegroup'] = tuple(
+        sorted(changegroup.supportedincomingversions(repo))
+    )
     if obsolete.isenabled(repo, obsolete.exchangeopt):
-        supportedformat = tuple('V%i' % v for v in obsolete.formats)
-        caps['obsmarkers'] = supportedformat
+        supportedformat = tuple(b'V%i' % v for v in obsolete.formats)
+        caps[b'obsmarkers'] = supportedformat
     if allowpushback:
-        caps['pushback'] = ()
-    cpmode = repo.ui.config('server', 'concurrent-push-mode')
-    if cpmode == 'check-related':
-        caps['checkheads'] = ('related',)
-    if 'phases' in repo.ui.configlist('devel', 'legacy.exchange'):
-        caps.pop('phases')
+        caps[b'pushback'] = ()
+    cpmode = repo.ui.config(b'server', b'concurrent-push-mode')
+    if cpmode == b'check-related':
+        caps[b'checkheads'] = (b'related',)
+    if b'phases' in repo.ui.configlist(b'devel', b'legacy.exchange'):
+        caps.pop(b'phases')
 
     # Don't advertise stream clone support in server mode if not configured.
-    if role == 'server':
-        streamsupported = repo.ui.configbool('server', 'uncompressed',
-                                             untrusted=True)
-        featuresupported = repo.ui.configbool('server', 'bundle2.stream')
+    if role == b'server':
+        streamsupported = repo.ui.configbool(
+            b'server', b'uncompressed', untrusted=True
+        )
+        featuresupported = repo.ui.configbool(b'server', b'bundle2.stream')
 
         if not streamsupported or not featuresupported:
-            caps.pop('stream')
+            caps.pop(b'stream')
     # Else always advertise support on client, because payload support
     # should always be advertised.
 
     return caps
 
+
 def bundle2caps(remote):
     """return the bundle capabilities of a peer as dict"""
-    raw = remote.capable('bundle2')
-    if not raw and raw != '':
+    raw = remote.capable(b'bundle2')
+    if not raw and raw != b'':
         return {}
-    capsblob = urlreq.unquote(remote.capable('bundle2'))
+    capsblob = urlreq.unquote(remote.capable(b'bundle2'))
     return decodecaps(capsblob)
 
+
 def obsmarkersversion(caps):
     """extract the list of supported obsmarkers versions from a bundle2caps dict
     """
-    obscaps = caps.get('obsmarkers', ())
-    return [int(c[1:]) for c in obscaps if c.startswith('V')]
-
-def writenewbundle(ui, repo, source, filename, bundletype, outgoing, opts,
-                   vfs=None, compression=None, compopts=None):
-    if bundletype.startswith('HG10'):
-        cg = changegroup.makechangegroup(repo, outgoing, '01', source)
-        return writebundle(ui, cg, filename, bundletype, vfs=vfs,
-                           compression=compression, compopts=compopts)
-    elif not bundletype.startswith('HG20'):
-        raise error.ProgrammingError('unknown bundle type: %s' % bundletype)
+    obscaps = caps.get(b'obsmarkers', ())
+    return [int(c[1:]) for c in obscaps if c.startswith(b'V')]
+
+
+def writenewbundle(
+    ui,
+    repo,
+    source,
+    filename,
+    bundletype,
+    outgoing,
+    opts,
+    vfs=None,
+    compression=None,
+    compopts=None,
+):
+    if bundletype.startswith(b'HG10'):
+        cg = changegroup.makechangegroup(repo, outgoing, b'01', source)
+        return writebundle(
+            ui,
+            cg,
+            filename,
+            bundletype,
+            vfs=vfs,
+            compression=compression,
+            compopts=compopts,
+        )
+    elif not bundletype.startswith(b'HG20'):
+        raise error.ProgrammingError(b'unknown bundle type: %s' % bundletype)
 
     caps = {}
-    if 'obsolescence' in opts:
-        caps['obsmarkers'] = ('V1',)
+    if b'obsolescence' in opts:
+        caps[b'obsmarkers'] = (b'V1',)
     bundle = bundle20(ui, caps)
     bundle.setcompression(compression, compopts)
     _addpartsfromopts(ui, repo, bundle, source, outgoing, opts)
@@ -1583,6 +1683,7 @@
 
     return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
 
+
 def _addpartsfromopts(ui, repo, bundler, source, outgoing, opts):
     # We should eventually reconcile this logic with the one behind
     # 'exchange.getbundle2partsgenerator'.
@@ -1593,37 +1694,44 @@
 
     # we might not always want a changegroup in such a bundle, for example in
     # stream bundles
-    if opts.get('changegroup', True):
-        cgversion = opts.get('cg.version')
+    if opts.get(b'changegroup', True):
+        cgversion = opts.get(b'cg.version')
         if cgversion is None:
             cgversion = changegroup.safeversion(repo)
         cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
-        part = bundler.newpart('changegroup', data=cg.getchunks())
-        part.addparam('version', cg.version)
-        if 'clcount' in cg.extras:
-            part.addparam('nbchanges', '%d' % cg.extras['clcount'],
-                          mandatory=False)
-        if opts.get('phases') and repo.revs('%ln and secret()',
-                                            outgoing.missingheads):
-            part.addparam('targetphase', '%d' % phases.secret, mandatory=False)
-
-    if opts.get('streamv2', False):
+        part = bundler.newpart(b'changegroup', data=cg.getchunks())
+        part.addparam(b'version', cg.version)
+        if b'clcount' in cg.extras:
+            part.addparam(
+                b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
+            )
+        if opts.get(b'phases') and repo.revs(
+            b'%ln and secret()', outgoing.missingheads
+        ):
+            part.addparam(
+                b'targetphase', b'%d' % phases.secret, mandatory=False
+            )
+        if b'exp-sidedata-flag' in repo.requirements:
+            part.addparam(b'exp-sidedata', b'1')
+
+    if opts.get(b'streamv2', False):
         addpartbundlestream2(bundler, repo, stream=True)
 
-    if opts.get('tagsfnodescache', True):
+    if opts.get(b'tagsfnodescache', True):
         addparttagsfnodescache(repo, bundler, outgoing)
 
-    if opts.get('revbranchcache', True):
+    if opts.get(b'revbranchcache', True):
         addpartrevbranchcache(repo, bundler, outgoing)
 
-    if opts.get('obsolescence', False):
+    if opts.get(b'obsolescence', False):
         obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
         buildobsmarkerspart(bundler, obsmarkers)
 
-    if opts.get('phases', False):
+    if opts.get(b'phases', False):
         headsbyphase = phases.subsetphaseheads(repo, outgoing.missing)
         phasedata = phases.binaryencode(headsbyphase)
-        bundler.newpart('phase-heads', data=phasedata)
+        bundler.newpart(b'phase-heads', data=phasedata)
+
 
 def addparttagsfnodescache(repo, bundler, outgoing):
     # we include the tags fnode cache for the bundle changeset
@@ -1647,7 +1755,8 @@
             chunks.extend([node, fnode])
 
     if chunks:
-        bundler.newpart('hgtagsfnodes', data=''.join(chunks))
+        bundler.newpart(b'hgtagsfnodes', data=b''.join(chunks))
+
 
 def addpartrevbranchcache(repo, bundler, outgoing):
     # we include the rev branch cache for the bundle changeset
@@ -1669,28 +1778,36 @@
             for n in sorted(closed):
                 yield n
 
-    bundler.newpart('cache:rev-branch-cache', data=generate(),
-                    mandatory=False)
+    bundler.newpart(b'cache:rev-branch-cache', data=generate(), mandatory=False)
+
 
 def _formatrequirementsspec(requirements):
-    requirements = [req for req in requirements if req != "shared"]
-    return urlreq.quote(','.join(sorted(requirements)))
+    requirements = [req for req in requirements if req != b"shared"]
+    return urlreq.quote(b','.join(sorted(requirements)))
+
 
 def _formatrequirementsparams(requirements):
     requirements = _formatrequirementsspec(requirements)
-    params = "%s%s" % (urlreq.quote("requirements="), requirements)
+    params = b"%s%s" % (urlreq.quote(b"requirements="), requirements)
     return params
 
+
 def addpartbundlestream2(bundler, repo, **kwargs):
     if not kwargs.get(r'stream', False):
         return
 
     if not streamclone.allowservergeneration(repo):
-        raise error.Abort(_('stream data requested but server does not allow '
-                            'this feature'),
-                          hint=_('well-behaved clients should not be '
-                                 'requesting stream data from servers not '
-                                 'advertising it; the client may be buggy'))
+        raise error.Abort(
+            _(
+                b'stream data requested but server does not allow '
+                b'this feature'
+            ),
+            hint=_(
+                b'well-behaved clients should not be '
+                b'requesting stream data from servers not '
+                b'advertising it; the client may be buggy'
+            ),
+        )
 
     # Stream clones don't compress well. And compression undermines a
     # goal of stream clones, which is to be fast. Communicate the desire
@@ -1701,29 +1818,35 @@
     includepats = kwargs.get(r'includepats')
     excludepats = kwargs.get(r'excludepats')
 
-    narrowstream = repo.ui.configbool('experimental',
-                                      'server.stream-narrow-clones')
+    narrowstream = repo.ui.configbool(
+        b'experimental', b'server.stream-narrow-clones'
+    )
 
     if (includepats or excludepats) and not narrowstream:
-        raise error.Abort(_('server does not support narrow stream clones'))
+        raise error.Abort(_(b'server does not support narrow stream clones'))
 
     includeobsmarkers = False
     if repo.obsstore:
         remoteversions = obsmarkersversion(bundler.capabilities)
         if not remoteversions:
-            raise error.Abort(_('server has obsolescence markers, but client '
-                                'cannot receive them via stream clone'))
+            raise error.Abort(
+                _(
+                    b'server has obsolescence markers, but client '
+                    b'cannot receive them via stream clone'
+                )
+            )
         elif repo.obsstore._version in remoteversions:
             includeobsmarkers = True
 
-    filecount, bytecount, it = streamclone.generatev2(repo, includepats,
-                                                      excludepats,
-                                                      includeobsmarkers)
+    filecount, bytecount, it = streamclone.generatev2(
+        repo, includepats, excludepats, includeobsmarkers
+    )
     requirements = _formatrequirementsspec(repo.requirements)
-    part = bundler.newpart('stream2', data=it)
-    part.addparam('bytecount', '%d' % bytecount, mandatory=True)
-    part.addparam('filecount', '%d' % filecount, mandatory=True)
-    part.addparam('requirements', requirements, mandatory=True)
+    part = bundler.newpart(b'stream2', data=it)
+    part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
+    part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
+    part.addparam(b'requirements', requirements, mandatory=True)
+
 
 def buildobsmarkerspart(bundler, markers):
     """add an obsmarker part to the bundler with <markers>
@@ -1737,12 +1860,14 @@
     remoteversions = obsmarkersversion(bundler.capabilities)
     version = obsolete.commonversion(remoteversions)
     if version is None:
-        raise ValueError('bundler does not support common obsmarker format')
+        raise ValueError(b'bundler does not support common obsmarker format')
     stream = obsolete.encodemarkers(markers, True, version=version)
-    return bundler.newpart('obsmarkers', data=stream)
-
-def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None,
-                compopts=None):
+    return bundler.newpart(b'obsmarkers', data=stream)
+
+
+def writebundle(
+    ui, cg, filename, bundletype, vfs=None, compression=None, compopts=None
+):
     """Write a bundle file and return its filename.
 
     Existing files will not be overwritten.
@@ -1751,40 +1876,43 @@
     The bundle file will be deleted in case of errors.
     """
 
-    if bundletype == "HG20":
+    if bundletype == b"HG20":
         bundle = bundle20(ui)
         bundle.setcompression(compression, compopts)
-        part = bundle.newpart('changegroup', data=cg.getchunks())
-        part.addparam('version', cg.version)
-        if 'clcount' in cg.extras:
-            part.addparam('nbchanges', '%d' % cg.extras['clcount'],
-                          mandatory=False)
+        part = bundle.newpart(b'changegroup', data=cg.getchunks())
+        part.addparam(b'version', cg.version)
+        if b'clcount' in cg.extras:
+            part.addparam(
+                b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
+            )
         chunkiter = bundle.getchunks()
     else:
         # compression argument is only for the bundle2 case
         assert compression is None
-        if cg.version != '01':
-            raise error.Abort(_('old bundle types only supports v1 '
-                                'changegroups'))
+        if cg.version != b'01':
+            raise error.Abort(
+                _(b'old bundle types only support v1 changegroups')
+            )
         header, comp = bundletypes[bundletype]
         if comp not in util.compengines.supportedbundletypes:
-            raise error.Abort(_('unknown stream compression type: %s')
-                              % comp)
+            raise error.Abort(_(b'unknown stream compression type: %s') % comp)
         compengine = util.compengines.forbundletype(comp)
+
         def chunkiter():
             yield header
             for chunk in compengine.compressstream(cg.getchunks(), compopts):
                 yield chunk
+
         chunkiter = chunkiter()
 
     # parse the changegroup data, otherwise we will block
     # in case of sshrepo because we don't know the end of the stream
     return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
 
+
 def combinechangegroupresults(op):
     """logic to combine 0 or more addchangegroup results into one"""
-    results = [r.get('return', 0)
-               for r in op.records['changegroup']]
+    results = [r.get(b'return', 0) for r in op.records[b'changegroup']]
     changedheads = 0
     result = 1
     for ret in results:
@@ -1802,8 +1930,17 @@
         result = -1 + changedheads
     return result
 
-@parthandler('changegroup', ('version', 'nbchanges', 'treemanifest',
-                             'targetphase'))
+
+@parthandler(
+    b'changegroup',
+    (
+        b'version',
+        b'nbchanges',
+        b'exp-sidedata',
+        b'treemanifest',
+        b'targetphase',
+    ),
+)
 def handlechangegroup(op, inpart):
     """apply a changegroup part on the repo
 
@@ -1813,42 +1950,69 @@
     from . import localrepo
 
     tr = op.gettransaction()
-    unpackerversion = inpart.params.get('version', '01')
+    unpackerversion = inpart.params.get(b'version', b'01')
     # We should raise an appropriate exception here
     cg = changegroup.getunbundler(unpackerversion, inpart, None)
     # the source and url passed here are overwritten by the one contained in
     # the transaction.hookargs argument. So 'bundle2' is a placeholder
     nbchangesets = None
-    if 'nbchanges' in inpart.params:
-        nbchangesets = int(inpart.params.get('nbchanges'))
-    if ('treemanifest' in inpart.params and
-        'treemanifest' not in op.repo.requirements):
+    if b'nbchanges' in inpart.params:
+        nbchangesets = int(inpart.params.get(b'nbchanges'))
+    if (
+        b'treemanifest' in inpart.params
+        and b'treemanifest' not in op.repo.requirements
+    ):
         if len(op.repo.changelog) != 0:
-            raise error.Abort(_(
-                "bundle contains tree manifests, but local repo is "
-                "non-empty and does not use tree manifests"))
-        op.repo.requirements.add('treemanifest')
+            raise error.Abort(
+                _(
+                    b"bundle contains tree manifests, but local repo is "
+                    b"non-empty and does not use tree manifests"
+                )
+            )
+        op.repo.requirements.add(b'treemanifest')
         op.repo.svfs.options = localrepo.resolvestorevfsoptions(
-            op.repo.ui, op.repo.requirements, op.repo.features)
+            op.repo.ui, op.repo.requirements, op.repo.features
+        )
         op.repo._writerequirements()
+
+    bundlesidedata = bool(b'exp-sidedata' in inpart.params)
+    reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
+    if reposidedata and not bundlesidedata:
+        msg = b"repository is using sidedata but the bundle source do not"
+        hint = b'this is currently unsupported'
+        raise error.Abort(msg, hint=hint)
+
     extrakwargs = {}
-    targetphase = inpart.params.get('targetphase')
+    targetphase = inpart.params.get(b'targetphase')
     if targetphase is not None:
         extrakwargs[r'targetphase'] = int(targetphase)
-    ret = _processchangegroup(op, cg, tr, 'bundle2', 'bundle2',
-                              expectedtotal=nbchangesets, **extrakwargs)
+    ret = _processchangegroup(
+        op,
+        cg,
+        tr,
+        b'bundle2',
+        b'bundle2',
+        expectedtotal=nbchangesets,
+        **extrakwargs
+    )
     if op.reply is not None:
         # This is definitely not the final form of this
         # return. But one needs to start somewhere.
-        part = op.reply.newpart('reply:changegroup', mandatory=False)
+        part = op.reply.newpart(b'reply:changegroup', mandatory=False)
         part.addparam(
-            'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False)
-        part.addparam('return', '%i' % ret, mandatory=False)
+            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
+        )
+        part.addparam(b'return', b'%i' % ret, mandatory=False)
     assert not inpart.read()
 
-_remotechangegroupparams = tuple(['url', 'size', 'digests'] +
-    ['digest:%s' % k for k in util.DIGESTS.keys()])
-@parthandler('remote-changegroup', _remotechangegroupparams)
+
+_remotechangegroupparams = tuple(
+    [b'url', b'size', b'digests']
+    + [b'digest:%s' % k for k in util.DIGESTS.keys()]
+)
+
+
+@parthandler(b'remote-changegroup', _remotechangegroupparams)
 def handleremotechangegroup(op, inpart):
     """apply a bundle10 on the repo, given an url and validation information
 
@@ -1866,62 +2030,75 @@
     When multiple digest types are given, all of them are checked.
     """
     try:
-        raw_url = inpart.params['url']
+        raw_url = inpart.params[b'url']
     except KeyError:
-        raise error.Abort(_('remote-changegroup: missing "%s" param') % 'url')
+        raise error.Abort(_(b'remote-changegroup: missing "%s" param') % b'url')
     parsed_url = util.url(raw_url)
-    if parsed_url.scheme not in capabilities['remote-changegroup']:
-        raise error.Abort(_('remote-changegroup does not support %s urls') %
-            parsed_url.scheme)
+    if parsed_url.scheme not in capabilities[b'remote-changegroup']:
+        raise error.Abort(
+            _(b'remote-changegroup does not support %s urls')
+            % parsed_url.scheme
+        )
 
     try:
-        size = int(inpart.params['size'])
+        size = int(inpart.params[b'size'])
     except ValueError:
-        raise error.Abort(_('remote-changegroup: invalid value for param "%s"')
-            % 'size')
+        raise error.Abort(
+            _(b'remote-changegroup: invalid value for param "%s"') % b'size'
+        )
     except KeyError:
-        raise error.Abort(_('remote-changegroup: missing "%s" param') % 'size')
+        raise error.Abort(
+            _(b'remote-changegroup: missing "%s" param') % b'size'
+        )
 
     digests = {}
-    for typ in inpart.params.get('digests', '').split():
-        param = 'digest:%s' % typ
+    for typ in inpart.params.get(b'digests', b'').split():
+        param = b'digest:%s' % typ
         try:
             value = inpart.params[param]
         except KeyError:
-            raise error.Abort(_('remote-changegroup: missing "%s" param') %
-                param)
+            raise error.Abort(
+                _(b'remote-changegroup: missing "%s" param') % param
+            )
         digests[typ] = value
 
     real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests)
 
     tr = op.gettransaction()
     from . import exchange
+
     cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
     if not isinstance(cg, changegroup.cg1unpacker):
-        raise error.Abort(_('%s: not a bundle version 1.0') %
-            util.hidepassword(raw_url))
-    ret = _processchangegroup(op, cg, tr, 'bundle2', 'bundle2')
+        raise error.Abort(
+            _(b'%s: not a bundle version 1.0') % util.hidepassword(raw_url)
+        )
+    ret = _processchangegroup(op, cg, tr, b'bundle2', b'bundle2')
     if op.reply is not None:
         # This is definitely not the final form of this
         # return. But one needs to start somewhere.
-        part = op.reply.newpart('reply:changegroup')
+        part = op.reply.newpart(b'reply:changegroup')
         part.addparam(
-            'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False)
-        part.addparam('return', '%i' % ret, mandatory=False)
+            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
+        )
+        part.addparam(b'return', b'%i' % ret, mandatory=False)
     try:
         real_part.validate()
     except error.Abort as e:
-        raise error.Abort(_('bundle at %s is corrupted:\n%s') %
-                          (util.hidepassword(raw_url), bytes(e)))
+        raise error.Abort(
+            _(b'bundle at %s is corrupted:\n%s')
+            % (util.hidepassword(raw_url), bytes(e))
+        )
     assert not inpart.read()
 
-@parthandler('reply:changegroup', ('return', 'in-reply-to'))
+
+@parthandler(b'reply:changegroup', (b'return', b'in-reply-to'))
 def handlereplychangegroup(op, inpart):
-    ret = int(inpart.params['return'])
-    replyto = int(inpart.params['in-reply-to'])
-    op.records.add('changegroup', {'return': ret}, replyto)
-
-@parthandler('check:bookmarks')
+    ret = int(inpart.params[b'return'])
+    replyto = int(inpart.params[b'in-reply-to'])
+    op.records.add(b'changegroup', {b'return': ret}, replyto)
+
+
+@parthandler(b'check:bookmarks')
 def handlecheckbookmarks(op, inpart):
     """check location of bookmarks
 
@@ -1931,12 +2108,18 @@
     """
     bookdata = bookmarks.binarydecode(inpart)
 
-    msgstandard = ('remote repository changed while pushing - please try again '
-                   '(bookmark "%s" move from %s to %s)')
-    msgmissing = ('remote repository changed while pushing - please try again '
-                  '(bookmark "%s" is missing, expected %s)')
-    msgexist = ('remote repository changed while pushing - please try again '
-                '(bookmark "%s" set on %s, expected missing)')
+    msgstandard = (
+        b'remote repository changed while pushing - please try again '
+        b'(bookmark "%s" move from %s to %s)'
+    )
+    msgmissing = (
+        b'remote repository changed while pushing - please try again '
+        b'(bookmark "%s" is missing, expected %s)'
+    )
+    msgexist = (
+        b'remote repository changed while pushing - please try again '
+        b'(bookmark "%s" set on %s, expected missing)'
+    )
     for book, node in bookdata:
         currentnode = op.repo._bookmarks.get(book)
         if currentnode != node:
@@ -1945,11 +2128,15 @@
             elif currentnode is None:
                 finalmsg = msgmissing % (book, nodemod.short(node))
             else:
-                finalmsg = msgstandard % (book, nodemod.short(node),
-                                          nodemod.short(currentnode))
+                finalmsg = msgstandard % (
+                    book,
+                    nodemod.short(node),
+                    nodemod.short(currentnode),
+                )
             raise error.PushRaced(finalmsg)
 
-@parthandler('check:heads')
+
+@parthandler(b'check:heads')
 def handlecheckheads(op, inpart):
     """check that head of the repo did not change
 
@@ -1962,13 +2149,15 @@
         h = inpart.read(20)
     assert not h
     # Trigger a transaction so that we are guaranteed to have the lock now.
-    if op.ui.configbool('experimental', 'bundle2lazylocking'):
+    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
         op.gettransaction()
     if sorted(heads) != sorted(op.repo.heads()):
-        raise error.PushRaced('remote repository changed while pushing - '
-                              'please try again')
-
-@parthandler('check:updated-heads')
+        raise error.PushRaced(
+            b'remote repository changed while pushing - please try again'
+        )
+
+
+@parthandler(b'check:updated-heads')
 def handlecheckupdatedheads(op, inpart):
     """check for race on the heads touched by a push
 
@@ -1985,7 +2174,7 @@
         h = inpart.read(20)
     assert not h
     # trigger a transaction so that we are guaranteed to have the lock now.
-    if op.ui.configbool('experimental', 'bundle2lazylocking'):
+    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
         op.gettransaction()
 
     currentheads = set()
@@ -1994,10 +2183,13 @@
 
     for h in heads:
         if h not in currentheads:
-            raise error.PushRaced('remote repository changed while pushing - '
-                                  'please try again')
-
-@parthandler('check:phases')
+            raise error.PushRaced(
+                b'remote repository changed while pushing - '
+                b'please try again'
+            )
+
+
+@parthandler(b'check:phases')
 def handlecheckphases(op, inpart):
     """check that phase boundaries of the repository did not change
 
@@ -2007,24 +2199,30 @@
     unfi = op.repo.unfiltered()
     cl = unfi.changelog
     phasecache = unfi._phasecache
-    msg = ('remote repository changed while pushing - please try again '
-           '(%s is %s expected %s)')
+    msg = (
+        b'remote repository changed while pushing - please try again '
+        b'(%s is %s expected %s)'
+    )
     for expectedphase, nodes in enumerate(phasetonodes):
         for n in nodes:
             actualphase = phasecache.phase(unfi, cl.rev(n))
             if actualphase != expectedphase:
-                finalmsg = msg % (nodemod.short(n),
-                                  phases.phasenames[actualphase],
-                                  phases.phasenames[expectedphase])
+                finalmsg = msg % (
+                    nodemod.short(n),
+                    phases.phasenames[actualphase],
+                    phases.phasenames[expectedphase],
+                )
                 raise error.PushRaced(finalmsg)
 
-@parthandler('output')
+
+@parthandler(b'output')
 def handleoutput(op, inpart):
     """forward output captured on the server to the client"""
     for line in inpart.read().splitlines():
-        op.ui.status(_('remote: %s\n') % line)
-
-@parthandler('replycaps')
+        op.ui.status(_(b'remote: %s\n') % line)
+
+
+@parthandler(b'replycaps')
 def handlereplycaps(op, inpart):
     """Notify that a reply bundle should be created
 
@@ -2033,84 +2231,95 @@
     if op.reply is None:
         op.reply = bundle20(op.ui, caps)
 
+
 class AbortFromPart(error.Abort):
     """Sub-class of Abort that denotes an error from a bundle2 part."""
 
-@parthandler('error:abort', ('message', 'hint'))
+
+@parthandler(b'error:abort', (b'message', b'hint'))
 def handleerrorabort(op, inpart):
     """Used to transmit abort error over the wire"""
-    raise AbortFromPart(inpart.params['message'],
-                        hint=inpart.params.get('hint'))
-
-@parthandler('error:pushkey', ('namespace', 'key', 'new', 'old', 'ret',
-                               'in-reply-to'))
+    raise AbortFromPart(
+        inpart.params[b'message'], hint=inpart.params.get(b'hint')
+    )
+
+
+@parthandler(
+    b'error:pushkey',
+    (b'namespace', b'key', b'new', b'old', b'ret', b'in-reply-to'),
+)
 def handleerrorpushkey(op, inpart):
     """Used to transmit failure of a mandatory pushkey over the wire"""
     kwargs = {}
-    for name in ('namespace', 'key', 'new', 'old', 'ret'):
+    for name in (b'namespace', b'key', b'new', b'old', b'ret'):
         value = inpart.params.get(name)
         if value is not None:
             kwargs[name] = value
-    raise error.PushkeyFailed(inpart.params['in-reply-to'],
-                              **pycompat.strkwargs(kwargs))
-
-@parthandler('error:unsupportedcontent', ('parttype', 'params'))
+    raise error.PushkeyFailed(
+        inpart.params[b'in-reply-to'], **pycompat.strkwargs(kwargs)
+    )
+
+
+@parthandler(b'error:unsupportedcontent', (b'parttype', b'params'))
 def handleerrorunsupportedcontent(op, inpart):
     """Used to transmit unknown content error over the wire"""
     kwargs = {}
-    parttype = inpart.params.get('parttype')
+    parttype = inpart.params.get(b'parttype')
     if parttype is not None:
-        kwargs['parttype'] = parttype
-    params = inpart.params.get('params')
+        kwargs[b'parttype'] = parttype
+    params = inpart.params.get(b'params')
     if params is not None:
-        kwargs['params'] = params.split('\0')
+        kwargs[b'params'] = params.split(b'\0')
 
     raise error.BundleUnknownFeatureError(**pycompat.strkwargs(kwargs))
 
-@parthandler('error:pushraced', ('message',))
+
+@parthandler(b'error:pushraced', (b'message',))
 def handleerrorpushraced(op, inpart):
     """Used to transmit push race error over the wire"""
-    raise error.ResponseError(_('push failed:'), inpart.params['message'])
-
-@parthandler('listkeys', ('namespace',))
+    raise error.ResponseError(_(b'push failed:'), inpart.params[b'message'])
+
+
+@parthandler(b'listkeys', (b'namespace',))
 def handlelistkeys(op, inpart):
     """retrieve pushkey namespace content stored in a bundle2"""
-    namespace = inpart.params['namespace']
+    namespace = inpart.params[b'namespace']
     r = pushkey.decodekeys(inpart.read())
-    op.records.add('listkeys', (namespace, r))
-
-@parthandler('pushkey', ('namespace', 'key', 'old', 'new'))
+    op.records.add(b'listkeys', (namespace, r))
+
+
+@parthandler(b'pushkey', (b'namespace', b'key', b'old', b'new'))
 def handlepushkey(op, inpart):
     """process a pushkey request"""
     dec = pushkey.decode
-    namespace = dec(inpart.params['namespace'])
-    key = dec(inpart.params['key'])
-    old = dec(inpart.params['old'])
-    new = dec(inpart.params['new'])
+    namespace = dec(inpart.params[b'namespace'])
+    key = dec(inpart.params[b'key'])
+    old = dec(inpart.params[b'old'])
+    new = dec(inpart.params[b'new'])
     # Grab the transaction to ensure that we have the lock before performing the
     # pushkey.
-    if op.ui.configbool('experimental', 'bundle2lazylocking'):
+    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
         op.gettransaction()
     ret = op.repo.pushkey(namespace, key, old, new)
-    record = {'namespace': namespace,
-              'key': key,
-              'old': old,
-              'new': new}
-    op.records.add('pushkey', record)
+    record = {b'namespace': namespace, b'key': key, b'old': old, b'new': new}
+    op.records.add(b'pushkey', record)
     if op.reply is not None:
-        rpart = op.reply.newpart('reply:pushkey')
+        rpart = op.reply.newpart(b'reply:pushkey')
         rpart.addparam(
-            'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False)
-        rpart.addparam('return', '%i' % ret, mandatory=False)
+            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
+        )
+        rpart.addparam(b'return', b'%i' % ret, mandatory=False)
     if inpart.mandatory and not ret:
         kwargs = {}
-        for key in ('namespace', 'key', 'new', 'old', 'ret'):
+        for key in (b'namespace', b'key', b'new', b'old', b'ret'):
             if key in inpart.params:
                 kwargs[key] = inpart.params[key]
-        raise error.PushkeyFailed(partid='%d' % inpart.id,
-                                  **pycompat.strkwargs(kwargs))
-
-@parthandler('bookmarks')
+        raise error.PushkeyFailed(
+            partid=b'%d' % inpart.id, **pycompat.strkwargs(kwargs)
+        )
+
+
+@parthandler(b'bookmarks')
 def handlebookmark(op, inpart):
     """transmit bookmark information
 
@@ -2129,95 +2338,110 @@
     """
     changes = bookmarks.binarydecode(inpart)
 
-    pushkeycompat = op.repo.ui.configbool('server', 'bookmarks-pushkey-compat')
-    bookmarksmode = op.modes.get('bookmarks', 'apply')
-
-    if bookmarksmode == 'apply':
+    pushkeycompat = op.repo.ui.configbool(
+        b'server', b'bookmarks-pushkey-compat'
+    )
+    bookmarksmode = op.modes.get(b'bookmarks', b'apply')
+
+    if bookmarksmode == b'apply':
         tr = op.gettransaction()
         bookstore = op.repo._bookmarks
         if pushkeycompat:
             allhooks = []
             for book, node in changes:
                 hookargs = tr.hookargs.copy()
-                hookargs['pushkeycompat'] = '1'
-                hookargs['namespace'] = 'bookmarks'
-                hookargs['key'] = book
-                hookargs['old'] = nodemod.hex(bookstore.get(book, ''))
-                hookargs['new'] = nodemod.hex(node if node is not None else '')
+                hookargs[b'pushkeycompat'] = b'1'
+                hookargs[b'namespace'] = b'bookmarks'
+                hookargs[b'key'] = book
+                hookargs[b'old'] = nodemod.hex(bookstore.get(book, b''))
+                hookargs[b'new'] = nodemod.hex(
+                    node if node is not None else b''
+                )
                 allhooks.append(hookargs)
 
             for hookargs in allhooks:
-                op.repo.hook('prepushkey', throw=True,
-                             **pycompat.strkwargs(hookargs))
+                op.repo.hook(
+                    b'prepushkey', throw=True, **pycompat.strkwargs(hookargs)
+                )
 
         bookstore.applychanges(op.repo, op.gettransaction(), changes)
 
         if pushkeycompat:
+
             def runhook():
                 for hookargs in allhooks:
-                    op.repo.hook('pushkey', **pycompat.strkwargs(hookargs))
+                    op.repo.hook(b'pushkey', **pycompat.strkwargs(hookargs))
+
             op.repo._afterlock(runhook)
 
-    elif bookmarksmode == 'records':
+    elif bookmarksmode == b'records':
         for book, node in changes:
-            record = {'bookmark': book, 'node': node}
-            op.records.add('bookmarks', record)
+            record = {b'bookmark': book, b'node': node}
+            op.records.add(b'bookmarks', record)
     else:
-        raise error.ProgrammingError('unkown bookmark mode: %s' % bookmarksmode)
-
-@parthandler('phase-heads')
+        raise error.ProgrammingError(
+            b'unknown bookmark mode: %s' % bookmarksmode
+        )
+
+
+@parthandler(b'phase-heads')
 def handlephases(op, inpart):
     """apply phases from bundle part to repo"""
     headsbyphase = phases.binarydecode(inpart)
     phases.updatephases(op.repo.unfiltered(), op.gettransaction, headsbyphase)
 
-@parthandler('reply:pushkey', ('return', 'in-reply-to'))
+
+@parthandler(b'reply:pushkey', (b'return', b'in-reply-to'))
 def handlepushkeyreply(op, inpart):
     """retrieve the result of a pushkey request"""
-    ret = int(inpart.params['return'])
-    partid = int(inpart.params['in-reply-to'])
-    op.records.add('pushkey', {'return': ret}, partid)
-
-@parthandler('obsmarkers')
+    ret = int(inpart.params[b'return'])
+    partid = int(inpart.params[b'in-reply-to'])
+    op.records.add(b'pushkey', {b'return': ret}, partid)
+
+
+@parthandler(b'obsmarkers')
 def handleobsmarker(op, inpart):
     """add a stream of obsmarkers to the repo"""
     tr = op.gettransaction()
     markerdata = inpart.read()
-    if op.ui.config('experimental', 'obsmarkers-exchange-debug'):
-        op.ui.write(('obsmarker-exchange: %i bytes received\n')
-                    % len(markerdata))
+    if op.ui.config(b'experimental', b'obsmarkers-exchange-debug'):
+        op.ui.writenoi18n(
+            b'obsmarker-exchange: %i bytes received\n' % len(markerdata)
+        )
     # The mergemarkers call will crash if marker creation is not enabled.
     # we want to avoid this if the part is advisory.
     if not inpart.mandatory and op.repo.obsstore.readonly:
-        op.repo.ui.debug('ignoring obsolescence markers, feature not enabled\n')
+        op.repo.ui.debug(
+            b'ignoring obsolescence markers, feature not enabled\n'
+        )
         return
     new = op.repo.obsstore.mergemarkers(tr, markerdata)
     op.repo.invalidatevolatilesets()
-    if new:
-        op.repo.ui.status(_('%i new obsolescence markers\n') % new)
-    op.records.add('obsmarkers', {'new': new})
+    op.records.add(b'obsmarkers', {b'new': new})
     if op.reply is not None:
-        rpart = op.reply.newpart('reply:obsmarkers')
+        rpart = op.reply.newpart(b'reply:obsmarkers')
         rpart.addparam(
-            'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False)
-        rpart.addparam('new', '%i' % new, mandatory=False)
-
-
-@parthandler('reply:obsmarkers', ('new', 'in-reply-to'))
+            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
+        )
+        rpart.addparam(b'new', b'%i' % new, mandatory=False)
+
+
+@parthandler(b'reply:obsmarkers', (b'new', b'in-reply-to'))
 def handleobsmarkerreply(op, inpart):
     """retrieve the result of a pushkey request"""
-    ret = int(inpart.params['new'])
-    partid = int(inpart.params['in-reply-to'])
-    op.records.add('obsmarkers', {'new': ret}, partid)
-
-@parthandler('hgtagsfnodes')
+    ret = int(inpart.params[b'new'])
+    partid = int(inpart.params[b'in-reply-to'])
+    op.records.add(b'obsmarkers', {b'new': ret}, partid)
+
+
+@parthandler(b'hgtagsfnodes')
 def handlehgtagsfnodes(op, inpart):
     """Applies .hgtags fnodes cache entries to the local repo.
 
     Payload is pairs of 20 byte changeset nodes and filenodes.
     """
     # Grab the transaction so we ensure that we have the lock at this point.
-    if op.ui.configbool('experimental', 'bundle2lazylocking'):
+    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
         op.gettransaction()
     cache = tags.hgtagsfnodescache(op.repo.unfiltered())
 
@@ -2226,17 +2450,19 @@
         node = inpart.read(20)
         fnode = inpart.read(20)
         if len(node) < 20 or len(fnode) < 20:
-            op.ui.debug('ignoring incomplete received .hgtags fnodes data\n')
+            op.ui.debug(b'ignoring incomplete received .hgtags fnodes data\n')
             break
         cache.setfnode(node, fnode)
         count += 1
 
     cache.write()
-    op.ui.debug('applied %i hgtags fnodes cache entries\n' % count)
-
-rbcstruct = struct.Struct('>III')
-
-@parthandler('cache:rev-branch-cache')
+    op.ui.debug(b'applied %i hgtags fnodes cache entries\n' % count)
+
+
+rbcstruct = struct.Struct(b'>III')
+
+
+@parthandler(b'cache:rev-branch-cache')
 def handlerbc(op, inpart):
     """receive a rev-branch-cache payload and update the local cache
 
@@ -2268,38 +2494,41 @@
         rawheader = inpart.read(rbcstruct.size)
     cache.write()
 
-@parthandler('pushvars')
+
+@parthandler(b'pushvars')
 def bundle2getvars(op, part):
     '''unbundle a bundle2 containing shellvars on the server'''
     # An option to disable unbundling on server-side for security reasons
-    if op.ui.configbool('push', 'pushvars.server'):
+    if op.ui.configbool(b'push', b'pushvars.server'):
         hookargs = {}
         for key, value in part.advisoryparams:
             key = key.upper()
             # We want pushed variables to have USERVAR_ prepended so we know
             # they came from the --pushvar flag.
-            key = "USERVAR_" + key
+            key = b"USERVAR_" + key
             hookargs[key] = value
         op.addhookargs(hookargs)
 
-@parthandler('stream2', ('requirements', 'filecount', 'bytecount'))
+
+@parthandler(b'stream2', (b'requirements', b'filecount', b'bytecount'))
 def handlestreamv2bundle(op, part):
 
-    requirements = urlreq.unquote(part.params['requirements']).split(',')
-    filecount = int(part.params['filecount'])
-    bytecount = int(part.params['bytecount'])
+    requirements = urlreq.unquote(part.params[b'requirements']).split(b',')
+    filecount = int(part.params[b'filecount'])
+    bytecount = int(part.params[b'bytecount'])
 
     repo = op.repo
     if len(repo):
-        msg = _('cannot apply stream clone to non empty repository')
+        msg = _(b'cannot apply stream clone to non empty repository')
         raise error.Abort(msg)
 
-    repo.ui.debug('applying stream bundle\n')
-    streamclone.applybundlev2(repo, part, filecount, bytecount,
-                              requirements)
-
-def widen_bundle(bundler, repo, oldmatcher, newmatcher, common,
-                 known, cgversion, ellipses):
+    repo.ui.debug(b'applying stream bundle\n')
+    streamclone.applybundlev2(repo, part, filecount, bytecount, requirements)
+
+
+def widen_bundle(
+    bundler, repo, oldmatcher, newmatcher, common, known, cgversion, ellipses
+):
     """generates bundle2 for widening a narrow clone
 
     bundler is the bundle to which data should be added
@@ -2315,21 +2544,31 @@
     """
     commonnodes = set()
     cl = repo.changelog
-    for r in repo.revs("::%ln", common):
+    for r in repo.revs(b"::%ln", common):
         commonnodes.add(cl.node(r))
     if commonnodes:
         # XXX: we should only send the filelogs (and treemanifest). user
         # already has the changelog and manifest
-        packer = changegroup.getbundler(cgversion, repo,
-                                        oldmatcher=oldmatcher,
-                                        matcher=newmatcher,
-                                        fullnodes=commonnodes)
-        cgdata = packer.generate({nodemod.nullid}, list(commonnodes),
-                                 False, 'narrow_widen', changelog=False)
-
-        part = bundler.newpart('changegroup', data=cgdata)
-        part.addparam('version', cgversion)
-        if 'treemanifest' in repo.requirements:
-            part.addparam('treemanifest', '1')
+        packer = changegroup.getbundler(
+            cgversion,
+            repo,
+            oldmatcher=oldmatcher,
+            matcher=newmatcher,
+            fullnodes=commonnodes,
+        )
+        cgdata = packer.generate(
+            {nodemod.nullid},
+            list(commonnodes),
+            False,
+            b'narrow_widen',
+            changelog=False,
+        )
+
+        part = bundler.newpart(b'changegroup', data=cgdata)
+        part.addparam(b'version', cgversion)
+        if b'treemanifest' in repo.requirements:
+            part.addparam(b'treemanifest', b'1')
+        if b'exp-sidedata-flag' in repo.requirements:
+            part.addparam(b'exp-sidedata', b'1')
 
     return bundler
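
The part handlers above all follow one protocol: @parthandler registers a part type together with the tuple of parameter names the handler understands, and the handler receives the bundle operation plus the unbundled part. A minimal sketch of a handler written against this API (the part name 'myext:example' and its parameter are hypothetical):

    from mercurial import bundle2

    @bundle2.parthandler(b'myext:example', (b'count',))
    def handleexample(op, inpart):
        """record how many items the sender advertised"""
        # on Python 3, part types, parameter keys and values are all bytes
        count = int(inpart.params[b'count'])
        op.records.add(b'myext:example', {b'count': count})
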
--- a/mercurial/bundlerepo.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/bundlerepo.py	Mon Oct 21 11:09:48 2019 -0400
@@ -17,7 +17,7 @@
 import shutil
 
 from .i18n import _
-from .node import nullid
+from .node import nullid, nullrev
 
 from . import (
     bundle2,
@@ -41,6 +41,7 @@
     vfs as vfsmod,
 )
 
+
 class bundlerevlog(revlog.revlog):
     def __init__(self, opener, indexfile, cgunpacker, linkmapper):
         # How it works:
@@ -55,7 +56,7 @@
         self.bundle = cgunpacker
         n = len(self)
         self.repotiprev = n - 1
-        self.bundlerevs = set() # used by 'bundle()' revset expression
+        self.bundlerevs = set()  # used by 'bundle()' revset expression
         for deltadata in cgunpacker.deltaiter():
             node, p1, p2, cs, deltabase, delta, flags = deltadata
 
@@ -70,17 +71,27 @@
 
             for p in (p1, p2):
                 if p not in self.nodemap:
-                    raise error.LookupError(p, self.indexfile,
-                                            _("unknown parent"))
+                    raise error.LookupError(
+                        p, self.indexfile, _(b"unknown parent")
+                    )
 
             if deltabase not in self.nodemap:
-                raise LookupError(deltabase, self.indexfile,
-                                  _('unknown delta base'))
+                raise LookupError(
+                    deltabase, self.indexfile, _(b'unknown delta base')
+                )
 
             baserev = self.rev(deltabase)
             # start, size, full unc. size, base (unused), link, p1, p2, node
-            e = (revlog.offset_type(start, flags), size, -1, baserev, link,
-                 self.rev(p1), self.rev(p2), node)
+            e = (
+                revlog.offset_type(start, flags),
+                size,
+                -1,
+                baserev,
+                link,
+                self.rev(p1),
+                self.rev(p2),
+                node,
+            )
             self.index.append(e)
             self.nodemap[node] = n
             self.bundlerevs.add(n)
@@ -105,23 +116,12 @@
         elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
             return revlog.revlog.revdiff(self, rev1, rev2)
 
-        return mdiff.textdiff(self.revision(rev1, raw=True),
-                              self.revision(rev2, raw=True))
+        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
 
-    def revision(self, nodeorrev, _df=None, raw=False):
-        """return an uncompressed revision of a given node or revision
-        number.
-        """
-        if isinstance(nodeorrev, int):
-            rev = nodeorrev
-            node = self.node(rev)
-        else:
-            node = nodeorrev
+    def _rawtext(self, node, rev, _df=None):
+        if rev is None:
             rev = self.rev(node)
-
-        if node == nullid:
-            return ""
-
+        validated = False
         rawtext = None
         chain = []
         iterrev = rev
@@ -132,25 +132,19 @@
                 break
             chain.append(iterrev)
             iterrev = self.index[iterrev][3]
-        if rawtext is None:
-            rawtext = self.baserevision(iterrev)
-
+        if iterrev == nullrev:
+            rawtext = b''
+        elif rawtext is None:
+            r = super(bundlerevlog, self)._rawtext(
+                self.node(iterrev), iterrev, _df=_df
+            )
+            __, rawtext, validated = r
+        if chain:
+            validated = False
         while chain:
             delta = self._chunk(chain.pop())
             rawtext = mdiff.patches(rawtext, [delta])
-
-        text, validatehash = self._processflags(rawtext, self.flags(rev),
-                                                'read', raw=raw)
-        if validatehash:
-            self.checkhash(text, node, rev=rev)
-        self._revisioncache = (node, rev, rawtext)
-        return text
-
-    def baserevision(self, nodeorrev):
-        # Revlog subclasses may override 'revision' method to modify format of
-        # content retrieved from revlog. To use bundlerevlog with such class one
-        # needs to override 'baserevision' and make more specific call here.
-        return revlog.revlog.revision(self, nodeorrev, raw=True)
+        return rev, rawtext, validated
 
     def addrevision(self, *args, **kwargs):
         raise NotImplementedError
@@ -164,72 +158,57 @@
     def checksize(self):
         raise NotImplementedError
 
+
 class bundlechangelog(bundlerevlog, changelog.changelog):
     def __init__(self, opener, cgunpacker):
         changelog.changelog.__init__(self, opener)
         linkmapper = lambda x: x
-        bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
-                              linkmapper)
-
-    def baserevision(self, nodeorrev):
-        # Although changelog doesn't override 'revision' method, some extensions
-        # may replace this class with another that does. Same story with
-        # manifest and filelog classes.
+        bundlerevlog.__init__(
+            self, opener, self.indexfile, cgunpacker, linkmapper
+        )
 
-        # This bypasses filtering on changelog.node() and rev() because we need
-        # revision text of the bundle base even if it is hidden.
-        oldfilter = self.filteredrevs
-        try:
-            self.filteredrevs = ()
-            return changelog.changelog.revision(self, nodeorrev, raw=True)
-        finally:
-            self.filteredrevs = oldfilter
 
 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
-    def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None,
-                 dir=''):
+    def __init__(
+        self, opener, cgunpacker, linkmapper, dirlogstarts=None, dir=b''
+    ):
         manifest.manifestrevlog.__init__(self, opener, tree=dir)
-        bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
-                              linkmapper)
+        bundlerevlog.__init__(
+            self, opener, self.indexfile, cgunpacker, linkmapper
+        )
         if dirlogstarts is None:
             dirlogstarts = {}
-            if self.bundle.version == "03":
+            if self.bundle.version == b"03":
                 dirlogstarts = _getfilestarts(self.bundle)
         self._dirlogstarts = dirlogstarts
         self._linkmapper = linkmapper
 
-    def baserevision(self, nodeorrev):
-        node = nodeorrev
-        if isinstance(node, int):
-            node = self.node(node)
-
-        if node in self.fulltextcache:
-            result = '%s' % self.fulltextcache[node]
-        else:
-            result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
-        return result
-
     def dirlog(self, d):
         if d in self._dirlogstarts:
             self.bundle.seek(self._dirlogstarts[d])
             return bundlemanifest(
-                self.opener, self.bundle, self._linkmapper,
-                self._dirlogstarts, dir=d)
+                self.opener,
+                self.bundle,
+                self._linkmapper,
+                self._dirlogstarts,
+                dir=d,
+            )
         return super(bundlemanifest, self).dirlog(d)
 
+
 class bundlefilelog(filelog.filelog):
     def __init__(self, opener, path, cgunpacker, linkmapper):
         filelog.filelog.__init__(self, opener, path)
-        self._revlog = bundlerevlog(opener, self.indexfile,
-                                    cgunpacker, linkmapper)
+        self._revlog = bundlerevlog(
+            opener, self.indexfile, cgunpacker, linkmapper
+        )
 
-    def baserevision(self, nodeorrev):
-        return filelog.filelog.revision(self, nodeorrev, raw=True)
 
 class bundlepeer(localrepo.localpeer):
     def canpush(self):
         return False
 
+
 class bundlephasecache(phases.phasecache):
     def __init__(self, *args, **kwargs):
         super(bundlephasecache, self).__init__(*args, **kwargs)
@@ -247,15 +226,17 @@
         self.invalidate()
         self.dirty = True
 
+
 def _getfilestarts(cgunpacker):
     filespos = {}
     for chunkdata in iter(cgunpacker.filelogheader, {}):
-        fname = chunkdata['filename']
+        fname = chunkdata[b'filename']
         filespos[fname] = cgunpacker.tell()
         for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
             pass
     return filespos
 
+
 class bundlerepository(object):
     """A repository instance that is a union of a local repo and a bundle.
 
@@ -268,14 +249,15 @@
     Instances constructed directly are not usable as repository objects.
     Use instance() or makebundlerepository() to create instances.
     """
+
     def __init__(self, bundlepath, url, tempparent):
         self._tempparent = tempparent
         self._url = url
 
-        self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
+        self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
 
         self.tempfile = None
-        f = util.posixfile(bundlepath, "rb")
+        f = util.posixfile(bundlepath, b"rb")
         bundle = exchange.readbundle(self.ui, f, bundlepath)
 
         if isinstance(bundle, bundle2.unbundle20):
@@ -284,16 +266,17 @@
 
             cgpart = None
             for part in bundle.iterparts(seekable=True):
-                if part.type == 'changegroup':
+                if part.type == b'changegroup':
                     if cgpart:
-                        raise NotImplementedError("can't process "
-                                                  "multiple changegroups")
+                        raise NotImplementedError(
+                            b"can't process multiple changegroups"
+                        )
                     cgpart = part
 
                 self._handlebundle2part(bundle, part)
 
             if not cgpart:
-                raise error.Abort(_("No changegroups found"))
+                raise error.Abort(_(b"No changegroups found"))
 
             # This is required to placate a later consumer, which expects
             # the payload offset to be at the beginning of the changegroup.
@@ -304,54 +287,59 @@
 
         elif isinstance(bundle, changegroup.cg1unpacker):
             if bundle.compressed():
-                f = self._writetempbundle(bundle.read, '.hg10un',
-                                          header='HG10UN')
+                f = self._writetempbundle(
+                    bundle.read, b'.hg10un', header=b'HG10UN'
+                )
                 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
 
             self._bundlefile = bundle
             self._cgunpacker = bundle
         else:
-            raise error.Abort(_('bundle type %s cannot be read') %
-                              type(bundle))
+            raise error.Abort(
+                _(b'bundle type %s cannot be read') % type(bundle)
+            )
 
         # dict with the mapping 'filename' -> position in the changegroup.
         self._cgfilespos = {}
 
         self.firstnewrev = self.changelog.repotiprev + 1
-        phases.retractboundary(self, None, phases.draft,
-                               [ctx.node() for ctx in self[self.firstnewrev:]])
+        phases.retractboundary(
+            self,
+            None,
+            phases.draft,
+            [ctx.node() for ctx in self[self.firstnewrev :]],
+        )
 
     def _handlebundle2part(self, bundle, part):
-        if part.type != 'changegroup':
+        if part.type != b'changegroup':
             return
 
         cgstream = part
-        version = part.params.get('version', '01')
+        version = part.params.get(b'version', b'01')
         legalcgvers = changegroup.supportedincomingversions(self)
         if version not in legalcgvers:
-            msg = _('Unsupported changegroup version: %s')
+            msg = _(b'Unsupported changegroup version: %s')
             raise error.Abort(msg % version)
         if bundle.compressed():
-            cgstream = self._writetempbundle(part.read, '.cg%sun' % version)
+            cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
 
-        self._cgunpacker = changegroup.getunbundler(version, cgstream, 'UN')
+        self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
 
-    def _writetempbundle(self, readfn, suffix, header=''):
+    def _writetempbundle(self, readfn, suffix, header=b''):
         """Write a temporary file to disk
         """
-        fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
-                                        suffix=suffix)
+        fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
         self.tempfile = temp
 
         with os.fdopen(fdtemp, r'wb') as fptemp:
             fptemp.write(header)
             while True:
-                chunk = readfn(2**18)
+                chunk = readfn(2 ** 18)
                 if not chunk:
                     break
                 fptemp.write(chunk)
 
-        return self.vfs.open(self.tempfile, mode="rb")
+        return self.vfs.open(self.tempfile, mode=b"rb")
 
     @localrepo.unfilteredpropertycache
     def _phasecache(self):
@@ -379,8 +367,9 @@
         rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
         self.filestart = self._cgunpacker.tell()
 
-        return manifest.manifestlog(self.svfs, self, rootstore,
-                                    self.narrowmatch())
+        return manifest.manifestlog(
+            self.svfs, self, rootstore, self.narrowmatch()
+        )
 
     def _consumemanifest(self):
         """Consumes the manifest portion of the bundle, setting filestart so the
@@ -439,43 +428,44 @@
         return bundlepeer(self)
 
     def getcwd(self):
-        return encoding.getcwd() # always outside the repo
+        return encoding.getcwd()  # always outside the repo
 
     # Check if parents exist in localrepo before setting
     def setparents(self, p1, p2=nullid):
         p1rev = self.changelog.rev(p1)
         p2rev = self.changelog.rev(p2)
-        msg = _("setting parent to node %s that only exists in the bundle\n")
+        msg = _(b"setting parent to node %s that only exists in the bundle\n")
         if self.changelog.repotiprev < p1rev:
             self.ui.warn(msg % nodemod.hex(p1))
         if self.changelog.repotiprev < p2rev:
             self.ui.warn(msg % nodemod.hex(p2))
         return super(bundlerepository, self).setparents(p1, p2)
 
+
 def instance(ui, path, create, intents=None, createopts=None):
     if create:
-        raise error.Abort(_('cannot create new bundle repository'))
+        raise error.Abort(_(b'cannot create new bundle repository'))
     # internal config: bundle.mainreporoot
-    parentpath = ui.config("bundle", "mainreporoot")
+    parentpath = ui.config(b"bundle", b"mainreporoot")
     if not parentpath:
         # try to find the correct path to the working directory repo
         parentpath = cmdutil.findrepo(encoding.getcwd())
         if parentpath is None:
-            parentpath = ''
+            parentpath = b''
     if parentpath:
         # Try to make the full path relative so we get a nice, short URL.
         # In particular, we don't want temp dir names in test outputs.
         cwd = encoding.getcwd()
         if parentpath == cwd:
-            parentpath = ''
+            parentpath = b''
         else:
             cwd = pathutil.normasprefix(cwd)
             if parentpath.startswith(cwd):
-                parentpath = parentpath[len(cwd):]
+                parentpath = parentpath[len(cwd) :]
     u = util.url(path)
     path = u.localpath()
-    if u.scheme == 'bundle':
-        s = path.split("+", 1)
+    if u.scheme == b'bundle':
+        s = path.split(b"+", 1)
         if len(s) == 1:
             repopath, bundlename = parentpath, s[0]
         else:
@@ -485,12 +475,13 @@
 
     return makebundlerepository(ui, repopath, bundlename)
 
+
 def makebundlerepository(ui, repopath, bundlepath):
     """Make a bundle repository object based on repo and bundle paths."""
     if repopath:
-        url = 'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
+        url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
     else:
-        url = 'bundle:%s' % bundlepath
+        url = b'bundle:%s' % bundlepath
 
     # Because we can't make any guarantees about the type of the base
     # repository, we can't have a static class representing the bundle
@@ -522,6 +513,7 @@
 
     return repo
 
+
 class bundletransactionmanager(object):
     def transaction(self):
         return None
@@ -532,8 +524,10 @@
     def release(self):
         raise NotImplementedError
 
-def getremotechanges(ui, repo, peer, onlyheads=None, bundlename=None,
-                     force=False):
+
+def getremotechanges(
+    ui, repo, peer, onlyheads=None, bundlename=None, force=False
+):
     '''obtains a bundle of changes incoming from peer
 
     "onlyheads" restricts the returned changes to those reachable from the
@@ -553,8 +547,7 @@
       the changes; it closes both the original "peer" and the one returned
       here.
     '''
-    tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads,
-                                       force=force)
+    tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
     common, incoming, rheads = tmp
     if not incoming:
         try:
@@ -574,64 +567,76 @@
         # create a bundle (uncompressed if peer repo is not local)
 
         # developer config: devel.legacy.exchange
-        legexc = ui.configlist('devel', 'legacy.exchange')
-        forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
-        canbundle2 = (not forcebundle1
-                      and peer.capable('getbundle')
-                      and peer.capable('bundle2'))
+        legexc = ui.configlist(b'devel', b'legacy.exchange')
+        forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
+        canbundle2 = (
+            not forcebundle1
+            and peer.capable(b'getbundle')
+            and peer.capable(b'bundle2')
+        )
         if canbundle2:
             with peer.commandexecutor() as e:
-                b2 = e.callcommand('getbundle', {
-                    'source': 'incoming',
-                    'common': common,
-                    'heads': rheads,
-                    'bundlecaps': exchange.caps20to10(repo, role='client'),
-                    'cg': True,
-                }).result()
+                b2 = e.callcommand(
+                    b'getbundle',
+                    {
+                        b'source': b'incoming',
+                        b'common': common,
+                        b'heads': rheads,
+                        b'bundlecaps': exchange.caps20to10(
+                            repo, role=b'client'
+                        ),
+                        b'cg': True,
+                    },
+                ).result()
 
-                fname = bundle = changegroup.writechunks(ui,
-                                                         b2._forwardchunks(),
-                                                         bundlename)
+                fname = bundle = changegroup.writechunks(
+                    ui, b2._forwardchunks(), bundlename
+                )
         else:
-            if peer.capable('getbundle'):
+            if peer.capable(b'getbundle'):
                 with peer.commandexecutor() as e:
-                    cg = e.callcommand('getbundle', {
-                        'source': 'incoming',
-                        'common': common,
-                        'heads': rheads,
-                    }).result()
-            elif onlyheads is None and not peer.capable('changegroupsubset'):
+                    cg = e.callcommand(
+                        b'getbundle',
+                        {
+                            b'source': b'incoming',
+                            b'common': common,
+                            b'heads': rheads,
+                        },
+                    ).result()
+            elif onlyheads is None and not peer.capable(b'changegroupsubset'):
                 # compat with older servers when pulling all remote heads
 
                 with peer.commandexecutor() as e:
-                    cg = e.callcommand('changegroup', {
-                        'nodes': incoming,
-                        'source': 'incoming',
-                    }).result()
+                    cg = e.callcommand(
+                        b'changegroup',
+                        {b'nodes': incoming, b'source': b'incoming',},
+                    ).result()
 
                 rheads = None
             else:
                 with peer.commandexecutor() as e:
-                    cg = e.callcommand('changegroupsubset', {
-                        'bases': incoming,
-                        'heads': rheads,
-                        'source': 'incoming',
-                    }).result()
+                    cg = e.callcommand(
+                        b'changegroupsubset',
+                        {
+                            b'bases': incoming,
+                            b'heads': rheads,
+                            b'source': b'incoming',
+                        },
+                    ).result()
 
             if localrepo:
-                bundletype = "HG10BZ"
+                bundletype = b"HG10BZ"
             else:
-                bundletype = "HG10UN"
-            fname = bundle = bundle2.writebundle(ui, cg, bundlename,
-                                                     bundletype)
+                bundletype = b"HG10UN"
+            fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
         # keep written bundle?
         if bundlename:
             bundle = None
         if not localrepo:
             # use the created uncompressed bundlerepo
-            localrepo = bundlerepo = makebundlerepository(repo.baseui,
-                                                          repo.root,
-                                                          fname)
+            localrepo = bundlerepo = makebundlerepository(
+                repo.baseui, repo.root, fname
+            )
 
             # this repo contains local and peer now, so filter out local again
             common = repo.heads()
@@ -644,12 +649,12 @@
     csets = localrepo.changelog.findmissing(common, rheads)
 
     if bundlerepo:
-        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
+        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
 
         with peer.commandexecutor() as e:
-            remotephases = e.callcommand('listkeys', {
-                'namespace': 'phases',
-            }).result()
+            remotephases = e.callcommand(
+                b'listkeys', {b'namespace': b'phases',}
+            ).result()
 
         pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
         pullop.trmanager = bundletransactionmanager()
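
bundlerepository overlays a changegroup on top of a local repository, which is what backs `hg incoming --bundle` and the bundle() revset used above. A usage sketch, with illustrative paths, built on the makebundlerepository() constructor defined in this file:

    from mercurial import ui as uimod
    from mercurial import bundlerepo

    ui = uimod.ui.load()
    # overlay changes.hg on the repository at /src/repo
    repo = bundlerepo.makebundlerepository(
        ui, b'/src/repo', b'/tmp/changes.hg'
    )
    for rev in repo.revs(b'bundle()'):  # revisions only present in the bundle
        ui.write(b'%s\n' % repo[rev].hex())
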
--- a/mercurial/cacheutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/cacheutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,14 +8,15 @@
 
 from . import repoview
 
+
 def cachetocopy(srcrepo):
     """return the list of cache file valuable to copy during a clone"""
     # In local clones we're copying all nodes, not just served
     # ones. Therefore copy all branch caches over.
-    cachefiles = ['branch2']
-    cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
-    cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
-    cachefiles += ['tags2']
-    cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
-    cachefiles += ['hgtagsfnodes1']
+    cachefiles = [b'branch2']
+    cachefiles += [b'branch2-%s' % f for f in repoview.filtertable]
+    cachefiles += [b'rbc-names-v1', b'rbc-revs-v1']
+    cachefiles += [b'tags2']
+    cachefiles += [b'tags2-%s' % f for f in repoview.filtertable]
+    cachefiles += [b'hgtagsfnodes1']
     return cachefiles
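
A sketch of how a local clone might consume this list; copycaches() is a hypothetical helper, and cachevfs is the repository's cache-directory vfs:

    from mercurial import cacheutil, util

    def copycaches(srcrepo, destrepo):
        """copy the clone-worthy cache files from srcrepo to destrepo"""
        for name in cacheutil.cachetocopy(srcrepo):
            if srcrepo.cachevfs.exists(name):
                util.copyfile(
                    srcrepo.cachevfs.join(name), destrepo.cachevfs.join(name)
                )
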
--- a/mercurial/cext/dirs.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/cext/dirs.c	Mon Oct 21 11:09:48 2019 -0400
@@ -26,14 +26,13 @@
  *
  * We modify Python integers for refcounting, but those integers are
  * never visible to Python code.
- *
- * We mutate strings in-place, but leave them immutable once they can
- * be seen by Python code.
  */
+/* clang-format off */
 typedef struct {
 	PyObject_HEAD
 	PyObject *dict;
 } dirsObject;
+/* clang-format on */
 
 static inline Py_ssize_t _finddir(const char *path, Py_ssize_t pos)
 {
@@ -43,7 +42,7 @@
 		pos -= 1;
 	}
 	if (pos == -1) {
-	  return 0;
+		return 0;
 	}
 
 	return pos;
@@ -57,37 +56,24 @@
 	int ret = -1;
 
 	/* This loop is super critical for performance. That's why we inline
-	* access to Python structs instead of going through a supported API.
-	* The implementation, therefore, is heavily dependent on CPython
-	* implementation details. We also commit violations of the Python
-	* "protocol" such as mutating immutable objects. But since we only
-	* mutate objects created in this function or in other well-defined
-	* locations, the references are known so these violations should go
-	* unnoticed. The code for adjusting the length of a PyBytesObject is
-	* essentially a minimal version of _PyBytes_Resize. */
+	 * access to Python structs instead of going through a supported API.
+	 * The implementation, therefore, is heavily dependent on CPython
+	 * implementation details. We also commit violations of the Python
+	 * "protocol" such as mutating immutable objects. But since we only
+	 * mutate objects created in this function or in other well-defined
+	 * locations, the references are known so these violations should go
+	 * unnoticed. */
 	while ((pos = _finddir(cpath, pos - 1)) != -1) {
 		PyObject *val;
 
-		/* It's likely that every prefix already has an entry
-		   in our dict. Try to avoid allocating and
-		   deallocating a string for each prefix we check. */
-		if (key != NULL)
-			((PyBytesObject *)key)->ob_shash = -1;
-		else {
-			/* Force Python to not reuse a small shared string. */
-			key = PyBytes_FromStringAndSize(cpath,
-							 pos < 2 ? 2 : pos);
-			if (key == NULL)
-				goto bail;
-		}
-		/* Py_SIZE(o) refers to the ob_size member of the struct. Yes,
-		* assigning to what looks like a function seems wrong. */
-		Py_SIZE(key) = pos;
-		((PyBytesObject *)key)->ob_sval[pos] = '\0';
+		key = PyBytes_FromStringAndSize(cpath, pos);
+		if (key == NULL)
+			goto bail;
 
 		val = PyDict_GetItem(dirs, key);
 		if (val != NULL) {
 			PYLONG_VALUE(val) += 1;
+			Py_CLEAR(key);
 			break;
 		}
 
@@ -134,7 +120,7 @@
 		val = PyDict_GetItem(dirs, key);
 		if (val == NULL) {
 			PyErr_SetString(PyExc_ValueError,
-					"expected a value, found none");
+			                "expected a value, found none");
 			goto bail;
 		}
 
@@ -166,7 +152,7 @@
 		if (skipchar) {
 			if (!dirstate_tuple_check(value)) {
 				PyErr_SetString(PyExc_TypeError,
-						"expected a dirstate tuple");
+				                "expected a dirstate tuple");
 				return -1;
 			}
 			if (((dirstateTupleObject *)value)->state == skipchar)
@@ -232,8 +218,8 @@
 		ret = dirs_fromdict(dirs, source, skipchar);
 	else if (skipchar)
 		PyErr_SetString(PyExc_ValueError,
-				"skip character is only supported "
-				"with a dict source");
+		                "skip character is only supported "
+		                "with a dict source");
 	else
 		ret = dirs_fromiter(dirs, source);
 
@@ -290,12 +276,12 @@
 static PySequenceMethods dirs_sequence_methods;
 
 static PyMethodDef dirs_methods[] = {
-	{"addpath", (PyCFunction)dirs_addpath, METH_VARARGS, "add a path"},
-	{"delpath", (PyCFunction)dirs_delpath, METH_VARARGS, "remove a path"},
-	{NULL} /* Sentinel */
+    {"addpath", (PyCFunction)dirs_addpath, METH_VARARGS, "add a path"},
+    {"delpath", (PyCFunction)dirs_delpath, METH_VARARGS, "remove a path"},
+    {NULL} /* Sentinel */
 };
 
-static PyTypeObject dirsType = { PyVarObject_HEAD_INIT(NULL, 0) };
+static PyTypeObject dirsType = {PyVarObject_HEAD_INIT(NULL, 0)};
 
 void dirs_module_init(PyObject *mod)
 {
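
The loop rewritten above maintains a reference-counted dict of directory prefixes. A simplified pure-Python sketch of the same algorithm (mirroring Mercurial's fallback dirs implementation) makes the invariant visible: walk prefixes from longest to shortest and stop at the first one already present, because all shorter prefixes were counted when that entry was first created:

    def addpath(dirs, path):
        """count each directory prefix of path in the dirs dict"""
        pos = path.rfind(b'/')
        while pos != -1:
            d = path[:pos]
            if d in dirs:
                # shorter prefixes were bumped when this entry was created
                dirs[d] += 1
                return
            dirs[d] = 1
            pos = path.rfind(b'/', 0, pos)
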
--- a/mercurial/cext/osutil.c	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/cext/osutil.c	Mon Oct 21 11:09:48 2019 -0400
@@ -765,6 +765,8 @@
 
 #endif /* CMSG_LEN */
 
+/* allow disabling setprocname via compiler flags */
+#ifndef SETPROCNAME_USE_NONE
 #if defined(HAVE_SETPROCTITLE)
 /* setproctitle is the first choice - available in FreeBSD */
 #define SETPROCNAME_USE_SETPROCTITLE
@@ -775,6 +777,7 @@
 #else
 #define SETPROCNAME_USE_NONE
 #endif
+#endif /* ndef SETPROCNAME_USE_NONE */
 
 #ifndef SETPROCNAME_USE_NONE
 static PyObject *setprocname(PyObject *self, PyObject *args)
@@ -795,10 +798,17 @@
 			char *argvend;
 			extern void Py_GetArgcArgv(int *argc, char ***argv);
 			Py_GetArgcArgv(&argc, &argv);
+			/* Py_GetArgcArgv may not do much if a custom python
+			 * launcher is used that doesn't record the information
+			 * it needs. Let's handle this gracefully instead of
+			 * segfaulting. */
+			if (argv != NULL)
+				argvend = argvstart = argv[0];
+			else
+				argvend = argvstart = NULL;
 
 			/* Check the memory we can use. Typically, argv[i] and
 			 * argv[i + 1] are continuous. */
-			argvend = argvstart = argv[0];
 			for (i = 0; i < argc; ++i) {
 				if (argv[i] > argvend || argv[i] < argvstart)
 					break; /* not continuous */
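
Since setprocname() can now be compiled out entirely (SETPROCNAME_USE_NONE), Python callers should feature-test for it instead of assuming it exists. A hedged sketch of such a guarded call:

    from mercurial import policy

    osutil = policy.importmod('osutil')
    setprocname = getattr(osutil, 'setprocname', None)
    if setprocname is not None:
        # only effective where the C module found a usable mechanism
        setprocname(b'hg: serving')
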
--- a/mercurial/cffi/bdiff.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/cffi/bdiff.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,12 +15,13 @@
 ffi = _bdiff.ffi
 lib = _bdiff.lib
 
+
 def blocks(sa, sb):
-    a = ffi.new("struct bdiff_line**")
-    b = ffi.new("struct bdiff_line**")
-    ac = ffi.new("char[]", str(sa))
-    bc = ffi.new("char[]", str(sb))
-    l = ffi.new("struct bdiff_hunk*")
+    a = ffi.new(b"struct bdiff_line**")
+    b = ffi.new(b"struct bdiff_line**")
+    ac = ffi.new(b"char[]", str(sa))
+    bc = ffi.new(b"char[]", str(sb))
+    l = ffi.new(b"struct bdiff_hunk*")
     try:
         an = lib.bdiff_splitlines(ac, len(sa), a)
         bn = lib.bdiff_splitlines(bc, len(sb), b)
@@ -42,12 +43,13 @@
         lib.bdiff_freehunks(l.next)
     return rl
 
+
 def bdiff(sa, sb):
-    a = ffi.new("struct bdiff_line**")
-    b = ffi.new("struct bdiff_line**")
-    ac = ffi.new("char[]", str(sa))
-    bc = ffi.new("char[]", str(sb))
-    l = ffi.new("struct bdiff_hunk*")
+    a = ffi.new(b"struct bdiff_line**")
+    b = ffi.new(b"struct bdiff_line**")
+    ac = ffi.new(b"char[]", str(sa))
+    bc = ffi.new(b"char[]", str(sb))
+    l = ffi.new(b"struct bdiff_hunk*")
     try:
         an = lib.bdiff_splitlines(ac, len(sa), a)
         bn = lib.bdiff_splitlines(bc, len(sb), b)
@@ -62,8 +64,14 @@
         while h:
             if h.a1 != la or h.b1 != lb:
                 lgt = (b[0] + h.b1).l - (b[0] + lb).l
-                rl.append(struct.pack(">lll", (a[0] + la).l - a[0].l,
-                                      (a[0] + h.a1).l - a[0].l, lgt))
+                rl.append(
+                    struct.pack(
+                        b">lll",
+                        (a[0] + la).l - a[0].l,
+                        (a[0] + h.a1).l - a[0].l,
+                        lgt,
+                    )
+                )
                 rl.append(str(ffi.buffer((b[0] + lb).l, lgt)))
             la = h.a2
             lb = h.b2
@@ -73,4 +81,4 @@
         lib.free(a[0])
         lib.free(b[0])
         lib.bdiff_freehunks(l.next)
-    return "".join(rl)
+    return b"".join(rl)
--- a/mercurial/cffi/bdiffbuild.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/cffi/bdiffbuild.py	Mon Oct 21 11:09:48 2019 -0400
@@ -4,11 +4,14 @@
 import os
 
 ffi = cffi.FFI()
-with open(os.path.join(os.path.join(os.path.dirname(__file__), '..'),
-                       'bdiff.c')) as f:
-    ffi.set_source("mercurial.cffi._bdiff",
-                   f.read(), include_dirs=['mercurial'])
-ffi.cdef("""
+with open(
+    os.path.join(os.path.join(os.path.dirname(__file__), b'..'), b'bdiff.c')
+) as f:
+    ffi.set_source(
+        b"mercurial.cffi._bdiff", f.read(), include_dirs=[b'mercurial']
+    )
+ffi.cdef(
+    """
 struct bdiff_line {
     int hash, n, e;
     ssize_t len;
@@ -26,7 +29,8 @@
     struct bdiff_hunk *base);
 void bdiff_freehunks(struct bdiff_hunk *l);
 void free(void*);
-""")
+"""
+)
 
 if __name__ == '__main__':
     ffi.compile()
--- a/mercurial/cffi/mpatch.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/cffi/mpatch.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,35 +14,36 @@
 ffi = _mpatch.ffi
 lib = _mpatch.lib
 
+
 @ffi.def_extern()
 def cffi_get_next_item(arg, pos):
     all, bins = ffi.from_handle(arg)
-    container = ffi.new("struct mpatch_flist*[1]")
-    to_pass = ffi.new("char[]", str(bins[pos]))
+    container = ffi.new(b"struct mpatch_flist*[1]")
+    to_pass = ffi.new(b"char[]", str(bins[pos]))
     all.append(to_pass)
     r = lib.mpatch_decode(to_pass, len(to_pass) - 1, container)
     if r < 0:
         return ffi.NULL
     return container[0]
 
+
 def patches(text, bins):
     lgt = len(bins)
     all = []
     if not lgt:
         return text
     arg = (all, bins)
-    patch = lib.mpatch_fold(ffi.new_handle(arg),
-                            lib.cffi_get_next_item, 0, lgt)
+    patch = lib.mpatch_fold(ffi.new_handle(arg), lib.cffi_get_next_item, 0, lgt)
     if not patch:
-        raise mpatchError("cannot decode chunk")
+        raise mpatchError(b"cannot decode chunk")
     outlen = lib.mpatch_calcsize(len(text), patch)
     if outlen < 0:
         lib.mpatch_lfree(patch)
-        raise mpatchError("inconsistency detected")
-    buf = ffi.new("char[]", outlen)
+        raise mpatchError(b"inconsistency detected")
+    buf = ffi.new(b"char[]", outlen)
     if lib.mpatch_apply(buf, text, len(text), patch) < 0:
         lib.mpatch_lfree(patch)
-        raise mpatchError("error applying patches")
+        raise mpatchError(b"error applying patches")
     res = ffi.buffer(buf, outlen)[:]
     lib.mpatch_lfree(patch)
     return res
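
patches() applies a list of bdiff-format binary deltas to a base text in a single folded pass. A small sketch combining it with the bdiff wrapper above (assuming both darwin-only cffi modules are built):

    from mercurial.cffi.bdiff import bdiff
    from mercurial.cffi.mpatch import patches

    base = b"a\nb\nc\n"
    target = b"a\nx\nc\n"
    delta = bdiff(base, target)  # binary delta in bdiff format
    assert patches(base, [delta]) == target
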
--- a/mercurial/cffi/mpatchbuild.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/cffi/mpatchbuild.py	Mon Oct 21 11:09:48 2019 -0400
@@ -4,12 +4,15 @@
 import os
 
 ffi = cffi.FFI()
-mpatch_c = os.path.join(os.path.join(os.path.dirname(__file__), '..',
-                                     'mpatch.c'))
+mpatch_c = os.path.join(
+    os.path.join(os.path.dirname(__file__), b'..', b'mpatch.c')
+)
 with open(mpatch_c) as f:
-    ffi.set_source("mercurial.cffi._mpatch", f.read(),
-                   include_dirs=["mercurial"])
-ffi.cdef("""
+    ffi.set_source(
+        b"mercurial.cffi._mpatch", f.read(), include_dirs=[b"mercurial"]
+    )
+ffi.cdef(
+    """
 
 struct mpatch_frag {
        int start, end, len;
@@ -30,7 +33,8 @@
 struct mpatch_flist *mpatch_fold(void *bins,
                        struct mpatch_flist* (*get_next_item)(void*, ssize_t),
                        ssize_t start, ssize_t end);
-""")
+"""
+)
 
 if __name__ == '__main__':
     ffi.compile()
--- a/mercurial/cffi/osutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/cffi/osutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,9 +12,7 @@
 
 from ..pure.osutil import *
 
-from .. import (
-    pycompat,
-)
+from .. import pycompat
 
 if pycompat.isdarwin:
     from . import _osutil
@@ -26,7 +24,7 @@
     # tweakable number, only affects performance, which chunks
     # of bytes do we get back from getattrlistbulk
 
-    attrkinds = [None] * 20 # we need the max no for enum VXXX, 20 is plenty
+    attrkinds = [None] * 20  # we need the max no for enum VXXX, 20 is plenty
 
     attrkinds[lib.VREG] = statmod.S_IFREG
     attrkinds[lib.VDIR] = statmod.S_IFDIR
@@ -42,8 +40,8 @@
             self.st_mtime = st_mtime
             self.st_size = st_size
 
-    tv_sec_ofs = ffi.offsetof("struct timespec", "tv_sec")
-    buf = ffi.new("char[]", listdir_batch_size)
+    tv_sec_ofs = ffi.offsetof(b"struct timespec", b"tv_sec")
+    buf = ffi.new(b"char[]", listdir_batch_size)
 
     def listdirinternal(dfd, req, stat, skip):
         ret = []
@@ -53,39 +51,54 @@
                 break
             if r == -1:
                 raise OSError(ffi.errno, os.strerror(ffi.errno))
-            cur = ffi.cast("val_attrs_t*", buf)
+            cur = ffi.cast(b"val_attrs_t*", buf)
             for i in range(r):
                 lgt = cur.length
-                assert lgt == ffi.cast('uint32_t*', cur)[0]
+                assert lgt == ffi.cast(b'uint32_t*', cur)[0]
                 ofs = cur.name_info.attr_dataoffset
                 str_lgt = cur.name_info.attr_length
-                base_ofs = ffi.offsetof('val_attrs_t', 'name_info')
-                name = str(ffi.buffer(ffi.cast("char*", cur) + base_ofs + ofs,
-                           str_lgt - 1))
+                base_ofs = ffi.offsetof(b'val_attrs_t', b'name_info')
+                name = str(
+                    ffi.buffer(
+                        ffi.cast(b"char*", cur) + base_ofs + ofs, str_lgt - 1
+                    )
+                )
                 tp = attrkinds[cur.obj_type]
-                if name == "." or name == "..":
+                if name == b"." or name == b"..":
                     continue
                 if skip == name and tp == statmod.S_ISDIR:
                     return []
                 if stat:
                     mtime = cur.mtime.tv_sec
-                    mode = (cur.accessmask & ~lib.S_IFMT)| tp
-                    ret.append((name, tp, stat_res(st_mode=mode, st_mtime=mtime,
-                                st_size=cur.datalength)))
+                    mode = (cur.accessmask & ~lib.S_IFMT) | tp
+                    ret.append(
+                        (
+                            name,
+                            tp,
+                            stat_res(
+                                st_mode=mode,
+                                st_mtime=mtime,
+                                st_size=cur.datalength,
+                            ),
+                        )
+                    )
                 else:
                     ret.append((name, tp))
-                cur = ffi.cast("val_attrs_t*", int(ffi.cast("intptr_t", cur))
-                    + lgt)
+                cur = ffi.cast(
+                    b"val_attrs_t*", int(ffi.cast(b"intptr_t", cur)) + lgt
+                )
         return ret
 
     def listdir(path, stat=False, skip=None):
-        req = ffi.new("struct attrlist*")
+        req = ffi.new(b"struct attrlist*")
         req.bitmapcount = lib.ATTR_BIT_MAP_COUNT
-        req.commonattr = (lib.ATTR_CMN_RETURNED_ATTRS |
-                          lib.ATTR_CMN_NAME |
-                          lib.ATTR_CMN_OBJTYPE |
-                          lib.ATTR_CMN_ACCESSMASK |
-                          lib.ATTR_CMN_MODTIME)
+        req.commonattr = (
+            lib.ATTR_CMN_RETURNED_ATTRS
+            | lib.ATTR_CMN_NAME
+            | lib.ATTR_CMN_OBJTYPE
+            | lib.ATTR_CMN_ACCESSMASK
+            | lib.ATTR_CMN_MODTIME
+        )
         req.fileattr = lib.ATTR_FILE_DATALENGTH
         dfd = lib.open(path, lib.O_RDONLY, 0)
         if dfd == -1:
@@ -97,6 +110,6 @@
             try:
                 lib.close(dfd)
             except BaseException:
-                pass # we ignore all the errors from closing, not
+                pass  # we ignore all the errors from closing, not
                 # much we can do about that
         return ret
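
listdir() is the darwin fast path built on getattrlistbulk(); with stat=True each entry carries a minimal stat-like object holding only the fields Mercurial needs. A usage sketch:

    import stat as statmod

    from mercurial.cffi.osutil import listdir

    for name, kind, st in listdir(b'/tmp', stat=True):
        if kind == statmod.S_IFDIR:
            continue  # skip directories
        print(name, st.st_size, st.st_mtime)
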
--- a/mercurial/cffi/osutilbuild.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/cffi/osutilbuild.py	Mon Oct 21 11:09:48 2019 -0400
@@ -3,7 +3,9 @@
 import cffi
 
 ffi = cffi.FFI()
-ffi.set_source("mercurial.cffi._osutil", """
+ffi.set_source(
+    b"mercurial.cffi._osutil",
+    """
 #include <sys/attr.h>
 #include <sys/vnode.h>
 #include <unistd.h>
@@ -19,8 +21,11 @@
     uint32_t          accessmask;
     off_t             datalength;
 } __attribute__((aligned(4), packed)) val_attrs_t;
-""", include_dirs=['mercurial'])
-ffi.cdef('''
+""",
+    include_dirs=[b'mercurial'],
+)
+ffi.cdef(
+    '''
 
 typedef uint32_t attrgroup_t;
 
@@ -96,7 +101,8 @@
 int close(int);
 
 #define O_RDONLY ...
-''')
+'''
+)
 
 if __name__ == '__main__':
     ffi.compile()
--- a/mercurial/changegroup.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/changegroup.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,6 +18,7 @@
     nullrev,
     short,
 )
+from .pycompat import open
 
 from . import (
     error,
@@ -25,40 +26,46 @@
     mdiff,
     phases,
     pycompat,
-    repository,
     util,
 )
 
-_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
-_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
-_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")
+from .interfaces import repository
 
-LFS_REQUIREMENT = 'lfs'
+_CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
+_CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
+_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
+
+LFS_REQUIREMENT = b'lfs'
 
 readexactly = util.readexactly
 
+
 def getchunk(stream):
     """return the next chunk from stream as a string"""
     d = readexactly(stream, 4)
-    l = struct.unpack(">l", d)[0]
+    l = struct.unpack(b">l", d)[0]
     if l <= 4:
         if l:
-            raise error.Abort(_("invalid chunk length %d") % l)
-        return ""
+            raise error.Abort(_(b"invalid chunk length %d") % l)
+        return b""
     return readexactly(stream, l - 4)
 
+
 def chunkheader(length):
     """return a changegroup chunk header (string)"""
-    return struct.pack(">l", length + 4)
+    return struct.pack(b">l", length + 4)
+
 
 def closechunk():
     """return a changegroup chunk header (string) for a zero-length chunk"""
-    return struct.pack(">l", 0)
+    return struct.pack(b">l", 0)
+
 
 def _fileheader(path):
     """Obtain a changegroup chunk header for a named path."""
     return chunkheader(len(path)) + path
 
+
 def writechunks(ui, chunks, filename, vfs=None):
     """Write chunks to a file and return its filename.
 
@@ -71,13 +78,13 @@
     try:
         if filename:
             if vfs:
-                fh = vfs.open(filename, "wb")
+                fh = vfs.open(filename, b"wb")
             else:
                 # Increase default buffer size because default is usually
                 # small (4k is common on Linux).
-                fh = open(filename, "wb", 131072)
+                fh = open(filename, b"wb", 131072)
         else:
-            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
+            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
             fh = os.fdopen(fd, r"wb")
         cleanup = filename
         for c in chunks:
@@ -93,6 +100,7 @@
             else:
                 os.unlink(cleanup)
 
+
 class cg1unpacker(object):
     """Unpacker for cg1 changegroup streams.
 
@@ -111,19 +119,19 @@
     A few other public methods exist. Those are used only for
     bundlerepo and some debug commands - their use is discouraged.
     """
+
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
     deltaheadersize = deltaheader.size
-    version = '01'
-    _grouplistcount = 1 # One list of files after the manifests
+    version = b'01'
+    _grouplistcount = 1  # One list of files after the manifests
 
     def __init__(self, fh, alg, extras=None):
         if alg is None:
-            alg = 'UN'
+            alg = b'UN'
         if alg not in util.compengines.supportedbundletypes:
-            raise error.Abort(_('unknown stream compression type: %s')
-                             % alg)
-        if alg == 'BZ':
-            alg = '_truncatedBZ'
+            raise error.Abort(_(b'unknown stream compression type: %s') % alg)
+        if alg == b'BZ':
+            alg = b'_truncatedBZ'
 
         compengine = util.compengines.forbundletype(alg)
         self._stream = compengine.decompressorreader(fh)
@@ -134,22 +142,26 @@
     # These methods (compressed, read, seek, tell) all appear to only
     # be used by bundlerepo, but it's a little hard to tell.
     def compressed(self):
-        return self._type is not None and self._type != 'UN'
+        return self._type is not None and self._type != b'UN'
+
     def read(self, l):
         return self._stream.read(l)
+
     def seek(self, pos):
         return self._stream.seek(pos)
+
     def tell(self):
         return self._stream.tell()
+
     def close(self):
         return self._stream.close()
 
     def _chunklength(self):
         d = readexactly(self._stream, 4)
-        l = struct.unpack(">l", d)[0]
+        l = struct.unpack(b">l", d)[0]
         if l <= 4:
             if l:
-                raise error.Abort(_("invalid chunk length %d") % l)
+                raise error.Abort(_(b"invalid chunk length %d") % l)
             return 0
         if self.callback:
             self.callback()
@@ -169,7 +181,7 @@
         if not l:
             return {}
         fname = readexactly(self._stream, l)
-        return {'filename': fname}
+        return {b'filename': fname}
 
     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, cs = headertuple
@@ -230,7 +242,7 @@
                 yield chunkheader(len(chunk))
                 pos = 0
                 while pos < len(chunk):
-                    next = pos + 2**20
+                    next = pos + 2 ** 20
                     yield chunk[pos:next]
                     pos = next
             yield closechunk()
@@ -247,8 +259,15 @@
         prog.complete()
         self.callback = None
 
-    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
-              expectedtotal=None):
+    def apply(
+        self,
+        repo,
+        tr,
+        srctype,
+        url,
+        targetphase=phases.draft,
+        expectedtotal=None,
+    ):
         """Add the changegroup returned by source.read() to this repo.
         srctype is a string like 'push', 'pull', or 'unbundle'.  url is
         the URL of the repo where this changegroup is coming from.
@@ -260,24 +279,26 @@
         - number of heads stays the same: 1
         """
         repo = repo.unfiltered()
+
         def csmap(x):
-            repo.ui.debug("add changeset %s\n" % short(x))
+            repo.ui.debug(b"add changeset %s\n" % short(x))
             return len(cl)
 
         def revmap(x):
             return cl.rev(x)
 
-        changesets = files = revisions = 0
+        changesets = 0
 
         try:
             # The transaction may already carry source information. In this
             # case we use the top level data. We overwrite the argument
             # because we need to use the top level value (if they exist)
             # in this function.
-            srctype = tr.hookargs.setdefault('source', srctype)
-            tr.hookargs.setdefault('url', url)
-            repo.hook('prechangegroup',
-                      throw=True, **pycompat.strkwargs(tr.hookargs))
+            srctype = tr.hookargs.setdefault(b'source', srctype)
+            tr.hookargs.setdefault(b'url', url)
+            repo.hook(
+                b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
+            )
 
             # write changelog data to temp files so concurrent readers
             # will not see an inconsistent view
@@ -287,13 +308,15 @@
 
             trp = weakref.proxy(tr)
             # pull off the changeset group
-            repo.ui.status(_("adding changesets\n"))
+            repo.ui.status(_(b"adding changesets\n"))
             clstart = len(cl)
-            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
-                                            total=expectedtotal)
+            progress = repo.ui.makeprogress(
+                _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
+            )
             self.callback = progress.increment
 
             efiles = set()
+
             def onchangelog(cl, node):
                 efiles.update(cl.readfiles(node))
 
@@ -303,23 +326,26 @@
             efiles = len(efiles)
 
             if not cgnodes:
-                repo.ui.develwarn('applied empty changelog from changegroup',
-                                  config='warn-empty-changegroup')
+                repo.ui.develwarn(
+                    b'applied empty changelog from changegroup',
+                    config=b'warn-empty-changegroup',
+                )
             clend = len(cl)
             changesets = clend - clstart
             progress.complete()
             self.callback = None
 
             # pull off the manifest group
-            repo.ui.status(_("adding manifests\n"))
+            repo.ui.status(_(b"adding manifests\n"))
             # We know that we'll never have more manifests than we had
             # changesets.
-            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
-                                            total=changesets)
+            progress = repo.ui.makeprogress(
+                _(b'manifests'), unit=_(b'chunks'), total=changesets
+            )
             self._unpackmanifests(repo, revmap, trp, progress)
 
             needfiles = {}
-            if repo.ui.configbool('server', 'validate'):
+            if repo.ui.configbool(b'server', b'validate'):
                 cl = repo.changelog
                 ml = repo.manifestlog
                 # validate incoming csets have their manifests
@@ -327,47 +353,66 @@
                     mfnode = cl.changelogrevision(cset).manifest
                     mfest = ml[mfnode].readdelta()
                     # store file cgnodes we must see
-                    for f, n in mfest.iteritems():
+                    for f, n in pycompat.iteritems(mfest):
                         needfiles.setdefault(f, set()).add(n)
 
             # process the files
-            repo.ui.status(_("adding file changes\n"))
+            repo.ui.status(_(b"adding file changes\n"))
             newrevs, newfiles = _addchangegroupfiles(
-                repo, self, revmap, trp, efiles, needfiles)
-            revisions += newrevs
-            files += newfiles
+                repo, self, revmap, trp, efiles, needfiles
+            )
+
+            # making sure the value exists
+            tr.changes.setdefault(b'changegroup-count-changesets', 0)
+            tr.changes.setdefault(b'changegroup-count-revisions', 0)
+            tr.changes.setdefault(b'changegroup-count-files', 0)
+            tr.changes.setdefault(b'changegroup-count-heads', 0)
+
+            # some code uses bundle operations for internal purposes. They
+            # usually set `ui.quiet` to do this outside of user sight. Since
+            # the report of such operations now happens at the end of the
+            # transaction, ui.quiet has no direct effect on the output.
+            #
+            # To preserve this intent we use an inelegant hack: we fail to
+            # report the change if `quiet` is set. We should probably move to
+            # something better, but this is a good first step to allow the
+            # "end of transaction report" to pass tests.
+            if not repo.ui.quiet:
+                tr.changes[b'changegroup-count-changesets'] += changesets
+                tr.changes[b'changegroup-count-revisions'] += newrevs
+                tr.changes[b'changegroup-count-files'] += newfiles
 
             deltaheads = 0
             if oldheads:
                 heads = cl.heads()
-                deltaheads = len(heads) - len(oldheads)
+                deltaheads += len(heads) - len(oldheads)
                 for h in heads:
                     if h not in oldheads and repo[h].closesbranch():
                         deltaheads -= 1
-            htext = ""
-            if deltaheads:
-                htext = _(" (%+d heads)") % deltaheads
 
-            repo.ui.status(_("added %d changesets"
-                             " with %d changes to %d files%s\n")
-                             % (changesets, revisions, files, htext))
+            # see previous comment about checking ui.quiet
+            if not repo.ui.quiet:
+                tr.changes[b'changegroup-count-heads'] += deltaheads
             repo.invalidatevolatilesets()
 
             if changesets > 0:
-                if 'node' not in tr.hookargs:
-                    tr.hookargs['node'] = hex(cl.node(clstart))
-                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
+                if b'node' not in tr.hookargs:
+                    tr.hookargs[b'node'] = hex(cl.node(clstart))
+                    tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
                     hookargs = dict(tr.hookargs)
                 else:
                     hookargs = dict(tr.hookargs)
-                    hookargs['node'] = hex(cl.node(clstart))
-                    hookargs['node_last'] = hex(cl.node(clend - 1))
-                repo.hook('pretxnchangegroup',
-                          throw=True, **pycompat.strkwargs(hookargs))
+                    hookargs[b'node'] = hex(cl.node(clstart))
+                    hookargs[b'node_last'] = hex(cl.node(clend - 1))
+                repo.hook(
+                    b'pretxnchangegroup',
+                    throw=True,
+                    **pycompat.strkwargs(hookargs)
+                )
 
             added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
             phaseall = None
-            if srctype in ('push', 'serve'):
+            if srctype in (b'push', b'serve'):
                 # Old servers can not push the boundary themselves.
                 # New servers won't push the boundary if changeset already
                 # exists locally as secret
@@ -398,23 +443,26 @@
                     if clstart >= len(repo):
                         return
 
-                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))
+                    repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))
 
                     for n in added:
                         args = hookargs.copy()
-                        args['node'] = hex(n)
-                        del args['node_last']
-                        repo.hook("incoming", **pycompat.strkwargs(args))
+                        args[b'node'] = hex(n)
+                        del args[b'node_last']
+                        repo.hook(b"incoming", **pycompat.strkwargs(args))
 
-                    newheads = [h for h in repo.heads()
-                                if h not in oldheads]
-                    repo.ui.log("incoming",
-                                "%d incoming changes - new heads: %s\n",
-                                len(added),
-                                ', '.join([hex(c[:6]) for c in newheads]))
+                    newheads = [h for h in repo.heads() if h not in oldheads]
+                    repo.ui.log(
+                        b"incoming",
+                        b"%d incoming changes - new heads: %s\n",
+                        len(added),
+                        b', '.join([hex(c[:6]) for c in newheads]),
+                    )
 
-                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
-                                lambda tr: repo._afterlock(runhooks))
+                tr.addpostclose(
+                    b'changegroup-runhooks-%020i' % clstart,
+                    lambda tr: repo._afterlock(runhooks),
+                )
         finally:
             repo.ui.flush()
         # never return 0 here:
@@ -436,6 +484,7 @@
             yield chunkdata
             chain = chunkdata[0]
 
+
 class cg2unpacker(cg1unpacker):
     """Unpacker for cg2 streams.
 
@@ -443,15 +492,17 @@
     format is slightly different. All other features about the data
     remain the same.
     """
+
     deltaheader = _CHANGEGROUPV2_DELTA_HEADER
     deltaheadersize = deltaheader.size
-    version = '02'
+    version = b'02'
 
     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, deltabase, cs = headertuple
         flags = 0
         return node, p1, p2, deltabase, cs, flags
 
+
 class cg3unpacker(cg2unpacker):
     """Unpacker for cg3 streams.
 
@@ -459,10 +510,11 @@
     flags. It adds the revlog flags to the delta header and an empty chunk
     separating manifests and files.
     """
+
     deltaheader = _CHANGEGROUPV3_DELTA_HEADER
     deltaheadersize = deltaheader.size
-    version = '03'
-    _grouplistcount = 2 # One list of manifests and one list of files
+    version = b'03'
+    _grouplistcount = 2  # One list of manifests and one list of files
 
     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, deltabase, cs, flags = headertuple
@@ -472,16 +524,18 @@
         super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
         for chunkdata in iter(self.filelogheader, {}):
             # If we get here, there are directory manifests in the changegroup
-            d = chunkdata["filename"]
-            repo.ui.debug("adding %s revisions\n" % d)
+            d = chunkdata[b"filename"]
+            repo.ui.debug(b"adding %s revisions\n" % d)
             deltas = self.deltaiter()
             if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
-                raise error.Abort(_("received dir revlog group is empty"))
+                raise error.Abort(_(b"received dir revlog group is empty"))
+
 
 class headerlessfixup(object):
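+    """Re-serve stream bytes that were already consumed as a header.
+
+    read() drains the saved header first, then falls through to the
+    wrapped file object via readexactly().
+    """
+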
     def __init__(self, fh, h):
         self._h = h
         self._fh = fh
+
     def read(self, n):
         if self._h:
             d, self._h = self._h[:n], self._h[n:]
@@ -490,6 +544,7 @@
             return d
         return readexactly(self._fh, n)
 
+
 def _revisiondeltatochunks(delta, headerfn):
     """Serialize a revisiondelta to changegroup chunks."""
 
@@ -506,8 +561,7 @@
         prefix = mdiff.trivialdiffheader(len(data))
     else:
         data = delta.revision
-        prefix = mdiff.replacediffheader(delta.baserevisionsize,
-                                         len(data))
+        prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))
 
     meta = headerfn(delta)
 
@@ -517,6 +571,7 @@
         yield prefix
     yield data
 
+
 def _sortnodesellipsis(store, nodes, cl, lookup):
     """Sort nodes for changegroup generation."""
     # Ellipses serving mode.
@@ -538,10 +593,20 @@
     key = lambda n: cl.rev(lookup(n))
     return sorted(nodes, key=key)
 
-def _resolvenarrowrevisioninfo(cl, store, ischangelog, rev, linkrev,
-                               linknode, clrevtolocalrev, fullclnodes,
-                               precomputedellipsis):
+
+def _resolvenarrowrevisioninfo(
+    cl,
+    store,
+    ischangelog,
+    rev,
+    linkrev,
+    linknode,
+    clrevtolocalrev,
+    fullclnodes,
+    precomputedellipsis,
+):
     linkparents = precomputedellipsis[linkrev]
+
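+    # map the changelog parents of this ellipsis revision to revisions of
+    # the store being packed, walking past parents that are not present
+    # in the bundle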
     def local(clrev):
         """Turn a changelog revnum into a local revnum.
 
@@ -575,11 +640,11 @@
             if p in clrevtolocalrev:
                 return clrevtolocalrev[p]
             elif p in fullclnodes:
-                walk.extend([pp for pp in cl.parentrevs(p)
-                                if pp != nullrev])
+                walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
             elif p in precomputedellipsis:
-                walk.extend([pp for pp in precomputedellipsis[p]
-                                if pp != nullrev])
+                walk.extend(
+                    [pp for pp in precomputedellipsis[p] if pp != nullrev]
+                )
             else:
                 # In this case, we've got an ellipsis with parents
                 # outside the current bundle (likely an
@@ -599,16 +664,16 @@
                 # We failed to resolve a parent for this node, so
                 # we crash the changegroup construction.
                 raise error.Abort(
-                    'unable to resolve parent while packing %r %r'
-                    ' for changeset %r' % (store.indexfile, rev, clrev))
+                    b'unable to resolve parent while packing %r %r'
+                    b' for changeset %r' % (store.indexfile, rev, clrev)
+                )
 
         return nullrev
 
-    if not linkparents or (
-        store.parentrevs(rev) == (nullrev, nullrev)):
+    if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
         p1, p2 = nullrev, nullrev
     elif len(linkparents) == 1:
-        p1, = sorted(local(p) for p in linkparents)
+        (p1,) = sorted(local(p) for p in linkparents)
         p2 = nullrev
     else:
         p1, p2 = sorted(local(p) for p in linkparents)
@@ -617,10 +682,20 @@
 
     return p1node, p2node, linknode
 
-def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
-               topic=None,
-               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
-               precomputedellipsis=None):
+
+def deltagroup(
+    repo,
+    store,
+    nodes,
+    ischangelog,
+    lookup,
+    forcedeltaparentprev,
+    topic=None,
+    ellipses=False,
+    clrevtolocalrev=None,
+    fullclnodes=None,
+    precomputedellipsis=None,
+):
     """Calculate deltas for a set of revisions.
 
     Is a generator of ``revisiondelta`` instances.
@@ -636,10 +711,10 @@
     if ischangelog:
         # `hg log` shows changesets in storage order. To preserve order
         # across clones, send out changesets in storage order.
-        nodesorder = 'storage'
+        nodesorder = b'storage'
     elif ellipses:
         nodes = _sortnodesellipsis(store, nodes, cl, lookup)
-        nodesorder = 'nodes'
+        nodesorder = b'nodes'
     else:
         nodesorder = None
 
@@ -680,8 +755,16 @@
                 # We could probably do this later and avoid the dict
                 # holding state. But it likely doesn't matter.
                 p1node, p2node, linknode = _resolvenarrowrevisioninfo(
-                    cl, store, ischangelog, rev, linkrev, linknode,
-                    clrevtolocalrev, fullclnodes, precomputedellipsis)
+                    cl,
+                    store,
+                    ischangelog,
+                    rev,
+                    linkrev,
+                    linknode,
+                    clrevtolocalrev,
+                    fullclnodes,
+                    precomputedellipsis,
+                )
 
                 adjustedparents[node] = (p1node, p2node)
                 linknodes[node] = linknode
@@ -694,20 +777,21 @@
     # meter for constructing the revision deltas.
     progress = None
     if topic is not None:
-        progress = repo.ui.makeprogress(topic, unit=_('chunks'),
-                                        total=len(nodes))
+        progress = repo.ui.makeprogress(
+            topic, unit=_(b'chunks'), total=len(nodes)
+        )
 
-    configtarget = repo.ui.config('devel', 'bundle.delta')
-    if configtarget not in ('', 'p1', 'full'):
+    configtarget = repo.ui.config(b'devel', b'bundle.delta')
+    if configtarget not in (b'', b'p1', b'full'):
         msg = _("""config "devel.bundle.delta" as unknown value: %s""")
         repo.ui.warn(msg % configtarget)
 
     deltamode = repository.CG_DELTAMODE_STD
     if forcedeltaparentprev:
         deltamode = repository.CG_DELTAMODE_PREV
-    elif configtarget == 'p1':
+    elif configtarget == b'p1':
         deltamode = repository.CG_DELTAMODE_P1
-    elif configtarget == 'full':
+    elif configtarget == b'full':
         deltamode = repository.CG_DELTAMODE_FULL
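+    # summary: forcedeltaparentprev (cg1) forces deltas against the
+    # previous revision sent; otherwise devel.bundle.delta can force p1
+    # deltas or full snapshots, and the standard mode lets the storage
+    # layer pick the best delta base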
 
     revisions = store.emitrevisions(
@@ -715,7 +799,8 @@
         nodesorder=nodesorder,
         revisiondata=True,
         assumehaveparentrevisions=not ellipses,
-        deltamode=deltamode)
+        deltamode=deltamode,
+    )
 
     for i, revision in enumerate(revisions):
         if progress:
@@ -739,12 +824,23 @@
     if progress:
         progress.complete()
 
+
 class cgpacker(object):
-    def __init__(self, repo, oldmatcher, matcher, version,
-                 builddeltaheader, manifestsend,
-                 forcedeltaparentprev=False,
-                 bundlecaps=None, ellipses=False,
-                 shallow=False, ellipsisroots=None, fullnodes=None):
+    def __init__(
+        self,
+        repo,
+        oldmatcher,
+        matcher,
+        version,
+        builddeltaheader,
+        manifestsend,
+        forcedeltaparentprev=False,
+        bundlecaps=None,
+        ellipses=False,
+        shallow=False,
+        ellipsisroots=None,
+        fullnodes=None,
+    ):
         """Given a source repo, construct a bundler.
 
         oldmatcher is a matcher that matches on files the client already has.
@@ -805,8 +901,9 @@
         else:
             self._verbosenote = lambda s: None
 
-    def generate(self, commonrevs, clnodes, fastpathlinkrev, source,
-                 changelog=True):
+    def generate(
+        self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
+    ):
         """Yield a sequence of changegroup byte chunks.
         If changelog is False, changelog data won't be added to changegroup
         """
@@ -814,14 +911,14 @@
         repo = self._repo
         cl = repo.changelog
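+        # the emitted stream is, in order: changelog deltas plus a close
+        # chunk, manifest deltas (one group per tree when exchanging tree
+        # manifests) plus the manifest terminator, then one delta group
+        # per file, each introduced by a file-name chunk, and a final
+        # close chunk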
 
-        self._verbosenote(_('uncompressed size of bundle content:\n'))
+        self._verbosenote(_(b'uncompressed size of bundle content:\n'))
         size = 0
 
-        clstate, deltas = self._generatechangelog(cl, clnodes,
-                                                  generate=changelog)
+        clstate, deltas = self._generatechangelog(
+            cl, clnodes, generate=changelog
+        )
         for delta in deltas:
-            for chunk in _revisiondeltatochunks(delta,
-                                                self._builddeltaheader):
+            for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
                 size += len(chunk)
                 yield chunk
 
@@ -829,11 +926,11 @@
         size += len(close)
         yield closechunk()
 
-        self._verbosenote(_('%8.i (changelog)\n') % size)
+        self._verbosenote(_(b'%8.i (changelog)\n') % size)
 
-        clrevorder = clstate['clrevorder']
-        manifests = clstate['manifests']
-        changedfiles = clstate['changedfiles']
+        clrevorder = clstate[b'clrevorder']
+        manifests = clstate[b'manifests']
+        changedfiles = clstate[b'changedfiles']
 
         # We need to make sure that the linkrev in the changegroup refers to
         # the first changeset that introduced the manifest or file revision.
@@ -854,14 +951,21 @@
         # either, because we don't discover which directory nodes to
         # send along with files. This could probably be fixed.
         fastpathlinkrev = fastpathlinkrev and (
-            'treemanifest' not in repo.requirements)
+            b'treemanifest' not in repo.requirements
+        )
 
         fnodes = {}  # needed file nodes
 
         size = 0
         it = self.generatemanifests(
-            commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
-            clstate['clrevtomanifestrev'])
+            commonrevs,
+            clrevorder,
+            fastpathlinkrev,
+            manifests,
+            fnodes,
+            source,
+            clstate[b'clrevtomanifestrev'],
+        )
 
         for tree, deltas in it:
             if tree:
@@ -880,20 +984,28 @@
             size += len(close)
             yield close
 
-        self._verbosenote(_('%8.i (manifests)\n') % size)
+        self._verbosenote(_(b'%8.i (manifests)\n') % size)
         yield self._manifestsend
 
         mfdicts = None
         if self._ellipses and self._isshallow:
-            mfdicts = [(self._repo.manifestlog[n].read(), lr)
-                       for (n, lr) in manifests.iteritems()]
+            mfdicts = [
+                (self._repo.manifestlog[n].read(), lr)
+                for (n, lr) in pycompat.iteritems(manifests)
+            ]
 
         manifests.clear()
         clrevs = set(cl.rev(x) for x in clnodes)
 
-        it = self.generatefiles(changedfiles, commonrevs,
-                                source, mfdicts, fastpathlinkrev,
-                                fnodes, clrevs)
+        it = self.generatefiles(
+            changedfiles,
+            commonrevs,
+            source,
+            mfdicts,
+            fastpathlinkrev,
+            fnodes,
+            clrevs,
+        )
 
         for path, deltas in it:
             h = _fileheader(path)
@@ -910,12 +1022,12 @@
             size += len(close)
             yield close
 
-            self._verbosenote(_('%8.i  %s\n') % (size, path))
+            self._verbosenote(_(b'%8.i  %s\n') % (size, path))
 
         yield closechunk()
 
         if clnodes:
-            repo.hook('outgoing', node=hex(clnodes[0]), source=source)
+            repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
 
     def _generatechangelog(self, cl, nodes, generate=True):
         """Generate data for changelog chunks.
@@ -934,10 +1046,10 @@
         clrevtomanifestrev = {}
 
         state = {
-            'clrevorder': clrevorder,
-            'manifests': manifests,
-            'changedfiles': changedfiles,
-            'clrevtomanifestrev': clrevtomanifestrev,
+            b'clrevorder': clrevorder,
+            b'manifests': manifests,
+            b'changedfiles': changedfiles,
+            b'clrevtomanifestrev': clrevtomanifestrev,
         }
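+        # populated by the lookupcl callback below while the changelog
+        # deltas are actually generated; generate() reads it back once
+        # the changelog group has been emitted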
 
         if not (generate or self._ellipses):
@@ -966,8 +1078,10 @@
                 # end up with bogus linkrevs specified for manifests and
                 # we skip some manifest nodes that we should otherwise
                 # have sent.
-                if (x in self._fullclnodes
-                    or cl.rev(x) in self._precomputedellipsis):
+                if (
+                    x in self._fullclnodes
+                    or cl.rev(x) in self._precomputedellipsis
+                ):
 
                     manifestnode = c.manifest
                     # Record the first changeset introducing this manifest
@@ -978,7 +1092,8 @@
                     # mapping changelog ellipsis parents to manifest ellipsis
                     # parents)
                     clrevtomanifestrev.setdefault(
-                        cl.rev(x), mfl.rev(manifestnode))
+                        cl.rev(x), mfl.rev(manifestnode)
+                    )
                 # We can't trust the changed files list in the changeset if the
                 # client requested a shallow clone.
                 if self._isshallow:
@@ -995,18 +1110,31 @@
             return x
 
         gen = deltagroup(
-            self._repo, cl, nodes, True, lookupcl,
+            self._repo,
+            cl,
+            nodes,
+            True,
+            lookupcl,
             self._forcedeltaparentprev,
             ellipses=self._ellipses,
-            topic=_('changesets'),
+            topic=_(b'changesets'),
             clrevtolocalrev={},
             fullclnodes=self._fullclnodes,
-            precomputedellipsis=self._precomputedellipsis)
+            precomputedellipsis=self._precomputedellipsis,
+        )
 
         return state, gen
 
-    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
-                          manifests, fnodes, source, clrevtolocalrev):
+    def generatemanifests(
+        self,
+        commonrevs,
+        clrevorder,
+        fastpathlinkrev,
+        manifests,
+        fnodes,
+        source,
+        clrevtolocalrev,
+    ):
         """Returns an iterator of changegroup chunks containing manifests.
 
         `source` is unused here, but is used by extensions like remotefilelog to
@@ -1014,7 +1142,7 @@
         """
         repo = self._repo
         mfl = repo.manifestlog
-        tmfnodes = {'': manifests}
+        tmfnodes = {b'': manifests}
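+        # tree path -> {manifest node -> changelog node}; the root tree
+        # is b'' and subdirectory manifests discovered while scanning a
+        # tree are queued under their own path by lookupmflinknode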
 
         # Callback for the manifest, used to collect linkrevs for filelog
         # revisions.
@@ -1043,8 +1171,8 @@
                 clnode = nodes[x]
                 mdata = mfl.get(tree, x).readfast(shallow=True)
                 for p, n, fl in mdata.iterentries():
-                    if fl == 't': # subdirectory manifest
-                        subtree = tree + p + '/'
+                    if fl == b't':  # subdirectory manifest
+                        subtree = tree + p + b'/'
                         tmfclnodes = tmfnodes.setdefault(subtree, {})
                         tmfclnode = tmfclnodes.setdefault(n, clnode)
                         if clrevorder[clnode] < clrevorder[tmfclnode]:
@@ -1056,6 +1184,7 @@
                         if clrevorder[clnode] < clrevorder[fclnode]:
                             fclnodes[n] = clnode
                 return clnode
+
             return lookupmflinknode
 
         while tmfnodes:
@@ -1085,13 +1214,18 @@
             lookupfn = makelookupmflinknode(tree, nodes)
 
             deltas = deltagroup(
-                self._repo, store, prunednodes, False, lookupfn,
+                self._repo,
+                store,
+                prunednodes,
+                False,
+                lookupfn,
                 self._forcedeltaparentprev,
                 ellipses=self._ellipses,
-                topic=_('manifests'),
+                topic=_(b'manifests'),
                 clrevtolocalrev=clrevtolocalrev,
                 fullclnodes=self._fullclnodes,
-                precomputedellipsis=self._precomputedellipsis)
+                precomputedellipsis=self._precomputedellipsis,
+            )
 
             if not self._oldmatcher.visitdir(store.tree[:-1]):
                 yield tree, deltas
@@ -1120,14 +1254,27 @@
         return [n for n in nodes if flr(frev(n)) not in commonrevs]
 
     # The 'source' parameter is useful for extensions
-    def generatefiles(self, changedfiles, commonrevs, source,
-                      mfdicts, fastpathlinkrev, fnodes, clrevs):
-        changedfiles = [f for f in changedfiles
-                        if self._matcher(f) and not self._oldmatcher(f)]
+    def generatefiles(
+        self,
+        changedfiles,
+        commonrevs,
+        source,
+        mfdicts,
+        fastpathlinkrev,
+        fnodes,
+        clrevs,
+    ):
+        changedfiles = [
+            f
+            for f in changedfiles
+            if self._matcher(f) and not self._oldmatcher(f)
+        ]
 
         if not fastpathlinkrev:
+
             def normallinknodes(unused, fname):
                 return fnodes.get(fname, {})
+
         else:
             cln = self._repo.changelog.node
 
@@ -1135,8 +1282,9 @@
                 flinkrev = store.linkrev
                 fnode = store.node
                 revs = ((r, flinkrev(r)) for r in store)
-                return dict((fnode(r), cln(lr))
-                            for r, lr in revs if lr in clrevs)
+                return dict(
+                    (fnode(r), cln(lr)) for r, lr in revs if lr in clrevs
+                )
 
         clrevtolocalrev = {}
 
@@ -1163,17 +1311,20 @@
                         elif fnode:
                             links[fnode] = lr
                 return links
+
         else:
             linknodes = normallinknodes
 
         repo = self._repo
-        progress = repo.ui.makeprogress(_('files'), unit=_('files'),
-                                        total=len(changedfiles))
+        progress = repo.ui.makeprogress(
+            _(b'files'), unit=_(b'files'), total=len(changedfiles)
+        )
         for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
             if not filerevlog:
-                raise error.Abort(_("empty or missing file data for %s") %
-                                  fname)
+                raise error.Abort(
+                    _(b"empty or missing file data for %s") % fname
+                )
 
             clrevtolocalrev.clear()
 
@@ -1188,8 +1339,9 @@
             # has. This avoids over-sending files relatively
             # inexpensively, so it's not a problem if we under-filter
             # here.
-            filenodes = [n for n in linkrevnodes
-                         if flr(frev(n)) not in commonrevs]
+            filenodes = [
+                n for n in linkrevnodes if flr(frev(n)) not in commonrevs
+            ]
 
             if not filenodes:
                 continue
@@ -1197,124 +1349,204 @@
             progress.update(i + 1, item=fname)
 
             deltas = deltagroup(
-                self._repo, filerevlog, filenodes, False, lookupfilelog,
+                self._repo,
+                filerevlog,
+                filenodes,
+                False,
+                lookupfilelog,
                 self._forcedeltaparentprev,
                 ellipses=self._ellipses,
                 clrevtolocalrev=clrevtolocalrev,
                 fullclnodes=self._fullclnodes,
-                precomputedellipsis=self._precomputedellipsis)
+                precomputedellipsis=self._precomputedellipsis,
+            )
 
             yield fname, deltas
 
         progress.complete()
 
-def _makecg1packer(repo, oldmatcher, matcher, bundlecaps,
-                   ellipses=False, shallow=False, ellipsisroots=None,
-                   fullnodes=None):
+
+def _makecg1packer(
+    repo,
+    oldmatcher,
+    matcher,
+    bundlecaps,
+    ellipses=False,
+    shallow=False,
+    ellipsisroots=None,
+    fullnodes=None,
+):
     builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
-        d.node, d.p1node, d.p2node, d.linknode)
+        d.node, d.p1node, d.p2node, d.linknode
+    )
 
-    return cgpacker(repo, oldmatcher, matcher, b'01',
-                    builddeltaheader=builddeltaheader,
-                    manifestsend=b'',
-                    forcedeltaparentprev=True,
-                    bundlecaps=bundlecaps,
-                    ellipses=ellipses,
-                    shallow=shallow,
-                    ellipsisroots=ellipsisroots,
-                    fullnodes=fullnodes)
+    return cgpacker(
+        repo,
+        oldmatcher,
+        matcher,
+        b'01',
+        builddeltaheader=builddeltaheader,
+        manifestsend=b'',
+        forcedeltaparentprev=True,
+        bundlecaps=bundlecaps,
+        ellipses=ellipses,
+        shallow=shallow,
+        ellipsisroots=ellipsisroots,
+        fullnodes=fullnodes,
+    )
 
-def _makecg2packer(repo, oldmatcher, matcher, bundlecaps,
-                   ellipses=False, shallow=False, ellipsisroots=None,
-                   fullnodes=None):
+
+def _makecg2packer(
+    repo,
+    oldmatcher,
+    matcher,
+    bundlecaps,
+    ellipses=False,
+    shallow=False,
+    ellipsisroots=None,
+    fullnodes=None,
+):
     builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
-        d.node, d.p1node, d.p2node, d.basenode, d.linknode)
+        d.node, d.p1node, d.p2node, d.basenode, d.linknode
+    )
 
-    return cgpacker(repo, oldmatcher, matcher, b'02',
-                    builddeltaheader=builddeltaheader,
-                    manifestsend=b'',
-                    bundlecaps=bundlecaps,
-                    ellipses=ellipses,
-                    shallow=shallow,
-                    ellipsisroots=ellipsisroots,
-                    fullnodes=fullnodes)
+    return cgpacker(
+        repo,
+        oldmatcher,
+        matcher,
+        b'02',
+        builddeltaheader=builddeltaheader,
+        manifestsend=b'',
+        bundlecaps=bundlecaps,
+        ellipses=ellipses,
+        shallow=shallow,
+        ellipsisroots=ellipsisroots,
+        fullnodes=fullnodes,
+    )
 
-def _makecg3packer(repo, oldmatcher, matcher, bundlecaps,
-                   ellipses=False, shallow=False, ellipsisroots=None,
-                   fullnodes=None):
-    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
-        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)
 
-    return cgpacker(repo, oldmatcher, matcher, b'03',
-                    builddeltaheader=builddeltaheader,
-                    manifestsend=closechunk(),
-                    bundlecaps=bundlecaps,
-                    ellipses=ellipses,
-                    shallow=shallow,
-                    ellipsisroots=ellipsisroots,
-                    fullnodes=fullnodes)
+def _makecg3packer(
+    repo,
+    oldmatcher,
+    matcher,
+    bundlecaps,
+    ellipses=False,
+    shallow=False,
+    ellipsisroots=None,
+    fullnodes=None,
+):
+    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
+    )
 
-_packermap = {'01': (_makecg1packer, cg1unpacker),
-             # cg2 adds support for exchanging generaldelta
-             '02': (_makecg2packer, cg2unpacker),
-             # cg3 adds support for exchanging revlog flags and treemanifests
-             '03': (_makecg3packer, cg3unpacker),
+    return cgpacker(
+        repo,
+        oldmatcher,
+        matcher,
+        b'03',
+        builddeltaheader=builddeltaheader,
+        manifestsend=closechunk(),
+        bundlecaps=bundlecaps,
+        ellipses=ellipses,
+        shallow=shallow,
+        ellipsisroots=ellipsisroots,
+        fullnodes=fullnodes,
+    )
+
+
+_packermap = {
+    b'01': (_makecg1packer, cg1unpacker),
+    # cg2 adds support for exchanging generaldelta
+    b'02': (_makecg2packer, cg2unpacker),
+    # cg3 adds support for exchanging revlog flags and treemanifests
+    b'03': (_makecg3packer, cg3unpacker),
 }
 
+
 def allsupportedversions(repo):
     versions = set(_packermap.keys())
-    if not (repo.ui.configbool('experimental', 'changegroup3') or
-            repo.ui.configbool('experimental', 'treemanifest') or
-            'treemanifest' in repo.requirements):
-        versions.discard('03')
+    needv03 = False
+    if (
+        repo.ui.configbool(b'experimental', b'changegroup3')
+        or repo.ui.configbool(b'experimental', b'treemanifest')
+        or b'treemanifest' in repo.requirements
+    ):
+        # we keep version 03 because we need it to exchange treemanifest
+        # data
+        #
+        # we also keep versions 01 and 02, because it is possible for a
+        # repo to contain both normal and tree manifests at the same
+        # time, so using an older version to pull data remains viable
+        #
+        # (or even to push a subset of history)
+        needv03 = True
+    if b'exp-sidedata-flag' in repo.requirements:
+        needv03 = True
+        # don't attempt to use 01/02 until we do sidedata cleaning
+        versions.discard(b'01')
+        versions.discard(b'02')
+    if not needv03:
+        versions.discard(b'03')
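+    # note that a treemanifest repository still accepts versions 01/02
+    # here: only generation is restricted, in supportedoutgoingversions()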
     return versions
 
+
 # Changegroup versions that can be applied to the repo
 def supportedincomingversions(repo):
     return allsupportedversions(repo)
 
+
 # Changegroup versions that can be created from the repo
 def supportedoutgoingversions(repo):
     versions = allsupportedversions(repo)
-    if 'treemanifest' in repo.requirements:
+    if b'treemanifest' in repo.requirements:
         # Versions 01 and 02 support only flat manifests and it's just too
         # expensive to convert between the flat manifest and tree manifest on
         # the fly. Since tree manifests are hashed differently, all of history
         # would have to be converted. Instead, we simply don't even pretend to
         # support versions 01 and 02.
-        versions.discard('01')
-        versions.discard('02')
+        versions.discard(b'01')
+        versions.discard(b'02')
     if repository.NARROW_REQUIREMENT in repo.requirements:
         # Versions 01 and 02 don't support revlog flags, and we need to
         # support that for stripping and unbundling to work.
-        versions.discard('01')
-        versions.discard('02')
+        versions.discard(b'01')
+        versions.discard(b'02')
     if LFS_REQUIREMENT in repo.requirements:
         # Versions 01 and 02 don't support revlog flags, and we need to
         # mark LFS entries with REVIDX_EXTSTORED.
-        versions.discard('01')
-        versions.discard('02')
+        versions.discard(b'01')
+        versions.discard(b'02')
 
     return versions
 
+
 def localversion(repo):
     # Finds the best version to use for bundles that are meant to be used
     # locally, such as those from strip and shelve, and temporary bundles.
     return max(supportedoutgoingversions(repo))
 
+
 def safeversion(repo):
     # Finds the smallest version that it's safe to assume clients of the repo
     # will support. For example, all hg versions that support generaldelta also
     # support changegroup 02.
     versions = supportedoutgoingversions(repo)
-    if 'generaldelta' in repo.requirements:
-        versions.discard('01')
+    if b'generaldelta' in repo.requirements:
+        versions.discard(b'01')
     assert versions
     return min(versions)
 
-def getbundler(version, repo, bundlecaps=None, oldmatcher=None,
-               matcher=None, ellipses=False, shallow=False,
-               ellipsisroots=None, fullnodes=None):
+
+def getbundler(
+    version,
+    repo,
+    bundlecaps=None,
+    oldmatcher=None,
+    matcher=None,
+    ellipses=False,
+    shallow=False,
+    ellipsisroots=None,
+    fullnodes=None,
+):
     assert version in supportedoutgoingversions(repo)
 
     if matcher is None:
@@ -1322,46 +1554,79 @@
     if oldmatcher is None:
         oldmatcher = matchmod.never()
 
-    if version == '01' and not matcher.always():
-        raise error.ProgrammingError('version 01 changegroups do not support '
-                                     'sparse file matchers')
+    if version == b'01' and not matcher.always():
+        raise error.ProgrammingError(
+            b'version 01 changegroups do not support sparse file matchers'
+        )
 
     if ellipses and version in (b'01', b'02'):
         raise error.Abort(
-            _('ellipsis nodes require at least cg3 on client and server, '
-              'but negotiated version %s') % version)
+            _(
+                b'ellipsis nodes require at least cg3 on client and server, '
+                b'but negotiated version %s'
+            )
+            % version
+        )
 
     # Requested files could include files not in the local store. So
     # filter those out.
     matcher = repo.narrowmatch(matcher)
 
     fn = _packermap[version][0]
-    return fn(repo, oldmatcher, matcher, bundlecaps, ellipses=ellipses,
-              shallow=shallow, ellipsisroots=ellipsisroots,
-              fullnodes=fullnodes)
+    return fn(
+        repo,
+        oldmatcher,
+        matcher,
+        bundlecaps,
+        ellipses=ellipses,
+        shallow=shallow,
+        ellipsisroots=ellipsisroots,
+        fullnodes=fullnodes,
+    )
+
 
 def getunbundler(version, fh, alg, extras=None):
     return _packermap[version][1](fh, alg, extras=extras)
 
+
 def _changegroupinfo(repo, nodes, source):
-    if repo.ui.verbose or source == 'bundle':
-        repo.ui.status(_("%d changesets found\n") % len(nodes))
+    if repo.ui.verbose or source == b'bundle':
+        repo.ui.status(_(b"%d changesets found\n") % len(nodes))
     if repo.ui.debugflag:
-        repo.ui.debug("list of changesets:\n")
+        repo.ui.debug(b"list of changesets:\n")
         for node in nodes:
-            repo.ui.debug("%s\n" % hex(node))
+            repo.ui.debug(b"%s\n" % hex(node))
+
 
-def makechangegroup(repo, outgoing, version, source, fastpath=False,
-                    bundlecaps=None):
-    cgstream = makestream(repo, outgoing, version, source,
-                          fastpath=fastpath, bundlecaps=bundlecaps)
-    return getunbundler(version, util.chunkbuffer(cgstream), None,
-                        {'clcount': len(outgoing.missing) })
+def makechangegroup(
+    repo, outgoing, version, source, fastpath=False, bundlecaps=None
+):
+    cgstream = makestream(
+        repo,
+        outgoing,
+        version,
+        source,
+        fastpath=fastpath,
+        bundlecaps=bundlecaps,
+    )
+    return getunbundler(
+        version,
+        util.chunkbuffer(cgstream),
+        None,
+        {b'clcount': len(outgoing.missing)},
+    )
 
-def makestream(repo, outgoing, version, source, fastpath=False,
-               bundlecaps=None, matcher=None):
-    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
-                         matcher=matcher)
+
+def makestream(
+    repo,
+    outgoing,
+    version,
+    source,
+    fastpath=False,
+    bundlecaps=None,
+    matcher=None,
+):
+    bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
 
     repo = repo.unfiltered()
     commonrevs = outgoing.common
@@ -1372,30 +1637,33 @@
     # be pulled by the client).
     heads.sort()
     fastpathlinkrev = fastpath or (
-            repo.filtername is None and heads == sorted(repo.heads()))
+        repo.filtername is None and heads == sorted(repo.heads())
+    )
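+    # when every head of an unfiltered repo is outgoing, storage linkrevs
+    # necessarily point at changesets that are being sent, so the packer
+    # can trust them instead of recomputing first-introductions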
 
-    repo.hook('preoutgoing', throw=True, source=source)
+    repo.hook(b'preoutgoing', throw=True, source=source)
     _changegroupinfo(repo, csets, source)
     return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
 
+
 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
     revisions = 0
     files = 0
-    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
-                                    total=expectedfiles)
+    progress = repo.ui.makeprogress(
+        _(b'files'), unit=_(b'files'), total=expectedfiles
+    )
     for chunkdata in iter(source.filelogheader, {}):
         files += 1
-        f = chunkdata["filename"]
-        repo.ui.debug("adding %s revisions\n" % f)
+        f = chunkdata[b"filename"]
+        repo.ui.debug(b"adding %s revisions\n" % f)
         progress.increment()
         fl = repo.file(f)
         o = len(fl)
         try:
             deltas = source.deltaiter()
             if not fl.addgroup(deltas, revmap, trp):
-                raise error.Abort(_("received file revlog group is empty"))
+                raise error.Abort(_(b"received file revlog group is empty"))
         except error.CensoredBaseError as e:
-            raise error.Abort(_("received delta base is censored: %s") % e)
+            raise error.Abort(_(b"received delta base is censored: %s") % e)
         revisions += len(fl) - o
         if f in needfiles:
             needs = needfiles[f]
@@ -1404,20 +1672,20 @@
                 if n in needs:
                     needs.remove(n)
                 else:
-                    raise error.Abort(
-                        _("received spurious file revlog entry"))
+                    raise error.Abort(_(b"received spurious file revlog entry"))
             if not needs:
                 del needfiles[f]
     progress.complete()
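+    # anything left in needfiles was announced by the incoming changesets
+    # but not delivered; check the repository itself before aborting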
 
-    for f, needs in needfiles.iteritems():
+    for f, needs in pycompat.iteritems(needfiles):
         fl = repo.file(f)
         for n in needs:
             try:
                 fl.rev(n)
             except error.LookupError:
                 raise error.Abort(
-                    _('missing file data for %s:%s - run hg verify') %
-                    (f, hex(n)))
+                    _(b'missing file data for %s:%s - run hg verify')
+                    % (f, hex(n))
+                )
 
     return revisions, files
--- a/mercurial/changelog.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/changelog.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,11 +13,10 @@
     hex,
     nullid,
 )
-from .thirdparty import (
-    attr,
-)
+from .thirdparty import attr
 
 from . import (
+    copies,
     encoding,
     error,
     pycompat,
@@ -29,7 +28,10 @@
     stringutil,
 )
 
-_defaultextra = {'branch': 'default'}
+from .revlogutils import sidedata as sidedatamod
+
+_defaultextra = {b'branch': b'default'}
+
 
 def _string_escape(text):
     """
@@ -43,17 +45,23 @@
     True
     """
     # subset of the string_escape codec
-    text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
-    return text.replace('\0', '\\0')
+    text = (
+        text.replace(b'\\', b'\\\\')
+        .replace(b'\n', b'\\n')
+        .replace(b'\r', b'\\r')
+    )
+    return text.replace(b'\0', b'\\0')
+
 
 def _string_unescape(text):
-    if '\\0' in text:
+    if b'\\0' in text:
         # fix up \0 without getting into trouble with \\0
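+        # (the interposed newline is a safe marker because _string_escape
+        # encodes real newlines; it keeps the \\0 replacement from eating
+        # the second half of an escaped backslash)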
-        text = text.replace('\\\\', '\\\\\n')
-        text = text.replace('\\0', '\0')
-        text = text.replace('\n', '')
+        text = text.replace(b'\\\\', b'\\\\\n')
+        text = text.replace(b'\\0', b'\0')
+        text = text.replace(b'\n', b'')
     return stringutil.unescapestr(text)
 
+
 def decodeextra(text):
     """
     >>> from .pycompat import bytechr as chr
@@ -66,76 +74,31 @@
     [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
     """
     extra = _defaultextra.copy()
-    for l in text.split('\0'):
+    for l in text.split(b'\0'):
         if l:
-            k, v = _string_unescape(l).split(':', 1)
+            k, v = _string_unescape(l).split(b':', 1)
             extra[k] = v
     return extra
 
+
 def encodeextra(d):
     # keys must be sorted to produce a deterministic changelog entry
     items = [
-        _string_escape('%s:%s' % (k, pycompat.bytestr(d[k])))
+        _string_escape(b'%s:%s' % (k, pycompat.bytestr(d[k])))
         for k in sorted(d)
     ]
-    return "\0".join(items)
-
-def encodecopies(files, copies):
-    items = []
-    for i, dst in enumerate(files):
-        if dst in copies:
-            items.append('%d\0%s' % (i, copies[dst]))
-    if len(items) != len(copies):
-        raise error.ProgrammingError('some copy targets missing from file list')
-    return "\n".join(items)
+    return b"\0".join(items)
 
-def decodecopies(files, data):
-    try:
-        copies = {}
-        if not data:
-            return copies
-        for l in data.split('\n'):
-            strindex, src = l.split('\0')
-            i = int(strindex)
-            dst = files[i]
-            copies[dst] = src
-        return copies
-    except (ValueError, IndexError):
-        # Perhaps someone had chosen the same key name (e.g. "p1copies") and
-        # used different syntax for the value.
-        return None
-
-def encodefileindices(files, subset):
-    subset = set(subset)
-    indices = []
-    for i, f in enumerate(files):
-        if f in subset:
-            indices.append('%d' % i)
-    return '\n'.join(indices)
-
-def decodefileindices(files, data):
-    try:
-        subset = []
-        if not data:
-            return subset
-        for strindex in data.split('\n'):
-            i = int(strindex)
-            if i < 0 or i >= len(files):
-                return None
-            subset.append(files[i])
-        return subset
-    except (ValueError, IndexError):
-        # Perhaps someone had chosen the same key name (e.g. "added") and
-        # used different syntax for the value.
-        return None
 
 def stripdesc(desc):
     """strip trailing whitespace and leading and trailing empty lines"""
-    return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
+    return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')
+
 
 class appender(object):
     '''the changelog index must be updated last on disk, so we use this class
     to delay writes to it'''
+
     def __init__(self, vfs, name, mode, buf):
         self.data = buf
         fp = vfs(name, mode)
@@ -146,8 +109,10 @@
 
     def end(self):
         return self._end
+
     def tell(self):
         return self.offset
+
     def flush(self):
         pass
 
@@ -171,7 +136,7 @@
 
     def read(self, count=-1):
         '''only trick here is reads that span real file and data'''
-        ret = ""
+        ret = b""
         if self.offset < self.size:
             s = self.fp.read(count)
             ret = s
@@ -180,9 +145,9 @@
                 count -= len(s)
         if count != 0:
             doff = self.offset - self.size
-            self.data.insert(0, "".join(self.data))
+            self.data.insert(0, b"".join(self.data))
             del self.data[1:]
-            s = self.data[0][doff:doff + count]
+            s = self.data[0][doff : doff + count]
             self.offset += len(s)
             ret += s
         return ret
@@ -199,36 +164,44 @@
     def __exit__(self, *args):
         return self.fp.__exit__(*args)
 
+
 def _divertopener(opener, target):
     """build an opener that writes in 'target.a' instead of 'target'"""
-    def _divert(name, mode='r', checkambig=False):
+
+    def _divert(name, mode=b'r', checkambig=False):
         if name != target:
             return opener(name, mode)
-        return opener(name + ".a", mode)
+        return opener(name + b".a", mode)
+
     return _divert
 
+
 def _delayopener(opener, target, buf):
     """build an opener that stores chunks in 'buf' instead of 'target'"""
-    def _delay(name, mode='r', checkambig=False):
+
+    def _delay(name, mode=b'r', checkambig=False):
         if name != target:
             return opener(name, mode)
         return appender(opener, name, mode, buf)
+
     return _delay
 
+
 @attr.s
 class _changelogrevision(object):
     # Extensions might modify _defaultextra, so let the constructor below pass
     # it in
     extra = attr.ib()
     manifest = attr.ib(default=nullid)
-    user = attr.ib(default='')
+    user = attr.ib(default=b'')
     date = attr.ib(default=(0, 0))
     files = attr.ib(default=attr.Factory(list))
     filesadded = attr.ib(default=None)
     filesremoved = attr.ib(default=None)
     p1copies = attr.ib(default=None)
     p2copies = attr.ib(default=None)
-    description = attr.ib(default='')
+    description = attr.ib(default=b'')
+
 
 class changelogrevision(object):
     """Holds results of a parsed changelog revision.
@@ -241,9 +214,11 @@
     __slots__ = (
         r'_offsets',
         r'_text',
+        r'_sidedata',
+        r'_cpsd',
     )
 
-    def __new__(cls, text):
+    def __new__(cls, text, sidedata, cpsd):
         if not text:
             return _changelogrevision(extra=_defaultextra)
 
@@ -262,42 +237,44 @@
         #
         # changelog v0 doesn't use extra
 
-        nl1 = text.index('\n')
-        nl2 = text.index('\n', nl1 + 1)
-        nl3 = text.index('\n', nl2 + 1)
+        nl1 = text.index(b'\n')
+        nl2 = text.index(b'\n', nl1 + 1)
+        nl3 = text.index(b'\n', nl2 + 1)
 
         # The list of files may be empty. Which means nl3 is the first of the
         # double newline that precedes the description.
-        if text[nl3 + 1:nl3 + 2] == '\n':
+        if text[nl3 + 1 : nl3 + 2] == b'\n':
             doublenl = nl3
         else:
-            doublenl = text.index('\n\n', nl3 + 1)
+            doublenl = text.index(b'\n\n', nl3 + 1)
 
         self._offsets = (nl1, nl2, nl3, doublenl)
         self._text = text
+        self._sidedata = sidedata
+        self._cpsd = cpsd
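+        # cpsd is True when copy metadata lives in revlog sidedata
+        # (copies-storage = 'changeset-sidedata'); the properties below
+        # then read it from there instead of the changeset's extra dict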
 
         return self
 
     @property
     def manifest(self):
-        return bin(self._text[0:self._offsets[0]])
+        return bin(self._text[0 : self._offsets[0]])
 
     @property
     def user(self):
         off = self._offsets
-        return encoding.tolocal(self._text[off[0] + 1:off[1]])
+        return encoding.tolocal(self._text[off[0] + 1 : off[1]])
 
     @property
     def _rawdate(self):
         off = self._offsets
-        dateextra = self._text[off[1] + 1:off[2]]
-        return dateextra.split(' ', 2)[0:2]
+        dateextra = self._text[off[1] + 1 : off[2]]
+        return dateextra.split(b' ', 2)[0:2]
 
     @property
     def _rawextra(self):
         off = self._offsets
-        dateextra = self._text[off[1] + 1:off[2]]
-        fields = dateextra.split(' ', 2)
+        dateextra = self._text[off[1] + 1 : off[2]]
+        fields = dateextra.split(b' ', 2)
         if len(fields) != 3:
             return None
 
@@ -329,31 +306,60 @@
         if off[2] == off[3]:
             return []
 
-        return self._text[off[2] + 1:off[3]].split('\n')
+        return self._text[off[2] + 1 : off[3]].split(b'\n')
 
     @property
     def filesadded(self):
-        rawindices = self.extra.get('filesadded')
-        return rawindices and decodefileindices(self.files, rawindices)
+        if self._cpsd:
+            rawindices = self._sidedata.get(sidedatamod.SD_FILESADDED)
+            if not rawindices:
+                return []
+        else:
+            rawindices = self.extra.get(b'filesadded')
+        if rawindices is None:
+            return None
+        return copies.decodefileindices(self.files, rawindices)
 
     @property
     def filesremoved(self):
-        rawindices = self.extra.get('filesremoved')
-        return rawindices and decodefileindices(self.files, rawindices)
+        if self._cpsd:
+            rawindices = self._sidedata.get(sidedatamod.SD_FILESREMOVED)
+            if not rawindices:
+                return []
+        else:
+            rawindices = self.extra.get(b'filesremoved')
+        if rawindices is None:
+            return None
+        return copies.decodefileindices(self.files, rawindices)
 
     @property
     def p1copies(self):
-        rawcopies = self.extra.get('p1copies')
-        return rawcopies and decodecopies(self.files, rawcopies)
+        if self._cpsd:
+            rawcopies = self._sidedata.get(sidedatamod.SD_P1COPIES)
+            if not rawcopies:
+                return {}
+        else:
+            rawcopies = self.extra.get(b'p1copies')
+        if rawcopies is None:
+            return None
+        return copies.decodecopies(self.files, rawcopies)
 
     @property
     def p2copies(self):
-        rawcopies = self.extra.get('p2copies')
-        return rawcopies and decodecopies(self.files, rawcopies)
+        if self._cpsd:
+            rawcopies = self._sidedata.get(sidedatamod.SD_P2COPIES)
+            if not rawcopies:
+                return {}
+        else:
+            rawcopies = self.extra.get(b'p2copies')
+        if rawcopies is None:
+            return None
+        return copies.decodecopies(self.files, rawcopies)
 
     @property
     def description(self):
-        return encoding.tolocal(self._text[self._offsets[3] + 2:])
+        return encoding.tolocal(self._text[self._offsets[3] + 2 :])
+
 
 class changelog(revlog.revlog):
     def __init__(self, opener, trypending=False):
@@ -366,14 +372,20 @@
         It exists in a separate file to facilitate readers (such as
         hooks processes) accessing data before a transaction is finalized.
         """
-        if trypending and opener.exists('00changelog.i.a'):
-            indexfile = '00changelog.i.a'
+        if trypending and opener.exists(b'00changelog.i.a'):
+            indexfile = b'00changelog.i.a'
         else:
-            indexfile = '00changelog.i'
+            indexfile = b'00changelog.i'
 
-        datafile = '00changelog.d'
-        revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
-                               checkambig=True, mmaplargeindex=True)
+        datafile = b'00changelog.d'
+        revlog.revlog.__init__(
+            self,
+            opener,
+            indexfile,
+            datafile=datafile,
+            checkambig=True,
+            mmaplargeindex=True,
+        )
 
         if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
             # changelogs don't benefit from generaldelta.
@@ -391,9 +403,10 @@
         self._delaybuf = None
         self._divert = False
         self.filteredrevs = frozenset()
+        self._copiesstorage = opener.options.get(b'copies-storage')
 
     def tiprev(self):
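+        # walk from the tip down, skipping filtered revisions; the range
+        # deliberately runs through -1 (nullrev, never filtered) so an
+        # empty or fully filtered repo yields nullrev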
-        for i in pycompat.xrange(len(self) -1, -2, -1):
+        for i in pycompat.xrange(len(self) - 1, -2, -1):
             if i not in self.filteredrevs:
                 return i
 
@@ -403,8 +416,7 @@
 
     def __contains__(self, rev):
         """filtered version of revlog.__contains__"""
-        return (0 <= rev < len(self)
-                and rev not in self.filteredrevs)
+        return 0 <= rev < len(self) and rev not in self.filteredrevs
 
     def __iter__(self):
         """filtered version of revlog.__iter__"""
@@ -468,8 +480,9 @@
         """filtered version of revlog.rev"""
         r = super(changelog, self).rev(node)
         if r in self.filteredrevs:
-            raise error.FilteredLookupError(hex(node), self.indexfile,
-                                            _('filtered node'))
+            raise error.FilteredLookupError(
+                hex(node), self.indexfile, _(b'filtered node')
+            )
         return r
 
     def node(self, rev):
@@ -497,36 +510,37 @@
         return super(changelog, self).flags(rev)
 
     def delayupdate(self, tr):
-        "delay visibility of index updates to other readers"
+        b"delay visibility of index updates to other readers"
 
         if not self._delayed:
             if len(self) == 0:
                 self._divert = True
-                if self._realopener.exists(self.indexfile + '.a'):
-                    self._realopener.unlink(self.indexfile + '.a')
+                if self._realopener.exists(self.indexfile + b'.a'):
+                    self._realopener.unlink(self.indexfile + b'.a')
                 self.opener = _divertopener(self._realopener, self.indexfile)
             else:
                 self._delaybuf = []
-                self.opener = _delayopener(self._realopener, self.indexfile,
-                                           self._delaybuf)
+                self.opener = _delayopener(
+                    self._realopener, self.indexfile, self._delaybuf
+                )
         self._delayed = True
-        tr.addpending('cl-%i' % id(self), self._writepending)
-        tr.addfinalize('cl-%i' % id(self), self._finalize)
+        tr.addpending(b'cl-%i' % id(self), self._writepending)
+        tr.addfinalize(b'cl-%i' % id(self), self._finalize)
 
     def _finalize(self, tr):
-        "finalize index updates"
+        b"finalize index updates"
         self._delayed = False
         self.opener = self._realopener
         # move redirected index data back into place
         if self._divert:
             assert not self._delaybuf
-            tmpname = self.indexfile + ".a"
+            tmpname = self.indexfile + b".a"
             nfile = self.opener.open(tmpname)
             nfile.close()
             self.opener.rename(tmpname, self.indexfile, checkambig=True)
         elif self._delaybuf:
-            fp = self.opener(self.indexfile, 'a', checkambig=True)
-            fp.write("".join(self._delaybuf))
+            fp = self.opener(self.indexfile, b'a', checkambig=True)
+            fp.write(b"".join(self._delaybuf))
             fp.close()
             self._delaybuf = None
         self._divert = False
@@ -534,18 +548,18 @@
         self._enforceinlinesize(tr)
 
     def _writepending(self, tr):
-        "create a file containing the unfinalized state for pretxnchangegroup"
+        b"create a file containing the unfinalized state for pretxnchangegroup"
         if self._delaybuf:
             # make a temporary copy of the index
             fp1 = self._realopener(self.indexfile)
-            pendingfilename = self.indexfile + ".a"
+            pendingfilename = self.indexfile + b".a"
             # register as a temp file to ensure cleanup on failure
             tr.registertmp(pendingfilename)
             # write existing data
-            fp2 = self._realopener(pendingfilename, "w")
+            fp2 = self._realopener(pendingfilename, b"w")
             fp2.write(fp1.read())
             # add pending data
-            fp2.write("".join(self._delaybuf))
+            fp2.write(b"".join(self._delaybuf))
             fp2.close()
             # switch modes so finalize can simply rename
             self._delaybuf = None
@@ -577,19 +591,18 @@
         ``changelogrevision`` instead, as it is faster for partial object
         access.
         """
-        c = changelogrevision(self.revision(node))
-        return (
-            c.manifest,
-            c.user,
-            c.date,
-            c.files,
-            c.description,
-            c.extra
+        d, s = self._revisiondata(node)
+        c = changelogrevision(
+            d, s, self._copiesstorage == b'changeset-sidedata'
         )
+        return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
 
     def changelogrevision(self, nodeorrev):
         """Obtain a ``changelogrevision`` for a node or revision."""
-        return changelogrevision(self.revision(nodeorrev))
+        text, sidedata = self._revisiondata(nodeorrev)
+        return changelogrevision(
+            text, sidedata, self._copiesstorage == b'changeset-sidedata'
+        )
 
     def readfiles(self, node):
         """
@@ -598,13 +611,26 @@
         text = self.revision(node)
         if not text:
             return []
-        last = text.index("\n\n")
-        l = text[:last].split('\n')
+        last = text.index(b"\n\n")
+        l = text[:last].split(b'\n')
         return l[3:]
 
-    def add(self, manifest, files, desc, transaction, p1, p2,
-                  user, date=None, extra=None, p1copies=None, p2copies=None,
-                  filesadded=None, filesremoved=None):
+    def add(
+        self,
+        manifest,
+        files,
+        desc,
+        transaction,
+        p1,
+        p2,
+        user,
+        date=None,
+        extra=None,
+        p1copies=None,
+        p2copies=None,
+        filesadded=None,
+        filesremoved=None,
+    ):
         # Convert to UTF-8 encoded bytestrings as the very first
         # thing: calling any method on a localstr object will turn it
         # into a str object and the cached UTF-8 string is thus lost.
@@ -615,43 +641,77 @@
         # revision text contain two "\n\n" sequences -> corrupt
         # repository since read cannot unpack the revision.
         if not user:
-            raise error.StorageError(_("empty username"))
-        if "\n" in user:
-            raise error.StorageError(_("username %r contains a newline")
-                                     % pycompat.bytestr(user))
+            raise error.StorageError(_(b"empty username"))
+        if b"\n" in user:
+            raise error.StorageError(
+                _(b"username %r contains a newline") % pycompat.bytestr(user)
+            )
 
         desc = stripdesc(desc)
 
         if date:
-            parseddate = "%d %d" % dateutil.parsedate(date)
+            parseddate = b"%d %d" % dateutil.parsedate(date)
         else:
-            parseddate = "%d %d" % dateutil.makedate()
+            parseddate = b"%d %d" % dateutil.makedate()
         if extra:
-            branch = extra.get("branch")
-            if branch in ("default", ""):
-                del extra["branch"]
-            elif branch in (".", "null", "tip"):
-                raise error.StorageError(_('the name \'%s\' is reserved')
-                                         % branch)
-        extrasentries = p1copies, p2copies, filesadded, filesremoved
-        if extra is None and any(x is not None for x in extrasentries):
-            extra = {}
+            branch = extra.get(b"branch")
+            if branch in (b"default", b""):
+                del extra[b"branch"]
+            elif branch in (b".", b"null", b"tip"):
+                raise error.StorageError(
+                    _(b'the name \'%s\' is reserved') % branch
+                )
         sortedfiles = sorted(files)
+        sidedata = None
+        if extra is not None:
+            for name in (
+                b'p1copies',
+                b'p2copies',
+                b'filesadded',
+                b'filesremoved',
+            ):
+                extra.pop(name, None)
         if p1copies is not None:
-            extra['p1copies'] = encodecopies(sortedfiles, p1copies)
+            p1copies = copies.encodecopies(sortedfiles, p1copies)
         if p2copies is not None:
-            extra['p2copies'] = encodecopies(sortedfiles, p2copies)
+            p2copies = copies.encodecopies(sortedfiles, p2copies)
         if filesadded is not None:
-            extra['filesadded'] = encodefileindices(sortedfiles, filesadded)
+            filesadded = copies.encodefileindices(sortedfiles, filesadded)
         if filesremoved is not None:
-            extra['filesremoved'] = encodefileindices(sortedfiles, filesremoved)
+            filesremoved = copies.encodefileindices(sortedfiles, filesremoved)
+        if self._copiesstorage == b'extra':
+            extrasentries = p1copies, p2copies, filesadded, filesremoved
+            if extra is None and any(x is not None for x in extrasentries):
+                extra = {}
+            if p1copies is not None:
+                extra[b'p1copies'] = p1copies
+            if p2copies is not None:
+                extra[b'p2copies'] = p2copies
+            if filesadded is not None:
+                extra[b'filesadded'] = filesadded
+            if filesremoved is not None:
+                extra[b'filesremoved'] = filesremoved
+        elif self._copiesstorage == b'changeset-sidedata':
+            sidedata = {}
+            if p1copies:
+                sidedata[sidedatamod.SD_P1COPIES] = p1copies
+            if p2copies:
+                sidedata[sidedatamod.SD_P2COPIES] = p2copies
+            if filesadded:
+                sidedata[sidedatamod.SD_FILESADDED] = filesadded
+            if filesremoved:
+                sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
+            if not sidedata:
+                sidedata = None
 
         if extra:
             extra = encodeextra(extra)
-            parseddate = "%s %s" % (parseddate, extra)
-        l = [hex(manifest), user, parseddate] + sortedfiles + ["", desc]
-        text = "\n".join(l)
-        return self.addrevision(text, transaction, len(self), p1, p2)
+            parseddate = b"%s %s" % (parseddate, extra)
+        l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
+        text = b"\n".join(l)
+        return self.addrevision(
+            text, transaction, len(self), p1, p2, sidedata=sidedata
+        )
 
     def branchinfo(self, rev):
         """return the branch name and open/close state of a revision
@@ -659,11 +719,11 @@
         This function exists because creating a changectx object
         just to access this is costly."""
         extra = self.read(rev)[5]
-        return encoding.tolocal(extra.get("branch")), 'close' in extra
+        return encoding.tolocal(extra.get(b"branch")), b'close' in extra
 
     def _nodeduplicatecallback(self, transaction, node):
         # keep track of revisions that got "re-added", e.g. unbundle of a known rev.
         #
         # We track them in a list to preserve their order from the source bundle
-        duplicates = transaction.changes.setdefault('revduplicates', [])
+        duplicates = transaction.changes.setdefault(b'revduplicates', [])
         duplicates.append(self.rev(node))
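
# Illustrative sketch (not part of the patch): the layout of the changelog
# v1 entry that add() above assembles and read()/readfiles() take apart --
# line 0 is the manifest hash, line 1 the user, line 2 the date (plus any
# encoded extra), lines 3.. the sorted files, then a blank line and the
# description. All names and sample values below are hypothetical.

def _buildentry(manifesthex, user, parseddate, sortedfiles, desc):
    lines = [manifesthex, user, parseddate] + sortedfiles + [b"", desc]
    return b"\n".join(lines)

def _readfiles(text):
    # mirrors readfiles(): lines after the third one, up to the first
    # blank line, are the files touched by the changeset
    header = text[: text.index(b"\n\n")]
    return header.split(b"\n")[3:]

_entry = _buildentry(
    b"deadbeef" * 5,
    b"Alice <alice@example.com>",
    b"1571670588 14400",
    [b"a.txt", b"dir/b.txt"],
    b"an example description",
)
assert _readfiles(_entry) == [b"a.txt", b"dir/b.txt"]
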
--- a/mercurial/chgserver.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/chgserver.py	Mon Oct 21 11:09:48 2019 -0400
@@ -51,6 +51,10 @@
 import time
 
 from .i18n import _
+from .pycompat import (
+    getattr,
+    setattr,
+)
 
 from . import (
     commandserver,
@@ -67,24 +71,27 @@
     stringutil,
 )
 
+
 def _hashlist(items):
     """return sha1 hexdigest for a list"""
     return node.hex(hashlib.sha1(stringutil.pprint(items)).digest())
 
+
 # sensitive config sections affecting confighash
 _configsections = [
-    'alias',  # affects global state commands.table
-    'eol',    # uses setconfig('eol', ...)
-    'extdiff',  # uisetup will register new commands
-    'extensions',
+    b'alias',  # affects global state commands.table
+    b'eol',  # uses setconfig('eol', ...)
+    b'extdiff',  # uisetup will register new commands
+    b'extensions',
 ]
 
 _configsectionitems = [
-    ('commands', 'show.aliasprefix'), # show.py reads it in extsetup
+    (b'commands', b'show.aliasprefix'),  # show.py reads it in extsetup
 ]
 
 # sensitive environment variables affecting confighash
-_envre = re.compile(br'''\A(?:
+_envre = re.compile(
+    br'''\A(?:
                     CHGHG
                     |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
                     |HG(?:ENCODING|PLAIN).*
@@ -95,7 +102,10 @@
                     |PYTHON.*
                     |TERM(?:INFO)?
                     |TZ
-                    )\Z''', re.X)
+                    )\Z''',
+    re.X,
+)
+
 
 def _confighash(ui):
     """return a quick hash for detecting config/env changes
@@ -115,15 +125,19 @@
         sectionitems.append(ui.config(section, item))
     sectionhash = _hashlist(sectionitems)
     # If $CHGHG is set, the change to $HG should not trigger a new chg server
-    if 'CHGHG' in encoding.environ:
-        ignored = {'HG'}
+    if b'CHGHG' in encoding.environ:
+        ignored = {b'HG'}
     else:
         ignored = set()
-    envitems = [(k, v) for k, v in encoding.environ.iteritems()
-                if _envre.match(k) and k not in ignored]
+    envitems = [
+        (k, v)
+        for k, v in pycompat.iteritems(encoding.environ)
+        if _envre.match(k) and k not in ignored
+    ]
     envhash = _hashlist(sorted(envitems))
     return sectionhash[:6] + envhash[:6]
 
+
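
# Illustrative sketch (not part of the patch): the shape of the value
# _confighash() returns -- the first 6 hex digits of a hash over the
# sensitive config items joined with the first 6 over the matching
# environment variables. _quickhash() is a simplified stand-in for
# _hashlist(); the sample config and environment items are made up.

import hashlib

def _quickhash(items):
    return hashlib.sha1(repr(sorted(items)).encode('ascii')).hexdigest()

_sectionhash = _quickhash([('alias', 'nlog', 'log -l 5')])
_envhash = _quickhash([('HGPLAIN', '1'), ('CHGHG', '/usr/bin/hg')])
_confighash_demo = _sectionhash[:6] + _envhash[:6]  # always 12 characters
assert len(_confighash_demo) == 12
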
 def _getmtimepaths(ui):
     """get a list of paths that should be checked to detect change
 
@@ -135,6 +149,7 @@
     modules = [m for n, m in extensions.extensions(ui)]
     try:
         from . import __version__
+
         modules.append(__version__)
     except ImportError:
         pass
@@ -148,6 +163,7 @@
             pass
     return sorted(set(files))
 
+
 def _mtimehash(paths):
     """return a quick hash for detecting file changes
 
@@ -165,6 +181,7 @@
     extensions after importing them (there is imp.find_module but that faces
     race conditions). We need to calculate confighash without importing.
     """
+
     def trystat(path):
         try:
             st = os.stat(path)
@@ -172,10 +189,13 @@
         except OSError:
             # could be ENOENT, EPERM etc. not fatal in any case
             pass
-    return _hashlist(map(trystat, paths))[:12]
+
+    return _hashlist(pycompat.maplist(trystat, paths))[:12]
+
 
 class hashstate(object):
     """a structure storing confighash, mtimehash, paths used for mtimehash"""
+
     def __init__(self, confighash, mtimehash, mtimepaths):
         self.confighash = confighash
         self.mtimehash = mtimehash
@@ -187,10 +207,15 @@
             mtimepaths = _getmtimepaths(ui)
         confighash = _confighash(ui)
         mtimehash = _mtimehash(mtimepaths)
-        ui.log('cmdserver', 'confighash = %s mtimehash = %s\n',
-               confighash, mtimehash)
+        ui.log(
+            b'cmdserver',
+            b'confighash = %s mtimehash = %s\n',
+            confighash,
+            mtimehash,
+        )
         return hashstate(confighash, mtimehash, mtimepaths)
 
+
 def _newchgui(srcui, csystem, attachio):
     class chgui(srcui.__class__):
         def __init__(self, src=None):
@@ -206,46 +231,53 @@
             #  b. or stdout is redirected by protectfinout(),
             # because the chg client is not aware of these situations and
             # will behave differently (i.e. write to stdout).
-            if (out is not self.fout
-                or not util.safehasattr(self.fout, 'fileno')
+            if (
+                out is not self.fout
+                or not util.safehasattr(self.fout, b'fileno')
                 or self.fout.fileno() != procutil.stdout.fileno()
-                or self._finoutredirected):
+                or self._finoutredirected
+            ):
                 return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
             self.flush()
             return self._csystem(cmd, procutil.shellenviron(environ), cwd)
 
         def _runpager(self, cmd, env=None):
-            self._csystem(cmd, procutil.shellenviron(env), type='pager',
-                          cmdtable={'attachio': attachio})
+            self._csystem(
+                cmd,
+                procutil.shellenviron(env),
+                type=b'pager',
+                cmdtable={b'attachio': attachio},
+            )
             return True
 
     return chgui(srcui)
 
+
 def _loadnewui(srcui, args, cdebug):
     from . import dispatch  # avoid cycle
 
     newui = srcui.__class__.load()
-    for a in ['fin', 'fout', 'ferr', 'environ']:
+    for a in [b'fin', b'fout', b'ferr', b'environ']:
         setattr(newui, a, getattr(srcui, a))
-    if util.safehasattr(srcui, '_csystem'):
+    if util.safehasattr(srcui, b'_csystem'):
         newui._csystem = srcui._csystem
 
     # command line args
     options = dispatch._earlyparseopts(newui, args)
-    dispatch._parseconfig(newui, options['config'])
+    dispatch._parseconfig(newui, options[b'config'])
 
     # stolen from tortoisehg.util.copydynamicconfig()
     for section, name, value in srcui.walkconfig():
         source = srcui.configsource(section, name)
-        if ':' in source or source == '--config' or source.startswith('$'):
+        if b':' in source or source == b'--config' or source.startswith(b'$'):
             # path:line or command line, or environ
             continue
         newui.setconfig(section, name, value, source)
 
     # load wd and repo config, copied from dispatch.py
-    cwd = options['cwd']
+    cwd = options[b'cwd']
     cwd = cwd and os.path.realpath(cwd) or None
-    rpath = options['repository']
+    rpath = options[b'repository']
     path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
 
     extensions.populateui(newui)
@@ -256,6 +288,7 @@
 
     return (newui, newlui)
 
+
 class channeledsystem(object):
     """Propagate ui.system() request in the following format:
 
@@ -276,27 +309,28 @@
     and executes the handler defined in cmdtable, or exits the loop if the command name
     is empty.
     """
+
     def __init__(self, in_, out, channel):
         self.in_ = in_
         self.out = out
         self.channel = channel
 
-    def __call__(self, cmd, environ, cwd=None, type='system', cmdtable=None):
-        args = [type, procutil.quotecommand(cmd), os.path.abspath(cwd or '.')]
-        args.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
-        data = '\0'.join(args)
-        self.out.write(struct.pack('>cI', self.channel, len(data)))
+    def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
+        args = [type, procutil.quotecommand(cmd), os.path.abspath(cwd or b'.')]
+        args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ))
+        data = b'\0'.join(args)
+        self.out.write(struct.pack(b'>cI', self.channel, len(data)))
         self.out.write(data)
         self.out.flush()
 
-        if type == 'system':
+        if type == b'system':
             length = self.in_.read(4)
-            length, = struct.unpack('>I', length)
+            (length,) = struct.unpack(b'>I', length)
             if length != 4:
-                raise error.Abort(_('invalid response'))
-            rc, = struct.unpack('>i', self.in_.read(4))
+                raise error.Abort(_(b'invalid response'))
+            (rc,) = struct.unpack(b'>i', self.in_.read(4))
             return rc
-        elif type == 'pager':
+        elif type == b'pager':
             while True:
                 cmd = self.in_.readline()[:-1]
                 if not cmd:
@@ -304,23 +338,30 @@
                 if cmdtable and cmd in cmdtable:
                     cmdtable[cmd]()
                 else:
-                    raise error.Abort(_('unexpected command: %s') % cmd)
+                    raise error.Abort(_(b'unexpected command: %s') % cmd)
         else:
-            raise error.ProgrammingError('invalid S channel type: %s' % type)
+            raise error.ProgrammingError(b'invalid S channel type: %s' % type)
+
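
# Illustrative sketch (not part of the patch): building and decoding one
# 'S'-channel frame as emitted by channeledsystem.__call__() above -- a
# ">cI" header (channel byte, payload length) followed by NUL-separated
# type/command/cwd/environment fields. The sample command and environment
# entry are made up.

import struct

_payload = b'\0'.join([b'system', b'ls -l', b'/tmp', b'FOO=1'])
_frame = struct.pack(b'>cI', b'S', len(_payload)) + _payload

_channel, _length = struct.unpack(b'>cI', _frame[:5])
_fields = _frame[5 : 5 + _length].split(b'\0')
assert _channel == b'S'
assert _fields[0] == b'system'      # request type: 'system' or 'pager'
assert _fields[1] == b'ls -l'       # the command to run
assert _fields[2] == b'/tmp'        # working directory
assert _fields[3:] == [b'FOO=1']    # KEY=VALUE environment entries
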
 
 _iochannels = [
     # server.ch, ui.fp, mode
-    ('cin', 'fin', r'rb'),
-    ('cout', 'fout', r'wb'),
-    ('cerr', 'ferr', r'wb'),
+    (b'cin', b'fin', r'rb'),
+    (b'cout', b'fout', r'wb'),
+    (b'cerr', b'ferr', r'wb'),
 ]
 
+
 class chgcmdserver(commandserver.server):
-    def __init__(self, ui, repo, fin, fout, sock, prereposetups,
-                 hashstate, baseaddress):
+    def __init__(
+        self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress
+    ):
         super(chgcmdserver, self).__init__(
-            _newchgui(ui, channeledsystem(fin, fout, 'S'), self.attachio),
-            repo, fin, fout, prereposetups)
+            _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio),
+            repo,
+            fin,
+            fout,
+            prereposetups,
+        )
         self.clientsock = sock
         self._ioattached = False
         self._oldios = []  # original (self.ch, ui.fp, fd) before "attachio"
@@ -328,7 +369,7 @@
         self.baseaddress = baseaddress
         if hashstate is not None:
             self.capabilities = self.capabilities.copy()
-            self.capabilities['validate'] = chgcmdserver.validate
+            self.capabilities[b'validate'] = chgcmdserver.validate
 
     def cleanup(self):
         super(chgcmdserver, self).cleanup()
@@ -344,9 +385,9 @@
         """
         # tell client to sendmsg() with 1-byte payload, which makes it
         # distinct from the "attachio\n" command consumed by client.read()
-        self.clientsock.sendall(struct.pack('>cI', 'I', 1))
+        self.clientsock.sendall(struct.pack(b'>cI', b'I', 1))
         clientfds = util.recvfds(self.clientsock.fileno())
-        self.ui.log('chgserver', 'received fds: %r\n', clientfds)
+        self.ui.log(b'chgserver', b'received fds: %r\n', clientfds)
 
         ui = self.ui
         ui.flush()
@@ -362,7 +403,7 @@
             # to see output immediately on pager, the mode stays unchanged
             # when the client re-attaches. ferr is unchanged because it should
             # be unbuffered no matter if it is a tty or not.
-            if fn == 'ferr':
+            if fn == b'ferr':
                 newfp = fp
             else:
                 # make it line buffered explicitly because the default is
@@ -376,7 +417,7 @@
             setattr(self, cn, newfp)
 
         self._ioattached = True
-        self.cresult.write(struct.pack('>i', len(clientfds)))
+        self.cresult.write(struct.pack(b'>i', len(clientfds)))
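
# Illustrative sketch (not part of the patch): the client half of the
# attachio handshake, answering the 1-byte 'I' frame sent above by passing
# its stdio descriptors over the unix socket with SCM_RIGHTS. 'sock' is
# assumed to be the connected AF_UNIX socket; this sketches what the chg
# client does, it is not the client's actual code.

import array
import socket
import struct
import sys

def _client_attachio(sock):
    fds = [sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()]
    # sendmsg() with a 1-byte payload, as the server-side comment requires
    sock.sendmsg(
        [b'\0'],
        [(socket.SOL_SOCKET, socket.SCM_RIGHTS, array.array('i', fds))],
    )
    # the server replies with a big-endian int32: the number of fds received
    (count,) = struct.unpack(b'>i', sock.recv(4))
    return count
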
 
     def _saveio(self):
         if self._oldios:
@@ -431,29 +472,29 @@
         except error.ParseError as inst:
             dispatch._formatparse(self.ui.warn, inst)
             self.ui.flush()
-            self.cresult.write('exit 255')
+            self.cresult.write(b'exit 255')
             return
         except error.Abort as inst:
-            self.ui.error(_("abort: %s\n") % inst)
+            self.ui.error(_(b"abort: %s\n") % inst)
             if inst.hint:
-                self.ui.error(_("(%s)\n") % inst.hint)
+                self.ui.error(_(b"(%s)\n") % inst.hint)
             self.ui.flush()
-            self.cresult.write('exit 255')
+            self.cresult.write(b'exit 255')
             return
         newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
         insts = []
         if newhash.mtimehash != self.hashstate.mtimehash:
             addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
-            insts.append('unlink %s' % addr)
+            insts.append(b'unlink %s' % addr)
             # mtimehash is empty if one or more extensions fail to load.
             # to be compatible with hg, still serve the client this time.
             if self.hashstate.mtimehash:
-                insts.append('reconnect')
+                insts.append(b'reconnect')
         if newhash.confighash != self.hashstate.confighash:
             addr = _hashaddress(self.baseaddress, newhash.confighash)
-            insts.append('redirect %s' % addr)
-        self.ui.log('chgserver', 'validate: %s\n', stringutil.pprint(insts))
-        self.cresult.write('\0'.join(insts) or '\0')
+            insts.append(b'redirect %s' % addr)
+        self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts))
+        self.cresult.write(b'\0'.join(insts) or b'\0')
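
# Illustrative sketch (not part of the patch): the NUL-joined instruction
# string validate() writes to the result channel above. The sample socket
# paths are made up.

_insts = [b'unlink /tmp/chg/server-oldhash', b'redirect /tmp/chg/server-newhash']
_result = b'\0'.join(_insts) or b'\0'  # a lone NUL means "nothing to do"
assert _result.split(b'\0') == [
    b'unlink /tmp/chg/server-oldhash',
    b'redirect /tmp/chg/server-newhash',
]
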
 
     def chdir(self):
         """Change current directory
@@ -464,7 +505,7 @@
         path = self._readstr()
         if not path:
             return
-        self.ui.log('chgserver', 'chdir to %r\n', path)
+        self.ui.log(b'chgserver', b'chdir to %r\n', path)
         os.chdir(path)
 
     def setumask(self):
@@ -477,12 +518,12 @@
         """Change umask"""
         data = self._readstr()
         if len(data) != 4:
-            raise ValueError('invalid mask length in setumask2 request')
+            raise ValueError(b'invalid mask length in setumask2 request')
         self._setumask(data)
 
     def _setumask(self, data):
-        mask = struct.unpack('>I', data)[0]
-        self.ui.log('chgserver', 'setumask %r\n', mask)
+        mask = struct.unpack(b'>I', data)[0]
+        self.ui.log(b'chgserver', b'setumask %r\n', mask)
         os.umask(mask)
 
     def runcommand(self):
@@ -504,39 +545,48 @@
         """
         l = self._readlist()
         try:
-            newenv = dict(s.split('=', 1) for s in l)
+            newenv = dict(s.split(b'=', 1) for s in l)
         except ValueError:
-            raise ValueError('unexpected value in setenv request')
-        self.ui.log('chgserver', 'setenv: %r\n', sorted(newenv.keys()))
+            raise ValueError(b'unexpected value in setenv request')
+        self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
         encoding.environ.clear()
         encoding.environ.update(newenv)
 
     capabilities = commandserver.server.capabilities.copy()
-    capabilities.update({'attachio': attachio,
-                         'chdir': chdir,
-                         'runcommand': runcommand,
-                         'setenv': setenv,
-                         'setumask': setumask,
-                         'setumask2': setumask2})
+    capabilities.update(
+        {
+            b'attachio': attachio,
+            b'chdir': chdir,
+            b'runcommand': runcommand,
+            b'setenv': setenv,
+            b'setumask': setumask,
+            b'setumask2': setumask2,
+        }
+    )
 
-    if util.safehasattr(procutil, 'setprocname'):
+    if util.safehasattr(procutil, b'setprocname'):
+
         def setprocname(self):
             """Change process title"""
             name = self._readstr()
-            self.ui.log('chgserver', 'setprocname: %r\n', name)
+            self.ui.log(b'chgserver', b'setprocname: %r\n', name)
             procutil.setprocname(name)
-        capabilities['setprocname'] = setprocname
+
+        capabilities[b'setprocname'] = setprocname
+
 
 def _tempaddress(address):
-    return '%s.%d.tmp' % (address, os.getpid())
+    return b'%s.%d.tmp' % (address, os.getpid())
+
 
 def _hashaddress(address, hashstr):
     # if the basename of address contains '.', use only the left part. this
     # makes it possible for the client to pass 'server.tmp$PID' and follow
     # up with an atomic rename to avoid locking when spawning new servers.
     dirname, basename = os.path.split(address)
-    basename = basename.split('.', 1)[0]
-    return '%s-%s' % (os.path.join(dirname, basename), hashstr)
+    basename = basename.split(b'.', 1)[0]
+    return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
+
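
# Illustrative example (not part of the patch): the same logic as
# _hashaddress() above, shown standalone with made-up paths -- the
# '.tmp$PID' suffix of a temporary address is dropped before the hash is
# appended.

import os

def _hashaddress_demo(address, hashstr):
    dirname, basename = os.path.split(address)
    basename = basename.split(b'.', 1)[0]
    return b'%s-%s' % (os.path.join(dirname, basename), hashstr)

assert (
    _hashaddress_demo(b'/tmp/chg/server.tmp4242', b'1234abcd5678')
    == b'/tmp/chg/server-1234abcd5678'
)
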
 
 class chgunixservicehandler(object):
     """Set of operations for chg services"""
@@ -545,7 +595,7 @@
 
     def __init__(self, ui):
         self.ui = ui
-        self._idletimeout = ui.configint('chgserver', 'idletimeout')
+        self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
         self._lastactive = time.time()
 
     def bindsocket(self, sock, address):
@@ -557,7 +607,7 @@
 
     def _inithashstate(self, address):
         self._baseaddress = address
-        if self.ui.configbool('chgserver', 'skiphash'):
+        if self.ui.configbool(b'chgserver', b'skiphash'):
             self._hashstate = None
             self._realaddress = address
             return
@@ -571,7 +621,7 @@
             # one or more extensions failed to load. mtimehash becomes
             # meaningless because we do not know the paths of those extensions.
             # set mtimehash to an illegal hash value to invalidate the server.
-            self._hashstate.mtimehash = ''
+            self._hashstate.mtimehash = b''
 
     def _bind(self, sock):
         # use a unique temp address so we can stat the file and do ownership
@@ -594,8 +644,10 @@
     def _issocketowner(self):
         try:
             st = os.stat(self._realaddress)
-            return (st.st_ino == self._socketstat.st_ino and
-                    st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME])
+            return (
+                st.st_ino == self._socketstat.st_ino
+                and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME]
+            )
         except OSError:
             return False
 
@@ -610,8 +662,9 @@
 
     def shouldexit(self):
         if not self._issocketowner():
-            self.ui.log(b'chgserver', b'%s is not owned, exiting.\n',
-                        self._realaddress)
+            self.ui.log(
+                b'chgserver', b'%s is not owned, exiting.\n', self._realaddress
+            )
             return True
         if time.time() - self._lastactive > self._idletimeout:
             self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
@@ -622,8 +675,17 @@
         self._lastactive = time.time()
 
     def createcmdserver(self, repo, conn, fin, fout, prereposetups):
-        return chgcmdserver(self.ui, repo, fin, fout, conn, prereposetups,
-                            self._hashstate, self._baseaddress)
+        return chgcmdserver(
+            self.ui,
+            repo,
+            fin,
+            fout,
+            conn,
+            prereposetups,
+            self._hashstate,
+            self._baseaddress,
+        )
+
 
 def chgunixservice(ui, repo, opts):
     # CHGINTERNALMARK is set by chg client. It is an indication that things
     # are started by chg so other code can do things accordingly, like disable
     # demandimport or detecting chg client started by chg client. When executed
     # here, CHGINTERNALMARK is no longer useful and hence dropped to make
     # environ cleaner.
-    if 'CHGINTERNALMARK' in encoding.environ:
-        del encoding.environ['CHGINTERNALMARK']
+    if b'CHGINTERNALMARK' in encoding.environ:
+        del encoding.environ[b'CHGINTERNALMARK']
 
     if repo:
         # one chgserver can serve multiple repos. drop repo information
-        ui.setconfig('bundle', 'mainreporoot', '', 'repo')
+        ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo')
     h = chgunixservicehandler(ui)
     return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
--- a/mercurial/cmdutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/cmdutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,6 +19,11 @@
     nullrev,
     short,
 )
+from .pycompat import (
+    getattr,
+    open,
+    setattr,
+)
 
 from . import (
     bookmarks,
@@ -61,166 +66,293 @@
 # templates of common command options
 
 dryrunopts = [
-    ('n', 'dry-run', None,
-     _('do not perform actions, just print output')),
+    (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
 ]
 
 confirmopts = [
-    ('', 'confirm', None,
-     _('ask before applying actions')),
+    (b'', b'confirm', None, _(b'ask before applying actions')),
 ]
 
 remoteopts = [
-    ('e', 'ssh', '',
-     _('specify ssh command to use'), _('CMD')),
-    ('', 'remotecmd', '',
-     _('specify hg command to run on the remote side'), _('CMD')),
-    ('', 'insecure', None,
-     _('do not verify server certificate (ignoring web.cacerts config)')),
+    (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
+    (
+        b'',
+        b'remotecmd',
+        b'',
+        _(b'specify hg command to run on the remote side'),
+        _(b'CMD'),
+    ),
+    (
+        b'',
+        b'insecure',
+        None,
+        _(b'do not verify server certificate (ignoring web.cacerts config)'),
+    ),
 ]
 
 walkopts = [
-    ('I', 'include', [],
-     _('include names matching the given patterns'), _('PATTERN')),
-    ('X', 'exclude', [],
-     _('exclude names matching the given patterns'), _('PATTERN')),
+    (
+        b'I',
+        b'include',
+        [],
+        _(b'include names matching the given patterns'),
+        _(b'PATTERN'),
+    ),
+    (
+        b'X',
+        b'exclude',
+        [],
+        _(b'exclude names matching the given patterns'),
+        _(b'PATTERN'),
+    ),
 ]
 
 commitopts = [
-    ('m', 'message', '',
-     _('use text as commit message'), _('TEXT')),
-    ('l', 'logfile', '',
-     _('read commit message from file'), _('FILE')),
+    (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
+    (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
 ]
 
 commitopts2 = [
-    ('d', 'date', '',
-     _('record the specified date as commit date'), _('DATE')),
-    ('u', 'user', '',
-     _('record the specified user as committer'), _('USER')),
+    (
+        b'd',
+        b'date',
+        b'',
+        _(b'record the specified date as commit date'),
+        _(b'DATE'),
+    ),
+    (
+        b'u',
+        b'user',
+        b'',
+        _(b'record the specified user as committer'),
+        _(b'USER'),
+    ),
+]
+
+commitopts3 = [
+    (b'D', b'currentdate', None, _(b'record the current date as commit date')),
+    (b'U', b'currentuser', None, _(b'record the current user as committer')),
 ]
 
 formatteropts = [
-    ('T', 'template', '',
-     _('display with template'), _('TEMPLATE')),
+    (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
 ]
 
 templateopts = [
-    ('', 'style', '',
-     _('display using template map file (DEPRECATED)'), _('STYLE')),
-    ('T', 'template', '',
-     _('display with template'), _('TEMPLATE')),
+    (
+        b'',
+        b'style',
+        b'',
+        _(b'display using template map file (DEPRECATED)'),
+        _(b'STYLE'),
+    ),
+    (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
 ]
 
 logopts = [
-    ('p', 'patch', None, _('show patch')),
-    ('g', 'git', None, _('use git extended diff format')),
-    ('l', 'limit', '',
-     _('limit number of changes displayed'), _('NUM')),
-    ('M', 'no-merges', None, _('do not show merges')),
-    ('', 'stat', None, _('output diffstat-style summary of changes')),
-    ('G', 'graph', None, _("show the revision DAG")),
+    (b'p', b'patch', None, _(b'show patch')),
+    (b'g', b'git', None, _(b'use git extended diff format')),
+    (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
+    (b'M', b'no-merges', None, _(b'do not show merges')),
+    (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
+    (b'G', b'graph', None, _(b"show the revision DAG")),
 ] + templateopts
 
 diffopts = [
-    ('a', 'text', None, _('treat all files as text')),
-    ('g', 'git', None, _('use git extended diff format')),
-    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
-    ('', 'nodates', None, _('omit dates from diff headers'))
+    (b'a', b'text', None, _(b'treat all files as text')),
+    (b'g', b'git', None, _(b'use git extended diff format')),
+    (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
+    (b'', b'nodates', None, _(b'omit dates from diff headers')),
 ]
 
 diffwsopts = [
-    ('w', 'ignore-all-space', None,
-     _('ignore white space when comparing lines')),
-    ('b', 'ignore-space-change', None,
-     _('ignore changes in the amount of white space')),
-    ('B', 'ignore-blank-lines', None,
-     _('ignore changes whose lines are all blank')),
-    ('Z', 'ignore-space-at-eol', None,
-     _('ignore changes in whitespace at EOL')),
+    (
+        b'w',
+        b'ignore-all-space',
+        None,
+        _(b'ignore white space when comparing lines'),
+    ),
+    (
+        b'b',
+        b'ignore-space-change',
+        None,
+        _(b'ignore changes in the amount of white space'),
+    ),
+    (
+        b'B',
+        b'ignore-blank-lines',
+        None,
+        _(b'ignore changes whose lines are all blank'),
+    ),
+    (
+        b'Z',
+        b'ignore-space-at-eol',
+        None,
+        _(b'ignore changes in whitespace at EOL'),
+    ),
 ]
 
-diffopts2 = [
-    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
-    ('p', 'show-function', None, _('show which function each change is in')),
-    ('', 'reverse', None, _('produce a diff that undoes the changes')),
-] + diffwsopts + [
-    ('U', 'unified', '',
-     _('number of lines of context to show'), _('NUM')),
-    ('', 'stat', None, _('output diffstat-style summary of changes')),
-    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
-]
+diffopts2 = (
+    [
+        (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
+        (
+            b'p',
+            b'show-function',
+            None,
+            _(b'show which function each change is in'),
+        ),
+        (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
+    ]
+    + diffwsopts
+    + [
+        (
+            b'U',
+            b'unified',
+            b'',
+            _(b'number of lines of context to show'),
+            _(b'NUM'),
+        ),
+        (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
+        (
+            b'',
+            b'root',
+            b'',
+            _(b'produce diffs relative to subdirectory'),
+            _(b'DIR'),
+        ),
+    ]
+)
 
 mergetoolopts = [
-    ('t', 'tool', '', _('specify merge tool'), _('TOOL')),
+    (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
 ]
 
 similarityopts = [
-    ('s', 'similarity', '',
-     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
+    (
+        b's',
+        b'similarity',
+        b'',
+        _(b'guess renamed files by similarity (0<=s<=100)'),
+        _(b'SIMILARITY'),
+    )
 ]
 
-subrepoopts = [
-    ('S', 'subrepos', None,
-     _('recurse into subrepositories'))
-]
+subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
 
 debugrevlogopts = [
-    ('c', 'changelog', False, _('open changelog')),
-    ('m', 'manifest', False, _('open manifest')),
-    ('', 'dir', '', _('open directory manifest')),
+    (b'c', b'changelog', False, _(b'open changelog')),
+    (b'm', b'manifest', False, _(b'open manifest')),
+    (b'', b'dir', b'', _(b'open directory manifest')),
 ]
 
 # special string such that everything below this line will be ignored in the
 # editor text
-_linebelow = "^HG: ------------------------ >8 ------------------------$"
+_linebelow = b"^HG: ------------------------ >8 ------------------------$"
+
+
+def resolvecommitoptions(ui, opts):
+    """modify commit options dict to handle related options
+
+    The return value indicates that ``rewrite.update-timestamp`` is the reason
+    the ``date`` option is set.
+    """
+    if opts.get(b'date') and opts.get(b'currentdate'):
+        raise error.Abort(_(b'--date and --currentdate are mutually exclusive'))
+    if opts.get(b'user') and opts.get(b'currentuser'):
+        raise error.Abort(_(b'--user and --currentuser are mutually exclusive'))
+
+    datemaydiffer = False  # date-only change should be ignored?
+
+    if opts.get(b'currentdate'):
+        opts[b'date'] = b'%d %d' % dateutil.makedate()
+    elif (
+        not opts.get(b'date')
+        and ui.configbool(b'rewrite', b'update-timestamp')
+        and opts.get(b'currentdate') is None
+    ):
+        opts[b'date'] = b'%d %d' % dateutil.makedate()
+        datemaydiffer = True
+
+    if opts.get(b'currentuser'):
+        opts[b'user'] = ui.username()
+
+    return datemaydiffer
+
+
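
# Illustrative sketch (not part of the patch): a simplified restatement of
# the date handling in resolvecommitoptions() above, with 'updatetimestamp'
# standing in for ui.configbool(b'rewrite', b'update-timestamp') and a
# fixed fake "now". All values here are hypothetical.

def _resolve_date_demo(opts, updatetimestamp, now=b'1571670588 0'):
    datemaydiffer = False
    if opts.get(b'currentdate'):
        opts[b'date'] = now                 # --currentdate wins
    elif not opts.get(b'date') and updatetimestamp:
        opts[b'date'] = now                 # config-driven timestamp
        datemaydiffer = True                # callers may ignore the change
    return datemaydiffer

_opts = {}
assert _resolve_date_demo(_opts, True) is True
assert _opts[b'date'] == b'1571670588 0'
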
+def checknotesize(ui, opts):
+    """ make sure note is of valid format """
+
+    note = opts.get(b'note')
+    if not note:
+        return
+
+    if len(note) > 255:
+        raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
+    if b'\n' in note:
+        raise error.Abort(_(b"note cannot contain a newline"))
+
 
 def ishunk(x):
     hunkclasses = (crecordmod.uihunk, patch.recordhunk)
     return isinstance(x, hunkclasses)
 
+
 def newandmodified(chunks, originalchunks):
     newlyaddedandmodifiedfiles = set()
+    alsorestore = set()
     for chunk in chunks:
-        if (ishunk(chunk) and chunk.header.isnewfile() and chunk not in
-            originalchunks):
+        if (
+            ishunk(chunk)
+            and chunk.header.isnewfile()
+            and chunk not in originalchunks
+        ):
             newlyaddedandmodifiedfiles.add(chunk.header.filename())
-    return newlyaddedandmodifiedfiles
+            alsorestore.update(
+                set(chunk.header.files()) - {chunk.header.filename()}
+            )
+    return newlyaddedandmodifiedfiles, alsorestore
+
 
 def parsealiases(cmd):
-    return cmd.split("|")
+    return cmd.split(b"|")
+
 
 def setupwrapcolorwrite(ui):
     # wrap ui.write so diff output can be labeled/colorized
     def wrapwrite(orig, *args, **kw):
-        label = kw.pop(r'label', '')
+        label = kw.pop(r'label', b'')
         for chunk, l in patch.difflabel(lambda: args):
             orig(chunk, label=label + l)
 
     oldwrite = ui.write
+
     def wrap(*args, **kwargs):
         return wrapwrite(oldwrite, *args, **kwargs)
+
     setattr(ui, 'write', wrap)
     return oldwrite
 
-def filterchunks(ui, originalhunks, usecurses, testfile, match,
-                 operation=None):
+
+def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
     try:
         if usecurses:
             if testfile:
                 recordfn = crecordmod.testdecorator(
-                    testfile, crecordmod.testchunkselector)
+                    testfile, crecordmod.testchunkselector
+                )
             else:
                 recordfn = crecordmod.chunkselector
 
-            return crecordmod.filterpatch(ui, originalhunks, recordfn,
-                                          operation)
+            return crecordmod.filterpatch(
+                ui, originalhunks, recordfn, operation
+            )
     except crecordmod.fallbackerror as e:
-        ui.warn('%s\n' % e.message)
-        ui.warn(_('falling back to text mode\n'))
+        ui.warn(b'%s\n' % e.message)
+        ui.warn(_(b'falling back to text mode\n'))
 
     return patch.filterpatch(ui, originalhunks, match, operation)
 
+
 def recordfilter(ui, originalhunks, match, operation=None):
     """ Prompts the user to filter the originalhunks and return a list of
     selected hunks.
@@ -229,28 +361,31 @@
     (see patch.filterpatch).
     """
     usecurses = crecordmod.checkcurses(ui)
-    testfile = ui.config('experimental', 'crecordtest')
+    testfile = ui.config(b'experimental', b'crecordtest')
     oldwrite = setupwrapcolorwrite(ui)
     try:
-        newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
-                                          testfile, match, operation)
+        newchunks, newopts = filterchunks(
+            ui, originalhunks, usecurses, testfile, match, operation
+        )
     finally:
         ui.write = oldwrite
     return newchunks, newopts
 
-def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
-            filterfn, *pats, **opts):
+
+def dorecord(
+    ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
+):
     opts = pycompat.byteskwargs(opts)
     if not ui.interactive():
         if cmdsuggest:
-            msg = _('running non-interactively, use %s instead') % cmdsuggest
+            msg = _(b'running non-interactively, use %s instead') % cmdsuggest
         else:
-            msg = _('running non-interactively')
+            msg = _(b'running non-interactively')
         raise error.Abort(msg)
 
     # make sure username is set before going interactive
-    if not opts.get('user'):
-        ui.username() # raise exception, username not provided
+    if not opts.get(b'user'):
+        ui.username()  # raise exception, username not provided
 
     def recordfunc(ui, repo, message, match, opts):
         """This is generic record driver.
@@ -266,18 +401,22 @@
         In the end we'll record interesting changes, and everything else
         will be left in place, so the user can continue working.
         """
-        if not opts.get('interactive-unshelve'):
+        if not opts.get(b'interactive-unshelve'):
             checkunfinished(repo, commit=True)
         wctx = repo[None]
         merge = len(wctx.parents()) > 1
         if merge:
-            raise error.Abort(_('cannot partially commit a merge '
-                               '(use "hg commit" instead)'))
+            raise error.Abort(
+                _(
+                    b'cannot partially commit a merge '
+                    b'(use "hg commit" instead)'
+                )
+            )
 
         def fail(f, msg):
-            raise error.Abort('%s: %s' % (f, msg))
-
-        force = opts.get('force')
+            raise error.Abort(b'%s: %s' % (f, msg))
+
+        force = opts.get(b'force')
         if not force:
             vdirs = []
             match = matchmod.badmatch(match, fail)
@@ -289,17 +428,20 @@
 
         with repo.ui.configoverride(overrides, b'record'):
             # subrepoutil.precommit() modifies the status
-            tmpstatus = scmutil.status(copymod.copy(status[0]),
-                                       copymod.copy(status[1]),
-                                       copymod.copy(status[2]),
-                                       copymod.copy(status[3]),
-                                       copymod.copy(status[4]),
-                                       copymod.copy(status[5]),
-                                       copymod.copy(status[6]))
+            tmpstatus = scmutil.status(
+                copymod.copy(status[0]),
+                copymod.copy(status[1]),
+                copymod.copy(status[2]),
+                copymod.copy(status[3]),
+                copymod.copy(status[4]),
+                copymod.copy(status[5]),
+                copymod.copy(status[6]),
+            )
 
             # Force allows -X subrepo to skip the subrepo.
             subs, commitsubs, newstate = subrepoutil.precommit(
-                repo.ui, wctx, tmpstatus, match, force=True)
+                repo.ui, wctx, tmpstatus, match, force=True
+            )
             for s in subs:
                 if s in commitsubs:
                     dirtyreason = wctx.sub(s).dirtyreason(True)
@@ -307,9 +449,13 @@
 
         if not force:
             repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
-        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True,
-                                         section='commands',
-                                         configprefix='commit.interactive.')
+        diffopts = patch.difffeatureopts(
+            ui,
+            opts=opts,
+            whitespace=True,
+            section=b'commands',
+            configprefix=b'commit.interactive.',
+        )
         diffopts.nodates = True
         diffopts.git = True
         diffopts.showfunc = True
@@ -321,13 +467,17 @@
         try:
             chunks, newopts = filterfn(ui, originalchunks, match)
         except error.PatchError as err:
-            raise error.Abort(_('error parsing patch: %s') % err)
+            raise error.Abort(_(b'error parsing patch: %s') % err)
         opts.update(newopts)
 
         # We need to keep a backup of files that have been newly added and
         # modified during the recording process because there is a previous
-        # version without the edit in the workdir
-        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
+        # version without the edit in the workdir. We also will need to restore
+        # files that were the sources of renames so that the patch application
+        # works.
+        newlyaddedandmodifiedfiles, alsorestore = newandmodified(
+            chunks, originalchunks
+        )
         contenders = set()
         for h in chunks:
             try:
@@ -338,7 +488,7 @@
         changed = status.modified + status.added + status.removed
         newfiles = [f for f in changed if f in contenders]
         if not newfiles:
-            ui.status(_('no changes to record\n'))
+            ui.status(_(b'no changes to record\n'))
             return 0
 
         modified = set(status.modified)
@@ -348,11 +498,14 @@
         if backupall:
             tobackup = changed
         else:
-            tobackup = [f for f in newfiles if f in modified or f in
-                        newlyaddedandmodifiedfiles]
+            tobackup = [
+                f
+                for f in newfiles
+                if f in modified or f in newlyaddedandmodifiedfiles
+            ]
         backups = {}
         if tobackup:
-            backupdir = repo.vfs.join('record-backups')
+            backupdir = repo.vfs.join(b'record-backups')
             try:
                 os.mkdir(backupdir)
             except OSError as err:
@@ -361,10 +514,11 @@
         try:
             # backup continues
             for f in tobackup:
-                fd, tmpname = pycompat.mkstemp(prefix=f.replace('/', '_') + '.',
-                                               dir=backupdir)
+                fd, tmpname = pycompat.mkstemp(
+                    prefix=f.replace(b'/', b'_') + b'.', dir=backupdir
+                )
                 os.close(fd)
-                ui.debug('backup %r as %r\n' % (f, tmpname))
+                ui.debug(b'backup %r as %r\n' % (f, tmpname))
                 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                 backups[f] = tmpname
 
@@ -377,13 +531,15 @@
             fp.seek(0)
 
             # 2.5 optionally review / modify patch in text editor
-            if opts.get('review', False):
-                patchtext = (crecordmod.diffhelptext
-                             + crecordmod.patchhelptext
-                             + fp.read())
-                reviewedpatch = ui.edit(patchtext, "",
-                                        action="diff",
-                                        repopath=repo.path)
+            if opts.get(b'review', False):
+                patchtext = (
+                    crecordmod.diffhelptext
+                    + crecordmod.patchhelptext
+                    + fp.read()
+                )
+                reviewedpatch = ui.edit(
+                    patchtext, b"", action=b"diff", repopath=repo.path
+                )
                 fp.truncate(0)
                 fp.write(reviewedpatch)
                 fp.seek(0)
@@ -392,14 +548,19 @@
             # 3a. apply filtered patch to clean repo  (clean)
             if backups:
                 # Equivalent to hg.revert
-                m = scmutil.matchfiles(repo, backups.keys())
-                mergemod.update(repo, repo.dirstate.p1(), branchmerge=False,
-                                force=True, matcher=m)
+                m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
+                mergemod.update(
+                    repo,
+                    repo.dirstate.p1(),
+                    branchmerge=False,
+                    force=True,
+                    matcher=m,
+                )
 
             # 3b. (apply)
             if dopatch:
                 try:
-                    ui.debug('applying patch\n')
+                    ui.debug(b'applying patch\n')
                     ui.debug(fp.getvalue())
                     patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                 except error.PatchError as err:
@@ -417,10 +578,10 @@
             # 5. finally restore backed-up files
             try:
                 dirstate = repo.dirstate
-                for realname, tmpname in backups.iteritems():
-                    ui.debug('restoring %r to %r\n' % (tmpname, realname))
-
-                    if dirstate[realname] == 'n':
+                for realname, tmpname in pycompat.iteritems(backups):
+                    ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
+
+                    if dirstate[realname] == b'n':
                         # without normallookup, restoring timestamp
                         # may cause partially committed files
                         # to be treated as unmodified
@@ -444,6 +605,7 @@
 
     return commit(ui, repo, recordinwlock, pats, opts)
 
+
 class dirnode(object):
     """
     Represent a directory in the user's working copy with information required for
@@ -481,8 +643,8 @@
 
         # the filename contains a path separator, so it is not a direct
         # child of this directory
-        if '/' in filename:
-            subdir, filep = filename.split('/', 1)
+        if b'/' in filename:
+            subdir, filep = filename.split(b'/', 1)
 
             # does the dirnode object for subdir exist
             if subdir not in self.subdirs:
@@ -532,18 +694,19 @@
             # Making sure we terse only when the status abbreviation is
             # passed as terse argument
             if onlyst in terseargs:
-                yield onlyst, self.path + '/'
+                yield onlyst, self.path + b'/'
                 return
 
         # add the files to status list
         for st, fpath in self.iterfilepaths():
             yield st, fpath
 
-        #recurse on the subdirs
+        # recurse on the subdirs
         for dirobj in self.subdirs.values():
             for st, fpath in dirobj.tersewalk(terseargs):
                 yield st, fpath
 
+
 def tersedir(statuslist, terseargs):
     """
     Terse the status if all the files in a directory share the same status.
@@ -558,17 +721,24 @@
     directory or not.
     """
     # the order matters here as that is used to produce the final list
-    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')
+    allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')
 
     # checking the argument validity
     for s in pycompat.bytestr(terseargs):
         if s not in allst:
-            raise error.Abort(_("'%s' not recognized") % s)
+            raise error.Abort(_(b"'%s' not recognized") % s)
 
     # creating a dirnode object for the root of the repo
-    rootobj = dirnode('')
-    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
-               'ignored', 'removed')
+    rootobj = dirnode(b'')
+    pstatus = (
+        b'modified',
+        b'added',
+        b'deleted',
+        b'clean',
+        b'unknown',
+        b'ignored',
+        b'removed',
+    )
 
     tersedict = {}
     for attrname in pstatus:
@@ -593,11 +763,13 @@
 
     return tersedlist
 
+
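
# Illustrative sketch (not part of the patch): a one-level toy version of
# the tersing above -- a directory collapses to 'dir/' only when every file
# under it carries the same status and that status letter was requested.
# The real tersedir()/dirnode recurse over nested directories; the sample
# paths are made up.

def _terse_demo(status_by_path, terseargs):
    bydir = {}
    for path, st in status_by_path.items():
        top = path.split(b'/', 1)[0] + b'/' if b'/' in path else path
        bydir.setdefault(top, []).append((path, st))
    out = []
    for top, entries in sorted(bydir.items()):
        statuses = {st for _, st in entries}
        if top.endswith(b'/') and len(statuses) == 1 and statuses <= terseargs:
            out.append((statuses.pop(), top))
        else:
            out.extend((st, p) for p, st in sorted(entries))
    return out

assert _terse_demo(
    {b'foo/a': b'a', b'foo/b': b'a', b'bar.txt': b'm'}, {b'a'}
) == [(b'm', b'bar.txt'), (b'a', b'foo/')]
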
 def _commentlines(raw):
     '''Surround lines with a comment char and a new line'''
     lines = raw.splitlines()
-    commentedlines = ['# %s' % line for line in lines]
-    return '\n'.join(commentedlines) + '\n'
+    commentedlines = [b'# %s' % line for line in lines]
+    return b'\n'.join(commentedlines) + b'\n'
+
 
 def _conflictsmsg(repo):
     mergestate = mergemod.mergestate.read(repo)
@@ -607,31 +779,41 @@
     m = scmutil.match(repo[None])
     unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
     if unresolvedlist:
-        mergeliststr = '\n'.join(
-            ['    %s' % util.pathto(repo.root, encoding.getcwd(), path)
-             for path in sorted(unresolvedlist)])
-        msg = _('''Unresolved merge conflicts:
+        mergeliststr = b'\n'.join(
+            [
+                b'    %s' % util.pathto(repo.root, encoding.getcwd(), path)
+                for path in sorted(unresolvedlist)
+            ]
+        )
+        msg = (
+            _(
+                '''Unresolved merge conflicts:
 
 %s
 
-To mark files as resolved:  hg resolve --mark FILE''') % mergeliststr
+To mark files as resolved:  hg resolve --mark FILE'''
+            )
+            % mergeliststr
+        )
     else:
-        msg = _('No unresolved merge conflicts.')
+        msg = _(b'No unresolved merge conflicts.')
 
     return _commentlines(msg)
 
+
 def morestatus(repo, fm):
     statetuple = statemod.getrepostate(repo)
-    label = 'status.morestatus'
+    label = b'status.morestatus'
     if statetuple:
         state, helpfulmsg = statetuple
-        statemsg = _('The repository is in an unfinished *%s* state.') % state
-        fm.plain('%s\n' % _commentlines(statemsg), label=label)
+        statemsg = _(b'The repository is in an unfinished *%s* state.') % state
+        fm.plain(b'%s\n' % _commentlines(statemsg), label=label)
         conmsg = _conflictsmsg(repo)
         if conmsg:
-            fm.plain('%s\n' % conmsg, label=label)
+            fm.plain(b'%s\n' % conmsg, label=label)
         if helpfulmsg:
-            fm.plain('%s\n' % _commentlines(helpfulmsg), label=label)
+            fm.plain(b'%s\n' % _commentlines(helpfulmsg), label=label)
+
 
 def findpossible(cmd, table, strict=False):
     """
@@ -661,7 +843,7 @@
                     found = a
                     break
         if found is not None:
-            if aliases[0].startswith("debug") or found.startswith("debug"):
+            if aliases[0].startswith(b"debug") or found.startswith(b"debug"):
                 debugchoice[found] = (aliases, table[e])
             else:
                 choice[found] = (aliases, table[e])
@@ -671,6 +853,7 @@
 
     return choice, allcmds
 
+
 def findcmd(cmd, table, strict=True):
     """Return (aliases, command table entry) for command string."""
     choice, allcmds = findpossible(cmd, table, strict)
@@ -687,36 +870,42 @@
 
     raise error.UnknownCommand(cmd, allcmds)
 
+
 def changebranch(ui, repo, revs, label):
     """ Change the branch name of given revs to label """
 
-    with repo.wlock(), repo.lock(), repo.transaction('branches'):
+    with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
         # abort in case of uncommitted merge or dirty wdir
         bailifchanged(repo)
         revs = scmutil.revrange(repo, revs)
         if not revs:
-            raise error.Abort("empty revision set")
-        roots = repo.revs('roots(%ld)', revs)
+            raise error.Abort(b"empty revision set")
+        roots = repo.revs(b'roots(%ld)', revs)
         if len(roots) > 1:
-            raise error.Abort(_("cannot change branch of non-linear revisions"))
-        rewriteutil.precheck(repo, revs, 'change branch of')
+            raise error.Abort(
+                _(b"cannot change branch of non-linear revisions")
+            )
+        rewriteutil.precheck(repo, revs, b'change branch of')
 
         root = repo[roots.first()]
         rpb = {parent.branch() for parent in root.parents()}
         if label not in rpb and label in repo.branchmap():
-            raise error.Abort(_("a branch of the same name already exists"))
-
-        if repo.revs('obsolete() and %ld', revs):
-            raise error.Abort(_("cannot change branch of a obsolete changeset"))
+            raise error.Abort(_(b"a branch of the same name already exists"))
+
+        if repo.revs(b'obsolete() and %ld', revs):
+            raise error.Abort(
+                _(b"cannot change branch of a obsolete changeset")
+            )
 
         # make sure only topological heads
-        if repo.revs('heads(%ld) - head()', revs):
-            raise error.Abort(_("cannot change branch in middle of a stack"))
+        if repo.revs(b'heads(%ld) - head()', revs):
+            raise error.Abort(_(b"cannot change branch in middle of a stack"))
 
         replacements = {}
         # avoid import cycle mercurial.cmdutil -> mercurial.context ->
         # mercurial.subrepo -> mercurial.cmdutil
         from . import context
+
         for rev in revs:
             ctx = repo[rev]
             oldbranch = ctx.branch()
@@ -730,10 +919,12 @@
                 except error.ManifestLookupError:
                     return None
 
-            ui.debug("changing branch of '%s' from '%s' to '%s'\n"
-                     % (hex(ctx.node()), oldbranch, label))
+            ui.debug(
+                b"changing branch of '%s' from '%s' to '%s'\n"
+                % (hex(ctx.node()), oldbranch, label)
+            )
             extra = ctx.extra()
-            extra['branch_change'] = hex(ctx.node())
+            extra[b'branch_change'] = hex(ctx.node())
             # While changing the branch of a set of linear commits, make sure
             # that we base our commits on the new parent rather than the old
             # parent, which was obsoleted while changing the branch
@@ -744,21 +935,26 @@
             if p2 in replacements:
                 p2 = replacements[p2][0]
 
-            mc = context.memctx(repo, (p1, p2),
-                                ctx.description(),
-                                ctx.files(),
-                                filectxfn,
-                                user=ctx.user(),
-                                date=ctx.date(),
-                                extra=extra,
-                                branch=label)
+            mc = context.memctx(
+                repo,
+                (p1, p2),
+                ctx.description(),
+                ctx.files(),
+                filectxfn,
+                user=ctx.user(),
+                date=ctx.date(),
+                extra=extra,
+                branch=label,
+            )
 
             newnode = repo.commitctx(mc)
             replacements[ctx.node()] = (newnode,)
-            ui.debug('new node id is %s\n' % hex(newnode))
+            ui.debug(b'new node id is %s\n' % hex(newnode))
 
         # create obsmarkers and move bookmarks
-        scmutil.cleanupnodes(repo, replacements, 'branch-change', fixphase=True)
+        scmutil.cleanupnodes(
+            repo, replacements, b'branch-change', fixphase=True
+        )
 
         # move the working copy too
         wctx = repo[None]
@@ -769,18 +965,21 @@
                 # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                 # mercurial.cmdutil
                 from . import hg
+
                 hg.update(repo, newid[0], quietempty=True)
 
-        ui.status(_("changed branch on %d changesets\n") % len(replacements))
+        ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
+
 
 def findrepo(p):
-    while not os.path.isdir(os.path.join(p, ".hg")):
+    while not os.path.isdir(os.path.join(p, b".hg")):
         oldp, p = p, os.path.dirname(p)
         if p == oldp:
             return None
 
     return p
 
+
 def bailifchanged(repo, merge=True, hint=None):
     """ enforce the precondition that working directory must be clean.
 
@@ -791,33 +990,38 @@
     """
 
     if merge and repo.dirstate.p2() != nullid:
-        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
+        raise error.Abort(_(b'outstanding uncommitted merge'), hint=hint)
     modified, added, removed, deleted = repo.status()[:4]
     if modified or added or removed or deleted:
-        raise error.Abort(_('uncommitted changes'), hint=hint)
+        raise error.Abort(_(b'uncommitted changes'), hint=hint)
     ctx = repo[None]
     for s in sorted(ctx.substate):
         ctx.sub(s).bailifchanged(hint=hint)
 
+
 def logmessage(ui, opts):
     """ get the log message according to -m and -l option """
-    message = opts.get('message')
-    logfile = opts.get('logfile')
+    message = opts.get(b'message')
+    logfile = opts.get(b'logfile')
 
     if message and logfile:
-        raise error.Abort(_('options --message and --logfile are mutually '
-                           'exclusive'))
+        raise error.Abort(
+            _(b'options --message and --logfile are mutually exclusive')
+        )
     if not message and logfile:
         try:
             if isstdiofilename(logfile):
                 message = ui.fin.read()
             else:
-                message = '\n'.join(util.readfile(logfile).splitlines())
+                message = b'\n'.join(util.readfile(logfile).splitlines())
         except IOError as inst:
-            raise error.Abort(_("can't read commit message '%s': %s") %
-                             (logfile, encoding.strtolocal(inst.strerror)))
+            raise error.Abort(
+                _(b"can't read commit message '%s': %s")
+                % (logfile, encoding.strtolocal(inst.strerror))
+            )
     return message
 
+
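A hedged sketch of the -m/-l handling above, outside Mercurial's opts machinery (the function name and plain-str dict keys are illustrative only):

    import sys

    def log_message(opts):
        message, logfile = opts.get("message"), opts.get("logfile")
        if message and logfile:
            raise ValueError(
                "options --message and --logfile are mutually exclusive")
        if not message and logfile:
            if logfile == "-":  # mirrors isstdiofilename(): "-" means stdin
                message = sys.stdin.read()
            else:
                with open(logfile) as fp:
                    # normalize line endings, as util.readfile + splitlines does
                    message = "\n".join(fp.read().splitlines())
        return message
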
 def mergeeditform(ctxorbool, baseformname):
     """return appropriate editform name (referencing a committemplate)
 
@@ -829,14 +1033,16 @@
     """
     if isinstance(ctxorbool, bool):
         if ctxorbool:
-            return baseformname + ".merge"
+            return baseformname + b".merge"
     elif len(ctxorbool.parents()) > 1:
-        return baseformname + ".merge"
-
-    return baseformname + ".normal"
-
-def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
-                    editform='', **opts):
+        return baseformname + b".merge"
+
+    return baseformname + b".normal"
+
+
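To make the naming concrete, here are editform values mergeeditform() produces (the inputs, including two_parent_ctx, are assumed for illustration):

    # mergeeditform(True, b'import.normal')    -> b'import.normal.merge'
    # mergeeditform(False, b'import.normal')   -> b'import.normal.normal'
    # mergeeditform(two_parent_ctx, b'commit') -> b'commit.merge'
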
+def getcommiteditor(
+    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
+):
     """get appropriate commit message editor according to '--edit' option
 
     'finishdesc' is a function to be called with edited commit message
@@ -857,15 +1063,15 @@
     they are specific for usage in MQ.
     """
     if edit or finishdesc or extramsg:
-        return lambda r, c, s: commitforceeditor(r, c, s,
-                                                 finishdesc=finishdesc,
-                                                 extramsg=extramsg,
-                                                 editform=editform)
+        return lambda r, c, s: commitforceeditor(
+            r, c, s, finishdesc=finishdesc, extramsg=extramsg, editform=editform
+        )
     elif editform:
         return lambda r, c, s: commiteditor(r, c, s, editform=editform)
     else:
         return commiteditor
 
+
 def _escapecommandtemplate(tmpl):
     parts = []
     for typ, start, end in templater.scantemplate(tmpl, raw=True):
@@ -875,6 +1081,7 @@
             parts.append(tmpl[start:end])
     return b''.join(parts)
 
+
 def rendercommandtemplate(ui, tmpl, props):
     r"""Expand a literal template 'tmpl' in a way suitable for command line
 
@@ -893,6 +1100,7 @@
     t = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
     return t.renderdefault(props)
 
+
 def rendertemplate(ctx, tmpl, props=None):
     """Expand a literal template 'tmpl' byte-string against one changeset
 
@@ -901,13 +1109,15 @@
     """
     repo = ctx.repo()
     tres = formatter.templateresources(repo.ui, repo)
-    t = formatter.maketemplater(repo.ui, tmpl, defaults=templatekw.keywords,
-                                resources=tres)
-    mapping = {'ctx': ctx}
+    t = formatter.maketemplater(
+        repo.ui, tmpl, defaults=templatekw.keywords, resources=tres
+    )
+    mapping = {b'ctx': ctx}
     if props:
         mapping.update(props)
     return t.renderdefault(mapping)
 
+
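A hedged usage sketch of rendertemplate() (the ctx object and the expanded output are assumed; the template syntax is Mercurial's keyword/filter syntax):

    # rendertemplate(ctx, b'{node|short}: {desc|firstline}\n')
    # might yield something like:
    # b'0123456789ab: fix the widget frobnicator\n'
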
 def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
     r"""Convert old-style filename format string to template string
 
@@ -965,16 +1175,19 @@
                 break
             newname.append(stringutil.escapestr(pat[i:n]))
             if n + 2 > end:
-                raise error.Abort(_("incomplete format spec in output "
-                                    "filename"))
-            c = pat[n + 1:n + 2]
+                raise error.Abort(
+                    _(b"incomplete format spec in output filename")
+                )
+            c = pat[n + 1 : n + 2]
             i = n + 2
             try:
                 newname.append(expander[c])
             except KeyError:
-                raise error.Abort(_("invalid format spec '%%%s' in output "
-                                    "filename") % c)
-    return ''.join(newname)
+                raise error.Abort(
+                    _(b"invalid format spec '%%%s' in output filename") % c
+                )
+    return b''.join(newname)
+
 
 def makefilename(ctx, pat, **props):
     if not pat:
@@ -985,9 +1198,11 @@
     # disable the expansion.
     return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))
 
+
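A brief illustration of the old-style format specs converted above (the expanded values are hypothetical):

    # hg export -o 'hg-%h.patch' names each output file after the short
    # hash, e.g. hg-0123456789ab.patch; '%%' produces a literal '%', and
    # an unrecognized spec aborts with "invalid format spec '%...' in
    # output filename", matching the KeyError branch above.
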
 def isstdiofilename(pat):
     """True if the given pat looks like a filename denoting stdin/stdout"""
-    return not pat or pat == '-'
+    return not pat or pat == b'-'
+
 
 class _unclosablefile(object):
     def __init__(self, fp):
@@ -1008,8 +1223,9 @@
     def __exit__(self, exc_type, exc_value, exc_tb):
         pass
 
-def makefileobj(ctx, pat, mode='wb', **props):
-    writable = mode not in ('r', 'rb')
+
+def makefileobj(ctx, pat, mode=b'wb', **props):
+    writable = mode not in (b'r', b'rb')
 
     if isstdiofilename(pat):
         repo = ctx.repo()
@@ -1021,22 +1237,25 @@
     fn = makefilename(ctx, pat, **props)
     return open(fn, mode)
 
+
 def openstorage(repo, cmd, file_, opts, returnrevlog=False):
     """opens the changelog, manifest, a filelog or a given revlog"""
-    cl = opts['changelog']
-    mf = opts['manifest']
-    dir = opts['dir']
+    cl = opts[b'changelog']
+    mf = opts[b'manifest']
+    dir = opts[b'dir']
     msg = None
     if cl and mf:
-        msg = _('cannot specify --changelog and --manifest at the same time')
+        msg = _(b'cannot specify --changelog and --manifest at the same time')
     elif cl and dir:
-        msg = _('cannot specify --changelog and --dir at the same time')
+        msg = _(b'cannot specify --changelog and --dir at the same time')
     elif cl or mf or dir:
         if file_:
-            msg = _('cannot specify filename with --changelog or --manifest')
+            msg = _(b'cannot specify filename with --changelog or --manifest')
         elif not repo:
-            msg = _('cannot specify --changelog or --manifest or --dir '
-                    'without a repository')
+            msg = _(
+                b'cannot specify --changelog or --manifest or --dir '
+                b'without a repository'
+            )
     if msg:
         raise error.Abort(msg)
 
@@ -1045,11 +1264,15 @@
         if cl:
             r = repo.unfiltered().changelog
         elif dir:
-            if 'treemanifest' not in repo.requirements:
-                raise error.Abort(_("--dir can only be used on repos with "
-                                   "treemanifest enabled"))
-            if not dir.endswith('/'):
-                dir = dir + '/'
+            if b'treemanifest' not in repo.requirements:
+                raise error.Abort(
+                    _(
+                        b"--dir can only be used on repos with "
+                        b"treemanifest enabled"
+                    )
+                )
+            if not dir.endswith(b'/'):
+                dir = dir + b'/'
             dirlog = repo.manifestlog.getstorage(dir)
             if len(dirlog):
                 r = dirlog
@@ -1065,23 +1288,25 @@
         if returnrevlog:
             if isinstance(r, revlog.revlog):
                 pass
-            elif util.safehasattr(r, '_revlog'):
+            elif util.safehasattr(r, b'_revlog'):
                 r = r._revlog
             elif r is not None:
-                raise error.Abort(_('%r does not appear to be a revlog') % r)
+                raise error.Abort(_(b'%r does not appear to be a revlog') % r)
 
     if not r:
         if not returnrevlog:
-            raise error.Abort(_('cannot give path to non-revlog'))
+            raise error.Abort(_(b'cannot give path to non-revlog'))
 
         if not file_:
-            raise error.CommandError(cmd, _('invalid arguments'))
+            raise error.CommandError(cmd, _(b'invalid arguments'))
         if not os.path.isfile(file_):
-            raise error.Abort(_("revlog '%s' not found") % file_)
-        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
-                          file_[:-2] + ".i")
+            raise error.Abort(_(b"revlog '%s' not found") % file_)
+        r = revlog.revlog(
+            vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
+        )
     return r
 
+
 def openrevlog(repo, cmd, file_, opts):
     """Obtain a revlog backing storage of an item.
 
@@ -1094,6 +1319,7 @@
     """
     return openstorage(repo, cmd, file_, opts, returnrevlog=True)
 
+
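For context, a hedged sketch of the opts dict shape openstorage()/openrevlog() expect, following the keys read above (the command name and filename are illustrative):

    # opts = {b'changelog': False, b'manifest': False, b'dir': b''}
    # r = openrevlog(repo, b'debugdata', b'foo.py', opts)
    # Setting both b'changelog' and b'manifest' aborts, as does combining
    # either of them with an explicit filename.
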
 def copy(ui, repo, pats, opts, rename=False):
     # called with the repo lock held
     #
@@ -1101,28 +1327,34 @@
     # ossep => pathname that uses os.sep to separate directories
     cwd = repo.getcwd()
     targets = {}
-    after = opts.get("after")
-    dryrun = opts.get("dry_run")
+    after = opts.get(b"after")
+    dryrun = opts.get(b"dry_run")
     wctx = repo[None]
 
     uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
+
     def walkpat(pat):
         srcs = []
         if after:
-            badstates = '?'
+            badstates = b'?'
         else:
-            badstates = '?r'
+            badstates = b'?r'
         m = scmutil.match(wctx, [pat], opts, globbed=True)
         for abs in wctx.walk(m):
             state = repo.dirstate[abs]
             rel = uipathfn(abs)
             exact = m.exact(abs)
             if state in badstates:
-                if exact and state == '?':
-                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
-                if exact and state == 'r':
-                    ui.warn(_('%s: not copying - file has been marked for'
-                              ' remove\n') % rel)
+                if exact and state == b'?':
+                    ui.warn(_(b'%s: not copying - file is not managed\n') % rel)
+                if exact and state == b'r':
+                    ui.warn(
+                        _(
+                            b'%s: not copying - file has been marked for'
+                            b' remove\n'
+                        )
+                        % rel
+                    )
                 continue
             # abs: hgsep
             # rel: ossep
@@ -1134,11 +1366,11 @@
     # otarget: ossep
     def copyfile(abssrc, relsrc, otarget, exact):
         abstarget = pathutil.canonpath(repo.root, cwd, otarget)
-        if '/' in abstarget:
+        if b'/' in abstarget:
             # We cannot normalize abstarget itself, as this would prevent
             # case-only renames, like a => A.
-            abspath, absname = abstarget.rsplit('/', 1)
-            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
+            abspath, absname = abstarget.rsplit(b'/', 1)
+            abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
         reltarget = repo.pathto(abstarget, cwd)
         target = repo.wjoin(abstarget)
         src = repo.wjoin(abssrc)
@@ -1149,65 +1381,87 @@
         # check for collisions
         prevsrc = targets.get(abstarget)
         if prevsrc is not None:
-            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
-                    (reltarget, repo.pathto(abssrc, cwd),
-                     repo.pathto(prevsrc, cwd)))
-            return True # report a failure
+            ui.warn(
+                _(b'%s: not overwriting - %s collides with %s\n')
+                % (
+                    reltarget,
+                    repo.pathto(abssrc, cwd),
+                    repo.pathto(prevsrc, cwd),
+                )
+            )
+            return True  # report a failure
 
         # check for overwrites
         exists = os.path.lexists(target)
         samefile = False
         if exists and abssrc != abstarget:
-            if (repo.dirstate.normalize(abssrc) ==
-                repo.dirstate.normalize(abstarget)):
+            if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
+                abstarget
+            ):
                 if not rename:
-                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
-                    return True # report a failure
+                    ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
+                    return True  # report a failure
                 exists = False
                 samefile = True
 
-        if not after and exists or after and state in 'mn':
-            if not opts['force']:
-                if state in 'mn':
-                    msg = _('%s: not overwriting - file already committed\n')
+        if not after and exists or after and state in b'mn':
+            if not opts[b'force']:
+                if state in b'mn':
+                    msg = _(b'%s: not overwriting - file already committed\n')
                     if after:
-                        flags = '--after --force'
+                        flags = b'--after --force'
                     else:
-                        flags = '--force'
+                        flags = b'--force'
                     if rename:
-                        hint = _("('hg rename %s' to replace the file by "
-                                 'recording a rename)\n') % flags
+                        hint = (
+                            _(
+                                b"('hg rename %s' to replace the file by "
+                                b'recording a rename)\n'
+                            )
+                            % flags
+                        )
                     else:
-                        hint = _("('hg copy %s' to replace the file by "
-                                 'recording a copy)\n') % flags
+                        hint = (
+                            _(
+                                b"('hg copy %s' to replace the file by "
+                                b'recording a copy)\n'
+                            )
+                            % flags
+                        )
                 else:
-                    msg = _('%s: not overwriting - file exists\n')
+                    msg = _(b'%s: not overwriting - file exists\n')
                     if rename:
-                        hint = _("('hg rename --after' to record the rename)\n")
+                        hint = _(
+                            b"('hg rename --after' to record the rename)\n"
+                        )
                     else:
-                        hint = _("('hg copy --after' to record the copy)\n")
+                        hint = _(b"('hg copy --after' to record the copy)\n")
                 ui.warn(msg % reltarget)
                 ui.warn(hint)
-                return True # report a failure
+                return True  # report a failure
 
         if after:
             if not exists:
                 if rename:
-                    ui.warn(_('%s: not recording move - %s does not exist\n') %
-                            (relsrc, reltarget))
+                    ui.warn(
+                        _(b'%s: not recording move - %s does not exist\n')
+                        % (relsrc, reltarget)
+                    )
                 else:
-                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
-                            (relsrc, reltarget))
-                return True # report a failure
+                    ui.warn(
+                        _(b'%s: not recording copy - %s does not exist\n')
+                        % (relsrc, reltarget)
+                    )
+                return True  # report a failure
         elif not dryrun:
             try:
                 if exists:
                     os.unlink(target)
-                targetdir = os.path.dirname(target) or '.'
+                targetdir = os.path.dirname(target) or b'.'
                 if not os.path.isdir(targetdir):
                     os.makedirs(targetdir)
                 if samefile:
-                    tmp = target + "~hgrename"
+                    tmp = target + b"~hgrename"
                     os.rename(src, tmp)
                     os.rename(tmp, target)
                 else:
@@ -1217,27 +1471,30 @@
                 srcexists = True
             except IOError as inst:
                 if inst.errno == errno.ENOENT:
-                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
+                    ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
                     srcexists = False
                 else:
-                    ui.warn(_('%s: cannot copy - %s\n') %
-                            (relsrc, encoding.strtolocal(inst.strerror)))
-                    return True # report a failure
+                    ui.warn(
+                        _(b'%s: cannot copy - %s\n')
+                        % (relsrc, encoding.strtolocal(inst.strerror))
+                    )
+                    return True  # report a failure
 
         if ui.verbose or not exact:
             if rename:
-                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
+                ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
             else:
-                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
+                ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
 
         targets[abstarget] = abssrc
 
         # fix up dirstate
-        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
-                             dryrun=dryrun, cwd=cwd)
+        scmutil.dirstatecopy(
+            ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
+        )
         if rename and not dryrun:
             if not after and srcexists and not samefile:
-                rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
+                rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
                 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
             wctx.forget([abssrc])
 
@@ -1257,8 +1514,9 @@
                 striplen += len(pycompat.ossep)
             res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
         elif destdirexists:
-            res = lambda p: os.path.join(dest,
-                                         os.path.basename(util.localpath(p)))
+            res = lambda p: os.path.join(
+                dest, os.path.basename(util.localpath(p))
+            )
         else:
             res = lambda p: dest
         return res
@@ -1270,8 +1528,9 @@
     def targetpathafterfn(pat, dest, srcs):
         if matchmod.patkind(pat):
             # a mercurial pattern
-            res = lambda p: os.path.join(dest,
-                                         os.path.basename(util.localpath(p)))
+            res = lambda p: os.path.join(
+                dest, os.path.basename(util.localpath(p))
+            )
         else:
             abspfx = pathutil.canonpath(repo.root, cwd, pat)
             if len(abspfx) < len(srcs[0][0]):
@@ -1296,30 +1555,34 @@
                         striplen1 += len(pycompat.ossep)
                     if evalpath(striplen1) > score:
                         striplen = striplen1
-                res = lambda p: os.path.join(dest,
-                                             util.localpath(p)[striplen:])
+                res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
             else:
                 # a file
                 if destdirexists:
-                    res = lambda p: os.path.join(dest,
-                                        os.path.basename(util.localpath(p)))
+                    res = lambda p: os.path.join(
+                        dest, os.path.basename(util.localpath(p))
+                    )
                 else:
                     res = lambda p: dest
         return res
 
     pats = scmutil.expandpats(pats)
     if not pats:
-        raise error.Abort(_('no source or destination specified'))
+        raise error.Abort(_(b'no source or destination specified'))
     if len(pats) == 1:
-        raise error.Abort(_('no destination specified'))
+        raise error.Abort(_(b'no destination specified'))
     dest = pats.pop()
     destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
     if not destdirexists:
         if len(pats) > 1 or matchmod.patkind(pats[0]):
-            raise error.Abort(_('with multiple sources, destination must be an '
-                               'existing directory'))
+            raise error.Abort(
+                _(
+                    b'with multiple sources, destination must be an '
+                    b'existing directory'
+                )
+            )
         if util.endswithsep(dest):
-            raise error.Abort(_('destination %s is not a directory') % dest)
+            raise error.Abort(_(b'destination %s is not a directory') % dest)
 
     tfn = targetpathfn
     if after:
@@ -1331,7 +1594,7 @@
             continue
         copylist.append((tfn(pat, dest, srcs), srcs))
     if not copylist:
-        raise error.Abort(_('no files to copy'))
+        raise error.Abort(_(b'no files to copy'))
 
     errors = 0
     for targetpath, srcs in copylist:
@@ -1341,10 +1604,11 @@
 
     return errors != 0
 
+
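A minimal sketch of the collision bookkeeping copyfile() performs via the targets dict above (the names and the warn callable are hypothetical):

    targets = {}

    def record_copy(abstarget, abssrc, warn=print):
        # Refuse to copy two different sources onto the same target.
        prev = targets.get(abstarget)
        if prev is not None:
            warn('%s: not overwriting - %s collides with %s'
                 % (abstarget, abssrc, prev))
            return True  # report a failure
        targets[abstarget] = abssrc
        return False
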
 ## facility to let extensions process additional data into an import patch
 # list of identifiers to be executed in order
 extrapreimport = []  # run before commit
-extrapostimport = [] # run after commit
+extrapostimport = []  # run after commit
 # mapping from identifier to actual import function
 #
 # 'preimport' are run before the commit is made and are provided the following
@@ -1362,6 +1626,7 @@
 # - ctx: the changectx created by import.
 extrapostimportmap = {}
 
+
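A hedged sketch of how an extension might use this facility; the identifier, the b'ticket' header key, and the function are made up, but the registration pattern follows the lists and maps defined above:

    # in a hypothetical extension module
    from mercurial import cmdutil

    def _recordticket(repo, patchdata, extra, opts):
        # copy a (hypothetical) header parsed from the patch into the
        # commit extras before the commit is made
        ticket = patchdata.get(b'ticket')
        if ticket:
            extra[b'ticket'] = ticket

    cmdutil.extrapreimport.append(b'ticket')
    cmdutil.extrapreimportmap[b'ticket'] = _recordticket
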
 def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
     """Utility function used by commands.import to import a single patch
 
@@ -1383,21 +1648,21 @@
     # avoid cycle context -> subrepo -> cmdutil
     from . import context
 
-    tmpname = patchdata.get('filename')
-    message = patchdata.get('message')
-    user = opts.get('user') or patchdata.get('user')
-    date = opts.get('date') or patchdata.get('date')
-    branch = patchdata.get('branch')
-    nodeid = patchdata.get('nodeid')
-    p1 = patchdata.get('p1')
-    p2 = patchdata.get('p2')
-
-    nocommit = opts.get('no_commit')
-    importbranch = opts.get('import_branch')
-    update = not opts.get('bypass')
-    strip = opts["strip"]
-    prefix = opts["prefix"]
-    sim = float(opts.get('similarity') or 0)
+    tmpname = patchdata.get(b'filename')
+    message = patchdata.get(b'message')
+    user = opts.get(b'user') or patchdata.get(b'user')
+    date = opts.get(b'date') or patchdata.get(b'date')
+    branch = patchdata.get(b'branch')
+    nodeid = patchdata.get(b'nodeid')
+    p1 = patchdata.get(b'p1')
+    p2 = patchdata.get(b'p2')
+
+    nocommit = opts.get(b'no_commit')
+    importbranch = opts.get(b'import_branch')
+    update = not opts.get(b'bypass')
+    strip = opts[b"strip"]
+    prefix = opts[b"prefix"]
+    sim = float(opts.get(b'similarity') or 0)
 
     if not tmpname:
         return None, None, False
@@ -1414,13 +1679,13 @@
     else:
         # launch the editor
         message = None
-    ui.debug('message:\n%s\n' % (message or ''))
+    ui.debug(b'message:\n%s\n' % (message or b''))
 
     if len(parents) == 1:
         parents.append(repo[nullid])
-    if opts.get('exact'):
+    if opts.get(b'exact'):
         if not nodeid or not p1:
-            raise error.Abort(_('not a Mercurial patch'))
+            raise error.Abort(_(b'not a Mercurial patch'))
         p1 = repo[p1]
         p2 = repo[p2 or nullid]
     elif p2:
@@ -1436,8 +1701,12 @@
         except error.RepoError:
             p1, p2 = parents
         if p2.node() == nullid:
-            ui.warn(_("warning: import the patch as a normal revision\n"
-                      "(use --exact to import the patch as a merge)\n"))
+            ui.warn(
+                _(
+                    b"warning: import the patch as a normal revision\n"
+                    b"(use --exact to import the patch as a merge)\n"
+                )
+            )
     else:
         p1, p2 = parents
 
@@ -1448,14 +1717,22 @@
         if p2 != parents[1]:
             repo.setparents(p1.node(), p2.node())
 
-        if opts.get('exact') or importbranch:
-            repo.dirstate.setbranch(branch or 'default')
-
-        partial = opts.get('partial', False)
+        if opts.get(b'exact') or importbranch:
+            repo.dirstate.setbranch(branch or b'default')
+
+        partial = opts.get(b'partial', False)
         files = set()
         try:
-            patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
-                        files=files, eolmode=None, similarity=sim / 100.0)
+            patch.patch(
+                ui,
+                repo,
+                tmpname,
+                strip=strip,
+                prefix=prefix,
+                files=files,
+                eolmode=None,
+                similarity=sim / 100.0,
+            )
         except error.PatchError as e:
             if not partial:
                 raise error.Abort(pycompat.bytestr(e))
@@ -1467,71 +1744,85 @@
             if message:
                 msgs.append(message)
         else:
-            if opts.get('exact') or p2:
+            if opts.get(b'exact') or p2:
                 # If you got here, you either used --force and know what
                 # you are doing, or used --exact or a merge patch while
                 # updated to its first parent.
                 m = None
             else:
                 m = scmutil.matchfiles(repo, files or [])
-            editform = mergeeditform(repo[None], 'import.normal')
-            if opts.get('exact'):
+            editform = mergeeditform(repo[None], b'import.normal')
+            if opts.get(b'exact'):
                 editor = None
             else:
-                editor = getcommiteditor(editform=editform,
-                                         **pycompat.strkwargs(opts))
+                editor = getcommiteditor(
+                    editform=editform, **pycompat.strkwargs(opts)
+                )
             extra = {}
             for idfunc in extrapreimport:
                 extrapreimportmap[idfunc](repo, patchdata, extra, opts)
             overrides = {}
             if partial:
-                overrides[('ui', 'allowemptycommit')] = True
-            with repo.ui.configoverride(overrides, 'import'):
-                n = repo.commit(message, user,
-                                date, match=m,
-                                editor=editor, extra=extra)
+                overrides[(b'ui', b'allowemptycommit')] = True
+            with repo.ui.configoverride(overrides, b'import'):
+                n = repo.commit(
+                    message, user, date, match=m, editor=editor, extra=extra
+                )
                 for idfunc in extrapostimport:
                     extrapostimportmap[idfunc](repo[n])
     else:
-        if opts.get('exact') or importbranch:
-            branch = branch or 'default'
+        if opts.get(b'exact') or importbranch:
+            branch = branch or b'default'
         else:
             branch = p1.branch()
         store = patch.filestore()
         try:
             files = set()
             try:
-                patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
-                                files, eolmode=None)
+                patch.patchrepo(
+                    ui,
+                    repo,
+                    p1,
+                    store,
+                    tmpname,
+                    strip,
+                    prefix,
+                    files,
+                    eolmode=None,
+                )
             except error.PatchError as e:
                 raise error.Abort(stringutil.forcebytestr(e))
-            if opts.get('exact'):
+            if opts.get(b'exact'):
                 editor = None
             else:
-                editor = getcommiteditor(editform='import.bypass')
-            memctx = context.memctx(repo, (p1.node(), p2.node()),
-                                    message,
-                                    files=files,
-                                    filectxfn=store,
-                                    user=user,
-                                    date=date,
-                                    branch=branch,
-                                    editor=editor)
+                editor = getcommiteditor(editform=b'import.bypass')
+            memctx = context.memctx(
+                repo,
+                (p1.node(), p2.node()),
+                message,
+                files=files,
+                filectxfn=store,
+                user=user,
+                date=date,
+                branch=branch,
+                editor=editor,
+            )
             n = memctx.commit()
         finally:
             store.close()
-    if opts.get('exact') and nocommit:
+    if opts.get(b'exact') and nocommit:
         # --exact with --no-commit is still useful in that it applies the
         # merge and branch bits
-        ui.warn(_("warning: can't check exact import with --no-commit\n"))
-    elif opts.get('exact') and (not n or hex(n) != nodeid):
-        raise error.Abort(_('patch is damaged or loses information'))
-    msg = _('applied to working directory')
+        ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
+    elif opts.get(b'exact') and (not n or hex(n) != nodeid):
+        raise error.Abort(_(b'patch is damaged or loses information'))
+    msg = _(b'applied to working directory')
     if n:
         # i18n: refers to a short changeset id
-        msg = _('created %s') % short(n)
+        msg = _(b'created %s') % short(n)
     return msg, n, rejects
 
+
 # facility to let extensions include additional data in an exported patch
 # list of identifiers to be executed in order
 extraexport = []
@@ -1540,6 +1831,7 @@
 # it is given two arguments (sequencenumber, changectx)
 extraexportmap = {}
 
+
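Similarly, a hedged registration sketch for the export-side hook; the identifier and header content are invented, but the two-argument signature matches the comment above:

    from mercurial import cmdutil

    def _seqnoheader(seqno, ctx):
        # return extra header text, or None to emit nothing for this rev
        return b'Patch %d of series' % seqno

    cmdutil.extraexport.append(b'seqno')
    cmdutil.extraexportmap[b'seqno'] = _seqnoheader
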
 def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
     node = scmutil.binnode(ctx)
     parents = [p.node() for p in ctx.parents() if p]
@@ -1553,26 +1845,27 @@
         prev = nullid
 
     fm.context(ctx=ctx)
-    fm.plain('# HG changeset patch\n')
-    fm.write('user', '# User %s\n', ctx.user())
-    fm.plain('# Date %d %d\n' % ctx.date())
-    fm.write('date', '#      %s\n', fm.formatdate(ctx.date()))
-    fm.condwrite(branch and branch != 'default',
-                 'branch', '# Branch %s\n', branch)
-    fm.write('node', '# Node ID %s\n', hex(node))
-    fm.plain('# Parent  %s\n' % hex(prev))
+    fm.plain(b'# HG changeset patch\n')
+    fm.write(b'user', b'# User %s\n', ctx.user())
+    fm.plain(b'# Date %d %d\n' % ctx.date())
+    fm.write(b'date', b'#      %s\n', fm.formatdate(ctx.date()))
+    fm.condwrite(
+        branch and branch != b'default', b'branch', b'# Branch %s\n', branch
+    )
+    fm.write(b'node', b'# Node ID %s\n', hex(node))
+    fm.plain(b'# Parent  %s\n' % hex(prev))
     if len(parents) > 1:
-        fm.plain('# Parent  %s\n' % hex(parents[1]))
-    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name='node'))
+        fm.plain(b'# Parent  %s\n' % hex(parents[1]))
+    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))
 
     # TODO: redesign extraexportmap function to support formatter
     for headerid in extraexport:
         header = extraexportmap[headerid](seqno, ctx)
         if header is not None:
-            fm.plain('# %s\n' % header)
-
-    fm.write('desc', '%s\n', ctx.description().rstrip())
-    fm.plain('\n')
+            fm.plain(b'# %s\n' % header)
+
+    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
+    fm.plain(b'\n')
 
     if fm.isplain():
         chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
@@ -1583,17 +1876,20 @@
         # TODO: make it structured?
         fm.data(diff=b''.join(chunkiter))
 
+
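For reference, the header block _exportsingle() writes above renders along these lines (user and hashes are hypothetical; the date shown is the epoch so the two date lines stay consistent):

    # HG changeset patch
    # User Alice Example <alice@example.com>
    # Date 0 0
    #      Thu Jan 01 00:00:00 1970 +0000
    # Node ID 0123456789abcdef0123456789abcdef01234567
    # Parent  fedcba9876543210fedcba9876543210fedcba98

followed by the description and the diff; a '# Branch' line appears only for non-default branches, and a second '# Parent' line only for merges.
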
 def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
     """Export changesets to stdout or a single file"""
     for seqno, rev in enumerate(revs, 1):
         ctx = repo[rev]
-        if not dest.startswith('<'):
-            repo.ui.note("%s\n" % dest)
+        if not dest.startswith(b'<'):
+            repo.ui.note(b"%s\n" % dest)
         fm.startitem()
         _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts)
 
-def _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, diffopts,
-                      match):
+
+def _exportfntemplate(
+    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
+):
     """Export changesets to possibly multiple files"""
     total = len(revs)
     revwidth = max(len(str(rev)) for rev in revs)
@@ -1601,18 +1897,21 @@
 
     for seqno, rev in enumerate(revs, 1):
         ctx = repo[rev]
-        dest = makefilename(ctx, fntemplate,
-                            total=total, seqno=seqno, revwidth=revwidth)
+        dest = makefilename(
+            ctx, fntemplate, total=total, seqno=seqno, revwidth=revwidth
+        )
         filemap.setdefault(dest, []).append((seqno, rev))
 
     for dest in filemap:
         with formatter.maybereopen(basefm, dest) as fm:
-            repo.ui.note("%s\n" % dest)
+            repo.ui.note(b"%s\n" % dest)
             for seqno, rev in filemap[dest]:
                 fm.startitem()
                 ctx = repo[rev]
-                _exportsingle(repo, ctx, fm, match, switch_parent, seqno,
-                              diffopts)
+                _exportsingle(
+                    repo, ctx, fm, match, switch_parent, seqno, diffopts
+                )
+
 
 def _prefetchchangedfiles(repo, revs, match):
     allfiles = set()
@@ -1622,8 +1921,16 @@
                 allfiles.add(file)
     scmutil.prefetchfiles(repo, revs, scmutil.matchfiles(repo, allfiles))
 
-def export(repo, revs, basefm, fntemplate='hg-%h.patch', switch_parent=False,
-           opts=None, match=None):
+
+def export(
+    repo,
+    revs,
+    basefm,
+    fntemplate=b'hg-%h.patch',
+    switch_parent=False,
+    opts=None,
+    match=None,
+):
     '''export changesets as hg patches
 
     Args:
@@ -1649,40 +1956,55 @@
     _prefetchchangedfiles(repo, revs, match)
 
     if not fntemplate:
-        _exportfile(repo, revs, basefm, '<unnamed>', switch_parent, opts, match)
+        _exportfile(
+            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
+        )
     else:
-        _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, opts,
-                          match)
+        _exportfntemplate(
+            repo, revs, basefm, fntemplate, switch_parent, opts, match
+        )
+
 
 def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
     """Export changesets to the given file stream"""
     _prefetchchangedfiles(repo, revs, match)
 
-    dest = getattr(fp, 'name', '<unnamed>')
-    with formatter.formatter(repo.ui, fp, 'export', {}) as fm:
+    dest = getattr(fp, 'name', b'<unnamed>')
+    with formatter.formatter(repo.ui, fp, b'export', {}) as fm:
         _exportfile(repo, revs, fm, dest, switch_parent, opts, match)
 
+
 def showmarker(fm, marker, index=None):
     """utility function to display obsolescence marker in a readable way
 
     To be used by debug function."""
     if index is not None:
-        fm.write('index', '%i ', index)
-    fm.write('prednode', '%s ', hex(marker.prednode()))
+        fm.write(b'index', b'%i ', index)
+    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
     succs = marker.succnodes()
-    fm.condwrite(succs, 'succnodes', '%s ',
-                 fm.formatlist(map(hex, succs), name='node'))
-    fm.write('flag', '%X ', marker.flags())
+    fm.condwrite(
+        succs,
+        b'succnodes',
+        b'%s ',
+        fm.formatlist(map(hex, succs), name=b'node'),
+    )
+    fm.write(b'flag', b'%X ', marker.flags())
     parents = marker.parentnodes()
     if parents is not None:
-        fm.write('parentnodes', '{%s} ',
-                 fm.formatlist(map(hex, parents), name='node', sep=', '))
-    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
+        fm.write(
+            b'parentnodes',
+            b'{%s} ',
+            fm.formatlist(map(hex, parents), name=b'node', sep=b', '),
+        )
+    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
     meta = marker.metadata().copy()
-    meta.pop('date', None)
+    meta.pop(b'date', None)
     smeta = pycompat.rapply(pycompat.maybebytestr, meta)
-    fm.write('metadata', '{%s}', fm.formatdict(smeta, fmt='%r: %r', sep=', '))
-    fm.plain('\n')
+    fm.write(
+        b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ')
+    )
+    fm.plain(b'\n')
+
 
 def finddate(ui, repo, date):
     """Find the tipmost changeset that matches the given date spec"""
@@ -1696,14 +2018,17 @@
         if df(d[0]):
             results[ctx.rev()] = d
 
-    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
+    for ctx in walkchangerevs(repo, m, {b'rev': None}, prep):
         rev = ctx.rev()
         if rev in results:
-            ui.status(_("found revision %s from %s\n") %
-                      (rev, dateutil.datestr(results[rev])))
-            return '%d' % rev
-
-    raise error.Abort(_("revision matching date not found"))
+            ui.status(
+                _(b"found revision %s from %s\n")
+                % (rev, dateutil.datestr(results[rev]))
+            )
+            return b'%d' % rev
+
+    raise error.Abort(_(b"revision matching date not found"))
+
 
 def increasingwindows(windowsize=8, sizelimit=512):
     while True:
@@ -1711,24 +2036,27 @@
         if windowsize < sizelimit:
             windowsize *= 2
 
+
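The generator above yields geometrically growing window sizes capped at sizelimit; a quick check with the defaults (a sketch, assuming increasingwindows is importable):

    import itertools
    # list(itertools.islice(increasingwindows(), 9))
    # -> [8, 16, 32, 64, 128, 256, 512, 512, 512]
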
 def _walkrevs(repo, opts):
     # Default --rev value depends on --follow but --follow behavior
     # depends on revisions resolved from --rev...
-    follow = opts.get('follow') or opts.get('follow_first')
-    if opts.get('rev'):
-        revs = scmutil.revrange(repo, opts['rev'])
+    follow = opts.get(b'follow') or opts.get(b'follow_first')
+    if opts.get(b'rev'):
+        revs = scmutil.revrange(repo, opts[b'rev'])
     elif follow and repo.dirstate.p1() == nullid:
         revs = smartset.baseset()
     elif follow:
-        revs = repo.revs('reverse(:.)')
+        revs = repo.revs(b'reverse(:.)')
     else:
         revs = smartset.spanset(repo)
         revs.reverse()
     return revs
 
+
 class FileWalkError(Exception):
     pass
 
+
 def walkfilerevs(repo, match, follow, revs, fncache):
     '''Walks the file history for the matched files.
 
@@ -1740,6 +2068,7 @@
     wanted = set()
     copies = []
     minrev, maxrev = min(revs), max(revs)
+
     def filerevs(filelog, last):
         """
         Only files, no patterns.  Check the history of each file.
@@ -1764,17 +2093,24 @@
                 if p != nullrev:
                     parentlinkrevs.append(filelog.linkrev(p))
             n = filelog.node(j)
-            revs.append((linkrev, parentlinkrevs,
-                         follow and filelog.renamed(n)))
+            revs.append(
+                (linkrev, parentlinkrevs, follow and filelog.renamed(n))
+            )
 
         return reversed(revs)
+
     def iterfiles():
-        pctx = repo['.']
+        pctx = repo[b'.']
         for filename in match.files():
             if follow:
                 if filename not in pctx:
-                    raise error.Abort(_('cannot follow file not in parent '
-                                       'revision: "%s"') % filename)
+                    raise error.Abort(
+                        _(
+                            b'cannot follow file not in parent '
+                            b'revision: "%s"'
+                        )
+                        % filename
+                    )
                 yield filename, pctx[filename].filenode()
             else:
                 yield filename, None
@@ -1789,8 +2125,9 @@
                 # try to find matching entries on the slow path.
                 if follow:
                     raise error.Abort(
-                        _('cannot follow nonexistent file: "%s"') % file_)
-                raise FileWalkError("Cannot walk via filelog")
+                        _(b'cannot follow nonexistent file: "%s"') % file_
+                    )
+                raise FileWalkError(b"Cannot walk via filelog")
             else:
                 continue
 
@@ -1826,6 +2163,7 @@
 
     return wanted
 
+
 class _followfilter(object):
     def __init__(self, repo, onlyfirst=False):
         self.repo = repo
@@ -1838,8 +2176,9 @@
             if self.onlyfirst:
                 return self.repo.changelog.parentrevs(rev)[0:1]
             else:
-                return filter(lambda x: x != nullrev,
-                              self.repo.changelog.parentrevs(rev))
+                return filter(
+                    lambda x: x != nullrev, self.repo.changelog.parentrevs(rev)
+                )
 
         if self.startrev == nullrev:
             self.startrev = rev
@@ -1864,6 +2203,7 @@
 
         return False
 
+
 def walkchangerevs(repo, match, opts, prepare):
     '''Iterate over files and the revs in which they changed.
 
@@ -1879,13 +2219,13 @@
     yielding each context, the iterator will first call the prepare
     function on each context in the window in forward order.'''
 
-    allfiles = opts.get('all_files')
-    follow = opts.get('follow') or opts.get('follow_first')
+    allfiles = opts.get(b'all_files')
+    follow = opts.get(b'follow') or opts.get(b'follow_first')
     revs = _walkrevs(repo, opts)
     if not revs:
         return []
     wanted = set()
-    slowpath = match.anypats() or (not match.always() and opts.get('removed'))
+    slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
     fncache = {}
     change = repo.__getitem__
 
@@ -1909,7 +2249,7 @@
             # of the paths was not a file. Check to see if at least one of them
             # existed in history, otherwise simply return
             for path in match.files():
-                if path == '.' or path in repo.store:
+                if path == b'.' or path in repo.store:
                     break
             else:
                 return []
@@ -1919,8 +2259,9 @@
         # changed files
 
         if follow:
-            raise error.Abort(_('can only follow copies/renames for explicit '
-                               'filenames'))
+            raise error.Abort(
+                _(b'can only follow copies/renames for explicit filenames')
+            )
 
         # The slow path checks files modified in every changeset.
         # This is really slow on large repos, so compute the set lazily.
@@ -1957,7 +2298,7 @@
 
     # it might be worthwhile to do this in the iterator if the rev range
     # is descending and the prune args are all within that range
-    for rev in opts.get('prune', ()):
+    for rev in opts.get(b'prune', ()):
         rev = repo[rev].rev()
         ff = _followfilter(repo)
         stop = min(revs[0], revs[-1])
@@ -1969,10 +2310,13 @@
     # revision range, yielding only revisions in wanted.
     def iterate():
         if follow and match.always():
-            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
+            ff = _followfilter(repo, onlyfirst=opts.get(b'follow_first'))
+
             def want(rev):
                 return ff.match(rev) and rev in wanted
+
         else:
+
             def want(rev):
                 return rev in wanted
 
@@ -1991,6 +2335,7 @@
                 fns = fncache.get(rev)
                 ctx = change(rev)
                 if not fns:
+
                     def fns_generator():
                         if allfiles:
                             fiter = iter(ctx)
@@ -1999,6 +2344,7 @@
                         for f in fiter:
                             if match(f):
                                 yield f
+
                     fns = fns_generator()
                 prepare(ctx, fns)
             for rev in nrevs:
@@ -2009,6 +2355,7 @@
 
     return iterate()
 
+
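A hedged usage sketch of walkchangerevs(), modeled on the finddate() caller above (the repo and match objects are assumed to exist):

    def prep(ctx, fns):
        # inspect each changeset and the files that changed in it
        pass

    # for ctx in walkchangerevs(repo, match, {b'rev': None}, prep):
    #     process(ctx)  # contexts arrive in the windowed order described above
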
 def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
     bad = []
 
@@ -2025,16 +2372,24 @@
     dirstate = repo.dirstate
     # We don't want to just call wctx.walk here, since it would return a lot
     # of clean files, which we aren't interested in and which take time to
     # process.
-    for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
-                                  unknown=True, ignored=False, full=False)):
+    for f in sorted(
+        dirstate.walk(
+            badmatch,
+            subrepos=sorted(wctx.substate),
+            unknown=True,
+            ignored=False,
+            full=False,
+        )
+    ):
         exact = match.exact(f)
         if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
             if cca:
                 cca(f)
             names.append(f)
             if ui.verbose or not exact:
-                ui.status(_('adding %s\n') % uipathfn(f),
-                          label='ui.addremove.added')
+                ui.status(
+                    _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
+                )
 
     for subpath in sorted(wctx.substate):
         sub = wctx.sub(subpath)
@@ -2043,33 +2398,39 @@
             subprefix = repo.wvfs.reljoin(prefix, subpath)
             subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
             if opts.get(r'subrepos'):
-                bad.extend(sub.add(ui, submatch, subprefix, subuipathfn, False,
-                                   **opts))
+                bad.extend(
+                    sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
+                )
             else:
-                bad.extend(sub.add(ui, submatch, subprefix, subuipathfn, True,
-                                   **opts))
+                bad.extend(
+                    sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
+                )
         except error.LookupError:
-            ui.status(_("skipping missing subrepository: %s\n")
-                           % uipathfn(subpath))
+            ui.status(
+                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
+            )
 
     if not opts.get(r'dry_run'):
         rejected = wctx.add(names, prefix)
         bad.extend(f for f in rejected if f in match.files())
     return bad
 
+
 def addwebdirpath(repo, serverpath, webconf):
     webconf[serverpath] = repo.root
-    repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))
-
-    for r in repo.revs('filelog("path:.hgsub")'):
+    repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))
+
+    for r in repo.revs(b'filelog("path:.hgsub")'):
         ctx = repo[r]
         for subpath in ctx.substate:
             ctx.sub(subpath).addwebdirpath(serverpath, webconf)
 
-def forget(ui, repo, match, prefix, uipathfn, explicitonly, dryrun,
-           interactive):
+
+def forget(
+    ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
+):
     if dryrun and interactive:
-        raise error.Abort(_("cannot specify both --dry-run and --interactive"))
+        raise error.Abort(_(b"cannot specify both --dry-run and --interactive"))
     bad = []
     badfn = lambda x, y: bad.append(x) or match.bad(x, y)
     wctx = repo[None]
@@ -2086,14 +2447,19 @@
         subprefix = repo.wvfs.reljoin(prefix, subpath)
         subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
         try:
-            subbad, subforgot = sub.forget(submatch, subprefix, subuipathfn,
-                                           dryrun=dryrun,
-                                           interactive=interactive)
-            bad.extend([subpath + '/' + f for f in subbad])
-            forgot.extend([subpath + '/' + f for f in subforgot])
+            subbad, subforgot = sub.forget(
+                submatch,
+                subprefix,
+                subuipathfn,
+                dryrun=dryrun,
+                interactive=interactive,
+            )
+            bad.extend([subpath + b'/' + f for f in subbad])
+            forgot.extend([subpath + b'/' + f for f in subforgot])
         except error.LookupError:
-            ui.status(_("skipping missing subrepository: %s\n")
-                           % uipathfn(subpath))
+            ui.status(
+                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
+            )
 
     if not explicitonly:
         for f in match.files():
@@ -2106,42 +2472,51 @@
                         # purely from data cached by the status walk above.
                         if repo.dirstate.normalize(f) in repo.dirstate:
                             continue
-                        ui.warn(_('not removing %s: '
-                                  'file is already untracked\n')
-                                % uipathfn(f))
+                        ui.warn(
+                            _(
+                                b'not removing %s: '
+                                b'file is already untracked\n'
+                            )
+                            % uipathfn(f)
+                        )
                     bad.append(f)
 
     if interactive:
-        responses = _('[Ynsa?]'
-                      '$$ &Yes, forget this file'
-                      '$$ &No, skip this file'
-                      '$$ &Skip remaining files'
-                      '$$ Include &all remaining files'
-                      '$$ &? (display help)')
+        responses = _(
+            b'[Ynsa?]'
+            b'$$ &Yes, forget this file'
+            b'$$ &No, skip this file'
+            b'$$ &Skip remaining files'
+            b'$$ Include &all remaining files'
+            b'$$ &? (display help)'
+        )
         for filename in forget[:]:
-            r = ui.promptchoice(_('forget %s %s') %
-                                (uipathfn(filename), responses))
-            if r == 4: # ?
+            r = ui.promptchoice(
+                _(b'forget %s %s') % (uipathfn(filename), responses)
+            )
+            if r == 4:  # ?
                 while r == 4:
                     for c, t in ui.extractchoices(responses)[1]:
-                        ui.write('%s - %s\n' % (c, encoding.lower(t)))
-                    r = ui.promptchoice(_('forget %s %s') %
-                                        (uipathfn(filename), responses))
-            if r == 0: # yes
+                        ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
+                    r = ui.promptchoice(
+                        _(b'forget %s %s') % (uipathfn(filename), responses)
+                    )
+            if r == 0:  # yes
                 continue
-            elif r == 1: # no
+            elif r == 1:  # no
                 forget.remove(filename)
-            elif r == 2: # Skip
+            elif r == 2:  # Skip
                 fnindex = forget.index(filename)
                 del forget[fnindex:]
                 break
-            elif r == 3: # All
+            elif r == 3:  # All
                 break
 
     for f in forget:
         if ui.verbose or not match.exact(f) or interactive:
-            ui.status(_('removing %s\n') % uipathfn(f),
-                      label='ui.addremove.removed')
+            ui.status(
+                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
+            )
 
     if not dryrun:
         rejected = wctx.forget(forget, prefix)
@@ -2149,16 +2524,17 @@
         forgot.extend(f for f in forget if f not in rejected)
     return bad, forgot
 
+
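For reference, the interactive prompt assembled from the responses string above renders along these lines (the filename is hypothetical):

    forget lib/widget.py [Ynsa?]

where answering '?' redisplays the choices, e.g. 'y - yes, forget this file', as driven by ui.extractchoices() in the loop above.
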
 def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
     ret = 1
 
-    needsfctx = ui.verbose or {'size', 'flags'} & fm.datahint()
+    needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
     for f in ctx.matches(m):
         fm.startitem()
         fm.context(ctx=ctx)
         if needsfctx:
             fc = ctx[f]
-            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
+            fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
         fm.data(path=f)
         fm.plain(fmt % uipathfn(f))
         ret = 0
@@ -2166,21 +2542,27 @@
     for subpath in sorted(ctx.substate):
         submatch = matchmod.subdirmatcher(subpath, m)
         subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
-        if (subrepos or m.exact(subpath) or any(submatch.files())):
+        if subrepos or m.exact(subpath) or any(submatch.files()):
             sub = ctx.sub(subpath)
             try:
                 recurse = m.exact(subpath) or subrepos
-                if sub.printfiles(ui, submatch, subuipathfn, fm, fmt,
-                                  recurse) == 0:
+                if (
+                    sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
+                    == 0
+                ):
                     ret = 0
             except error.LookupError:
-                ui.status(_("skipping missing subrepository: %s\n")
-                               % uipathfn(subpath))
+                ui.status(
+                    _(b"skipping missing subrepository: %s\n")
+                    % uipathfn(subpath)
+                )
 
     return ret
 
-def remove(ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun,
-           warnings=None):
+
+def remove(
+    ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
+):
     ret = 0
     s = repo.status(match=m, clean=True)
     modified, added, deleted, clean = s[0], s[1], s[3], s[6]
@@ -2194,8 +2576,9 @@
         warn = False
 
     subs = sorted(wctx.substate)
-    progress = ui.makeprogress(_('searching'), total=len(subs),
-                               unit=_('subrepos'))
+    progress = ui.makeprogress(
+        _(b'searching'), total=len(subs), unit=_(b'subrepos')
+    )
     for subpath in subs:
         submatch = matchmod.subdirmatcher(subpath, m)
         subprefix = repo.wvfs.reljoin(prefix, subpath)
@@ -2204,39 +2587,52 @@
             progress.increment()
             sub = wctx.sub(subpath)
             try:
-                if sub.removefiles(submatch, subprefix, subuipathfn, after,
-                                   force, subrepos, dryrun, warnings):
+                if sub.removefiles(
+                    submatch,
+                    subprefix,
+                    subuipathfn,
+                    after,
+                    force,
+                    subrepos,
+                    dryrun,
+                    warnings,
+                ):
                     ret = 1
             except error.LookupError:
-                warnings.append(_("skipping missing subrepository: %s\n")
-                               % uipathfn(subpath))
+                warnings.append(
+                    _(b"skipping missing subrepository: %s\n")
+                    % uipathfn(subpath)
+                )
     progress.complete()
 
     # warn about failure to delete explicit files/dirs
     deleteddirs = util.dirs(deleted)
     files = m.files()
-    progress = ui.makeprogress(_('deleting'), total=len(files),
-                               unit=_('files'))
+    progress = ui.makeprogress(
+        _(b'deleting'), total=len(files), unit=_(b'files')
+    )
     for f in files:
+
         def insubrepo():
             for subpath in wctx.substate:
-                if f.startswith(subpath + '/'):
+                if f.startswith(subpath + b'/'):
                     return True
             return False
 
         progress.increment()
         isdir = f in deleteddirs or wctx.hasdir(f)
-        if (f in repo.dirstate or isdir or f == '.'
-            or insubrepo() or f in subs):
+        if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
             continue
 
         if repo.wvfs.exists(f):
             if repo.wvfs.isdir(f):
-                warnings.append(_('not removing %s: no tracked files\n')
-                        % uipathfn(f))
+                warnings.append(
+                    _(b'not removing %s: no tracked files\n') % uipathfn(f)
+                )
             else:
-                warnings.append(_('not removing %s: file is untracked\n')
-                        % uipathfn(f))
+                warnings.append(
+                    _(b'not removing %s: file is untracked\n') % uipathfn(f)
+                )
         # missing files will generate a warning elsewhere
         ret = 1
     progress.complete()
@@ -2246,40 +2642,54 @@
     elif after:
         list = deleted
         remaining = modified + added + clean
-        progress = ui.makeprogress(_('skipping'), total=len(remaining),
-                                   unit=_('files'))
+        progress = ui.makeprogress(
+            _(b'skipping'), total=len(remaining), unit=_(b'files')
+        )
         for f in remaining:
             progress.increment()
             if ui.verbose or (f in files):
-                warnings.append(_('not removing %s: file still exists\n')
-                                % uipathfn(f))
+                warnings.append(
+                    _(b'not removing %s: file still exists\n') % uipathfn(f)
+                )
             ret = 1
         progress.complete()
     else:
         list = deleted + clean
-        progress = ui.makeprogress(_('skipping'),
-                                   total=(len(modified) + len(added)),
-                                   unit=_('files'))
+        progress = ui.makeprogress(
+            _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
+        )
         for f in modified:
             progress.increment()
-            warnings.append(_('not removing %s: file is modified (use -f'
-                      ' to force removal)\n') % uipathfn(f))
+            warnings.append(
+                _(
+                    b'not removing %s: file is modified (use -f'
+                    b' to force removal)\n'
+                )
+                % uipathfn(f)
+            )
             ret = 1
         for f in added:
             progress.increment()
-            warnings.append(_("not removing %s: file has been marked for add"
-                      " (use 'hg forget' to undo add)\n") % uipathfn(f))
+            warnings.append(
+                _(
+                    b"not removing %s: file has been marked for add"
+                    b" (use 'hg forget' to undo add)\n"
+                )
+                % uipathfn(f)
+            )
             ret = 1
         progress.complete()
 
     list = sorted(list)
-    progress = ui.makeprogress(_('deleting'), total=len(list),
-                               unit=_('files'))
+    progress = ui.makeprogress(
+        _(b'deleting'), total=len(list), unit=_(b'files')
+    )
     for f in list:
         if ui.verbose or not m.exact(f):
             progress.increment()
-            ui.status(_('removing %s\n') % uipathfn(f),
-                      label='ui.addremove.removed')
+            ui.status(
+                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
+            )
     progress.complete()
 
     if not dryrun:
@@ -2287,9 +2697,10 @@
             if not after:
                 for f in list:
                     if f in added:
-                        continue # we never unlink added files on remove
-                    rmdir = repo.ui.configbool('experimental',
-                                               'removeemptydirs')
+                        continue  # we never unlink added files on remove
+                    rmdir = repo.ui.configbool(
+                        b'experimental', b'removeemptydirs'
+                    )
                     repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
             repo[None].forget(list)
 
@@ -2299,8 +2710,10 @@
 
     return ret
 
+
 def _catfmtneedsdata(fm):
-    return not fm.datahint() or 'data' in fm.datahint()
+    return not fm.datahint() or b'data' in fm.datahint()
+
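_catfmtneedsdata() gates the expensive file read in `hg cat`: when the active
formatter's datahint() names the fields it will emit and b'data' is not among
them, reading the content can be skipped. A minimal sketch of the gate (helper
name hypothetical):

    def _catdata(fm, ctx, path):
        # only pay for the file read when the formatter's datahint() is
        # empty (plain output) or explicitly asks for the b'data' field
        if _catfmtneedsdata(fm):
            return ctx[path].data()
        return b''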
 
 def _updatecatformatter(fm, ctx, matcher, path, decode):
     """Hook for adding data to the formatter used by ``hg cat``.
@@ -2317,9 +2730,10 @@
             data = ctx.repo().wwritedata(path, data)
     fm.startitem()
     fm.context(ctx=ctx)
-    fm.write('data', '%s', data)
+    fm.write(b'data', b'%s', data)
     fm.data(path=path)
 
+
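Since _updatecatformatter() is documented as a hook, an extension can wrap it
to attach extra fields; a hedged sketch using extensions.wrapfunction (the
extra b'node' field is purely illustrative):

    from mercurial import cmdutil, extensions

    def _catwithnode(orig, fm, ctx, matcher, path, decode):
        orig(fm, ctx, matcher, path, decode)
        fm.data(node=ctx.hex())  # illustrative extra field

    def extsetup(ui):
        extensions.wrapfunction(cmdutil, '_updatecatformatter', _catwithnode)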
 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
     err = 1
     opts = pycompat.byteskwargs(opts)
@@ -2327,15 +2741,16 @@
     def write(path):
         filename = None
         if fntemplate:
-            filename = makefilename(ctx, fntemplate,
-                                    pathname=os.path.join(prefix, path))
+            filename = makefilename(
+                ctx, fntemplate, pathname=os.path.join(prefix, path)
+            )
             # attempt to create the directory if it does not already exist
             try:
                 os.makedirs(os.path.dirname(filename))
             except OSError:
                 pass
         with formatter.maybereopen(basefm, filename) as fm:
-            _updatecatformatter(fm, ctx, matcher, path, opts.get('decode'))
+            _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))
 
     # Automation often uses hg cat on single files, so special case it
     # for performance to avoid the cost of parsing the manifest.
@@ -2365,50 +2780,59 @@
         try:
             submatch = matchmod.subdirmatcher(subpath, matcher)
             subprefix = os.path.join(prefix, subpath)
-            if not sub.cat(submatch, basefm, fntemplate, subprefix,
-                           **pycompat.strkwargs(opts)):
+            if not sub.cat(
+                submatch,
+                basefm,
+                fntemplate,
+                subprefix,
+                **pycompat.strkwargs(opts)
+            ):
                 err = 0
         except error.RepoLookupError:
-            ui.status(_("skipping missing subrepository: %s\n") %
-                      uipathfn(subpath))
+            ui.status(
+                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
+            )
 
     return err
 
+
 def commit(ui, repo, commitfunc, pats, opts):
     '''commit the specified files or all outstanding changes'''
-    date = opts.get('date')
+    date = opts.get(b'date')
     if date:
-        opts['date'] = dateutil.parsedate(date)
+        opts[b'date'] = dateutil.parsedate(date)
     message = logmessage(ui, opts)
     matcher = scmutil.match(repo[None], pats, opts)
 
     dsguard = None
     # extract addremove carefully -- this function can be called from a command
     # that doesn't support addremove
-    if opts.get('addremove'):
-        dsguard = dirstateguard.dirstateguard(repo, 'commit')
+    if opts.get(b'addremove'):
+        dsguard = dirstateguard.dirstateguard(repo, b'commit')
     with dsguard or util.nullcontextmanager():
         if dsguard:
             relative = scmutil.anypats(pats, opts)
             uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
-            if scmutil.addremove(repo, matcher, "", uipathfn, opts) != 0:
+            if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
                 raise error.Abort(
-                    _("failed to mark all new/missing files as added/removed"))
+                    _(b"failed to mark all new/missing files as added/removed")
+                )
 
         return commitfunc(ui, repo, message, matcher, opts)
 
+
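The `with dsguard or util.nullcontextmanager():` line in commit() leans on
`or` returning the real guard only when --addremove created one; a standalone
sketch of the same idiom (util.nullcontextmanager exists because this codebase
still supports Python 2 here; on Python 3 alone, contextlib.nullcontext would
do):

    import contextlib

    def run(guard=None):
        # enter the real guard when present, otherwise a no-op context
        with guard or contextlib.nullcontext():
            pass  # body runs either way; rollback is armed only with a guard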
 def samefile(f, ctx1, ctx2):
     if f in ctx1.manifest():
         a = ctx1.filectx(f)
         if f in ctx2.manifest():
             b = ctx2.filectx(f)
-            return (not a.cmp(b)
-                    and a.flags() == b.flags())
+            return not a.cmp(b) and a.flags() == b.flags()
         else:
             return False
     else:
         return f not in ctx2.manifest()
 
+
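samefile() treats a file as unchanged only when both contexts agree on its
existence; spelled out as a truth table (a pure restatement of the code above):

    # in ctx1  in ctx2  result
    #   yes      yes    not a.cmp(b) and a.flags() == b.flags()
    #   yes      no     False
    #   no       yes    False
    #   no       no     True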
 def amend(ui, repo, old, extra, pats, opts):
     # avoid cycle context -> subrepo -> cmdutil
     from . import context
@@ -2416,12 +2840,12 @@
     # amend will reuse the existing user if not specified, but the obsolete
     # marker creation requires that the current user's name is specified.
     if obsolete.isenabled(repo, obsolete.createmarkersopt):
-        ui.username() # raise exception if username not set
-
-    ui.note(_('amending changeset %s\n') % old)
+        ui.username()  # raise exception if username not set
+
+    ui.note(_(b'amending changeset %s\n') % old)
     base = old.p1()
 
-    with repo.wlock(), repo.lock(), repo.transaction('amend'):
+    with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
         # Participating changesets:
         #
         # wctx     o - workingctx that contains changes from working copy
@@ -2441,22 +2865,13 @@
         # Also update it from the wctx
         extra.update(wctx.extra())
 
-        user = opts.get('user') or old.user()
-
-        datemaydiffer = False  # date-only change should be ignored?
-        if opts.get('date') and opts.get('currentdate'):
-            raise error.Abort(_('--date and --currentdate are mutually '
-                                'exclusive'))
-        if opts.get('date'):
-            date = dateutil.parsedate(opts.get('date'))
-        elif opts.get('currentdate'):
-            date = dateutil.makedate()
-        elif (ui.configbool('rewrite', 'update-timestamp')
-              and opts.get('currentdate') is None):
-            date = dateutil.makedate()
-            datemaydiffer = True
-        else:
-            date = old.date()
+        # date-only change should be ignored?
+        datemaydiffer = resolvecommitoptions(ui, opts)
+
+        date = old.date()
+        if opts.get(b'date'):
+            date = dateutil.parsedate(opts.get(b'date'))
+        user = opts.get(b'user') or old.user()
 
         if len(old.parents()) > 1:
             # ctx.files() isn't reliable for merges, so fall back to the
@@ -2470,17 +2885,20 @@
         matcher = scmutil.match(wctx, pats, opts)
         relative = scmutil.anypats(pats, opts)
         uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
-        if (opts.get('addremove')
-            and scmutil.addremove(repo, matcher, "", uipathfn, opts)):
+        if opts.get(b'addremove') and scmutil.addremove(
+            repo, matcher, b"", uipathfn, opts
+        ):
             raise error.Abort(
-                _("failed to mark all new/missing files as added/removed"))
+                _(b"failed to mark all new/missing files as added/removed")
+            )
 
         # Check subrepos. This depends on in-place wctx._status update in
         # subrepo.precommit(). To minimize the risk of this hack, we do
         # nothing if .hgsub does not exist.
-        if '.hgsub' in wctx or '.hgsub' in old:
+        if b'.hgsub' in wctx or b'.hgsub' in old:
             subs, commitsubs, newsubstate = subrepoutil.precommit(
-                ui, wctx, wctx._status, matcher)
+                ui, wctx, wctx._status, matcher
+            )
             # amend should abort if commitsubrepos is enabled
             assert not commitsubs
             if subs:
@@ -2491,7 +2909,7 @@
 
         filestoamend = set(f for f in wctx.files() if matcher(f))
 
-        changes = (len(filestoamend) > 0)
+        changes = len(filestoamend) > 0
         if changes:
             # Recompute copies (avoid recording a -> b -> a)
             copied = copies.pathcopies(base, wctx, matcher)
@@ -2505,8 +2923,11 @@
             # was removed, it's no longer relevant. If X is missing (aka
             # deleted), old X must be preserved.
             files.update(filestoamend)
-            files = [f for f in files if (f not in filestoamend
-                                          or not samefile(f, wctx, base))]
+            files = [
+                f
+                for f in files
+                if (f not in filestoamend or not samefile(f, wctx, base))
+            ]
 
             def filectxfn(repo, ctx_, path):
                 try:
@@ -2524,16 +2945,21 @@
 
                     fctx = wctx[path]
                     flags = fctx.flags()
-                    mctx = context.memfilectx(repo, ctx_,
-                                              fctx.path(), fctx.data(),
-                                              islink='l' in flags,
-                                              isexec='x' in flags,
-                                              copysource=copied.get(path))
+                    mctx = context.memfilectx(
+                        repo,
+                        ctx_,
+                        fctx.path(),
+                        fctx.data(),
+                        islink=b'l' in flags,
+                        isexec=b'x' in flags,
+                        copysource=copied.get(path),
+                    )
                     return mctx
                 except KeyError:
                     return None
+
         else:
-            ui.note(_('copying changeset %s to %s\n') % (old, base))
+            ui.note(_(b'copying changeset %s to %s\n') % (old, base))
 
             # Use version of files as in the old cset
             def filectxfn(repo, ctx_, path):
@@ -2546,39 +2972,43 @@
         # the message of the changeset to amend.
         message = logmessage(ui, opts)
 
-        editform = mergeeditform(old, 'commit.amend')
+        editform = mergeeditform(old, b'commit.amend')
 
         if not message:
             message = old.description()
             # The default, if no message is provided and --edit is not
             # passed, is to invoke the editor but allow --no-edit. If somehow
             # we don't have any description, always start the editor.
-            doedit = not message or opts.get('edit') in [True, None]
+            doedit = not message or opts.get(b'edit') in [True, None]
         else:
             # The default, if a message is provided, is to not invoke the
             # editor but to allow --edit.
-            doedit = opts.get('edit') is True
+            doedit = opts.get(b'edit') is True
         editor = getcommiteditor(edit=doedit, editform=editform)
 
         pureextra = extra.copy()
-        extra['amend_source'] = old.hex()
-
-        new = context.memctx(repo,
-                             parents=[base.node(), old.p2().node()],
-                             text=message,
-                             files=files,
-                             filectxfn=filectxfn,
-                             user=user,
-                             date=date,
-                             extra=extra,
-                             editor=editor)
+        extra[b'amend_source'] = old.hex()
+
+        new = context.memctx(
+            repo,
+            parents=[base.node(), old.p2().node()],
+            text=message,
+            files=files,
+            filectxfn=filectxfn,
+            user=user,
+            date=date,
+            extra=extra,
+            editor=editor,
+        )
 
         newdesc = changelog.stripdesc(new.description())
-        if ((not changes)
+        if (
+            (not changes)
             and newdesc == old.description()
             and user == old.user()
             and (date == old.date() or datemaydiffer)
-            and pureextra == old.extra()):
+            and pureextra == old.extra()
+        ):
             # nothing changed. continuing here would create a new node
             # anyway because of the amend_source noise.
             #
@@ -2586,7 +3016,7 @@
             return old.node()
 
         commitphase = None
-        if opts.get('secret'):
+        if opts.get(b'secret'):
             commitphase = phases.secret
         newid = repo.commitctx(new)
 
@@ -2594,12 +3024,18 @@
         repo.setparents(newid, nullid)
         mapping = {old.node(): (newid,)}
         obsmetadata = None
-        if opts.get('note'):
-            obsmetadata = {'note': encoding.fromlocal(opts['note'])}
-        backup = ui.configbool('rewrite', 'backup-bundle')
-        scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
-                             fixphase=True, targetphase=commitphase,
-                             backup=backup)
+        if opts.get(b'note'):
+            obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
+        backup = ui.configbool(b'rewrite', b'backup-bundle')
+        scmutil.cleanupnodes(
+            repo,
+            mapping,
+            b'amend',
+            metadata=obsmetadata,
+            fixphase=True,
+            targetphase=commitphase,
+            backup=backup,
+        )
 
         # Fixing the dirstate because localrepo.commitctx does not update
         # it. This is rather convenient because we did not need to update
@@ -2622,25 +3058,36 @@
 
     return newid
 
-def commiteditor(repo, ctx, subs, editform=''):
+
+def commiteditor(repo, ctx, subs, editform=b''):
     if ctx.description():
         return ctx.description()
-    return commitforceeditor(repo, ctx, subs, editform=editform,
-                             unchangedmessagedetection=True)
-
-def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
-                      editform='', unchangedmessagedetection=False):
+    return commitforceeditor(
+        repo, ctx, subs, editform=editform, unchangedmessagedetection=True
+    )
+
+
+def commitforceeditor(
+    repo,
+    ctx,
+    subs,
+    finishdesc=None,
+    extramsg=None,
+    editform=b'',
+    unchangedmessagedetection=False,
+):
     if not extramsg:
-        extramsg = _("Leave message empty to abort commit.")
-
-    forms = [e for e in editform.split('.') if e]
-    forms.insert(0, 'changeset')
+        extramsg = _(b"Leave message empty to abort commit.")
+
+    forms = [e for e in editform.split(b'.') if e]
+    forms.insert(0, b'changeset')
     templatetext = None
     while forms:
-        ref = '.'.join(forms)
-        if repo.ui.config('committemplate', ref):
+        ref = b'.'.join(forms)
+        if repo.ui.config(b'committemplate', ref):
             templatetext = committext = buildcommittemplate(
-                repo, ctx, subs, extramsg, ref)
+                repo, ctx, subs, extramsg, ref
+            )
             break
         forms.pop()
     else:
@@ -2655,73 +3102,91 @@
     repo.dirstate.write(tr)
     pending = tr and tr.writepending() and repo.root
 
-    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
-                              editform=editform, pending=pending,
-                              repopath=repo.path, action='commit')
+    editortext = repo.ui.edit(
+        committext,
+        ctx.user(),
+        ctx.extra(),
+        editform=editform,
+        pending=pending,
+        repopath=repo.path,
+        action=b'commit',
+    )
     text = editortext
 
     # strip away anything below this special string (used for editors that want
     # to display the diff)
     stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
     if stripbelow:
-        text = text[:stripbelow.start()]
-
-    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
+        text = text[: stripbelow.start()]
+
+    text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
     os.chdir(olddir)
 
     if finishdesc:
         text = finishdesc(text)
     if not text.strip():
-        raise error.Abort(_("empty commit message"))
+        raise error.Abort(_(b"empty commit message"))
     if unchangedmessagedetection and editortext == templatetext:
-        raise error.Abort(_("commit message unchanged"))
+        raise error.Abort(_(b"commit message unchanged"))
 
     return text
 
+
 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
     ui = repo.ui
     spec = formatter.templatespec(ref, None, None)
     t = logcmdutil.changesettemplater(ui, repo, spec)
-    t.t.cache.update((k, templater.unquotestring(v))
-                     for k, v in repo.ui.configitems('committemplate'))
+    t.t.cache.update(
+        (k, templater.unquotestring(v))
+        for k, v in repo.ui.configitems(b'committemplate')
+    )
 
     if not extramsg:
-        extramsg = '' # ensure that extramsg is string
+        extramsg = b''  # ensure that extramsg is string
 
     ui.pushbuffer()
     t.show(ctx, extramsg=extramsg)
     return ui.popbuffer()
 
+
 def hgprefix(msg):
-    return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
+    return b"\n".join([b"HG: %s" % a for a in msg.split(b"\n") if a])
+
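hgprefix() drops empty lines before prefixing, so blank separators never
become bare "HG:" lines; a worked example (input illustrative):

    hgprefix(b"user: alice\n\nbranch merge")
    # -> b'HG: user: alice\nHG: branch merge'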
 
 def buildcommittext(repo, ctx, subs, extramsg):
     edittext = []
     modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
     if ctx.description():
         edittext.append(ctx.description())
-    edittext.append("")
-    edittext.append("") # Empty line between message and comments.
-    edittext.append(hgprefix(_("Enter commit message."
-                      "  Lines beginning with 'HG:' are removed.")))
+    edittext.append(b"")
+    edittext.append(b"")  # Empty line between message and comments.
+    edittext.append(
+        hgprefix(
+            _(
+                b"Enter commit message."
+                b"  Lines beginning with 'HG:' are removed."
+            )
+        )
+    )
     edittext.append(hgprefix(extramsg))
-    edittext.append("HG: --")
-    edittext.append(hgprefix(_("user: %s") % ctx.user()))
+    edittext.append(b"HG: --")
+    edittext.append(hgprefix(_(b"user: %s") % ctx.user()))
     if ctx.p2():
-        edittext.append(hgprefix(_("branch merge")))
+        edittext.append(hgprefix(_(b"branch merge")))
     if ctx.branch():
-        edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
+        edittext.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
     if bookmarks.isactivewdirparent(repo):
-        edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
-    edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
-    edittext.extend([hgprefix(_("added %s") % f) for f in added])
-    edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
-    edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
+        edittext.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
+    edittext.extend([hgprefix(_(b"subrepo %s") % s) for s in subs])
+    edittext.extend([hgprefix(_(b"added %s") % f) for f in added])
+    edittext.extend([hgprefix(_(b"changed %s") % f) for f in modified])
+    edittext.extend([hgprefix(_(b"removed %s") % f) for f in removed])
     if not added and not modified and not removed:
-        edittext.append(hgprefix(_("no files changed")))
-    edittext.append("")
-
-    return "\n".join(edittext)
+        edittext.append(hgprefix(_(b"no files changed")))
+    edittext.append(b"")
+
+    return b"\n".join(edittext)
+
 
 def commitstatus(repo, node, branch, bheads=None, opts=None):
     if opts is None:
@@ -2729,9 +3194,15 @@
     ctx = repo[node]
     parents = ctx.parents()
 
-    if (not opts.get('amend') and bheads and node not in bheads and not
-        [x for x in parents if x.node() in bheads and x.branch() == branch]):
-        repo.ui.status(_('created new head\n'))
+    if (
+        not opts.get(b'amend')
+        and bheads
+        and node not in bheads
+        and not [
+            x for x in parents if x.node() in bheads and x.branch() == branch
+        ]
+    ):
+        repo.ui.status(_(b'created new head\n'))
         # The message is not printed for initial roots. For the other
         # changesets, it is printed in the following situations:
         #
@@ -2761,19 +3232,25 @@
         #
         # H H  n  head merge: head count decreases
 
-    if not opts.get('close_branch'):
+    if not opts.get(b'close_branch'):
         for r in parents:
             if r.closesbranch() and r.branch() == branch:
-                repo.ui.status(_('reopening closed branch head %d\n') % r.rev())
+                repo.ui.status(
+                    _(b'reopening closed branch head %d\n') % r.rev()
+                )
 
     if repo.ui.debugflag:
-        repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx.hex()))
+        repo.ui.write(
+            _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
+        )
     elif repo.ui.verbose:
-        repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx))
+        repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
+
 
 def postcommitstatus(repo, pats, opts):
     return repo.status(match=scmutil.match(repo[None], pats, opts))
 
+
 def revert(ui, repo, ctx, parents, *pats, **opts):
     opts = pycompat.byteskwargs(opts)
     parent, p2 = parents
@@ -2799,7 +3276,7 @@
         ## filling of the `names` mapping
         # walk dirstate to fill `names`
 
-        interactive = opts.get('interactive', False)
+        interactive = opts.get(b'interactive', False)
         wctx = repo[None]
         m = scmutil.match(wctx, pats, opts)
 
@@ -2818,11 +3295,11 @@
                     return
                 if path in ctx.substate:
                     return
-                path_ = path + '/'
+                path_ = path + b'/'
                 for f in names:
                     if f.startswith(path_):
                         return
-                ui.warn("%s: %s\n" % (uipathfn(path), msg))
+                ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
 
             for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                 if abs not in names:
@@ -2831,8 +3308,9 @@
             # Find status of all file in `names`.
             m = scmutil.matchfiles(repo, names)
 
-            changes = repo.status(node1=node, match=m,
-                                  unknown=True, ignored=True, clean=True)
+            changes = repo.status(
+                node1=node, match=m, unknown=True, ignored=True, clean=True
+            )
         else:
             changes = repo.status(node1=node, match=m)
             for kind in changes:
@@ -2842,12 +3320,12 @@
             m = scmutil.matchfiles(repo, names)
 
         modified = set(changes.modified)
-        added    = set(changes.added)
-        removed  = set(changes.removed)
+        added = set(changes.added)
+        removed = set(changes.removed)
         _deleted = set(changes.deleted)
-        unknown  = set(changes.unknown)
+        unknown = set(changes.unknown)
         unknown.update(changes.ignored)
-        clean    = set(changes.clean)
+        clean = set(changes.clean)
         modadded = set()
 
         # We need to account for the state of the file in the dirstate,
@@ -2864,8 +3342,8 @@
         else:
             changes = repo.status(node1=parent, match=m)
             dsmodified = set(changes.modified)
-            dsadded    = set(changes.added)
-            dsremoved  = set(changes.removed)
+            dsadded = set(changes.added)
+            dsremoved = set(changes.removed)
             # store all local modifications, useful later for rename detection
             localchanges = dsmodified | dsadded
 
@@ -2880,7 +3358,7 @@
 
             # tell newly modified files apart.
             dsmodified &= modified
-            dsmodified |= modified & dsadded # dirstate added may need backup
+            dsmodified |= modified & dsadded  # dirstate added may need backup
             modified -= dsmodified
 
             # We need to wait for some post-processing to update this set
@@ -2902,7 +3380,7 @@
         for f in localchanges:
             src = repo.dirstate.copied(f)
             # XXX should we check for rename down to target node?
-            if src and src not in names and repo.dirstate[src] == 'r':
+            if src and src not in names and repo.dirstate[src] == b'r':
                 dsremoved.add(src)
                 names[src] = True
 
@@ -2916,12 +3394,12 @@
         # distinguish between file to forget and the other
         added = set()
         for abs in dsadded:
-            if repo.dirstate[abs] != 'a':
+            if repo.dirstate[abs] != b'a':
                 added.add(abs)
         dsadded -= added
 
         for abs in deladded:
-            if repo.dirstate[abs] == 'a':
+            if repo.dirstate[abs] == b'a':
                 dsadded.add(abs)
         deladded -= dsadded
 
@@ -2945,25 +3423,26 @@
 
         # actions to be actually performed by revert
         # (<list of files>, <message>) tuple
-        actions = {'revert': ([], _('reverting %s\n')),
-                   'add': ([], _('adding %s\n')),
-                   'remove': ([], _('removing %s\n')),
-                   'drop': ([], _('removing %s\n')),
-                   'forget': ([], _('forgetting %s\n')),
-                   'undelete': ([], _('undeleting %s\n')),
-                   'noop': (None, _('no changes needed to %s\n')),
-                   'unknown': (None, _('file not managed: %s\n')),
-                  }
+        actions = {
+            b'revert': ([], _(b'reverting %s\n')),
+            b'add': ([], _(b'adding %s\n')),
+            b'remove': ([], _(b'removing %s\n')),
+            b'drop': ([], _(b'removing %s\n')),
+            b'forget': ([], _(b'forgetting %s\n')),
+            b'undelete': ([], _(b'undeleting %s\n')),
+            b'noop': (None, _(b'no changes needed to %s\n')),
+            b'unknown': (None, _(b'file not managed: %s\n')),
+        }
 
         # "constants" that convey the backup strategy.
         # All set to `discard` if `no-backup` is set to avoid checking
         # no_backup lower in the code.
         # These values are ordered for comparison purposes
-        backupinteractive = 3 # do backup if interactively modified
+        backupinteractive = 3  # do backup if interactively modified
         backup = 2  # unconditionally do backup
-        check = 1   # check if the existing file differs from target
-        discard = 0 # never do backup
-        if opts.get('no_backup'):
+        check = 1  # check if the existing file differs from target
+        discard = 0  # never do backup
+        if opts.get(b'no_backup'):
             backupinteractive = backup = check = discard
         if interactive:
             dsmodifiedbackup = backupinteractive
@@ -2971,45 +3450,44 @@
             dsmodifiedbackup = backup
         tobackup = set()
 
-        backupanddel = actions['remove']
-        if not opts.get('no_backup'):
-            backupanddel = actions['drop']
+        backupanddel = actions[b'remove']
+        if not opts.get(b'no_backup'):
+            backupanddel = actions[b'drop']
 
         disptable = (
             # dispatch table:
             #   file state
             #   action
             #   make backup
-
             ## Sets whose results will change files on disk
             # Modified compared to target, no local change
-            (modified,      actions['revert'],   discard),
+            (modified, actions[b'revert'], discard),
             # Modified compared to target, but local file is deleted
-            (deleted,       actions['revert'],   discard),
+            (deleted, actions[b'revert'], discard),
             # Modified compared to target, local change
-            (dsmodified,    actions['revert'],   dsmodifiedbackup),
+            (dsmodified, actions[b'revert'], dsmodifiedbackup),
             # Added since target
-            (added,         actions['remove'],   discard),
+            (added, actions[b'remove'], discard),
             # Added in working directory
-            (dsadded,       actions['forget'],   discard),
+            (dsadded, actions[b'forget'], discard),
             # Added since target, have local modification
-            (modadded,      backupanddel,        backup),
+            (modadded, backupanddel, backup),
             # Added since target but file is missing in working directory
-            (deladded,      actions['drop'],   discard),
+            (deladded, actions[b'drop'], discard),
+            # Removed since target, before working copy parent
-            (removed,       actions['add'],      discard),
+            (removed, actions[b'add'], discard),
             # Same as `removed` but an unknown file exists at the same path
-            (removunk,      actions['add'],      check),
+            (removunk, actions[b'add'], check),
+            # Removed since target, marked as such in working copy parent
-            (dsremoved,     actions['undelete'], discard),
+            (dsremoved, actions[b'undelete'], discard),
             # Same as `dsremoved` but an unknown file exists at the same path
-            (dsremovunk,    actions['undelete'], check),
+            (dsremovunk, actions[b'undelete'], check),
+            ## the following sets do not result in any file changes
             # File with no modification
-            (clean,         actions['noop'],     discard),
+            (clean, actions[b'noop'], discard),
             # Existing file, not tracked anywhere
-            (unknown,       actions['unknown'],  discard),
-            )
+            (unknown, actions[b'unknown'], discard),
+        )
 
         for abs, exact in sorted(names.items()):
             # target file to be touched on disk (relative to cwd)
@@ -3027,48 +3505,75 @@
                         # .orig files (issue4793)
                         if dobackup == backupinteractive:
                             tobackup.add(abs)
-                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
+                        elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
                             absbakname = scmutil.backuppath(ui, repo, abs)
-                            bakname = os.path.relpath(absbakname,
-                                                      start=repo.root)
-                            ui.note(_('saving current version of %s as %s\n') %
-                                    (uipathfn(abs), uipathfn(bakname)))
-                            if not opts.get('dry_run'):
+                            bakname = os.path.relpath(
+                                absbakname, start=repo.root
+                            )
+                            ui.note(
+                                _(b'saving current version of %s as %s\n')
+                                % (uipathfn(abs), uipathfn(bakname))
+                            )
+                            if not opts.get(b'dry_run'):
                                 if interactive:
                                     util.copyfile(target, absbakname)
                                 else:
                                     util.rename(target, absbakname)
-                    if opts.get('dry_run'):
+                    if opts.get(b'dry_run'):
                         if ui.verbose or not exact:
                             ui.status(msg % uipathfn(abs))
                 elif exact:
                     ui.warn(msg % uipathfn(abs))
                 break
 
-        if not opts.get('dry_run'):
-            needdata = ('revert', 'add', 'undelete')
+        if not opts.get(b'dry_run'):
+            needdata = (b'revert', b'add', b'undelete')
             oplist = [actions[name][0] for name in needdata]
             prefetch = scmutil.prefetchfiles
             matchfiles = scmutil.matchfiles
-            prefetch(repo, [ctx.rev()],
-                     matchfiles(repo,
-                                [f for sublist in oplist for f in sublist]))
+            prefetch(
+                repo,
+                [ctx.rev()],
+                matchfiles(repo, [f for sublist in oplist for f in sublist]),
+            )
             match = scmutil.match(repo[None], pats)
-            _performrevert(repo, parents, ctx, names, uipathfn, actions,
-                           match, interactive, tobackup)
+            _performrevert(
+                repo,
+                parents,
+                ctx,
+                names,
+                uipathfn,
+                actions,
+                match,
+                interactive,
+                tobackup,
+            )
 
         if targetsubs:
             # Revert the subrepos on the revert list
             for sub in targetsubs:
                 try:
-                    wctx.sub(sub).revert(ctx.substate[sub], *pats,
-                                         **pycompat.strkwargs(opts))
+                    wctx.sub(sub).revert(
+                        ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
+                    )
                 except KeyError:
-                    raise error.Abort("subrepository '%s' does not exist in %s!"
-                                      % (sub, short(ctx.node())))
-
-def _performrevert(repo, parents, ctx, names, uipathfn, actions,
-                   match, interactive=False, tobackup=None):
+                    raise error.Abort(
+                        b"subrepository '%s' does not exist in %s!"
+                        % (sub, short(ctx.node()))
+                    )
+
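The backup-strategy constants in revert() are ordered so that `backup <=
dobackup` reads as "this disposition always backs up now"; check-mode backs up
only when the on-disk file differs from the revert target, and interactive
backups are deferred into `tobackup`. A sketch of the decision (constant
values copied from above, helper name hypothetical):

    backupinteractive, backup, check, discard = 3, 2, 1, 0

    def needsimmediatebackup(dobackup, filediffers):
        # discard (0) is falsy and never reaches this test in the real code
        return bool(dobackup) and dobackup != backupinteractive and (
            backup <= dobackup or filediffers
        )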
+
+def _performrevert(
+    repo,
+    parents,
+    ctx,
+    names,
+    uipathfn,
+    actions,
+    match,
+    interactive=False,
+    tobackup=None,
+):
     """function that actually performs all the actions computed for revert
 
     This is an independent function to let extensions plug in and react to
@@ -3086,7 +3591,7 @@
 
     def doremove(f):
         try:
-            rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
+            rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
             repo.wvfs.unlinkpath(f, rmdir=rmdir)
         except OSError:
             pass
@@ -3098,34 +3603,36 @@
             repo.ui.status(actions[action][1] % uipathfn(f))
 
     audit_path = pathutil.pathauditor(repo.root, cached=True)
-    for f in actions['forget'][0]:
+    for f in actions[b'forget'][0]:
         if interactive:
             choice = repo.ui.promptchoice(
-                _("forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f))
+                _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
+            )
             if choice == 0:
-                prntstatusmsg('forget', f)
+                prntstatusmsg(b'forget', f)
                 repo.dirstate.drop(f)
             else:
                 excluded_files.append(f)
         else:
-            prntstatusmsg('forget', f)
+            prntstatusmsg(b'forget', f)
             repo.dirstate.drop(f)
-    for f in actions['remove'][0]:
+    for f in actions[b'remove'][0]:
         audit_path(f)
         if interactive:
             choice = repo.ui.promptchoice(
-                _("remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f))
+                _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
+            )
             if choice == 0:
-                prntstatusmsg('remove', f)
+                prntstatusmsg(b'remove', f)
                 doremove(f)
             else:
                 excluded_files.append(f)
         else:
-            prntstatusmsg('remove', f)
+            prntstatusmsg(b'remove', f)
             doremove(f)
-    for f in actions['drop'][0]:
+    for f in actions[b'drop'][0]:
         audit_path(f)
-        prntstatusmsg('drop', f)
+        prntstatusmsg(b'drop', f)
         repo.dirstate.remove(f)
 
     normal = None
@@ -3141,22 +3648,26 @@
     newlyaddedandmodifiedfiles = set()
     if interactive:
         # Prompt the user for changes to revert
-        torevert = [f for f in actions['revert'][0] if f not in excluded_files]
+        torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
         m = scmutil.matchfiles(repo, torevert)
-        diffopts = patch.difffeatureopts(repo.ui, whitespace=True,
-                                         section='commands',
-                                         configprefix='revert.interactive.')
+        diffopts = patch.difffeatureopts(
+            repo.ui,
+            whitespace=True,
+            section=b'commands',
+            configprefix=b'revert.interactive.',
+        )
         diffopts.nodates = True
         diffopts.git = True
-        operation = 'apply'
+        operation = b'apply'
         if node == parent:
-            if repo.ui.configbool('experimental',
-                                  'revert.interactive.select-to-keep'):
-                operation = 'keep'
+            if repo.ui.configbool(
+                b'experimental', b'revert.interactive.select-to-keep'
+            ):
+                operation = b'keep'
             else:
-                operation = 'discard'
-
-        if operation == 'apply':
+                operation = b'discard'
+
+        if operation == b'apply':
             diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
         else:
             diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
@@ -3164,22 +3675,30 @@
 
         try:
 
-            chunks, opts = recordfilter(repo.ui, originalchunks, match,
-                                        operation=operation)
-            if operation == 'discard':
+            chunks, opts = recordfilter(
+                repo.ui, originalchunks, match, operation=operation
+            )
+            if operation == b'discard':
                 chunks = patch.reversehunks(chunks)
 
         except error.PatchError as err:
-            raise error.Abort(_('error parsing patch: %s') % err)
-
-        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
+            raise error.Abort(_(b'error parsing patch: %s') % err)
+
+        # FIXME: when doing an interactive revert of a copy, there's no way of
+        # performing a partial revert of the added file; the only option is
+        # "remove added file <name> (Yn)?", so we don't need to worry about the
+        # alsorestore value. Ideally we'd be able to partially revert
+        # copied/renamed files.
+        newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
+            chunks, originalchunks
+        )
         if tobackup is None:
             tobackup = set()
         # Apply changes
         fp = stringio()
         # chunks are serialized per file, but files aren't sorted
         for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))):
-            prntstatusmsg('revert', f)
+            prntstatusmsg(b'revert', f)
         files = set()
         for c in chunks:
             if ishunk(c):
@@ -3192,7 +3711,7 @@
                     tobackup.remove(abs)
                 if abs not in files:
                     files.add(abs)
-                    if operation == 'keep':
+                    if operation == b'keep':
                         checkout(abs)
             c.write(fp)
         dopatch = fp.tell()
@@ -3204,43 +3723,47 @@
                 raise error.Abort(pycompat.bytestr(err))
         del fp
     else:
-        for f in actions['revert'][0]:
-            prntstatusmsg('revert', f)
+        for f in actions[b'revert'][0]:
+            prntstatusmsg(b'revert', f)
             checkout(f)
             if normal:
                 normal(f)
 
-    for f in actions['add'][0]:
+    for f in actions[b'add'][0]:
         # Don't check out modified files; they are already created by the diff
         if f not in newlyaddedandmodifiedfiles:
-            prntstatusmsg('add', f)
+            prntstatusmsg(b'add', f)
             checkout(f)
             repo.dirstate.add(f)
 
     normal = repo.dirstate.normallookup
     if node == parent and p2 == nullid:
         normal = repo.dirstate.normal
-    for f in actions['undelete'][0]:
+    for f in actions[b'undelete'][0]:
         if interactive:
             choice = repo.ui.promptchoice(
-                _("add back removed file %s (Yn)?$$ &Yes $$ &No") % f)
+                _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
+            )
             if choice == 0:
-                prntstatusmsg('undelete', f)
+                prntstatusmsg(b'undelete', f)
                 checkout(f)
                 normal(f)
             else:
                 excluded_files.append(f)
         else:
-            prntstatusmsg('undelete', f)
+            prntstatusmsg(b'undelete', f)
             checkout(f)
             normal(f)
 
     copied = copies.pathcopies(repo[parent], ctx)
 
-    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
+    for f in (
+        actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
+    ):
         if f in copied:
             repo.dirstate.copy(copied[f], f)
 
+
 # a list of (ui, repo, otherpeer, opts, missing) functions called by
 # commands.outgoing.  "missing" is "missing" of the result of
 # "findcommonoutgoing()"
@@ -3268,19 +3791,27 @@
     # Check for non-clearable states first, so things like rebase will take
     # precedence over update.
     for state in statemod._unfinishedstates:
-        if (state._clearable or (commit and state._allowcommit) or
-            state._reportonly):
+        if (
+            state._clearable
+            or (commit and state._allowcommit)
+            or state._reportonly
+        ):
             continue
         if state.isunfinished(repo):
             raise error.Abort(state.msg(), hint=state.hint())
 
     for s in statemod._unfinishedstates:
-        if (not s._clearable or (commit and s._allowcommit) or
-            (s._opname == 'merge' and skipmerge) or s._reportonly):
+        if (
+            not s._clearable
+            or (commit and s._allowcommit)
+            or (s._opname == b'merge' and skipmerge)
+            or s._reportonly
+        ):
             continue
         if s.isunfinished(repo):
             raise error.Abort(s.msg(), hint=s.hint())
 
+
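The two loops above implement the precedence noted in the comment ("rebase
over update"): pass one raises only for states that cannot simply be cleared,
and pass two complains about clearable ones only if nothing non-clearable
fired first. Restated with two illustrative states:

    # rebase: _clearable is False -> loop 1 aborts ("rebase in progress")
    # update: _clearable is True  -> only loop 2 ever reports it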
 def clearunfinished(repo):
     '''Check for unfinished operations (as above), and clear the ones
     that are clearable.
@@ -3292,11 +3823,12 @@
             raise error.Abort(state.msg(), hint=state.hint())
 
     for s in statemod._unfinishedstates:
-        if s._opname == 'merge' or state._reportonly:
+        if s._opname == b'merge' or s._reportonly:
             continue
         if s._clearable and s.isunfinished(repo):
             util.unlink(repo.vfs.join(s._fname))
 
+
 def getunfinishedstate(repo):
     '''Check for unfinished operations and return the statecheck object
     for it'''
@@ -3305,6 +3837,7 @@
             return state
     return None
 
+
 def howtocontinue(repo):
     '''Check for an unfinished operation and return the command to finish
     it.
@@ -3316,16 +3849,17 @@
     Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
     a boolean.
     '''
-    contmsg = _("continue: %s")
+    contmsg = _(b"continue: %s")
     for state in statemod._unfinishedstates:
         if not state._continueflag:
             continue
         if state.isunfinished(repo):
             return contmsg % state.continuemsg(), True
     if repo[None].dirty(missing=True, merge=False, branch=False):
-        return contmsg % _("hg commit"), False
+        return contmsg % _(b"hg commit"), False
     return None, None
 
+
 def checkafterresolved(repo):
     '''Inform the user about the next action after completing hg resolve
 
@@ -3337,9 +3871,10 @@
     msg, warning = howtocontinue(repo)
     if msg is not None:
         if warning:
-            repo.ui.warn("%s\n" % msg)
+            repo.ui.warn(b"%s\n" % msg)
         else:
-            repo.ui.note("%s\n" % msg)
+            repo.ui.note(b"%s\n" % msg)
+
 
 def wrongtooltocontinue(repo, task):
     '''Raise an abort suggesting how to properly continue if there is an
@@ -3354,74 +3889,85 @@
     hint = None
     if after[1]:
         hint = after[0]
-    raise error.Abort(_('no %s in progress') % task, hint=hint)
+    raise error.Abort(_(b'no %s in progress') % task, hint=hint)
+
 
 def abortgraft(ui, repo, graftstate):
     """abort the interrupted graft and roll back to the state before the
     graft was interrupted"""
     if not graftstate.exists():
-        raise error.Abort(_("no interrupted graft to abort"))
+        raise error.Abort(_(b"no interrupted graft to abort"))
     statedata = readgraftstate(repo, graftstate)
-    newnodes = statedata.get('newnodes')
+    newnodes = statedata.get(b'newnodes')
     if newnodes is None:
         # an old graft state which does not have all the data required to abort
         # the graft
-        raise error.Abort(_("cannot abort using an old graftstate"))
+        raise error.Abort(_(b"cannot abort using an old graftstate"))
 
     # changeset from which graft operation was started
     if len(newnodes) > 0:
         startctx = repo[newnodes[0]].p1()
     else:
-        startctx = repo['.']
+        startctx = repo[b'.']
     # whether to strip or not
     cleanup = False
     from . import hg
+
     if newnodes:
         newnodes = [repo[r].rev() for r in newnodes]
         cleanup = True
         # checking that none of the newnodes turned public or is public
         immutable = [c for c in newnodes if not repo[c].mutable()]
         if immutable:
-            repo.ui.warn(_("cannot clean up public changesets %s\n")
-                         % ', '.join(bytes(repo[r]) for r in immutable),
-                         hint=_("see 'hg help phases' for details"))
+            repo.ui.warn(
+                _(b"cannot clean up public changesets %s\n")
+                % b', '.join(bytes(repo[r]) for r in immutable),
+                hint=_(b"see 'hg help phases' for details"),
+            )
             cleanup = False
 
         # checking that no new nodes are created on top of grafted revs
         desc = set(repo.changelog.descendants(newnodes))
         if desc - set(newnodes):
-            repo.ui.warn(_("new changesets detected on destination "
-                           "branch, can't strip\n"))
+            repo.ui.warn(
+                _(
+                    b"new changesets detected on destination "
+                    b"branch, can't strip\n"
+                )
+            )
             cleanup = False
 
         if cleanup:
             with repo.wlock(), repo.lock():
                 hg.updaterepo(repo, startctx.node(), overwrite=True)
                 # stripping the new nodes created
-                strippoints = [c.node() for c in repo.set("roots(%ld)",
-                                                          newnodes)]
+                strippoints = [
+                    c.node() for c in repo.set(b"roots(%ld)", newnodes)
+                ]
                 repair.strip(repo.ui, repo, strippoints, backup=False)
 
     if not cleanup:
         # we don't update to the startnode if we can't strip
-        startctx = repo['.']
+        startctx = repo[b'.']
         hg.updaterepo(repo, startctx.node(), overwrite=True)
 
-    ui.status(_("graft aborted\n"))
-    ui.status(_("working directory is now at %s\n") % startctx.hex()[:12])
+    ui.status(_(b"graft aborted\n"))
+    ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
     graftstate.delete()
     return 0
 
+
 def readgraftstate(repo, graftstate):
     """read the graft state file and return a dict of the data stored in it"""
     try:
         return graftstate.read()
     except error.CorruptedState:
-        nodes = repo.vfs.read('graftstate').splitlines()
-        return {'nodes': nodes}
+        nodes = repo.vfs.read(b'graftstate').splitlines()
+        return {b'nodes': nodes}
+
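readgraftstate() papers over two on-disk formats: the structured state that
graftstate.read() understands, and a legacy plain file of node hashes, one
per line. The shape each yields (illustrative):

    # modern:  {b'nodes': [...], b'newnodes': [...], ...}
    # legacy:  {b'nodes': [b'<hash>', b'<hash>', ...]}
    #
    # A legacy dict has no b'newnodes', so abortgraft() above refuses it
    # with "cannot abort using an old graftstate".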
 
 def hgabortgraft(ui, repo):
     """abort logic for graft, invoked via 'hg abort'"""
     with repo.wlock():
-        graftstate = statemod.cmdstate(repo, 'graftstate')
+        graftstate = statemod.cmdstate(repo, b'graftstate')
         return abortgraft(ui, repo, graftstate)
--- a/mercurial/color.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/color.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,39 +10,39 @@
 import re
 
 from .i18n import _
+from .pycompat import getattr
 
 from . import (
     encoding,
     pycompat,
 )
 
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
 try:
     import curses
+
     # Mapping from effect name to terminfo attribute name (or raw code) or
     # color number.  This will also force-load the curses module.
     _baseterminfoparams = {
-        'none': (True, 'sgr0', ''),
-        'standout': (True, 'smso', ''),
-        'underline': (True, 'smul', ''),
-        'reverse': (True, 'rev', ''),
-        'inverse': (True, 'rev', ''),
-        'blink': (True, 'blink', ''),
-        'dim': (True, 'dim', ''),
-        'bold': (True, 'bold', ''),
-        'invisible': (True, 'invis', ''),
-        'italic': (True, 'sitm', ''),
-        'black': (False, curses.COLOR_BLACK, ''),
-        'red': (False, curses.COLOR_RED, ''),
-        'green': (False, curses.COLOR_GREEN, ''),
-        'yellow': (False, curses.COLOR_YELLOW, ''),
-        'blue': (False, curses.COLOR_BLUE, ''),
-        'magenta': (False, curses.COLOR_MAGENTA, ''),
-        'cyan': (False, curses.COLOR_CYAN, ''),
-        'white': (False, curses.COLOR_WHITE, ''),
+        b'none': (True, b'sgr0', b''),
+        b'standout': (True, b'smso', b''),
+        b'underline': (True, b'smul', b''),
+        b'reverse': (True, b'rev', b''),
+        b'inverse': (True, b'rev', b''),
+        b'blink': (True, b'blink', b''),
+        b'dim': (True, b'dim', b''),
+        b'bold': (True, b'bold', b''),
+        b'invisible': (True, b'invis', b''),
+        b'italic': (True, b'sitm', b''),
+        b'black': (False, curses.COLOR_BLACK, b''),
+        b'red': (False, curses.COLOR_RED, b''),
+        b'green': (False, curses.COLOR_GREEN, b''),
+        b'yellow': (False, curses.COLOR_YELLOW, b''),
+        b'blue': (False, curses.COLOR_BLUE, b''),
+        b'magenta': (False, curses.COLOR_MAGENTA, b''),
+        b'cyan': (False, curses.COLOR_CYAN, b''),
+        b'white': (False, curses.COLOR_WHITE, b''),
     }
 except ImportError:
     curses = None
@@ -50,106 +50,108 @@
 
 # start and stop parameters for effects
 _effects = {
-    'none': 0,
-    'black': 30,
-    'red': 31,
-    'green': 32,
-    'yellow': 33,
-    'blue': 34,
-    'magenta': 35,
-    'cyan': 36,
-    'white': 37,
-    'bold': 1,
-    'italic': 3,
-    'underline': 4,
-    'inverse': 7,
-    'dim': 2,
-    'black_background': 40,
-    'red_background': 41,
-    'green_background': 42,
-    'yellow_background': 43,
-    'blue_background': 44,
-    'purple_background': 45,
-    'cyan_background': 46,
-    'white_background': 47,
-    }
+    b'none': 0,
+    b'black': 30,
+    b'red': 31,
+    b'green': 32,
+    b'yellow': 33,
+    b'blue': 34,
+    b'magenta': 35,
+    b'cyan': 36,
+    b'white': 37,
+    b'bold': 1,
+    b'italic': 3,
+    b'underline': 4,
+    b'inverse': 7,
+    b'dim': 2,
+    b'black_background': 40,
+    b'red_background': 41,
+    b'green_background': 42,
+    b'yellow_background': 43,
+    b'blue_background': 44,
+    b'purple_background': 45,
+    b'cyan_background': 46,
+    b'white_background': 47,
+}
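
The numbers in _effects are ANSI SGR parameters, so rendering an effect string
amounts to joining codes into a single escape sequence; a minimal sketch
(helper name hypothetical):

    def _render(text, effects=b'red bold'):
        # b'red bold' -> b'\x1b[31;1m' + text + b'\x1b[0m' (b'none' == 0 resets)
        start = b';'.join(b'%d' % _effects[e] for e in effects.split())
        return b'\x1b[' + start + b'm' + text + b'\x1b[0m'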
 
 _defaultstyles = {
-    'grep.match': 'red bold',
-    'grep.linenumber': 'green',
-    'grep.rev': 'blue',
-    'grep.sep': 'cyan',
-    'grep.filename': 'magenta',
-    'grep.user': 'magenta',
-    'grep.date': 'magenta',
-    'grep.inserted': 'green bold',
-    'grep.deleted': 'red bold',
-    'bookmarks.active': 'green',
-    'branches.active': 'none',
-    'branches.closed': 'black bold',
-    'branches.current': 'green',
-    'branches.inactive': 'none',
-    'diff.changed': 'white',
-    'diff.deleted': 'red',
-    'diff.deleted.changed': 'red bold underline',
-    'diff.deleted.unchanged': 'red',
-    'diff.diffline': 'bold',
-    'diff.extended': 'cyan bold',
-    'diff.file_a': 'red bold',
-    'diff.file_b': 'green bold',
-    'diff.hunk': 'magenta',
-    'diff.inserted': 'green',
-    'diff.inserted.changed': 'green bold underline',
-    'diff.inserted.unchanged': 'green',
-    'diff.tab': '',
-    'diff.trailingwhitespace': 'bold red_background',
-    'changeset.public': '',
-    'changeset.draft': '',
-    'changeset.secret': '',
-    'diffstat.deleted': 'red',
-    'diffstat.inserted': 'green',
-    'formatvariant.name.mismatchconfig': 'red',
-    'formatvariant.name.mismatchdefault': 'yellow',
-    'formatvariant.name.uptodate': 'green',
-    'formatvariant.repo.mismatchconfig': 'red',
-    'formatvariant.repo.mismatchdefault': 'yellow',
-    'formatvariant.repo.uptodate': 'green',
-    'formatvariant.config.special': 'yellow',
-    'formatvariant.config.default': 'green',
-    'formatvariant.default': '',
-    'histedit.remaining': 'red bold',
-    'ui.addremove.added': 'green',
-    'ui.addremove.removed': 'red',
-    'ui.error': 'red',
-    'ui.prompt': 'yellow',
-    'log.changeset': 'yellow',
-    'patchbomb.finalsummary': '',
-    'patchbomb.from': 'magenta',
-    'patchbomb.to': 'cyan',
-    'patchbomb.subject': 'green',
-    'patchbomb.diffstats': '',
-    'rebase.rebased': 'blue',
-    'rebase.remaining': 'red bold',
-    'resolve.resolved': 'green bold',
-    'resolve.unresolved': 'red bold',
-    'shelve.age': 'cyan',
-    'shelve.newest': 'green bold',
-    'shelve.name': 'blue bold',
-    'status.added': 'green bold',
-    'status.clean': 'none',
-    'status.copied': 'none',
-    'status.deleted': 'cyan bold underline',
-    'status.ignored': 'black bold',
-    'status.modified': 'blue bold',
-    'status.removed': 'red bold',
-    'status.unknown': 'magenta bold underline',
-    'tags.normal': 'green',
-    'tags.local': 'black bold',
+    b'grep.match': b'red bold',
+    b'grep.linenumber': b'green',
+    b'grep.rev': b'blue',
+    b'grep.sep': b'cyan',
+    b'grep.filename': b'magenta',
+    b'grep.user': b'magenta',
+    b'grep.date': b'magenta',
+    b'grep.inserted': b'green bold',
+    b'grep.deleted': b'red bold',
+    b'bookmarks.active': b'green',
+    b'branches.active': b'none',
+    b'branches.closed': b'black bold',
+    b'branches.current': b'green',
+    b'branches.inactive': b'none',
+    b'diff.changed': b'white',
+    b'diff.deleted': b'red',
+    b'diff.deleted.changed': b'red bold underline',
+    b'diff.deleted.unchanged': b'red',
+    b'diff.diffline': b'bold',
+    b'diff.extended': b'cyan bold',
+    b'diff.file_a': b'red bold',
+    b'diff.file_b': b'green bold',
+    b'diff.hunk': b'magenta',
+    b'diff.inserted': b'green',
+    b'diff.inserted.changed': b'green bold underline',
+    b'diff.inserted.unchanged': b'green',
+    b'diff.tab': b'',
+    b'diff.trailingwhitespace': b'bold red_background',
+    b'changeset.public': b'',
+    b'changeset.draft': b'',
+    b'changeset.secret': b'',
+    b'diffstat.deleted': b'red',
+    b'diffstat.inserted': b'green',
+    b'formatvariant.name.mismatchconfig': b'red',
+    b'formatvariant.name.mismatchdefault': b'yellow',
+    b'formatvariant.name.uptodate': b'green',
+    b'formatvariant.repo.mismatchconfig': b'red',
+    b'formatvariant.repo.mismatchdefault': b'yellow',
+    b'formatvariant.repo.uptodate': b'green',
+    b'formatvariant.config.special': b'yellow',
+    b'formatvariant.config.default': b'green',
+    b'formatvariant.default': b'',
+    b'histedit.remaining': b'red bold',
+    b'ui.addremove.added': b'green',
+    b'ui.addremove.removed': b'red',
+    b'ui.error': b'red',
+    b'ui.prompt': b'yellow',
+    b'log.changeset': b'yellow',
+    b'patchbomb.finalsummary': b'',
+    b'patchbomb.from': b'magenta',
+    b'patchbomb.to': b'cyan',
+    b'patchbomb.subject': b'green',
+    b'patchbomb.diffstats': b'',
+    b'rebase.rebased': b'blue',
+    b'rebase.remaining': b'red bold',
+    b'resolve.resolved': b'green bold',
+    b'resolve.unresolved': b'red bold',
+    b'shelve.age': b'cyan',
+    b'shelve.newest': b'green bold',
+    b'shelve.name': b'blue bold',
+    b'status.added': b'green bold',
+    b'status.clean': b'none',
+    b'status.copied': b'none',
+    b'status.deleted': b'cyan bold underline',
+    b'status.ignored': b'black bold',
+    b'status.modified': b'blue bold',
+    b'status.removed': b'red bold',
+    b'status.unknown': b'magenta bold underline',
+    b'tags.normal': b'green',
+    b'tags.local': b'black bold',
 }
 
+
 def loadcolortable(ui, extname, colortable):
     _defaultstyles.update(colortable)
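
Styles are space-separated effect names, and ``loadcolortable()`` simply
layers an extension's labels over the defaults. A hedged illustration
(the ``myext.warning`` label is hypothetical)::

    styles = {b'status.modified': b'blue bold'}
    styles.update({b'myext.warning': b'yellow bold'})  # as loadcolortable does
    assert styles[b'status.modified'].split() == [b'blue', b'bold']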
 
+
 def _terminfosetup(ui, mode, formatted):
     '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
 
@@ -157,16 +159,16 @@
     if curses is None:
         return
     # Otherwise, see what the config file says.
-    if mode not in ('auto', 'terminfo'):
+    if mode not in (b'auto', b'terminfo'):
         return
     ui._terminfoparams.update(_baseterminfoparams)
 
-    for key, val in ui.configitems('color'):
-        if key.startswith('color.'):
-            newval = (False, int(val), '')
+    for key, val in ui.configitems(b'color'):
+        if key.startswith(b'color.'):
+            newval = (False, int(val), b'')
             ui._terminfoparams[key[6:]] = newval
-        elif key.startswith('terminfo.'):
-            newval = (True, '', val.replace('\\E', '\x1b'))
+        elif key.startswith(b'terminfo.'):
+            newval = (True, b'', val.replace(b'\\E', b'\x1b'))
             ui._terminfoparams[key[9:]] = newval
     try:
         curses.setupterm()
@@ -180,16 +182,21 @@
         if not c and not curses.tigetstr(pycompat.sysstr(e)):
             # Most terminals don't support dim, invis, etc, so don't be
             # noisy and use ui.debug().
-            ui.debug("no terminfo entry for %s\n" % e)
+            ui.debug(b"no terminfo entry for %s\n" % e)
             del ui._terminfoparams[key]
     if not curses.tigetstr(r'setaf') or not curses.tigetstr(r'setab'):
         # Only warn about missing terminfo entries if we explicitly asked for
         # terminfo mode and we're in a formatted terminal.
-        if mode == "terminfo" and formatted:
-            ui.warn(_("no terminfo entry for setab/setaf: reverting to "
-              "ECMA-48 color\n"))
+        if mode == b"terminfo" and formatted:
+            ui.warn(
+                _(
+                    b"no terminfo entry for setab/setaf: reverting to "
+                    b"ECMA-48 color\n"
+                )
+            )
         ui._terminfoparams.clear()
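
The loop above accepts two conventions inside the ``[color]`` section:
``color.NAME = NNN`` maps a name to a numeric terminfo color, while
``terminfo.NAME`` supplies a raw capability string in which ``\E`` stands
for the escape byte. A small sketch of that substitution (the ``italic``
key is only an example)::

    val = b'\\E[3m'                        # e.g. terminfo.italic = \E[3m
    termcode = val.replace(b'\\E', b'\x1b')
    assert termcode == b'\x1b[3m'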
 
+
 def setup(ui):
     """configure color on a ui
 
@@ -197,22 +204,26 @@
     the configuration looking for custom colors and effect definitions."""
     mode = _modesetup(ui)
     ui._colormode = mode
-    if mode and mode != 'debug':
+    if mode and mode != b'debug':
         configstyles(ui)
 
+
 def _modesetup(ui):
-    if ui.plain('color'):
+    if ui.plain(b'color'):
         return None
-    config = ui.config('ui', 'color')
-    if config == 'debug':
-        return 'debug'
+    config = ui.config(b'ui', b'color')
+    if config == b'debug':
+        return b'debug'
 
-    auto = (config == 'auto')
+    auto = config == b'auto'
     always = False
     if not auto and stringutil.parsebool(config):
         # We want the config to behave like a boolean, "on" is actually auto,
         # but "always" value is treated as a special case to reduce confusion.
-        if ui.configsource('ui', 'color') == '--color' or config == 'always':
+        if (
+            ui.configsource(b'ui', b'color') == b'--color'
+            or config == b'always'
+        ):
             always = True
         else:
             auto = True
@@ -220,64 +231,65 @@
     if not always and not auto:
         return None
 
-    formatted = (always or (encoding.environ.get('TERM') != 'dumb'
-                 and ui.formatted()))
+    formatted = always or (
+        encoding.environ.get(b'TERM') != b'dumb' and ui.formatted()
+    )
 
-    mode = ui.config('color', 'mode')
+    mode = ui.config(b'color', b'mode')
 
     # If pager is active, color.pagermode overrides color.mode.
     if getattr(ui, 'pageractive', False):
-        mode = ui.config('color', 'pagermode', mode)
+        mode = ui.config(b'color', b'pagermode', mode)
 
     realmode = mode
     if pycompat.iswindows:
         from . import win32
 
-        term = encoding.environ.get('TERM')
+        term = encoding.environ.get(b'TERM')
         # TERM won't be defined in a vanilla cmd.exe environment.
 
         # UNIX-like environments on Windows such as Cygwin and MSYS will
         # set TERM. They appear to make a best effort attempt at setting it
         # to something appropriate. However, not all environments with TERM
         # defined support ANSI.
-        ansienviron = term and 'xterm' in term
+        ansienviron = term and b'xterm' in term
 
-        if mode == 'auto':
+        if mode == b'auto':
             # Since "ansi" could result in terminal gibberish, we error on the
             # side of selecting "win32". However, if w32effects is not defined,
             # we almost certainly don't support "win32", so don't even try.
             # w32effects is not populated when stdout is redirected, so checking
             # it first avoids win32 calls in a state known to error out.
             if ansienviron or not w32effects or win32.enablevtmode():
-                realmode = 'ansi'
+                realmode = b'ansi'
             else:
-                realmode = 'win32'
+                realmode = b'win32'
         # An empty w32effects is a clue that stdout is redirected, and thus
         # cannot enable VT mode.
-        elif mode == 'ansi' and w32effects and not ansienviron:
+        elif mode == b'ansi' and w32effects and not ansienviron:
             win32.enablevtmode()
-    elif mode == 'auto':
-        realmode = 'ansi'
+    elif mode == b'auto':
+        realmode = b'ansi'
 
     def modewarn():
         # only warn if color.mode was explicitly set and we're in
         # a formatted terminal
         if mode == realmode and formatted:
-            ui.warn(_('warning: failed to set color mode to %s\n') % mode)
+            ui.warn(_(b'warning: failed to set color mode to %s\n') % mode)
 
-    if realmode == 'win32':
+    if realmode == b'win32':
         ui._terminfoparams.clear()
         if not w32effects:
             modewarn()
             return None
-    elif realmode == 'ansi':
+    elif realmode == b'ansi':
         ui._terminfoparams.clear()
-    elif realmode == 'terminfo':
+    elif realmode == b'terminfo':
         _terminfosetup(ui, mode, formatted)
         if not ui._terminfoparams:
             ## FIXME Shouldn't we return None in this case too?
             modewarn()
-            realmode = 'ansi'
+            realmode = b'ansi'
     else:
         return None
 
@@ -285,48 +297,56 @@
         return realmode
     return None
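
Condensed, the precedence implemented by ``_modesetup()`` is: ``debug``
short-circuits, ``always`` forces output, and ``auto`` colors only a
formatted terminal whose ``$TERM`` is not ``dumb``. A simplified sketch
under those assumptions (not the real signature; boolean configs and the
win32/terminfo fallbacks are elided)::

    def pickmode(config, term, is_tty):
        if config == b'debug':
            return b'debug'
        if config not in (b'auto', b'always'):
            return None                  # 'never' and false-y configs
        formatted = config == b'always' or (term != b'dumb' and is_tty)
        return b'ansi' if formatted else None

    assert pickmode(b'auto', b'dumb', True) is None
    assert pickmode(b'always', b'dumb', False) == b'ansi'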
 
+
 def configstyles(ui):
     ui._styles.update(_defaultstyles)
-    for status, cfgeffects in ui.configitems('color'):
-        if '.' not in status or status.startswith(('color.', 'terminfo.')):
+    for status, cfgeffects in ui.configitems(b'color'):
+        if b'.' not in status or status.startswith((b'color.', b'terminfo.')):
             continue
-        cfgeffects = ui.configlist('color', status)
+        cfgeffects = ui.configlist(b'color', status)
         if cfgeffects:
             good = []
             for e in cfgeffects:
                 if valideffect(ui, e):
                     good.append(e)
                 else:
-                    ui.warn(_("ignoring unknown color/effect %s "
-                              "(configured in color.%s)\n")
-                            % (stringutil.pprint(e), status))
-            ui._styles[status] = ' '.join(good)
+                    ui.warn(
+                        _(
+                            b"ignoring unknown color/effect %s "
+                            b"(configured in color.%s)\n"
+                        )
+                        % (stringutil.pprint(e), status)
+                    )
+            ui._styles[status] = b' '.join(good)
+
 
 def _activeeffects(ui):
     '''Return the effects map for the color mode set on the ui.'''
-    if ui._colormode == 'win32':
+    if ui._colormode == b'win32':
         return w32effects
     elif ui._colormode is not None:
         return _effects
     return {}
 
+
 def valideffect(ui, effect):
-    'Determine if the effect is valid or not.'
-    return ((not ui._terminfoparams and effect in _activeeffects(ui))
-             or (effect in ui._terminfoparams
-                 or effect[:-11] in ui._terminfoparams))
+    'Determine if the effect is valid or not.'
+    return (not ui._terminfoparams and effect in _activeeffects(ui)) or (
+        effect in ui._terminfoparams or effect[:-11] in ui._terminfoparams
+    )
+
 
 def _effect_str(ui, effect):
     '''Helper function for render_effects().'''
 
     bg = False
-    if effect.endswith('_background'):
+    if effect.endswith(b'_background'):
         bg = True
         effect = effect[:-11]
     try:
         attr, val, termcode = ui._terminfoparams[effect]
     except KeyError:
-        return ''
+        return b''
     if attr:
         if termcode:
             return termcode
@@ -337,6 +357,7 @@
     else:
         return curses.tparm(curses.tigetstr(r'setaf'), val)
 
+
 def _mergeeffects(text, start, stop):
     """Insert start sequence at every occurrence of stop sequence
 
@@ -352,52 +373,64 @@
         if not t:
             continue
         parts.extend([start, t, stop])
-    return ''.join(parts)
+    return b''.join(parts)
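
The contract is easiest to see with placeholder sequences: the outer
``start`` is re-emitted after every inner ``stop``, so a nested effect
cannot switch the outer one off. A standalone copy for illustration::

    def mergeeffects(text, start, stop):
        parts = []
        for t in text.split(stop):
            if not t:
                continue
            parts.extend([start, t, stop])
        return b''.join(parts)

    inner = mergeeffects(b'kitten', b'<B>', b'</>')   # b'<B>kitten</>'
    assert (mergeeffects(inner + b' purr', b'<R>', b'</>')
            == b'<R><B>kitten</><R> purr</>')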
+
 
 def _render_effects(ui, text, effects):
-    'Wrap text in commands to turn on each effect.'
+    'Wrap text in commands to turn on each effect.'
     if not text:
         return text
     if ui._terminfoparams:
-        start = ''.join(_effect_str(ui, effect)
-                        for effect in ['none'] + effects.split())
-        stop = _effect_str(ui, 'none')
+        start = b''.join(
+            _effect_str(ui, effect) for effect in [b'none'] + effects.split()
+        )
+        stop = _effect_str(ui, b'none')
     else:
         activeeffects = _activeeffects(ui)
-        start = [pycompat.bytestr(activeeffects[e])
-                 for e in ['none'] + effects.split()]
-        start = '\033[' + ';'.join(start) + 'm'
-        stop = '\033[' + pycompat.bytestr(activeeffects['none']) + 'm'
+        start = [
+            pycompat.bytestr(activeeffects[e])
+            for e in [b'none'] + effects.split()
+        ]
+        start = b'\033[' + b';'.join(start) + b'm'
+        stop = b'\033[' + pycompat.bytestr(activeeffects[b'none']) + b'm'
     return _mergeeffects(text, start, stop)
 
+
 _ansieffectre = re.compile(br'\x1b\[[0-9;]*m')
 
+
 def stripeffects(text):
     """Strip ANSI control codes which could be inserted by colorlabel()"""
-    return _ansieffectre.sub('', text)
+    return _ansieffectre.sub(b'', text)
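
For example, with the ``_ansieffectre`` pattern defined above::

    import re

    ansire = re.compile(br'\x1b\[[0-9;]*m')
    assert ansire.sub(b'', b'\x1b[0;32;1madded\x1b[0m') == b'added'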
+
 
 def colorlabel(ui, msg, label):
     """add color control code according to the mode"""
-    if ui._colormode == 'debug':
+    if ui._colormode == b'debug':
         if label and msg:
-            if msg.endswith('\n'):
-                msg = "[%s|%s]\n" % (label, msg[:-1])
+            if msg.endswith(b'\n'):
+                msg = b"[%s|%s]\n" % (label, msg[:-1])
             else:
-                msg = "[%s|%s]" % (label, msg)
+                msg = b"[%s|%s]" % (label, msg)
     elif ui._colormode is not None:
         effects = []
         for l in label.split():
-            s = ui._styles.get(l, '')
+            s = ui._styles.get(l, b'')
             if s:
                 effects.append(s)
             elif valideffect(ui, l):
                 effects.append(l)
-        effects = ' '.join(effects)
+        effects = b' '.join(effects)
         if effects:
-            msg = '\n'.join([_render_effects(ui, line, effects)
-                             for line in msg.split('\n')])
+            msg = b'\n'.join(
+                [
+                    _render_effects(ui, line, effects)
+                    for line in msg.split(b'\n')
+                ]
+            )
     return msg
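
In ``debug`` mode no escape codes are emitted; the label is spliced into
the text instead, which is easy for tests to match. For instance (label
and message hypothetical)::

    label, msg = b'status.modified', b'M foo.txt\n'
    assert b"[%s|%s]\n" % (label, msg[:-1]) == b'[status.modified|M foo.txt]\n'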
 
+
 w32effects = None
 if pycompat.iswindows:
     import ctypes
@@ -409,24 +442,27 @@
     _INVALID_HANDLE_VALUE = -1
 
     class _COORD(ctypes.Structure):
-        _fields_ = [(r'X', ctypes.c_short),
-                    (r'Y', ctypes.c_short)]
+        _fields_ = [(r'X', ctypes.c_short), (r'Y', ctypes.c_short)]
 
     class _SMALL_RECT(ctypes.Structure):
-        _fields_ = [(r'Left', ctypes.c_short),
-                    (r'Top', ctypes.c_short),
-                    (r'Right', ctypes.c_short),
-                    (r'Bottom', ctypes.c_short)]
+        _fields_ = [
+            (r'Left', ctypes.c_short),
+            (r'Top', ctypes.c_short),
+            (r'Right', ctypes.c_short),
+            (r'Bottom', ctypes.c_short),
+        ]
 
     class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
-        _fields_ = [(r'dwSize', _COORD),
-                    (r'dwCursorPosition', _COORD),
-                    (r'wAttributes', _WORD),
-                    (r'srWindow', _SMALL_RECT),
-                    (r'dwMaximumWindowSize', _COORD)]
+        _fields_ = [
+            (r'dwSize', _COORD),
+            (r'dwCursorPosition', _COORD),
+            (r'wAttributes', _WORD),
+            (r'srWindow', _SMALL_RECT),
+            (r'dwMaximumWindowSize', _COORD),
+        ]
 
-    _STD_OUTPUT_HANDLE = 0xfffffff5 # (DWORD)-11
-    _STD_ERROR_HANDLE = 0xfffffff4  # (DWORD)-12
+    _STD_OUTPUT_HANDLE = 0xFFFFFFF5  # (DWORD)-11
+    _STD_ERROR_HANDLE = 0xFFFFFFF4  # (DWORD)-12
 
     _FOREGROUND_BLUE = 0x0001
     _FOREGROUND_GREEN = 0x0002
@@ -443,53 +479,57 @@
 
     # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
     w32effects = {
-        'none': -1,
-        'black': 0,
-        'red': _FOREGROUND_RED,
-        'green': _FOREGROUND_GREEN,
-        'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
-        'blue': _FOREGROUND_BLUE,
-        'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
-        'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
-        'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
-        'bold': _FOREGROUND_INTENSITY,
-        'black_background': 0x100,                  # unused value > 0x0f
-        'red_background': _BACKGROUND_RED,
-        'green_background': _BACKGROUND_GREEN,
-        'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
-        'blue_background': _BACKGROUND_BLUE,
-        'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
-        'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
-        'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
-                             _BACKGROUND_BLUE),
-        'bold_background': _BACKGROUND_INTENSITY,
-        'underline': _COMMON_LVB_UNDERSCORE,  # double-byte charsets only
-        'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
+        b'none': -1,
+        b'black': 0,
+        b'red': _FOREGROUND_RED,
+        b'green': _FOREGROUND_GREEN,
+        b'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
+        b'blue': _FOREGROUND_BLUE,
+        b'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
+        b'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
+        b'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
+        b'bold': _FOREGROUND_INTENSITY,
+        b'black_background': 0x100,  # unused value > 0x0f
+        b'red_background': _BACKGROUND_RED,
+        b'green_background': _BACKGROUND_GREEN,
+        b'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
+        b'blue_background': _BACKGROUND_BLUE,
+        b'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
+        b'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
+        b'white_background': (
+            _BACKGROUND_RED | _BACKGROUND_GREEN | _BACKGROUND_BLUE
+        ),
+        b'bold_background': _BACKGROUND_INTENSITY,
+        b'underline': _COMMON_LVB_UNDERSCORE,  # double-byte charsets only
+        b'inverse': _COMMON_LVB_REVERSE_VIDEO,  # double-byte charsets only
     }
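
Unlike ANSI escapes, the win32 console packs color into attribute bits,
so compound colors are ORs of the primaries tabled above; for example::

    FOREGROUND_RED, FOREGROUND_GREEN, FOREGROUND_INTENSITY = 0x4, 0x2, 0x8
    yellow_bold = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_INTENSITY
    assert yellow_bold == 0x0E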
 
-    passthrough = {_FOREGROUND_INTENSITY,
-                   _BACKGROUND_INTENSITY,
-                   _COMMON_LVB_UNDERSCORE,
-                   _COMMON_LVB_REVERSE_VIDEO}
+    passthrough = {
+        _FOREGROUND_INTENSITY,
+        _BACKGROUND_INTENSITY,
+        _COMMON_LVB_UNDERSCORE,
+        _COMMON_LVB_REVERSE_VIDEO,
+    }
 
     stdout = _kernel32.GetStdHandle(
-                  _STD_OUTPUT_HANDLE)  # don't close the handle returned
+        _STD_OUTPUT_HANDLE
+    )  # don't close the handle returned
     if stdout is None or stdout == _INVALID_HANDLE_VALUE:
         w32effects = None
     else:
         csbi = _CONSOLE_SCREEN_BUFFER_INFO()
-        if not _kernel32.GetConsoleScreenBufferInfo(
-                    stdout, ctypes.byref(csbi)):
+        if not _kernel32.GetConsoleScreenBufferInfo(stdout, ctypes.byref(csbi)):
             # stdout may not support GetConsoleScreenBufferInfo()
             # when called from subprocess or redirected
             w32effects = None
         else:
             origattr = csbi.wAttributes
-            ansire = re.compile(br'\033\[([^m]*)m([^\033]*)(.*)',
-                                re.MULTILINE | re.DOTALL)
+            ansire = re.compile(
+                br'\033\[([^m]*)m([^\033]*)(.*)', re.MULTILINE | re.DOTALL
+            )
 
     def win32print(ui, writefunc, text, **opts):
-        label = opts.get(r'label', '')
+        label = opts.get(r'label', b'')
         attr = origattr
 
         def mapcolor(val, attr):
@@ -497,14 +537,14 @@
                 return origattr
             elif val in passthrough:
                 return attr | val
-            elif val > 0x0f:
-                return (val & 0x70) | (attr & 0x8f)
+            elif val > 0x0F:
+                return (val & 0x70) | (attr & 0x8F)
             else:
-                return (val & 0x07) | (attr & 0xf8)
+                return (val & 0x07) | (attr & 0xF8)
 
         # determine console attributes based on labels
         for l in label.split():
-            style = ui._styles.get(l, '')
+            style = ui._styles.get(l, b'')
             for effect in style.split():
                 try:
                     attr = mapcolor(w32effects[effect], attr)
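
``mapcolor()`` keeps the foreground color in the low three attribute bits
and the background in bits 4-6, masking only the half it replaces. A
standalone sketch of that masking (``0x07`` assumed as the console's
original attribute)::

    def mapcolor(val, attr):
        if val > 0x0F:                       # background color
            return (val & 0x70) | (attr & 0x8F)
        return (val & 0x07) | (attr & 0xF8)  # foreground color

    attr = mapcolor(0x04, 0x07)  # red text on default attrs -> 0x04
    attr = mapcolor(0x40, attr)  # add a red background       -> 0x44
    assert attr == 0x44
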
--- a/mercurial/commands.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/commands.py	Mon Oct 21 11:09:48 2019 -0400
@@ -22,6 +22,7 @@
     wdirhex,
     wdirrev,
 )
+from .pycompat import open
 from . import (
     archival,
     bookmarks,
@@ -80,37 +81,69 @@
 # common command options
 
 globalopts = [
-    ('R', 'repository', '',
-     _('repository root directory or name of overlay bundle file'),
-     _('REPO')),
-    ('', 'cwd', '',
-     _('change working directory'), _('DIR')),
-    ('y', 'noninteractive', None,
-     _('do not prompt, automatically pick the first choice for all prompts')),
-    ('q', 'quiet', None, _('suppress output')),
-    ('v', 'verbose', None, _('enable additional output')),
-    ('', 'color', '',
-     # i18n: 'always', 'auto', 'never', and 'debug' are keywords
-     # and should not be translated
-     _("when to colorize (boolean, always, auto, never, or debug)"),
-     _('TYPE')),
-    ('', 'config', [],
-     _('set/override config option (use \'section.name=value\')'),
-     _('CONFIG')),
-    ('', 'debug', None, _('enable debugging output')),
-    ('', 'debugger', None, _('start debugger')),
-    ('', 'encoding', encoding.encoding, _('set the charset encoding'),
-     _('ENCODE')),
-    ('', 'encodingmode', encoding.encodingmode,
-     _('set the charset encoding mode'), _('MODE')),
-    ('', 'traceback', None, _('always print a traceback on exception')),
-    ('', 'time', None, _('time how long the command takes')),
-    ('', 'profile', None, _('print command execution profile')),
-    ('', 'version', None, _('output version information and exit')),
-    ('h', 'help', None, _('display help and exit')),
-    ('', 'hidden', False, _('consider hidden changesets')),
-    ('', 'pager', 'auto',
-     _("when to paginate (boolean, always, auto, or never)"), _('TYPE')),
+    (
+        b'R',
+        b'repository',
+        b'',
+        _(b'repository root directory or name of overlay bundle file'),
+        _(b'REPO'),
+    ),
+    (b'', b'cwd', b'', _(b'change working directory'), _(b'DIR')),
+    (
+        b'y',
+        b'noninteractive',
+        None,
+        _(
+            b'do not prompt, automatically pick the first choice for all prompts'
+        ),
+    ),
+    (b'q', b'quiet', None, _(b'suppress output')),
+    (b'v', b'verbose', None, _(b'enable additional output')),
+    (
+        b'',
+        b'color',
+        b'',
+        # i18n: 'always', 'auto', 'never', and 'debug' are keywords
+        # and should not be translated
+        _(b"when to colorize (boolean, always, auto, never, or debug)"),
+        _(b'TYPE'),
+    ),
+    (
+        b'',
+        b'config',
+        [],
+        _(b'set/override config option (use \'section.name=value\')'),
+        _(b'CONFIG'),
+    ),
+    (b'', b'debug', None, _(b'enable debugging output')),
+    (b'', b'debugger', None, _(b'start debugger')),
+    (
+        b'',
+        b'encoding',
+        encoding.encoding,
+        _(b'set the charset encoding'),
+        _(b'ENCODE'),
+    ),
+    (
+        b'',
+        b'encodingmode',
+        encoding.encodingmode,
+        _(b'set the charset encoding mode'),
+        _(b'MODE'),
+    ),
+    (b'', b'traceback', None, _(b'always print a traceback on exception')),
+    (b'', b'time', None, _(b'time how long the command takes')),
+    (b'', b'profile', None, _(b'print command execution profile')),
+    (b'', b'version', None, _(b'output version information and exit')),
+    (b'h', b'help', None, _(b'display help and exit')),
+    (b'', b'hidden', False, _(b'consider hidden changesets')),
+    (
+        b'',
+        b'pager',
+        b'auto',
+        _(b"when to paginate (boolean, always, auto, or never)"),
+        _(b'TYPE'),
+    ),
 ]
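
Each entry in a flag list such as ``globalopts`` is a fancyopts tuple of
``(short name, long name, default, help[, value label])``, and the type
of the default decides how the flag parses. Hypothetical entries by way
of illustration::

    boolopt = (b'q', b'quiet', None, b'suppress output')           # switch
    stropt = (b'', b'cwd', b'', b'change working dir', b'DIR')     # one value
    listopt = (b'', b'config', [], b'override config', b'CONFIG')  # repeats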
 
 dryrunopts = cmdutil.dryrunopts
@@ -118,6 +151,7 @@
 walkopts = cmdutil.walkopts
 commitopts = cmdutil.commitopts
 commitopts2 = cmdutil.commitopts2
+commitopts3 = cmdutil.commitopts3
 formatteropts = cmdutil.formatteropts
 templateopts = cmdutil.templateopts
 logopts = cmdutil.logopts
@@ -131,9 +165,13 @@
 
 # Commands start here, listed alphabetically
 
-@command('abort',
-    dryrunopts, helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
-    helpbasic=True)
+
+@command(
+    b'abort',
+    dryrunopts,
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+    helpbasic=True,
+)
 def abort(ui, repo, **opts):
     """abort an unfinished operation (EXPERIMENTAL)
 
@@ -145,20 +183,31 @@
     dryrun = opts.get(r'dry_run')
     abortstate = cmdutil.getunfinishedstate(repo)
     if not abortstate:
-        raise error.Abort(_('no operation in progress'))
+        raise error.Abort(_(b'no operation in progress'))
     if not abortstate.abortfunc:
-        raise error.Abort((_("%s in progress but does not support 'hg abort'") %
-                            (abortstate._opname)), hint=abortstate.hint())
+        raise error.Abort(
+            (
+                _(b"%s in progress but does not support 'hg abort'")
+                % (abortstate._opname)
+            ),
+            hint=abortstate.hint(),
+        )
     if dryrun:
-        ui.status(_('%s in progress, will be aborted\n') % (abortstate._opname))
+        ui.status(
+            _(b'%s in progress, will be aborted\n') % (abortstate._opname)
+        )
         return
     return abortstate.abortfunc(ui, repo)
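
``@command`` registers the function in the command table under its name
(aliases follow ``|``) together with its flag list and help metadata. A
toy sketch of the mechanism, not the real registrar API::

    table = {}

    def command(name, optlist=(), **meta):
        def register(func):
            table[name] = (func, list(optlist), meta)
            return func
        return register

    @command(b'demo', [], helpbasic=True)
    def demo(ui, repo, **opts):
        return 0

    assert b'demo' in table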
 
-@command('add',
+
+@command(
+    b'add',
     walkopts + subrepoopts + dryrunopts,
-    _('[OPTION]... [FILE]...'),
+    _(b'[OPTION]... [FILE]...'),
     helpcategory=command.CATEGORY_WORKING_DIRECTORY,
-    helpbasic=True, inferrepo=True)
+    helpbasic=True,
+    inferrepo=True,
+)
 def add(ui, repo, *pats, **opts):
     """add the specified files on the next commit
 
@@ -204,14 +253,17 @@
 
     m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
     uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
-    rejected = cmdutil.add(ui, repo, m, "", uipathfn, False, **opts)
+    rejected = cmdutil.add(ui, repo, m, b"", uipathfn, False, **opts)
     return rejected and 1 or 0
 
-@command('addremove',
+
+@command(
+    b'addremove',
     similarityopts + subrepoopts + walkopts + dryrunopts,
-    _('[OPTION]... [FILE]...'),
+    _(b'[OPTION]... [FILE]...'),
     helpcategory=command.CATEGORY_WORKING_DIRECTORY,
-    inferrepo=True)
+    inferrepo=True,
+)
 def addremove(ui, repo, *pats, **opts):
     """add all new files, delete all missing files
 
@@ -275,30 +327,53 @@
     Returns 0 if all files are successfully added.
     """
     opts = pycompat.byteskwargs(opts)
-    if not opts.get('similarity'):
-        opts['similarity'] = '100'
+    if not opts.get(b'similarity'):
+        opts[b'similarity'] = b'100'
     matcher = scmutil.match(repo[None], pats, opts)
     relative = scmutil.anypats(pats, opts)
     uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
-    return scmutil.addremove(repo, matcher, "", uipathfn, opts)
-
-@command('annotate|blame',
-    [('r', 'rev', '', _('annotate the specified revision'), _('REV')),
-    ('', 'follow', None,
-     _('follow copies/renames and list the filename (DEPRECATED)')),
-    ('', 'no-follow', None, _("don't follow copies and renames")),
-    ('a', 'text', None, _('treat all files as text')),
-    ('u', 'user', None, _('list the author (long with -v)')),
-    ('f', 'file', None, _('list the filename')),
-    ('d', 'date', None, _('list the date (short with -q)')),
-    ('n', 'number', None, _('list the revision number (default)')),
-    ('c', 'changeset', None, _('list the changeset')),
-    ('l', 'line-number', None, _('show line number at the first appearance')),
-    ('', 'skip', [], _('revision to not display (EXPERIMENTAL)'), _('REV')),
-    ] + diffwsopts + walkopts + formatteropts,
-    _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
+    return scmutil.addremove(repo, matcher, b"", uipathfn, opts)
+
+
+@command(
+    b'annotate|blame',
+    [
+        (b'r', b'rev', b'', _(b'annotate the specified revision'), _(b'REV')),
+        (
+            b'',
+            b'follow',
+            None,
+            _(b'follow copies/renames and list the filename (DEPRECATED)'),
+        ),
+        (b'', b'no-follow', None, _(b"don't follow copies and renames")),
+        (b'a', b'text', None, _(b'treat all files as text')),
+        (b'u', b'user', None, _(b'list the author (long with -v)')),
+        (b'f', b'file', None, _(b'list the filename')),
+        (b'd', b'date', None, _(b'list the date (short with -q)')),
+        (b'n', b'number', None, _(b'list the revision number (default)')),
+        (b'c', b'changeset', None, _(b'list the changeset')),
+        (
+            b'l',
+            b'line-number',
+            None,
+            _(b'show line number at the first appearance'),
+        ),
+        (
+            b'',
+            b'skip',
+            [],
+            _(b'revision to not display (EXPERIMENTAL)'),
+            _(b'REV'),
+        ),
+    ]
+    + diffwsopts
+    + walkopts
+    + formatteropts,
+    _(b'[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
     helpcategory=command.CATEGORY_FILE_CONTENTS,
-    helpbasic=True, inferrepo=True)
+    helpbasic=True,
+    inferrepo=True,
+)
 def annotate(ui, repo, *pats, **opts):
     """show changeset information by line for each file
 
@@ -339,97 +414,122 @@
     """
     opts = pycompat.byteskwargs(opts)
     if not pats:
-        raise error.Abort(_('at least one filename or pattern is required'))
-
-    if opts.get('follow'):
+        raise error.Abort(_(b'at least one filename or pattern is required'))
+
+    if opts.get(b'follow'):
         # --follow is deprecated and now just an alias for -f/--file
         # to mimic the behavior of Mercurial before version 1.5
-        opts['file'] = True
-
-    if (not opts.get('user') and not opts.get('changeset')
-        and not opts.get('date') and not opts.get('file')):
-        opts['number'] = True
-
-    linenumber = opts.get('line_number') is not None
-    if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
-        raise error.Abort(_('at least one of -n/-c is required for -l'))
-
-    rev = opts.get('rev')
+        opts[b'file'] = True
+
+    if (
+        not opts.get(b'user')
+        and not opts.get(b'changeset')
+        and not opts.get(b'date')
+        and not opts.get(b'file')
+    ):
+        opts[b'number'] = True
+
+    linenumber = opts.get(b'line_number') is not None
+    if (
+        linenumber
+        and (not opts.get(b'changeset'))
+        and (not opts.get(b'number'))
+    ):
+        raise error.Abort(_(b'at least one of -n/-c is required for -l'))
+
+    rev = opts.get(b'rev')
     if rev:
-        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
     ctx = scmutil.revsingle(repo, rev)
 
-    ui.pager('annotate')
-    rootfm = ui.formatter('annotate', opts)
+    ui.pager(b'annotate')
+    rootfm = ui.formatter(b'annotate', opts)
     if ui.debugflag:
         shorthex = pycompat.identity
     else:
+
         def shorthex(h):
             return h[:12]
+
     if ui.quiet:
         datefunc = dateutil.shortdate
     else:
         datefunc = dateutil.datestr
     if ctx.rev() is None:
-        if opts.get('changeset'):
+        if opts.get(b'changeset'):
             # omit "+" suffix which is appended to node hex
             def formatrev(rev):
                 if rev == wdirrev:
-                    return '%d' % ctx.p1().rev()
+                    return b'%d' % ctx.p1().rev()
                 else:
-                    return '%d' % rev
+                    return b'%d' % rev
+
         else:
+
             def formatrev(rev):
                 if rev == wdirrev:
-                    return '%d+' % ctx.p1().rev()
+                    return b'%d+' % ctx.p1().rev()
                 else:
-                    return '%d ' % rev
+                    return b'%d ' % rev
+
         def formathex(h):
             if h == wdirhex:
-                return '%s+' % shorthex(hex(ctx.p1().node()))
+                return b'%s+' % shorthex(hex(ctx.p1().node()))
             else:
-                return '%s ' % shorthex(h)
+                return b'%s ' % shorthex(h)
+
     else:
         formatrev = b'%d'.__mod__
         formathex = shorthex
 
     opmap = [
-        ('user', ' ', lambda x: x.fctx.user(), ui.shortuser),
-        ('rev', ' ', lambda x: scmutil.intrev(x.fctx), formatrev),
-        ('node', ' ', lambda x: hex(scmutil.binnode(x.fctx)), formathex),
-        ('date', ' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)),
-        ('path', ' ', lambda x: x.fctx.path(), pycompat.bytestr),
-        ('lineno', ':', lambda x: x.lineno, pycompat.bytestr),
+        (b'user', b' ', lambda x: x.fctx.user(), ui.shortuser),
+        (b'rev', b' ', lambda x: scmutil.intrev(x.fctx), formatrev),
+        (b'node', b' ', lambda x: hex(scmutil.binnode(x.fctx)), formathex),
+        (b'date', b' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)),
+        (b'path', b' ', lambda x: x.fctx.path(), pycompat.bytestr),
+        (b'lineno', b':', lambda x: x.lineno, pycompat.bytestr),
     ]
     opnamemap = {
-        'rev': 'number',
-        'node': 'changeset',
-        'path': 'file',
-        'lineno': 'line_number',
+        b'rev': b'number',
+        b'node': b'changeset',
+        b'path': b'file',
+        b'lineno': b'line_number',
     }
 
     if rootfm.isplain():
+
         def makefunc(get, fmt):
             return lambda x: fmt(get(x))
+
     else:
+
         def makefunc(get, fmt):
             return get
+
     datahint = rootfm.datahint()
-    funcmap = [(makefunc(get, fmt), sep) for fn, sep, get, fmt in opmap
-               if opts.get(opnamemap.get(fn, fn)) or fn in datahint]
-    funcmap[0] = (funcmap[0][0], '') # no separator in front of first column
-    fields = ' '.join(fn for fn, sep, get, fmt in opmap
-                      if opts.get(opnamemap.get(fn, fn)) or fn in datahint)
+    funcmap = [
+        (makefunc(get, fmt), sep)
+        for fn, sep, get, fmt in opmap
+        if opts.get(opnamemap.get(fn, fn)) or fn in datahint
+    ]
+    funcmap[0] = (funcmap[0][0], b'')  # no separator in front of first column
+    fields = b' '.join(
+        fn
+        for fn, sep, get, fmt in opmap
+        if opts.get(opnamemap.get(fn, fn)) or fn in datahint
+    )
 
     def bad(x, y):
-        raise error.Abort("%s: %s" % (x, y))
+        raise error.Abort(b"%s: %s" % (x, y))
 
     m = scmutil.match(ctx, pats, opts, badfn=bad)
 
-    follow = not opts.get('no_follow')
-    diffopts = patch.difffeatureopts(ui, opts, section='annotate',
-                                     whitespace=True)
-    skiprevs = opts.get('skip')
+    follow = not opts.get(b'no_follow')
+    diffopts = patch.difffeatureopts(
+        ui, opts, section=b'annotate', whitespace=True
+    )
+    skiprevs = opts.get(b'skip')
     if skiprevs:
         skiprevs = scmutil.revrange(repo, skiprevs)
 
@@ -438,13 +538,14 @@
         fctx = ctx[abs]
         rootfm.startitem()
         rootfm.data(path=abs)
-        if not opts.get('text') and fctx.isbinary():
-            rootfm.plain(_("%s: binary file\n") % uipathfn(abs))
+        if not opts.get(b'text') and fctx.isbinary():
+            rootfm.plain(_(b"%s: binary file\n") % uipathfn(abs))
             continue
 
-        fm = rootfm.nested('lines', tmpl='{rev}: {line}')
-        lines = fctx.annotate(follow=follow, skiprevs=skiprevs,
-                              diffopts=diffopts)
+        fm = rootfm.nested(b'lines', tmpl=b'{rev}: {line}')
+        lines = fctx.annotate(
+            follow=follow, skiprevs=skiprevs, diffopts=diffopts
+        )
         if not lines:
             fm.end()
             continue
@@ -456,36 +557,47 @@
             if fm.isplain():
                 sizes = [encoding.colwidth(x) for x in l]
                 ml = max(sizes)
-                formats.append([sep + ' ' * (ml - w) + '%s' for w in sizes])
+                formats.append([sep + b' ' * (ml - w) + b'%s' for w in sizes])
             else:
-                formats.append(['%s' for x in l])
+                formats.append([b'%s' for x in l])
             pieces.append(l)
 
         for f, p, n in zip(zip(*formats), zip(*pieces), lines):
             fm.startitem()
             fm.context(fctx=n.fctx)
-            fm.write(fields, "".join(f), *p)
+            fm.write(fields, b"".join(f), *p)
             if n.skip:
-                fmt = "* %s"
+                fmt = b"* %s"
             else:
-                fmt = ": %s"
-            fm.write('line', fmt, n.text)
-
-        if not lines[-1].text.endswith('\n'):
-            fm.plain('\n')
+                fmt = b": %s"
+            fm.write(b'line', fmt, n.text)
+
+        if not lines[-1].text.endswith(b'\n'):
+            fm.plain(b'\n')
         fm.end()
 
     rootfm.end()
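
In plain mode each annotate column is right-aligned to its widest cell;
the real code measures display width with ``encoding.colwidth()``, which
plain ``len()`` approximates below::

    cells = [b'7', b'1234']
    ml = max(len(c) for c in cells)
    assert [b' ' * (ml - len(c)) + c for c in cells] == [b'   7', b'1234']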
 
-@command('archive',
-    [('', 'no-decode', None, _('do not pass files through decoders')),
-    ('p', 'prefix', '', _('directory prefix for files in archive'),
-     _('PREFIX')),
-    ('r', 'rev', '', _('revision to distribute'), _('REV')),
-    ('t', 'type', '', _('type of distribution to create'), _('TYPE')),
-    ] + subrepoopts + walkopts,
-    _('[OPTION]... DEST'),
-    helpcategory=command.CATEGORY_IMPORT_EXPORT)
+
+@command(
+    b'archive',
+    [
+        (b'', b'no-decode', None, _(b'do not pass files through decoders')),
+        (
+            b'p',
+            b'prefix',
+            b'',
+            _(b'directory prefix for files in archive'),
+            _(b'PREFIX'),
+        ),
+        (b'r', b'rev', b'', _(b'revision to distribute'), _(b'REV')),
+        (b't', b'type', b'', _(b'type of distribution to create'), _(b'TYPE')),
+    ]
+    + subrepoopts
+    + walkopts,
+    _(b'[OPTION]... DEST'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT,
+)
 def archive(ui, repo, dest, **opts):
     '''create an unversioned archive of a repository revision
 
@@ -513,6 +625,7 @@
     :``tar``:   tar archive, uncompressed
     :``tbz2``:  tar archive, compressed using bzip2
     :``tgz``:   tar archive, compressed using gzip
+    :``txz``:   tar archive, compressed using lzma (only in Python 3)
     :``uzip``:  zip archive, uncompressed
     :``zip``:   zip archive, compressed using deflate
 
@@ -528,44 +641,74 @@
     '''
 
     opts = pycompat.byteskwargs(opts)
-    rev = opts.get('rev')
+    rev = opts.get(b'rev')
     if rev:
-        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
     ctx = scmutil.revsingle(repo, rev)
     if not ctx:
-        raise error.Abort(_('no working directory: please specify a revision'))
+        raise error.Abort(_(b'no working directory: please specify a revision'))
     node = ctx.node()
     dest = cmdutil.makefilename(ctx, dest)
     if os.path.realpath(dest) == repo.root:
-        raise error.Abort(_('repository root cannot be destination'))
-
-    kind = opts.get('type') or archival.guesskind(dest) or 'files'
-    prefix = opts.get('prefix')
-
-    if dest == '-':
-        if kind == 'files':
-            raise error.Abort(_('cannot archive plain files to stdout'))
+        raise error.Abort(_(b'repository root cannot be destination'))
+
+    kind = opts.get(b'type') or archival.guesskind(dest) or b'files'
+    prefix = opts.get(b'prefix')
+
+    if dest == b'-':
+        if kind == b'files':
+            raise error.Abort(_(b'cannot archive plain files to stdout'))
         dest = cmdutil.makefileobj(ctx, dest)
         if not prefix:
-            prefix = os.path.basename(repo.root) + '-%h'
+            prefix = os.path.basename(repo.root) + b'-%h'
 
     prefix = cmdutil.makefilename(ctx, prefix)
     match = scmutil.match(ctx, [], opts)
-    archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
-                     match, prefix, subrepos=opts.get('subrepos'))
-
-@command('backout',
-    [('', 'merge', None, _('merge with old dirstate parent after backout')),
-    ('', 'commit', None,
-     _('commit if no conflicts were encountered (DEPRECATED)')),
-    ('', 'no-commit', None, _('do not commit')),
-    ('', 'parent', '',
-     _('parent to choose when backing out merge (DEPRECATED)'), _('REV')),
-    ('r', 'rev', '', _('revision to backout'), _('REV')),
-    ('e', 'edit', False, _('invoke editor on commit messages')),
-    ] + mergetoolopts + walkopts + commitopts + commitopts2,
-    _('[OPTION]... [-r] REV'),
-    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
+    archival.archive(
+        repo,
+        dest,
+        node,
+        kind,
+        not opts.get(b'no_decode'),
+        match,
+        prefix,
+        subrepos=opts.get(b'subrepos'),
+    )
+
+
+@command(
+    b'backout',
+    [
+        (
+            b'',
+            b'merge',
+            None,
+            _(b'merge with old dirstate parent after backout'),
+        ),
+        (
+            b'',
+            b'commit',
+            None,
+            _(b'commit if no conflicts were encountered (DEPRECATED)'),
+        ),
+        (b'', b'no-commit', None, _(b'do not commit')),
+        (
+            b'',
+            b'parent',
+            b'',
+            _(b'parent to choose when backing out merge (DEPRECATED)'),
+            _(b'REV'),
+        ),
+        (b'r', b'rev', b'', _(b'revision to backout'), _(b'REV')),
+        (b'e', b'edit', False, _(b'invoke editor on commit messages')),
+    ]
+    + mergetoolopts
+    + walkopts
+    + commitopts
+    + commitopts2,
+    _(b'[OPTION]... [-r] REV'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+)
 def backout(ui, repo, node=None, rev=None, **opts):
     '''reverse effect of earlier changeset
 
@@ -621,25 +764,26 @@
     with repo.wlock(), repo.lock():
         return _dobackout(ui, repo, node, rev, **opts)
 
+
 def _dobackout(ui, repo, node=None, rev=None, **opts):
     opts = pycompat.byteskwargs(opts)
-    if opts.get('commit') and opts.get('no_commit'):
-        raise error.Abort(_("cannot use --commit with --no-commit"))
-    if opts.get('merge') and opts.get('no_commit'):
-        raise error.Abort(_("cannot use --merge with --no-commit"))
+    if opts.get(b'commit') and opts.get(b'no_commit'):
+        raise error.Abort(_(b"cannot use --commit with --no-commit"))
+    if opts.get(b'merge') and opts.get(b'no_commit'):
+        raise error.Abort(_(b"cannot use --merge with --no-commit"))
 
     if rev and node:
-        raise error.Abort(_("please specify just one revision"))
+        raise error.Abort(_(b"please specify just one revision"))
 
     if not rev:
         rev = node
 
     if not rev:
-        raise error.Abort(_("please specify a revision to backout"))
-
-    date = opts.get('date')
+        raise error.Abort(_(b"please specify a revision to backout"))
+
+    date = opts.get(b'date')
     if date:
-        opts['date'] = dateutil.parsedate(date)
+        opts[b'date'] = dateutil.parsedate(date)
 
     cmdutil.checkunfinished(repo)
     cmdutil.bailifchanged(repo)
@@ -647,94 +791,128 @@
 
     op1, op2 = repo.dirstate.parents()
     if not repo.changelog.isancestor(node, op1):
-        raise error.Abort(_('cannot backout change that is not an ancestor'))
+        raise error.Abort(_(b'cannot backout change that is not an ancestor'))
 
     p1, p2 = repo.changelog.parents(node)
     if p1 == nullid:
-        raise error.Abort(_('cannot backout a change with no parents'))
+        raise error.Abort(_(b'cannot backout a change with no parents'))
     if p2 != nullid:
-        if not opts.get('parent'):
-            raise error.Abort(_('cannot backout a merge changeset'))
-        p = repo.lookup(opts['parent'])
+        if not opts.get(b'parent'):
+            raise error.Abort(_(b'cannot backout a merge changeset'))
+        p = repo.lookup(opts[b'parent'])
         if p not in (p1, p2):
-            raise error.Abort(_('%s is not a parent of %s') %
-                             (short(p), short(node)))
+            raise error.Abort(
+                _(b'%s is not a parent of %s') % (short(p), short(node))
+            )
         parent = p
     else:
-        if opts.get('parent'):
-            raise error.Abort(_('cannot use --parent on non-merge changeset'))
+        if opts.get(b'parent'):
+            raise error.Abort(_(b'cannot use --parent on non-merge changeset'))
         parent = p1
 
     # the backout should appear on the same branch
     branch = repo.dirstate.branch()
     bheads = repo.branchheads(branch)
     rctx = scmutil.revsingle(repo, hex(parent))
-    if not opts.get('merge') and op1 != node:
-        with dirstateguard.dirstateguard(repo, 'backout'):
-            overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
-            with ui.configoverride(overrides, 'backout'):
-                stats = mergemod.update(repo, parent, branchmerge=True,
-                                        force=True, ancestor=node,
-                                        mergeancestor=False)
+    if not opts.get(b'merge') and op1 != node:
+        with dirstateguard.dirstateguard(repo, b'backout'):
+            overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+            with ui.configoverride(overrides, b'backout'):
+                stats = mergemod.update(
+                    repo,
+                    parent,
+                    branchmerge=True,
+                    force=True,
+                    ancestor=node,
+                    mergeancestor=False,
+                )
             repo.setparents(op1, op2)
         hg._showstats(repo, stats)
         if stats.unresolvedcount:
-            repo.ui.status(_("use 'hg resolve' to retry unresolved "
-                             "file merges\n"))
+            repo.ui.status(
+                _(b"use 'hg resolve' to retry unresolved file merges\n")
+            )
             return 1
     else:
         hg.clean(repo, node, show_stats=False)
         repo.dirstate.setbranch(branch)
         cmdutil.revert(ui, repo, rctx, repo.dirstate.parents())
 
-    if opts.get('no_commit'):
-        msg = _("changeset %s backed out, "
-                "don't forget to commit.\n")
+    if opts.get(b'no_commit'):
+        msg = _(b"changeset %s backed out, don't forget to commit.\n")
         ui.status(msg % short(node))
         return 0
 
     def commitfunc(ui, repo, message, match, opts):
-        editform = 'backout'
-        e = cmdutil.getcommiteditor(editform=editform,
-                                    **pycompat.strkwargs(opts))
+        editform = b'backout'
+        e = cmdutil.getcommiteditor(
+            editform=editform, **pycompat.strkwargs(opts)
+        )
         if not message:
             # we don't translate commit messages
-            message = "Backed out changeset %s" % short(node)
+            message = b"Backed out changeset %s" % short(node)
             e = cmdutil.getcommiteditor(edit=True, editform=editform)
-        return repo.commit(message, opts.get('user'), opts.get('date'),
-                           match, editor=e)
+        return repo.commit(
+            message, opts.get(b'user'), opts.get(b'date'), match, editor=e
+        )
+
     newnode = cmdutil.commit(ui, repo, commitfunc, [], opts)
     if not newnode:
-        ui.status(_("nothing changed\n"))
+        ui.status(_(b"nothing changed\n"))
         return 1
     cmdutil.commitstatus(repo, newnode, branch, bheads)
 
     def nice(node):
-        return '%d:%s' % (repo.changelog.rev(node), short(node))
-    ui.status(_('changeset %s backs out changeset %s\n') %
-              (nice(repo.changelog.tip()), nice(node)))
-    if opts.get('merge') and op1 != node:
+        return b'%d:%s' % (repo.changelog.rev(node), short(node))
+
+    ui.status(
+        _(b'changeset %s backs out changeset %s\n')
+        % (nice(repo.changelog.tip()), nice(node))
+    )
+    if opts.get(b'merge') and op1 != node:
         hg.clean(repo, op1, show_stats=False)
-        ui.status(_('merging with changeset %s\n')
-                  % nice(repo.changelog.tip()))
-        overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
-        with ui.configoverride(overrides, 'backout'):
+        ui.status(
+            _(b'merging with changeset %s\n') % nice(repo.changelog.tip())
+        )
+        overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+        with ui.configoverride(overrides, b'backout'):
             return hg.merge(repo, hex(repo.changelog.tip()))
     return 0
 
-@command('bisect',
-    [('r', 'reset', False, _('reset bisect state')),
-    ('g', 'good', False, _('mark changeset good')),
-    ('b', 'bad', False, _('mark changeset bad')),
-    ('s', 'skip', False, _('skip testing changeset')),
-    ('e', 'extend', False, _('extend the bisect range')),
-    ('c', 'command', '', _('use command to check changeset state'), _('CMD')),
-    ('U', 'noupdate', False, _('do not update to target'))],
-    _("[-gbsr] [-U] [-c CMD] [REV]"),
-    helpcategory=command.CATEGORY_CHANGE_NAVIGATION)
-def bisect(ui, repo, rev=None, extra=None, command=None,
-               reset=None, good=None, bad=None, skip=None, extend=None,
-               noupdate=None):
+
+@command(
+    b'bisect',
+    [
+        (b'r', b'reset', False, _(b'reset bisect state')),
+        (b'g', b'good', False, _(b'mark changeset good')),
+        (b'b', b'bad', False, _(b'mark changeset bad')),
+        (b's', b'skip', False, _(b'skip testing changeset')),
+        (b'e', b'extend', False, _(b'extend the bisect range')),
+        (
+            b'c',
+            b'command',
+            b'',
+            _(b'use command to check changeset state'),
+            _(b'CMD'),
+        ),
+        (b'U', b'noupdate', False, _(b'do not update to target')),
+    ],
+    _(b"[-gbsr] [-U] [-c CMD] [REV]"),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
+)
+def bisect(
+    ui,
+    repo,
+    rev=None,
+    extra=None,
+    command=None,
+    reset=None,
+    good=None,
+    bad=None,
+    skip=None,
+    extend=None,
+    noupdate=None,
+):
     """subdivision search of changesets
 
     This command helps to find changesets which introduce problems. To
@@ -817,32 +995,33 @@
     Returns 0 on success.
     """
     # backward compatibility
-    if rev in "good bad reset init".split():
-        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
+    if rev in b"good bad reset init".split():
+        ui.warn(_(b"(use of 'hg bisect <cmd>' is deprecated)\n"))
         cmd, rev, extra = rev, extra, None
-        if cmd == "good":
+        if cmd == b"good":
             good = True
-        elif cmd == "bad":
+        elif cmd == b"bad":
             bad = True
         else:
             reset = True
     elif extra:
-        raise error.Abort(_('incompatible arguments'))
+        raise error.Abort(_(b'incompatible arguments'))
 
     incompatibles = {
-        '--bad': bad,
-        '--command': bool(command),
-        '--extend': extend,
-        '--good': good,
-        '--reset': reset,
-        '--skip': skip,
+        b'--bad': bad,
+        b'--command': bool(command),
+        b'--extend': extend,
+        b'--good': good,
+        b'--reset': reset,
+        b'--skip': skip,
     }
 
     enabled = [x for x in incompatibles if incompatibles[x]]
 
     if len(enabled) > 1:
-        raise error.Abort(_('%s and %s are incompatible') %
-                          tuple(sorted(enabled)[0:2]))
+        raise error.Abort(
+            _(b'%s and %s are incompatible') % tuple(sorted(enabled)[0:2])
+        )
 
     if reset:
         hbisect.resetstate(repo)
@@ -855,15 +1034,15 @@
         if rev:
             nodes = [repo[i].node() for i in scmutil.revrange(repo, [rev])]
         else:
-            nodes = [repo.lookup('.')]
+            nodes = [repo.lookup(b'.')]
         if good:
-            state['good'] += nodes
+            state[b'good'] += nodes
         elif bad:
-            state['bad'] += nodes
+            state[b'bad'] += nodes
         elif skip:
-            state['skip'] += nodes
+            state[b'skip'] += nodes
         hbisect.save_state(repo, state)
-        if not (state['good'] and state['bad']):
+        if not (state[b'good'] and state[b'bad']):
             return
 
     def mayupdate(repo, node, show_stats=True):
@@ -880,38 +1059,46 @@
         changesets = 1
         if noupdate:
             try:
-                node = state['current'][0]
+                node = state[b'current'][0]
             except LookupError:
-                raise error.Abort(_('current bisect revision is unknown - '
-                                   'start a new bisect to fix'))
+                raise error.Abort(
+                    _(
+                        b'current bisect revision is unknown - '
+                        b'start a new bisect to fix'
+                    )
+                )
         else:
             node, p2 = repo.dirstate.parents()
             if p2 != nullid:
-                raise error.Abort(_('current bisect revision is a merge'))
+                raise error.Abort(_(b'current bisect revision is a merge'))
         if rev:
             node = repo[scmutil.revsingle(repo, rev, node)].node()
         try:
             while changesets:
                 # update state
-                state['current'] = [node]
+                state[b'current'] = [node]
                 hbisect.save_state(repo, state)
-                status = ui.system(command, environ={'HG_NODE': hex(node)},
-                                   blockedtag='bisect_check')
+                status = ui.system(
+                    command,
+                    environ={b'HG_NODE': hex(node)},
+                    blockedtag=b'bisect_check',
+                )
                 if status == 125:
-                    transition = "skip"
+                    transition = b"skip"
                 elif status == 0:
-                    transition = "good"
+                    transition = b"good"
                 # status < 0 means process was killed
                 elif status == 127:
-                    raise error.Abort(_("failed to execute %s") % command)
+                    raise error.Abort(_(b"failed to execute %s") % command)
                 elif status < 0:
-                    raise error.Abort(_("%s killed") % command)
+                    raise error.Abort(_(b"%s killed") % command)
                 else:
-                    transition = "bad"
+                    transition = b"bad"
                 state[transition].append(node)
                 ctx = repo[node]
-                ui.status(_('changeset %d:%s: %s\n') % (ctx.rev(), ctx,
-                                                        transition))
+                ui.status(
+                    _(b'changeset %d:%s: %s\n') % (ctx.rev(), ctx, transition)
+                )
                 hbisect.checkstate(state)
                 # bisect
                 nodes, changesets, bgood = hbisect.bisect(repo, state)
@@ -919,7 +1106,7 @@
                 node = nodes[0]
                 mayupdate(repo, node, show_stats=False)
         finally:
-            state['current'] = [node]
+            state[b'current'] = [node]
             hbisect.save_state(repo, state)
         hbisect.printresult(ui, repo, state, displayer, nodes, bgood)
         return
@@ -932,40 +1119,51 @@
         if not changesets:
             extendnode = hbisect.extendrange(repo, state, nodes, good)
             if extendnode is not None:
-                ui.write(_("Extending search to changeset %d:%s\n")
-                         % (extendnode.rev(), extendnode))
-                state['current'] = [extendnode.node()]
+                ui.write(
+                    _(b"Extending search to changeset %d:%s\n")
+                    % (extendnode.rev(), extendnode)
+                )
+                state[b'current'] = [extendnode.node()]
                 hbisect.save_state(repo, state)
                 return mayupdate(repo, extendnode.node())
-        raise error.Abort(_("nothing to extend"))
+        raise error.Abort(_(b"nothing to extend"))
 
     if changesets == 0:
         hbisect.printresult(ui, repo, state, displayer, nodes, good)
     else:
-        assert len(nodes) == 1 # only a single node can be tested next
+        assert len(nodes) == 1  # only a single node can be tested next
         node = nodes[0]
         # compute the approximate number of remaining tests
         tests, size = 0, 2
         while size <= changesets:
             tests, size = tests + 1, size * 2
         rev = repo.changelog.rev(node)
-        ui.write(_("Testing changeset %d:%s "
-                   "(%d changesets remaining, ~%d tests)\n")
-                 % (rev, short(node), changesets, tests))
-        state['current'] = [node]
+        ui.write(
+            _(
+                b"Testing changeset %d:%s "
+                b"(%d changesets remaining, ~%d tests)\n"
+            )
+            % (rev, short(node), changesets, tests)
+        )
+        state[b'current'] = [node]
         hbisect.save_state(repo, state)
         return mayupdate(repo, node)
 
-@command('bookmarks|bookmark',
-    [('f', 'force', False, _('force')),
-    ('r', 'rev', '', _('revision for bookmark action'), _('REV')),
-    ('d', 'delete', False, _('delete a given bookmark')),
-    ('m', 'rename', '', _('rename a given bookmark'), _('OLD')),
-    ('i', 'inactive', False, _('mark a bookmark inactive')),
-    ('l', 'list', False, _('list existing bookmarks')),
-    ] + formatteropts,
-    _('hg bookmarks [OPTIONS]... [NAME]...'),
-    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+
+@command(
+    b'bookmarks|bookmark',
+    [
+        (b'f', b'force', False, _(b'force')),
+        (b'r', b'rev', b'', _(b'revision for bookmark action'), _(b'REV')),
+        (b'd', b'delete', False, _(b'delete a given bookmark')),
+        (b'm', b'rename', b'', _(b'rename a given bookmark'), _(b'OLD')),
+        (b'i', b'inactive', False, _(b'mark a bookmark inactive')),
+        (b'l', b'list', False, _(b'list existing bookmarks')),
+    ]
+    + formatteropts,
+    _(b'hg bookmarks [OPTIONS]... [NAME]...'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def bookmark(ui, repo, *names, **opts):
     '''create a new bookmark or list existing bookmarks
 
@@ -1027,66 +1225,80 @@
           hg book -ql .
     '''
     opts = pycompat.byteskwargs(opts)
-    force = opts.get('force')
-    rev = opts.get('rev')
-    inactive = opts.get('inactive')  # meaning add/rename to inactive bookmark
-
-    selactions = [k for k in ['delete', 'rename', 'list'] if opts.get(k)]
+    force = opts.get(b'force')
+    rev = opts.get(b'rev')
+    inactive = opts.get(b'inactive')  # meaning add/rename to inactive bookmark
+
+    selactions = [k for k in [b'delete', b'rename', b'list'] if opts.get(k)]
     if len(selactions) > 1:
-        raise error.Abort(_('--%s and --%s are incompatible')
-                          % tuple(selactions[:2]))
+        raise error.Abort(
+            _(b'--%s and --%s are incompatible') % tuple(selactions[:2])
+        )
     if selactions:
         action = selactions[0]
     elif names or rev:
-        action = 'add'
+        action = b'add'
     elif inactive:
-        action = 'inactive'  # meaning deactivate
+        action = b'inactive'  # meaning deactivate
     else:
-        action = 'list'
-
-    if rev and action in {'delete', 'rename', 'list'}:
-        raise error.Abort(_("--rev is incompatible with --%s") % action)
-    if inactive and action in {'delete', 'list'}:
-        raise error.Abort(_("--inactive is incompatible with --%s") % action)
-    if not names and action in {'add', 'delete'}:
-        raise error.Abort(_("bookmark name required"))
-
-    if action in {'add', 'delete', 'rename', 'inactive'}:
-        with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
-            if action == 'delete':
+        action = b'list'
+
+    if rev and action in {b'delete', b'rename', b'list'}:
+        raise error.Abort(_(b"--rev is incompatible with --%s") % action)
+    if inactive and action in {b'delete', b'list'}:
+        raise error.Abort(_(b"--inactive is incompatible with --%s") % action)
+    if not names and action in {b'add', b'delete'}:
+        raise error.Abort(_(b"bookmark name required"))
+
+    if action in {b'add', b'delete', b'rename', b'inactive'}:
+        with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
+            if action == b'delete':
                 names = pycompat.maplist(repo._bookmarks.expandname, names)
                 bookmarks.delete(repo, tr, names)
-            elif action == 'rename':
+            elif action == b'rename':
                 if not names:
-                    raise error.Abort(_("new bookmark name required"))
+                    raise error.Abort(_(b"new bookmark name required"))
                 elif len(names) > 1:
-                    raise error.Abort(_("only one new bookmark name allowed"))
-                oldname = repo._bookmarks.expandname(opts['rename'])
+                    raise error.Abort(_(b"only one new bookmark name allowed"))
+                oldname = repo._bookmarks.expandname(opts[b'rename'])
                 bookmarks.rename(repo, tr, oldname, names[0], force, inactive)
-            elif action == 'add':
+            elif action == b'add':
                 bookmarks.addbookmarks(repo, tr, names, rev, force, inactive)
-            elif action == 'inactive':
+            elif action == b'inactive':
                 if len(repo._bookmarks) == 0:
-                    ui.status(_("no bookmarks set\n"))
+                    ui.status(_(b"no bookmarks set\n"))
                 elif not repo._activebookmark:
-                    ui.status(_("no active bookmark\n"))
+                    ui.status(_(b"no active bookmark\n"))
                 else:
                     bookmarks.deactivate(repo)
-    elif action == 'list':
+    elif action == b'list':
         names = pycompat.maplist(repo._bookmarks.expandname, names)
-        with ui.formatter('bookmarks', opts) as fm:
+        with ui.formatter(b'bookmarks', opts) as fm:
             bookmarks.printbookmarks(ui, repo, fm, names)
     else:
-        raise error.ProgrammingError('invalid action: %s' % action)
-
-@command('branch',
-    [('f', 'force', None,
-     _('set branch name even if it shadows an existing branch')),
-     ('C', 'clean', None, _('reset branch name to parent branch name')),
-     ('r', 'rev', [], _('change branches of the given revs (EXPERIMENTAL)')),
+        raise error.ProgrammingError(b'invalid action: %s' % action)
+
+
+@command(
+    b'branch',
+    [
+        (
+            b'f',
+            b'force',
+            None,
+            _(b'set branch name even if it shadows an existing branch'),
+        ),
+        (b'C', b'clean', None, _(b'reset branch name to parent branch name')),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'change branches of the given revs (EXPERIMENTAL)'),
+        ),
     ],
-    _('[-fC] [NAME]'),
-    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+    _(b'[-fC] [NAME]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def branch(ui, repo, label=None, **opts):
     """set or show the current branch name
 
@@ -1117,53 +1329,67 @@
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
-    revs = opts.get('rev')
+    revs = opts.get(b'rev')
     if label:
         label = label.strip()
 
-    if not opts.get('clean') and not label:
+    if not opts.get(b'clean') and not label:
         if revs:
-            raise error.Abort(_("no branch name specified for the revisions"))
-        ui.write("%s\n" % repo.dirstate.branch())
+            raise error.Abort(_(b"no branch name specified for the revisions"))
+        ui.write(b"%s\n" % repo.dirstate.branch())
         return
 
     with repo.wlock():
-        if opts.get('clean'):
-            label = repo['.'].branch()
+        if opts.get(b'clean'):
+            label = repo[b'.'].branch()
             repo.dirstate.setbranch(label)
-            ui.status(_('reset working directory to branch %s\n') % label)
+            ui.status(_(b'reset working directory to branch %s\n') % label)
         elif label:
 
-            scmutil.checknewlabel(repo, label, 'branch')
+            scmutil.checknewlabel(repo, label, b'branch')
             if revs:
                 return cmdutil.changebranch(ui, repo, revs, label)
 
-            if not opts.get('force') and label in repo.branchmap():
+            if not opts.get(b'force') and label in repo.branchmap():
                 if label not in [p.branch() for p in repo[None].parents()]:
-                    raise error.Abort(_('a branch of the same name already'
-                                       ' exists'),
-                                     # i18n: "it" refers to an existing branch
-                                     hint=_("use 'hg update' to switch to it"))
+                    raise error.Abort(
+                        _(b'a branch of the same name already exists'),
+                        # i18n: "it" refers to an existing branch
+                        hint=_(b"use 'hg update' to switch to it"),
+                    )
 
             repo.dirstate.setbranch(label)
-            ui.status(_('marked working directory as branch %s\n') % label)
+            ui.status(_(b'marked working directory as branch %s\n') % label)
 
             # find any open named branches aside from default
             for n, h, t, c in repo.branchmap().iterbranches():
-                if n != "default" and not c:
+                if n != b"default" and not c:
                     return 0
-            ui.status(_('(branches are permanent and global, '
-                        'did you want a bookmark?)\n'))
-
-@command('branches',
-    [('a', 'active', False,
-      _('show only branches that have unmerged heads (DEPRECATED)')),
-     ('c', 'closed', False, _('show normal and closed branches')),
-     ('r', 'rev', [], _('show branch name(s) of the given rev'))
-    ] + formatteropts,
-    _('[-c]'),
+            ui.status(
+                _(
+                    b'(branches are permanent and global, '
+                    b'did you want a bookmark?)\n'
+                )
+            )
+
+
+@command(
+    b'branches',
+    [
+        (
+            b'a',
+            b'active',
+            False,
+            _(b'show only branches that have unmerged heads (DEPRECATED)'),
+        ),
+        (b'c', b'closed', False, _(b'show normal and closed branches')),
+        (b'r', b'rev', [], _(b'show branch name(s) of the given rev')),
+    ]
+    + formatteropts,
+    _(b'[-c]'),
     helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
-    intents={INTENT_READONLY})
+    intents={INTENT_READONLY},
+)
 def branches(ui, repo, active=False, closed=False, **opts):
     """list repository named branches
 
@@ -1189,15 +1415,15 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    revs = opts.get('rev')
+    revs = opts.get(b'rev')
     selectedbranches = None
     if revs:
         revs = scmutil.revrange(repo, revs)
         getbi = repo.revbranchcache().branchinfo
         selectedbranches = {getbi(r)[0] for r in revs}
 
-    ui.pager('branches')
-    fm = ui.formatter('branches', opts)
+    ui.pager(b'branches')
+    fm = ui.formatter(b'branches', opts)
     hexfunc = fm.hexfunc
 
     allheads = set(repo.heads())
@@ -1210,55 +1436,90 @@
             openheads = set(repo.branchmap().iteropen(heads))
             isactive = bool(openheads & allheads)
         branches.append((tag, repo[tip], isactive, not isclosed))
-    branches.sort(key=lambda i: (i[2], i[1].rev(), i[0], i[3]),
-                  reverse=True)
+    branches.sort(key=lambda i: (i[2], i[1].rev(), i[0], i[3]), reverse=True)
 
     for tag, ctx, isactive, isopen in branches:
         if active and not isactive:
             continue
         if isactive:
-            label = 'branches.active'
-            notice = ''
+            label = b'branches.active'
+            notice = b''
         elif not isopen:
             if not closed:
                 continue
-            label = 'branches.closed'
-            notice = _(' (closed)')
+            label = b'branches.closed'
+            notice = _(b' (closed)')
         else:
-            label = 'branches.inactive'
-            notice = _(' (inactive)')
-        current = (tag == repo.dirstate.branch())
+            label = b'branches.inactive'
+            notice = _(b' (inactive)')
+        current = tag == repo.dirstate.branch()
         if current:
-            label = 'branches.current'
+            label = b'branches.current'
 
         fm.startitem()
-        fm.write('branch', '%s', tag, label=label)
+        fm.write(b'branch', b'%s', tag, label=label)
         rev = ctx.rev()
-        padsize = max(31 - len("%d" % rev) - encoding.colwidth(tag), 0)
-        fmt = ' ' * padsize + ' %d:%s'
-        fm.condwrite(not ui.quiet, 'rev node', fmt, rev, hexfunc(ctx.node()),
-                     label='log.changeset changeset.%s' % ctx.phasestr())
+        padsize = max(31 - len(b"%d" % rev) - encoding.colwidth(tag), 0)
+        fmt = b' ' * padsize + b' %d:%s'
+        fm.condwrite(
+            not ui.quiet,
+            b'rev node',
+            fmt,
+            rev,
+            hexfunc(ctx.node()),
+            label=b'log.changeset changeset.%s' % ctx.phasestr(),
+        )
         fm.context(ctx=ctx)
         fm.data(active=isactive, closed=not isopen, current=current)
         if not ui.quiet:
             fm.plain(notice)
-        fm.plain('\n')
+        fm.plain(b'\n')
     fm.end()
 
-@command('bundle',
-    [('f', 'force', None, _('run even when the destination is unrelated')),
-    ('r', 'rev', [], _('a changeset intended to be added to the destination'),
-     _('REV')),
-    ('b', 'branch', [], _('a specific branch you would like to bundle'),
-     _('BRANCH')),
-    ('', 'base', [],
-     _('a base changeset assumed to be available at the destination'),
-     _('REV')),
-    ('a', 'all', None, _('bundle all changesets in the repository')),
-    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
-    ] + remoteopts,
-    _('[-f] [-t BUNDLESPEC] [-a] [-r REV]... [--base REV]... FILE [DEST]'),
-    helpcategory=command.CATEGORY_IMPORT_EXPORT)
+
+@command(
+    b'bundle',
+    [
+        (
+            b'f',
+            b'force',
+            None,
+            _(b'run even when the destination is unrelated'),
+        ),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'a changeset intended to be added to the destination'),
+            _(b'REV'),
+        ),
+        (
+            b'b',
+            b'branch',
+            [],
+            _(b'a specific branch you would like to bundle'),
+            _(b'BRANCH'),
+        ),
+        (
+            b'',
+            b'base',
+            [],
+            _(b'a base changeset assumed to be available at the destination'),
+            _(b'REV'),
+        ),
+        (b'a', b'all', None, _(b'bundle all changesets in the repository')),
+        (
+            b't',
+            b'type',
+            b'bzip2',
+            _(b'bundle compression type to use'),
+            _(b'TYPE'),
+        ),
+    ]
+    + remoteopts,
+    _(b'[-f] [-t BUNDLESPEC] [-a] [-r REV]... [--base REV]... FILE [DEST]'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT,
+)
 def bundle(ui, repo, fname, dest=None, **opts):
     """create a bundle file
 
@@ -1289,105 +1550,135 @@
     """
     opts = pycompat.byteskwargs(opts)
     revs = None
-    if 'rev' in opts:
-        revstrings = opts['rev']
+    if b'rev' in opts:
+        revstrings = opts[b'rev']
         revs = scmutil.revrange(repo, revstrings)
         if revstrings and not revs:
-            raise error.Abort(_('no commits to bundle'))
-
-    bundletype = opts.get('type', 'bzip2').lower()
+            raise error.Abort(_(b'no commits to bundle'))
+
+    bundletype = opts.get(b'type', b'bzip2').lower()
     try:
         bundlespec = exchange.parsebundlespec(repo, bundletype, strict=False)
     except error.UnsupportedBundleSpecification as e:
-        raise error.Abort(pycompat.bytestr(e),
-                          hint=_("see 'hg help bundlespec' for supported "
-                                 "values for --type"))
-    cgversion = bundlespec.contentopts["cg.version"]
+        raise error.Abort(
+            pycompat.bytestr(e),
+            hint=_(b"see 'hg help bundlespec' for supported values for --type"),
+        )
+    cgversion = bundlespec.contentopts[b"cg.version"]
 
     # Packed bundles are a pseudo bundle format for now.
-    if cgversion == 's1':
-        raise error.Abort(_('packed bundles cannot be produced by "hg bundle"'),
-                          hint=_("use 'hg debugcreatestreamclonebundle'"))
-
-    if opts.get('all'):
+    if cgversion == b's1':
+        raise error.Abort(
+            _(b'packed bundles cannot be produced by "hg bundle"'),
+            hint=_(b"use 'hg debugcreatestreamclonebundle'"),
+        )
+
+    if opts.get(b'all'):
         if dest:
-            raise error.Abort(_("--all is incompatible with specifying "
-                                "a destination"))
-        if opts.get('base'):
-            ui.warn(_("ignoring --base because --all was specified\n"))
+            raise error.Abort(
+                _(b"--all is incompatible with specifying a destination")
+            )
+        if opts.get(b'base'):
+            ui.warn(_(b"ignoring --base because --all was specified\n"))
         base = [nullrev]
     else:
-        base = scmutil.revrange(repo, opts.get('base'))
+        base = scmutil.revrange(repo, opts.get(b'base'))
     if cgversion not in changegroup.supportedoutgoingversions(repo):
-        raise error.Abort(_("repository does not support bundle version %s") %
-                          cgversion)
+        raise error.Abort(
+            _(b"repository does not support bundle version %s") % cgversion
+        )
 
     if base:
         if dest:
-            raise error.Abort(_("--base is incompatible with specifying "
-                               "a destination"))
+            raise error.Abort(
+                _(b"--base is incompatible with specifying a destination")
+            )
         common = [repo[rev].node() for rev in base]
         heads = [repo[r].node() for r in revs] if revs else None
         outgoing = discovery.outgoing(repo, common, heads)
     else:
-        dest = ui.expandpath(dest or 'default-push', dest or 'default')
-        dest, branches = hg.parseurl(dest, opts.get('branch'))
+        dest = ui.expandpath(dest or b'default-push', dest or b'default')
+        dest, branches = hg.parseurl(dest, opts.get(b'branch'))
         other = hg.peer(repo, opts, dest)
         revs = [repo[r].hex() for r in revs]
         revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
         heads = revs and pycompat.maplist(repo.lookup, revs) or revs
-        outgoing = discovery.findcommonoutgoing(repo, other,
-                                                onlyheads=heads,
-                                                force=opts.get('force'),
-                                                portable=True)
+        outgoing = discovery.findcommonoutgoing(
+            repo,
+            other,
+            onlyheads=heads,
+            force=opts.get(b'force'),
+            portable=True,
+        )
 
     if not outgoing.missing:
         scmutil.nochangesfound(ui, repo, not base and outgoing.excluded)
         return 1
 
-    if cgversion == '01': #bundle1
-        bversion = 'HG10' + bundlespec.wirecompression
+    if cgversion == b'01':  # bundle1
+        bversion = b'HG10' + bundlespec.wirecompression
         bcompression = None
-    elif cgversion in ('02', '03'):
-        bversion = 'HG20'
+    elif cgversion in (b'02', b'03'):
+        bversion = b'HG20'
         bcompression = bundlespec.wirecompression
     else:
         raise error.ProgrammingError(
-            'bundle: unexpected changegroup version %s' % cgversion)
+            b'bundle: unexpected changegroup version %s' % cgversion
+        )
 
     # TODO compression options should be derived from bundlespec parsing.
     # This is a temporary hack to allow adjusting bundle compression
     # level without a) formalizing the bundlespec changes to declare it
     # b) introducing a command flag.
     compopts = {}
-    complevel = ui.configint('experimental',
-                             'bundlecomplevel.' + bundlespec.compression)
+    complevel = ui.configint(
+        b'experimental', b'bundlecomplevel.' + bundlespec.compression
+    )
     if complevel is None:
-        complevel = ui.configint('experimental', 'bundlecomplevel')
+        complevel = ui.configint(b'experimental', b'bundlecomplevel')
     if complevel is not None:
-        compopts['level'] = complevel
+        compopts[b'level'] = complevel
 
     # Allow overriding the bundling of obsmarker in phases through
     # configuration while we don't have a bundle version that include them
-    if repo.ui.configbool('experimental', 'evolution.bundle-obsmarker'):
-        bundlespec.contentopts['obsolescence'] = True
-    if repo.ui.configbool('experimental', 'bundle-phases'):
-        bundlespec.contentopts['phases'] = True
-
-    bundle2.writenewbundle(ui, repo, 'bundle', fname, bversion, outgoing,
-                           bundlespec.contentopts, compression=bcompression,
-                           compopts=compopts)
-
-@command('cat',
-    [('o', 'output', '',
-     _('print output to file with formatted name'), _('FORMAT')),
-    ('r', 'rev', '', _('print the given revision'), _('REV')),
-    ('', 'decode', None, _('apply any matching decode filter')),
-    ] + walkopts + formatteropts,
-    _('[OPTION]... FILE...'),
+    if repo.ui.configbool(b'experimental', b'evolution.bundle-obsmarker'):
+        bundlespec.contentopts[b'obsolescence'] = True
+    if repo.ui.configbool(b'experimental', b'bundle-phases'):
+        bundlespec.contentopts[b'phases'] = True
+
+    bundle2.writenewbundle(
+        ui,
+        repo,
+        b'bundle',
+        fname,
+        bversion,
+        outgoing,
+        bundlespec.contentopts,
+        compression=bcompression,
+        compopts=compopts,
+    )
+
+
+@command(
+    b'cat',
+    [
+        (
+            b'o',
+            b'output',
+            b'',
+            _(b'print output to file with formatted name'),
+            _(b'FORMAT'),
+        ),
+        (b'r', b'rev', b'', _(b'print the given revision'), _(b'REV')),
+        (b'', b'decode', None, _(b'apply any matching decode filter')),
+    ]
+    + walkopts
+    + formatteropts,
+    _(b'[OPTION]... FILE...'),
     helpcategory=command.CATEGORY_FILE_CONTENTS,
     inferrepo=True,
-    intents={INTENT_READONLY})
+    intents={INTENT_READONLY},
+)
 def cat(ui, repo, file1, *pats, **opts):
     """output the current or given revision of files
 
@@ -1423,42 +1714,75 @@
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
-    rev = opts.get('rev')
+    rev = opts.get(b'rev')
     if rev:
-        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
     ctx = scmutil.revsingle(repo, rev)
     m = scmutil.match(ctx, (file1,) + pats, opts)
-    fntemplate = opts.pop('output', '')
+    fntemplate = opts.pop(b'output', b'')
     if cmdutil.isstdiofilename(fntemplate):
-        fntemplate = ''
+        fntemplate = b''
 
     if fntemplate:
-        fm = formatter.nullformatter(ui, 'cat', opts)
+        fm = formatter.nullformatter(ui, b'cat', opts)
     else:
-        ui.pager('cat')
-        fm = ui.formatter('cat', opts)
+        ui.pager(b'cat')
+        fm = ui.formatter(b'cat', opts)
     with fm:
-        return cmdutil.cat(ui, repo, ctx, m, fm, fntemplate, '',
-                           **pycompat.strkwargs(opts))
-
-@command('clone',
-    [('U', 'noupdate', None, _('the clone will include an empty working '
-                               'directory (only a repository)')),
-    ('u', 'updaterev', '', _('revision, tag, or branch to check out'),
-        _('REV')),
-    ('r', 'rev', [], _('do not clone everything, but include this changeset'
-                       ' and its ancestors'), _('REV')),
-    ('b', 'branch', [], _('do not clone everything, but include this branch\'s'
-                          ' changesets and their ancestors'), _('BRANCH')),
-    ('', 'pull', None, _('use pull protocol to copy metadata')),
-    ('', 'uncompressed', None,
-       _('an alias to --stream (DEPRECATED)')),
-    ('', 'stream', None,
-       _('clone with minimal data processing')),
-    ] + remoteopts,
-    _('[OPTION]... SOURCE [DEST]'),
+        return cmdutil.cat(
+            ui, repo, ctx, m, fm, fntemplate, b'', **pycompat.strkwargs(opts)
+        )
+
+
+@command(
+    b'clone',
+    [
+        (
+            b'U',
+            b'noupdate',
+            None,
+            _(
+                b'the clone will include an empty working '
+                b'directory (only a repository)'
+            ),
+        ),
+        (
+            b'u',
+            b'updaterev',
+            b'',
+            _(b'revision, tag, or branch to check out'),
+            _(b'REV'),
+        ),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(
+                b'do not clone everything, but include this changeset'
+                b' and its ancestors'
+            ),
+            _(b'REV'),
+        ),
+        (
+            b'b',
+            b'branch',
+            [],
+            _(
+                b'do not clone everything, but include this branch\'s'
+                b' changesets and their ancestors'
+            ),
+            _(b'BRANCH'),
+        ),
+        (b'', b'pull', None, _(b'use pull protocol to copy metadata')),
+        (b'', b'uncompressed', None, _(b'an alias to --stream (DEPRECATED)')),
+        (b'', b'stream', None, _(b'clone with minimal data processing')),
+    ]
+    + remoteopts,
+    _(b'[OPTION]... SOURCE [DEST]'),
     helpcategory=command.CATEGORY_REPO_CREATION,
-    helpbasic=True, norepo=True)
+    helpbasic=True,
+    norepo=True,
+)
 def clone(ui, source, dest=None, **opts):
     """make a copy of an existing repository
 
@@ -1568,51 +1892,72 @@
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
-    if opts.get('noupdate') and opts.get('updaterev'):
-        raise error.Abort(_("cannot specify both --noupdate and --updaterev"))
+    if opts.get(b'noupdate') and opts.get(b'updaterev'):
+        raise error.Abort(_(b"cannot specify both --noupdate and --updaterev"))
 
     # --include/--exclude can come from narrow or sparse.
     includepats, excludepats = None, None
 
     # hg.clone() differentiates between None and an empty set. So make sure
     # patterns are sets if narrow is requested without patterns.
-    if opts.get('narrow'):
+    if opts.get(b'narrow'):
         includepats = set()
         excludepats = set()
 
-        if opts.get('include'):
-            includepats = narrowspec.parsepatterns(opts.get('include'))
-        if opts.get('exclude'):
-            excludepats = narrowspec.parsepatterns(opts.get('exclude'))
-
-    r = hg.clone(ui, opts, source, dest,
-                 pull=opts.get('pull'),
-                 stream=opts.get('stream') or opts.get('uncompressed'),
-                 revs=opts.get('rev'),
-                 update=opts.get('updaterev') or not opts.get('noupdate'),
-                 branch=opts.get('branch'),
-                 shareopts=opts.get('shareopts'),
-                 storeincludepats=includepats,
-                 storeexcludepats=excludepats,
-                 depth=opts.get('depth') or None)
+        if opts.get(b'include'):
+            includepats = narrowspec.parsepatterns(opts.get(b'include'))
+        if opts.get(b'exclude'):
+            excludepats = narrowspec.parsepatterns(opts.get(b'exclude'))
+
+    r = hg.clone(
+        ui,
+        opts,
+        source,
+        dest,
+        pull=opts.get(b'pull'),
+        stream=opts.get(b'stream') or opts.get(b'uncompressed'),
+        revs=opts.get(b'rev'),
+        update=opts.get(b'updaterev') or not opts.get(b'noupdate'),
+        branch=opts.get(b'branch'),
+        shareopts=opts.get(b'shareopts'),
+        storeincludepats=includepats,
+        storeexcludepats=excludepats,
+        depth=opts.get(b'depth') or None,
+    )
 
     return r is None
 
-@command('commit|ci',
-    [('A', 'addremove', None,
-     _('mark new/missing files as added/removed before committing')),
-    ('', 'close-branch', None,
-     _('mark a branch head as closed')),
-    ('', 'amend', None, _('amend the parent of the working directory')),
-    ('s', 'secret', None, _('use the secret phase for committing')),
-    ('e', 'edit', None, _('invoke editor on commit messages')),
-    ('', 'force-close-branch', None,
-     _('forcibly close branch from a non-head changeset (ADVANCED)')),
-    ('i', 'interactive', None, _('use interactive mode')),
-    ] + walkopts + commitopts + commitopts2 + subrepoopts,
-    _('[OPTION]... [FILE]...'),
-    helpcategory=command.CATEGORY_COMMITTING, helpbasic=True,
-    inferrepo=True)
+
+@command(
+    b'commit|ci',
+    [
+        (
+            b'A',
+            b'addremove',
+            None,
+            _(b'mark new/missing files as added/removed before committing'),
+        ),
+        (b'', b'close-branch', None, _(b'mark a branch head as closed')),
+        (b'', b'amend', None, _(b'amend the parent of the working directory')),
+        (b's', b'secret', None, _(b'use the secret phase for committing')),
+        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
+        (
+            b'',
+            b'force-close-branch',
+            None,
+            _(b'forcibly close branch from a non-head changeset (ADVANCED)'),
+        ),
+        (b'i', b'interactive', None, _(b'use interactive mode')),
+    ]
+    + walkopts
+    + commitopts
+    + commitopts2
+    + subrepoopts,
+    _(b'[OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_COMMITTING,
+    helpbasic=True,
+    inferrepo=True,
+)
 def commit(ui, repo, *pats, **opts):
     """commit the specified files or all outstanding changes
 
@@ -1672,22 +2017,23 @@
     with repo.wlock(), repo.lock():
         return _docommit(ui, repo, *pats, **opts)
 
+
 def _docommit(ui, repo, *pats, **opts):
     if opts.get(r'interactive'):
         opts.pop(r'interactive')
-        ret = cmdutil.dorecord(ui, repo, commit, None, False,
-                               cmdutil.recordfilter, *pats,
-                               **opts)
+        ret = cmdutil.dorecord(
+            ui, repo, commit, None, False, cmdutil.recordfilter, *pats, **opts
+        )
         # ret can be 0 (no changes to record) or the value returned by
         # commit(), 1 if nothing changed or None on success.
         return 1 if ret == 0 else ret
 
     opts = pycompat.byteskwargs(opts)
-    if opts.get('subrepos'):
-        if opts.get('amend'):
-            raise error.Abort(_('cannot amend with --subrepos'))
+    if opts.get(b'subrepos'):
+        if opts.get(b'amend'):
+            raise error.Abort(_(b'cannot amend with --subrepos'))
         # Let --subrepos on the command line override config setting.
-        ui.setconfig('ui', 'commitsubrepos', True, 'commit')
+        ui.setconfig(b'ui', b'commitsubrepos', True, b'commit')
 
     cmdutil.checkunfinished(repo, commit=True)
 
@@ -1695,30 +2041,38 @@
     bheads = repo.branchheads(branch)
 
     extra = {}
-    if opts.get('close_branch') or opts.get('force_close_branch'):
-        extra['close'] = '1'
-
-        if repo['.'].closesbranch():
-            raise error.Abort(_('current revision is already a branch closing'
-                                ' head'))
+    if opts.get(b'close_branch') or opts.get(b'force_close_branch'):
+        extra[b'close'] = b'1'
+
+        if repo[b'.'].closesbranch():
+            raise error.Abort(
+                _(b'current revision is already a branch closing head')
+            )
         elif not bheads:
-            raise error.Abort(_('branch "%s" has no heads to close') % branch)
-        elif (branch == repo['.'].branch() and repo['.'].node() not in bheads
-              and not opts.get('force_close_branch')):
-            hint = _('use --force-close-branch to close branch from a non-head'
-                     ' changeset')
-            raise error.Abort(_('can only close branch heads'), hint=hint)
-        elif opts.get('amend'):
-            if (repo['.'].p1().branch() != branch and
-                repo['.'].p2().branch() != branch):
-                raise error.Abort(_('can only close branch heads'))
-
-    if opts.get('amend'):
-        if ui.configbool('ui', 'commitsubrepos'):
-            raise error.Abort(_('cannot amend with ui.commitsubrepos enabled'))
-
-        old = repo['.']
-        rewriteutil.precheck(repo, [old.rev()], 'amend')
+            raise error.Abort(_(b'branch "%s" has no heads to close') % branch)
+        elif (
+            branch == repo[b'.'].branch()
+            and repo[b'.'].node() not in bheads
+            and not opts.get(b'force_close_branch')
+        ):
+            hint = _(
+                b'use --force-close-branch to close branch from a non-head'
+                b' changeset'
+            )
+            raise error.Abort(_(b'can only close branch heads'), hint=hint)
+        elif opts.get(b'amend'):
+            if (
+                repo[b'.'].p1().branch() != branch
+                and repo[b'.'].p2().branch() != branch
+            ):
+                raise error.Abort(_(b'can only close branch heads'))
+
+    if opts.get(b'amend'):
+        if ui.configbool(b'ui', b'commitsubrepos'):
+            raise error.Abort(_(b'cannot amend with ui.commitsubrepos enabled'))
+
+        old = repo[b'.']
+        rewriteutil.precheck(repo, [old.rev()], b'amend')
 
         # Currently histedit gets confused if an amend happens while histedit
         # is in progress. Since we have a checkunfinished command, we are
@@ -1731,54 +2085,78 @@
 
         node = cmdutil.amend(ui, repo, old, extra, pats, opts)
         if node == old.node():
-            ui.status(_("nothing changed\n"))
+            ui.status(_(b"nothing changed\n"))
             return 1
     else:
+
         def commitfunc(ui, repo, message, match, opts):
             overrides = {}
-            if opts.get('secret'):
-                overrides[('phases', 'new-commit')] = 'secret'
+            if opts.get(b'secret'):
+                overrides[(b'phases', b'new-commit')] = b'secret'
 
             baseui = repo.baseui
-            with baseui.configoverride(overrides, 'commit'):
-                with ui.configoverride(overrides, 'commit'):
-                    editform = cmdutil.mergeeditform(repo[None],
-                                                     'commit.normal')
+            with baseui.configoverride(overrides, b'commit'):
+                with ui.configoverride(overrides, b'commit'):
+                    editform = cmdutil.mergeeditform(
+                        repo[None], b'commit.normal'
+                    )
                     editor = cmdutil.getcommiteditor(
-                        editform=editform, **pycompat.strkwargs(opts))
-                    return repo.commit(message,
-                                       opts.get('user'),
-                                       opts.get('date'),
-                                       match,
-                                       editor=editor,
-                                       extra=extra)
+                        editform=editform, **pycompat.strkwargs(opts)
+                    )
+                    return repo.commit(
+                        message,
+                        opts.get(b'user'),
+                        opts.get(b'date'),
+                        match,
+                        editor=editor,
+                        extra=extra,
+                    )
 
         node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
 
         if not node:
             stat = cmdutil.postcommitstatus(repo, pats, opts)
             if stat[3]:
-                ui.status(_("nothing changed (%d missing files, see "
-                            "'hg status')\n") % len(stat[3]))
+                ui.status(
+                    _(
+                        b"nothing changed (%d missing files, see "
+                        b"'hg status')\n"
+                    )
+                    % len(stat[3])
+                )
             else:
-                ui.status(_("nothing changed\n"))
+                ui.status(_(b"nothing changed\n"))
             return 1
 
     cmdutil.commitstatus(repo, node, branch, bheads, opts)
 
-    if not ui.quiet and ui.configbool('commands', 'commit.post-status'):
-        status(ui, repo, modified=True, added=True, removed=True, deleted=True,
-               unknown=True, subrepos=opts.get('subrepos'))
-
-@command('config|showconfig|debugconfig',
-    [('u', 'untrusted', None, _('show untrusted configuration options')),
-     ('e', 'edit', None, _('edit user config')),
-     ('l', 'local', None, _('edit repository config')),
-     ('g', 'global', None, _('edit global config'))] + formatteropts,
-    _('[-u] [NAME]...'),
+    if not ui.quiet and ui.configbool(b'commands', b'commit.post-status'):
+        status(
+            ui,
+            repo,
+            modified=True,
+            added=True,
+            removed=True,
+            deleted=True,
+            unknown=True,
+            subrepos=opts.get(b'subrepos'),
+        )
+
+
+@command(
+    b'config|showconfig|debugconfig',
+    [
+        (b'u', b'untrusted', None, _(b'show untrusted configuration options')),
+        (b'e', b'edit', None, _(b'edit user config')),
+        (b'l', b'local', None, _(b'edit repository config')),
+        (b'g', b'global', None, _(b'edit global config')),
+    ]
+    + formatteropts,
+    _(b'[-u] [NAME]...'),
     helpcategory=command.CATEGORY_HELP,
     optionalrepo=True,
-    intents={INTENT_READONLY})
+    intents={INTENT_READONLY},
+)
 def config(ui, repo, *values, **opts):
     """show combined config settings from all hgrc files
 
@@ -1814,15 +2192,15 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    if opts.get('edit') or opts.get('local') or opts.get('global'):
-        if opts.get('local') and opts.get('global'):
-            raise error.Abort(_("can't use --local and --global together"))
-
-        if opts.get('local'):
+    if opts.get(b'edit') or opts.get(b'local') or opts.get(b'global'):
+        if opts.get(b'local') and opts.get(b'global'):
+            raise error.Abort(_(b"can't use --local and --global together"))
+
+        if opts.get(b'local'):
             if not repo:
-                raise error.Abort(_("can't use --local outside a repository"))
-            paths = [repo.vfs.join('hgrc')]
-        elif opts.get('global'):
+                raise error.Abort(_(b"can't use --local outside a repository"))
+            paths = [repo.vfs.join(b'hgrc')]
+        elif opts.get(b'global'):
             paths = rcutil.systemrcpath()
         else:
             paths = rcutil.userrcpath()
@@ -1831,40 +2209,43 @@
             if os.path.exists(f):
                 break
         else:
-            if opts.get('global'):
-                samplehgrc = uimod.samplehgrcs['global']
-            elif opts.get('local'):
-                samplehgrc = uimod.samplehgrcs['local']
+            if opts.get(b'global'):
+                samplehgrc = uimod.samplehgrcs[b'global']
+            elif opts.get(b'local'):
+                samplehgrc = uimod.samplehgrcs[b'local']
             else:
-                samplehgrc = uimod.samplehgrcs['user']
+                samplehgrc = uimod.samplehgrcs[b'user']
 
             f = paths[0]
-            fp = open(f, "wb")
+            fp = open(f, b"wb")
             fp.write(util.tonativeeol(samplehgrc))
             fp.close()
 
         editor = ui.geteditor()
-        ui.system("%s \"%s\"" % (editor, f),
-                  onerr=error.Abort, errprefix=_("edit failed"),
-                  blockedtag='config_edit')
+        ui.system(
+            b"%s \"%s\"" % (editor, f),
+            onerr=error.Abort,
+            errprefix=_(b"edit failed"),
+            blockedtag=b'config_edit',
+        )
         return
-    ui.pager('config')
-    fm = ui.formatter('config', opts)
+    ui.pager(b'config')
+    fm = ui.formatter(b'config', opts)
     for t, f in rcutil.rccomponents():
-        if t == 'path':
-            ui.debug('read config from: %s\n' % f)
-        elif t == 'items':
+        if t == b'path':
+            ui.debug(b'read config from: %s\n' % f)
+        elif t == b'items':
             for section, name, value, source in f:
-                ui.debug('set config by: %s\n' % source)
+                ui.debug(b'set config by: %s\n' % source)
         else:
-            raise error.ProgrammingError('unknown rctype: %s' % t)
-    untrusted = bool(opts.get('untrusted'))
+            raise error.ProgrammingError(b'unknown rctype: %s' % t)
+    untrusted = bool(opts.get(b'untrusted'))
 
     selsections = selentries = []
     if values:
-        selsections = [v for v in values if '.' not in v]
-        selentries = [v for v in values if '.' in v]
-    uniquesel = (len(selentries) == 1 and not selsections)
+        selsections = [v for v in values if b'.' not in v]
+        selentries = [v for v in values if b'.' in v]
+    uniquesel = len(selentries) == 1 and not selsections
     selsections = set(selsections)
     selentries = set(selentries)
 
@@ -1872,28 +2253,34 @@
     for section, name, value in ui.walkconfig(untrusted=untrusted):
         source = ui.configsource(section, name, untrusted)
         value = pycompat.bytestr(value)
+        defaultvalue = ui.configdefault(section, name)
         if fm.isplain():
-            source = source or 'none'
-            value = value.replace('\n', '\\n')
-        entryname = section + '.' + name
+            source = source or b'none'
+            value = value.replace(b'\n', b'\\n')
+        entryname = section + b'.' + name
         if values and not (section in selsections or entryname in selentries):
             continue
         fm.startitem()
-        fm.condwrite(ui.debugflag, 'source', '%s: ', source)
+        fm.condwrite(ui.debugflag, b'source', b'%s: ', source)
         if uniquesel:
             fm.data(name=entryname)
-            fm.write('value', '%s\n', value)
+            fm.write(b'value', b'%s\n', value)
         else:
-            fm.write('name value', '%s=%s\n', entryname, value)
+            fm.write(b'name value', b'%s=%s\n', entryname, value)
+        fm.data(defaultvalue=defaultvalue)
         matched = True
     fm.end()
     if matched:
         return 0
     return 1
 
-@command('continue',
-    dryrunopts, helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
-    helpbasic=True)
+
+@command(
+    b'continue',
+    dryrunopts,
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+    helpbasic=True,
+)
 def continuecmd(ui, repo, **opts):
     """resumes an interrupted operation (EXPERIMENTAL)
 
@@ -1905,22 +2292,37 @@
     dryrun = opts.get(r'dry_run')
     contstate = cmdutil.getunfinishedstate(repo)
     if not contstate:
-        raise error.Abort(_('no operation in progress'))
+        raise error.Abort(_(b'no operation in progress'))
     if not contstate.continuefunc:
-        raise error.Abort((_("%s in progress but does not support "
-                             "'hg continue'") % (contstate._opname)),
-                             hint=contstate.continuemsg())
+        raise error.Abort(
+            (
+                _(b"%s in progress but does not support 'hg continue'")
+                % (contstate._opname)
+            ),
+            hint=contstate.continuemsg(),
+        )
     if dryrun:
-        ui.status(_('%s in progress, will be resumed\n') % (contstate._opname))
+        ui.status(_(b'%s in progress, will be resumed\n') % (contstate._opname))
         return
     return contstate.continuefunc(ui, repo)
 
-@command('copy|cp',
-    [('A', 'after', None, _('record a copy that has already occurred')),
-    ('f', 'force', None, _('forcibly copy over an existing managed file')),
-    ] + walkopts + dryrunopts,
-    _('[OPTION]... SOURCE... DEST'),
-    helpcategory=command.CATEGORY_FILE_CONTENTS)
+
+@command(
+    b'copy|cp',
+    [
+        (b'A', b'after', None, _(b'record a copy that has already occurred')),
+        (
+            b'f',
+            b'force',
+            None,
+            _(b'forcibly copy over an existing managed file'),
+        ),
+    ]
+    + walkopts
+    + dryrunopts,
+    _(b'[OPTION]... SOURCE... DEST'),
+    helpcategory=command.CATEGORY_FILE_CONTENTS,
+)
 def copy(ui, repo, *pats, **opts):
     """mark files as copied for the next commit
 
@@ -1941,23 +2343,30 @@
     with repo.wlock(False):
         return cmdutil.copy(ui, repo, pats, opts)
 
+
 @command(
-    'debugcommands', [], _('[COMMAND]'),
+    b'debugcommands',
+    [],
+    _(b'[COMMAND]'),
     helpcategory=command.CATEGORY_HELP,
-    norepo=True)
-def debugcommands(ui, cmd='', *args):
+    norepo=True,
+)
+def debugcommands(ui, cmd=b'', *args):
     """list all available commands and options"""
-    for cmd, vals in sorted(table.iteritems()):
-        cmd = cmd.split('|')[0]
-        opts = ', '.join([i[1] for i in vals[1]])
-        ui.write('%s: %s\n' % (cmd, opts))
-
-@command('debugcomplete',
-    [('o', 'options', None, _('show the command options'))],
-    _('[-o] CMD'),
+    for cmd, vals in sorted(pycompat.iteritems(table)):
+        cmd = cmd.split(b'|')[0]
+        opts = b', '.join([i[1] for i in vals[1]])
+        ui.write(b'%s: %s\n' % (cmd, opts))
+
+
+@command(
+    b'debugcomplete',
+    [(b'o', b'options', None, _(b'show the command options'))],
+    _(b'[-o] CMD'),
     helpcategory=command.CATEGORY_HELP,
-    norepo=True)
-def debugcomplete(ui, cmd='', **opts):
+    norepo=True,
+)
+def debugcomplete(ui, cmd=b'', **opts):
     """returns the completion list associated with the given command"""
 
     if opts.get(r'options'):
@@ -1968,26 +2377,36 @@
             otables.append(entry[1])
         for t in otables:
             for o in t:
-                if "(DEPRECATED)" in o[3]:
+                if b"(DEPRECATED)" in o[3]:
                     continue
                 if o[0]:
-                    options.append('-%s' % o[0])
-                options.append('--%s' % o[1])
-        ui.write("%s\n" % "\n".join(options))
+                    options.append(b'-%s' % o[0])
+                options.append(b'--%s' % o[1])
+        ui.write(b"%s\n" % b"\n".join(options))
         return
 
     cmdlist, unused_allcmds = cmdutil.findpossible(cmd, table)
     if ui.verbose:
-        cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
-    ui.write("%s\n" % "\n".join(sorted(cmdlist)))
-
-@command('diff',
-    [('r', 'rev', [], _('revision'), _('REV')),
-    ('c', 'change', '', _('change made by revision'), _('REV'))
-    ] + diffopts + diffopts2 + walkopts + subrepoopts,
-    _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'),
+        cmdlist = [b' '.join(c[0]) for c in cmdlist.values()]
+    ui.write(b"%s\n" % b"\n".join(sorted(cmdlist)))
+
+
+@command(
+    b'diff',
+    [
+        (b'r', b'rev', [], _(b'revision'), _(b'REV')),
+        (b'c', b'change', b'', _(b'change made by revision'), _(b'REV')),
+    ]
+    + diffopts
+    + diffopts2
+    + walkopts
+    + subrepoopts,
+    _(b'[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'),
     helpcategory=command.CATEGORY_FILE_CONTENTS,
-    helpbasic=True, inferrepo=True, intents={INTENT_READONLY})
+    helpbasic=True,
+    inferrepo=True,
+    intents={INTENT_READONLY},
+)
 def diff(ui, repo, *pats, **opts):
     """diff repository (or selected files)
 
@@ -2047,20 +2466,20 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    revs = opts.get('rev')
-    change = opts.get('change')
-    stat = opts.get('stat')
-    reverse = opts.get('reverse')
+    revs = opts.get(b'rev')
+    change = opts.get(b'change')
+    stat = opts.get(b'stat')
+    reverse = opts.get(b'reverse')
 
     if revs and change:
-        msg = _('cannot specify --rev and --change at the same time')
+        msg = _(b'cannot specify --rev and --change at the same time')
         raise error.Abort(msg)
     elif change:
-        repo = scmutil.unhidehashlikerevs(repo, [change], 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
         ctx2 = scmutil.revsingle(repo, change, None)
         ctx1 = ctx2.p1()
     else:
-        repo = scmutil.unhidehashlikerevs(repo, revs, 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
         ctx1, ctx2 = scmutil.revpair(repo, revs)
     node1, node2 = ctx1.node(), ctx2.node()
 
@@ -2070,22 +2489,47 @@
     diffopts = patch.diffallopts(ui, opts)
     m = scmutil.match(ctx2, pats, opts)
     m = repo.narrowmatch(m)
-    ui.pager('diff')
-    logcmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
-                              listsubrepos=opts.get('subrepos'),
-                              root=opts.get('root'))
-
-@command('export',
-    [('B', 'bookmark', '',
-     _('export changes only reachable by given bookmark'), _('BOOKMARK')),
-    ('o', 'output', '',
-     _('print output to file with formatted name'), _('FORMAT')),
-    ('', 'switch-parent', None, _('diff against the second parent')),
-    ('r', 'rev', [], _('revisions to export'), _('REV')),
-    ] + diffopts + formatteropts,
-    _('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'),
+    ui.pager(b'diff')
+    logcmdutil.diffordiffstat(
+        ui,
+        repo,
+        diffopts,
+        node1,
+        node2,
+        m,
+        stat=stat,
+        listsubrepos=opts.get(b'subrepos'),
+        root=opts.get(b'root'),
+    )
+
+
+@command(
+    b'export',
+    [
+        (
+            b'B',
+            b'bookmark',
+            b'',
+            _(b'export changes only reachable by given bookmark'),
+            _(b'BOOKMARK'),
+        ),
+        (
+            b'o',
+            b'output',
+            b'',
+            _(b'print output to file with formatted name'),
+            _(b'FORMAT'),
+        ),
+        (b'', b'switch-parent', None, _(b'diff against the second parent')),
+        (b'r', b'rev', [], _(b'revisions to export'), _(b'REV')),
+    ]
+    + diffopts
+    + formatteropts,
+    _(b'[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'),
     helpcategory=command.CATEGORY_IMPORT_EXPORT,
-    helpbasic=True, intents={INTENT_READONLY})
+    helpbasic=True,
+    intents={INTENT_READONLY},
+)
 def export(ui, repo, *changesets, **opts):
     """dump the header and diffs for one or more changesets
 
@@ -2161,52 +2605,75 @@
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
-    bookmark = opts.get('bookmark')
-    changesets += tuple(opts.get('rev', []))
+    bookmark = opts.get(b'bookmark')
+    changesets += tuple(opts.get(b'rev', []))
 
     if bookmark and changesets:
-        raise error.Abort(_("-r and -B are mutually exclusive"))
+        raise error.Abort(_(b"-r and -B are mutually exclusive"))
 
     if bookmark:
         if bookmark not in repo._bookmarks:
-            raise error.Abort(_("bookmark '%s' not found") % bookmark)
+            raise error.Abort(_(b"bookmark '%s' not found") % bookmark)
 
         revs = scmutil.bookmarkrevs(repo, bookmark)
     else:
         if not changesets:
-            changesets = ['.']
-
-        repo = scmutil.unhidehashlikerevs(repo, changesets, 'nowarn')
+            changesets = [b'.']
+
+        repo = scmutil.unhidehashlikerevs(repo, changesets, b'nowarn')
         revs = scmutil.revrange(repo, changesets)
 
     if not revs:
-        raise error.Abort(_("export requires at least one changeset"))
+        raise error.Abort(_(b"export requires at least one changeset"))
     if len(revs) > 1:
-        ui.note(_('exporting patches:\n'))
+        ui.note(_(b'exporting patches:\n'))
     else:
-        ui.note(_('exporting patch:\n'))
-
-    fntemplate = opts.get('output')
+        ui.note(_(b'exporting patch:\n'))
+
+    fntemplate = opts.get(b'output')
     if cmdutil.isstdiofilename(fntemplate):
-        fntemplate = ''
+        fntemplate = b''
 
     if fntemplate:
-        fm = formatter.nullformatter(ui, 'export', opts)
+        fm = formatter.nullformatter(ui, b'export', opts)
     else:
-        ui.pager('export')
-        fm = ui.formatter('export', opts)
+        ui.pager(b'export')
+        fm = ui.formatter(b'export', opts)
     with fm:
-        cmdutil.export(repo, revs, fm, fntemplate=fntemplate,
-                       switch_parent=opts.get('switch_parent'),
-                       opts=patch.diffallopts(ui, opts))
-
-@command('files',
-    [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
-     ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
-    ] + walkopts + formatteropts + subrepoopts,
-    _('[OPTION]... [FILE]...'),
+        cmdutil.export(
+            repo,
+            revs,
+            fm,
+            fntemplate=fntemplate,
+            switch_parent=opts.get(b'switch_parent'),
+            opts=patch.diffallopts(ui, opts),
+        )
+
+
+@command(
+    b'files',
+    [
+        (
+            b'r',
+            b'rev',
+            b'',
+            _(b'search the repository as it is in REV'),
+            _(b'REV'),
+        ),
+        (
+            b'0',
+            b'print0',
+            None,
+            _(b'end filenames with NUL, for use with xargs'),
+        ),
+    ]
+    + walkopts
+    + formatteropts
+    + subrepoopts,
+    _(b'[OPTION]... [FILE]...'),
     helpcategory=command.CATEGORY_WORKING_DIRECTORY,
-    intents={INTENT_READONLY})
+    intents={INTENT_READONLY},
+)
 def files(ui, repo, *pats, **opts):
     """list tracked files
 
@@ -2262,30 +2729,35 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    rev = opts.get('rev')
+    rev = opts.get(b'rev')
     if rev:
-        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
     ctx = scmutil.revsingle(repo, rev, None)
 
-    end = '\n'
-    if opts.get('print0'):
-        end = '\0'
-    fmt = '%s' + end
+    end = b'\n'
+    if opts.get(b'print0'):
+        end = b'\0'
+    fmt = b'%s' + end
 
     m = scmutil.match(ctx, pats, opts)
-    ui.pager('files')
+    ui.pager(b'files')
     uipathfn = scmutil.getuipathfn(ctx.repo(), legacyrelativevalue=True)
-    with ui.formatter('files', opts) as fm:
-        return cmdutil.files(ui, ctx, m, uipathfn, fm, fmt,
-                             opts.get('subrepos'))
+    with ui.formatter(b'files', opts) as fm:
+        return cmdutil.files(
+            ui, ctx, m, uipathfn, fm, fmt, opts.get(b'subrepos')
+        )
+
 
 @command(
-    'forget',
-    [('i', 'interactive', None, _('use interactive mode')),
-    ] + walkopts + dryrunopts,
-    _('[OPTION]... FILE...'),
+    b'forget',
+    [(b'i', b'interactive', None, _(b'use interactive mode')),]
+    + walkopts
+    + dryrunopts,
+    _(b'[OPTION]... FILE...'),
     helpcategory=command.CATEGORY_WORKING_DIRECTORY,
-    helpbasic=True, inferrepo=True)
+    helpbasic=True,
+    inferrepo=True,
+)
 def forget(ui, repo, *pats, **opts):
     """forget the specified files on the next commit
 
@@ -2317,36 +2789,66 @@
 
     opts = pycompat.byteskwargs(opts)
     if not pats:
-        raise error.Abort(_('no files specified'))
+        raise error.Abort(_(b'no files specified'))
 
     m = scmutil.match(repo[None], pats, opts)
-    dryrun, interactive = opts.get('dry_run'), opts.get('interactive')
+    dryrun, interactive = opts.get(b'dry_run'), opts.get(b'interactive')
     uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
-    rejected = cmdutil.forget(ui, repo, m, prefix="", uipathfn=uipathfn,
-                              explicitonly=False, dryrun=dryrun,
-                              interactive=interactive)[0]
+    rejected = cmdutil.forget(
+        ui,
+        repo,
+        m,
+        prefix=b"",
+        uipathfn=uipathfn,
+        explicitonly=False,
+        dryrun=dryrun,
+        interactive=interactive,
+    )[0]
     return rejected and 1 or 0
 
+
 @command(
-    'graft',
-    [('r', 'rev', [], _('revisions to graft'), _('REV')),
-     ('', 'base', '',
-      _('base revision when doing the graft merge (ADVANCED)'), _('REV')),
-     ('c', 'continue', False, _('resume interrupted graft')),
-     ('', 'stop', False, _('stop interrupted graft')),
-     ('', 'abort', False, _('abort interrupted graft')),
-     ('e', 'edit', False, _('invoke editor on commit messages')),
-     ('', 'log', None, _('append graft info to log message')),
-     ('', 'no-commit', None,
-      _("don't commit, just apply the changes in working directory")),
-     ('f', 'force', False, _('force graft')),
-     ('D', 'currentdate', False,
-      _('record the current date as commit date')),
-     ('U', 'currentuser', False,
-      _('record the current user as committer'))]
-    + commitopts2 + mergetoolopts  + dryrunopts,
-    _('[OPTION]... [-r REV]... REV...'),
-    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
+    b'graft',
+    [
+        (b'r', b'rev', [], _(b'revisions to graft'), _(b'REV')),
+        (
+            b'',
+            b'base',
+            b'',
+            _(b'base revision when doing the graft merge (ADVANCED)'),
+            _(b'REV'),
+        ),
+        (b'c', b'continue', False, _(b'resume interrupted graft')),
+        (b'', b'stop', False, _(b'stop interrupted graft')),
+        (b'', b'abort', False, _(b'abort interrupted graft')),
+        (b'e', b'edit', False, _(b'invoke editor on commit messages')),
+        (b'', b'log', None, _(b'append graft info to log message')),
+        (
+            b'',
+            b'no-commit',
+            None,
+            _(b"don't commit, just apply the changes in working directory"),
+        ),
+        (b'f', b'force', False, _(b'force graft')),
+        (
+            b'D',
+            b'currentdate',
+            False,
+            _(b'record the current date as commit date'),
+        ),
+        (
+            b'U',
+            b'currentuser',
+            False,
+            _(b'record the current user as committer'),
+        ),
+    ]
+    + commitopts2
+    + mergetoolopts
+    + dryrunopts,
+    _(b'[OPTION]... [-r REV]... REV...'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+)
 def graft(ui, repo, *revs, **opts):
     '''copy changes from other branches onto the current branch
 
@@ -2445,95 +2947,125 @@
     with repo.wlock():
         return _dograft(ui, repo, *revs, **opts)
 
+
 def _dograft(ui, repo, *revs, **opts):
     opts = pycompat.byteskwargs(opts)
-    if revs and opts.get('rev'):
-        ui.warn(_('warning: inconsistent use of --rev might give unexpected '
-                  'revision ordering!\n'))
+    if revs and opts.get(b'rev'):
+        ui.warn(
+            _(
+                b'warning: inconsistent use of --rev might give unexpected '
+                b'revision ordering!\n'
+            )
+        )
 
     revs = list(revs)
-    revs.extend(opts.get('rev'))
+    revs.extend(opts.get(b'rev'))
     basectx = None
-    if opts.get('base'):
-        basectx = scmutil.revsingle(repo, opts['base'], None)
+    if opts.get(b'base'):
+        basectx = scmutil.revsingle(repo, opts[b'base'], None)
     # a dict of data to be stored in state file
     statedata = {}
     # list of new nodes created by ongoing graft
-    statedata['newnodes'] = []
-
-    if opts.get('user') and opts.get('currentuser'):
-        raise error.Abort(_('--user and --currentuser are mutually exclusive'))
-    if opts.get('date') and opts.get('currentdate'):
-        raise error.Abort(_('--date and --currentdate are mutually exclusive'))
-    if not opts.get('user') and opts.get('currentuser'):
-        opts['user'] = ui.username()
-    if not opts.get('date') and opts.get('currentdate'):
-        opts['date'] = "%d %d" % dateutil.makedate()
-
-    editor = cmdutil.getcommiteditor(editform='graft',
-                                     **pycompat.strkwargs(opts))
+    statedata[b'newnodes'] = []
+
+    if opts.get(b'user') and opts.get(b'currentuser'):
+        raise error.Abort(_(b'--user and --currentuser are mutually exclusive'))
+    if opts.get(b'date') and opts.get(b'currentdate'):
+        raise error.Abort(_(b'--date and --currentdate are mutually exclusive'))
+    if not opts.get(b'user') and opts.get(b'currentuser'):
+        opts[b'user'] = ui.username()
+    if not opts.get(b'date') and opts.get(b'currentdate'):
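+        # hg stores dates as b'<unixtime> <tzoffset>'; makedate() returns that pair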
+        opts[b'date'] = b"%d %d" % dateutil.makedate()
+
+    editor = cmdutil.getcommiteditor(
+        editform=b'graft', **pycompat.strkwargs(opts)
+    )
 
     cont = False
-    if opts.get('no_commit'):
-        if opts.get('edit'):
-            raise error.Abort(_("cannot specify --no-commit and "
-                                "--edit together"))
-        if opts.get('currentuser'):
-            raise error.Abort(_("cannot specify --no-commit and "
-                                "--currentuser together"))
-        if opts.get('currentdate'):
-            raise error.Abort(_("cannot specify --no-commit and "
-                                "--currentdate together"))
-        if opts.get('log'):
-            raise error.Abort(_("cannot specify --no-commit and "
-                                "--log together"))
-
-    graftstate = statemod.cmdstate(repo, 'graftstate')
-
-    if opts.get('stop'):
-        if opts.get('continue'):
-            raise error.Abort(_("cannot use '--continue' and "
-                                "'--stop' together"))
-        if opts.get('abort'):
-            raise error.Abort(_("cannot use '--abort' and '--stop' together"))
-
-        if any((opts.get('edit'), opts.get('log'), opts.get('user'),
-                opts.get('date'), opts.get('currentdate'),
-                opts.get('currentuser'), opts.get('rev'))):
-            raise error.Abort(_("cannot specify any other flag with '--stop'"))
+    if opts.get(b'no_commit'):
+        if opts.get(b'edit'):
+            raise error.Abort(
+                _(b"cannot specify --no-commit and --edit together")
+            )
+        if opts.get(b'currentuser'):
+            raise error.Abort(
+                _(b"cannot specify --no-commit and --currentuser together")
+            )
+        if opts.get(b'currentdate'):
+            raise error.Abort(
+                _(b"cannot specify --no-commit and --currentdate together")
+            )
+        if opts.get(b'log'):
+            raise error.Abort(
+                _(b"cannot specify --no-commit and --log together")
+            )
+
+    graftstate = statemod.cmdstate(repo, b'graftstate')
+
+    if opts.get(b'stop'):
+        if opts.get(b'continue'):
+            raise error.Abort(
+                _(b"cannot use '--continue' and '--stop' together")
+            )
+        if opts.get(b'abort'):
+            raise error.Abort(_(b"cannot use '--abort' and '--stop' together"))
+
+        if any(
+            (
+                opts.get(b'edit'),
+                opts.get(b'log'),
+                opts.get(b'user'),
+                opts.get(b'date'),
+                opts.get(b'currentdate'),
+                opts.get(b'currentuser'),
+                opts.get(b'rev'),
+            )
+        ):
+            raise error.Abort(_(b"cannot specify any other flag with '--stop'"))
         return _stopgraft(ui, repo, graftstate)
-    elif opts.get('abort'):
-        if opts.get('continue'):
-            raise error.Abort(_("cannot use '--continue' and "
-                                "'--abort' together"))
-        if any((opts.get('edit'), opts.get('log'), opts.get('user'),
-                opts.get('date'), opts.get('currentdate'),
-                opts.get('currentuser'), opts.get('rev'))):
-            raise error.Abort(_("cannot specify any other flag with '--abort'"))
+    elif opts.get(b'abort'):
+        if opts.get(b'continue'):
+            raise error.Abort(
+                _(b"cannot use '--continue' and '--abort' together")
+            )
+        if any(
+            (
+                opts.get(b'edit'),
+                opts.get(b'log'),
+                opts.get(b'user'),
+                opts.get(b'date'),
+                opts.get(b'currentdate'),
+                opts.get(b'currentuser'),
+                opts.get(b'rev'),
+            )
+        ):
+            raise error.Abort(
+                _(b"cannot specify any other flag with '--abort'")
+            )
 
         return cmdutil.abortgraft(ui, repo, graftstate)
-    elif opts.get('continue'):
+    elif opts.get(b'continue'):
         cont = True
         if revs:
-            raise error.Abort(_("can't specify --continue and revisions"))
+            raise error.Abort(_(b"can't specify --continue and revisions"))
         # read in unfinished revisions
         if graftstate.exists():
             statedata = cmdutil.readgraftstate(repo, graftstate)
-            if statedata.get('date'):
-                opts['date'] = statedata['date']
-            if statedata.get('user'):
-                opts['user'] = statedata['user']
-            if statedata.get('log'):
-                opts['log'] = True
-            if statedata.get('no_commit'):
-                opts['no_commit'] = statedata.get('no_commit')
-            nodes = statedata['nodes']
+            if statedata.get(b'date'):
+                opts[b'date'] = statedata[b'date']
+            if statedata.get(b'user'):
+                opts[b'user'] = statedata[b'user']
+            if statedata.get(b'log'):
+                opts[b'log'] = True
+            if statedata.get(b'no_commit'):
+                opts[b'no_commit'] = statedata.get(b'no_commit')
+            nodes = statedata[b'nodes']
             revs = [repo[node].rev() for node in nodes]
         else:
-            cmdutil.wrongtooltocontinue(repo, _('graft'))
+            cmdutil.wrongtooltocontinue(repo, _(b'graft'))
     else:
         if not revs:
-            raise error.Abort(_('no revisions specified'))
+            raise error.Abort(_(b'no revisions specified'))
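+        # a fresh graft needs a clean start: no unfinished operation, no local changes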
         cmdutil.checkunfinished(repo)
         cmdutil.bailifchanged(repo)
         revs = scmutil.revrange(repo, revs)
@@ -2541,14 +3073,14 @@
     skipped = set()
     if basectx is None:
         # check for merges
-        for rev in repo.revs('%ld and merge()', revs):
-            ui.warn(_('skipping ungraftable merge revision %d\n') % rev)
+        for rev in repo.revs(b'%ld and merge()', revs):
+            ui.warn(_(b'skipping ungraftable merge revision %d\n') % rev)
             skipped.add(rev)
     revs = [r for r in revs if r not in skipped]
     if not revs:
         return -1
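+    # --base overrides the ancestor used for the graft merge, which is
+    # only well-defined when grafting a single revision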
     if basectx is not None and len(revs) != 1:
-        raise error.Abort(_('only one revision allowed with --base '))
+        raise error.Abort(_(b'only one revision allowed with --base'))
 
     # Don't check in the --continue case, in effect retaining --force across
     # --continues. That's because without --force, any revisions we decided to
@@ -2556,16 +3088,17 @@
     # way to the graftstate. With --force, any revisions we would have otherwise
     # skipped would not have been filtered out, and if they hadn't been applied
     # already, they'd have been in the graftstate.
-    if not (cont or opts.get('force')) and basectx is None:
+    if not (cont or opts.get(b'force')) and basectx is None:
         # check for ancestors of dest branch
-        crev = repo['.'].rev()
+        crev = repo[b'.'].rev()
         ancestors = repo.changelog.ancestors([crev], inclusive=True)
         # XXX make this lazy in the future
         # don't mutate while iterating, create a copy
         for rev in list(revs):
             if rev in ancestors:
-                ui.warn(_('skipping ancestor revision %d:%s\n') %
-                        (rev, repo[rev]))
+                ui.warn(
+                    _(b'skipping ancestor revision %d:%s\n') % (rev, repo[rev])
+                )
                 # XXX remove on list is slow
                 revs.remove(rev)
         if not revs:
@@ -2573,180 +3106,245 @@
 
         # analyze revs for earlier grafts
         ids = {}
-        for ctx in repo.set("%ld", revs):
+        for ctx in repo.set(b"%ld", revs):
             ids[ctx.hex()] = ctx.rev()
-            n = ctx.extra().get('source')
+            n = ctx.extra().get(b'source')
             if n:
                 ids[n] = ctx.rev()
 
         # check ancestors for earlier grafts
-        ui.debug('scanning for duplicate grafts\n')
+        ui.debug(b'scanning for duplicate grafts\n')
 
         # The only changesets we can be sure doesn't contain grafts of any
         # revs, are the ones that are common ancestors of *all* revs:
-        for rev in repo.revs('only(%d,ancestor(%ld))', crev, revs):
+        for rev in repo.revs(b'only(%d,ancestor(%ld))', crev, revs):
             ctx = repo[rev]
-            n = ctx.extra().get('source')
+            n = ctx.extra().get(b'source')
             if n in ids:
                 try:
                     r = repo[n].rev()
                 except error.RepoLookupError:
                     r = None
                 if r in revs:
-                    ui.warn(_('skipping revision %d:%s '
-                              '(already grafted to %d:%s)\n')
-                            % (r, repo[r], rev, ctx))
+                    ui.warn(
+                        _(
+                            b'skipping revision %d:%s '
+                            b'(already grafted to %d:%s)\n'
+                        )
+                        % (r, repo[r], rev, ctx)
+                    )
                     revs.remove(r)
                 elif ids[n] in revs:
                     if r is None:
-                        ui.warn(_('skipping already grafted revision %d:%s '
-                                  '(%d:%s also has unknown origin %s)\n')
-                                % (ids[n], repo[ids[n]], rev, ctx, n[:12]))
+                        ui.warn(
+                            _(
+                                b'skipping already grafted revision %d:%s '
+                                b'(%d:%s also has unknown origin %s)\n'
+                            )
+                            % (ids[n], repo[ids[n]], rev, ctx, n[:12])
+                        )
                     else:
-                        ui.warn(_('skipping already grafted revision %d:%s '
-                                  '(%d:%s also has origin %d:%s)\n')
-                                % (ids[n], repo[ids[n]], rev, ctx, r, n[:12]))
+                        ui.warn(
+                            _(
+                                b'skipping already grafted revision %d:%s '
+                                b'(%d:%s also has origin %d:%s)\n'
+                            )
+                            % (ids[n], repo[ids[n]], rev, ctx, r, n[:12])
+                        )
                     revs.remove(ids[n])
             elif ctx.hex() in ids:
                 r = ids[ctx.hex()]
                 if r in revs:
-                    ui.warn(_('skipping already grafted revision %d:%s '
-                              '(was grafted from %d:%s)\n') %
-                            (r, repo[r], rev, ctx))
+                    ui.warn(
+                        _(
+                            b'skipping already grafted revision %d:%s '
+                            b'(was grafted from %d:%s)\n'
+                        )
+                        % (r, repo[r], rev, ctx)
+                    )
                     revs.remove(r)
         if not revs:
             return -1
 
-    if opts.get('no_commit'):
-        statedata['no_commit'] = True
-    for pos, ctx in enumerate(repo.set("%ld", revs)):
-        desc = '%d:%s "%s"' % (ctx.rev(), ctx,
-                               ctx.description().split('\n', 1)[0])
+    if opts.get(b'no_commit'):
+        statedata[b'no_commit'] = True
+    for pos, ctx in enumerate(repo.set(b"%ld", revs)):
+        desc = b'%d:%s "%s"' % (
+            ctx.rev(),
+            ctx,
+            ctx.description().split(b'\n', 1)[0],
+        )
         names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
         if names:
-            desc += ' (%s)' % ' '.join(names)
-        ui.status(_('grafting %s\n') % desc)
-        if opts.get('dry_run'):
+            desc += b' (%s)' % b' '.join(names)
+        ui.status(_(b'grafting %s\n') % desc)
+        if opts.get(b'dry_run'):
             continue
 
-        source = ctx.extra().get('source')
+        source = ctx.extra().get(b'source')
         extra = {}
         if source:
-            extra['source'] = source
-            extra['intermediate-source'] = ctx.hex()
+            extra[b'source'] = source
+            extra[b'intermediate-source'] = ctx.hex()
         else:
-            extra['source'] = ctx.hex()
+            extra[b'source'] = ctx.hex()
         user = ctx.user()
-        if opts.get('user'):
-            user = opts['user']
-            statedata['user'] = user
+        if opts.get(b'user'):
+            user = opts[b'user']
+            statedata[b'user'] = user
         date = ctx.date()
-        if opts.get('date'):
-            date = opts['date']
-            statedata['date'] = date
+        if opts.get(b'date'):
+            date = opts[b'date']
+            statedata[b'date'] = date
         message = ctx.description()
-        if opts.get('log'):
-            message += '\n(grafted from %s)' % ctx.hex()
-            statedata['log'] = True
+        if opts.get(b'log'):
+            message += b'\n(grafted from %s)' % ctx.hex()
+            statedata[b'log'] = True
 
         # we don't merge the first commit when continuing
         if not cont:
             # perform the graft merge with p1(rev) as 'ancestor'
-            overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
+            overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
             base = ctx.p1() if basectx is None else basectx
-            with ui.configoverride(overrides, 'graft'):
-                stats = mergemod.graft(repo, ctx, base, ['local', 'graft'])
+            with ui.configoverride(overrides, b'graft'):
+                stats = mergemod.graft(repo, ctx, base, [b'local', b'graft'])
             # report any conflicts
             if stats.unresolvedcount > 0:
                 # write out state for --continue
                 nodes = [repo[rev].hex() for rev in revs[pos:]]
-                statedata['nodes'] = nodes
+                statedata[b'nodes'] = nodes
                 stateversion = 1
                 graftstate.save(stateversion, statedata)
-                hint = _("use 'hg resolve' and 'hg graft --continue'")
+                hint = _(b"use 'hg resolve' and 'hg graft --continue'")
                 raise error.Abort(
-                    _("unresolved conflicts, can't continue"),
-                    hint=hint)
+                    _(b"unresolved conflicts, can't continue"), hint=hint
+                )
         else:
             cont = False
 
         # commit if --no-commit is false
-        if not opts.get('no_commit'):
-            node = repo.commit(text=message, user=user, date=date, extra=extra,
-                               editor=editor)
+        if not opts.get(b'no_commit'):
+            node = repo.commit(
+                text=message, user=user, date=date, extra=extra, editor=editor
+            )
             if node is None:
                 ui.warn(
-                    _('note: graft of %d:%s created no changes to commit\n') %
-                    (ctx.rev(), ctx))
+                    _(b'note: graft of %d:%s created no changes to commit\n')
+                    % (ctx.rev(), ctx)
+                )
             # checking that newnodes exist because old state files won't have it
-            elif statedata.get('newnodes') is not None:
-                statedata['newnodes'].append(node)
+            elif statedata.get(b'newnodes') is not None:
+                statedata[b'newnodes'].append(node)
 
     # remove state when we complete successfully
-    if not opts.get('dry_run'):
+    if not opts.get(b'dry_run'):
         graftstate.delete()
 
     return 0
 
+
 def _stopgraft(ui, repo, graftstate):
     """stop the interrupted graft"""
     if not graftstate.exists():
-        raise error.Abort(_("no interrupted graft found"))
-    pctx = repo['.']
+        raise error.Abort(_(b"no interrupted graft found"))
+    pctx = repo[b'.']
     hg.updaterepo(repo, pctx.node(), overwrite=True)
     graftstate.delete()
-    ui.status(_("stopped the interrupted graft\n"))
-    ui.status(_("working directory is now at %s\n") % pctx.hex()[:12])
+    ui.status(_(b"stopped the interrupted graft\n"))
+    ui.status(_(b"working directory is now at %s\n") % pctx.hex()[:12])
     return 0
 
+
 statemod.addunfinished(
-    'graft', fname='graftstate', clearable=True, stopflag=True,
-    continueflag=True, abortfunc=cmdutil.hgabortgraft,
-    cmdhint=_("use 'hg graft --continue' or 'hg graft --stop' to stop")
+    b'graft',
+    fname=b'graftstate',
+    clearable=True,
+    stopflag=True,
+    continueflag=True,
+    abortfunc=cmdutil.hgabortgraft,
+    cmdhint=_(b"use 'hg graft --continue' or 'hg graft --stop' to stop"),
 )
 
-@command('grep',
-    [('0', 'print0', None, _('end fields with NUL')),
-    ('', 'all', None, _('print all revisions that match (DEPRECATED) ')),
-    ('', 'diff', None, _('print all revisions when the term was introduced '
-                         'or removed')),
-    ('a', 'text', None, _('treat all files as text')),
-    ('f', 'follow', None,
-     _('follow changeset history,'
-       ' or file history across copies and renames')),
-    ('i', 'ignore-case', None, _('ignore case when matching')),
-    ('l', 'files-with-matches', None,
-     _('print only filenames and revisions that match')),
-    ('n', 'line-number', None, _('print matching line numbers')),
-    ('r', 'rev', [],
-     _('only search files changed within revision range'), _('REV')),
-    ('', 'all-files', None,
-     _('include all files in the changeset while grepping (EXPERIMENTAL)')),
-    ('u', 'user', None, _('list the author (long with -v)')),
-    ('d', 'date', None, _('list the date (short with -q)')),
-    ] + formatteropts + walkopts,
-    _('[OPTION]... PATTERN [FILE]...'),
+
+@command(
+    b'grep',
+    [
+        (b'0', b'print0', None, _(b'end fields with NUL')),
+        (b'', b'all', None, _(b'print all revisions that match (DEPRECATED)')),
+        (
+            b'',
+            b'diff',
+            None,
+            _(
+                b'search revision differences for when the pattern was added '
+                b'or removed'
+            ),
+        ),
+        (b'a', b'text', None, _(b'treat all files as text')),
+        (
+            b'f',
+            b'follow',
+            None,
+            _(
+                b'follow changeset history,'
+                b' or file history across copies and renames'
+            ),
+        ),
+        (b'i', b'ignore-case', None, _(b'ignore case when matching')),
+        (
+            b'l',
+            b'files-with-matches',
+            None,
+            _(b'print only filenames and revisions that match'),
+        ),
+        (b'n', b'line-number', None, _(b'print matching line numbers')),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'search files changed within revision range'),
+            _(b'REV'),
+        ),
+        (
+            b'',
+            b'all-files',
+            None,
+            _(
+                b'include all files in the changeset while grepping (DEPRECATED)'
+            ),
+        ),
+        (b'u', b'user', None, _(b'list the author (long with -v)')),
+        (b'd', b'date', None, _(b'list the date (short with -q)')),
+    ]
+    + formatteropts
+    + walkopts,
+    _(b'[--diff] [OPTION]... PATTERN [FILE]...'),
     helpcategory=command.CATEGORY_FILE_CONTENTS,
     inferrepo=True,
-    intents={INTENT_READONLY})
+    intents={INTENT_READONLY},
+)
 def grep(ui, repo, pattern, *pats, **opts):
-    """search revision history for a pattern in specified files
-
-    Search revision history for a regular expression in the specified
-    files or the entire project.
-
-    By default, grep prints the most recent revision number for each
-    file in which it finds a match. To get it to print every revision
-    that contains a change in match status ("-" for a match that becomes
-    a non-match, or "+" for a non-match that becomes a match), use the
-    --diff flag.
+    """search for a pattern in specified files
+
+    Search the working directory or revision history for a regular
+    expression in the specified files or the entire repository.
+
+    By default, grep searches the tracked files in the working
+    directory and prints the files where it finds a match. To specify
+    historical revisions instead of the working directory, use the
+    --rev flag.
+
+    To instead search historical revision differences that contain a
+    change in match status ("-" for a match that becomes a non-match,
+    or "+" for a non-match that becomes a match), use the --diff flag.
 
     PATTERN can be any Python (roughly Perl-compatible) regular
     expression.
 
-    If no FILEs are specified (and -f/--follow isn't set), all files in
-    the repository are searched, including those that don't exist in the
-    current branch or have been deleted in a prior changeset.
+    If no FILEs are specified and the --rev flag isn't supplied, all
+    files in the working directory are searched. When using the --rev
+    flag and specifying FILEs, use the --follow argument to also
+    follow the specified FILEs across renames and copies.
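+
+    For example, :hg:`grep TODO` searches the files in the working
+    directory, while :hg:`grep -r tip TODO` searches the files of the
+    tip revision.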
 
     .. container:: verbose
 
@@ -2769,31 +3367,32 @@
       See :hg:`help templates.operators` for the list expansion syntax.
 
     Returns 0 if a match is found, 1 otherwise.
+
     """
     opts = pycompat.byteskwargs(opts)
-    diff = opts.get('all') or opts.get('diff')
-    all_files = opts.get('all_files')
-    if diff and opts.get('all_files'):
-        raise error.Abort(_('--diff and --all-files are mutually exclusive'))
-    # TODO: remove "not opts.get('rev')" if --all-files -rMULTIREV gets working
-    if opts.get('all_files') is None and not opts.get('rev') and not diff:
-        # experimental config: commands.grep.all-files
-        opts['all_files'] = ui.configbool('commands', 'grep.all-files')
-    plaingrep = opts.get('all_files') and not opts.get('rev')
+    diff = opts.get(b'all') or opts.get(b'diff')
+    if diff and opts.get(b'all_files'):
+        raise error.Abort(_(b'--diff and --all-files are mutually exclusive'))
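+    # unless --diff is used, default to searching all tracked files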
+    if opts.get(b'all_files') is None and not diff:
+        opts[b'all_files'] = True
+    plaingrep = opts.get(b'all_files') and not opts.get(b'rev')
+    all_files = opts.get(b'all_files')
     if plaingrep:
-        opts['rev'] = ['wdir()']
+        opts[b'rev'] = [b'wdir()']
 
     reflags = re.M
-    if opts.get('ignore_case'):
+    if opts.get(b'ignore_case'):
         reflags |= re.I
     try:
         regexp = util.re.compile(pattern, reflags)
     except re.error as inst:
-        ui.warn(_("grep: invalid match pattern: %s\n") % pycompat.bytestr(inst))
+        ui.warn(
+            _(b"grep: invalid match pattern: %s\n") % pycompat.bytestr(inst)
+        )
         return 1
-    sep, eol = ':', '\n'
-    if opts.get('print0'):
-        sep = eol = '\0'
+    sep, eol = b':', b'\n'
+    if opts.get(b'print0'):
+        sep = eol = b'\0'
 
     getfile = util.lrucachefunc(repo.file)
 
@@ -2805,9 +3404,9 @@
             if not match:
                 break
             mstart, mend = match.span()
-            linenum += body.count('\n', begin, mstart) + 1
-            lstart = body.rfind('\n', begin, mstart) + 1 or begin
-            begin = body.find('\n', mend) + 1 or len(body) + 1
+            linenum += body.count(b'\n', begin, mstart) + 1
+            lstart = body.rfind(b'\n', begin, mstart) + 1 or begin
+            begin = body.find(b'\n', mend) + 1 or len(body) + 1
             lend = begin - 1
             yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
 
@@ -2837,6 +3436,7 @@
 
     matches = {}
     copies = {}
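+    # matches: rev -> {filename: [match info, ...]}, filled in during the walk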
+
     def grepbody(fn, rev, body):
         matches[rev].setdefault(fn, [])
         m = matches[rev][fn]
@@ -2849,17 +3449,18 @@
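+        # map SequenceMatcher opcodes onto the '+'/'-' markers shown by --diff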
         for tag, alo, ahi, blo, bhi in sm.get_opcodes():
             if tag == r'insert':
                 for i in pycompat.xrange(blo, bhi):
-                    yield ('+', b[i])
+                    yield (b'+', b[i])
             elif tag == r'delete':
                 for i in pycompat.xrange(alo, ahi):
-                    yield ('-', a[i])
+                    yield (b'-', a[i])
             elif tag == r'replace':
                 for i in pycompat.xrange(alo, ahi):
-                    yield ('-', a[i])
+                    yield (b'-', a[i])
                 for i in pycompat.xrange(blo, bhi):
-                    yield ('+', b[i])
+                    yield (b'+', b[i])
 
     uipathfn = scmutil.getuipathfn(repo)
+
     def display(fm, fn, ctx, pstates, states):
         rev = scmutil.intrev(ctx)
         if fm.isplain():
@@ -2867,10 +3468,11 @@
         else:
             formatuser = pycompat.bytestr
         if ui.quiet:
-            datefmt = '%Y-%m-%d'
+            datefmt = b'%Y-%m-%d'
         else:
-            datefmt = '%a %b %d %H:%M:%S %Y %1%2'
+            datefmt = b'%a %b %d %H:%M:%S %Y %1%2'
         found = False
+
         @util.cachefunc
         def binary():
             flog = getfile(fn)
@@ -2879,46 +3481,72 @@
             except error.WdirUnsupported:
                 return ctx[fn].isbinary()
 
-        fieldnamemap = {'linenumber': 'lineno'}
+        fieldnamemap = {b'linenumber': b'lineno'}
         if diff:
             iter = difflinestates(pstates, states)
         else:
-            iter = [('', l) for l in states]
+            iter = [(b'', l) for l in states]
         for change, l in iter:
             fm.startitem()
             fm.context(ctx=ctx)
             fm.data(node=fm.hexfunc(scmutil.binnode(ctx)), path=fn)
-            fm.plain(uipathfn(fn), label='grep.filename')
+            fm.plain(uipathfn(fn), label=b'grep.filename')
 
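+            # each column is a (name, format, value, condition, extra label) tuple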
             cols = [
-                ('rev', '%d', rev, not plaingrep, ''),
-                ('linenumber', '%d', l.linenum, opts.get('line_number'), ''),
+                (b'rev', b'%d', rev, not plaingrep, b''),
+                (
+                    b'linenumber',
+                    b'%d',
+                    l.linenum,
+                    opts.get(b'line_number'),
+                    b'',
+                ),
             ]
             if diff:
                 cols.append(
-                    ('change', '%s', change, True,
-                     'grep.inserted ' if change == '+' else 'grep.deleted ')
+                    (
+                        b'change',
+                        b'%s',
+                        change,
+                        True,
+                        b'grep.inserted '
+                        if change == b'+'
+                        else b'grep.deleted ',
+                    )
                 )
-            cols.extend([
-                ('user', '%s', formatuser(ctx.user()), opts.get('user'), ''),
-                ('date', '%s', fm.formatdate(ctx.date(), datefmt),
-                 opts.get('date'), ''),
-            ])
+            cols.extend(
+                [
+                    (
+                        b'user',
+                        b'%s',
+                        formatuser(ctx.user()),
+                        opts.get(b'user'),
+                        b'',
+                    ),
+                    (
+                        b'date',
+                        b'%s',
+                        fm.formatdate(ctx.date(), datefmt),
+                        opts.get(b'date'),
+                        b'',
+                    ),
+                ]
+            )
             for name, fmt, data, cond, extra_label in cols:
                 if cond:
-                    fm.plain(sep, label='grep.sep')
+                    fm.plain(sep, label=b'grep.sep')
                 field = fieldnamemap.get(name, name)
-                label = extra_label + ('grep.%s' % name)
+                label = extra_label + (b'grep.%s' % name)
                 fm.condwrite(cond, field, fmt, data, label=label)
-            if not opts.get('files_with_matches'):
-                fm.plain(sep, label='grep.sep')
-                if not opts.get('text') and binary():
-                    fm.plain(_(" Binary file matches"))
+            if not opts.get(b'files_with_matches'):
+                fm.plain(sep, label=b'grep.sep')
+                if not opts.get(b'text') and binary():
+                    fm.plain(_(b" Binary file matches"))
                 else:
-                    displaymatches(fm.nested('texts', tmpl='{text}'), l)
+                    displaymatches(fm.nested(b'texts', tmpl=b'{text}'), l)
             fm.plain(eol)
             found = True
-            if opts.get('files_with_matches'):
+            if opts.get(b'files_with_matches'):
                 break
         return found
 
@@ -2927,15 +3555,15 @@
         for s, e in l.findpos():
             if p < s:
                 fm.startitem()
-                fm.write('text', '%s', l.line[p:s])
+                fm.write(b'text', b'%s', l.line[p:s])
                 fm.data(matched=False)
             fm.startitem()
-            fm.write('text', '%s', l.line[s:e], label='grep.match')
+            fm.write(b'text', b'%s', l.line[s:e], label=b'grep.match')
             fm.data(matched=True)
             p = e
         if p < len(l.line):
             fm.startitem()
-            fm.write('text', '%s', l.line[p:])
+            fm.write(b'text', b'%s', l.line[p:])
             fm.data(matched=False)
         fm.end()
 
@@ -2943,9 +3571,10 @@
     revfiles = {}
     match = scmutil.match(repo[None], pats, opts)
     found = False
-    follow = opts.get('follow')
+    follow = opts.get(b'follow')
 
     getrenamed = scmutil.getrenamedfn(repo)
+
     def prep(ctx, fns):
         rev = ctx.rev()
         pctx = ctx.p1()
@@ -2986,8 +3615,8 @@
                 except error.LookupError:
                     pass
 
-    ui.pager('grep')
-    fm = ui.formatter('grep', opts)
+    ui.pager(b'grep')
+    fm = ui.formatter(b'grep', opts)
     for ctx in cmdutil.walkchangerevs(repo, match, opts, prep):
         rev = ctx.rev()
         parent = ctx.p1().rev()
@@ -3015,16 +3644,31 @@
 
     return not found
 
-@command('heads',
-    [('r', 'rev', '',
-     _('show only heads which are descendants of STARTREV'), _('STARTREV')),
-    ('t', 'topo', False, _('show topological heads only')),
-    ('a', 'active', False, _('show active branchheads only (DEPRECATED)')),
-    ('c', 'closed', False, _('show normal and closed branch heads')),
-    ] + templateopts,
-    _('[-ct] [-r STARTREV] [REV]...'),
+
+@command(
+    b'heads',
+    [
+        (
+            b'r',
+            b'rev',
+            b'',
+            _(b'show only heads which are descendants of STARTREV'),
+            _(b'STARTREV'),
+        ),
+        (b't', b'topo', False, _(b'show topological heads only')),
+        (
+            b'a',
+            b'active',
+            False,
+            _(b'show active branchheads only (DEPRECATED)'),
+        ),
+        (b'c', b'closed', False, _(b'show normal and closed branch heads')),
+    ]
+    + templateopts,
+    _(b'[-ct] [-r STARTREV] [REV]...'),
     helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
-    intents={INTENT_READONLY})
+    intents={INTENT_READONLY},
+)
 def heads(ui, repo, *branchrevs, **opts):
     """show branch heads
 
@@ -3052,58 +3696,68 @@
 
     opts = pycompat.byteskwargs(opts)
     start = None
-    rev = opts.get('rev')
+    rev = opts.get(b'rev')
     if rev:
-        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
         start = scmutil.revsingle(repo, rev, None).node()
 
-    if opts.get('topo'):
+    if opts.get(b'topo'):
         heads = [repo[h] for h in repo.heads(start)]
     else:
         heads = []
         for branch in repo.branchmap():
-            heads += repo.branchheads(branch, start, opts.get('closed'))
+            heads += repo.branchheads(branch, start, opts.get(b'closed'))
         heads = [repo[h] for h in heads]
 
     if branchrevs:
-        branches = set(repo[r].branch()
-                       for r in scmutil.revrange(repo, branchrevs))
+        branches = set(
+            repo[r].branch() for r in scmutil.revrange(repo, branchrevs)
+        )
         heads = [h for h in heads if h.branch() in branches]
 
-    if opts.get('active') and branchrevs:
+    if opts.get(b'active') and branchrevs:
         dagheads = repo.heads(start)
         heads = [h for h in heads if h.node() in dagheads]
 
     if branchrevs:
         haveheads = set(h.branch() for h in heads)
         if branches - haveheads:
-            headless = ', '.join(b for b in branches - haveheads)
-            msg = _('no open branch heads found on branches %s')
-            if opts.get('rev'):
-                msg += _(' (started at %s)') % opts['rev']
-            ui.warn((msg + '\n') % headless)
+            headless = b', '.join(b for b in branches - haveheads)
+            msg = _(b'no open branch heads found on branches %s')
+            if opts.get(b'rev'):
+                msg += _(b' (started at %s)') % opts[b'rev']
+            ui.warn((msg + b'\n') % headless)
 
     if not heads:
         return 1
 
-    ui.pager('heads')
-    heads = sorted(heads, key=lambda x: -x.rev())
+    ui.pager(b'heads')
+    heads = sorted(heads, key=lambda x: -(x.rev()))
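+    # display the newest (highest revision number) heads first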
     displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
     for ctx in heads:
         displayer.show(ctx)
     displayer.close()
 
-@command('help',
-    [('e', 'extension', None, _('show only help for extensions')),
-     ('c', 'command', None, _('show only help for commands')),
-     ('k', 'keyword', None, _('show topics matching keyword')),
-     ('s', 'system', [],
-      _('show help for specific platform(s)'), _('PLATFORM')),
-     ],
-    _('[-eck] [-s PLATFORM] [TOPIC]'),
+
+@command(
+    b'help',
+    [
+        (b'e', b'extension', None, _(b'show only help for extensions')),
+        (b'c', b'command', None, _(b'show only help for commands')),
+        (b'k', b'keyword', None, _(b'show topics matching keyword')),
+        (
+            b's',
+            b'system',
+            [],
+            _(b'show help for specific platform(s)'),
+            _(b'PLATFORM'),
+        ),
+    ],
+    _(b'[-eck] [-s PLATFORM] [TOPIC]'),
     helpcategory=command.CATEGORY_HELP,
     norepo=True,
-    intents={INTENT_READONLY})
+    intents={INTENT_READONLY},
+)
 def help_(ui, name=None, **opts):
     """show help for a given topic or a help overview
 
@@ -3117,39 +3771,53 @@
 
     keep = opts.get(r'system') or []
     if len(keep) == 0:
-        if pycompat.sysplatform.startswith('win'):
-            keep.append('windows')
-        elif pycompat.sysplatform == 'OpenVMS':
-            keep.append('vms')
-        elif pycompat.sysplatform == 'plan9':
-            keep.append('plan9')
+        if pycompat.sysplatform.startswith(b'win'):
+            keep.append(b'windows')
+        elif pycompat.sysplatform == b'OpenVMS':
+            keep.append(b'vms')
+        elif pycompat.sysplatform == b'plan9':
+            keep.append(b'plan9')
         else:
-            keep.append('unix')
+            keep.append(b'unix')
             keep.append(pycompat.sysplatform.lower())
     if ui.verbose:
-        keep.append('verbose')
+        keep.append(b'verbose')
 
     commands = sys.modules[__name__]
     formatted = help.formattedhelp(ui, commands, name, keep=keep, **opts)
-    ui.pager('help')
+    ui.pager(b'help')
     ui.write(formatted)
 
 
-@command('identify|id',
-    [('r', 'rev', '',
-     _('identify the specified revision'), _('REV')),
-    ('n', 'num', None, _('show local revision number')),
-    ('i', 'id', None, _('show global revision id')),
-    ('b', 'branch', None, _('show branch')),
-    ('t', 'tags', None, _('show tags')),
-    ('B', 'bookmarks', None, _('show bookmarks')),
-    ] + remoteopts + formatteropts,
-    _('[-nibtB] [-r REV] [SOURCE]'),
+@command(
+    b'identify|id',
+    [
+        (b'r', b'rev', b'', _(b'identify the specified revision'), _(b'REV')),
+        (b'n', b'num', None, _(b'show local revision number')),
+        (b'i', b'id', None, _(b'show global revision id')),
+        (b'b', b'branch', None, _(b'show branch')),
+        (b't', b'tags', None, _(b'show tags')),
+        (b'B', b'bookmarks', None, _(b'show bookmarks')),
+    ]
+    + remoteopts
+    + formatteropts,
+    _(b'[-nibtB] [-r REV] [SOURCE]'),
     helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
     optionalrepo=True,
-    intents={INTENT_READONLY})
-def identify(ui, repo, source=None, rev=None,
-             num=None, id=None, branch=None, tags=None, bookmarks=None, **opts):
+    intents={INTENT_READONLY},
+)
+def identify(
+    ui,
+    repo,
+    source=None,
+    rev=None,
+    num=None,
+    id=None,
+    branch=None,
+    tags=None,
+    bookmarks=None,
+    **opts
+):
     """identify the working directory or specified revision
 
     Print a summary identifying the repository state at REV using one or
@@ -3198,8 +3866,9 @@
 
     opts = pycompat.byteskwargs(opts)
     if not repo and not source:
-        raise error.Abort(_("there is no Mercurial repository here "
-                           "(.hg not found)"))
+        raise error.Abort(
+            _(b"there is no Mercurial repository here (.hg not found)")
+        )
 
     default = not (num or id or branch or tags or bookmarks)
     output = []
@@ -3207,21 +3876,22 @@
 
     if source:
         source, branches = hg.parseurl(ui.expandpath(source))
-        peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo
+        peer = hg.peer(repo or ui, opts, source)  # only pass ui when no repo
         repo = peer.local()
         revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
 
-    fm = ui.formatter('identify', opts)
+    fm = ui.formatter(b'identify', opts)
     fm.startitem()
 
     if not repo:
         if num or branch or tags:
             raise error.Abort(
-                _("can't query remote revision number, branch, or tags"))
+                _(b"can't query remote revision number, branch, or tags")
+            )
         if not rev and revs:
             rev = revs[0]
         if not rev:
-            rev = "tip"
+            rev = b"tip"
 
         remoterev = peer.lookup(rev)
         hexrev = fm.hexfunc(remoterev)
@@ -3233,10 +3903,15 @@
         def getbms():
             bms = []
 
-            if 'bookmarks' in peer.listkeys('namespaces'):
+            if b'bookmarks' in peer.listkeys(b'namespaces'):
                 hexremoterev = hex(remoterev)
-                bms = [bm for bm, bmr in peer.listkeys('bookmarks').iteritems()
-                       if bmr == hexremoterev]
+                bms = [
+                    bm
+                    for bm, bmr in pycompat.iteritems(
+                        peer.listkeys(b'bookmarks')
+                    )
+                    if bmr == hexremoterev
+                ]
 
             return sorted(bms)
 
@@ -3245,16 +3920,16 @@
                 output.extend(getbms())
             elif default and not ui.quiet:
                 # multiple bookmarks for a single parent separated by '/'
-                bm = '/'.join(getbms())
+                bm = b'/'.join(getbms())
                 if bm:
                     output.append(bm)
         else:
             fm.data(node=hex(remoterev))
-            if bookmarks or 'bookmarks' in fm.datahint():
-                fm.data(bookmarks=fm.formatlist(getbms(), name='bookmark'))
+            if bookmarks or b'bookmarks' in fm.datahint():
+                fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
     else:
         if rev:
-            repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+            repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
         ctx = scmutil.revsingle(repo, rev, None)
 
         if ctx.rev() is None:
@@ -3264,22 +3939,25 @@
             for p in parents:
                 taglist.extend(p.tags())
 
-            dirty = ""
+            dirty = b""
             if ctx.dirty(missing=True, merge=False, branch=False):
-                dirty = '+'
+                dirty = b'+'
             fm.data(dirty=dirty)
 
             hexoutput = [fm.hexfunc(p.node()) for p in parents]
             if default or id:
-                output = ["%s%s" % ('+'.join(hexoutput), dirty)]
-            fm.data(id="%s%s" % ('+'.join(hexoutput), dirty))
+                output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
+            fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
 
             if num:
-                numoutput = ["%d" % p.rev() for p in parents]
-                output.append("%s%s" % ('+'.join(numoutput), dirty))
-
-            fm.data(parents=fm.formatlist([fm.hexfunc(p.node())
-                                           for p in parents], name='node'))
+                numoutput = [b"%d" % p.rev() for p in parents]
+                output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
+
+            fm.data(
+                parents=fm.formatlist(
+                    [fm.hexfunc(p.node()) for p in parents], name=b'node'
+                )
+            )
         else:
             hexoutput = fm.hexfunc(ctx.node())
             if default or id:
@@ -3292,16 +3970,16 @@
 
         if default and not ui.quiet:
             b = ctx.branch()
-            if b != 'default':
-                output.append("(%s)" % b)
+            if b != b'default':
+                output.append(b"(%s)" % b)
 
             # multiple tags for a single parent separated by '/'
-            t = '/'.join(taglist)
+            t = b'/'.join(taglist)
             if t:
                 output.append(t)
 
             # multiple bookmarks for a single parent separated by '/'
-            bm = '/'.join(ctx.bookmarks())
+            bm = b'/'.join(ctx.bookmarks())
             if bm:
                 output.append(bm)
         else:
@@ -3316,36 +3994,63 @@
 
         fm.data(node=ctx.hex())
         fm.data(branch=ctx.branch())
-        fm.data(tags=fm.formatlist(taglist, name='tag', sep=':'))
-        fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name='bookmark'))
+        fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
+        fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
         fm.context(ctx=ctx)
 
-    fm.plain("%s\n" % ' '.join(output))
+    fm.plain(b"%s\n" % b' '.join(output))
     fm.end()
 
-@command('import|patch',
-    [('p', 'strip', 1,
-     _('directory strip option for patch. This has the same '
-       'meaning as the corresponding patch option'), _('NUM')),
-    ('b', 'base', '', _('base path (DEPRECATED)'), _('PATH')),
-    ('e', 'edit', False, _('invoke editor on commit messages')),
-    ('f', 'force', None,
-     _('skip check for outstanding uncommitted changes (DEPRECATED)')),
-    ('', 'no-commit', None,
-     _("don't commit, just update the working directory")),
-    ('', 'bypass', None,
-     _("apply patch without touching the working directory")),
-    ('', 'partial', None,
-     _('commit even if some hunks fail')),
-    ('', 'exact', None,
-     _('abort if patch would apply lossily')),
-    ('', 'prefix', '',
-     _('apply patch to subdirectory'), _('DIR')),
-    ('', 'import-branch', None,
-     _('use any branch information in patch (implied by --exact)'))] +
-    commitopts + commitopts2 + similarityopts,
-    _('[OPTION]... PATCH...'),
-    helpcategory=command.CATEGORY_IMPORT_EXPORT)
+
+@command(
+    b'import|patch',
+    [
+        (
+            b'p',
+            b'strip',
+            1,
+            _(
+                b'directory strip option for patch. This has the same '
+                b'meaning as the corresponding patch option'
+            ),
+            _(b'NUM'),
+        ),
+        (b'b', b'base', b'', _(b'base path (DEPRECATED)'), _(b'PATH')),
+        (b'e', b'edit', False, _(b'invoke editor on commit messages')),
+        (
+            b'f',
+            b'force',
+            None,
+            _(b'skip check for outstanding uncommitted changes (DEPRECATED)'),
+        ),
+        (
+            b'',
+            b'no-commit',
+            None,
+            _(b"don't commit, just update the working directory"),
+        ),
+        (
+            b'',
+            b'bypass',
+            None,
+            _(b"apply patch without touching the working directory"),
+        ),
+        (b'', b'partial', None, _(b'commit even if some hunks fail')),
+        (b'', b'exact', None, _(b'abort if patch would apply lossily')),
+        (b'', b'prefix', b'', _(b'apply patch to subdirectory'), _(b'DIR')),
+        (
+            b'',
+            b'import-branch',
+            None,
+            _(b'use any branch information in patch (implied by --exact)'),
+        ),
+    ]
+    + commitopts
+    + commitopts2
+    + similarityopts,
+    _(b'[OPTION]... PATCH...'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT,
+)
 def import_(ui, repo, patch1=None, *patches, **opts):
     """import an ordered set of patches
 
@@ -3447,104 +4152,128 @@
 
     opts = pycompat.byteskwargs(opts)
     if not patch1:
-        raise error.Abort(_('need at least one patch to import'))
+        raise error.Abort(_(b'need at least one patch to import'))
 
     patches = (patch1,) + patches
 
-    date = opts.get('date')
+    date = opts.get(b'date')
     if date:
-        opts['date'] = dateutil.parsedate(date)
-
-    exact = opts.get('exact')
-    update = not opts.get('bypass')
-    if not update and opts.get('no_commit'):
-        raise error.Abort(_('cannot use --no-commit with --bypass'))
+        opts[b'date'] = dateutil.parsedate(date)
+
+    exact = opts.get(b'exact')
+    update = not opts.get(b'bypass')
+    if not update and opts.get(b'no_commit'):
+        raise error.Abort(_(b'cannot use --no-commit with --bypass'))
     try:
-        sim = float(opts.get('similarity') or 0)
+        sim = float(opts.get(b'similarity') or 0)
     except ValueError:
-        raise error.Abort(_('similarity must be a number'))
+        raise error.Abort(_(b'similarity must be a number'))
     if sim < 0 or sim > 100:
-        raise error.Abort(_('similarity must be between 0 and 100'))
+        raise error.Abort(_(b'similarity must be between 0 and 100'))
     if sim and not update:
-        raise error.Abort(_('cannot use --similarity with --bypass'))
+        raise error.Abort(_(b'cannot use --similarity with --bypass'))
     if exact:
-        if opts.get('edit'):
-            raise error.Abort(_('cannot use --exact with --edit'))
-        if opts.get('prefix'):
-            raise error.Abort(_('cannot use --exact with --prefix'))
-
-    base = opts["base"]
+        if opts.get(b'edit'):
+            raise error.Abort(_(b'cannot use --exact with --edit'))
+        if opts.get(b'prefix'):
+            raise error.Abort(_(b'cannot use --exact with --prefix'))
+
+    base = opts[b"base"]
     msgs = []
     ret = 0
 
     with repo.wlock():
         if update:
             cmdutil.checkunfinished(repo)
-            if (exact or not opts.get('force')):
+            if exact or not opts.get(b'force'):
                 cmdutil.bailifchanged(repo)
 
-        if not opts.get('no_commit'):
+        if not opts.get(b'no_commit'):
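+            # committing needs the store lock and a transaction;
+            # --no-commit only touches the dirstate, so guard just that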
             lock = repo.lock
-            tr = lambda: repo.transaction('import')
+            tr = lambda: repo.transaction(b'import')
             dsguard = util.nullcontextmanager
         else:
             lock = util.nullcontextmanager
             tr = util.nullcontextmanager
-            dsguard = lambda: dirstateguard.dirstateguard(repo, 'import')
+            dsguard = lambda: dirstateguard.dirstateguard(repo, b'import')
         with lock(), tr(), dsguard():
             parents = repo[None].parents()
             for patchurl in patches:
-                if patchurl == '-':
-                    ui.status(_('applying patch from stdin\n'))
+                if patchurl == b'-':
+                    ui.status(_(b'applying patch from stdin\n'))
                     patchfile = ui.fin
-                    patchurl = 'stdin'      # for error message
+                    patchurl = b'stdin'  # for error message
                 else:
                     patchurl = os.path.join(base, patchurl)
-                    ui.status(_('applying %s\n') % patchurl)
+                    ui.status(_(b'applying %s\n') % patchurl)
                     patchfile = hg.openpath(ui, patchurl, sendaccept=False)
 
                 haspatch = False
                 for hunk in patch.split(patchfile):
                     with patch.extract(ui, hunk) as patchdata:
-                        msg, node, rej = cmdutil.tryimportone(ui, repo,
-                                                              patchdata,
-                                                              parents, opts,
-                                                              msgs, hg.clean)
+                        msg, node, rej = cmdutil.tryimportone(
+                            ui, repo, patchdata, parents, opts, msgs, hg.clean
+                        )
                     if msg:
                         haspatch = True
-                        ui.note(msg + '\n')
+                        ui.note(msg + b'\n')
                     if update or exact:
                         parents = repo[None].parents()
                     else:
                         parents = [repo[node]]
                     if rej:
-                        ui.write_err(_("patch applied partially\n"))
-                        ui.write_err(_("(fix the .rej files and run "
-                                       "`hg commit --amend`)\n"))
+                        ui.write_err(_(b"patch applied partially\n"))
+                        ui.write_err(
+                            _(
+                                b"(fix the .rej files and run "
+                                b"`hg commit --amend`)\n"
+                            )
+                        )
                         ret = 1
                         break
 
                 if not haspatch:
-                    raise error.Abort(_('%s: no diffs found') % patchurl)
+                    raise error.Abort(_(b'%s: no diffs found') % patchurl)
 
             if msgs:
-                repo.savecommitmessage('\n* * *\n'.join(msgs))
+                repo.savecommitmessage(b'\n* * *\n'.join(msgs))
         return ret
 
-@command('incoming|in',
-    [('f', 'force', None,
-     _('run even if remote repository is unrelated')),
-    ('n', 'newest-first', None, _('show newest record first')),
-    ('', 'bundle', '',
-     _('file to store the bundles into'), _('FILE')),
-    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
-    ('B', 'bookmarks', False, _("compare bookmarks")),
-    ('b', 'branch', [],
-     _('a specific branch you would like to pull'), _('BRANCH')),
-    ] + logopts + remoteopts + subrepoopts,
-    _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'),
-    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT)
-def incoming(ui, repo, source="default", **opts):
+
+@command(
+    b'incoming|in',
+    [
+        (
+            b'f',
+            b'force',
+            None,
+            _(b'run even if remote repository is unrelated'),
+        ),
+        (b'n', b'newest-first', None, _(b'show newest record first')),
+        (b'', b'bundle', b'', _(b'file to store the bundles into'), _(b'FILE')),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'a remote changeset intended to be added'),
+            _(b'REV'),
+        ),
+        (b'B', b'bookmarks', False, _(b"compare bookmarks")),
+        (
+            b'b',
+            b'branch',
+            [],
+            _(b'a specific branch you would like to pull'),
+            _(b'BRANCH'),
+        ),
+    ]
+    + logopts
+    + remoteopts
+    + subrepoopts,
+    _(b'[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'),
+    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
+)
+def incoming(ui, repo, source=b"default", **opts):
     """show new changesets found in source
 
     Show new changesets found in the specified path/URL or the default
@@ -3599,28 +4328,31 @@
     Returns 0 if there are incoming changes, 1 otherwise.
     """
     opts = pycompat.byteskwargs(opts)
-    if opts.get('graph'):
+    if opts.get(b'graph'):
         logcmdutil.checkunsupportedgraphflags([], opts)
+
         def display(other, chlist, displayer):
             revdag = logcmdutil.graphrevs(other, chlist, opts)
-            logcmdutil.displaygraph(ui, repo, revdag, displayer,
-                                    graphmod.asciiedges)
+            logcmdutil.displaygraph(
+                ui, repo, revdag, displayer, graphmod.asciiedges
+            )
 
         hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True)
         return 0
 
-    if opts.get('bundle') and opts.get('subrepos'):
-        raise error.Abort(_('cannot combine --bundle and --subrepos'))
-
-    if opts.get('bookmarks'):
-        source, branches = hg.parseurl(ui.expandpath(source),
-                                       opts.get('branch'))
+    if opts.get(b'bundle') and opts.get(b'subrepos'):
+        raise error.Abort(_(b'cannot combine --bundle and --subrepos'))
+
+    if opts.get(b'bookmarks'):
+        source, branches = hg.parseurl(
+            ui.expandpath(source), opts.get(b'branch')
+        )
         other = hg.peer(repo, opts, source)
-        if 'bookmarks' not in other.listkeys('namespaces'):
-            ui.warn(_("remote doesn't support bookmarks\n"))
+        if b'bookmarks' not in other.listkeys(b'namespaces'):
+            ui.warn(_(b"remote doesn't support bookmarks\n"))
             return 0
-        ui.pager('incoming')
-        ui.status(_('comparing with %s\n') % util.hidepassword(source))
+        ui.pager(b'incoming')
+        ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
         return bookmarks.incoming(ui, repo, other)
 
     repo._subtoppath = ui.expandpath(source)
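
Python 3 requires ``**kwargs`` keys to be ``str``, so each command body
starts by round-tripping its options through ``pycompat.byteskwargs``,
exactly as ``incoming`` does above; every later lookup can then use a
bytes key. A hedged sketch of the pattern via the public extension API
(``mydemo`` and its flag are invented; ``registrar.command`` and
``pycompat.byteskwargs`` are the real helpers)::

   from mercurial import pycompat, registrar

   cmdtable = {}
   command = registrar.command(cmdtable)

   @command(b'mydemo', [(b'f', b'force', None, b'run anyway')], b'hg mydemo')
   def mydemo(ui, repo, **opts):
       # keys arrive as str under Python 3 ...
       opts = pycompat.byteskwargs(opts)
       # ... and are bytes from here on, matching the b'force' literal
       if opts.get(b'force'):
           ui.status(b'forcing\n')
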
@@ -3630,10 +4362,15 @@
         del repo._subtoppath
 
 
-@command('init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'),
-         helpcategory=command.CATEGORY_REPO_CREATION,
-         helpbasic=True, norepo=True)
-def init(ui, dest=".", **opts):
+@command(
+    b'init',
+    remoteopts,
+    _(b'[-e CMD] [--remotecmd CMD] [DEST]'),
+    helpcategory=command.CATEGORY_REPO_CREATION,
+    helpbasic=True,
+    norepo=True,
+)
+def init(ui, dest=b".", **opts):
     """create a new repository in the given directory
 
     Initialize a new repository in the given directory. If the given
@@ -3649,13 +4386,34 @@
     opts = pycompat.byteskwargs(opts)
     hg.peer(ui, opts, ui.expandpath(dest), create=True)
 
-@command('locate',
-    [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
-    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
-    ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
-    ] + walkopts,
-    _('[OPTION]... [PATTERN]...'),
-    helpcategory=command.CATEGORY_WORKING_DIRECTORY)
+
+@command(
+    b'locate',
+    [
+        (
+            b'r',
+            b'rev',
+            b'',
+            _(b'search the repository as it is in REV'),
+            _(b'REV'),
+        ),
+        (
+            b'0',
+            b'print0',
+            None,
+            _(b'end filenames with NUL, for use with xargs'),
+        ),
+        (
+            b'f',
+            b'fullpath',
+            None,
+            _(b'print complete paths from the filesystem root'),
+        ),
+    ]
+    + walkopts,
+    _(b'[OPTION]... [PATTERN]...'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+)
 def locate(ui, repo, *pats, **opts):
     """locate files matching specific patterns (DEPRECATED)
 
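Each entry in an options list above is a ``fancyopts`` tuple, and the
reformatting simply gives every field its own line once a tuple no longer
fits on one. An annotated copy of one entry from ``locate`` (the values
are verbatim; the comments are editorial)::

   from mercurial.i18n import _

   opt = (
       b'r',                                         # short flag:  -r
       b'rev',                                       # long flag:   --rev
       b'',                                          # default; a bytes default means the flag takes a value
       _(b'search the repository as it is in REV'),  # help text
       _(b'REV'),                                    # value placeholder shown in --help
   )
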
@@ -3679,17 +4437,18 @@
     Returns 0 if a match is found, 1 otherwise.
     """
     opts = pycompat.byteskwargs(opts)
-    if opts.get('print0'):
-        end = '\0'
+    if opts.get(b'print0'):
+        end = b'\0'
     else:
-        end = '\n'
-    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
+        end = b'\n'
+    ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
 
     ret = 1
-    m = scmutil.match(ctx, pats, opts, default='relglob',
-                      badfn=lambda x, y: False)
-
-    ui.pager('locate')
+    m = scmutil.match(
+        ctx, pats, opts, default=b'relglob', badfn=lambda x, y: False
+    )
+
+    ui.pager(b'locate')
     if ctx.rev() is None:
         # When run on the working copy, "locate" includes removed files, so
         # we get the list of files from the dirstate.
@@ -3698,7 +4457,7 @@
         filesgen = ctx.matches(m)
     uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=bool(pats))
     for abs in filesgen:
-        if opts.get('fullpath'):
+        if opts.get(b'fullpath'):
             ui.write(repo.wjoin(abs), end)
         else:
             ui.write(uipathfn(abs), end)
@@ -3706,35 +4465,98 @@
 
     return ret
 
-@command('log|history',
-    [('f', 'follow', None,
-     _('follow changeset history, or file history across copies and renames')),
-    ('', 'follow-first', None,
-     _('only follow the first parent of merge changesets (DEPRECATED)')),
-    ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
-    ('C', 'copies', None, _('show copied files')),
-    ('k', 'keyword', [],
-     _('do case-insensitive search for a given text'), _('TEXT')),
-    ('r', 'rev', [], _('show the specified revision or revset'), _('REV')),
-    ('L', 'line-range', [],
-     _('follow line range of specified file (EXPERIMENTAL)'),
-     _('FILE,RANGE')),
-    ('', 'removed', None, _('include revisions where files were removed')),
-    ('m', 'only-merges', None,
-     _('show only merges (DEPRECATED) (use -r "merge()" instead)')),
-    ('u', 'user', [], _('revisions committed by user'), _('USER')),
-    ('', 'only-branch', [],
-     _('show only changesets within the given named branch (DEPRECATED)'),
-     _('BRANCH')),
-    ('b', 'branch', [],
-     _('show changesets within the given named branch'), _('BRANCH')),
-    ('P', 'prune', [],
-     _('do not display revision or any of its ancestors'), _('REV')),
-    ] + logopts + walkopts,
-    _('[OPTION]... [FILE]'),
+
+@command(
+    b'log|history',
+    [
+        (
+            b'f',
+            b'follow',
+            None,
+            _(
+                b'follow changeset history, or file history across copies and renames'
+            ),
+        ),
+        (
+            b'',
+            b'follow-first',
+            None,
+            _(b'only follow the first parent of merge changesets (DEPRECATED)'),
+        ),
+        (
+            b'd',
+            b'date',
+            b'',
+            _(b'show revisions matching date spec'),
+            _(b'DATE'),
+        ),
+        (b'C', b'copies', None, _(b'show copied files')),
+        (
+            b'k',
+            b'keyword',
+            [],
+            _(b'do case-insensitive search for a given text'),
+            _(b'TEXT'),
+        ),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'show the specified revision or revset'),
+            _(b'REV'),
+        ),
+        (
+            b'L',
+            b'line-range',
+            [],
+            _(b'follow line range of specified file (EXPERIMENTAL)'),
+            _(b'FILE,RANGE'),
+        ),
+        (
+            b'',
+            b'removed',
+            None,
+            _(b'include revisions where files were removed'),
+        ),
+        (
+            b'm',
+            b'only-merges',
+            None,
+            _(b'show only merges (DEPRECATED) (use -r "merge()" instead)'),
+        ),
+        (b'u', b'user', [], _(b'revisions committed by user'), _(b'USER')),
+        (
+            b'',
+            b'only-branch',
+            [],
+            _(
+                b'show only changesets within the given named branch (DEPRECATED)'
+            ),
+            _(b'BRANCH'),
+        ),
+        (
+            b'b',
+            b'branch',
+            [],
+            _(b'show changesets within the given named branch'),
+            _(b'BRANCH'),
+        ),
+        (
+            b'P',
+            b'prune',
+            [],
+            _(b'do not display revision or any of its ancestors'),
+            _(b'REV'),
+        ),
+    ]
+    + logopts
+    + walkopts,
+    _(b'[OPTION]... [FILE]'),
     helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
-    helpbasic=True, inferrepo=True,
-    intents={INTENT_READONLY})
+    helpbasic=True,
+    inferrepo=True,
+    intents={INTENT_READONLY},
+)
 def log(ui, repo, *pats, **opts):
     """show revision history of entire repository or files
 
@@ -3864,18 +4686,18 @@
 
     """
     opts = pycompat.byteskwargs(opts)
-    linerange = opts.get('line_range')
-
-    if linerange and not opts.get('follow'):
-        raise error.Abort(_('--line-range requires --follow'))
+    linerange = opts.get(b'line_range')
+
+    if linerange and not opts.get(b'follow'):
+        raise error.Abort(_(b'--line-range requires --follow'))
 
     if linerange and pats:
         # TODO: take pats as patterns with no line-range filter
         raise error.Abort(
-            _('FILE arguments are not compatible with --line-range option')
+            _(b'FILE arguments are not compatible with --line-range option')
         )
 
-    repo = scmutil.unhidehashlikerevs(repo, opts.get('rev'), 'nowarn')
+    repo = scmutil.unhidehashlikerevs(repo, opts.get(b'rev'), b'nowarn')
     revs, differ = logcmdutil.getrevs(repo, pats, opts)
     if linerange:
         # TODO: should follow file history from logcmdutil._initialrevs(),
@@ -3883,28 +4705,34 @@
         revs, differ = logcmdutil.getlinerangerevs(repo, revs, opts)
 
     getcopies = None
-    if opts.get('copies'):
+    if opts.get(b'copies'):
         endrev = None
         if revs:
             endrev = revs.max() + 1
         getcopies = scmutil.getcopiesfn(repo, endrev=endrev)
 
-    ui.pager('log')
-    displayer = logcmdutil.changesetdisplayer(ui, repo, opts, differ,
-                                              buffered=True)
-    if opts.get('graph'):
+    ui.pager(b'log')
+    displayer = logcmdutil.changesetdisplayer(
+        ui, repo, opts, differ, buffered=True
+    )
+    if opts.get(b'graph'):
         displayfn = logcmdutil.displaygraphrevs
     else:
         displayfn = logcmdutil.displayrevs
     displayfn(ui, repo, revs, displayer, getcopies)
 
-@command('manifest',
-    [('r', 'rev', '', _('revision to display'), _('REV')),
-     ('', 'all', False, _("list files from all revisions"))]
-         + formatteropts,
-    _('[-r REV]'),
+
+@command(
+    b'manifest',
+    [
+        (b'r', b'rev', b'', _(b'revision to display'), _(b'REV')),
+        (b'', b'all', False, _(b"list files from all revisions")),
+    ]
+    + formatteropts,
+    _(b'[-r REV]'),
     helpcategory=command.CATEGORY_MAINTENANCE,
-    intents={INTENT_READONLY})
+    intents={INTENT_READONLY},
+)
 def manifest(ui, repo, node=None, rev=None, **opts):
     """output the current or given revision of the project manifest
 
@@ -3921,56 +4749,70 @@
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
-    fm = ui.formatter('manifest', opts)
-
-    if opts.get('all'):
+    fm = ui.formatter(b'manifest', opts)
+
+    if opts.get(b'all'):
         if rev or node:
-            raise error.Abort(_("can't specify a revision with --all"))
+            raise error.Abort(_(b"can't specify a revision with --all"))
 
         res = set()
         for rev in repo:
             ctx = repo[rev]
             res |= set(ctx.files())
 
-        ui.pager('manifest')
+        ui.pager(b'manifest')
         for f in sorted(res):
             fm.startitem()
-            fm.write("path", '%s\n', f)
+            fm.write(b"path", b'%s\n', f)
         fm.end()
         return
 
     if rev and node:
-        raise error.Abort(_("please specify just one revision"))
+        raise error.Abort(_(b"please specify just one revision"))
 
     if not node:
         node = rev
 
-    char = {'l': '@', 'x': '*', '': '', 't': 'd'}
-    mode = {'l': '644', 'x': '755', '': '644', 't': '755'}
+    char = {b'l': b'@', b'x': b'*', b'': b'', b't': b'd'}
+    mode = {b'l': b'644', b'x': b'755', b'': b'644', b't': b'755'}
     if node:
-        repo = scmutil.unhidehashlikerevs(repo, [node], 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, [node], b'nowarn')
     ctx = scmutil.revsingle(repo, node)
     mf = ctx.manifest()
-    ui.pager('manifest')
+    ui.pager(b'manifest')
     for f in ctx:
         fm.startitem()
         fm.context(ctx=ctx)
         fl = ctx[f].flags()
-        fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f]))
-        fm.condwrite(ui.verbose, 'mode type', '%s %1s ', mode[fl], char[fl])
-        fm.write('path', '%s\n', f)
+        fm.condwrite(ui.debugflag, b'hash', b'%s ', hex(mf[f]))
+        fm.condwrite(ui.verbose, b'mode type', b'%s %1s ', mode[fl], char[fl])
+        fm.write(b'path', b'%s\n', f)
     fm.end()
 
-@command('merge',
-    [('f', 'force', None,
-      _('force a merge including outstanding changes (DEPRECATED)')),
-    ('r', 'rev', '', _('revision to merge'), _('REV')),
-    ('P', 'preview', None,
-     _('review revisions to merge (no merge is performed)')),
-    ('', 'abort', None, _('abort the ongoing merge')),
-     ] + mergetoolopts,
-    _('[-P] [[-r] REV]'),
-    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, helpbasic=True)
+
+@command(
+    b'merge',
+    [
+        (
+            b'f',
+            b'force',
+            None,
+            _(b'force a merge including outstanding changes (DEPRECATED)'),
+        ),
+        (b'r', b'rev', b'', _(b'revision to merge'), _(b'REV')),
+        (
+            b'P',
+            b'preview',
+            None,
+            _(b'review revisions to merge (no merge is performed)'),
+        ),
+        (b'', b'abort', None, _(b'abort the ongoing merge')),
+    ]
+    + mergetoolopts,
+    _(b'[-P] [[-r] REV]'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+    helpbasic=True,
+)
 def merge(ui, repo, node=None, **opts):
     """merge another revision into working directory
 
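The ``manifest`` body above is a compact tour of the formatter protocol:
one ``startitem()`` per row, ``write``/``condwrite`` per field, ``end()``
to flush, with ``-T`` templates and JSON output falling out for free. A
stripped-down sketch (``showpaths`` and ``files`` are stand-ins)::

   def showpaths(ui, opts, files):
       fm = ui.formatter(b'demo', opts)
       for f in sorted(files):
           fm.startitem()
           # field name, plain-output format, value; condwrite() takes a
           # leading condition, as the hash/mode columns above show
           fm.write(b'path', b'%s\n', f)
       fm.end()
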
@@ -4001,24 +4843,26 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    abort = opts.get('abort')
+    abort = opts.get(b'abort')
     if abort and repo.dirstate.p2() == nullid:
-        cmdutil.wrongtooltocontinue(repo, _('merge'))
+        cmdutil.wrongtooltocontinue(repo, _(b'merge'))
     if abort:
         state = cmdutil.getunfinishedstate(repo)
-        if state and state._opname != 'merge':
-            raise error.Abort(_('cannot abort merge with %s in progress') %
-                                (state._opname), hint=state.hint())
+        if state and state._opname != b'merge':
+            raise error.Abort(
+                _(b'cannot abort merge with %s in progress') % (state._opname),
+                hint=state.hint(),
+            )
         if node:
-            raise error.Abort(_("cannot specify a node with --abort"))
-        if opts.get('rev'):
-            raise error.Abort(_("cannot specify both --rev and --abort"))
-        if opts.get('preview'):
-            raise error.Abort(_("cannot specify --preview with --abort"))
-    if opts.get('rev') and node:
-        raise error.Abort(_("please specify just one revision"))
+            raise error.Abort(_(b"cannot specify a node with --abort"))
+        if opts.get(b'rev'):
+            raise error.Abort(_(b"cannot specify both --rev and --abort"))
+        if opts.get(b'preview'):
+            raise error.Abort(_(b"cannot specify --preview with --abort"))
+    if opts.get(b'rev') and node:
+        raise error.Abort(_(b"please specify just one revision"))
     if not node:
-        node = opts.get('rev')
+        node = opts.get(b'rev')
 
     if node:
         node = scmutil.revsingle(repo, node).node()
@@ -4026,9 +4870,9 @@
     if not node and not abort:
         node = repo[destutil.destmerge(repo)].node()
 
-    if opts.get('preview'):
+    if opts.get(b'preview'):
         # find nodes that are ancestors of p2 but not of p1
-        p1 = repo.lookup('.')
+        p1 = repo.lookup(b'.')
         p2 = node
         nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
 
@@ -4039,32 +4883,66 @@
         return 0
 
     # ui.forcemerge is an internal variable, do not document
-    overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
-    with ui.configoverride(overrides, 'merge'):
-        force = opts.get('force')
-        labels = ['working copy', 'merge rev']
-        return hg.merge(repo, node, force=force, mergeforce=force,
-                        labels=labels, abort=abort)
+    overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+    with ui.configoverride(overrides, b'merge'):
+        force = opts.get(b'force')
+        labels = [b'working copy', b'merge rev']
+        return hg.merge(
+            repo,
+            node,
+            force=force,
+            mergeforce=force,
+            labels=labels,
+            abort=abort,
+        )
+
 
 statemod.addunfinished(
-    'merge', fname=None, clearable=True, allowcommit=True,
-    cmdmsg=_('outstanding uncommitted merge'), abortfunc=hg.abortmerge,
-    statushint=_('To continue:    hg commit\n'
-                 'To abort:       hg merge --abort'),
-    cmdhint=_("use 'hg commit' or 'hg merge --abort'")
+    b'merge',
+    fname=None,
+    clearable=True,
+    allowcommit=True,
+    cmdmsg=_(b'outstanding uncommitted merge'),
+    abortfunc=hg.abortmerge,
+    statushint=_(
+        b'To continue:    hg commit\nTo abort:       hg merge --abort'
+    ),
+    cmdhint=_(b"use 'hg commit' or 'hg merge --abort'"),
 )
 
-@command('outgoing|out',
-    [('f', 'force', None, _('run even when the destination is unrelated')),
-    ('r', 'rev', [],
-     _('a changeset intended to be included in the destination'), _('REV')),
-    ('n', 'newest-first', None, _('show newest record first')),
-    ('B', 'bookmarks', False, _('compare bookmarks')),
-    ('b', 'branch', [], _('a specific branch you would like to push'),
-     _('BRANCH')),
-    ] + logopts + remoteopts + subrepoopts,
-    _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'),
-    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT)
+
+@command(
+    b'outgoing|out',
+    [
+        (
+            b'f',
+            b'force',
+            None,
+            _(b'run even when the destination is unrelated'),
+        ),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'a changeset intended to be included in the destination'),
+            _(b'REV'),
+        ),
+        (b'n', b'newest-first', None, _(b'show newest record first')),
+        (b'B', b'bookmarks', False, _(b'compare bookmarks')),
+        (
+            b'b',
+            b'branch',
+            [],
+            _(b'a specific branch you would like to push'),
+            _(b'BRANCH'),
+        ),
+    ]
+    + logopts
+    + remoteopts
+    + subrepoopts,
+    _(b'[-M] [-p] [-n] [-f] [-r REV]... [DEST]'),
+    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
+)
 def outgoing(ui, repo, dest=None, **opts):
     """show changesets not found in the destination
 
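``ui.configoverride``, which ``merge`` above uses to smuggle ``--tool``
in as the internal ``ui.forcemerge`` setting, is a context manager that
layers a temporary configuration over the current one and restores the
previous state on exit. A minimal sketch (``runmerge`` is hypothetical;
the ``b'demo'`` source label is arbitrary)::

   def mergewithtool(ui, opts, runmerge):
       overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
       with ui.configoverride(overrides, b'demo'):
           runmerge()  # sees the overridden ui.forcemerge value
       # the previous value is back in effect here
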
@@ -4103,13 +4981,15 @@
     """
     # hg._outgoing() needs to re-resolve the path in order to handle #branch
     # style URLs, so don't overwrite dest.
-    path = ui.paths.getpath(dest, default=('default-push', 'default'))
+    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
     if not path:
-        raise error.Abort(_('default repository not configured!'),
-                          hint=_("see 'hg help config.paths'"))
+        raise error.Abort(
+            _(b'default repository not configured!'),
+            hint=_(b"see 'hg help config.paths'"),
+        )
 
     opts = pycompat.byteskwargs(opts)
-    if opts.get('graph'):
+    if opts.get(b'graph'):
         logcmdutil.checkunsupportedgraphflags([], opts)
         o, other = hg._outgoing(ui, repo, dest, opts)
         if not o:
@@ -4117,21 +4997,22 @@
             return
 
         revdag = logcmdutil.graphrevs(repo, o, opts)
-        ui.pager('outgoing')
+        ui.pager(b'outgoing')
         displayer = logcmdutil.changesetdisplayer(ui, repo, opts, buffered=True)
-        logcmdutil.displaygraph(ui, repo, revdag, displayer,
-                                graphmod.asciiedges)
+        logcmdutil.displaygraph(
+            ui, repo, revdag, displayer, graphmod.asciiedges
+        )
         cmdutil.outgoinghooks(ui, repo, other, opts, o)
         return 0
 
-    if opts.get('bookmarks'):
+    if opts.get(b'bookmarks'):
         dest = path.pushloc or path.loc
         other = hg.peer(repo, opts, dest)
-        if 'bookmarks' not in other.listkeys('namespaces'):
-            ui.warn(_("remote doesn't support bookmarks\n"))
+        if b'bookmarks' not in other.listkeys(b'namespaces'):
+            ui.warn(_(b"remote doesn't support bookmarks\n"))
             return 0
-        ui.status(_('comparing with %s\n') % util.hidepassword(dest))
-        ui.pager('outgoing')
+        ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
+        ui.pager(b'outgoing')
         return bookmarks.outgoing(ui, repo, other)
 
     repo._subtoppath = path.pushloc or path.loc
@@ -4140,12 +5021,23 @@
     finally:
         del repo._subtoppath
 
-@command('parents',
-    [('r', 'rev', '', _('show parents of the specified revision'), _('REV')),
-    ] + templateopts,
-    _('[-r REV] [FILE]'),
+
+@command(
+    b'parents',
+    [
+        (
+            b'r',
+            b'rev',
+            b'',
+            _(b'show parents of the specified revision'),
+            _(b'REV'),
+        ),
+    ]
+    + templateopts,
+    _(b'[-r REV] [FILE]'),
     helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
-    inferrepo=True)
+    inferrepo=True,
+)
 def parents(ui, repo, file_=None, **opts):
     """show the parents of the working directory or revision (DEPRECATED)
 
@@ -4168,15 +5060,15 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    rev = opts.get('rev')
+    rev = opts.get(b'rev')
     if rev:
-        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
     ctx = scmutil.revsingle(repo, rev, None)
 
     if file_:
         m = scmutil.match(ctx, (file_,), opts)
         if m.anypats() or len(m.files()) != 1:
-            raise error.Abort(_('can only specify an explicit filename'))
+            raise error.Abort(_(b'can only specify an explicit filename'))
         file_ = m.files()[0]
         filenodes = []
         for cp in ctx.parents():
@@ -4187,7 +5079,7 @@
             except error.LookupError:
                 pass
         if not filenodes:
-            raise error.Abort(_("'%s' not found in manifest!") % file_)
+            raise error.Abort(_(b"'%s' not found in manifest!") % file_)
         p = []
         for fn in filenodes:
             fctx = repo.filectx(file_, fileid=fn)
@@ -4201,9 +5093,15 @@
             displayer.show(repo[n])
     displayer.close()
 
-@command('paths', formatteropts, _('[NAME]'),
+
+@command(
+    b'paths',
+    formatteropts,
+    _(b'[NAME]'),
     helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
-    optionalrepo=True, intents={INTENT_READONLY})
+    optionalrepo=True,
+    intents={INTENT_READONLY},
+)
 def paths(ui, repo, search=None, **opts):
     """show aliases for remote repositories
 
@@ -4247,52 +5145,59 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    ui.pager('paths')
+    ui.pager(b'paths')
     if search:
-        pathitems = [(name, path) for name, path in ui.paths.iteritems()
-                     if name == search]
+        pathitems = [
+            (name, path)
+            for name, path in pycompat.iteritems(ui.paths)
+            if name == search
+        ]
     else:
-        pathitems = sorted(ui.paths.iteritems())
-
-    fm = ui.formatter('paths', opts)
+        pathitems = sorted(pycompat.iteritems(ui.paths))
+
+    fm = ui.formatter(b'paths', opts)
     if fm.isplain():
         hidepassword = util.hidepassword
     else:
         hidepassword = bytes
     if ui.quiet:
-        namefmt = '%s\n'
+        namefmt = b'%s\n'
     else:
-        namefmt = '%s = '
+        namefmt = b'%s = '
     showsubopts = not search and not ui.quiet
 
     for name, path in pathitems:
         fm.startitem()
-        fm.condwrite(not search, 'name', namefmt, name)
-        fm.condwrite(not ui.quiet, 'url', '%s\n', hidepassword(path.rawloc))
+        fm.condwrite(not search, b'name', namefmt, name)
+        fm.condwrite(not ui.quiet, b'url', b'%s\n', hidepassword(path.rawloc))
         for subopt, value in sorted(path.suboptions.items()):
-            assert subopt not in ('name', 'url')
+            assert subopt not in (b'name', b'url')
             if showsubopts:
-                fm.plain('%s:%s = ' % (name, subopt))
-            fm.condwrite(showsubopts, subopt, '%s\n', value)
+                fm.plain(b'%s:%s = ' % (name, subopt))
+            fm.condwrite(showsubopts, subopt, b'%s\n', value)
 
     fm.end()
 
     if search and not pathitems:
         if not ui.quiet:
-            ui.warn(_("not found!\n"))
+            ui.warn(_(b"not found!\n"))
         return 1
     else:
         return 0
 
-@command('phase',
-    [('p', 'public', False, _('set changeset phase to public')),
-     ('d', 'draft', False, _('set changeset phase to draft')),
-     ('s', 'secret', False, _('set changeset phase to secret')),
-     ('f', 'force', False, _('allow to move boundary backward')),
-     ('r', 'rev', [], _('target revision'), _('REV')),
+
+@command(
+    b'phase',
+    [
+        (b'p', b'public', False, _(b'set changeset phase to public')),
+        (b'd', b'draft', False, _(b'set changeset phase to draft')),
+        (b's', b'secret', False, _(b'set changeset phase to secret')),
+        (b'f', b'force', False, _(b'allow to move boundary backward')),
+        (b'r', b'rev', [], _(b'target revision'), _(b'REV')),
     ],
-    _('[-p|-d|-s] [-f] [-r] [REV...]'),
-    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+    _(b'[-p|-d|-s] [-f] [-r] [REV...]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def phase(ui, repo, *revs, **opts):
     """set or show the current phase name
 
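Not every hunk is a string-prefix change: ``paths`` above also drops
``dict.iteritems()``, which Python 3 removed, in favor of
``pycompat.iteritems``. A sketch of the substitution (the dict contents
are illustrative)::

   from mercurial import pycompat

   paths = {b'default': b'https://example.com/repo'}
   # resolves to d.iteritems() on Python 2 and plain d.items() on Python 3
   for name, loc in pycompat.iteritems(paths):
       print(name, loc)
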
@@ -4316,12 +5221,12 @@
     for idx, name in enumerate(phases.cmdphasenames):
         if opts[name]:
             if targetphase is not None:
-                raise error.Abort(_('only one phase can be specified'))
+                raise error.Abort(_(b'only one phase can be specified'))
             targetphase = idx
 
     # look for specified revision
     revs = list(revs)
-    revs.extend(opts['rev'])
+    revs.extend(opts[b'rev'])
     if not revs:
         # display both parents as the second parent phase can influence
         # the phase of a merge commit
@@ -4334,12 +5239,12 @@
         # display
         for r in revs:
             ctx = repo[r]
-            ui.write('%i: %s\n' % (ctx.rev(), ctx.phasestr()))
+            ui.write(b'%i: %s\n' % (ctx.rev(), ctx.phasestr()))
     else:
-        with repo.lock(), repo.transaction("phase") as tr:
+        with repo.lock(), repo.transaction(b"phase") as tr:
             # set phase
             if not revs:
-                raise error.Abort(_('empty revision set'))
+                raise error.Abort(_(b'empty revision set'))
             nodes = [repo[r].node() for r in revs]
             # moving revision from public to draft may hide them
             # We have to check result on an unfiltered repository
@@ -4347,28 +5252,33 @@
             getphase = unfi._phasecache.phase
             olddata = [getphase(unfi, r) for r in unfi]
             phases.advanceboundary(repo, tr, targetphase, nodes)
-            if opts['force']:
+            if opts[b'force']:
                 phases.retractboundary(repo, tr, targetphase, nodes)
         getphase = unfi._phasecache.phase
         newdata = [getphase(unfi, r) for r in unfi]
         changes = sum(newdata[r] != olddata[r] for r in unfi)
         cl = unfi.changelog
-        rejected = [n for n in nodes
-                    if newdata[cl.rev(n)] < targetphase]
+        rejected = [n for n in nodes if newdata[cl.rev(n)] < targetphase]
         if rejected:
-            ui.warn(_('cannot move %i changesets to a higher '
-                      'phase, use --force\n') % len(rejected))
+            ui.warn(
+                _(
+                    b'cannot move %i changesets to a higher '
+                    b'phase, use --force\n'
+                )
+                % len(rejected)
+            )
             ret = 1
         if changes:
-            msg = _('phase changed for %i changesets\n') % changes
+            msg = _(b'phase changed for %i changesets\n') % changes
             if ret:
                 ui.status(msg)
             else:
                 ui.note(msg)
         else:
-            ui.warn(_('no phases changed\n'))
+            ui.warn(_(b'no phases changed\n'))
     return ret
 
+
 def postincoming(ui, repo, modheads, optupdate, checkout, brev):
     """Run after a changegroup has been added via pull/unbundle
 
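The display branch of ``phase`` above prints ``rev: phasename`` lines,
while the write branch advances the phase boundary and only retracts it
(moves it backward) when ``--force`` is given. In command-line terms
(revision numbers are illustrative)::

   $ hg phase -r .
   2: draft
   $ hg phase --public -r .           # advance: draft -> public
   $ hg phase --force --draft -r .    # retract: public back to draft
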
@@ -4385,34 +5295,62 @@
         try:
             return hg.updatetotally(ui, repo, checkout, brev)
         except error.UpdateAbort as inst:
-            msg = _("not updating: %s") % stringutil.forcebytestr(inst)
+            msg = _(b"not updating: %s") % stringutil.forcebytestr(inst)
             hint = inst.hint
             raise error.UpdateAbort(msg, hint=hint)
     if modheads is not None and modheads > 1:
         currentbranchheads = len(repo.branchheads())
         if currentbranchheads == modheads:
-            ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
+            ui.status(
+                _(b"(run 'hg heads' to see heads, 'hg merge' to merge)\n")
+            )
         elif currentbranchheads > 1:
-            ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to "
-                        "merge)\n"))
+            ui.status(
+                _(b"(run 'hg heads .' to see heads, 'hg merge' to merge)\n")
+            )
         else:
-            ui.status(_("(run 'hg heads' to see heads)\n"))
-    elif not ui.configbool('commands', 'update.requiredest'):
-        ui.status(_("(run 'hg update' to get a working copy)\n"))
-
-@command('pull',
-    [('u', 'update', None,
-     _('update to new branch head if new descendants were pulled')),
-    ('f', 'force', None, _('run even when remote repository is unrelated')),
-    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
-    ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
-    ('b', 'branch', [], _('a specific branch you would like to pull'),
-     _('BRANCH')),
-    ] + remoteopts,
-    _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'),
+            ui.status(_(b"(run 'hg heads' to see heads)\n"))
+    elif not ui.configbool(b'commands', b'update.requiredest'):
+        ui.status(_(b"(run 'hg update' to get a working copy)\n"))
+
+
+@command(
+    b'pull',
+    [
+        (
+            b'u',
+            b'update',
+            None,
+            _(b'update to new branch head if new descendants were pulled'),
+        ),
+        (
+            b'f',
+            b'force',
+            None,
+            _(b'run even when remote repository is unrelated'),
+        ),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'a remote changeset intended to be added'),
+            _(b'REV'),
+        ),
+        (b'B', b'bookmark', [], _(b"bookmark to pull"), _(b'BOOKMARK')),
+        (
+            b'b',
+            b'branch',
+            [],
+            _(b'a specific branch you would like to pull'),
+            _(b'BRANCH'),
+        ),
+    ]
+    + remoteopts,
+    _(b'[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'),
     helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
-    helpbasic=True)
-def pull(ui, repo, source="default", **opts):
+    helpbasic=True,
+)
+def pull(ui, repo, source=b"default", **opts):
     """pull changes from the specified source
 
     Pull changes from a remote repository to a local one.
@@ -4443,22 +5381,25 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    if ui.configbool('commands', 'update.requiredest') and opts.get('update'):
-        msg = _('update destination required by configuration')
-        hint = _('use hg pull followed by hg update DEST')
+    if ui.configbool(b'commands', b'update.requiredest') and opts.get(
+        b'update'
+    ):
+        msg = _(b'update destination required by configuration')
+        hint = _(b'use hg pull followed by hg update DEST')
         raise error.Abort(msg, hint=hint)
 
-    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
-    ui.status(_('pulling from %s\n') % util.hidepassword(source))
+    source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))
+    ui.status(_(b'pulling from %s\n') % util.hidepassword(source))
     other = hg.peer(repo, opts, source)
     try:
-        revs, checkout = hg.addbranchrevs(repo, other, branches,
-                                          opts.get('rev'))
+        revs, checkout = hg.addbranchrevs(
+            repo, other, branches, opts.get(b'rev')
+        )
 
         pullopargs = {}
 
         nodes = None
-        if opts.get('bookmark') or revs:
+        if opts.get(b'bookmark') or revs:
             # The list of bookmark used here is the same used to actually update
             # the bookmark names, to avoid the race from issue 4689 and we do
             # all lookup and bookmark queries in one go so they see the same
@@ -4466,23 +5407,25 @@
             nodes = []
             fnodes = []
             revs = revs or []
-            if revs and not other.capable('lookup'):
-                err = _("other repository doesn't support revision lookup, "
-                        "so a rev cannot be specified.")
+            if revs and not other.capable(b'lookup'):
+                err = _(
+                    b"other repository doesn't support revision lookup, "
+                    b"so a rev cannot be specified."
+                )
                 raise error.Abort(err)
             with other.commandexecutor() as e:
-                fremotebookmarks = e.callcommand('listkeys', {
-                    'namespace': 'bookmarks'
-                })
+                fremotebookmarks = e.callcommand(
+                    b'listkeys', {b'namespace': b'bookmarks'}
+                )
                 for r in revs:
-                    fnodes.append(e.callcommand('lookup', {'key': r}))
+                    fnodes.append(e.callcommand(b'lookup', {b'key': r}))
             remotebookmarks = fremotebookmarks.result()
             remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks)
-            pullopargs['remotebookmarks'] = remotebookmarks
-            for b in opts.get('bookmark', []):
+            pullopargs[b'remotebookmarks'] = remotebookmarks
+            for b in opts.get(b'bookmark', []):
                 b = repo._bookmarks.expandname(b)
                 if b not in remotebookmarks:
-                    raise error.Abort(_('remote bookmark %s not found!') % b)
+                    raise error.Abort(_(b'remote bookmark %s not found!') % b)
                 nodes.append(remotebookmarks[b])
             for i, rev in enumerate(revs):
                 node = fnodes[i].result()
@@ -4491,14 +5434,18 @@
                     checkout = node
 
         wlock = util.nullcontextmanager()
-        if opts.get('update'):
+        if opts.get(b'update'):
             wlock = repo.wlock()
         with wlock:
-            pullopargs.update(opts.get('opargs', {}))
-            modheads = exchange.pull(repo, other, heads=nodes,
-                                     force=opts.get('force'),
-                                     bookmarks=opts.get('bookmark', ()),
-                                     opargs=pullopargs).cgresult
+            pullopargs.update(opts.get(b'opargs', {}))
+            modheads = exchange.pull(
+                repo,
+                other,
+                heads=nodes,
+                force=opts.get(b'force'),
+                bookmarks=opts.get(b'bookmark', ()),
+                opargs=pullopargs,
+            ).cgresult
 
             # brev is a name, which might be a bookmark to be activated at
             # the end of the update. In other words, it is an explicit
@@ -4511,18 +5458,19 @@
                 # order below depends on implementation of
                 # hg.addbranchrevs(). opts['bookmark'] is ignored,
                 # because 'checkout' is determined without it.
-                if opts.get('rev'):
-                    brev = opts['rev'][0]
-                elif opts.get('branch'):
-                    brev = opts['branch'][0]
+                if opts.get(b'rev'):
+                    brev = opts[b'rev'][0]
+                elif opts.get(b'branch'):
+                    brev = opts[b'branch'][0]
                 else:
                     brev = branches[0]
             repo._subtoppath = source
             try:
-                ret = postincoming(ui, repo, modheads, opts.get('update'),
-                                   checkout, brev)
+                ret = postincoming(
+                    ui, repo, modheads, opts.get(b'update'), checkout, brev
+                )
             except error.FilteredRepoLookupError as exc:
-                msg = _('cannot update to target: %s') % exc.args[0]
+                msg = _(b'cannot update to target: %s') % exc.args[0]
                 exc.args = (msg,) + exc.args[1:]
                 raise
             finally:
@@ -4532,21 +5480,45 @@
         other.close()
     return ret
 
-@command('push',
-    [('f', 'force', None, _('force push')),
-    ('r', 'rev', [],
-     _('a changeset intended to be included in the destination'),
-     _('REV')),
-    ('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')),
-    ('b', 'branch', [],
-     _('a specific branch you would like to push'), _('BRANCH')),
-    ('', 'new-branch', False, _('allow pushing a new branch')),
-    ('', 'pushvars', [], _('variables that can be sent to server (ADVANCED)')),
-    ('', 'publish', False, _('push the changeset as public (EXPERIMENTAL)')),
-    ] + remoteopts,
-    _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'),
+
+@command(
+    b'push',
+    [
+        (b'f', b'force', None, _(b'force push')),
+        (
+            b'r',
+            b'rev',
+            [],
+            _(b'a changeset intended to be included in the destination'),
+            _(b'REV'),
+        ),
+        (b'B', b'bookmark', [], _(b"bookmark to push"), _(b'BOOKMARK')),
+        (
+            b'b',
+            b'branch',
+            [],
+            _(b'a specific branch you would like to push'),
+            _(b'BRANCH'),
+        ),
+        (b'', b'new-branch', False, _(b'allow pushing a new branch')),
+        (
+            b'',
+            b'pushvars',
+            [],
+            _(b'variables that can be sent to server (ADVANCED)'),
+        ),
+        (
+            b'',
+            b'publish',
+            False,
+            _(b'push the changeset as public (EXPERIMENTAL)'),
+        ),
+    ]
+    + remoteopts,
+    _(b'[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'),
     helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
-    helpbasic=True)
+    helpbasic=True,
+)
 def push(ui, repo, dest=None, **opts):
     """push changes to the specified destination
 
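The bookmark and revision lookups in ``pull`` above go through the peer's
command executor, which issues wire-protocol commands as futures so that
several requests can share a round trip on transports that support
batching. The shape of the pattern (``b'tip'`` is an arbitrary key)::

   def remotelookups(other):
       with other.commandexecutor() as e:
           fbooks = e.callcommand(b'listkeys', {b'namespace': b'bookmarks'})
           fnode = e.callcommand(b'lookup', {b'key': b'tip'})
       # leaving the block sends anything still pending; then collect
       return fbooks.result(), fnode.result()
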
@@ -4605,48 +5577,58 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    if opts.get('bookmark'):
-        ui.setconfig('bookmarks', 'pushing', opts['bookmark'], 'push')
-        for b in opts['bookmark']:
+    if opts.get(b'bookmark'):
+        ui.setconfig(b'bookmarks', b'pushing', opts[b'bookmark'], b'push')
+        for b in opts[b'bookmark']:
             # translate -B options to -r so changesets get pushed
             b = repo._bookmarks.expandname(b)
             if b in repo._bookmarks:
-                opts.setdefault('rev', []).append(b)
+                opts.setdefault(b'rev', []).append(b)
             else:
                 # if we try to push a deleted bookmark, translate it to null
                 # this lets simultaneous -r, -b options continue working
-                opts.setdefault('rev', []).append("null")
-
-    path = ui.paths.getpath(dest, default=('default-push', 'default'))
+                opts.setdefault(b'rev', []).append(b"null")
+
+    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
     if not path:
-        raise error.Abort(_('default repository not configured!'),
-                         hint=_("see 'hg help config.paths'"))
+        raise error.Abort(
+            _(b'default repository not configured!'),
+            hint=_(b"see 'hg help config.paths'"),
+        )
     dest = path.pushloc or path.loc
-    branches = (path.branch, opts.get('branch') or [])
-    ui.status(_('pushing to %s\n') % util.hidepassword(dest))
-    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
+    branches = (path.branch, opts.get(b'branch') or [])
+    ui.status(_(b'pushing to %s\n') % util.hidepassword(dest))
+    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
     other = hg.peer(repo, opts, dest)
 
     if revs:
         revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
         if not revs:
-            raise error.Abort(_("specified revisions evaluate to an empty set"),
-                             hint=_("use different revision arguments"))
+            raise error.Abort(
+                _(b"specified revisions evaluate to an empty set"),
+                hint=_(b"use different revision arguments"),
+            )
     elif path.pushrev:
         # It doesn't make any sense to specify ancestor revisions. So limit
         # to DAG heads to make discovery simpler.
-        expr = revsetlang.formatspec('heads(%r)', path.pushrev)
+        expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
         revs = scmutil.revrange(repo, [expr])
         revs = [repo[rev].node() for rev in revs]
         if not revs:
-            raise error.Abort(_('default push revset for path evaluates to an '
-                                'empty set'))
+            raise error.Abort(
+                _(b'default push revset for path evaluates to an empty set')
+            )
+    elif ui.configbool(b'commands', b'push.require-revs'):
+        raise error.Abort(
+            _(b'no revisions specified to push'),
+            hint=_(b'did you mean "hg push -r ."?'),
+        )
 
     repo._subtoppath = dest
     try:
         # push subrepos depth-first for coherent ordering
-        c = repo['.']
-        subs = c.substate # only repos that are committed
+        c = repo[b'.']
+        subs = c.substate  # only repos that are committed
         for s in sorted(subs):
             result = c.sub(s).push(opts)
             if result == 0:
@@ -4654,14 +5636,19 @@
     finally:
         del repo._subtoppath
 
-    opargs = dict(opts.get('opargs', {})) # copy opargs since we may mutate it
-    opargs.setdefault('pushvars', []).extend(opts.get('pushvars', []))
-
-    pushop = exchange.push(repo, other, opts.get('force'), revs=revs,
-                           newbranch=opts.get('new_branch'),
-                           bookmarks=opts.get('bookmark', ()),
-                           publish=opts.get('publish'),
-                           opargs=opargs)
+    opargs = dict(opts.get(b'opargs', {}))  # copy opargs since we may mutate it
+    opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
+
+    pushop = exchange.push(
+        repo,
+        other,
+        opts.get(b'force'),
+        revs=revs,
+        newbranch=opts.get(b'new_branch'),
+        bookmarks=opts.get(b'bookmark', ()),
+        publish=opts.get(b'publish'),
+        opargs=opargs,
+    )
 
     result = not pushop.cgresult
 
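One behavior change hides among the reformatting: ``push`` above grows a
``commands.push.require-revs`` option that turns a bare ``hg push`` into
an error, matching the new abort and hint in the hunk. Assuming the
option is enabled in an hgrc, a session would look like::

   $ cat >> .hg/hgrc << EOF
   > [commands]
   > push.require-revs = yes
   > EOF
   $ hg push
   abort: no revisions specified to push
   (did you mean "hg push -r ."?)
   $ hg push -r .
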
@@ -4673,10 +5660,12 @@
 
     return result
 
-@command('recover',
-    [('','verify', True, "run `hg verify` after succesful recover"),
-    ],
-    helpcategory=command.CATEGORY_MAINTENANCE)
+
+@command(
+    b'recover',
+    [(b'', b'verify', True, b"run `hg verify` after successful recover"),],
+    helpcategory=command.CATEGORY_MAINTENANCE,
+)
 def recover(ui, repo, **opts):
     """roll back an interrupted transaction
 
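``recover`` above likewise gains behavior, not just formatting: a
``--verify`` flag that defaults to on, so the post-recovery ``hg verify``
pass can now be skipped with the negated form (assuming the usual
``--no-`` negation that boolean flags get)::

   $ hg recover --no-verify
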
@@ -4693,20 +5682,29 @@
         if opts[r'verify']:
             return hg.verify(repo)
         else:
-            msg = _("(verify step skipped, run  `hg verify` to check your "
-                    "repository content)\n")
+            msg = _(
+                b"(verify step skipped, run  `hg verify` to check your "
+                b"repository content)\n"
+            )
             ui.warn(msg)
             return 0
     return 1
 
-@command('remove|rm',
-    [('A', 'after', None, _('record delete for missing files')),
-    ('f', 'force', None,
-     _('forget added files, delete modified files')),
-    ] + subrepoopts + walkopts + dryrunopts,
-    _('[OPTION]... FILE...'),
+
+@command(
+    b'remove|rm',
+    [
+        (b'A', b'after', None, _(b'record delete for missing files')),
+        (b'f', b'force', None, _(b'forget added files, delete modified files')),
+    ]
+    + subrepoopts
+    + walkopts
+    + dryrunopts,
+    _(b'[OPTION]... FILE...'),
     helpcategory=command.CATEGORY_WORKING_DIRECTORY,
-    helpbasic=True, inferrepo=True)
+    helpbasic=True,
+    inferrepo=True,
+)
 def remove(ui, repo, *pats, **opts):
     """remove the specified files on the next commit
 
@@ -4747,23 +5745,35 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    after, force = opts.get('after'), opts.get('force')
-    dryrun = opts.get('dry_run')
+    after, force = opts.get(b'after'), opts.get(b'force')
+    dryrun = opts.get(b'dry_run')
     if not pats and not after:
-        raise error.Abort(_('no files specified'))
+        raise error.Abort(_(b'no files specified'))
 
     m = scmutil.match(repo[None], pats, opts)
-    subrepos = opts.get('subrepos')
+    subrepos = opts.get(b'subrepos')
     uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
-    return cmdutil.remove(ui, repo, m, "", uipathfn, after, force, subrepos,
-                          dryrun=dryrun)
-
-@command('rename|move|mv',
-    [('A', 'after', None, _('record a rename that has already occurred')),
-    ('f', 'force', None, _('forcibly move over an existing managed file')),
-    ] + walkopts + dryrunopts,
-    _('[OPTION]... SOURCE... DEST'),
-    helpcategory=command.CATEGORY_WORKING_DIRECTORY)
+    return cmdutil.remove(
+        ui, repo, m, b"", uipathfn, after, force, subrepos, dryrun=dryrun
+    )
+
+
+@command(
+    b'rename|move|mv',
+    [
+        (b'A', b'after', None, _(b'record a rename that has already occurred')),
+        (
+            b'f',
+            b'force',
+            None,
+            _(b'forcibly move over an existing managed file'),
+        ),
+    ]
+    + walkopts
+    + dryrunopts,
+    _(b'[OPTION]... SOURCE... DEST'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+)
 def rename(ui, repo, *pats, **opts):
     """rename files; equivalent of copy + remove
 
@@ -4784,17 +5794,24 @@
     with repo.wlock(False):
         return cmdutil.copy(ui, repo, pats, opts, rename=True)
 
-@command('resolve',
-    [('a', 'all', None, _('select all unresolved files')),
-    ('l', 'list', None, _('list state of files needing merge')),
-    ('m', 'mark', None, _('mark files as resolved')),
-    ('u', 'unmark', None, _('mark files as unresolved')),
-    ('n', 'no-status', None, _('hide status prefix')),
-    ('', 're-merge', None, _('re-merge files'))]
-    + mergetoolopts + walkopts + formatteropts,
-    _('[OPTION]... [FILE]...'),
+
+@command(
+    b'resolve',
+    [
+        (b'a', b'all', None, _(b'select all unresolved files')),
+        (b'l', b'list', None, _(b'list state of files needing merge')),
+        (b'm', b'mark', None, _(b'mark files as resolved')),
+        (b'u', b'unmark', None, _(b'mark files as unresolved')),
+        (b'n', b'no-status', None, _(b'hide status prefix')),
+        (b'', b're-merge', None, _(b're-merge files')),
+    ]
+    + mergetoolopts
+    + walkopts
+    + formatteropts,
+    _(b'[OPTION]... [FILE]...'),
     helpcategory=command.CATEGORY_WORKING_DIRECTORY,
-    inferrepo=True)
+    inferrepo=True,
+)
 def resolve(ui, repo, *pats, **opts):
     """redo merges or set/view the merge status of files
 
@@ -4848,43 +5865,54 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    confirm = ui.configbool('commands', 'resolve.confirm')
-    flaglist = 'all mark unmark list no_status re_merge'.split()
-    all, mark, unmark, show, nostatus, remerge = [
-        opts.get(o) for o in flaglist]
+    confirm = ui.configbool(b'commands', b'resolve.confirm')
+    flaglist = b'all mark unmark list no_status re_merge'.split()
+    all, mark, unmark, show, nostatus, remerge = [opts.get(o) for o in flaglist]
 
     actioncount = len(list(filter(None, [show, mark, unmark, remerge])))
     if actioncount > 1:
-        raise error.Abort(_("too many actions specified"))
-    elif (actioncount == 0
-          and ui.configbool('commands', 'resolve.explicit-re-merge')):
-        hint = _('use --mark, --unmark, --list or --re-merge')
-        raise error.Abort(_('no action specified'), hint=hint)
+        raise error.Abort(_(b"too many actions specified"))
+    elif actioncount == 0 and ui.configbool(
+        b'commands', b'resolve.explicit-re-merge'
+    ):
+        hint = _(b'use --mark, --unmark, --list or --re-merge')
+        raise error.Abort(_(b'no action specified'), hint=hint)
     if pats and all:
-        raise error.Abort(_("can't specify --all and patterns"))
+        raise error.Abort(_(b"can't specify --all and patterns"))
     if not (all or pats or show or mark or unmark):
-        raise error.Abort(_('no files or directories specified'),
-                         hint=('use --all to re-merge all unresolved files'))
+        raise error.Abort(
+            _(b'no files or directories specified'),
+            hint=b'use --all to re-merge all unresolved files',
+        )
 
     if confirm:
         if all:
-            if ui.promptchoice(_(b're-merge all unresolved files (yn)?'
-                                 b'$$ &Yes $$ &No')):
-                raise error.Abort(_('user quit'))
+            if ui.promptchoice(
+                _(b're-merge all unresolved files (yn)?$$ &Yes $$ &No')
+            ):
+                raise error.Abort(_(b'user quit'))
         if mark and not pats:
-            if ui.promptchoice(_(b'mark all unresolved files as resolved (yn)?'
-                                 b'$$ &Yes $$ &No')):
-                raise error.Abort(_('user quit'))
+            if ui.promptchoice(
+                _(
+                    b'mark all unresolved files as resolved (yn)?'
+                    b'$$ &Yes $$ &No'
+                )
+            ):
+                raise error.Abort(_(b'user quit'))
         if unmark and not pats:
-            if ui.promptchoice(_(b'mark all resolved files as unresolved (yn)?'
-                                 b'$$ &Yes $$ &No')):
-                raise error.Abort(_('user quit'))
+            if ui.promptchoice(
+                _(
+                    b'mark all resolved files as unresolved (yn)?'
+                    b'$$ &Yes $$ &No'
+                )
+            ):
+                raise error.Abort(_(b'user quit'))
 
     uipathfn = scmutil.getuipathfn(repo)
 
     if show:
-        ui.pager('resolve')
-        fm = ui.formatter('resolve', opts)
+        ui.pager(b'resolve')
+        fm = ui.formatter(b'resolve', opts)
         ms = mergemod.mergestate.read(repo)
         wctx = repo[None]
         m = scmutil.match(wctx, pats, opts)
@@ -4893,12 +5921,17 @@
         # as 'P'.  Resolved path conflicts show as 'R', the same as normal
         # resolved conflicts.
         mergestateinfo = {
-            mergemod.MERGE_RECORD_UNRESOLVED: ('resolve.unresolved', 'U'),
-            mergemod.MERGE_RECORD_RESOLVED: ('resolve.resolved', 'R'),
-            mergemod.MERGE_RECORD_UNRESOLVED_PATH: ('resolve.unresolved', 'P'),
-            mergemod.MERGE_RECORD_RESOLVED_PATH: ('resolve.resolved', 'R'),
-            mergemod.MERGE_RECORD_DRIVER_RESOLVED: ('resolve.driverresolved',
-                                                    'D'),
+            mergemod.MERGE_RECORD_UNRESOLVED: (b'resolve.unresolved', b'U'),
+            mergemod.MERGE_RECORD_RESOLVED: (b'resolve.resolved', b'R'),
+            mergemod.MERGE_RECORD_UNRESOLVED_PATH: (
+                b'resolve.unresolved',
+                b'P',
+            ),
+            mergemod.MERGE_RECORD_RESOLVED_PATH: (b'resolve.resolved', b'R'),
+            mergemod.MERGE_RECORD_DRIVER_RESOLVED: (
+                b'resolve.driverresolved',
+                b'D',
+            ),
         }
 
         for f in ms:
@@ -4908,9 +5941,9 @@
             label, key = mergestateinfo[ms[f]]
             fm.startitem()
             fm.context(ctx=wctx)
-            fm.condwrite(not nostatus, 'mergestatus', '%s ', key, label=label)
+            fm.condwrite(not nostatus, b'mergestatus', b'%s ', key, label=label)
             fm.data(path=f)
-            fm.plain('%s\n' % uipathfn(f), label=label)
+            fm.plain(b'%s\n' % uipathfn(f), label=label)
         fm.end()
         return 0
 
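The confirmation branch above leans on ``ui.promptchoice``, whose prompt
string carries its own answers: everything after ``$$`` is a
``$$``-separated choice list, ``&`` marks each answer's hotkey, and the
return value is the zero-based index of the chosen answer, so any
non-zero result below means "No"::

   from mercurial import error
   from mercurial.i18n import _

   def confirmremerge(ui):
       # index 0 (&Yes) falls through; index 1 (&No) aborts
       if ui.promptchoice(
           _(b're-merge all unresolved files (yn)?$$ &Yes $$ &No')
       ):
           raise error.Abort(_(b'user quit'))
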
@@ -4919,12 +5952,15 @@
 
         if not (ms.active() or repo.dirstate.p2() != nullid):
             raise error.Abort(
-                _('resolve command not applicable when not merging'))
+                _(b'resolve command not applicable when not merging')
+            )
 
         wctx = repo[None]
 
-        if (ms.mergedriver
-            and ms.mdstate() == mergemod.MERGE_DRIVER_STATE_UNMARKED):
+        if (
+            ms.mergedriver
+            and ms.mdstate() == mergemod.MERGE_DRIVER_STATE_UNMARKED
+        ):
             proceed = mergemod.driverpreprocess(repo, ms, wctx)
             ms.commit()
             # allow mark and unmark to go through
@@ -4939,8 +5975,8 @@
         tocomplete = []
         hasconflictmarkers = []
         if mark:
-            markcheck = ui.config('commands', 'resolve.mark-check')
-            if markcheck not in ['warn', 'abort']:
+            markcheck = ui.config(b'commands', b'resolve.mark-check')
+            if markcheck not in [b'warn', b'abort']:
                 # Treat all invalid / unrecognized values as 'none'.
                 markcheck = False
         for f in ms:
@@ -4955,33 +5991,43 @@
                 exact = m.exact(f)
                 if mark:
                     if exact:
-                        ui.warn(_('not marking %s as it is driver-resolved\n')
-                                % uipathfn(f))
+                        ui.warn(
+                            _(b'not marking %s as it is driver-resolved\n')
+                            % uipathfn(f)
+                        )
                 elif unmark:
                     if exact:
-                        ui.warn(_('not unmarking %s as it is driver-resolved\n')
-                                % uipathfn(f))
+                        ui.warn(
+                            _(b'not unmarking %s as it is driver-resolved\n')
+                            % uipathfn(f)
+                        )
                 else:
                     runconclude = True
                 continue
 
             # path conflicts must be resolved manually
-            if ms[f] in (mergemod.MERGE_RECORD_UNRESOLVED_PATH,
-                         mergemod.MERGE_RECORD_RESOLVED_PATH):
+            if ms[f] in (
+                mergemod.MERGE_RECORD_UNRESOLVED_PATH,
+                mergemod.MERGE_RECORD_RESOLVED_PATH,
+            ):
                 if mark:
                     ms.mark(f, mergemod.MERGE_RECORD_RESOLVED_PATH)
                 elif unmark:
                     ms.mark(f, mergemod.MERGE_RECORD_UNRESOLVED_PATH)
                 elif ms[f] == mergemod.MERGE_RECORD_UNRESOLVED_PATH:
-                    ui.warn(_('%s: path conflict must be resolved manually\n')
-                            % uipathfn(f))
+                    ui.warn(
+                        _(b'%s: path conflict must be resolved manually\n')
+                        % uipathfn(f)
+                    )
                 continue
 
             if mark:
                 if markcheck:
                     fdata = repo.wvfs.tryread(f)
-                    if (filemerge.hasconflictmarkers(fdata) and
-                        ms[f] != mergemod.MERGE_RECORD_RESOLVED):
+                    if (
+                        filemerge.hasconflictmarkers(fdata)
+                        and ms[f] != mergemod.MERGE_RECORD_RESOLVED
+                    ):
                         hasconflictmarkers.append(f)
                 ms.mark(f, mergemod.MERGE_RECORD_RESOLVED)
             elif unmark:
@@ -4990,15 +6036,15 @@
                 # backup pre-resolve (merge uses .orig for its own purposes)
                 a = repo.wjoin(f)
                 try:
-                    util.copyfile(a, a + ".resolve")
+                    util.copyfile(a, a + b".resolve")
                 except (IOError, OSError) as inst:
                     if inst.errno != errno.ENOENT:
                         raise
 
                 try:
                     # preresolve file
-                    overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
-                    with ui.configoverride(overrides, 'resolve'):
+                    overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+                    with ui.configoverride(overrides, b'resolve'):
                         complete, r = ms.preresolve(f, wctx)
                     if not complete:
                         tocomplete.append(f)
@@ -5011,25 +6057,34 @@
                 # for merges that are complete
                 if complete:
                     try:
-                        util.rename(a + ".resolve",
-                                    scmutil.backuppath(ui, repo, f))
+                        util.rename(
+                            a + b".resolve", scmutil.backuppath(ui, repo, f)
+                        )
                     except OSError as inst:
                         if inst.errno != errno.ENOENT:
                             raise
 
         if hasconflictmarkers:
-            ui.warn(_('warning: the following files still have conflict '
-                      'markers:\n') + ''.join('  ' + uipathfn(f) + '\n'
-                                              for f in hasconflictmarkers))
-            if markcheck == 'abort' and not all and not pats:
-                raise error.Abort(_('conflict markers detected'),
-                                  hint=_('use --all to mark anyway'))
+            ui.warn(
+                _(
+                    b'warning: the following files still have conflict '
+                    b'markers:\n'
+                )
+                + b''.join(
+                    b'  ' + uipathfn(f) + b'\n' for f in hasconflictmarkers
+                )
+            )
+            if markcheck == b'abort' and not all and not pats:
+                raise error.Abort(
+                    _(b'conflict markers detected'),
+                    hint=_(b'use --all to mark anyway'),
+                )
 
         for f in tocomplete:
             try:
                 # resolve file
-                overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
-                with ui.configoverride(overrides, 'resolve'):
+                overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+                with ui.configoverride(overrides, b'resolve'):
                     r = ms.resolve(f, wctx)
                 if r:
                     ret = 1
@@ -5039,7 +6094,7 @@
             # replace filemerge's .orig file with our resolve file
             a = repo.wjoin(f)
             try:
-                util.rename(a + ".resolve", scmutil.backuppath(ui, repo, f))
+                util.rename(a + b".resolve", scmutil.backuppath(ui, repo, f))
             except OSError as inst:
                 if inst.errno != errno.ENOENT:
                     raise
@@ -5049,31 +6104,35 @@
 
         if not didwork and pats:
             hint = None
-            if not any([p for p in pats if p.find(':') >= 0]):
-                pats = ['path:%s' % p for p in pats]
+            if not any([p for p in pats if p.find(b':') >= 0]):
+                pats = [b'path:%s' % p for p in pats]
                 m = scmutil.match(wctx, pats, opts)
                 for f in ms:
                     if not m(f):
                         continue
+
                     def flag(o):
-                        if o == 're_merge':
-                            return '--re-merge '
-                        return '-%s ' % o[0:1]
-                    flags = ''.join([flag(o) for o in flaglist if opts.get(o)])
-                    hint = _("(try: hg resolve %s%s)\n") % (
-                             flags,
-                             ' '.join(pats))
+                        if o == b're_merge':
+                            return b'--re-merge '
+                        return b'-%s ' % o[0:1]
+
+                    flags = b''.join([flag(o) for o in flaglist if opts.get(o)])
+                    hint = _(b"(try: hg resolve %s%s)\n") % (
+                        flags,
+                        b' '.join(pats),
+                    )
                     break
-            ui.warn(_("arguments do not match paths that need resolving\n"))
+            ui.warn(_(b"arguments do not match paths that need resolving\n"))
             if hint:
                 ui.warn(hint)
-        elif ms.mergedriver and ms.mdstate() != 's':
+        elif ms.mergedriver and ms.mdstate() != b's':
             # run conclude step when either a driver-resolved file is requested
             # or there are no driver-resolved files
             # we can't use 'ret' to determine whether any files are unresolved
             # because we might not have tried to resolve some
-            if ((runconclude or not list(ms.driverresolved()))
-                and not list(ms.unresolved())):
+            if (runconclude or not list(ms.driverresolved())) and not list(
+                ms.unresolved()
+            ):
                 proceed = mergemod.driverconclude(repo, ms, wctx)
                 ms.commit()
                 if not proceed:
@@ -5083,23 +6142,33 @@
     unresolvedf = list(ms.unresolved())
     driverresolvedf = list(ms.driverresolved())
     if not unresolvedf and not driverresolvedf:
-        ui.status(_('(no more unresolved files)\n'))
+        ui.status(_(b'(no more unresolved files)\n'))
         cmdutil.checkafterresolved(repo)
     elif not unresolvedf:
-        ui.status(_('(no more unresolved files -- '
-                    'run "hg resolve --all" to conclude)\n'))
+        ui.status(
+            _(
+                b'(no more unresolved files -- '
+                b'run "hg resolve --all" to conclude)\n'
+            )
+        )
 
     return ret
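
Both merge loops in ``resolve`` above wrap ``ms.preresolve`` and
``ms.resolve`` in ``ui.configoverride``, a context manager that applies a
``(section, name) -> value`` mapping and restores the previous values on
exit. A minimal, self-contained sketch of that override pattern -- the
dict-backed ``config``/``configoverride`` below are toy stand-ins, not
Mercurial's ``ui`` class::

   import contextlib

   # Toy configuration store keyed by (section, name).
   _config = {(b'ui', b'forcemerge'): b''}

   def config(section, name):
       return _config[(section, name)]

   @contextlib.contextmanager
   def configoverride(overrides, source=b''):
       # Apply the overrides, then restore the old values on exit -- the
       # same shape as ui.configoverride(overrides, b'resolve') above.
       saved = {k: _config[k] for k in overrides}
       _config.update(overrides)
       try:
           yield
       finally:
           _config.update(saved)

   with configoverride({(b'ui', b'forcemerge'): b':merge'}):
       assert config(b'ui', b'forcemerge') == b':merge'
   assert config(b'ui', b'forcemerge') == b''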
 
-@command('revert',
-    [('a', 'all', None, _('revert all changes when no arguments given')),
-    ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
-    ('r', 'rev', '', _('revert to the specified revision'), _('REV')),
-    ('C', 'no-backup', None, _('do not save backup copies of files')),
-    ('i', 'interactive', None, _('interactively select the changes')),
-    ] + walkopts + dryrunopts,
-    _('[OPTION]... [-r REV] [NAME]...'),
-    helpcategory=command.CATEGORY_WORKING_DIRECTORY)
+
+@command(
+    b'revert',
+    [
+        (b'a', b'all', None, _(b'revert all changes when no arguments given')),
+        (b'd', b'date', b'', _(b'tipmost revision matching date'), _(b'DATE')),
+        (b'r', b'rev', b'', _(b'revert to the specified revision'), _(b'REV')),
+        (b'C', b'no-backup', None, _(b'do not save backup copies of files')),
+        (b'i', b'interactive', None, _(b'interactively select the changes')),
+    ]
+    + walkopts
+    + dryrunopts,
+    _(b'[OPTION]... [-r REV] [NAME]...'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+)
 def revert(ui, repo, *pats, **opts):
     """restore files to their checkout state
 
@@ -5138,51 +6207,73 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    if opts.get("date"):
-        if opts.get("rev"):
-            raise error.Abort(_("you can't specify a revision and a date"))
-        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
+    if opts.get(b"date"):
+        if opts.get(b"rev"):
+            raise error.Abort(_(b"you can't specify a revision and a date"))
+        opts[b"rev"] = cmdutil.finddate(ui, repo, opts[b"date"])
 
     parent, p2 = repo.dirstate.parents()
-    if not opts.get('rev') and p2 != nullid:
+    if not opts.get(b'rev') and p2 != nullid:
         # revert after merge is a trap for new users (issue2915)
-        raise error.Abort(_('uncommitted merge with no revision specified'),
-                         hint=_("use 'hg update' or see 'hg help revert'"))
-
-    rev = opts.get('rev')
+        raise error.Abort(
+            _(b'uncommitted merge with no revision specified'),
+            hint=_(b"use 'hg update' or see 'hg help revert'"),
+        )
+
+    rev = opts.get(b'rev')
     if rev:
-        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
     ctx = scmutil.revsingle(repo, rev)
 
-    if (not (pats or opts.get('include') or opts.get('exclude') or
-             opts.get('all') or opts.get('interactive'))):
-        msg = _("no files or directories specified")
+    if not (
+        pats
+        or opts.get(b'include')
+        or opts.get(b'exclude')
+        or opts.get(b'all')
+        or opts.get(b'interactive')
+    ):
+        msg = _(b"no files or directories specified")
         if p2 != nullid:
-            hint = _("uncommitted merge, use --all to discard all changes,"
-                     " or 'hg update -C .' to abort the merge")
+            hint = _(
+                b"uncommitted merge, use --all to discard all changes,"
+                b" or 'hg update -C .' to abort the merge"
+            )
             raise error.Abort(msg, hint=hint)
         dirty = any(repo.status())
         node = ctx.node()
         if node != parent:
             if dirty:
-                hint = _("uncommitted changes, use --all to discard all"
-                         " changes, or 'hg update %d' to update") % ctx.rev()
+                hint = (
+                    _(
+                        b"uncommitted changes, use --all to discard all"
+                        b" changes, or 'hg update %d' to update"
+                    )
+                    % ctx.rev()
+                )
             else:
-                hint = _("use --all to revert all files,"
-                         " or 'hg update %d' to update") % ctx.rev()
+                hint = (
+                    _(
+                        b"use --all to revert all files,"
+                        b" or 'hg update %d' to update"
+                    )
+                    % ctx.rev()
+                )
         elif dirty:
-            hint = _("uncommitted changes, use --all to discard all changes")
+            hint = _(b"uncommitted changes, use --all to discard all changes")
         else:
-            hint = _("use --all to revert all files")
+            hint = _(b"use --all to revert all files")
         raise error.Abort(msg, hint=hint)
 
-    return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats,
-                          **pycompat.strkwargs(opts))
+    return cmdutil.revert(
+        ui, repo, ctx, (parent, p2), *pats, **pycompat.strkwargs(opts)
+    )
+
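
``revert`` above converts its keyword arguments with
``pycompat.byteskwargs(opts)`` on entry and back with
``pycompat.strkwargs(opts)`` when re-expanding them into ``cmdutil.revert``.
A rough sketch of what that round-trip does on Python 3, where ``**kwargs``
keys are always ``str`` while the command body indexes ``opts`` with
``b'...'`` keys (simplified stand-ins for the real helpers in
``mercurial/pycompat.py``)::

   import sys

   def byteskwargs(dic):
       # str keys -> bytes keys, so code can use opts[b'rev'] uniformly.
       if sys.version_info[0] < 3:
           return dic
       return {k.encode('latin-1'): v for k, v in dic.items()}

   def strkwargs(dic):
       # bytes keys -> str keys, required before re-expanding with **.
       if sys.version_info[0] < 3:
           return dic
       return {k.decode('latin-1'): v for k, v in dic.items()}

   def inner(rev=None, date=None):
       return rev

   def revert(**opts):
       opts = byteskwargs(opts)
       assert opts.get(b'rev') == '1.0'
       return inner(**strkwargs(opts))

   assert revert(rev='1.0', date=None) == '1.0'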
 
 @command(
-    'rollback',
-    dryrunopts + [('f', 'force', False, _('ignore safety measures'))],
-    helpcategory=command.CATEGORY_MAINTENANCE)
+    b'rollback',
+    dryrunopts + [(b'f', b'force', False, _(b'ignore safety measures'))],
+    helpcategory=command.CATEGORY_MAINTENANCE,
+)
 def rollback(ui, repo, **opts):
     """roll back the last transaction (DANGEROUS) (DEPRECATED)
 
@@ -5228,15 +6319,20 @@
 
     Returns 0 on success, 1 if no rollback data is available.
     """
-    if not ui.configbool('ui', 'rollback'):
-        raise error.Abort(_('rollback is disabled because it is unsafe'),
-                          hint=('see `hg help -v rollback` for information'))
-    return repo.rollback(dryrun=opts.get(r'dry_run'),
-                         force=opts.get(r'force'))
+    if not ui.configbool(b'ui', b'rollback'):
+        raise error.Abort(
+            _(b'rollback is disabled because it is unsafe'),
+            hint=b'see `hg help -v rollback` for information',
+        )
+    return repo.rollback(dryrun=opts.get(r'dry_run'), force=opts.get(r'force'))
+
 
 @command(
-    'root', [] + formatteropts, intents={INTENT_READONLY},
-    helpcategory=command.CATEGORY_WORKING_DIRECTORY)
+    b'root',
+    [] + formatteropts,
+    intents={INTENT_READONLY},
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+)
 def root(ui, repo, **opts):
     """print the root (top) of the current working directory
 
@@ -5255,41 +6351,101 @@
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
-    with ui.formatter('root', opts) as fm:
+    with ui.formatter(b'root', opts) as fm:
         fm.startitem()
-        fm.write('reporoot', '%s\n', repo.root)
+        fm.write(b'reporoot', b'%s\n', repo.root)
         fm.data(hgpath=repo.path, storepath=repo.spath)
 
-@command('serve',
-    [('A', 'accesslog', '', _('name of access log file to write to'),
-     _('FILE')),
-    ('d', 'daemon', None, _('run server in background')),
-    ('', 'daemon-postexec', [], _('used internally by daemon mode')),
-    ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')),
-    # use string type, then we can check if something was passed
-    ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')),
-    ('a', 'address', '', _('address to listen on (default: all interfaces)'),
-     _('ADDR')),
-    ('', 'prefix', '', _('prefix path to serve from (default: server root)'),
-     _('PREFIX')),
-    ('n', 'name', '',
-     _('name to show in web pages (default: working directory)'), _('NAME')),
-    ('', 'web-conf', '',
-     _("name of the hgweb config file (see 'hg help hgweb')"), _('FILE')),
-    ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
-     _('FILE')),
-    ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
-    ('', 'stdio', None, _('for remote clients (ADVANCED)')),
-    ('', 'cmdserver', '', _('for remote clients (ADVANCED)'), _('MODE')),
-    ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
-    ('', 'style', '', _('template style to use'), _('STYLE')),
-    ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
-    ('', 'certificate', '', _('SSL certificate file'), _('FILE')),
-    ('', 'print-url', None, _('start and print only the URL'))]
-     + subrepoopts,
-    _('[OPTION]...'),
+
+@command(
+    b'serve',
+    [
+        (
+            b'A',
+            b'accesslog',
+            b'',
+            _(b'name of access log file to write to'),
+            _(b'FILE'),
+        ),
+        (b'd', b'daemon', None, _(b'run server in background')),
+        (b'', b'daemon-postexec', [], _(b'used internally by daemon mode')),
+        (
+            b'E',
+            b'errorlog',
+            b'',
+            _(b'name of error log file to write to'),
+            _(b'FILE'),
+        ),
+        # use string type, then we can check if something was passed
+        (
+            b'p',
+            b'port',
+            b'',
+            _(b'port to listen on (default: 8000)'),
+            _(b'PORT'),
+        ),
+        (
+            b'a',
+            b'address',
+            b'',
+            _(b'address to listen on (default: all interfaces)'),
+            _(b'ADDR'),
+        ),
+        (
+            b'',
+            b'prefix',
+            b'',
+            _(b'prefix path to serve from (default: server root)'),
+            _(b'PREFIX'),
+        ),
+        (
+            b'n',
+            b'name',
+            b'',
+            _(b'name to show in web pages (default: working directory)'),
+            _(b'NAME'),
+        ),
+        (
+            b'',
+            b'web-conf',
+            b'',
+            _(b"name of the hgweb config file (see 'hg help hgweb')"),
+            _(b'FILE'),
+        ),
+        (
+            b'',
+            b'webdir-conf',
+            b'',
+            _(b'name of the hgweb config file (DEPRECATED)'),
+            _(b'FILE'),
+        ),
+        (
+            b'',
+            b'pid-file',
+            b'',
+            _(b'name of file to write process ID to'),
+            _(b'FILE'),
+        ),
+        (b'', b'stdio', None, _(b'for remote clients (ADVANCED)')),
+        (
+            b'',
+            b'cmdserver',
+            b'',
+            _(b'for remote clients (ADVANCED)'),
+            _(b'MODE'),
+        ),
+        (b't', b'templates', b'', _(b'web templates to use'), _(b'TEMPLATE')),
+        (b'', b'style', b'', _(b'template style to use'), _(b'STYLE')),
+        (b'6', b'ipv6', None, _(b'use IPv6 in addition to IPv4')),
+        (b'', b'certificate', b'', _(b'SSL certificate file'), _(b'FILE')),
+        (b'', b'print-url', None, _(b'start and print only the URL')),
+    ]
+    + subrepoopts,
+    _(b'[OPTION]...'),
     helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
-    helpbasic=True, optionalrepo=True)
+    helpbasic=True,
+    optionalrepo=True,
+)
 def serve(ui, repo, **opts):
     """start stand-alone webserver
 
@@ -5316,53 +6472,82 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    if opts["stdio"] and opts["cmdserver"]:
-        raise error.Abort(_("cannot use --stdio with --cmdserver"))
-    if opts["print_url"] and ui.verbose:
-        raise error.Abort(_("cannot use --print-url with --verbose"))
-
-    if opts["stdio"]:
+    if opts[b"stdio"] and opts[b"cmdserver"]:
+        raise error.Abort(_(b"cannot use --stdio with --cmdserver"))
+    if opts[b"print_url"] and ui.verbose:
+        raise error.Abort(_(b"cannot use --print-url with --verbose"))
+
+    if opts[b"stdio"]:
         if repo is None:
-            raise error.RepoError(_("there is no Mercurial repository here"
-                                    " (.hg not found)"))
+            raise error.RepoError(
+                _(b"there is no Mercurial repository here (.hg not found)")
+            )
         s = wireprotoserver.sshserver(ui, repo)
         s.serve_forever()
 
     service = server.createservice(ui, repo, opts)
     return server.runservice(opts, initfn=service.init, runfn=service.run)
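
The flag tables handed to ``@command`` above -- most visibly for ``serve``
-- are lists of ``(shortname, longname, default, help[, metavar])`` tuples,
and the type of the default decides whether a flag takes a value. Mercurial
parses these with its own fancyopts module; translating two of the entries
into plain ``getopt`` specs merely illustrates the layout (a toy
conversion, not how hg actually dispatches)::

   import getopt

   flags = [
       (b'd', b'daemon', None, b'run server in background'),
       (b'p', b'port', b'', b'port to listen on (default: 8000)', b'PORT'),
   ]

   shortspec, longspec = '', []
   for entry in flags:
       short, long_, default = entry[0].decode(), entry[1].decode(), entry[2]
       # None/bool defaults mean a switch; anything else takes an argument.
       takesarg = default is not None and not isinstance(default, bool)
       shortspec += short + (':' if takesarg else '')
       longspec.append(long_ + ('=' if takesarg else ''))

   parsed, args = getopt.getopt(['-p', '8080', '--daemon'], shortspec, longspec)
   print(parsed)  # [('-p', '8080'), ('--daemon', '')]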
 
-@command('shelve',
-         [('A', 'addremove', None,
-           _('mark new/missing files as added/removed before shelving')),
-          ('u', 'unknown', None,
-           _('store unknown files in the shelve')),
-          ('', 'cleanup', None,
-           _('delete all shelved changes')),
-          ('', 'date', '',
-           _('shelve with the specified commit date'), _('DATE')),
-          ('d', 'delete', None,
-           _('delete the named shelved change(s)')),
-          ('e', 'edit', False,
-           _('invoke editor on commit messages')),
-          ('k', 'keep', False,
-           _('shelve, but keep changes in the working directory')),
-          ('l', 'list', None,
-           _('list current shelves')),
-          ('m', 'message', '',
-           _('use text as shelve message'), _('TEXT')),
-          ('n', 'name', '',
-           _('use the given name for the shelved commit'), _('NAME')),
-          ('p', 'patch', None,
-           _('output patches for changes (provide the names of the shelved '
-             'changes as positional arguments)')),
-          ('i', 'interactive', None,
-           _('interactive mode')),
-          ('', 'stat', None,
-           _('output diffstat-style summary of changes (provide the names of '
-             'the shelved changes as positional arguments)')
-           )] + cmdutil.walkopts,
-         _('hg shelve [OPTION]... [FILE]...'),
-         helpcategory=command.CATEGORY_WORKING_DIRECTORY)
+
+@command(
+    b'shelve',
+    [
+        (
+            b'A',
+            b'addremove',
+            None,
+            _(b'mark new/missing files as added/removed before shelving'),
+        ),
+        (b'u', b'unknown', None, _(b'store unknown files in the shelve')),
+        (b'', b'cleanup', None, _(b'delete all shelved changes')),
+        (
+            b'',
+            b'date',
+            b'',
+            _(b'shelve with the specified commit date'),
+            _(b'DATE'),
+        ),
+        (b'd', b'delete', None, _(b'delete the named shelved change(s)')),
+        (b'e', b'edit', False, _(b'invoke editor on commit messages')),
+        (
+            b'k',
+            b'keep',
+            False,
+            _(b'shelve, but keep changes in the working directory'),
+        ),
+        (b'l', b'list', None, _(b'list current shelves')),
+        (b'm', b'message', b'', _(b'use text as shelve message'), _(b'TEXT')),
+        (
+            b'n',
+            b'name',
+            b'',
+            _(b'use the given name for the shelved commit'),
+            _(b'NAME'),
+        ),
+        (
+            b'p',
+            b'patch',
+            None,
+            _(
+                b'output patches for changes (provide the names of the shelved '
+                b'changes as positional arguments)'
+            ),
+        ),
+        (b'i', b'interactive', None, _(b'interactive mode')),
+        (
+            b'',
+            b'stat',
+            None,
+            _(
+                b'output diffstat-style summary of changes (provide the names of '
+                b'the shelved changes as positional arguments)'
+            ),
+        ),
+    ]
+    + cmdutil.walkopts,
+    _(b'hg shelve [OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+)
 def shelve(ui, repo, *pats, **opts):
     '''save and set aside changes from the working directory
 
@@ -5398,61 +6583,88 @@
     '''
     opts = pycompat.byteskwargs(opts)
     allowables = [
-        ('addremove', {'create'}), # 'create' is pseudo action
-        ('unknown', {'create'}),
-        ('cleanup', {'cleanup'}),
-#       ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
-        ('delete', {'delete'}),
-        ('edit', {'create'}),
-        ('keep', {'create'}),
-        ('list', {'list'}),
-        ('message', {'create'}),
-        ('name', {'create'}),
-        ('patch', {'patch', 'list'}),
-        ('stat', {'stat', 'list'}),
+        (b'addremove', {b'create'}),  # 'create' is pseudo action
+        (b'unknown', {b'create'}),
+        (b'cleanup', {b'cleanup'}),
+        #       ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
+        (b'delete', {b'delete'}),
+        (b'edit', {b'create'}),
+        (b'keep', {b'create'}),
+        (b'list', {b'list'}),
+        (b'message', {b'create'}),
+        (b'name', {b'create'}),
+        (b'patch', {b'patch', b'list'}),
+        (b'stat', {b'stat', b'list'}),
     ]
+
     def checkopt(opt):
         if opts.get(opt):
             for i, allowable in allowables:
                 if opts[i] and opt not in allowable:
-                    raise error.Abort(_("options '--%s' and '--%s' may not be "
-                                       "used together") % (opt, i))
+                    raise error.Abort(
+                        _(
+                            b"options '--%s' and '--%s' may not be "
+                            b"used together"
+                        )
+                        % (opt, i)
+                    )
             return True
-    if checkopt('cleanup'):
+
+    if checkopt(b'cleanup'):
         if pats:
-            raise error.Abort(_("cannot specify names when using '--cleanup'"))
+            raise error.Abort(_(b"cannot specify names when using '--cleanup'"))
         return shelvemod.cleanupcmd(ui, repo)
-    elif checkopt('delete'):
+    elif checkopt(b'delete'):
         return shelvemod.deletecmd(ui, repo, pats)
-    elif checkopt('list'):
+    elif checkopt(b'list'):
         return shelvemod.listcmd(ui, repo, pats, opts)
-    elif checkopt('patch') or checkopt('stat'):
+    elif checkopt(b'patch') or checkopt(b'stat'):
         return shelvemod.patchcmds(ui, repo, pats, opts)
     else:
         return shelvemod.createcmd(ui, repo, pats, opts)
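
The ``allowables`` table and ``checkopt`` closure above encode shelve's
compatibility rule: every flag the user set must list the attempted action
among its allowed actions, or the combination aborts. The same logic in a
self-contained form, with a trimmed table and a plain ``ValueError``
standing in for ``error.Abort``::

   allowables = [
       (b'delete', {b'delete'}),
       (b'list', {b'list'}),
       (b'patch', {b'patch', b'list'}),
   ]

   def checkopt(opts, opt):
       # 'opt' is the action being attempted; every option that is set
       # must permit that action, mirroring the loop in shelve() above.
       if opts.get(opt):
           for i, allowable in allowables:
               if opts.get(i) and opt not in allowable:
                   raise ValueError(
                       "options '--%s' and '--%s' may not be used together"
                       % (opt.decode(), i.decode())
                   )
           return True

   assert checkopt({b'list': True, b'patch': True}, b'list')
   try:
       checkopt({b'delete': True, b'list': True}, b'delete')
   except ValueError as err:
       print(err)  # options '--delete' and '--list' may not be used together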
 
-_NOTTERSE = 'nothing'
-
-@command('status|st',
-    [('A', 'all', None, _('show status of all files')),
-    ('m', 'modified', None, _('show only modified files')),
-    ('a', 'added', None, _('show only added files')),
-    ('r', 'removed', None, _('show only removed files')),
-    ('d', 'deleted', None, _('show only deleted (but tracked) files')),
-    ('c', 'clean', None, _('show only files without changes')),
-    ('u', 'unknown', None, _('show only unknown (not tracked) files')),
-    ('i', 'ignored', None, _('show only ignored files')),
-    ('n', 'no-status', None, _('hide status prefix')),
-    ('t', 'terse', _NOTTERSE, _('show the terse output (EXPERIMENTAL)')),
-    ('C', 'copies', None, _('show source of copied files')),
-    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
-    ('', 'rev', [], _('show difference from revision'), _('REV')),
-    ('', 'change', '', _('list the changed files of a revision'), _('REV')),
-    ] + walkopts + subrepoopts + formatteropts,
-    _('[OPTION]... [FILE]...'),
+
+_NOTTERSE = b'nothing'
+
+
+@command(
+    b'status|st',
+    [
+        (b'A', b'all', None, _(b'show status of all files')),
+        (b'm', b'modified', None, _(b'show only modified files')),
+        (b'a', b'added', None, _(b'show only added files')),
+        (b'r', b'removed', None, _(b'show only removed files')),
+        (b'd', b'deleted', None, _(b'show only deleted (but tracked) files')),
+        (b'c', b'clean', None, _(b'show only files without changes')),
+        (b'u', b'unknown', None, _(b'show only unknown (not tracked) files')),
+        (b'i', b'ignored', None, _(b'show only ignored files')),
+        (b'n', b'no-status', None, _(b'hide status prefix')),
+        (b't', b'terse', _NOTTERSE, _(b'show the terse output (EXPERIMENTAL)')),
+        (b'C', b'copies', None, _(b'show source of copied files')),
+        (
+            b'0',
+            b'print0',
+            None,
+            _(b'end filenames with NUL, for use with xargs'),
+        ),
+        (b'', b'rev', [], _(b'show difference from revision'), _(b'REV')),
+        (
+            b'',
+            b'change',
+            b'',
+            _(b'list the changed files of a revision'),
+            _(b'REV'),
+        ),
+    ]
+    + walkopts
+    + subrepoopts
+    + formatteropts,
+    _(b'[OPTION]... [FILE]...'),
     helpcategory=command.CATEGORY_WORKING_DIRECTORY,
-    helpbasic=True, inferrepo=True,
-    intents={INTENT_READONLY})
+    helpbasic=True,
+    inferrepo=True,
+    intents={INTENT_READONLY},
+)
 def status(ui, repo, *pats, **opts):
     """show changed files in the working directory
 
@@ -5549,44 +6761,47 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    revs = opts.get('rev')
-    change = opts.get('change')
-    terse = opts.get('terse')
+    revs = opts.get(b'rev')
+    change = opts.get(b'change')
+    terse = opts.get(b'terse')
     if terse is _NOTTERSE:
         if revs:
-            terse = ''
+            terse = b''
         else:
-            terse = ui.config('commands', 'status.terse')
+            terse = ui.config(b'commands', b'status.terse')
 
     if revs and change:
-        msg = _('cannot specify --rev and --change at the same time')
+        msg = _(b'cannot specify --rev and --change at the same time')
         raise error.Abort(msg)
     elif revs and terse:
-        msg = _('cannot use --terse with --rev')
+        msg = _(b'cannot use --terse with --rev')
         raise error.Abort(msg)
     elif change:
-        repo = scmutil.unhidehashlikerevs(repo, [change], 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
         ctx2 = scmutil.revsingle(repo, change, None)
         ctx1 = ctx2.p1()
     else:
-        repo = scmutil.unhidehashlikerevs(repo, revs, 'nowarn')
+        repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
         ctx1, ctx2 = scmutil.revpair(repo, revs)
 
     forcerelativevalue = None
-    if ui.hasconfig('commands', 'status.relative'):
-        forcerelativevalue = ui.configbool('commands', 'status.relative')
-    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=bool(pats),
-                                   forcerelativevalue=forcerelativevalue)
-
-    if opts.get('print0'):
-        end = '\0'
+    if ui.hasconfig(b'commands', b'status.relative'):
+        forcerelativevalue = ui.configbool(b'commands', b'status.relative')
+    uipathfn = scmutil.getuipathfn(
+        repo,
+        legacyrelativevalue=bool(pats),
+        forcerelativevalue=forcerelativevalue,
+    )
+
+    if opts.get(b'print0'):
+        end = b'\0'
     else:
-        end = '\n'
+        end = b'\n'
     copy = {}
-    states = 'modified added removed deleted unknown ignored clean'.split()
+    states = b'modified added removed deleted unknown ignored clean'.split()
     show = [k for k in states if opts.get(k)]
-    if opts.get('all'):
-        show += ui.quiet and (states[:4] + ['clean']) or states
+    if opts.get(b'all'):
+        show += ui.quiet and (states[:4] + [b'clean']) or states
 
     if not show:
         if ui.quiet:
@@ -5597,53 +6812,73 @@
     m = scmutil.match(ctx2, pats, opts)
     if terse:
         # we need to compute clean and unknown to terse
-        stat = repo.status(ctx1.node(), ctx2.node(), m,
-                           'ignored' in show or 'i' in terse,
-                            clean=True, unknown=True,
-                            listsubrepos=opts.get('subrepos'))
+        stat = repo.status(
+            ctx1.node(),
+            ctx2.node(),
+            m,
+            b'ignored' in show or b'i' in terse,
+            clean=True,
+            unknown=True,
+            listsubrepos=opts.get(b'subrepos'),
+        )
 
         stat = cmdutil.tersedir(stat, terse)
     else:
-        stat = repo.status(ctx1.node(), ctx2.node(), m,
-                           'ignored' in show, 'clean' in show,
-                           'unknown' in show, opts.get('subrepos'))
-
-    changestates = zip(states, pycompat.iterbytestr('MAR!?IC'), stat)
-
-    if (opts.get('all') or opts.get('copies')
-        or ui.configbool('ui', 'statuscopies')) and not opts.get('no_status'):
+        stat = repo.status(
+            ctx1.node(),
+            ctx2.node(),
+            m,
+            b'ignored' in show,
+            b'clean' in show,
+            b'unknown' in show,
+            opts.get(b'subrepos'),
+        )
+
+    changestates = zip(states, pycompat.iterbytestr(b'MAR!?IC'), stat)
+
+    if (
+        opts.get(b'all')
+        or opts.get(b'copies')
+        or ui.configbool(b'ui', b'statuscopies')
+    ) and not opts.get(b'no_status'):
         copy = copies.pathcopies(ctx1, ctx2, m)
 
-    ui.pager('status')
-    fm = ui.formatter('status', opts)
-    fmt = '%s' + end
-    showchar = not opts.get('no_status')
+    ui.pager(b'status')
+    fm = ui.formatter(b'status', opts)
+    fmt = b'%s' + end
+    showchar = not opts.get(b'no_status')
 
     for state, char, files in changestates:
         if state in show:
-            label = 'status.' + state
+            label = b'status.' + state
             for f in files:
                 fm.startitem()
                 fm.context(ctx=ctx2)
                 fm.data(path=f)
-                fm.condwrite(showchar, 'status', '%s ', char, label=label)
+                fm.condwrite(showchar, b'status', b'%s ', char, label=label)
                 fm.plain(fmt % uipathfn(f), label=label)
                 if f in copy:
                     fm.data(source=copy[f])
-                    fm.plain(('  %s' + end) % uipathfn(copy[f]),
-                             label='status.copied')
-
-    if ((ui.verbose or ui.configbool('commands', 'status.verbose'))
-        and not ui.plain()):
+                    fm.plain(
+                        (b'  %s' + end) % uipathfn(copy[f]),
+                        label=b'status.copied',
+                    )
+
+    if (
+        ui.verbose or ui.configbool(b'commands', b'status.verbose')
+    ) and not ui.plain():
         cmdutil.morestatus(repo, fm)
     fm.end()
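
``status`` above pairs the seven state names with their one-character
prefixes via ``zip(states, pycompat.iterbytestr(b'MAR!?IC'), stat)``; the
helper exists because iterating a ``bytes`` object on Python 3 yields
integers rather than one-byte strings. A sketch of the pairing, with a
simplified stand-in for the pycompat helper::

   import sys

   def iterbytestr(s):
       # Slice instead of iterating so each element is b'M', not the int 77.
       if sys.version_info[0] < 3:
           return iter(s)
       return (s[i:i + 1] for i in range(len(s)))

   states = b'modified added removed deleted unknown ignored clean'.split()
   for state, char in zip(states, iterbytestr(b'MAR!?IC')):
       print('%s %s' % (char.decode(), state.decode()))
   # M modified, A added, R removed, ! deleted, ? unknown, I ignored, C clean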
 
-@command('summary|sum',
-    [('', 'remote', None, _('check for push and pull'))],
-    '[--remote]',
+
+@command(
+    b'summary|sum',
+    [(b'', b'remote', None, _(b'check for push and pull'))],
+    b'[--remote]',
     helpcategory=command.CATEGORY_WORKING_DIRECTORY,
     helpbasic=True,
-    intents={INTENT_READONLY})
+    intents={INTENT_READONLY},
+)
 def summary(ui, repo, **opts):
     """summarize working directory state
 
@@ -5657,7 +6892,7 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    ui.pager('summary')
+    ui.pager(b'summary')
     ctx = repo[None]
     parents = ctx.parents()
     pnode = parents[0].node()
@@ -5666,9 +6901,10 @@
     try:
         ms = mergemod.mergestate.read(repo)
     except error.UnsupportedMergeRecords as e:
-        s = ' '.join(e.recordtypes)
+        s = b' '.join(e.recordtypes)
         ui.warn(
-            _('warning: merge state has unsupported record types: %s\n') % s)
+            _(b'warning: merge state has unsupported record types: %s\n') % s
+        )
         unresolved = []
     else:
         unresolved = list(ms.unresolved())
@@ -5677,57 +6913,61 @@
         # label with log.changeset (instead of log.parent) since this
         # shows a working directory parent *changeset*:
         # i18n: column positioning for "hg summary"
-        ui.write(_('parent: %d:%s ') % (p.rev(), p),
-                 label=logcmdutil.changesetlabels(p))
-        ui.write(' '.join(p.tags()), label='log.tag')
+        ui.write(
+            _(b'parent: %d:%s ') % (p.rev(), p),
+            label=logcmdutil.changesetlabels(p),
+        )
+        ui.write(b' '.join(p.tags()), label=b'log.tag')
         if p.bookmarks():
             marks.extend(p.bookmarks())
         if p.rev() == -1:
             if not len(repo):
-                ui.write(_(' (empty repository)'))
+                ui.write(_(b' (empty repository)'))
             else:
-                ui.write(_(' (no revision checked out)'))
+                ui.write(_(b' (no revision checked out)'))
         if p.obsolete():
-            ui.write(_(' (obsolete)'))
+            ui.write(_(b' (obsolete)'))
         if p.isunstable():
-            instabilities = (ui.label(instability, 'trouble.%s' % instability)
-                             for instability in p.instabilities())
-            ui.write(' ('
-                     + ', '.join(instabilities)
-                     + ')')
-        ui.write('\n')
+            instabilities = (
+                ui.label(instability, b'trouble.%s' % instability)
+                for instability in p.instabilities()
+            )
+            ui.write(b' (' + b', '.join(instabilities) + b')')
+        ui.write(b'\n')
         if p.description():
-            ui.status(' ' + p.description().splitlines()[0].strip() + '\n',
-                      label='log.summary')
+            ui.status(
+                b' ' + p.description().splitlines()[0].strip() + b'\n',
+                label=b'log.summary',
+            )
 
     branch = ctx.branch()
     bheads = repo.branchheads(branch)
     # i18n: column positioning for "hg summary"
-    m = _('branch: %s\n') % branch
-    if branch != 'default':
-        ui.write(m, label='log.branch')
+    m = _(b'branch: %s\n') % branch
+    if branch != b'default':
+        ui.write(m, label=b'log.branch')
     else:
-        ui.status(m, label='log.branch')
+        ui.status(m, label=b'log.branch')
 
     if marks:
         active = repo._activebookmark
         # i18n: column positioning for "hg summary"
-        ui.write(_('bookmarks:'), label='log.bookmark')
+        ui.write(_(b'bookmarks:'), label=b'log.bookmark')
         if active is not None:
             if active in marks:
-                ui.write(' *' + active, label=bookmarks.activebookmarklabel)
+                ui.write(b' *' + active, label=bookmarks.activebookmarklabel)
                 marks.remove(active)
             else:
-                ui.write(' [%s]' % active, label=bookmarks.activebookmarklabel)
+                ui.write(b' [%s]' % active, label=bookmarks.activebookmarklabel)
         for m in marks:
-            ui.write(' ' + m, label='log.bookmark')
-        ui.write('\n', label='log.bookmark')
+            ui.write(b' ' + m, label=b'log.bookmark')
+        ui.write(b'\n', label=b'log.bookmark')
 
     status = repo.status(unknown=True)
 
     c = repo.dirstate.copies()
     copied, renamed = [], []
-    for d, s in c.iteritems():
+    for d, s in pycompat.iteritems(c):
         if s in status.removed:
             status.removed.remove(s)
             renamed.append(d)
@@ -5738,40 +6978,49 @@
 
     subs = [s for s in ctx.substate if ctx.sub(s).dirty()]
 
-    labels = [(ui.label(_('%d modified'), 'status.modified'), status.modified),
-              (ui.label(_('%d added'), 'status.added'), status.added),
-              (ui.label(_('%d removed'), 'status.removed'), status.removed),
-              (ui.label(_('%d renamed'), 'status.copied'), renamed),
-              (ui.label(_('%d copied'), 'status.copied'), copied),
-              (ui.label(_('%d deleted'), 'status.deleted'), status.deleted),
-              (ui.label(_('%d unknown'), 'status.unknown'), status.unknown),
-              (ui.label(_('%d unresolved'), 'resolve.unresolved'), unresolved),
-              (ui.label(_('%d subrepos'), 'status.modified'), subs)]
+    labels = [
+        (ui.label(_(b'%d modified'), b'status.modified'), status.modified),
+        (ui.label(_(b'%d added'), b'status.added'), status.added),
+        (ui.label(_(b'%d removed'), b'status.removed'), status.removed),
+        (ui.label(_(b'%d renamed'), b'status.copied'), renamed),
+        (ui.label(_(b'%d copied'), b'status.copied'), copied),
+        (ui.label(_(b'%d deleted'), b'status.deleted'), status.deleted),
+        (ui.label(_(b'%d unknown'), b'status.unknown'), status.unknown),
+        (ui.label(_(b'%d unresolved'), b'resolve.unresolved'), unresolved),
+        (ui.label(_(b'%d subrepos'), b'status.modified'), subs),
+    ]
     t = []
     for l, s in labels:
         if s:
             t.append(l % len(s))
 
-    t = ', '.join(t)
+    t = b', '.join(t)
     cleanworkdir = False
 
-    if repo.vfs.exists('graftstate'):
-        t += _(' (graft in progress)')
-    if repo.vfs.exists('updatestate'):
-        t += _(' (interrupted update)')
+    if repo.vfs.exists(b'graftstate'):
+        t += _(b' (graft in progress)')
+    if repo.vfs.exists(b'updatestate'):
+        t += _(b' (interrupted update)')
     elif len(parents) > 1:
-        t += _(' (merge)')
+        t += _(b' (merge)')
     elif branch != parents[0].branch():
-        t += _(' (new branch)')
-    elif (parents[0].closesbranch() and
-          pnode in repo.branchheads(branch, closed=True)):
-        t += _(' (head closed)')
-    elif not (status.modified or status.added or status.removed or renamed or
-              copied or subs):
-        t += _(' (clean)')
+        t += _(b' (new branch)')
+    elif parents[0].closesbranch() and pnode in repo.branchheads(
+        branch, closed=True
+    ):
+        t += _(b' (head closed)')
+    elif not (
+        status.modified
+        or status.added
+        or status.removed
+        or renamed
+        or copied
+        or subs
+    ):
+        t += _(b' (clean)')
         cleanworkdir = True
     elif pnode not in bheads:
-        t += _(' (new branch head)')
+        t += _(b' (new branch head)')
 
     if parents:
         pendingphase = max(p.phase() for p in parents)
@@ -5779,56 +7028,59 @@
         pendingphase = phases.public
 
     if pendingphase > phases.newcommitphase(ui):
-        t += ' (%s)' % phases.phasenames[pendingphase]
+        t += b' (%s)' % phases.phasenames[pendingphase]
 
     if cleanworkdir:
         # i18n: column positioning for "hg summary"
-        ui.status(_('commit: %s\n') % t.strip())
+        ui.status(_(b'commit: %s\n') % t.strip())
     else:
         # i18n: column positioning for "hg summary"
-        ui.write(_('commit: %s\n') % t.strip())
+        ui.write(_(b'commit: %s\n') % t.strip())
 
     # all ancestors of branch heads - all ancestors of parent = new csets
-    new = len(repo.changelog.findmissing([pctx.node() for pctx in parents],
-                                         bheads))
+    new = len(
+        repo.changelog.findmissing([pctx.node() for pctx in parents], bheads)
+    )
 
     if new == 0:
         # i18n: column positioning for "hg summary"
-        ui.status(_('update: (current)\n'))
+        ui.status(_(b'update: (current)\n'))
     elif pnode not in bheads:
         # i18n: column positioning for "hg summary"
-        ui.write(_('update: %d new changesets (update)\n') % new)
+        ui.write(_(b'update: %d new changesets (update)\n') % new)
     else:
         # i18n: column positioning for "hg summary"
-        ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
-                 (new, len(bheads)))
+        ui.write(
+            _(b'update: %d new changesets, %d branch heads (merge)\n')
+            % (new, len(bheads))
+        )
 
     t = []
-    draft = len(repo.revs('draft()'))
+    draft = len(repo.revs(b'draft()'))
     if draft:
-        t.append(_('%d draft') % draft)
-    secret = len(repo.revs('secret()'))
+        t.append(_(b'%d draft') % draft)
+    secret = len(repo.revs(b'secret()'))
     if secret:
-        t.append(_('%d secret') % secret)
+        t.append(_(b'%d secret') % secret)
 
     if draft or secret:
-        ui.status(_('phases: %s\n') % ', '.join(t))
+        ui.status(_(b'phases: %s\n') % b', '.join(t))
 
     if obsolete.isenabled(repo, obsolete.createmarkersopt):
-        for trouble in ("orphan", "contentdivergent", "phasedivergent"):
-            numtrouble = len(repo.revs(trouble + "()"))
+        for trouble in (b"orphan", b"contentdivergent", b"phasedivergent"):
+            numtrouble = len(repo.revs(trouble + b"()"))
             # We write all the possibilities to ease translation
             troublemsg = {
-               "orphan": _("orphan: %d changesets"),
-               "contentdivergent": _("content-divergent: %d changesets"),
-               "phasedivergent": _("phase-divergent: %d changesets"),
+                b"orphan": _(b"orphan: %d changesets"),
+                b"contentdivergent": _(b"content-divergent: %d changesets"),
+                b"phasedivergent": _(b"phase-divergent: %d changesets"),
             }
             if numtrouble > 0:
-                ui.status(troublemsg[trouble] % numtrouble + "\n")
+                ui.status(troublemsg[trouble] % numtrouble + b"\n")
 
     cmdutil.summaryhooks(ui, repo)
 
-    if opts.get('remote'):
+    if opts.get(b'remote'):
         needsincoming, needsoutgoing = True, True
     else:
         needsincoming, needsoutgoing = False, False
@@ -5841,18 +7093,18 @@
             return
 
     def getincoming():
-        source, branches = hg.parseurl(ui.expandpath('default'))
+        source, branches = hg.parseurl(ui.expandpath(b'default'))
         sbranch = branches[0]
         try:
             other = hg.peer(repo, {}, source)
         except error.RepoError:
-            if opts.get('remote'):
+            if opts.get(b'remote'):
                 raise
             return source, sbranch, None, None, None
         revs, checkout = hg.addbranchrevs(repo, other, branches, None)
         if revs:
             revs = [other.lookup(rev) for rev in revs]
-        ui.debug('comparing with %s\n' % util.hidepassword(source))
+        ui.debug(b'comparing with %s\n' % util.hidepassword(source))
         repo.ui.pushbuffer()
         commoninc = discovery.findcommonincoming(repo, other, heads=revs)
         repo.ui.popbuffer()
@@ -5864,31 +7116,32 @@
         source = sbranch = sother = commoninc = incoming = None
 
     def getoutgoing():
-        dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
+        dest, branches = hg.parseurl(ui.expandpath(b'default-push', b'default'))
         dbranch = branches[0]
         revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
         if source != dest:
             try:
                 dother = hg.peer(repo, {}, dest)
             except error.RepoError:
-                if opts.get('remote'):
+                if opts.get(b'remote'):
                     raise
                 return dest, dbranch, None, None
-            ui.debug('comparing with %s\n' % util.hidepassword(dest))
+            ui.debug(b'comparing with %s\n' % util.hidepassword(dest))
         elif sother is None:
             # there is no explicit destination peer, but source one is invalid
             return dest, dbranch, None, None
         else:
             dother = sother
-        if (source != dest or (sbranch is not None and sbranch != dbranch)):
+        if source != dest or (sbranch is not None and sbranch != dbranch):
             common = None
         else:
             common = commoninc
         if revs:
             revs = [repo.lookup(rev) for rev in revs]
         repo.ui.pushbuffer()
-        outgoing = discovery.findcommonoutgoing(repo, dother, onlyheads=revs,
-                                                commoninc=common)
+        outgoing = discovery.findcommonoutgoing(
+            repo, dother, onlyheads=revs, commoninc=common
+        )
         repo.ui.popbuffer()
         return dest, dbranch, dother, outgoing
 
@@ -5897,43 +7150,54 @@
     else:
         dest = dbranch = dother = outgoing = None
 
-    if opts.get('remote'):
+    if opts.get(b'remote'):
         t = []
         if incoming:
-            t.append(_('1 or more incoming'))
+            t.append(_(b'1 or more incoming'))
         o = outgoing.missing
         if o:
-            t.append(_('%d outgoing') % len(o))
+            t.append(_(b'%d outgoing') % len(o))
         other = dother or sother
-        if 'bookmarks' in other.listkeys('namespaces'):
+        if b'bookmarks' in other.listkeys(b'namespaces'):
             counts = bookmarks.summary(repo, other)
             if counts[0] > 0:
-                t.append(_('%d incoming bookmarks') % counts[0])
+                t.append(_(b'%d incoming bookmarks') % counts[0])
             if counts[1] > 0:
-                t.append(_('%d outgoing bookmarks') % counts[1])
+                t.append(_(b'%d outgoing bookmarks') % counts[1])
 
         if t:
             # i18n: column positioning for "hg summary"
-            ui.write(_('remote: %s\n') % (', '.join(t)))
+            ui.write(_(b'remote: %s\n') % (b', '.join(t)))
         else:
             # i18n: column positioning for "hg summary"
-            ui.status(_('remote: (synced)\n'))
-
-    cmdutil.summaryremotehooks(ui, repo, opts,
-                               ((source, sbranch, sother, commoninc),
-                                (dest, dbranch, dother, outgoing)))
-
-@command('tag',
-    [('f', 'force', None, _('force tag')),
-    ('l', 'local', None, _('make the tag local')),
-    ('r', 'rev', '', _('revision to tag'), _('REV')),
-    ('', 'remove', None, _('remove a tag')),
-    # -l/--local is already there, commitopts cannot be used
-    ('e', 'edit', None, _('invoke editor on commit messages')),
-    ('m', 'message', '', _('use text as commit message'), _('TEXT')),
-    ] + commitopts2,
-    _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'),
-    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
+            ui.status(_(b'remote: (synced)\n'))
+
+    cmdutil.summaryremotehooks(
+        ui,
+        repo,
+        opts,
+        (
+            (source, sbranch, sother, commoninc),
+            (dest, dbranch, dother, outgoing),
+        ),
+    )
+
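
One porting change buried in ``summary`` above is ``pycompat.iteritems(c)``
replacing the Python 2-only ``c.iteritems()`` when splitting the dirstate
copies into renames and copies. A stand-in with the same dispatch,
simplified relative to the real pycompat::

   import sys

   def iteritems(d):
       # dict.iteritems() no longer exists on Python 3.
       if sys.version_info[0] < 3:
           return d.iteritems()
       return iter(d.items())

   # destination -> source, the shape repo.dirstate.copies() returns
   copies = {b'new.txt': b'old.txt'}
   for d, s in iteritems(copies):
       print('%s copied from %s' % (d.decode(), s.decode()))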
+
+@command(
+    b'tag',
+    [
+        (b'f', b'force', None, _(b'force tag')),
+        (b'l', b'local', None, _(b'make the tag local')),
+        (b'r', b'rev', b'', _(b'revision to tag'), _(b'REV')),
+        (b'', b'remove', None, _(b'remove a tag')),
+        # -l/--local is already there, commitopts cannot be used
+        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
+        (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
+    ]
+    + commitopts2,
+    _(b'[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+)
 def tag(ui, repo, name1, *names, **opts):
     """add one or more tags for the current or given revision
 
@@ -5968,85 +7232,110 @@
     """
     opts = pycompat.byteskwargs(opts)
     with repo.wlock(), repo.lock():
-        rev_ = "."
+        rev_ = b"."
         names = [t.strip() for t in (name1,) + names]
         if len(names) != len(set(names)):
-            raise error.Abort(_('tag names must be unique'))
+            raise error.Abort(_(b'tag names must be unique'))
         for n in names:
-            scmutil.checknewlabel(repo, n, 'tag')
+            scmutil.checknewlabel(repo, n, b'tag')
             if not n:
-                raise error.Abort(_('tag names cannot consist entirely of '
-                                   'whitespace'))
-        if opts.get('rev') and opts.get('remove'):
-            raise error.Abort(_("--rev and --remove are incompatible"))
-        if opts.get('rev'):
-            rev_ = opts['rev']
-        message = opts.get('message')
-        if opts.get('remove'):
-            if opts.get('local'):
-                expectedtype = 'local'
+                raise error.Abort(
+                    _(b'tag names cannot consist entirely of whitespace')
+                )
+        if opts.get(b'rev') and opts.get(b'remove'):
+            raise error.Abort(_(b"--rev and --remove are incompatible"))
+        if opts.get(b'rev'):
+            rev_ = opts[b'rev']
+        message = opts.get(b'message')
+        if opts.get(b'remove'):
+            if opts.get(b'local'):
+                expectedtype = b'local'
             else:
-                expectedtype = 'global'
+                expectedtype = b'global'
 
             for n in names:
-                if repo.tagtype(n) == 'global':
+                if repo.tagtype(n) == b'global':
                     alltags = tagsmod.findglobaltags(ui, repo)
                     if alltags[n][0] == nullid:
-                        raise error.Abort(_("tag '%s' is already removed") % n)
+                        raise error.Abort(_(b"tag '%s' is already removed") % n)
                 if not repo.tagtype(n):
-                    raise error.Abort(_("tag '%s' does not exist") % n)
+                    raise error.Abort(_(b"tag '%s' does not exist") % n)
                 if repo.tagtype(n) != expectedtype:
-                    if expectedtype == 'global':
-                        raise error.Abort(_("tag '%s' is not a global tag") % n)
+                    if expectedtype == b'global':
+                        raise error.Abort(
+                            _(b"tag '%s' is not a global tag") % n
+                        )
                     else:
-                        raise error.Abort(_("tag '%s' is not a local tag") % n)
-            rev_ = 'null'
+                        raise error.Abort(_(b"tag '%s' is not a local tag") % n)
+            rev_ = b'null'
             if not message:
                 # we don't translate commit messages
-                message = 'Removed tag %s' % ', '.join(names)
-        elif not opts.get('force'):
+                message = b'Removed tag %s' % b', '.join(names)
+        elif not opts.get(b'force'):
             for n in names:
                 if n in repo.tags():
-                    raise error.Abort(_("tag '%s' already exists "
-                                       "(use -f to force)") % n)
-        if not opts.get('local'):
+                    raise error.Abort(
+                        _(b"tag '%s' already exists (use -f to force)") % n
+                    )
+        if not opts.get(b'local'):
             p1, p2 = repo.dirstate.parents()
             if p2 != nullid:
-                raise error.Abort(_('uncommitted merge'))
+                raise error.Abort(_(b'uncommitted merge'))
             bheads = repo.branchheads()
-            if not opts.get('force') and bheads and p1 not in bheads:
-                raise error.Abort(_('working directory is not at a branch head '
-                                    '(use -f to force)'))
+            if not opts.get(b'force') and bheads and p1 not in bheads:
+                raise error.Abort(
+                    _(
+                        b'working directory is not at a branch head '
+                        b'(use -f to force)'
+                    )
+                )
         node = scmutil.revsingle(repo, rev_).node()
 
         if not message:
             # we don't translate commit messages
-            message = ('Added tag %s for changeset %s' %
-                       (', '.join(names), short(node)))
-
-        date = opts.get('date')
+            message = b'Added tag %s for changeset %s' % (
+                b', '.join(names),
+                short(node),
+            )
+
+        date = opts.get(b'date')
         if date:
             date = dateutil.parsedate(date)
 
-        if opts.get('remove'):
-            editform = 'tag.remove'
+        if opts.get(b'remove'):
+            editform = b'tag.remove'
         else:
-            editform = 'tag.add'
-        editor = cmdutil.getcommiteditor(editform=editform,
-                                         **pycompat.strkwargs(opts))
+            editform = b'tag.add'
+        editor = cmdutil.getcommiteditor(
+            editform=editform, **pycompat.strkwargs(opts)
+        )
 
         # don't allow tagging the null rev
-        if (not opts.get('remove') and
-            scmutil.revsingle(repo, rev_).rev() == nullrev):
-            raise error.Abort(_("cannot tag null revision"))
-
-        tagsmod.tag(repo, names, node, message, opts.get('local'),
-                    opts.get('user'), date, editor=editor)
+        if (
+            not opts.get(b'remove')
+            and scmutil.revsingle(repo, rev_).rev() == nullrev
+        ):
+            raise error.Abort(_(b"cannot tag null revision"))
+
+        tagsmod.tag(
+            repo,
+            names,
+            node,
+            message,
+            opts.get(b'local'),
+            opts.get(b'user'),
+            date,
+            editor=editor,
+        )
+
 
 @command(
-    'tags', formatteropts, '',
+    b'tags',
+    formatteropts,
+    b'',
     helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
-    intents={INTENT_READONLY})
+    intents={INTENT_READONLY},
+)
 def tags(ui, repo, **opts):
     """list repository tags
 
@@ -6068,35 +7357,47 @@
     """
 
     opts = pycompat.byteskwargs(opts)
-    ui.pager('tags')
-    fm = ui.formatter('tags', opts)
+    ui.pager(b'tags')
+    fm = ui.formatter(b'tags', opts)
     hexfunc = fm.hexfunc
 
     for t, n in reversed(repo.tagslist()):
         hn = hexfunc(n)
-        label = 'tags.normal'
-        tagtype = ''
-        if repo.tagtype(t) == 'local':
-            label = 'tags.local'
-            tagtype = 'local'
+        label = b'tags.normal'
+        tagtype = b''
+        if repo.tagtype(t) == b'local':
+            label = b'tags.local'
+            tagtype = b'local'
 
         fm.startitem()
         fm.context(repo=repo)
-        fm.write('tag', '%s', t, label=label)
-        fmt = " " * (30 - encoding.colwidth(t)) + ' %5d:%s'
-        fm.condwrite(not ui.quiet, 'rev node', fmt,
-                     repo.changelog.rev(n), hn, label=label)
-        fm.condwrite(ui.verbose and tagtype, 'type', ' %s',
-                     tagtype, label=label)
-        fm.plain('\n')
+        fm.write(b'tag', b'%s', t, label=label)
+        fmt = b" " * (30 - encoding.colwidth(t)) + b' %5d:%s'
+        fm.condwrite(
+            not ui.quiet,
+            b'rev node',
+            fmt,
+            repo.changelog.rev(n),
+            hn,
+            label=label,
+        )
+        fm.condwrite(
+            ui.verbose and tagtype, b'type', b' %s', tagtype, label=label
+        )
+        fm.plain(b'\n')
     fm.end()
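
The ``tags`` loop above right-aligns each ``rev:node`` column by padding
the tag name to a 30-column field with ``encoding.colwidth``, which
measures display width (wide glyphs count as two columns). With plain
``len`` as a rough stand-in for ``colwidth``, and obviously fake revision
data::

   tags = [(b'tip', 11, b'0123456789ab'), (b'a-longer-tag', 10, b'ba9876543210')]

   for name, rev, node in tags:
       # b" " * (negative count) is b"", so overlong names degrade gracefully.
       fmt = b" " * (30 - len(name)) + b' %5d:%s'
       print((name + fmt % (rev, node)).decode())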
 
-@command('tip',
-    [('p', 'patch', None, _('show patch')),
-    ('g', 'git', None, _('use git extended diff format')),
-    ] + templateopts,
-    _('[-p] [-g]'),
-    helpcategory=command.CATEGORY_CHANGE_NAVIGATION)
+
+@command(
+    b'tip',
+    [
+        (b'p', b'patch', None, _(b'show patch')),
+        (b'g', b'git', None, _(b'use git extended diff format')),
+    ]
+    + templateopts,
+    _(b'[-p] [-g]'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
+)
 def tip(ui, repo, **opts):
     """show the tip revision (DEPRECATED)
 
@@ -6115,14 +7416,23 @@
     """
     opts = pycompat.byteskwargs(opts)
     displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
-    displayer.show(repo['tip'])
+    displayer.show(repo[b'tip'])
     displayer.close()
 
-@command('unbundle',
-    [('u', 'update', None,
-     _('update to new branch head if changesets were unbundled'))],
-    _('[-u] FILE...'),
-    helpcategory=command.CATEGORY_IMPORT_EXPORT)
+
+@command(
+    b'unbundle',
+    [
+        (
+            b'u',
+            b'update',
+            None,
+            _(b'update to new branch head if changesets were unbundled'),
+        )
+    ],
+    _(b'[-u] FILE...'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT,
+)
 def unbundle(ui, repo, fname1, *fnames, **opts):
     """apply one or more bundle files
 
@@ -6138,43 +7448,66 @@
             gen = exchange.readbundle(ui, f, fname)
             if isinstance(gen, streamclone.streamcloneapplier):
                 raise error.Abort(
-                        _('packed bundles cannot be applied with '
-                          '"hg unbundle"'),
-                        hint=_('use "hg debugapplystreamclonebundle"'))
-            url = 'bundle:' + fname
+                    _(
+                        b'packed bundles cannot be applied with '
+                        b'"hg unbundle"'
+                    ),
+                    hint=_(b'use "hg debugapplystreamclonebundle"'),
+                )
+            url = b'bundle:' + fname
             try:
-                txnname = 'unbundle'
+                txnname = b'unbundle'
                 if not isinstance(gen, bundle2.unbundle20):
-                    txnname = 'unbundle\n%s' % util.hidepassword(url)
+                    txnname = b'unbundle\n%s' % util.hidepassword(url)
                 with repo.transaction(txnname) as tr:
-                    op = bundle2.applybundle(repo, gen, tr, source='unbundle',
-                                             url=url)
+                    op = bundle2.applybundle(
+                        repo, gen, tr, source=b'unbundle', url=url
+                    )
             except error.BundleUnknownFeatureError as exc:
                 raise error.Abort(
-                    _('%s: unknown bundle feature, %s') % (fname, exc),
-                    hint=_("see https://mercurial-scm.org/"
-                           "wiki/BundleFeature for more "
-                           "information"))
+                    _(b'%s: unknown bundle feature, %s') % (fname, exc),
+                    hint=_(
+                        b"see https://mercurial-scm.org/"
+                        b"wiki/BundleFeature for more "
+                        b"information"
+                    ),
+                )
             modheads = bundle2.combinechangegroupresults(op)
 
     return postincoming(ui, repo, modheads, opts.get(r'update'), None, None)
 
-@command('unshelve',
-         [('a', 'abort', None,
-           _('abort an incomplete unshelve operation')),
-          ('c', 'continue', None,
-           _('continue an incomplete unshelve operation')),
-          ('i', 'interactive', None,
-           _('use interactive mode (EXPERIMENTAL)')),
-          ('k', 'keep', None,
-           _('keep shelve after unshelving')),
-          ('n', 'name', '',
-           _('restore shelved change with given name'), _('NAME')),
-          ('t', 'tool', '', _('specify merge tool')),
-          ('', 'date', '',
-           _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
-         _('hg unshelve [OPTION]... [FILE]... [-n SHELVED]'),
-         helpcategory=command.CATEGORY_WORKING_DIRECTORY)
+
+@command(
+    b'unshelve',
+    [
+        (b'a', b'abort', None, _(b'abort an incomplete unshelve operation')),
+        (
+            b'c',
+            b'continue',
+            None,
+            _(b'continue an incomplete unshelve operation'),
+        ),
+        (b'i', b'interactive', None, _(b'use interactive mode (EXPERIMENTAL)')),
+        (b'k', b'keep', None, _(b'keep shelve after unshelving')),
+        (
+            b'n',
+            b'name',
+            b'',
+            _(b'restore shelved change with given name'),
+            _(b'NAME'),
+        ),
+        (b't', b'tool', b'', _(b'specify merge tool')),
+        (
+            b'',
+            b'date',
+            b'',
+            _(b'set date for temporary commits (DEPRECATED)'),
+            _(b'DATE'),
+        ),
+    ],
+    _(b'hg unshelve [OPTION]... [FILE]... [-n SHELVED]'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+)
 def unshelve(ui, repo, *shelved, **opts):
     """restore a shelved change to the working directory
 
@@ -6222,23 +7555,31 @@
     with repo.wlock():
         return shelvemod.dounshelve(ui, repo, *shelved, **opts)
 
+
 statemod.addunfinished(
-    'unshelve', fname='shelvedstate', continueflag=True,
+    b'unshelve',
+    fname=b'shelvedstate',
+    continueflag=True,
     abortfunc=shelvemod.hgabortunshelve,
     continuefunc=shelvemod.hgcontinueunshelve,
-    cmdmsg=_('unshelve already in progress'),
+    cmdmsg=_(b'unshelve already in progress'),
 )
 
-@command('update|up|checkout|co',
-    [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
-    ('c', 'check', None, _('require clean working directory')),
-    ('m', 'merge', None, _('merge uncommitted changes')),
-    ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
-    ('r', 'rev', '', _('revision'), _('REV'))
-     ] + mergetoolopts,
-    _('[-C|-c|-m] [-d DATE] [[-r] REV]'),
+
+@command(
+    b'update|up|checkout|co',
+    [
+        (b'C', b'clean', None, _(b'discard uncommitted changes (no backup)')),
+        (b'c', b'check', None, _(b'require clean working directory')),
+        (b'm', b'merge', None, _(b'merge uncommitted changes')),
+        (b'd', b'date', b'', _(b'tipmost revision matching date'), _(b'DATE')),
+        (b'r', b'rev', b'', _(b'revision'), _(b'REV')),
+    ]
+    + mergetoolopts,
+    _(b'[-C|-c|-m] [-d DATE] [[-r] REV]'),
     helpcategory=command.CATEGORY_WORKING_DIRECTORY,
-    helpbasic=True)
+    helpbasic=True,
+)
 def update(ui, repo, node=None, **opts):
     """update working directory (or switch revisions)
 
@@ -6300,28 +7641,34 @@
     check = opts.get(r'check')
     merge = opts.get(r'merge')
     if rev and node:
-        raise error.Abort(_("please specify just one revision"))
-
-    if ui.configbool('commands', 'update.requiredest'):
+        raise error.Abort(_(b"please specify just one revision"))
+
+    if ui.configbool(b'commands', b'update.requiredest'):
         if not node and not rev and not date:
-            raise error.Abort(_('you must specify a destination'),
-                              hint=_('for example: hg update ".::"'))
-
-    if rev is None or rev == '':
+            raise error.Abort(
+                _(b'you must specify a destination'),
+                hint=_(b'for example: hg update ".::"'),
+            )
+
+    if rev is None or rev == b'':
         rev = node
 
     if date and rev is not None:
-        raise error.Abort(_("you can't specify a revision and a date"))
+        raise error.Abort(_(b"you can't specify a revision and a date"))
 
     if len([x for x in (clean, check, merge) if x]) > 1:
-        raise error.Abort(_("can only specify one of -C/--clean, -c/--check, "
-                            "or -m/--merge"))
+        raise error.Abort(
+            _(
+                b"can only specify one of -C/--clean, -c/--check, "
+                b"or -m/--merge"
+            )
+        )
 
     updatecheck = None
     if check:
-        updatecheck = 'abort'
+        updatecheck = b'abort'
     elif merge:
-        updatecheck = 'none'
+        updatecheck = b'none'
 
     with repo.wlock():
         cmdutil.clearunfinished(repo)
@@ -6331,26 +7678,30 @@
         # if we defined a bookmark, we have to remember the original name
         brev = rev
         if rev:
-            repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+            repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
         ctx = scmutil.revsingle(repo, rev, default=None)
         rev = ctx.rev()
         hidden = ctx.hidden()
-        overrides = {('ui', 'forcemerge'): opts.get(r'tool', '')}
-        with ui.configoverride(overrides, 'update'):
-            ret = hg.updatetotally(ui, repo, rev, brev, clean=clean,
-                                   updatecheck=updatecheck)
+        overrides = {(b'ui', b'forcemerge'): opts.get(r'tool', b'')}
+        with ui.configoverride(overrides, b'update'):
+            ret = hg.updatetotally(
+                ui, repo, rev, brev, clean=clean, updatecheck=updatecheck
+            )
         if hidden:
             ctxstr = ctx.hex()[:12]
-            ui.warn(_("updated to hidden changeset %s\n") % ctxstr)
+            ui.warn(_(b"updated to hidden changeset %s\n") % ctxstr)
 
             if ctx.obsolete():
                 obsfatemsg = obsutil._getfilteredreason(repo, ctxstr, ctx)
-                ui.warn("(%s)\n" % obsfatemsg)
+                ui.warn(b"(%s)\n" % obsfatemsg)
         return ret
 
-@command('verify',
-         [('', 'full', False, 'perform more checks (EXPERIMENTAL)')],
-         helpcategory=command.CATEGORY_MAINTENANCE)
+
+@command(
+    b'verify',
+    [(b'', b'full', False, b'perform more checks (EXPERIMENTAL)')],
+    helpcategory=command.CATEGORY_MAINTENANCE,
+)
 def verify(ui, repo, **opts):
     """verify the integrity of the repository
 
@@ -6370,13 +7721,18 @@
     opts = pycompat.byteskwargs(opts)
 
     level = None
-    if opts['full']:
+    if opts[b'full']:
         level = verifymod.VERIFY_FULL
     return hg.verify(repo, level)
 
+
 @command(
-    'version', [] + formatteropts, helpcategory=command.CATEGORY_HELP,
-    norepo=True, intents={INTENT_READONLY})
+    b'version',
+    [] + formatteropts,
+    helpcategory=command.CATEGORY_HELP,
+    norepo=True,
+    intents={INTENT_READONLY},
+)
 def version_(ui, **opts):
     """output version and copyright information
 
@@ -6397,23 +7753,24 @@
     """
     opts = pycompat.byteskwargs(opts)
     if ui.verbose:
-        ui.pager('version')
-    fm = ui.formatter("version", opts)
+        ui.pager(b'version')
+    fm = ui.formatter(b"version", opts)
     fm.startitem()
-    fm.write("ver", _("Mercurial Distributed SCM (version %s)\n"),
-             util.version())
+    fm.write(
+        b"ver", _(b"Mercurial Distributed SCM (version %s)\n"), util.version()
+    )
     license = _(
-        "(see https://mercurial-scm.org for more information)\n"
-        "\nCopyright (C) 2005-2019 Matt Mackall and others\n"
-        "This is free software; see the source for copying conditions. "
-        "There is NO\nwarranty; "
-        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
+        b"(see https://mercurial-scm.org for more information)\n"
+        b"\nCopyright (C) 2005-2019 Matt Mackall and others\n"
+        b"This is free software; see the source for copying conditions. "
+        b"There is NO\nwarranty; "
+        b"not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
     )
     if not ui.quiet:
         fm.plain(license)
 
     if ui.verbose:
-        fm.plain(_("\nEnabled extensions:\n\n"))
+        fm.plain(_(b"\nEnabled extensions:\n\n"))
     # format names and versions into columns
     names = []
     vers = []
@@ -6422,27 +7779,30 @@
         names.append(name)
         vers.append(extensions.moduleversion(module) or None)
         isinternals.append(extensions.ismoduleinternal(module))
-    fn = fm.nested("extensions", tmpl='{name}\n')
+    fn = fm.nested(b"extensions", tmpl=b'{name}\n')
     if names:
-        namefmt = "  %%-%ds  " % max(len(n) for n in names)
-        places = [_("external"), _("internal")]
+        namefmt = b"  %%-%ds  " % max(len(n) for n in names)
+        places = [_(b"external"), _(b"internal")]
         for n, v, p in zip(names, vers, isinternals):
             fn.startitem()
-            fn.condwrite(ui.verbose, "name", namefmt, n)
+            fn.condwrite(ui.verbose, b"name", namefmt, n)
             if ui.verbose:
-                fn.plain("%s  " % places[p])
+                fn.plain(b"%s  " % places[p])
             fn.data(bundled=p)
-            fn.condwrite(ui.verbose and v, "ver", "%s", v)
+            fn.condwrite(ui.verbose and v, b"ver", b"%s", v)
             if ui.verbose:
-                fn.plain("\n")
+                fn.plain(b"\n")
     fn.end()
     fm.end()
 
+
 def loadcmdtable(ui, name, cmdtable):
     """Load command functions from specified cmdtable
     """
     overrides = [cmd for cmd in cmdtable if cmd in table]
     if overrides:
-        ui.warn(_("extension '%s' overrides commands: %s\n")
-                % (name, " ".join(overrides)))
+        ui.warn(
+            _(b"extension '%s' overrides commands: %s\n")
+            % (name, b" ".join(overrides))
+        )
     table.update(cmdtable)
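
The @command registrations above all share one option-tuple shape: short
flag, long name, default, help text, and an optional value label, now
spelled as bytes literals throughout. A minimal standalone sketch of that
layout (the command decorator and table below are illustrative stand-ins,
not Mercurial's actual registrar machinery)::

    # toy command table mirroring the option-tuple layout used above
    table = {}

    def command(name, options, synopsis):
        def register(func):
            table[name] = (func, options, synopsis)
            return func
        return register

    @command(
        b'greet',
        [
            (b'l', b'loud', None, b'shout the greeting'),     # boolean flag
            (b'n', b'name', b'', b'name to greet', b'NAME'),  # takes a value
        ],
        b'[-l] [-n NAME]',
    )
    def greet(ui, repo, **opts):
        pass
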
--- a/mercurial/commandserver.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/commandserver.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,11 +18,13 @@
 
 try:
     import selectors
+
     selectors.BaseSelector
 except ImportError:
     from .thirdparty import selectors2 as selectors
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     encoding,
     error,
@@ -37,6 +39,7 @@
     procutil,
 )
 
+
 class channeledoutput(object):
     """
     Write data to out in the following format:
@@ -44,19 +47,20 @@
     data length (unsigned int),
     data
     """
+
     def __init__(self, out, channel):
         self.out = out
         self.channel = channel
 
     @property
     def name(self):
-        return '<%c-channel>' % self.channel
+        return b'<%c-channel>' % self.channel
 
     def write(self, data):
         if not data:
             return
         # single write() to guarantee the same atomicity as the underlying file
-        self.out.write(struct.pack('>cI', self.channel, len(data)) + data)
+        self.out.write(struct.pack(b'>cI', self.channel, len(data)) + data)
         self.out.flush()
 
     def __getattr__(self, attr):
@@ -64,6 +68,7 @@
             raise AttributeError(attr)
         return getattr(self.out, attr)
 
+
 class channeledmessage(object):
     """
     Write encoded message and metadata to out in the following format:
@@ -92,6 +97,7 @@
     def __getattr__(self, attr):
         return getattr(self._cout, attr)
 
+
 class channeledinput(object):
     """
     Read data from in_.
@@ -114,7 +120,7 @@
 
     @property
     def name(self):
-        return '<%c-channel>' % self.channel
+        return b'<%c-channel>' % self.channel
 
     def read(self, size=-1):
         if size < 0:
@@ -133,34 +139,34 @@
 
     def _read(self, size, channel):
         if not size:
-            return ''
+            return b''
         assert size > 0
 
         # tell the client we need at most size bytes
-        self.out.write(struct.pack('>cI', channel, size))
+        self.out.write(struct.pack(b'>cI', channel, size))
         self.out.flush()
 
         length = self.in_.read(4)
-        length = struct.unpack('>I', length)[0]
+        length = struct.unpack(b'>I', length)[0]
         if not length:
-            return ''
+            return b''
         else:
             return self.in_.read(length)
 
     def readline(self, size=-1):
         if size < 0:
             size = self.maxchunksize
-            s = self._read(size, 'L')
+            s = self._read(size, b'L')
             buf = s
             # keep asking for more until there's either no more or
             # we got a full line
-            while s and s[-1] != '\n':
-                s = self._read(size, 'L')
+            while s and s[-1] != b'\n':
+                s = self._read(size, b'L')
                 buf += s
 
             return buf
         else:
-            return self._read(size, 'L')
+            return self._read(size, b'L')
 
     def __iter__(self):
         return self
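
The docstrings above describe the command server's framing, which this
reformatting leaves unchanged: every output frame is a one-byte channel
identifier, a big-endian unsigned 32-bit length, then the payload, and an
input request sends the same header to the client and reads a 4-byte
length plus data back. A self-contained round trip of that framing, using
only the standard library::

    import io
    import struct

    def encodeframe(channel, data):
        # channel is a single byte, e.g. b'o' (output) or b'e' (error)
        return struct.pack(b'>cI', channel, len(data)) + data

    def decodeframe(stream):
        channel, length = struct.unpack(b'>cI', stream.read(5))
        return channel, stream.read(length)

    frame = io.BytesIO(encodeframe(b'o', b'hello\n'))
    assert decodeframe(frame) == (b'o', b'hello\n')
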
@@ -178,10 +184,12 @@
             raise AttributeError(attr)
         return getattr(self.in_, attr)
 
+
 _messageencoders = {
     b'cbor': lambda v: b''.join(cborutil.streamencode(v)),
 }
 
+
 def _selectmessageencoder(ui):
     # experimental config: cmdserver.message-encodings
     encnames = ui.configlist(b'cmdserver', b'message-encodings')
@@ -189,14 +197,17 @@
         f = _messageencoders.get(n)
         if f:
             return n, f
-    raise error.Abort(b'no supported message encodings: %s'
-                      % b' '.join(encnames))
+    raise error.Abort(
+        b'no supported message encodings: %s' % b' '.join(encnames)
+    )
+
 
 class server(object):
     """
     Listens for commands on fin, runs them and writes the output on a channel
     based stream to fout.
     """
+
     def __init__(self, ui, repo, fin, fout, prereposetups=None):
         self.cwd = encoding.getcwd()
 
@@ -211,11 +222,11 @@
             self.repo = self.repoui = None
         self._prereposetups = prereposetups
 
-        self.cdebug = channeledoutput(fout, 'd')
-        self.cerr = channeledoutput(fout, 'e')
-        self.cout = channeledoutput(fout, 'o')
-        self.cin = channeledinput(fin, fout, 'I')
-        self.cresult = channeledoutput(fout, 'r')
+        self.cdebug = channeledoutput(fout, b'd')
+        self.cerr = channeledoutput(fout, b'e')
+        self.cout = channeledoutput(fout, b'o')
+        self.cin = channeledinput(fin, fout, b'I')
+        self.cresult = channeledoutput(fout, b'r')
 
         if self.ui.config(b'cmdserver', b'log') == b'-':
             # switch log stream of server's ui to the 'd' (debug) channel
@@ -238,7 +249,7 @@
 
     def _read(self, size):
         if not size:
-            return ''
+            return b''
 
         data = self.client.read(size)
 
@@ -254,16 +265,16 @@
         format:
         data length (uint32), data
         """
-        length = struct.unpack('>I', self._read(4))[0]
+        length = struct.unpack(b'>I', self._read(4))[0]
         if not length:
-            return ''
+            return b''
         return self._read(length)
 
     def _readlist(self):
         """read a list of NULL separated strings from the channel"""
         s = self._readstr()
         if s:
-            return s.split('\0')
+            return s.split(b'\0')
         else:
             return []
 
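_readstr and _readlist above fix the shape of everything the server
receives: a big-endian uint32 length prefix followed by the data, with
argument lists flattened into a single NUL-separated string. A sketch of
the matching client-side encoding (encodestr and encodelist are
illustrative names, not part of Mercurial's API)::

    import struct

    def encodestr(data):
        # uint32 big-endian length prefix, then the bytes themselves
        return struct.pack(b'>I', len(data)) + data

    def encodelist(args):
        # a list travels as one length-prefixed, NUL-joined string
        return encodestr(b'\0'.join(args))

    payload = encodelist([b'log', b'-l', b'1'])
    assert payload == b'\x00\x00\x00\x08log\x00-l\x001'
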
@@ -282,7 +293,7 @@
             self.repo.baseui = copiedui
             # clone ui without using ui.copy because this is protected
             repoui = self.repoui.__class__(self.repoui)
-            repoui.copy = copiedui.copy # redo copy protection
+            repoui.copy = copiedui.copy  # redo copy protection
             uis.append(repoui)
             self.repo.ui = self.repo.dirstate._ui = repoui
             self.repo.invalidateall()
@@ -292,19 +303,26 @@
             # any kind of interaction must use server channels, but chg may
             # replace channels by fully functional tty files. so nontty is
             # enforced only if cin is a channel.
-            if not util.safehasattr(self.cin, 'fileno'):
-                ui.setconfig('ui', 'nontty', 'true', 'commandserver')
+            if not util.safehasattr(self.cin, b'fileno'):
+                ui.setconfig(b'ui', b'nontty', b'true', b'commandserver')
 
-        req = dispatch.request(args[:], copiedui, self.repo, self.cin,
-                               self.cout, self.cerr, self.cmsg,
-                               prereposetups=self._prereposetups)
+        req = dispatch.request(
+            args[:],
+            copiedui,
+            self.repo,
+            self.cin,
+            self.cout,
+            self.cerr,
+            self.cmsg,
+            prereposetups=self._prereposetups,
+        )
 
         try:
             ret = dispatch.dispatch(req) & 255
-            self.cresult.write(struct.pack('>i', int(ret)))
+            self.cresult.write(struct.pack(b'>i', int(ret)))
         finally:
             # restore old cwd
-            if '--cwd' in args:
+            if b'--cwd' in args:
                 os.chdir(self.cwd)
 
     def getencoding(self):
@@ -320,24 +338,23 @@
             else:
                 # clients are expected to check what commands are supported by
                 # looking at the servers capabilities
-                raise error.Abort(_('unknown command %s') % cmd)
+                raise error.Abort(_(b'unknown command %s') % cmd)
 
-        return cmd != ''
+        return cmd != b''
 
-    capabilities = {'runcommand': runcommand,
-                    'getencoding': getencoding}
+    capabilities = {b'runcommand': runcommand, b'getencoding': getencoding}
 
     def serve(self):
-        hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
-        hellomsg += '\n'
-        hellomsg += 'encoding: ' + encoding.encoding
-        hellomsg += '\n'
+        hellomsg = b'capabilities: ' + b' '.join(sorted(self.capabilities))
+        hellomsg += b'\n'
+        hellomsg += b'encoding: ' + encoding.encoding
+        hellomsg += b'\n'
         if self.cmsg:
-            hellomsg += 'message-encoding: %s\n' % self.cmsg.encoding
-        hellomsg += 'pid: %d' % procutil.getpid()
-        if util.safehasattr(os, 'getpgid'):
-            hellomsg += '\n'
-            hellomsg += 'pgid: %d' % os.getpgid(0)
+            hellomsg += b'message-encoding: %s\n' % self.cmsg.encoding
+        hellomsg += b'pid: %d' % procutil.getpid()
+        if util.safehasattr(os, b'getpgid'):
+            hellomsg += b'\n'
+            hellomsg += b'pgid: %d' % os.getpgid(0)
 
         # write the hello msg in -one- chunk
         self.cout.write(hellomsg)
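
The hello message assembled in serve() above is plain 'key: value' lines
(capabilities, encoding, optionally message-encoding, pid, and pgid where
available), written to the client in a single chunk. A hedged sketch of
how a client might parse that payload once the frame is decoded::

    def parsehello(payload):
        # payload is the hello frame body, e.g.
        # b'capabilities: getencoding runcommand\nencoding: UTF-8\npid: 42'
        fields = {}
        for line in payload.split(b'\n'):
            key, value = line.split(b': ', 1)
            fields[key] = value
        return fields

    hello = parsehello(
        b'capabilities: getencoding runcommand\nencoding: UTF-8\npid: 42'
    )
    assert b'runcommand' in hello[b'capabilities'].split(b' ')
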
@@ -352,6 +369,7 @@
 
         return 0
 
+
 def setuplogging(ui, repo=None, fp=None):
     """Set up server logging facility
 
@@ -377,8 +395,13 @@
         # developer config: cmdserver.max-log-size
         maxsize = ui.configbytes(b'cmdserver', b'max-log-size')
         vfs = vfsmod.vfs(os.path.dirname(logpath))
-        logger = loggingutil.filelogger(vfs, os.path.basename(logpath), tracked,
-                                        maxfiles=maxfiles, maxsize=maxsize)
+        logger = loggingutil.filelogger(
+            vfs,
+            os.path.basename(logpath),
+            tracked,
+            maxfiles=maxfiles,
+            maxsize=maxsize,
+        )
 
     targetuis = {ui}
     if repo:
@@ -387,6 +410,7 @@
     for u in targetuis:
         u.setlogger(b'cmdserver', logger)
 
+
 class pipeservice(object):
     def __init__(self, ui, repo, opts):
         self.ui = ui
@@ -406,6 +430,7 @@
             finally:
                 sv.cleanup()
 
+
 def _initworkerprocess():
     # use a different process group from the master process, in order to:
     # 1. make the current process group no longer "orphaned" (because the
@@ -423,6 +448,7 @@
     # same state inherited from parent.
     random.seed()
 
+
 def _serverequest(ui, repo, conn, createcmdserver, prereposetups):
     fin = conn.makefile(r'rb')
     fout = conn.makefile(r'wb')
@@ -434,7 +460,7 @@
         # handle exceptions that may be raised by command server. most of
         # known exceptions are caught by dispatch.
         except error.Abort as inst:
-            ui.error(_('abort: %s\n') % inst)
+            ui.error(_(b'abort: %s\n') % inst)
         except IOError as inst:
             if inst.errno != errno.EPIPE:
                 raise
@@ -442,13 +468,13 @@
             pass
         finally:
             sv.cleanup()
-    except: # re-raises
+    except:  # re-raises
         # also write traceback to error channel. otherwise client cannot
         # see it because it is written to server's stderr by default.
         if sv:
             cerr = sv.cerr
         else:
-            cerr = channeledoutput(fout, 'e')
+            cerr = channeledoutput(fout, b'e')
         cerr.write(encoding.strtolocal(traceback.format_exc()))
         raise
     finally:
@@ -459,6 +485,7 @@
             if inst.errno != errno.EPIPE:
                 raise
 
+
 class unixservicehandler(object):
     """Set of pluggable operations for unix-mode services
 
@@ -474,7 +501,7 @@
     def bindsocket(self, sock, address):
         util.bindunixsocket(sock, address)
         sock.listen(socket.SOMAXCONN)
-        self.ui.status(_('listening at %s\n') % address)
+        self.ui.status(_(b'listening at %s\n') % address)
         self.ui.flush()  # avoid buffering of status message
 
     def unlinksocket(self, address):
@@ -492,6 +519,7 @@
         serves for the current connection"""
         return server(self.ui, repo, fin, fout, prereposetups)
 
+
 class unixforkingservice(object):
     """
     Listens on unix domain socket and forks server per connection
@@ -500,11 +528,11 @@
     def __init__(self, ui, repo, opts, handler=None):
         self.ui = ui
         self.repo = repo
-        self.address = opts['address']
-        if not util.safehasattr(socket, 'AF_UNIX'):
-            raise error.Abort(_('unsupported platform'))
+        self.address = opts[b'address']
+        if not util.safehasattr(socket, b'AF_UNIX'):
+            raise error.Abort(_(b'unsupported platform'))
         if not self.address:
-            raise error.Abort(_('no socket path specified with --address'))
+            raise error.Abort(_(b'no socket path specified with --address'))
         self._servicehandler = handler or unixservicehandler(ui)
         self._sock = None
         self._mainipc = None
@@ -515,7 +543,7 @@
         # experimental config: cmdserver.max-repo-cache
         maxlen = ui.configint(b'cmdserver', b'max-repo-cache')
         if maxlen < 0:
-            raise error.Abort(_('negative max-repo-cache size not allowed'))
+            raise error.Abort(_(b'negative max-repo-cache size not allowed'))
         self._repoloader = repocache.repoloader(ui, maxlen)
 
     def init(self):
@@ -526,7 +554,7 @@
         o = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
         self._mainipc, self._workeripc = o
         self._servicehandler.bindsocket(self._sock, self.address)
-        if util.safehasattr(procutil, 'unblocksignal'):
+        if util.safehasattr(procutil, b'unblocksignal'):
             procutil.unblocksignal(signal.SIGCHLD)
         o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
         self._oldsigchldhandler = o
@@ -558,10 +586,12 @@
         exiting = False
         h = self._servicehandler
         selector = selectors.DefaultSelector()
-        selector.register(self._sock, selectors.EVENT_READ,
-                          self._acceptnewconnection)
-        selector.register(self._mainipc, selectors.EVENT_READ,
-                          self._handlemainipc)
+        selector.register(
+            self._sock, selectors.EVENT_READ, self._acceptnewconnection
+        )
+        selector.register(
+            self._mainipc, selectors.EVENT_READ, self._handlemainipc
+        )
         while True:
             if not exiting and h.shouldexit():
                 # clients can no longer connect() to the domain socket, so
@@ -605,8 +635,9 @@
         pid = os.fork()
         if pid:
             try:
-                self.ui.log(b'cmdserver', b'forked worker process (pid=%d)\n',
-                            pid)
+                self.ui.log(
+                    b'cmdserver', b'forked worker process (pid=%d)\n', pid
+                )
                 self._workerpids.add(pid)
                 h.newconnection()
             finally:
@@ -662,8 +693,13 @@
         _initworkerprocess()
         h = self._servicehandler
         try:
-            _serverequest(self.ui, self.repo, conn, h.createcmdserver,
-                          prereposetups=[self._reposetup])
+            _serverequest(
+                self.ui,
+                self.repo,
+                conn,
+                h.createcmdserver,
+                prereposetups=[self._reposetup],
+            )
         finally:
             gc.collect()  # trigger __del__ since worker process uses os._exit
 
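unixforkingservice above forks one worker per accepted connection: the
parent records the child pid and returns to its select loop, while the
child reseeds its RNG, serves the request, and exits without ever
re-entering the loop. A much-reduced sketch of that pattern (pid
tracking, SIGCHLD handling, and the IPC socketpair are omitted)::

    import os
    import random

    def serveforever(sock, handle):
        while True:
            conn, _addr = sock.accept()
            pid = os.fork()
            if pid:
                conn.close()   # parent: the child owns the connection now
                continue
            try:
                random.seed()  # don't share RNG state with the parent
                handle(conn)
            finally:
                os._exit(0)    # worker must not fall back into accept()
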
@@ -677,8 +713,9 @@
                 try:
                     self._cmdserveripc.send(self.root)
                 except socket.error:
-                    self.ui.log(b'cmdserver',
-                                b'failed to send repo root to master\n')
+                    self.ui.log(
+                        b'cmdserver', b'failed to send repo root to master\n'
+                    )
 
         repo.__class__ = unixcmdserverrepo
         repo._cmdserveripc = self._workeripc
--- a/mercurial/config.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/config.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,12 +11,15 @@
 import os
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
+    encoding,
     error,
     pycompat,
     util,
 )
 
+
 class config(object):
     def __init__(self, data=None, includepaths=None):
         self._data = {}
@@ -28,17 +31,23 @@
             self._source = data._source.copy()
         else:
             self._source = util.cowdict()
+
     def copy(self):
         return config(self)
+
     def __contains__(self, section):
         return section in self._data
+
     def hasitem(self, section, item):
         return item in self._data.get(section, {})
+
     def __getitem__(self, section):
         return self._data.get(section, {})
+
     def __iter__(self):
         for d in self.sections():
             yield d
+
     def update(self, src):
         self._source = self._source.preparewrite()
         for s, n in src._unset:
@@ -55,6 +64,7 @@
                 self._data[s] = util.cowsortdict()
             self._data[s].update(src._data[s])
         self._source.update(src._source)
+
     def get(self, section, item, default=None):
         return self._data.get(section, {}).get(item, default)
 
@@ -71,19 +81,25 @@
             return (section, item)
 
     def source(self, section, item):
-        return self._source.get((section, item), "")
+        return self._source.get((section, item), b"")
+
     def sections(self):
         return sorted(self._data.keys())
+
     def items(self, section):
-        return list(self._data.get(section, {}).iteritems())
-    def set(self, section, item, value, source=""):
+        return list(pycompat.iteritems(self._data.get(section, {})))
+
+    def set(self, section, item, value, source=b""):
         if pycompat.ispy3:
-            assert not isinstance(section, str), (
-                'config section may not be unicode strings on Python 3')
-            assert not isinstance(item, str), (
-                'config item may not be unicode strings on Python 3')
-            assert not isinstance(value, str), (
-                'config values may not be unicode strings on Python 3')
+            assert not isinstance(
+                section, str
+            ), b'config section may not be unicode strings on Python 3'
+            assert not isinstance(
+                item, str
+            ), b'config item may not be unicode strings on Python 3'
+            assert not isinstance(
+                value, str
+            ), b'config values may not be unicode strings on Python 3'
         if section not in self:
             self._data[section] = util.cowsortdict()
         else:
@@ -117,7 +133,7 @@
         commentre = util.re.compile(br'(;|#)')
         unsetre = util.re.compile(br'%unset\s+(\S+)')
         includere = util.re.compile(br'%include\s+(\S|\S.*\S)\s*$')
-        section = ""
+        section = b""
         item = None
         line = 0
         cont = False
@@ -127,7 +143,7 @@
 
         for l in data.splitlines(True):
             line += 1
-            if line == 1 and l.startswith('\xef\xbb\xbf'):
+            if line == 1 and l.startswith(b'\xef\xbb\xbf'):
                 # Someone set us up the BOM
                 l = l[3:]
             if cont:
@@ -137,8 +153,8 @@
                 if m:
                     if sections and section not in sections:
                         continue
-                    v = self.get(section, item) + "\n" + m.group(1)
-                    self.set(section, item, v, "%s:%d" % (src, line))
+                    v = self.get(section, item) + b"\n" + m.group(1)
+                    self.set(section, item, v, b"%s:%d" % (src, line))
                     continue
                 item = None
                 cont = False
@@ -156,9 +172,11 @@
                         break
                     except IOError as inst:
                         if inst.errno != errno.ENOENT:
-                            raise error.ParseError(_("cannot include %s (%s)")
-                                                   % (inc, inst.strerror),
-                                                   "%s:%d" % (src, line))
+                            raise error.ParseError(
+                                _(b"cannot include %s (%s)")
+                                % (inc, encoding.strtolocal(inst.strerror)),
+                                b"%s:%d" % (src, line),
+                            )
                 continue
             if emptyre.match(l):
                 continue
@@ -176,7 +194,7 @@
                 cont = True
                 if sections and section not in sections:
                     continue
-                self.set(section, item, m.group(2), "%s:%d" % (src, line))
+                self.set(section, item, m.group(2), b"%s:%d" % (src, line))
                 continue
             m = unsetre.match(l)
             if m:
@@ -189,16 +207,21 @@
                 self._unset.append((section, name))
                 continue
 
-            raise error.ParseError(l.rstrip(), ("%s:%d" % (src, line)))
+            raise error.ParseError(l.rstrip(), (b"%s:%d" % (src, line)))
 
     def read(self, path, fp=None, sections=None, remap=None):
         if not fp:
-            fp = util.posixfile(path, 'rb')
-        assert getattr(fp, 'mode', r'rb') == r'rb', (
-            'config files must be opened in binary mode, got fp=%r mode=%r' % (
-                fp, fp.mode))
-        self.parse(path, fp.read(),
-                   sections=sections, remap=remap, include=self.read)
+            fp = util.posixfile(path, b'rb')
+        assert (
+            getattr(fp, 'mode', r'rb') == r'rb'
+        ), b'config files must be opened in binary mode, got fp=%r mode=%r' % (
+            fp,
+            fp.mode,
+        )
+        self.parse(
+            path, fp.read(), sections=sections, remap=remap, include=self.read
+        )
+
 
 def parselist(value):
     """parse a configuration value as a list of comma/space separated strings
@@ -209,76 +232,82 @@
 
     def _parse_plain(parts, s, offset):
         whitespace = False
-        while offset < len(s) and (s[offset:offset + 1].isspace()
-                                   or s[offset:offset + 1] == ','):
+        while offset < len(s) and (
+            s[offset : offset + 1].isspace() or s[offset : offset + 1] == b','
+        ):
             whitespace = True
             offset += 1
         if offset >= len(s):
             return None, parts, offset
         if whitespace:
-            parts.append('')
-        if s[offset:offset + 1] == '"' and not parts[-1]:
+            parts.append(b'')
+        if s[offset : offset + 1] == b'"' and not parts[-1]:
             return _parse_quote, parts, offset + 1
-        elif s[offset:offset + 1] == '"' and parts[-1][-1:] == '\\':
-            parts[-1] = parts[-1][:-1] + s[offset:offset + 1]
+        elif s[offset : offset + 1] == b'"' and parts[-1][-1:] == b'\\':
+            parts[-1] = parts[-1][:-1] + s[offset : offset + 1]
             return _parse_plain, parts, offset + 1
-        parts[-1] += s[offset:offset + 1]
+        parts[-1] += s[offset : offset + 1]
         return _parse_plain, parts, offset + 1
 
     def _parse_quote(parts, s, offset):
-        if offset < len(s) and s[offset:offset + 1] == '"': # ""
-            parts.append('')
+        if offset < len(s) and s[offset : offset + 1] == b'"':  # ""
+            parts.append(b'')
             offset += 1
-            while offset < len(s) and (s[offset:offset + 1].isspace() or
-                    s[offset:offset + 1] == ','):
+            while offset < len(s) and (
+                s[offset : offset + 1].isspace()
+                or s[offset : offset + 1] == b','
+            ):
                 offset += 1
             return _parse_plain, parts, offset
 
-        while offset < len(s) and s[offset:offset + 1] != '"':
-            if (s[offset:offset + 1] == '\\' and offset + 1 < len(s)
-                    and s[offset + 1:offset + 2] == '"'):
+        while offset < len(s) and s[offset : offset + 1] != b'"':
+            if (
+                s[offset : offset + 1] == b'\\'
+                and offset + 1 < len(s)
+                and s[offset + 1 : offset + 2] == b'"'
+            ):
                 offset += 1
-                parts[-1] += '"'
+                parts[-1] += b'"'
             else:
-                parts[-1] += s[offset:offset + 1]
+                parts[-1] += s[offset : offset + 1]
             offset += 1
 
         if offset >= len(s):
             real_parts = _configlist(parts[-1])
             if not real_parts:
-                parts[-1] = '"'
+                parts[-1] = b'"'
             else:
-                real_parts[0] = '"' + real_parts[0]
+                real_parts[0] = b'"' + real_parts[0]
                 parts = parts[:-1]
                 parts.extend(real_parts)
             return None, parts, offset
 
         offset += 1
-        while offset < len(s) and s[offset:offset + 1] in [' ', ',']:
+        while offset < len(s) and s[offset : offset + 1] in [b' ', b',']:
             offset += 1
 
         if offset < len(s):
-            if offset + 1 == len(s) and s[offset:offset + 1] == '"':
-                parts[-1] += '"'
+            if offset + 1 == len(s) and s[offset : offset + 1] == b'"':
+                parts[-1] += b'"'
                 offset += 1
             else:
-                parts.append('')
+                parts.append(b'')
         else:
             return None, parts, offset
 
         return _parse_plain, parts, offset
 
     def _configlist(s):
-        s = s.rstrip(' ,')
+        s = s.rstrip(b' ,')
         if not s:
             return []
-        parser, parts, offset = _parse_plain, [''], 0
+        parser, parts, offset = _parse_plain, [b''], 0
         while parser:
             parser, parts, offset = parser(parts, s, offset)
         return parts
 
     if value is not None and isinstance(value, bytes):
-        result = _configlist(value.lstrip(' ,\n'))
+        result = _configlist(value.lstrip(b' ,\n'))
     else:
         result = value
     return result or []
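
parselist above splits a configuration value on commas and whitespace
while honoring double quotes, so a quoted item may contain separators and
a backslash escapes an embedded quote. Its behaviour on a few
representative inputs, assuming the function as reformatted above::

    from mercurial import config

    assert config.parselist(b'this,is "a small" ,test') == [
        b'this',
        b'is',
        b'a small',
        b'test',
    ]
    assert config.parselist(b'') == []
    assert config.parselist(None) == []
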
--- a/mercurial/configitems.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/configitems.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,6 +15,7 @@
     error,
 )
 
+
 def loadconfigtable(ui, extname, configtable):
     """update config item known to the ui with the extension ones"""
     for section, items in sorted(configtable.items()):
@@ -22,12 +23,13 @@
         knownkeys = set(knownitems)
         newkeys = set(items)
         for key in sorted(knownkeys & newkeys):
-            msg = "extension '%s' overwrite config item '%s.%s'"
+            msg = b"extension '%s' overwrites config item '%s.%s'"
             msg %= (extname, section, key)
-            ui.develwarn(msg, config='warn-config')
+            ui.develwarn(msg, config=b'warn-config')
 
         knownitems.update(items)
 
+
 class configitem(object):
     """represent a known config item
 
@@ -38,18 +40,28 @@
     :generic: this is a generic definition, match name using regular expression.
     """
 
-    def __init__(self, section, name, default=None, alias=(),
-                 generic=False, priority=0):
+    def __init__(
+        self,
+        section,
+        name,
+        default=None,
+        alias=(),
+        generic=False,
+        priority=0,
+        experimental=False,
+    ):
         self.section = section
         self.name = name
         self.default = default
         self.alias = list(alias)
         self.generic = generic
         self.priority = priority
+        self.experimental = experimental
         self._re = None
         if generic:
             self._re = re.compile(self.name)
 
+
 class itemregister(dict):
     """A specialized dictionary that can handle wild-card selection"""
 
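The hunk above threads a new experimental=False keyword through
configitem, so a registration can tag an option as experimental (used
below for items such as censor.policy and cmdserver.max-repo-cache). A
hypothetical registration mirroring the coreconfigitem calls that
follow::

    from mercurial.configitems import coreconfigitem

    # hypothetical items, mirroring the registration style used below
    coreconfigitem(
        b'myext', b'frobnicate', default=False, experimental=True,
    )
    coreconfigitem(
        b'myext', b'badges.*', default=None, generic=True,  # name is a regex
    )
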
@@ -90,1413 +102,1442 @@
 
         return None
 
+
 coreitems = {}
 
+
 def _register(configtable, *args, **kwargs):
     item = configitem(*args, **kwargs)
     section = configtable.setdefault(item.section, itemregister())
     if item.name in section:
-        msg = "duplicated config item registration for '%s.%s'"
+        msg = b"duplicated config item registration for '%s.%s'"
         raise error.ProgrammingError(msg % (item.section, item.name))
     section[item.name] = item
 
+
 # special value for case where the default is derived from other values
 dynamicdefault = object()
 
 # Registering actual config items
 
+
 def getitemregister(configtable):
     f = functools.partial(_register, configtable)
     # export pseudo enum as configitem.*
     f.dynamicdefault = dynamicdefault
     return f
 
+
 coreconfigitem = getitemregister(coreitems)
 
-def _registerdiffopts(section, configprefix=''):
-    coreconfigitem(section, configprefix + 'nodates',
-        default=False,
+
+def _registerdiffopts(section, configprefix=b''):
+    coreconfigitem(
+        section, configprefix + b'nodates', default=False,
     )
-    coreconfigitem(section, configprefix + 'showfunc',
-        default=False,
+    coreconfigitem(
+        section, configprefix + b'showfunc', default=False,
     )
-    coreconfigitem(section, configprefix + 'unified',
-        default=None,
+    coreconfigitem(
+        section, configprefix + b'unified', default=None,
     )
-    coreconfigitem(section, configprefix + 'git',
-        default=False,
+    coreconfigitem(
+        section, configprefix + b'git', default=False,
     )
-    coreconfigitem(section, configprefix + 'ignorews',
-        default=False,
+    coreconfigitem(
+        section, configprefix + b'ignorews', default=False,
     )
-    coreconfigitem(section, configprefix + 'ignorewsamount',
-        default=False,
+    coreconfigitem(
+        section, configprefix + b'ignorewsamount', default=False,
     )
-    coreconfigitem(section, configprefix + 'ignoreblanklines',
-        default=False,
+    coreconfigitem(
+        section, configprefix + b'ignoreblanklines', default=False,
     )
-    coreconfigitem(section, configprefix + 'ignorewseol',
-        default=False,
+    coreconfigitem(
+        section, configprefix + b'ignorewseol', default=False,
     )
-    coreconfigitem(section, configprefix + 'nobinary',
-        default=False,
+    coreconfigitem(
+        section, configprefix + b'nobinary', default=False,
     )
-    coreconfigitem(section, configprefix + 'noprefix',
-        default=False,
+    coreconfigitem(
+        section, configprefix + b'noprefix', default=False,
     )
-    coreconfigitem(section, configprefix + 'word-diff',
-        default=False,
+    coreconfigitem(
+        section, configprefix + b'word-diff', default=False,
     )
 
-coreconfigitem('alias', '.*',
-    default=dynamicdefault,
-    generic=True,
+
+coreconfigitem(
+    b'alias', b'.*', default=dynamicdefault, generic=True,
 )
-coreconfigitem('auth', 'cookiefile',
-    default=None,
+coreconfigitem(
+    b'auth', b'cookiefile', default=None,
 )
-_registerdiffopts(section='annotate')
+_registerdiffopts(section=b'annotate')
 # bookmarks.pushing: internal hack for discovery
-coreconfigitem('bookmarks', 'pushing',
-    default=list,
+coreconfigitem(
+    b'bookmarks', b'pushing', default=list,
 )
 # bundle.mainreporoot: internal hack for bundlerepo
-coreconfigitem('bundle', 'mainreporoot',
-    default='',
+coreconfigitem(
+    b'bundle', b'mainreporoot', default=b'',
+)
+coreconfigitem(
+    b'censor', b'policy', default=b'abort', experimental=True,
+)
+coreconfigitem(
+    b'chgserver', b'idletimeout', default=3600,
 )
-coreconfigitem('censor', 'policy',
-    default='abort',
+coreconfigitem(
+    b'chgserver', b'skiphash', default=False,
 )
-coreconfigitem('chgserver', 'idletimeout',
-    default=3600,
+coreconfigitem(
+    b'cmdserver', b'log', default=None,
+)
+coreconfigitem(
+    b'cmdserver', b'max-log-files', default=7,
 )
-coreconfigitem('chgserver', 'skiphash',
-    default=False,
+coreconfigitem(
+    b'cmdserver', b'max-log-size', default=b'1 MB',
 )
-coreconfigitem('cmdserver', 'log',
-    default=None,
+coreconfigitem(
+    b'cmdserver', b'max-repo-cache', default=0, experimental=True,
+)
+coreconfigitem(
+    b'cmdserver', b'message-encodings', default=list, experimental=True,
 )
-coreconfigitem('cmdserver', 'max-log-files',
-    default=7,
+coreconfigitem(
+    b'cmdserver',
+    b'track-log',
+    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
 )
-coreconfigitem('cmdserver', 'max-log-size',
-    default='1 MB',
+coreconfigitem(
+    b'color', b'.*', default=None, generic=True,
+)
+coreconfigitem(
+    b'color', b'mode', default=b'auto',
+)
+coreconfigitem(
+    b'color', b'pagermode', default=dynamicdefault,
 )
-coreconfigitem('cmdserver', 'max-repo-cache',
-    default=0,
+_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
+coreconfigitem(
+    b'commands', b'commit.post-status', default=False,
 )
-coreconfigitem('cmdserver', 'message-encodings',
-    default=list,
+coreconfigitem(
+    b'commands', b'grep.all-files', default=False, experimental=True,
+)
+coreconfigitem(
+    b'commands', b'push.require-revs', default=False,
 )
-coreconfigitem('cmdserver', 'track-log',
-    default=lambda: ['chgserver', 'cmdserver', 'repocache'],
+coreconfigitem(
+    b'commands', b'resolve.confirm', default=False,
+)
+coreconfigitem(
+    b'commands', b'resolve.explicit-re-merge', default=False,
 )
-coreconfigitem('color', '.*',
-    default=None,
-    generic=True,
+coreconfigitem(
+    b'commands', b'resolve.mark-check', default=b'none',
+)
+_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
+coreconfigitem(
+    b'commands', b'show.aliasprefix', default=list,
 )
-coreconfigitem('color', 'mode',
-    default='auto',
+coreconfigitem(
+    b'commands', b'status.relative', default=False,
+)
+coreconfigitem(
+    b'commands', b'status.skipstates', default=[], experimental=True,
 )
-coreconfigitem('color', 'pagermode',
-    default=dynamicdefault,
+coreconfigitem(
+    b'commands', b'status.terse', default=b'',
+)
+coreconfigitem(
+    b'commands', b'status.verbose', default=False,
 )
-_registerdiffopts(section='commands', configprefix='commit.interactive.')
-coreconfigitem('commands', 'commit.post-status',
-    default=False,
+coreconfigitem(
+    b'commands', b'update.check', default=None,
+)
+coreconfigitem(
+    b'commands', b'update.requiredest', default=False,
 )
-coreconfigitem('commands', 'grep.all-files',
-    default=False,
+coreconfigitem(
+    b'committemplate', b'.*', default=None, generic=True,
+)
+coreconfigitem(
+    b'convert', b'bzr.saverev', default=True,
 )
-coreconfigitem('commands', 'resolve.confirm',
-    default=False,
+coreconfigitem(
+    b'convert', b'cvsps.cache', default=True,
+)
+coreconfigitem(
+    b'convert', b'cvsps.fuzz', default=60,
+)
+coreconfigitem(
+    b'convert', b'cvsps.logencoding', default=None,
 )
-coreconfigitem('commands', 'resolve.explicit-re-merge',
-    default=False,
+coreconfigitem(
+    b'convert', b'cvsps.mergefrom', default=None,
+)
+coreconfigitem(
+    b'convert', b'cvsps.mergeto', default=None,
 )
-coreconfigitem('commands', 'resolve.mark-check',
-    default='none',
+coreconfigitem(
+    b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
+)
+coreconfigitem(
+    b'convert', b'git.extrakeys', default=list,
 )
-_registerdiffopts(section='commands', configprefix='revert.interactive.')
-coreconfigitem('commands', 'show.aliasprefix',
-    default=list,
+coreconfigitem(
+    b'convert', b'git.findcopiesharder', default=False,
+)
+coreconfigitem(
+    b'convert', b'git.remoteprefix', default=b'remote',
+)
+coreconfigitem(
+    b'convert', b'git.renamelimit', default=400,
+)
+coreconfigitem(
+    b'convert', b'git.saverev', default=True,
 )
-coreconfigitem('commands', 'status.relative',
-    default=False,
+coreconfigitem(
+    b'convert', b'git.similarity', default=50,
+)
+coreconfigitem(
+    b'convert', b'git.skipsubmodules', default=False,
 )
-coreconfigitem('commands', 'status.skipstates',
-    default=[],
+coreconfigitem(
+    b'convert', b'hg.clonebranches', default=False,
 )
-coreconfigitem('commands', 'status.terse',
-    default='',
+coreconfigitem(
+    b'convert', b'hg.ignoreerrors', default=False,
 )
-coreconfigitem('commands', 'status.verbose',
-    default=False,
+coreconfigitem(
+    b'convert', b'hg.preserve-hash', default=False,
+)
+coreconfigitem(
+    b'convert', b'hg.revs', default=None,
+)
+coreconfigitem(
+    b'convert', b'hg.saverev', default=False,
 )
-coreconfigitem('commands', 'update.check',
-    default=None,
+coreconfigitem(
+    b'convert', b'hg.sourcename', default=None,
+)
+coreconfigitem(
+    b'convert', b'hg.startrev', default=None,
 )
-coreconfigitem('commands', 'update.requiredest',
-    default=False,
+coreconfigitem(
+    b'convert', b'hg.tagsbranch', default=b'default',
+)
+coreconfigitem(
+    b'convert', b'hg.usebranchnames', default=True,
 )
-coreconfigitem('committemplate', '.*',
-    default=None,
-    generic=True,
+coreconfigitem(
+    b'convert', b'ignoreancestorcheck', default=False, experimental=True,
+)
+coreconfigitem(
+    b'convert', b'localtimezone', default=False,
 )
-coreconfigitem('convert', 'bzr.saverev',
-    default=True,
+coreconfigitem(
+    b'convert', b'p4.encoding', default=dynamicdefault,
+)
+coreconfigitem(
+    b'convert', b'p4.startrev', default=0,
 )
-coreconfigitem('convert', 'cvsps.cache',
-    default=True,
+coreconfigitem(
+    b'convert', b'skiptags', default=False,
+)
+coreconfigitem(
+    b'convert', b'svn.debugsvnlog', default=True,
 )
-coreconfigitem('convert', 'cvsps.fuzz',
-    default=60,
+coreconfigitem(
+    b'convert', b'svn.trunk', default=None,
 )
-coreconfigitem('convert', 'cvsps.logencoding',
-    default=None,
+coreconfigitem(
+    b'convert', b'svn.tags', default=None,
 )
-coreconfigitem('convert', 'cvsps.mergefrom',
-    default=None,
+coreconfigitem(
+    b'convert', b'svn.branches', default=None,
 )
-coreconfigitem('convert', 'cvsps.mergeto',
-    default=None,
+coreconfigitem(
+    b'convert', b'svn.startrev', default=0,
+)
+coreconfigitem(
+    b'debug', b'dirstate.delaywrite', default=0,
 )
-coreconfigitem('convert', 'git.committeractions',
-    default=lambda: ['messagedifferent'],
+coreconfigitem(
+    b'defaults', b'.*', default=None, generic=True,
+)
+coreconfigitem(
+    b'devel', b'all-warnings', default=False,
 )
-coreconfigitem('convert', 'git.extrakeys',
-    default=list,
+coreconfigitem(
+    b'devel', b'bundle2.debug', default=False,
+)
+coreconfigitem(
+    b'devel', b'bundle.delta', default=b'',
 )
-coreconfigitem('convert', 'git.findcopiesharder',
-    default=False,
+coreconfigitem(
+    b'devel', b'cache-vfs', default=None,
+)
+coreconfigitem(
+    b'devel', b'check-locks', default=False,
 )
-coreconfigitem('convert', 'git.remoteprefix',
-    default='remote',
+coreconfigitem(
+    b'devel', b'check-relroot', default=False,
+)
+coreconfigitem(
+    b'devel', b'default-date', default=None,
 )
-coreconfigitem('convert', 'git.renamelimit',
-    default=400,
+coreconfigitem(
+    b'devel', b'deprec-warn', default=False,
+)
+coreconfigitem(
+    b'devel', b'disableloaddefaultcerts', default=False,
 )
-coreconfigitem('convert', 'git.saverev',
-    default=True,
+coreconfigitem(
+    b'devel', b'warn-empty-changegroup', default=False,
+)
+coreconfigitem(
+    b'devel', b'legacy.exchange', default=list,
 )
-coreconfigitem('convert', 'git.similarity',
-    default=50,
+coreconfigitem(
+    b'devel', b'servercafile', default=b'',
 )
-coreconfigitem('convert', 'git.skipsubmodules',
-    default=False,
+coreconfigitem(
+    b'devel', b'serverexactprotocol', default=b'',
+)
+coreconfigitem(
+    b'devel', b'serverrequirecert', default=False,
 )
-coreconfigitem('convert', 'hg.clonebranches',
-    default=False,
+coreconfigitem(
+    b'devel', b'strip-obsmarkers', default=True,
+)
+coreconfigitem(
+    b'devel', b'warn-config', default=None,
 )
-coreconfigitem('convert', 'hg.ignoreerrors',
-    default=False,
+coreconfigitem(
+    b'devel', b'warn-config-default', default=None,
+)
+coreconfigitem(
+    b'devel', b'user.obsmarker', default=None,
 )
-coreconfigitem('convert', 'hg.preserve-hash',
-    default=False,
+coreconfigitem(
+    b'devel', b'warn-config-unknown', default=None,
+)
+coreconfigitem(
+    b'devel', b'debug.copies', default=False,
 )
-coreconfigitem('convert', 'hg.revs',
-    default=None,
+coreconfigitem(
+    b'devel', b'debug.extensions', default=False,
+)
+coreconfigitem(
+    b'devel', b'debug.peer-request', default=False,
 )
-coreconfigitem('convert', 'hg.saverev',
-    default=False,
+coreconfigitem(
+    b'devel', b'discovery.randomize', default=True,
 )
-coreconfigitem('convert', 'hg.sourcename',
-    default=None,
+_registerdiffopts(section=b'diff')
+coreconfigitem(
+    b'email', b'bcc', default=None,
+)
+coreconfigitem(
+    b'email', b'cc', default=None,
+)
+coreconfigitem(
+    b'email', b'charsets', default=list,
 )
-coreconfigitem('convert', 'hg.startrev',
-    default=None,
+coreconfigitem(
+    b'email', b'from', default=None,
 )
-coreconfigitem('convert', 'hg.tagsbranch',
-    default='default',
+coreconfigitem(
+    b'email', b'method', default=b'smtp',
+)
+coreconfigitem(
+    b'email', b'reply-to', default=None,
 )
-coreconfigitem('convert', 'hg.usebranchnames',
-    default=True,
+coreconfigitem(
+    b'email', b'to', default=None,
+)
+coreconfigitem(
+    b'experimental', b'archivemetatemplate', default=dynamicdefault,
 )
-coreconfigitem('convert', 'ignoreancestorcheck',
-    default=False,
+coreconfigitem(
+    b'experimental', b'auto-publish', default=b'publish',
+)
+coreconfigitem(
+    b'experimental', b'bundle-phases', default=False,
 )
-coreconfigitem('convert', 'localtimezone',
-    default=False,
+coreconfigitem(
+    b'experimental', b'bundle2-advertise', default=True,
+)
+coreconfigitem(
+    b'experimental', b'bundle2-output-capture', default=False,
 )
-coreconfigitem('convert', 'p4.encoding',
-    default=dynamicdefault,
+coreconfigitem(
+    b'experimental', b'bundle2.pushback', default=False,
+)
+coreconfigitem(
+    b'experimental', b'bundle2lazylocking', default=False,
 )
-coreconfigitem('convert', 'p4.startrev',
-    default=0,
+coreconfigitem(
+    b'experimental', b'bundlecomplevel', default=None,
+)
+coreconfigitem(
+    b'experimental', b'bundlecomplevel.bzip2', default=None,
 )
-coreconfigitem('convert', 'skiptags',
-    default=False,
+coreconfigitem(
+    b'experimental', b'bundlecomplevel.gzip', default=None,
+)
+coreconfigitem(
+    b'experimental', b'bundlecomplevel.none', default=None,
 )
-coreconfigitem('convert', 'svn.debugsvnlog',
-    default=True,
+coreconfigitem(
+    b'experimental', b'bundlecomplevel.zstd', default=None,
 )
-coreconfigitem('convert', 'svn.trunk',
-    default=None,
+coreconfigitem(
+    b'experimental', b'changegroup3', default=False,
+)
+coreconfigitem(
+    b'experimental', b'cleanup-as-archived', default=False,
 )
-coreconfigitem('convert', 'svn.tags',
-    default=None,
+coreconfigitem(
+    b'experimental', b'clientcompressionengines', default=list,
+)
+coreconfigitem(
+    b'experimental', b'copytrace', default=b'on',
 )
-coreconfigitem('convert', 'svn.branches',
-    default=None,
+coreconfigitem(
+    b'experimental', b'copytrace.movecandidateslimit', default=100,
+)
+coreconfigitem(
+    b'experimental', b'copytrace.sourcecommitlimit', default=100,
 )
-coreconfigitem('convert', 'svn.startrev',
-    default=0,
+coreconfigitem(
+    b'experimental', b'copies.read-from', default=b"filelog-only",
+)
+coreconfigitem(
+    b'experimental', b'copies.write-to', default=b'filelog-only',
 )
-coreconfigitem('debug', 'dirstate.delaywrite',
-    default=0,
+coreconfigitem(
+    b'experimental', b'crecordtest', default=None,
+)
+coreconfigitem(
+    b'experimental', b'directaccess', default=False,
 )
-coreconfigitem('defaults', '.*',
-    default=None,
-    generic=True,
+coreconfigitem(
+    b'experimental', b'directaccess.revnums', default=False,
+)
+coreconfigitem(
+    b'experimental', b'editortmpinhg', default=False,
 )
-coreconfigitem('devel', 'all-warnings',
+coreconfigitem(
+    b'experimental', b'evolution', default=list,
+)
+coreconfigitem(
+    b'experimental',
+    b'evolution.allowdivergence',
     default=False,
-)
-coreconfigitem('devel', 'bundle2.debug',
-    default=False,
-)
-coreconfigitem('devel', 'bundle.delta',
-    default='',
-)
-coreconfigitem('devel', 'cache-vfs',
-    default=None,
-)
-coreconfigitem('devel', 'check-locks',
-    default=False,
-)
-coreconfigitem('devel', 'check-relroot',
-    default=False,
-)
-coreconfigitem('devel', 'default-date',
-    default=None,
-)
-coreconfigitem('devel', 'deprec-warn',
-    default=False,
+    alias=[(b'experimental', b'allowdivergence')],
 )
-coreconfigitem('devel', 'disableloaddefaultcerts',
-    default=False,
-)
-coreconfigitem('devel', 'warn-empty-changegroup',
-    default=False,
-)
-coreconfigitem('devel', 'legacy.exchange',
-    default=list,
-)
-coreconfigitem('devel', 'servercafile',
-    default='',
-)
-coreconfigitem('devel', 'serverexactprotocol',
-    default='',
-)
-coreconfigitem('devel', 'serverrequirecert',
-    default=False,
-)
-coreconfigitem('devel', 'strip-obsmarkers',
-    default=True,
-)
-coreconfigitem('devel', 'warn-config',
-    default=None,
+coreconfigitem(
+    b'experimental', b'evolution.allowunstable', default=None,
 )
-coreconfigitem('devel', 'warn-config-default',
-    default=None,
-)
-coreconfigitem('devel', 'user.obsmarker',
-    default=None,
-)
-coreconfigitem('devel', 'warn-config-unknown',
-    default=None,
-)
-coreconfigitem('devel', 'debug.copies',
-    default=False,
-)
-coreconfigitem('devel', 'debug.extensions',
-    default=False,
-)
-coreconfigitem('devel', 'debug.peer-request',
-    default=False,
-)
-_registerdiffopts(section='diff')
-coreconfigitem('email', 'bcc',
-    default=None,
+coreconfigitem(
+    b'experimental', b'evolution.createmarkers', default=None,
 )
-coreconfigitem('email', 'cc',
-    default=None,
-)
-coreconfigitem('email', 'charsets',
-    default=list,
-)
-coreconfigitem('email', 'from',
-    default=None,
-)
-coreconfigitem('email', 'method',
-    default='smtp',
-)
-coreconfigitem('email', 'reply-to',
-    default=None,
-)
-coreconfigitem('email', 'to',
-    default=None,
-)
-coreconfigitem('experimental', 'archivemetatemplate',
-    default=dynamicdefault,
-)
-coreconfigitem('experimental', 'auto-publish',
-    default='publish',
-)
-coreconfigitem('experimental', 'bundle-phases',
-    default=False,
+coreconfigitem(
+    b'experimental',
+    b'evolution.effect-flags',
+    default=True,
+    alias=[(b'experimental', b'effect-flags')],
 )
-coreconfigitem('experimental', 'bundle2-advertise',
-    default=True,
-)
-coreconfigitem('experimental', 'bundle2-output-capture',
-    default=False,
-)
-coreconfigitem('experimental', 'bundle2.pushback',
-    default=False,
-)
-coreconfigitem('experimental', 'bundle2lazylocking',
-    default=False,
-)
-coreconfigitem('experimental', 'bundlecomplevel',
-    default=None,
-)
-coreconfigitem('experimental', 'bundlecomplevel.bzip2',
-    default=None,
-)
-coreconfigitem('experimental', 'bundlecomplevel.gzip',
-    default=None,
+coreconfigitem(
+    b'experimental', b'evolution.exchange', default=None,
 )
-coreconfigitem('experimental', 'bundlecomplevel.none',
-    default=None,
-)
-coreconfigitem('experimental', 'bundlecomplevel.zstd',
-    default=None,
-)
-coreconfigitem('experimental', 'changegroup3',
-    default=False,
-)
-coreconfigitem('experimental', 'cleanup-as-archived',
-    default=False,
-)
-coreconfigitem('experimental', 'clientcompressionengines',
-    default=list,
-)
-coreconfigitem('experimental', 'copytrace',
-    default='on',
-)
-coreconfigitem('experimental', 'copytrace.movecandidateslimit',
-    default=100,
-)
-coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
-    default=100,
+coreconfigitem(
+    b'experimental', b'evolution.bundle-obsmarker', default=False,
 )
-coreconfigitem('experimental', 'copies.read-from',
-    default="filelog-only",
-)
-coreconfigitem('experimental', 'copies.write-to',
-    default='filelog-only',
-)
-coreconfigitem('experimental', 'crecordtest',
-    default=None,
-)
-coreconfigitem('experimental', 'directaccess',
-    default=False,
-)
-coreconfigitem('experimental', 'directaccess.revnums',
-    default=False,
-)
-coreconfigitem('experimental', 'editortmpinhg',
-    default=False,
-)
-coreconfigitem('experimental', 'evolution',
-    default=list,
-)
-coreconfigitem('experimental', 'evolution.allowdivergence',
-    default=False,
-    alias=[('experimental', 'allowdivergence')]
+coreconfigitem(
+    b'experimental', b'log.topo', default=False,
 )
-coreconfigitem('experimental', 'evolution.allowunstable',
-    default=None,
-)
-coreconfigitem('experimental', 'evolution.createmarkers',
-    default=None,
-)
-coreconfigitem('experimental', 'evolution.effect-flags',
-    default=True,
-    alias=[('experimental', 'effect-flags')]
+coreconfigitem(
+    b'experimental', b'evolution.report-instabilities', default=True,
 )
-coreconfigitem('experimental', 'evolution.exchange',
-    default=None,
-)
-coreconfigitem('experimental', 'evolution.bundle-obsmarker',
-    default=False,
-)
-coreconfigitem('experimental', 'log.topo',
-    default=False,
-)
-coreconfigitem('experimental', 'evolution.report-instabilities',
-    default=True,
-)
-coreconfigitem('experimental', 'evolution.track-operation',
-    default=True,
+coreconfigitem(
+    b'experimental', b'evolution.track-operation', default=True,
 )
 # repo-level config to exclude a revset from visibility
 #
 # The target use case is to use `share` to expose different subsets of the
 # same repository, especially on the server side. See also `server.view`.
-coreconfigitem('experimental', 'extra-filter-revs',
-    default=None,
+coreconfigitem(
+    b'experimental', b'extra-filter-revs', default=None,
 )
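 # A minimal sketch of that setup (the revset is hypothetical): the
 # server-side share that should only expose public changesets could carry a
 # repository-local hgrc along these lines:
 #
 #   [experimental]
 #   extra-filter-revs = not public()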
-coreconfigitem('experimental', 'maxdeltachainspan',
-    default=-1,
+coreconfigitem(
+    b'experimental', b'maxdeltachainspan', default=-1,
 )
-coreconfigitem('experimental', 'mergetempdirprefix',
-    default=None,
+coreconfigitem(
+    b'experimental', b'mergetempdirprefix', default=None,
 )
-coreconfigitem('experimental', 'mmapindexthreshold',
-    default=None,
+coreconfigitem(
+    b'experimental', b'mmapindexthreshold', default=None,
+)
+coreconfigitem(
+    b'experimental', b'narrow', default=False,
 )
-coreconfigitem('experimental', 'narrow',
-    default=False,
+coreconfigitem(
+    b'experimental', b'nonnormalparanoidcheck', default=False,
 )
-coreconfigitem('experimental', 'nonnormalparanoidcheck',
-    default=False,
+coreconfigitem(
+    b'experimental', b'exportableenviron', default=list,
 )
-coreconfigitem('experimental', 'exportableenviron',
-    default=list,
+coreconfigitem(
+    b'experimental', b'extendedheader.index', default=None,
 )
-coreconfigitem('experimental', 'extendedheader.index',
-    default=None,
+coreconfigitem(
+    b'experimental', b'extendedheader.similarity', default=False,
 )
-coreconfigitem('experimental', 'extendedheader.similarity',
-    default=False,
+coreconfigitem(
+    b'experimental', b'graphshorten', default=False,
 )
-coreconfigitem('experimental', 'graphshorten',
-    default=False,
+coreconfigitem(
+    b'experimental', b'graphstyle.parent', default=dynamicdefault,
 )
-coreconfigitem('experimental', 'graphstyle.parent',
-    default=dynamicdefault,
+coreconfigitem(
+    b'experimental', b'graphstyle.missing', default=dynamicdefault,
 )
-coreconfigitem('experimental', 'graphstyle.missing',
-    default=dynamicdefault,
+coreconfigitem(
+    b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
 )
-coreconfigitem('experimental', 'graphstyle.grandparent',
-    default=dynamicdefault,
+coreconfigitem(
+    b'experimental', b'hook-track-tags', default=False,
 )
-coreconfigitem('experimental', 'hook-track-tags',
-    default=False,
+coreconfigitem(
+    b'experimental', b'httppeer.advertise-v2', default=False,
 )
-coreconfigitem('experimental', 'httppeer.advertise-v2',
-    default=False,
+coreconfigitem(
+    b'experimental', b'httppeer.v2-encoder-order', default=None,
 )
-coreconfigitem('experimental', 'httppeer.v2-encoder-order',
-    default=None,
+coreconfigitem(
+    b'experimental', b'httppostargs', default=False,
 )
-coreconfigitem('experimental', 'httppostargs',
-    default=False,
-)
-coreconfigitem('experimental', 'mergedriver',
-    default=None,
+coreconfigitem(
+    b'experimental', b'mergedriver', default=None,
 )
-coreconfigitem('experimental', 'nointerrupt', default=False)
-coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)
+coreconfigitem(b'experimental', b'nointerrupt', default=False)
+coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
 
-coreconfigitem('experimental', 'obsmarkers-exchange-debug',
-    default=False,
+coreconfigitem(
+    b'experimental', b'obsmarkers-exchange-debug', default=False,
+)
+coreconfigitem(
+    b'experimental', b'remotenames', default=False,
 )
-coreconfigitem('experimental', 'remotenames',
-    default=False,
+coreconfigitem(
+    b'experimental', b'removeemptydirs', default=True,
 )
-coreconfigitem('experimental', 'removeemptydirs',
-    default=True,
+coreconfigitem(
+    b'experimental', b'revert.interactive.select-to-keep', default=False,
 )
-coreconfigitem('experimental', 'revert.interactive.select-to-keep',
-    default=False,
+coreconfigitem(
+    b'experimental', b'revisions.prefixhexnode', default=False,
 )
-coreconfigitem('experimental', 'revisions.prefixhexnode',
-    default=False,
+coreconfigitem(
+    b'experimental', b'revlogv2', default=None,
 )
-coreconfigitem('experimental', 'revlogv2',
-    default=None,
+coreconfigitem(
+    b'experimental', b'revisions.disambiguatewithin', default=None,
 )
-coreconfigitem('experimental', 'revisions.disambiguatewithin',
-    default=None,
+coreconfigitem(
+    b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
 )
-coreconfigitem('experimental', 'server.filesdata.recommended-batch-size',
-    default=50000,
-)
-coreconfigitem('experimental', 'server.manifestdata.recommended-batch-size',
+coreconfigitem(
+    b'experimental',
+    b'server.manifestdata.recommended-batch-size',
     default=100000,
 )
-coreconfigitem('experimental', 'server.stream-narrow-clones',
-    default=False,
+coreconfigitem(
+    b'experimental', b'server.stream-narrow-clones', default=False,
 )
-coreconfigitem('experimental', 'single-head-per-branch',
-    default=False,
+coreconfigitem(
+    b'experimental', b'single-head-per-branch', default=False,
 )
-coreconfigitem('experimental', 'sshserver.support-v2',
+coreconfigitem(
+    b'experimental',
+    b'single-head-per-branch:account-closed-heads',
     default=False,
 )
-coreconfigitem('experimental', 'sparse-read',
-    default=False,
+coreconfigitem(
+    b'experimental', b'sshserver.support-v2', default=False,
+)
+coreconfigitem(
+    b'experimental', b'sparse-read', default=False,
 )
-coreconfigitem('experimental', 'sparse-read.density-threshold',
-    default=0.50,
+coreconfigitem(
+    b'experimental', b'sparse-read.density-threshold', default=0.50,
 )
-coreconfigitem('experimental', 'sparse-read.min-gap-size',
-    default='65K',
+coreconfigitem(
+    b'experimental', b'sparse-read.min-gap-size', default=b'65K',
+)
+coreconfigitem(
+    b'experimental', b'treemanifest', default=False,
 )
-coreconfigitem('experimental', 'treemanifest',
-    default=False,
+coreconfigitem(
+    b'experimental', b'update.atomic-file', default=False,
 )
-coreconfigitem('experimental', 'update.atomic-file',
-    default=False,
+coreconfigitem(
+    b'experimental', b'sshpeer.advertise-v2', default=False,
 )
-coreconfigitem('experimental', 'sshpeer.advertise-v2',
-    default=False,
+coreconfigitem(
+    b'experimental', b'web.apiserver', default=False,
 )
-coreconfigitem('experimental', 'web.apiserver',
-    default=False,
+coreconfigitem(
+    b'experimental', b'web.api.http-v2', default=False,
+)
+coreconfigitem(
+    b'experimental', b'web.api.debugreflect', default=False,
 )
-coreconfigitem('experimental', 'web.api.http-v2',
-    default=False,
+coreconfigitem(
+    b'experimental', b'worker.wdir-get-thread-safe', default=False,
 )
-coreconfigitem('experimental', 'web.api.debugreflect',
-    default=False,
+coreconfigitem(
+    b'experimental', b'xdiff', default=False,
 )
-coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
-    default=False,
+coreconfigitem(
+    b'extensions', b'.*', default=None, generic=True,
 )
-coreconfigitem('experimental', 'xdiff',
-    default=False,
+coreconfigitem(
+    b'extdata', b'.*', default=None, generic=True,
+)
+coreconfigitem(
+    b'format', b'bookmarks-in-store', default=False,
 )
-coreconfigitem('extensions', '.*',
-    default=None,
-    generic=True,
+coreconfigitem(
+    b'format', b'chunkcachesize', default=None, experimental=True,
+)
+coreconfigitem(
+    b'format', b'dotencode', default=True,
+)
+coreconfigitem(
+    b'format', b'generaldelta', default=False, experimental=True,
 )
-coreconfigitem('extdata', '.*',
-    default=None,
-    generic=True,
+coreconfigitem(
+    b'format', b'manifestcachesize', default=None, experimental=True,
 )
-coreconfigitem('format', 'bookmarks-in-store',
-    default=False,
+coreconfigitem(
+    b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
 )
-coreconfigitem('format', 'chunkcachesize',
-    default=None,
+coreconfigitem(
+    b'format', b'obsstore-version', default=None,
 )
-coreconfigitem('format', 'dotencode',
-    default=True,
-)
-coreconfigitem('format', 'generaldelta',
-    default=False,
+coreconfigitem(
+    b'format', b'sparse-revlog', default=True,
 )
-coreconfigitem('format', 'manifestcachesize',
-    default=None,
+coreconfigitem(
+    b'format',
+    b'revlog-compression',
+    default=b'zlib',
+    alias=[(b'experimental', b'format.compression')],
 )
-coreconfigitem('format', 'maxchainlen',
-    default=dynamicdefault,
+coreconfigitem(
+    b'format', b'usefncache', default=True,
+)
+coreconfigitem(
+    b'format', b'usegeneraldelta', default=True,
 )
-coreconfigitem('format', 'obsstore-version',
-    default=None,
-)
-coreconfigitem('format', 'sparse-revlog',
-    default=True,
+coreconfigitem(
+    b'format', b'usestore', default=True,
 )
-coreconfigitem('format', 'revlog-compression',
-    default='zlib',
-    alias=[('experimental', 'format.compression')]
+coreconfigitem(
+    b'format',
+    b'exp-use-copies-side-data-changeset',
+    default=False,
+    experimental=True,
 )
-coreconfigitem('format', 'usefncache',
-    default=True,
+coreconfigitem(
+    b'format', b'exp-use-side-data', default=False, experimental=True,
+)
+coreconfigitem(
+    b'format', b'internal-phase', default=False, experimental=True,
 )
-coreconfigitem('format', 'usegeneraldelta',
-    default=True,
+coreconfigitem(
+    b'fsmonitor', b'warn_when_unused', default=True,
 )
-coreconfigitem('format', 'usestore',
-    default=True,
+coreconfigitem(
+    b'fsmonitor', b'warn_update_file_count', default=50000,
 )
-coreconfigitem('format', 'internal-phase',
-    default=False,
+coreconfigitem(
+    b'help', br'hidden-command\..*', default=False, generic=True,
 )
-coreconfigitem('fsmonitor', 'warn_when_unused',
-    default=True,
+coreconfigitem(
+    b'help', br'hidden-topic\..*', default=False, generic=True,
+)
+coreconfigitem(
+    b'hooks', b'.*', default=dynamicdefault, generic=True,
 )
-coreconfigitem('fsmonitor', 'warn_update_file_count',
-    default=50000,
+coreconfigitem(
+    b'hgweb-paths', b'.*', default=list, generic=True,
+)
+coreconfigitem(
+    b'hostfingerprints', b'.*', default=list, generic=True,
+)
+coreconfigitem(
+    b'hostsecurity', b'ciphers', default=None,
 )
-coreconfigitem('help', br'hidden-command\..*',
-    default=False,
-    generic=True,
+coreconfigitem(
+    b'hostsecurity', b'disabletls10warning', default=False,
 )
-coreconfigitem('help', br'hidden-topic\..*',
-    default=False,
-    generic=True,
+coreconfigitem(
+    b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
 )
-coreconfigitem('hooks', '.*',
+coreconfigitem(
+    b'hostsecurity',
+    b'.*:minimumprotocol$',
     default=dynamicdefault,
     generic=True,
 )
-coreconfigitem('hgweb-paths', '.*',
-    default=list,
-    generic=True,
+coreconfigitem(
+    b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
+)
+coreconfigitem(
+    b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
 )
-coreconfigitem('hostfingerprints', '.*',
-    default=list,
-    generic=True,
+coreconfigitem(
+    b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
 )
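 # A short illustration of how these generic keys resolve (hostname and
 # fingerprint are hypothetical): an hgrc such as
 #
 #   [hostsecurity]
 #   hg.example.com:minimumprotocol = tls1.2
 #   hg.example.com:fingerprints = sha256:0f:1e:2d:3c:...
 #
 # is matched per host by the `.*:minimumprotocol$` and `.*:fingerprints$`
 # patterns registered above.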
-coreconfigitem('hostsecurity', 'ciphers',
-    default=None,
-)
-coreconfigitem('hostsecurity', 'disabletls10warning',
-    default=False,
+
+coreconfigitem(
+    b'http_proxy', b'always', default=False,
 )
-coreconfigitem('hostsecurity', 'minimumprotocol',
-    default=dynamicdefault,
+coreconfigitem(
+    b'http_proxy', b'host', default=None,
 )
-coreconfigitem('hostsecurity', '.*:minimumprotocol$',
-    default=dynamicdefault,
-    generic=True,
+coreconfigitem(
+    b'http_proxy', b'no', default=list,
 )
-coreconfigitem('hostsecurity', '.*:ciphers$',
-    default=dynamicdefault,
-    generic=True,
+coreconfigitem(
+    b'http_proxy', b'passwd', default=None,
 )
-coreconfigitem('hostsecurity', '.*:fingerprints$',
-    default=list,
-    generic=True,
+coreconfigitem(
+    b'http_proxy', b'user', default=None,
 )
-coreconfigitem('hostsecurity', '.*:verifycertsfile$',
-    default=None,
-    generic=True,
+
+coreconfigitem(
+    b'http', b'timeout', default=None,
 )
 
-coreconfigitem('http_proxy', 'always',
-    default=False,
-)
-coreconfigitem('http_proxy', 'host',
-    default=None,
+coreconfigitem(
+    b'logtoprocess', b'commandexception', default=None,
 )
-coreconfigitem('http_proxy', 'no',
-    default=list,
+coreconfigitem(
+    b'logtoprocess', b'commandfinish', default=None,
 )
-coreconfigitem('http_proxy', 'passwd',
-    default=None,
+coreconfigitem(
+    b'logtoprocess', b'command', default=None,
 )
-coreconfigitem('http_proxy', 'user',
-    default=None,
+coreconfigitem(
+    b'logtoprocess', b'develwarn', default=None,
 )
-
-coreconfigitem('http', 'timeout',
-    default=None,
+coreconfigitem(
+    b'logtoprocess', b'uiblocked', default=None,
 )
-
-coreconfigitem('logtoprocess', 'commandexception',
-    default=None,
+coreconfigitem(
+    b'merge', b'checkunknown', default=b'abort',
 )
-coreconfigitem('logtoprocess', 'commandfinish',
-    default=None,
-)
-coreconfigitem('logtoprocess', 'command',
-    default=None,
+coreconfigitem(
+    b'merge', b'checkignored', default=b'abort',
 )
-coreconfigitem('logtoprocess', 'develwarn',
-    default=None,
-)
-coreconfigitem('logtoprocess', 'uiblocked',
-    default=None,
+coreconfigitem(
+    b'experimental', b'merge.checkpathconflicts', default=False,
 )
-coreconfigitem('merge', 'checkunknown',
-    default='abort',
+coreconfigitem(
+    b'merge', b'followcopies', default=True,
 )
-coreconfigitem('merge', 'checkignored',
-    default='abort',
-)
-coreconfigitem('experimental', 'merge.checkpathconflicts',
-    default=False,
+coreconfigitem(
+    b'merge', b'on-failure', default=b'continue',
 )
-coreconfigitem('merge', 'followcopies',
-    default=True,
+coreconfigitem(
+    b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
 )
-coreconfigitem('merge', 'on-failure',
-    default='continue',
-)
-coreconfigitem('merge', 'preferancestor',
-        default=lambda: ['*'],
+coreconfigitem(
+    b'merge', b'strict-capability-check', default=False,
 )
-coreconfigitem('merge', 'strict-capability-check',
-    default=False,
+coreconfigitem(
+    b'merge-tools', b'.*', default=None, generic=True,
 )
-coreconfigitem('merge-tools', '.*',
-    default=None,
-    generic=True,
-)
-coreconfigitem('merge-tools', br'.*\.args$',
-    default="$local $base $other",
+coreconfigitem(
+    b'merge-tools',
+    br'.*\.args$',
+    default=b"$local $base $other",
     generic=True,
     priority=-1,
 )
-coreconfigitem('merge-tools', br'.*\.binary$',
-    default=False,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem('merge-tools', br'.*\.check$',
-    default=list,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem('merge-tools', br'.*\.checkchanged$',
-    default=False,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem('merge-tools', br'.*\.executable$',
-    default=dynamicdefault,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem('merge-tools', br'.*\.fixeol$',
-    default=False,
-    generic=True,
-    priority=-1,
+coreconfigitem(
+    b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
 )
-coreconfigitem('merge-tools', br'.*\.gui$',
-    default=False,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem('merge-tools', br'.*\.mergemarkers$',
-    default='basic',
-    generic=True,
-    priority=-1,
+coreconfigitem(
+    b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
 )
-coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
-    default=dynamicdefault,  # take from ui.mergemarkertemplate
-    generic=True,
-    priority=-1,
-)
-coreconfigitem('merge-tools', br'.*\.priority$',
-    default=0,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem('merge-tools', br'.*\.premerge$',
-    default=dynamicdefault,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem('merge-tools', br'.*\.symlink$',
+coreconfigitem(
+    b'merge-tools',
+    br'.*\.checkchanged$',
     default=False,
     generic=True,
     priority=-1,
 )
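 # These suffix patterns configure individual merge tools by name. As a
 # sketch (the tool name is hypothetical), an hgrc like
 #
 #   [merge-tools]
 #   mymerge.args = $local $base $other
 #   mymerge.priority = 10
 #
 # would be served by the `.*\.args$` and `.*\.priority$` items above.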
-coreconfigitem('pager', 'attend-.*',
+coreconfigitem(
+    b'merge-tools',
+    br'.*\.executable$',
     default=dynamicdefault,
     generic=True,
-)
-coreconfigitem('pager', 'ignore',
-    default=list,
-)
-coreconfigitem('pager', 'pager',
-    default=dynamicdefault,
-)
-coreconfigitem('patch', 'eol',
-    default='strict',
-)
-coreconfigitem('patch', 'fuzz',
-    default=2,
-)
-coreconfigitem('paths', 'default',
-    default=None,
-)
-coreconfigitem('paths', 'default-push',
-    default=None,
-)
-coreconfigitem('paths', '.*',
-    default=None,
-    generic=True,
-)
-coreconfigitem('phases', 'checksubrepos',
-    default='follow',
-)
-coreconfigitem('phases', 'new-commit',
-    default='draft',
-)
-coreconfigitem('phases', 'publish',
-    default=True,
-)
-coreconfigitem('profiling', 'enabled',
-    default=False,
-)
-coreconfigitem('profiling', 'format',
-    default='text',
-)
-coreconfigitem('profiling', 'freq',
-    default=1000,
-)
-coreconfigitem('profiling', 'limit',
-    default=30,
-)
-coreconfigitem('profiling', 'nested',
-    default=0,
-)
-coreconfigitem('profiling', 'output',
-    default=None,
-)
-coreconfigitem('profiling', 'showmax',
-    default=0.999,
-)
-coreconfigitem('profiling', 'showmin',
-    default=dynamicdefault,
-)
-coreconfigitem('profiling', 'showtime',
-    default=True,
+    priority=-1,
 )
-coreconfigitem('profiling', 'sort',
-    default='inlinetime',
-)
-coreconfigitem('profiling', 'statformat',
-    default='hotpath',
-)
-coreconfigitem('profiling', 'time-track',
-    default=dynamicdefault,
-)
-coreconfigitem('profiling', 'type',
-    default='stat',
-)
-coreconfigitem('progress', 'assume-tty',
-    default=False,
-)
-coreconfigitem('progress', 'changedelay',
-    default=1,
-)
-coreconfigitem('progress', 'clear-complete',
-    default=True,
-)
-coreconfigitem('progress', 'debug',
-    default=False,
-)
-coreconfigitem('progress', 'delay',
-    default=3,
-)
-coreconfigitem('progress', 'disable',
-    default=False,
+coreconfigitem(
+    b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
 )
-coreconfigitem('progress', 'estimateinterval',
-    default=60.0,
-)
-coreconfigitem('progress', 'format',
-    default=lambda: ['topic', 'bar', 'number', 'estimate'],
-)
-coreconfigitem('progress', 'refresh',
-    default=0.1,
-)
-coreconfigitem('progress', 'width',
-    default=dynamicdefault,
-)
-coreconfigitem('push', 'pushvars.server',
-    default=False,
-)
-coreconfigitem('rewrite', 'backup-bundle',
-    default=True,
-    alias=[('ui', 'history-editing-backup')],
-)
-coreconfigitem('rewrite', 'update-timestamp',
-    default=False,
-)
-coreconfigitem('storage', 'new-repo-backend',
-    default='revlogv1',
-)
-coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
-    default=True,
-    alias=[('format', 'aggressivemergedeltas')],
-)
-coreconfigitem('storage', 'revlog.reuse-external-delta',
-    default=True,
+coreconfigitem(
+    b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
 )
-coreconfigitem('storage', 'revlog.reuse-external-delta-parent',
-    default=None,
-)
-coreconfigitem('storage', 'revlog.zlib.level',
-    default=None,
-)
-coreconfigitem('storage', 'revlog.zstd.level',
-    default=None,
-)
-coreconfigitem('server', 'bookmarks-pushkey-compat',
-    default=True,
-)
-coreconfigitem('server', 'bundle1',
-    default=True,
-)
-coreconfigitem('server', 'bundle1gd',
-    default=None,
-)
-coreconfigitem('server', 'bundle1.pull',
-    default=None,
-)
-coreconfigitem('server', 'bundle1gd.pull',
-    default=None,
-)
-coreconfigitem('server', 'bundle1.push',
-    default=None,
-)
-coreconfigitem('server', 'bundle1gd.push',
-    default=None,
-)
-coreconfigitem('server', 'bundle2.stream',
-    default=True,
-    alias=[('experimental', 'bundle2.stream')]
-)
-coreconfigitem('server', 'compressionengines',
-    default=list,
-)
-coreconfigitem('server', 'concurrent-push-mode',
-    default='strict',
-)
-coreconfigitem('server', 'disablefullbundle',
-    default=False,
-)
-coreconfigitem('server', 'maxhttpheaderlen',
-    default=1024,
-)
-coreconfigitem('server', 'pullbundle',
-    default=False,
-)
-coreconfigitem('server', 'preferuncompressed',
-    default=False,
-)
-coreconfigitem('server', 'streamunbundle',
-    default=False,
-)
-coreconfigitem('server', 'uncompressed',
-    default=True,
+coreconfigitem(
+    b'merge-tools',
+    br'.*\.mergemarkers$',
+    default=b'basic',
+    generic=True,
+    priority=-1,
 )
-coreconfigitem('server', 'uncompressedallowsecret',
-    default=False,
-)
-coreconfigitem('server', 'view',
-    default='served',
-)
-coreconfigitem('server', 'validate',
-    default=False,
-)
-coreconfigitem('server', 'zliblevel',
-    default=-1,
-)
-coreconfigitem('server', 'zstdlevel',
-    default=3,
-)
-coreconfigitem('share', 'pool',
-    default=None,
-)
-coreconfigitem('share', 'poolnaming',
-    default='identity',
-)
-coreconfigitem('shelve','maxbackups',
-    default=10,
-)
-coreconfigitem('smtp', 'host',
-    default=None,
-)
-coreconfigitem('smtp', 'local_hostname',
-    default=None,
+coreconfigitem(
+    b'merge-tools',
+    br'.*\.mergemarkertemplate$',
+    default=dynamicdefault,  # take from ui.mergemarkertemplate
+    generic=True,
+    priority=-1,
 )
-coreconfigitem('smtp', 'password',
-    default=None,
-)
-coreconfigitem('smtp', 'port',
-    default=dynamicdefault,
-)
-coreconfigitem('smtp', 'tls',
-    default='none',
-)
-coreconfigitem('smtp', 'username',
-    default=None,
-)
-coreconfigitem('sparse', 'missingwarning',
-    default=True,
+coreconfigitem(
+    b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
 )
-coreconfigitem('subrepos', 'allowed',
-    default=dynamicdefault,  # to make backporting simpler
-)
-coreconfigitem('subrepos', 'hg:allowed',
-    default=dynamicdefault,
-)
-coreconfigitem('subrepos', 'git:allowed',
-    default=dynamicdefault,
-)
-coreconfigitem('subrepos', 'svn:allowed',
-    default=dynamicdefault,
-)
-coreconfigitem('templates', '.*',
-    default=None,
-    generic=True,
-)
-coreconfigitem('templateconfig', '.*',
+coreconfigitem(
+    b'merge-tools',
+    br'.*\.premerge$',
     default=dynamicdefault,
     generic=True,
+    priority=-1,
 )
-coreconfigitem('trusted', 'groups',
-    default=list,
+coreconfigitem(
+    b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
+)
+coreconfigitem(
+    b'pager', b'attend-.*', default=dynamicdefault, generic=True,
 )
-coreconfigitem('trusted', 'users',
-    default=list,
+coreconfigitem(
+    b'pager', b'ignore', default=list,
+)
+coreconfigitem(
+    b'pager', b'pager', default=dynamicdefault,
+)
+coreconfigitem(
+    b'patch', b'eol', default=b'strict',
 )
-coreconfigitem('ui', '_usedassubrepo',
-    default=False,
+coreconfigitem(
+    b'patch', b'fuzz', default=2,
 )
-coreconfigitem('ui', 'allowemptycommit',
-    default=False,
+coreconfigitem(
+    b'paths', b'default', default=None,
+)
+coreconfigitem(
+    b'paths', b'default-push', default=None,
 )
-coreconfigitem('ui', 'archivemeta',
-    default=True,
+coreconfigitem(
+    b'paths', b'.*', default=None, generic=True,
 )
-coreconfigitem('ui', 'askusername',
-    default=False,
+coreconfigitem(
+    b'phases', b'checksubrepos', default=b'follow',
+)
+coreconfigitem(
+    b'phases', b'new-commit', default=b'draft',
 )
-coreconfigitem('ui', 'clonebundlefallback',
-    default=False,
+coreconfigitem(
+    b'phases', b'publish', default=True,
+)
+coreconfigitem(
+    b'profiling', b'enabled', default=False,
+)
+coreconfigitem(
+    b'profiling', b'format', default=b'text',
 )
-coreconfigitem('ui', 'clonebundleprefers',
-    default=list,
+coreconfigitem(
+    b'profiling', b'freq', default=1000,
 )
-coreconfigitem('ui', 'clonebundles',
-    default=True,
+coreconfigitem(
+    b'profiling', b'limit', default=30,
+)
+coreconfigitem(
+    b'profiling', b'nested', default=0,
 )
-coreconfigitem('ui', 'color',
-    default='auto',
+coreconfigitem(
+    b'profiling', b'output', default=None,
 )
-coreconfigitem('ui', 'commitsubrepos',
-    default=False,
+coreconfigitem(
+    b'profiling', b'showmax', default=0.999,
+)
+coreconfigitem(
+    b'profiling', b'showmin', default=dynamicdefault,
 )
-coreconfigitem('ui', 'debug',
-    default=False,
+coreconfigitem(
+    b'profiling', b'showtime', default=True,
 )
-coreconfigitem('ui', 'debugger',
-    default=None,
+coreconfigitem(
+    b'profiling', b'sort', default=b'inlinetime',
+)
+coreconfigitem(
+    b'profiling', b'statformat', default=b'hotpath',
 )
-coreconfigitem('ui', 'editor',
-    default=dynamicdefault,
+coreconfigitem(
+    b'profiling', b'time-track', default=dynamicdefault,
+)
+coreconfigitem(
+    b'profiling', b'type', default=b'stat',
 )
-coreconfigitem('ui', 'fallbackencoding',
-    default=None,
+coreconfigitem(
+    b'progress', b'assume-tty', default=False,
 )
-coreconfigitem('ui', 'forcecwd',
-    default=None,
+coreconfigitem(
+    b'progress', b'changedelay', default=1,
+)
+coreconfigitem(
+    b'progress', b'clear-complete', default=True,
 )
-coreconfigitem('ui', 'forcemerge',
-    default=None,
+coreconfigitem(
+    b'progress', b'debug', default=False,
 )
-coreconfigitem('ui', 'formatdebug',
-    default=False,
+coreconfigitem(
+    b'progress', b'delay', default=3,
+)
+coreconfigitem(
+    b'progress', b'disable', default=False,
 )
-coreconfigitem('ui', 'formatjson',
-    default=False,
+coreconfigitem(
+    b'progress', b'estimateinterval', default=60.0,
 )
-coreconfigitem('ui', 'formatted',
-    default=None,
+coreconfigitem(
+    b'progress',
+    b'format',
+    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
+)
+coreconfigitem(
+    b'progress', b'refresh', default=0.1,
 )
-coreconfigitem('ui', 'graphnodetemplate',
-    default=None,
+coreconfigitem(
+    b'progress', b'width', default=dynamicdefault,
+)
+coreconfigitem(
+    b'push', b'pushvars.server', default=False,
 )
-coreconfigitem('ui', 'interactive',
-    default=None,
+coreconfigitem(
+    b'rewrite',
+    b'backup-bundle',
+    default=True,
+    alias=[(b'ui', b'history-editing-backup')],
 )
-coreconfigitem('ui', 'interface',
-    default=None,
+coreconfigitem(
+    b'rewrite', b'update-timestamp', default=False,
+)
+coreconfigitem(
+    b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
 )
-coreconfigitem('ui', 'interface.chunkselector',
-    default=None,
+coreconfigitem(
+    b'storage',
+    b'revlog.optimize-delta-parent-choice',
+    default=True,
+    alias=[(b'format', b'aggressivemergedeltas')],
 )
-coreconfigitem('ui', 'large-file-limit',
-    default=10000000,
+coreconfigitem(
+    b'storage', b'revlog.reuse-external-delta', default=True,
 )
-coreconfigitem('ui', 'logblockedtimes',
-    default=False,
+coreconfigitem(
+    b'storage', b'revlog.reuse-external-delta-parent', default=None,
+)
+coreconfigitem(
+    b'storage', b'revlog.zlib.level', default=None,
 )
-coreconfigitem('ui', 'logtemplate',
-    default=None,
+coreconfigitem(
+    b'storage', b'revlog.zstd.level', default=None,
+)
+coreconfigitem(
+    b'server', b'bookmarks-pushkey-compat', default=True,
 )
-coreconfigitem('ui', 'merge',
-    default=None,
+coreconfigitem(
+    b'server', b'bundle1', default=True,
+)
+coreconfigitem(
+    b'server', b'bundle1gd', default=None,
+)
+coreconfigitem(
+    b'server', b'bundle1.pull', default=None,
 )
-coreconfigitem('ui', 'mergemarkers',
-    default='basic',
+coreconfigitem(
+    b'server', b'bundle1gd.pull', default=None,
+)
+coreconfigitem(
+    b'server', b'bundle1.push', default=None,
+)
+coreconfigitem(
+    b'server', b'bundle1gd.push', default=None,
 )
-coreconfigitem('ui', 'mergemarkertemplate',
-    default=('{node|short} '
-            '{ifeq(tags, "tip", "", '
-            'ifeq(tags, "", "", "{tags} "))}'
-            '{if(bookmarks, "{bookmarks} ")}'
-            '{ifeq(branch, "default", "", "{branch} ")}'
-            '- {author|user}: {desc|firstline}')
+coreconfigitem(
+    b'server',
+    b'bundle2.stream',
+    default=True,
+    alias=[(b'experimental', b'bundle2.stream')],
+)
+coreconfigitem(
+    b'server', b'compressionengines', default=list,
 )
-coreconfigitem('ui', 'message-output',
-    default='stdio',
+coreconfigitem(
+    b'server', b'concurrent-push-mode', default=b'strict',
 )
-coreconfigitem('ui', 'nontty',
-    default=False,
+coreconfigitem(
+    b'server', b'disablefullbundle', default=False,
+)
+coreconfigitem(
+    b'server', b'maxhttpheaderlen', default=1024,
 )
-coreconfigitem('ui', 'origbackuppath',
-    default=None,
+coreconfigitem(
+    b'server', b'pullbundle', default=False,
+)
+coreconfigitem(
+    b'server', b'preferuncompressed', default=False,
+)
+coreconfigitem(
+    b'server', b'streamunbundle', default=False,
 )
-coreconfigitem('ui', 'paginate',
-    default=True,
+coreconfigitem(
+    b'server', b'uncompressed', default=True,
 )
-coreconfigitem('ui', 'patch',
-    default=None,
+coreconfigitem(
+    b'server', b'uncompressedallowsecret', default=False,
+)
+coreconfigitem(
+    b'server', b'view', default=b'served',
 )
-coreconfigitem('ui', 'pre-merge-tool-output-template',
-    default=None,
+coreconfigitem(
+    b'server', b'validate', default=False,
 )
-coreconfigitem('ui', 'portablefilenames',
-    default='warn',
+coreconfigitem(
+    b'server', b'zliblevel', default=-1,
+)
+coreconfigitem(
+    b'server', b'zstdlevel', default=3,
 )
-coreconfigitem('ui', 'promptecho',
-    default=False,
+coreconfigitem(
+    b'share', b'pool', default=None,
 )
-coreconfigitem('ui', 'quiet',
-    default=False,
+coreconfigitem(
+    b'share', b'poolnaming', default=b'identity',
+)
+coreconfigitem(
+    b'shelve', b'maxbackups', default=10,
 )
-coreconfigitem('ui', 'quietbookmarkmove',
-    default=False,
+coreconfigitem(
+    b'smtp', b'host', default=None,
+)
+coreconfigitem(
+    b'smtp', b'local_hostname', default=None,
 )
-coreconfigitem('ui', 'relative-paths',
-    default='legacy',
+coreconfigitem(
+    b'smtp', b'password', default=None,
 )
-coreconfigitem('ui', 'remotecmd',
-    default='hg',
+coreconfigitem(
+    b'smtp', b'port', default=dynamicdefault,
+)
+coreconfigitem(
+    b'smtp', b'tls', default=b'none',
 )
-coreconfigitem('ui', 'report_untrusted',
-    default=True,
+coreconfigitem(
+    b'smtp', b'username', default=None,
 )
-coreconfigitem('ui', 'rollback',
-    default=True,
+coreconfigitem(
+    b'sparse', b'missingwarning', default=True, experimental=True,
 )
-coreconfigitem('ui', 'signal-safe-lock',
-    default=True,
+coreconfigitem(
+    b'subrepos',
+    b'allowed',
+    default=dynamicdefault,  # to make backporting simpler
 )
-coreconfigitem('ui', 'slash',
-    default=False,
+coreconfigitem(
+    b'subrepos', b'hg:allowed', default=dynamicdefault,
+)
+coreconfigitem(
+    b'subrepos', b'git:allowed', default=dynamicdefault,
+)
+coreconfigitem(
+    b'subrepos', b'svn:allowed', default=dynamicdefault,
 )
-coreconfigitem('ui', 'ssh',
-    default='ssh',
+coreconfigitem(
+    b'templates', b'.*', default=None, generic=True,
+)
+coreconfigitem(
+    b'templateconfig', b'.*', default=dynamicdefault, generic=True,
+)
+coreconfigitem(
+    b'trusted', b'groups', default=list,
 )
-coreconfigitem('ui', 'ssherrorhint',
-    default=None,
+coreconfigitem(
+    b'trusted', b'users', default=list,
 )
-coreconfigitem('ui', 'statuscopies',
-    default=False,
+coreconfigitem(
+    b'ui', b'_usedassubrepo', default=False,
+)
+coreconfigitem(
+    b'ui', b'allowemptycommit', default=False,
 )
-coreconfigitem('ui', 'strict',
-    default=False,
+coreconfigitem(
+    b'ui', b'archivemeta', default=True,
 )
-coreconfigitem('ui', 'style',
-    default='',
+coreconfigitem(
+    b'ui', b'askusername', default=False,
+)
+coreconfigitem(
+    b'ui', b'clonebundlefallback', default=False,
 )
-coreconfigitem('ui', 'supportcontact',
-    default=None,
+coreconfigitem(
+    b'ui', b'clonebundleprefers', default=list,
+)
+coreconfigitem(
+    b'ui', b'clonebundles', default=True,
 )
-coreconfigitem('ui', 'textwidth',
-    default=78,
+coreconfigitem(
+    b'ui', b'color', default=b'auto',
+)
+coreconfigitem(
+    b'ui', b'commitsubrepos', default=False,
 )
-coreconfigitem('ui', 'timeout',
-    default='600',
+coreconfigitem(
+    b'ui', b'debug', default=False,
+)
+coreconfigitem(
+    b'ui', b'debugger', default=None,
+)
+coreconfigitem(
+    b'ui', b'editor', default=dynamicdefault,
 )
-coreconfigitem('ui', 'timeout.warn',
-    default=0,
+coreconfigitem(
+    b'ui', b'fallbackencoding', default=None,
 )
-coreconfigitem('ui', 'traceback',
-    default=False,
+coreconfigitem(
+    b'ui', b'forcecwd', default=None,
+)
+coreconfigitem(
+    b'ui', b'forcemerge', default=None,
 )
-coreconfigitem('ui', 'tweakdefaults',
-    default=False,
+coreconfigitem(
+    b'ui', b'formatdebug', default=False,
+)
+coreconfigitem(
+    b'ui', b'formatjson', default=False,
+)
+coreconfigitem(
+    b'ui', b'formatted', default=None,
 )
-coreconfigitem('ui', 'username',
-    alias=[('ui', 'user')]
+coreconfigitem(
+    b'ui', b'graphnodetemplate', default=None,
 )
-coreconfigitem('ui', 'verbose',
-    default=False,
+coreconfigitem(
+    b'ui', b'interactive', default=None,
+)
+coreconfigitem(
+    b'ui', b'interface', default=None,
 )
-coreconfigitem('verify', 'skipflags',
-    default=None,
+coreconfigitem(
+    b'ui', b'interface.chunkselector', default=None,
+)
+coreconfigitem(
+    b'ui', b'large-file-limit', default=10000000,
+)
+coreconfigitem(
+    b'ui', b'logblockedtimes', default=False,
 )
-coreconfigitem('web', 'allowbz2',
-    default=False,
+coreconfigitem(
+    b'ui', b'logtemplate', default=None,
 )
-coreconfigitem('web', 'allowgz',
-    default=False,
+coreconfigitem(
+    b'ui', b'merge', default=None,
+)
+coreconfigitem(
+    b'ui', b'mergemarkers', default=b'basic',
 )
-coreconfigitem('web', 'allow-pull',
-    alias=[('web', 'allowpull')],
-    default=True,
+coreconfigitem(
+    b'ui',
+    b'mergemarkertemplate',
+    default=(
+        b'{node|short} '
+        b'{ifeq(tags, "tip", "", '
+        b'ifeq(tags, "", "", "{tags} "))}'
+        b'{if(bookmarks, "{bookmarks} ")}'
+        b'{ifeq(branch, "default", "", "{branch} ")}'
+        b'- {author|user}: {desc|firstline}'
+    ),
 )
-coreconfigitem('web', 'allow-push',
-    alias=[('web', 'allow_push')],
-    default=list,
+coreconfigitem(
+    b'ui', b'message-output', default=b'stdio',
 )
-coreconfigitem('web', 'allowzip',
-    default=False,
+coreconfigitem(
+    b'ui', b'nontty', default=False,
 )
-coreconfigitem('web', 'archivesubrepos',
-    default=False,
+coreconfigitem(
+    b'ui', b'origbackuppath', default=None,
+)
+coreconfigitem(
+    b'ui', b'paginate', default=True,
+)
+coreconfigitem(
+    b'ui', b'patch', default=None,
 )
-coreconfigitem('web', 'cache',
-    default=True,
+coreconfigitem(
+    b'ui', b'pre-merge-tool-output-template', default=None,
 )
-coreconfigitem('web', 'comparisoncontext',
-    default=5,
+coreconfigitem(
+    b'ui', b'portablefilenames', default=b'warn',
+)
+coreconfigitem(
+    b'ui', b'promptecho', default=False,
 )
-coreconfigitem('web', 'contact',
-    default=None,
+coreconfigitem(
+    b'ui', b'quiet', default=False,
 )
-coreconfigitem('web', 'deny_push',
-    default=list,
+coreconfigitem(
+    b'ui', b'quietbookmarkmove', default=False,
+)
+coreconfigitem(
+    b'ui', b'relative-paths', default=b'legacy',
 )
-coreconfigitem('web', 'guessmime',
-    default=False,
+coreconfigitem(
+    b'ui', b'remotecmd', default=b'hg',
 )
-coreconfigitem('web', 'hidden',
-    default=False,
+coreconfigitem(
+    b'ui', b'report_untrusted', default=True,
+)
+coreconfigitem(
+    b'ui', b'rollback', default=True,
 )
-coreconfigitem('web', 'labels',
-    default=list,
+coreconfigitem(
+    b'ui', b'signal-safe-lock', default=True,
+)
+coreconfigitem(
+    b'ui', b'slash', default=False,
+)
+coreconfigitem(
+    b'ui', b'ssh', default=b'ssh',
 )
-coreconfigitem('web', 'logoimg',
-    default='hglogo.png',
+coreconfigitem(
+    b'ui', b'ssherrorhint', default=None,
 )
-coreconfigitem('web', 'logourl',
-    default='https://mercurial-scm.org/',
+coreconfigitem(
+    b'ui', b'statuscopies', default=False,
+)
+coreconfigitem(
+    b'ui', b'strict', default=False,
 )
-coreconfigitem('web', 'accesslog',
-    default='-',
+coreconfigitem(
+    b'ui', b'style', default=b'',
 )
-coreconfigitem('web', 'address',
-    default='',
+coreconfigitem(
+    b'ui', b'supportcontact', default=None,
+)
+coreconfigitem(
+    b'ui', b'textwidth', default=78,
 )
-coreconfigitem('web', 'allow-archive',
-    alias=[('web', 'allow_archive')],
-    default=list,
+coreconfigitem(
+    b'ui', b'timeout', default=b'600',
+)
+coreconfigitem(
+    b'ui', b'timeout.warn', default=0,
 )
-coreconfigitem('web', 'allow_read',
-    default=list,
+coreconfigitem(
+    b'ui', b'traceback', default=False,
+)
+coreconfigitem(
+    b'ui', b'tweakdefaults', default=False,
 )
-coreconfigitem('web', 'baseurl',
-    default=None,
+coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
+coreconfigitem(
+    b'ui', b'verbose', default=False,
+)
+coreconfigitem(
+    b'verify', b'skipflags', default=None,
 )
-coreconfigitem('web', 'cacerts',
-    default=None,
+coreconfigitem(
+    b'web', b'allowbz2', default=False,
 )
-coreconfigitem('web', 'certificate',
-    default=None,
+coreconfigitem(
+    b'web', b'allowgz', default=False,
+)
+coreconfigitem(
+    b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
 )
-coreconfigitem('web', 'collapse',
-    default=False,
+coreconfigitem(
+    b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
 )
-coreconfigitem('web', 'csp',
-    default=None,
+coreconfigitem(
+    b'web', b'allowzip', default=False,
+)
+coreconfigitem(
+    b'web', b'archivesubrepos', default=False,
 )
-coreconfigitem('web', 'deny_read',
-    default=list,
+coreconfigitem(
+    b'web', b'cache', default=True,
 )
-coreconfigitem('web', 'descend',
-    default=True,
+coreconfigitem(
+    b'web', b'comparisoncontext', default=5,
+)
+coreconfigitem(
+    b'web', b'contact', default=None,
 )
-coreconfigitem('web', 'description',
-    default="",
+coreconfigitem(
+    b'web', b'deny_push', default=list,
+)
+coreconfigitem(
+    b'web', b'guessmime', default=False,
+)
+coreconfigitem(
+    b'web', b'hidden', default=False,
 )
-coreconfigitem('web', 'encoding',
-    default=lambda: encoding.encoding,
+coreconfigitem(
+    b'web', b'labels', default=list,
 )
-coreconfigitem('web', 'errorlog',
-    default='-',
+coreconfigitem(
+    b'web', b'logoimg', default=b'hglogo.png',
+)
+coreconfigitem(
+    b'web', b'logourl', default=b'https://mercurial-scm.org/',
 )
-coreconfigitem('web', 'ipv6',
-    default=False,
+coreconfigitem(
+    b'web', b'accesslog', default=b'-',
 )
-coreconfigitem('web', 'maxchanges',
-    default=10,
+coreconfigitem(
+    b'web', b'address', default=b'',
+)
+coreconfigitem(
+    b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
 )
-coreconfigitem('web', 'maxfiles',
-    default=10,
+coreconfigitem(
+    b'web', b'allow_read', default=list,
+)
+coreconfigitem(
+    b'web', b'baseurl', default=None,
 )
-coreconfigitem('web', 'maxshortchanges',
-    default=60,
+coreconfigitem(
+    b'web', b'cacerts', default=None,
+)
+coreconfigitem(
+    b'web', b'certificate', default=None,
 )
-coreconfigitem('web', 'motd',
-    default='',
+coreconfigitem(
+    b'web', b'collapse', default=False,
+)
+coreconfigitem(
+    b'web', b'csp', default=None,
+)
+coreconfigitem(
+    b'web', b'deny_read', default=list,
 )
-coreconfigitem('web', 'name',
-    default=dynamicdefault,
+coreconfigitem(
+    b'web', b'descend', default=True,
 )
-coreconfigitem('web', 'port',
-    default=8000,
+coreconfigitem(
+    b'web', b'description', default=b"",
+)
+coreconfigitem(
+    b'web', b'encoding', default=lambda: encoding.encoding,
 )
-coreconfigitem('web', 'prefix',
-    default='',
+coreconfigitem(
+    b'web', b'errorlog', default=b'-',
 )
-coreconfigitem('web', 'push_ssl',
-    default=True,
+coreconfigitem(
+    b'web', b'ipv6', default=False,
+)
+coreconfigitem(
+    b'web', b'maxchanges', default=10,
 )
-coreconfigitem('web', 'refreshinterval',
-    default=20,
+coreconfigitem(
+    b'web', b'maxfiles', default=10,
 )
-coreconfigitem('web', 'server-header',
-    default=None,
+coreconfigitem(
+    b'web', b'maxshortchanges', default=60,
+)
+coreconfigitem(
+    b'web', b'motd', default=b'',
 )
-coreconfigitem('web', 'static',
-    default=None,
+coreconfigitem(
+    b'web', b'name', default=dynamicdefault,
+)
+coreconfigitem(
+    b'web', b'port', default=8000,
+)
+coreconfigitem(
+    b'web', b'prefix', default=b'',
 )
-coreconfigitem('web', 'staticurl',
-    default=None,
+coreconfigitem(
+    b'web', b'push_ssl', default=True,
 )
-coreconfigitem('web', 'stripes',
-    default=1,
+coreconfigitem(
+    b'web', b'refreshinterval', default=20,
+)
+coreconfigitem(
+    b'web', b'server-header', default=None,
 )
-coreconfigitem('web', 'style',
-    default='paper',
+coreconfigitem(
+    b'web', b'static', default=None,
 )
-coreconfigitem('web', 'templates',
-    default=None,
+coreconfigitem(
+    b'web', b'staticurl', default=None,
+)
+coreconfigitem(
+    b'web', b'stripes', default=1,
 )
-coreconfigitem('web', 'view',
-    default='served',
+coreconfigitem(
+    b'web', b'style', default=b'paper',
+)
+coreconfigitem(
+    b'web', b'templates', default=None,
 )
-coreconfigitem('worker', 'backgroundclose',
-    default=dynamicdefault,
+coreconfigitem(
+    b'web', b'view', default=b'served', experimental=True,
+)
+coreconfigitem(
+    b'worker', b'backgroundclose', default=dynamicdefault,
 )
 # Windows defaults to a limit of 512 open files. A buffer of 128
 # should give us enough headway, hence the default of 512 - 128 = 384 below.
-coreconfigitem('worker', 'backgroundclosemaxqueue',
-    default=384,
+coreconfigitem(
+    b'worker', b'backgroundclosemaxqueue', default=384,
 )
-coreconfigitem('worker', 'backgroundcloseminfilecount',
-    default=2048,
+coreconfigitem(
+    b'worker', b'backgroundcloseminfilecount', default=2048,
 )
-coreconfigitem('worker', 'backgroundclosethreadcount',
-    default=4,
+coreconfigitem(
+    b'worker', b'backgroundclosethreadcount', default=4,
 )
-coreconfigitem('worker', 'enabled',
-    default=True,
+coreconfigitem(
+    b'worker', b'enabled', default=True,
 )
-coreconfigitem('worker', 'numcpus',
-    default=None,
+coreconfigitem(
+    b'worker', b'numcpus', default=None,
 )
 
 # Rebase-related configuration moved to core because other extensions are
 # doing strange things. For example, shelve imports the extension to reuse
 # some bits without formally loading it.
-coreconfigitem('commands', 'rebase.requiredest',
-            default=False,
+coreconfigitem(
+    b'commands', b'rebase.requiredest', default=False,
 )
-coreconfigitem('experimental', 'rebaseskipobsolete',
-    default=True,
+coreconfigitem(
+    b'experimental', b'rebaseskipobsolete', default=True,
 )
-coreconfigitem('rebase', 'singletransaction',
-    default=False,
+coreconfigitem(
+    b'rebase', b'singletransaction', default=False,
 )
-coreconfigitem('rebase', 'experimental.inmemory',
-    default=False,
+coreconfigitem(
+    b'rebase', b'experimental.inmemory', default=False,
 )
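 # Taken together, the registrations in this file settle on a single call
 # shape. A minimal sketch of a new entry in that style (section and names
 # are hypothetical; `alias` and `experimental` are the optional keywords
 # seen above):
 #
 #   coreconfigitem(
 #       b'example-section',
 #       b'example-item',
 #       default=None,
 #       alias=[(b'old-section', b'old-item')],
 #       experimental=True,
 #   )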
--- a/mercurial/context.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/context.py	Mon Oct 21 11:09:48 2019 -0400
@@ -23,7 +23,12 @@
     wdirfilenodeids,
     wdirhex,
 )
+from .pycompat import (
+    getattr,
+    open,
+)
 from . import (
+    copies,
     dagop,
     encoding,
     error,
@@ -48,6 +53,7 @@
 
 propertycache = util.propertycache
 
+
 class basectx(object):
     """A basectx object represents the common logic for its children:
     changectx: read-only context that is already present in the repo,
@@ -97,8 +103,9 @@
         """
         return match
 
-    def _buildstatus(self, other, s, match, listignored, listclean,
-                     listunknown):
+    def _buildstatus(
+        self, other, s, match, listignored, listclean, listunknown
+    ):
         """build a status with respect to another context"""
         # Load earliest manifest first for caching reasons. More specifically,
         # if you have revisions 1000 and 1001, 1001 is probably stored as a
@@ -119,7 +126,7 @@
         deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
         deletedset = set(deleted)
         d = mf1.diff(mf2, match=match, clean=listclean)
-        for fn, value in d.iteritems():
+        for fn, value in pycompat.iteritems(d):
             if fn in deletedset:
                 continue
             if value is None:
@@ -145,15 +152,22 @@
 
         if removed:
             # need to filter files if they are already reported as removed
-            unknown = [fn for fn in unknown if fn not in mf1 and
-                                               (not match or match(fn))]
-            ignored = [fn for fn in ignored if fn not in mf1 and
-                                               (not match or match(fn))]
+            unknown = [
+                fn
+                for fn in unknown
+                if fn not in mf1 and (not match or match(fn))
+            ]
+            ignored = [
+                fn
+                for fn in ignored
+                if fn not in mf1 and (not match or match(fn))
+            ]
             # if they're deleted, don't report them as removed
             removed = [fn for fn in removed if fn not in deletedset]
 
-        return scmutil.status(modified, added, removed, deleted, unknown,
-                              ignored, clean)
+        return scmutil.status(
+            modified, added, removed, deleted, unknown, ignored, clean
+        )
 
     @propertycache
     def substate(self):
@@ -164,18 +178,25 @@
 
     def rev(self):
         return self._rev
+
     def node(self):
         return self._node
+
     def hex(self):
         return hex(self.node())
+
     def manifest(self):
         return self._manifest
+
     def manifestctx(self):
         return self._manifestctx
+
     def repo(self):
         return self._repo
+
     def phasestr(self):
         return phases.phasenames[self.phase()]
+
     def mutable(self):
         return self.phase() > phases.public
 
@@ -184,29 +205,29 @@
 
     def obsolete(self):
         """True if the changeset is obsolete"""
-        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
+        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
 
     def extinct(self):
         """True if the changeset is extinct"""
-        return self.rev() in obsmod.getrevs(self._repo, 'extinct')
+        return self.rev() in obsmod.getrevs(self._repo, b'extinct')
 
     def orphan(self):
         """True if the changeset is not obsolete, but its ancestor is"""
-        return self.rev() in obsmod.getrevs(self._repo, 'orphan')
+        return self.rev() in obsmod.getrevs(self._repo, b'orphan')
 
     def phasedivergent(self):
         """True if the changeset tries to be a successor of a public changeset
 
         Only non-public and non-obsolete changesets may be phase-divergent.
         """
-        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
+        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
 
     def contentdivergent(self):
         """Is a successor of a changeset with multiple possible successor sets
 
         Only non-public and non-obsolete changesets may be content-divergent.
         """
-        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
+        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
 
     def isunstable(self):
         """True if the changeset is either orphan, phase-divergent or
@@ -223,11 +244,11 @@
         """
         instabilities = []
         if self.orphan():
-            instabilities.append('orphan')
+            instabilities.append(b'orphan')
         if self.phasedivergent():
-            instabilities.append('phase-divergent')
+            instabilities.append(b'phase-divergent')
         if self.contentdivergent():
-            instabilities.append('content-divergent')
+            instabilities.append(b'content-divergent')
         return instabilities
 
     def parents(self):
@@ -248,18 +269,22 @@
             try:
                 return self._manifest[path], self._manifest.flags(path)
             except KeyError:
-                raise error.ManifestLookupError(self._node, path,
-                                                _('not found in manifest'))
+                raise error.ManifestLookupError(
+                    self._node, path, _(b'not found in manifest')
+                )
         if r'_manifestdelta' in self.__dict__ or path in self.files():
             if path in self._manifestdelta:
-                return (self._manifestdelta[path],
-                        self._manifestdelta.flags(path))
+                return (
+                    self._manifestdelta[path],
+                    self._manifestdelta.flags(path),
+                )
         mfl = self._repo.manifestlog
         try:
             node, flag = mfl[self._changeset.manifest].find(path)
         except KeyError:
-            raise error.ManifestLookupError(self._node, path,
-                                            _('not found in manifest'))
+            raise error.ManifestLookupError(
+                self._node, path, _(b'not found in manifest')
+            )
 
         return node, flag
 
@@ -270,29 +295,15 @@
         try:
             return self._fileinfo(path)[1]
         except error.LookupError:
-            return ''
+            return b''
 
     @propertycache
     def _copies(self):
-        p1copies = {}
-        p2copies = {}
-        p1 = self.p1()
-        p2 = self.p2()
-        narrowmatch = self._repo.narrowmatch()
-        for dst in self.files():
-            if not narrowmatch(dst) or dst not in self:
-                continue
-            copied = self[dst].renamed()
-            if not copied:
-                continue
-            src, srcnode = copied
-            if src in p1 and p1[src].filenode() == srcnode:
-                p1copies[dst] = src
-            elif src in p2 and p2[src].filenode() == srcnode:
-                p2copies[dst] = src
-        return p1copies, p2copies
+        return copies.computechangesetcopies(self)
+
     def p1copies(self):
         return self._copies[0]
+
     def p2copies(self):
         return self._copies[1]
 
@@ -309,26 +320,59 @@
         '''
         return subrepo.subrepo(self, path, allowwdir=True)
 
-    def match(self, pats=None, include=None, exclude=None, default='glob',
-              listsubrepos=False, badfn=None):
+    def match(
+        self,
+        pats=None,
+        include=None,
+        exclude=None,
+        default=b'glob',
+        listsubrepos=False,
+        badfn=None,
+    ):
         r = self._repo
-        return matchmod.match(r.root, r.getcwd(), pats,
-                              include, exclude, default,
-                              auditor=r.nofsauditor, ctx=self,
-                              listsubrepos=listsubrepos, badfn=badfn)
-
-    def diff(self, ctx2=None, match=None, changes=None, opts=None,
-             losedatafn=None, pathfn=None, copy=None,
-             copysourcematch=None, hunksfilterfn=None):
+        return matchmod.match(
+            r.root,
+            r.getcwd(),
+            pats,
+            include,
+            exclude,
+            default,
+            auditor=r.nofsauditor,
+            ctx=self,
+            listsubrepos=listsubrepos,
+            badfn=badfn,
+        )
+
+    def diff(
+        self,
+        ctx2=None,
+        match=None,
+        changes=None,
+        opts=None,
+        losedatafn=None,
+        pathfn=None,
+        copy=None,
+        copysourcematch=None,
+        hunksfilterfn=None,
+    ):
         """Returns a diff generator for the given contexts and matcher"""
         if ctx2 is None:
             ctx2 = self.p1()
         if ctx2 is not None:
             ctx2 = self._repo[ctx2]
-        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
-                          opts=opts, losedatafn=losedatafn, pathfn=pathfn,
-                          copy=copy, copysourcematch=copysourcematch,
-                          hunksfilterfn=hunksfilterfn)
+        return patch.diff(
+            self._repo,
+            ctx2,
+            self,
+            match=match,
+            changes=changes,
+            opts=opts,
+            losedatafn=losedatafn,
+            pathfn=pathfn,
+            copy=copy,
+            copysourcematch=copysourcematch,
+            hunksfilterfn=hunksfilterfn,
+        )
 
     def dirs(self):
         return self._manifest.dirs()
@@ -336,8 +380,15 @@
     def hasdir(self, dir):
         return self._manifest.hasdir(dir)
 
-    def status(self, other=None, match=None, listignored=False,
-               listclean=False, listunknown=False, listsubrepos=False):
+    def status(
+        self,
+        other=None,
+        match=None,
+        listignored=False,
+        listclean=False,
+        listunknown=False,
+        listsubrepos=False,
+    ):
         """return status of files between two nodes or node and working
         directory.
 
@@ -362,22 +413,23 @@
         # then we'd be done. But the special case of the above call means we
         # just copy the manifest of the parent.
         reversed = False
-        if (not isinstance(ctx1, changectx)
-            and isinstance(ctx2, changectx)):
+        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
             reversed = True
             ctx1, ctx2 = ctx2, ctx1
 
         match = self._repo.narrowmatch(match)
         match = ctx2._matchstatus(ctx1, match)
         r = scmutil.status([], [], [], [], [], [], [])
-        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
-                              listunknown)
+        r = ctx2._buildstatus(
+            ctx1, r, match, listignored, listclean, listunknown
+        )
 
         if reversed:
             # Reverse added and removed. Clear deleted, unknown and ignored as
             # these make no sense to reverse.
-            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
-                               r.clean)
+            r = scmutil.status(
+                r.modified, r.removed, r.added, [], [], [], r.clean
+            )
 
         if listsubrepos:
             for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
@@ -389,21 +441,28 @@
                     # won't contain that subpath. The best we can do is ignore it.
                     rev2 = None
                 submatch = matchmod.subdirmatcher(subpath, match)
-                s = sub.status(rev2, match=submatch, ignored=listignored,
-                               clean=listclean, unknown=listunknown,
-                               listsubrepos=True)
+                s = sub.status(
+                    rev2,
+                    match=submatch,
+                    ignored=listignored,
+                    clean=listclean,
+                    unknown=listunknown,
+                    listsubrepos=True,
+                )
                 for rfiles, sfiles in zip(r, s):
-                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
+                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
 
         for l in r:
             l.sort()
 
         return r
 
+
 class changectx(basectx):
     """A changecontext object makes access to data related to a particular
     changeset convenient. It represents a read-only context already present in
     the repo."""
+
     def __init__(self, repo, rev, node):
         super(changectx, self).__init__(repo)
         self._rev = rev
@@ -454,82 +513,124 @@
             c.description,
             c.extra,
         )
+
     def manifestnode(self):
         return self._changeset.manifest
 
     def user(self):
         return self._changeset.user
+
     def date(self):
         return self._changeset.date
+
     def files(self):
         return self._changeset.files
+
     def filesmodified(self):
         modified = set(self.files())
         modified.difference_update(self.filesadded())
         modified.difference_update(self.filesremoved())
         return sorted(modified)
+
     def filesadded(self):
-        source = self._repo.ui.config('experimental', 'copies.read-from')
-        if (source == 'changeset-only' or
-            (source == 'compatibility' and
-             self._changeset.filesadded is not None)):
-            return self._changeset.filesadded or []
-
-        added = []
-        for f in self.files():
-            if not any(f in p for p in self.parents()):
-                added.append(f)
-        return added
+        filesadded = self._changeset.filesadded
+        compute_on_none = True
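+        # Select where the added-files list comes from: with changeset
+        # sidedata the changelog is authoritative and we never recompute;
+        # otherwise experimental.copies.read-from decides whether we may
+        # fall back to computing the set from the filelogs.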
+        if self._repo.filecopiesmode == b'changeset-sidedata':
+            compute_on_none = False
+        else:
+            source = self._repo.ui.config(b'experimental', b'copies.read-from')
+            if source == b'changeset-only':
+                compute_on_none = False
+            elif source != b'compatibility':
+                # filelog mode, ignore any changelog content
+                filesadded = None
+        if filesadded is None:
+            if compute_on_none:
+                filesadded = copies.computechangesetfilesadded(self)
+            else:
+                filesadded = []
+        return filesadded
+
     def filesremoved(self):
-        source = self._repo.ui.config('experimental', 'copies.read-from')
-        if (source == 'changeset-only' or
-            (source == 'compatibility' and
-             self._changeset.filesremoved is not None)):
-            return self._changeset.filesremoved or []
-
-        removed = []
-        for f in self.files():
-            if f not in self:
-                removed.append(f)
-        return removed
+        filesremoved = self._changeset.filesremoved
+        compute_on_none = True
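+        # same source-selection logic as filesadded(), but for removed files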
+        if self._repo.filecopiesmode == b'changeset-sidedata':
+            compute_on_none = False
+        else:
+            source = self._repo.ui.config(b'experimental', b'copies.read-from')
+            if source == b'changeset-only':
+                compute_on_none = False
+            elif source != b'compatibility':
+                # filelog mode, ignore any changelog content
+                filesremoved = None
+        if filesremoved is None:
+            if compute_on_none:
+                filesremoved = copies.computechangesetfilesremoved(self)
+            else:
+                filesremoved = []
+        return filesremoved
 
     @propertycache
     def _copies(self):
-        source = self._repo.ui.config('experimental', 'copies.read-from')
         p1copies = self._changeset.p1copies
         p2copies = self._changeset.p2copies
-        # If config says to get copy metadata only from changeset, then return
-        # that, defaulting to {} if there was no copy metadata.
-        # In compatibility mode, we return copy data from the changeset if
-        # it was recorded there, and otherwise we fall back to getting it from
-        # the filelogs (below).
-        if (source == 'changeset-only' or
-            (source == 'compatibility' and p1copies is not None)):
-            return p1copies or {}, p2copies or {}
-
-        # Otherwise (config said to read only from filelog, or we are in
-        # compatiblity mode and there is not data in the changeset), we get
-        # the copy metadata from the filelogs.
-        return super(changectx, self)._copies
+        compute_on_none = True
+        if self._repo.filecopiesmode == b'changeset-sidedata':
+            compute_on_none = False
+        else:
+            source = self._repo.ui.config(b'experimental', b'copies.read-from')
+            # If config says to get copy metadata only from changeset, then
+            # return that, defaulting to {} if there was no copy metadata.  In
+            # compatibility mode, we return copy data from the changeset if it
+            # was recorded there, and otherwise we fall back to getting it from
+            # the filelogs (below).
+            #
+            # If we are in compatibility mode and there is no data in the
+            # changeset, we get the copy metadata from the filelogs.
+            #
+            # Otherwise, when the config says to read only from the filelogs,
+            # the copy metadata comes from the filelogs as well.
+            if source == b'changeset-only':
+                compute_on_none = False
+            elif source != b'compatibility':
+                # filelog mode, ignore any changelog content
+                p1copies = p2copies = None
+        if p1copies is None:
+            if compute_on_none:
+                p1copies, p2copies = super(changectx, self)._copies
+            else:
+                p1copies = {}
+        if p2copies is None:
+            p2copies = {}
+        return p1copies, p2copies
+
     def description(self):
         return self._changeset.description
+
     def branch(self):
-        return encoding.tolocal(self._changeset.extra.get("branch"))
+        return encoding.tolocal(self._changeset.extra.get(b"branch"))
+
     def closesbranch(self):
-        return 'close' in self._changeset.extra
+        return b'close' in self._changeset.extra
+
     def extra(self):
         """Return a dict of extra information."""
         return self._changeset.extra
+
     def tags(self):
         """Return a list of byte tag names"""
         return self._repo.nodetags(self._node)
+
     def bookmarks(self):
         """Return a list of byte bookmark names."""
         return self._repo.nodebookmarks(self._node)
+
     def phase(self):
         return self._repo._phasecache.phase(self._repo, self._rev)
+
     def hidden(self):
-        return self._rev in repoview.filterrevs(self._repo, 'visible')
+        return self._rev in repoview.filterrevs(self._repo, b'visible')
 
     def isinmemory(self):
         return False
@@ -559,8 +660,9 @@
         """get a file context from this changeset"""
         if fileid is None:
             fileid = self.filenode(path)
-        return filectx(self._repo, path, fileid=fileid,
-                       changectx=self, filelog=filelog)
+        return filectx(
+            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
+        )
 
     def ancestor(self, c2, warn=False):
         """return the "best" ancestor context of self and c2
@@ -579,7 +681,7 @@
             anc = cahs[0]
         else:
             # experimental config: merge.preferancestor
-            for r in self._repo.ui.configlist('merge', 'preferancestor'):
+            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                 try:
                     ctx = scmutil.revsymbol(self._repo, r)
                 except error.RepoLookupError:
@@ -591,11 +693,20 @@
                 anc = self._repo.changelog.ancestor(self._node, n2)
             if warn:
                 self._repo.ui.status(
-                    (_("note: using %s as ancestor of %s and %s\n") %
-                     (short(anc), short(self._node), short(n2))) +
-                    ''.join(_("      alternatively, use --config "
-                              "merge.preferancestor=%s\n") %
-                            short(n) for n in sorted(cahs) if n != anc))
+                    (
+                        _(b"note: using %s as ancestor of %s and %s\n")
+                        % (short(anc), short(self._node), short(n2))
+                    )
+                    + b''.join(
+                        _(
+                            b"      alternatively, use --config "
+                            b"merge.preferancestor=%s\n"
+                        )
+                        % short(n)
+                        for n in sorted(cahs)
+                        if n != anc
+                    )
+                )
         return self._repo[anc]
 
     def isancestorof(self, other):
@@ -609,10 +720,9 @@
         def bad(fn, msg):
             # The manifest doesn't know about subrepos, so don't complain about
             # paths into valid subrepos.
-            if any(fn == s or fn.startswith(s + '/')
-                   for s in self.substate):
+            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                 return
-            match.bad(fn, _('no such file in rev %s') % self)
+            match.bad(fn, _(b'no such file in rev %s') % self)
 
         m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
         return self._manifest.walk(m)
@@ -620,6 +730,7 @@
     def matches(self, match):
         return self.walk(match)
 
+
 class basefilectx(object):
     """A filecontext object represents the common logic for its children:
     filectx: read-only access to a filerevision that is already present
@@ -628,6 +739,7 @@
                     directory,
     memfilectx: a filecontext that represents files in-memory,
     """
+
     @propertycache
     def _filelog(self):
         return self._repo.file(self._path)
@@ -670,9 +782,9 @@
 
     def __bytes__(self):
         try:
-            return "%s@%s" % (self.path(), self._changectx)
+            return b"%s@%s" % (self.path(), self._changectx)
         except error.LookupError:
-            return "%s@???" % self.path()
+            return b"%s@???" % self.path()
 
     __str__ = encoding.strmethod(__bytes__)
 
@@ -687,8 +799,11 @@
 
     def __eq__(self, other):
         try:
-            return (type(self) == type(other) and self._path == other._path
-                    and self._filenode == other._filenode)
+            return (
+                type(self) == type(other)
+                and self._path == other._path
+                and self._filenode == other._filenode
+            )
         except AttributeError:
             return False
 
@@ -697,53 +812,77 @@
 
     def filerev(self):
         return self._filerev
+
     def filenode(self):
         return self._filenode
+
     @propertycache
     def _flags(self):
         return self._changectx.flags(self._path)
+
     def flags(self):
         return self._flags
+
     def filelog(self):
         return self._filelog
+
     def rev(self):
         return self._changeid
+
     def linkrev(self):
         return self._filelog.linkrev(self._filerev)
+
     def node(self):
         return self._changectx.node()
+
     def hex(self):
         return self._changectx.hex()
+
     def user(self):
         return self._changectx.user()
+
     def date(self):
         return self._changectx.date()
+
     def files(self):
         return self._changectx.files()
+
     def description(self):
         return self._changectx.description()
+
     def branch(self):
         return self._changectx.branch()
+
     def extra(self):
         return self._changectx.extra()
+
     def phase(self):
         return self._changectx.phase()
+
     def phasestr(self):
         return self._changectx.phasestr()
+
     def obsolete(self):
         return self._changectx.obsolete()
+
     def instabilities(self):
         return self._changectx.instabilities()
+
     def manifest(self):
         return self._changectx.manifest()
+
     def changectx(self):
         return self._changectx
+
     def renamed(self):
         return self._copied
+
     def copysource(self):
         return self._copied and self._copied[0]
+
     def repo(self):
         return self._repo
+
     def size(self):
         return len(self.data())
 
@@ -755,10 +894,12 @@
             return stringutil.binary(self.data())
         except IOError:
             return False
+
     def isexec(self):
-        return 'x' in self.flags()
+        return b'x' in self.flags()
+
     def islink(self):
-        return 'l' in self.flags()
+        return b'l' in self.flags()
 
     def isabsent(self):
         """whether this filectx represents a file not in self._changectx
@@ -768,6 +909,7 @@
         return False
 
     _customcmp = False
+
     def cmp(self, fctx):
         """compare with other file context
 
@@ -778,7 +920,8 @@
 
         if self._filenode is None:
             raise error.ProgrammingError(
-                'filectx.cmp() must be reimplemented if not backed by revlog')
+                b'filectx.cmp() must be reimplemented if not backed by revlog'
+            )
 
         if fctx._filenode is None:
             if self._repo._encodefilterpats:
@@ -823,12 +966,11 @@
         if srcrev is None:
             # wctx case, used by workingfilectx during mergecopy
             revs = [p.rev() for p in self._repo[None].parents()]
-            inclusive = True # we skipped the real (revless) source
+            inclusive = True  # we skipped the real (revless) source
         else:
             revs = [srcrev]
         if memberanc is None:
-            memberanc = iteranc = cl.ancestors(revs, lkr,
-                                               inclusive=inclusive)
+            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
         # check if this linkrev is an ancestor of srcrev
         if lkr not in memberanc:
             if iteranc is None:
@@ -838,8 +980,8 @@
             for a in iteranc:
                 if stoprev is not None and a < stoprev:
                     return None
-                ac = cl.read(a) # get changeset data (we avoid object creation)
-                if path in ac[3]: # checking the 'files' field.
+                ac = cl.read(a)  # get changeset data (we avoid object creation)
+                if path in ac[3]:  # checking the 'files' field.
                     # The file has been touched, check if the content is
                     # similar to the one we search for.
                     if fnode == mfl[ac[0]].readfast().get(path):
@@ -990,14 +1132,16 @@
             if base.rev() is None:
                 # wctx is not inclusive, but works because _ancestrycontext
                 # is used to test filelog revisions
-                ac = cl.ancestors([p.rev() for p in base.parents()],
-                                  inclusive=True)
+                ac = cl.ancestors(
+                    [p.rev() for p in base.parents()], inclusive=True
+                )
             else:
                 ac = cl.ancestors([base.rev()], inclusive=True)
             base._ancestrycontext = ac
 
-        return dagop.annotate(base, parents, skiprevs=skiprevs,
-                              diffopts=diffopts)
+        return dagop.annotate(
+            base, parents, skiprevs=skiprevs, diffopts=diffopts
+        )
 
     def ancestors(self, followfirst=False):
         visit = {}
@@ -1022,21 +1166,32 @@
         """
         return self._repo.wwritedata(self.path(), self.data())
 
+
 class filectx(basefilectx):
     """A filecontext object makes access to data related to a particular
        filerevision convenient."""
-    def __init__(self, repo, path, changeid=None, fileid=None,
-                 filelog=None, changectx=None):
+
+    def __init__(
+        self,
+        repo,
+        path,
+        changeid=None,
+        fileid=None,
+        filelog=None,
+        changectx=None,
+    ):
         """changeid must be a revision number, if specified.
            fileid can be a file revision or node."""
         self._repo = repo
         self._path = path
 
-        assert (changeid is not None
-                or fileid is not None
-                or changectx is not None), (
-                    "bad args: changeid=%r, fileid=%r, changectx=%r"
-                    % (changeid, fileid, changectx))
+        assert (
+            changeid is not None or fileid is not None or changectx is not None
+        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
+            changeid,
+            fileid,
+            changectx,
+        )
 
         if filelog is not None:
             self._filelog = filelog
@@ -1074,11 +1229,16 @@
     def filectx(self, fileid, changeid=None):
         '''opens an arbitrary revision of the file without
         opening a new filelog'''
-        return filectx(self._repo, self._path, fileid=fileid,
-                       filelog=self._filelog, changeid=changeid)
+        return filectx(
+            self._repo,
+            self._path,
+            fileid=fileid,
+            filelog=self._filelog,
+            changeid=changeid,
+        )
 
     def rawdata(self):
-        return self._filelog.revision(self._filenode, raw=True)
+        return self._filelog.rawdata(self._filenode)
 
     def rawflags(self):
         """low-level revlog flags"""
@@ -1088,10 +1248,12 @@
         try:
             return self._filelog.read(self._filenode)
         except error.CensoredNodeError:
-            if self._repo.ui.config("censor", "policy") == "ignore":
-                return ""
-            raise error.Abort(_("censored node: %s") % short(self._filenode),
-                             hint=_("set censor.policy to ignore errors"))
+            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
+                return b""
+            raise error.Abort(
+                _(b"censored node: %s") % short(self._filenode),
+                hint=_(b"set censor.policy to ignore errors"),
+            )
 
     def size(self):
         return self._filelog.size(self._filerev)
@@ -1125,14 +1287,26 @@
     def children(self):
         # hard for renames
         c = self._filelog.children(self._filenode)
-        return [filectx(self._repo, self._path, fileid=x,
-                        filelog=self._filelog) for x in c]
+        return [
+            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
+            for x in c
+        ]
+
 
 class committablectx(basectx):
     """A committablectx object provides common functionality for a context that
     wants the ability to commit, e.g. workingctx or memctx."""
-    def __init__(self, repo, text="", user=None, date=None, extra=None,
-                 changes=None, branch=None):
+
+    def __init__(
+        self,
+        repo,
+        text=b"",
+        user=None,
+        date=None,
+        extra=None,
+        changes=None,
+        branch=None,
+    ):
         super(committablectx, self).__init__(repo)
         self._rev = None
         self._node = None
@@ -1148,12 +1322,12 @@
         if extra:
             self._extra = extra.copy()
         if branch is not None:
-            self._extra['branch'] = encoding.fromlocal(branch)
-        if not self._extra.get('branch'):
-            self._extra['branch'] = 'default'
+            self._extra[b'branch'] = encoding.fromlocal(branch)
+        if not self._extra.get(b'branch'):
+            self._extra[b'branch'] = b'default'
 
     def __bytes__(self):
-        return bytes(self._parents[0]) + "+"
+        return bytes(self._parents[0]) + b"+"
 
     __str__ = encoding.strmethod(__bytes__)
 
@@ -1173,7 +1347,7 @@
     @propertycache
     def _date(self):
         ui = self._repo.ui
-        date = ui.configdate('devel', 'default-date')
+        date = ui.configdate(b'devel', b'default-date')
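+        # devel.default-date (used by the test suite for reproducible
+        # output) takes precedence over the current time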
         if date is None:
             date = dateutil.makedate()
         return date
@@ -1183,31 +1357,43 @@
 
     def manifestnode(self):
         return None
+
     def user(self):
         return self._user or self._repo.ui.username()
+
     def date(self):
         return self._date
+
     def description(self):
         return self._text
+
     def files(self):
-        return sorted(self._status.modified + self._status.added +
-                      self._status.removed)
+        return sorted(
+            self._status.modified + self._status.added + self._status.removed
+        )
+
     def modified(self):
         return self._status.modified
+
     def added(self):
         return self._status.added
+
     def removed(self):
         return self._status.removed
+
     def deleted(self):
         return self._status.deleted
+
     filesmodified = modified
     filesadded = added
     filesremoved = removed
 
     def branch(self):
-        return encoding.tolocal(self._extra['branch'])
+        return encoding.tolocal(self._extra[b'branch'])
+
     def closesbranch(self):
-        return 'close' in self._extra
+        return b'close' in self._extra
+
     def extra(self):
         return self._extra
 
@@ -1224,7 +1410,7 @@
         return b
 
     def phase(self):
-        phase = phases.draft # default phase to draft
+        phase = phases.draft  # default phase to draft
         for p in self.parents():
             phase = max(phase, p.phase())
         return phase
@@ -1237,13 +1423,14 @@
 
     def ancestor(self, c2):
         """return the "best" ancestor context of self and c2"""
-        return self._parents[0].ancestor(c2) # punt on two parents for now
+        return self._parents[0].ancestor(c2)  # punt on two parents for now
 
     def ancestors(self):
         for p in self._parents:
             yield p
         for a in self._repo.changelog.ancestors(
-            [p.rev() for p in self._parents]):
+            [p.rev() for p in self._parents]
+        ):
             yield self._repo[a]
 
     def markcommitted(self, node):
@@ -1259,6 +1446,7 @@
     def dirty(self, missing=False, merge=True, branch=True):
         return False
 
+
 class workingctx(committablectx):
     """A workingctx object makes access to data related to
     the current working directory convenient.
@@ -1268,25 +1456,28 @@
     changes - a list of file lists as returned by localrepo.status()
                or None to use the repository status.
     """
-    def __init__(self, repo, text="", user=None, date=None, extra=None,
-                 changes=None):
+
+    def __init__(
+        self, repo, text=b"", user=None, date=None, extra=None, changes=None
+    ):
         branch = None
-        if not extra or 'branch' not in extra:
+        if not extra or b'branch' not in extra:
             try:
                 branch = repo.dirstate.branch()
             except UnicodeDecodeError:
-                raise error.Abort(_('branch name not in UTF-8!'))
-        super(workingctx, self).__init__(repo, text, user, date, extra, changes,
-                                         branch=branch)
+                raise error.Abort(_(b'branch name not in UTF-8!'))
+        super(workingctx, self).__init__(
+            repo, text, user, date, extra, changes, branch=branch
+        )
 
     def __iter__(self):
         d = self._repo.dirstate
         for f in d:
-            if d[f] != 'r':
+            if d[f] != b'r':
                 yield f
 
     def __contains__(self, key):
-        return self._repo.dirstate[key] not in "?r"
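+        # a file is "in" the working context unless the dirstate marks it
+        # as unknown (b'?') or removed (b'r')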
+        return self._repo.dirstate[key] not in b"?r"
 
     def hex(self):
         return wdirhex
@@ -1314,9 +1505,11 @@
         if len(parents) < 2:
             # when we have one parent, it's easy: copy from parent
             man = parents[0].manifest()
+
             def func(f):
                 f = copiesget(f, f)
                 return man.flags(f)
+
         else:
             # merges are tricky: we try to reconstruct the unstored
             # result from the merge (issue1802)
@@ -1325,7 +1518,7 @@
             m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
 
             def func(f):
-                f = copiesget(f, f) # may be wrong for merges with copies
+                f = copiesget(f, f)  # may be wrong for merges with copies
                 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
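+                # keep whichever flag diverged from the ancestor; if both
+                # sides changed it differently, fall through and punt below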
                 if fl1 == fl2:
                     return fl1
@@ -1333,7 +1526,7 @@
                     return fl2
                 if fl2 == fla:
                     return fl1
-                return '' # punt for conflicts
+                return b''  # punt for conflicts
 
         return func
 
@@ -1346,31 +1539,36 @@
             try:
                 return self._manifest.flags(path)
             except KeyError:
-                return ''
+                return b''
 
         try:
             return self._flagfunc(path)
         except OSError:
-            return ''
+            return b''
 
     def filectx(self, path, filelog=None):
         """get a file context from the working directory"""
-        return workingfilectx(self._repo, path, workingctx=self,
-                              filelog=filelog)
+        return workingfilectx(
+            self._repo, path, workingctx=self, filelog=filelog
+        )
 
     def dirty(self, missing=False, merge=True, branch=True):
-        "check whether a working directory is modified"
+        b"check whether a working directory is modified"
         # check subrepos first
         for s in sorted(self.substate):
             if self.sub(s).dirty(missing=missing):
                 return True
         # check current working dir
-        return ((merge and self.p2()) or
-                (branch and self.branch() != self.p1().branch()) or
-                self.modified() or self.added() or self.removed() or
-                (missing and self.deleted()))
-
-    def add(self, list, prefix=""):
+        return (
+            (merge and self.p2())
+            or (branch and self.branch() != self.p1().branch())
+            or self.modified()
+            or self.added()
+            or self.removed()
+            or (missing and self.deleted())
+        )
+
+    def add(self, list, prefix=b""):
         with self._repo.wlock():
             ui, ds = self._repo.ui, self._repo.dirstate
             uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
@@ -1384,38 +1582,47 @@
                 try:
                     st = lstat(f)
                 except OSError:
-                    ui.warn(_("%s does not exist!\n") % uipath(f))
+                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                     rejected.append(f)
                     continue
-                limit = ui.configbytes('ui', 'large-file-limit')
+                limit = ui.configbytes(b'ui', b'large-file-limit')
                 if limit != 0 and st.st_size > limit:
-                    ui.warn(_("%s: up to %d MB of RAM may be required "
-                              "to manage this file\n"
-                              "(use 'hg revert %s' to cancel the "
-                              "pending addition)\n")
-                            % (f, 3 * st.st_size // 1000000, uipath(f)))
+                    ui.warn(
+                        _(
+                            b"%s: up to %d MB of RAM may be required "
+                            b"to manage this file\n"
+                            b"(use 'hg revert %s' to cancel the "
+                            b"pending addition)\n"
+                        )
+                        % (uipath(f), 3 * st.st_size // 1000000, uipath(f))
+                    )
                 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
-                    ui.warn(_("%s not added: only files and symlinks "
-                              "supported currently\n") % uipath(f))
+                    ui.warn(
+                        _(
+                            b"%s not added: only files and symlinks "
+                            b"supported currently\n"
+                        )
+                        % uipath(f)
+                    )
                     rejected.append(f)
-                elif ds[f] in 'amn':
-                    ui.warn(_("%s already tracked!\n") % uipath(f))
-                elif ds[f] == 'r':
+                elif ds[f] in b'amn':
+                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
+                elif ds[f] == b'r':
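+                    # previously marked removed: resurrect it rather than
+                    # re-adding from scratch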
                     ds.normallookup(f)
                 else:
                     ds.add(f)
             return rejected
 
-    def forget(self, files, prefix=""):
+    def forget(self, files, prefix=b""):
         with self._repo.wlock():
             ds = self._repo.dirstate
             uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
             rejected = []
             for f in files:
                 if f not in ds:
-                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
+                    self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
                     rejected.append(f)
-                elif ds[f] != 'a':
+                elif ds[f] != b'a':
                     ds.remove(f)
                 else:
                     ds.drop(f)
@@ -1427,33 +1634,51 @@
         except OSError as err:
             if err.errno != errno.ENOENT:
                 raise
-            self._repo.ui.warn(_("%s does not exist!\n")
-                               % self._repo.dirstate.pathto(dest))
+            self._repo.ui.warn(
+                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
+            )
             return
         if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
-            self._repo.ui.warn(_("copy failed: %s is not a file or a "
-                                 "symbolic link\n")
-                               % self._repo.dirstate.pathto(dest))
+            self._repo.ui.warn(
+                _(b"copy failed: %s is not a file or a symbolic link\n")
+                % self._repo.dirstate.pathto(dest)
+            )
         else:
             with self._repo.wlock():
                 ds = self._repo.dirstate
-                if ds[dest] in '?':
+                if ds[dest] in b'?':
                     ds.add(dest)
-                elif ds[dest] in 'r':
+                elif ds[dest] in b'r':
                     ds.normallookup(dest)
                 ds.copy(source, dest)
 
-    def match(self, pats=None, include=None, exclude=None, default='glob',
-              listsubrepos=False, badfn=None):
+    def match(
+        self,
+        pats=None,
+        include=None,
+        exclude=None,
+        default=b'glob',
+        listsubrepos=False,
+        badfn=None,
+    ):
         r = self._repo
 
         # Only a case-insensitive filesystem needs magic to translate user input
         # to actual case in the filesystem.
         icasefs = not util.fscasesensitive(r.root)
-        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
-                              default, auditor=r.auditor, ctx=self,
-                              listsubrepos=listsubrepos, badfn=badfn,
-                              icasefs=icasefs)
+        return matchmod.match(
+            r.root,
+            r.getcwd(),
+            pats,
+            include,
+            exclude,
+            default,
+            auditor=r.auditor,
+            ctx=self,
+            listsubrepos=listsubrepos,
+            badfn=badfn,
+            icasefs=icasefs,
+        )
 
     def _filtersuspectsymlink(self, files):
         if not files or self._repo.dirstate._checklink:
@@ -1465,12 +1690,17 @@
         # symlink
         sane = []
         for f in files:
-            if self.flags(f) == 'l':
+            if self.flags(f) == b'l':
                 d = self[f].data()
-                if (d == '' or len(d) >= 1024 or '\n' in d
-                    or stringutil.binary(d)):
-                    self._repo.ui.debug('ignoring suspect symlink placeholder'
-                                        ' "%s"\n' % f)
+                if (
+                    d == b''
+                    or len(d) >= 1024
+                    or b'\n' in d
+                    or stringutil.binary(d)
+                ):
+                    self._repo.ui.debug(
+                        b'ignoring suspect symlink placeholder "%s"\n' % f
+                    )
                     continue
             sane.append(f)
         return sane
@@ -1489,8 +1719,11 @@
             try:
                 # This will return True for a file that got replaced by a
                 # directory in the interim, but fixing that is pretty hard.
-                if (f not in pctx or self.flags(f) != pctx.flags(f)
-                    or pctx[f].cmp(self[f])):
+                if (
+                    f not in pctx
+                    or self.flags(f) != pctx.flags(f)
+                    or pctx[f].cmp(self[f])
+                ):
                     modified.append(f)
                 else:
                     fixup.append(f)
@@ -1537,8 +1770,9 @@
                         # consistency, because .hg/dirstate was
                         # already changed simultaneously after last
                         # caching (see also issue5584 for detail)
-                        self._repo.ui.debug('skip updating dirstate: '
-                                            'identity mismatch\n')
+                        self._repo.ui.debug(
+                            b'skip updating dirstate: identity mismatch\n'
+                        )
             except error.LockError:
                 pass
             finally:
@@ -1548,10 +1782,11 @@
     def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
         '''Gets the status from the dirstate -- internal use only.'''
         subrepos = []
-        if '.hgsub' in self:
+        if b'.hgsub' in self:
             subrepos = sorted(self.substate)
-        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
-                                            clean=clean, unknown=unknown)
+        cmp, s = self._repo.dirstate.status(
+            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
+        )
 
         # check for any possibly clean files
         fixup = []
@@ -1569,8 +1804,9 @@
             # cache for performance
             if s.unknown or s.ignored or s.clean:
                 # "_status" is cached with list*=False in the normal route
-                self._status = scmutil.status(s.modified, s.added, s.removed,
-                                              s.deleted, [], [], [])
+                self._status = scmutil.status(
+                    s.modified, s.added, s.removed, s.deleted, [], [], []
+                )
             else:
                 self._status = s
 
@@ -1583,9 +1819,10 @@
         parents = self._repo.dirstate.parents()
         p1manifest = self._repo[parents[0]].manifest()
         p2manifest = self._repo[parents[1]].manifest()
+        changedset = set(self.added()) | set(self.modified())
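+        # a dirstate copy record only matters if its destination was
+        # actually added or modified in the working directory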
         narrowmatch = self._repo.narrowmatch()
         for dst, src in self._repo.dirstate.copies().items():
-            if not narrowmatch(dst):
+            if dst not in changedset or not narrowmatch(dst):
                 continue
             if src in p1manifest:
                 p1copies[dst] = src
@@ -1611,8 +1848,10 @@
         man = parents[0].manifest().copy()
 
         ff = self._flagfunc
-        for i, l in ((addednodeid, status.added),
-                     (modifiednodeid, status.modified)):
+        for i, l in (
+            (addednodeid, status.added),
+            (modifiednodeid, status.modified),
+        ):
             for f in l:
                 man[f] = i
                 try:
@@ -1626,8 +1865,9 @@
 
         return man
 
-    def _buildstatus(self, other, s, match, listignored, listclean,
-                     listunknown):
+    def _buildstatus(
+        self, other, s, match, listignored, listclean, listunknown
+    ):
         """build a status with respect to another context
 
         This includes logic for maintaining the fast path of status when
@@ -1640,10 +1880,10 @@
         # might have accidentally ended up with the entire contents of the file
         # they are supposed to be linking to.
         s.modified[:] = self._filtersuspectsymlink(s.modified)
-        if other != self._repo['.']:
-            s = super(workingctx, self)._buildstatus(other, s, match,
-                                                     listignored, listclean,
-                                                     listunknown)
+        if other != self._repo[b'.']:
+            s = super(workingctx, self)._buildstatus(
+                other, s, match, listignored, listclean, listunknown
+            )
         return s
 
     def _matchstatus(self, other, match):
@@ -1656,26 +1896,34 @@
         If we aren't comparing against the working directory's parent, then we
         just use the default match object sent to us.
         """
-        if other != self._repo['.']:
+        if other != self._repo[b'.']:
+
             def bad(f, msg):
                 # 'f' may be a directory pattern from 'match.files()',
                 # so 'f not in ctx1' is not enough
                 if f not in other and not other.hasdir(f):
-                    self._repo.ui.warn('%s: %s\n' %
-                                       (self._repo.dirstate.pathto(f), msg))
+                    self._repo.ui.warn(
+                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
+                    )
+
             match.bad = bad
         return match
 
     def walk(self, match):
         '''Generates matching file names.'''
-        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
-                                               subrepos=sorted(self.substate),
-                                               unknown=True, ignored=False))
+        return sorted(
+            self._repo.dirstate.walk(
+                self._repo.narrowmatch(match),
+                subrepos=sorted(self.substate),
+                unknown=True,
+                ignored=False,
+            )
+        )
 
     def matches(self, match):
         match = self._repo.narrowmatch(match)
         ds = self._repo.dirstate
-        return sorted(f for f in ds.matches(match) if ds[f] != 'r')
+        return sorted(f for f in ds.matches(match) if ds[f] != b'r')
 
     def markcommitted(self, node):
         with self._repo.dirstate.parentchange():
@@ -1692,9 +1940,11 @@
 
         sparse.aftercommit(self._repo, node)
 
+
 class committablefilectx(basefilectx):
     """A committablefilectx provides common functionality for a file context
     that wants the ability to commit, e.g. workingfilectx or memfilectx."""
+
     def __init__(self, repo, path, filelog=None, ctx=None):
         self._repo = repo
         self._path = path
@@ -1723,6 +1973,7 @@
 
     def parents(self):
         '''return parent filectxs, following copies if necessary'''
+
         def filenode(ctx, path):
             return ctx._manifest.get(path, nullid)
 
@@ -1739,15 +1990,20 @@
         for pc in pcl[1:]:
             pl.append((path, filenode(pc, path), fl))
 
-        return [self._parentfilectx(p, fileid=n, filelog=l)
-                for p, n, l in pl if n != nullid]
+        return [
+            self._parentfilectx(p, fileid=n, filelog=l)
+            for p, n, l in pl
+            if n != nullid
+        ]
 
     def children(self):
         return []
 
+
 class workingfilectx(committablefilectx):
     """A workingfilectx object makes access to data related to a particular
        file in the working directory convenient."""
+
     def __init__(self, repo, path, filelog=None, workingctx=None):
         super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
 
@@ -1757,13 +2013,16 @@
 
     def data(self):
         return self._repo.wread(self._path)
+
     def copysource(self):
         return self._repo.dirstate.copied(self._path)
 
     def size(self):
         return self._repo.wvfs.lstat(self._path).st_size
+
     def lstat(self):
         return self._repo.wvfs.lstat(self._path)
+
     def date(self):
         t, tz = self._changectx.date()
         try:
@@ -1793,15 +2052,16 @@
 
     def remove(self, ignoremissing=False):
         """wraps unlink for a repo's working directory"""
-        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
-        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
-                                   rmdir=rmdir)
+        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
+        self._repo.wvfs.unlinkpath(
+            self._path, ignoremissing=ignoremissing, rmdir=rmdir
+        )
 
     def write(self, data, flags, backgroundclose=False, **kwargs):
         """wraps repo.wwrite"""
-        return self._repo.wwrite(self._path, data, flags,
-                                 backgroundclose=backgroundclose,
-                                 **kwargs)
+        return self._repo.wwrite(
+            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
+        )
 
     def markcopied(self, src):
         """marks this file a copy of `src`"""
@@ -1814,7 +2074,9 @@
         wvfs = self._repo.wvfs
         f = self._path
         wvfs.audit(f)
-        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
+        if self._repo.ui.configbool(
+            b'experimental', b'merge.checkpathconflicts'
+        ):
             # remove files under the directory as they should already be
             # warned and backed up
             if wvfs.isdir(f) and not wvfs.islink(f):
@@ -1831,6 +2093,7 @@
     def setflags(self, l, x):
         self._repo.wvfs.setflags(self._path, l, x)
 
+
 class overlayworkingctx(committablectx):
     """Wraps another mutable context with a write-back cache that can be
     converted into a commit context.
@@ -1856,19 +2119,20 @@
         # Drop old manifest cache as it is now out of date.
         # This is necessary when, e.g., rebasing several nodes with one
         # ``overlayworkingctx`` (e.g. with --collapse).
-        util.clearcachedproperty(self, '_manifest')
+        util.clearcachedproperty(self, b'_manifest')
 
     def data(self, path):
         if self.isdirty(path):
-            if self._cache[path]['exists']:
-                if self._cache[path]['data'] is not None:
-                    return self._cache[path]['data']
+            if self._cache[path][b'exists']:
+                if self._cache[path][b'data'] is not None:
+                    return self._cache[path][b'data']
                 else:
                     # Must fallback here, too, because we only set flags.
                     return self._wrappedctx[path].data()
             else:
-                raise error.ProgrammingError("No such file or directory: %s" %
-                                             path)
+                raise error.ProgrammingError(
+                    b"No such file or directory: %s" % path
+                )
         else:
             return self._wrappedctx[path].data()
 
@@ -1891,23 +2155,33 @@
     @propertycache
     def _flagfunc(self):
         def f(path):
-            return self._cache[path]['flags']
+            return self._cache[path][b'flags']
+
         return f
 
     def files(self):
         return sorted(self.added() + self.modified() + self.removed())
 
     def modified(self):
-        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
-                self._existsinparent(f)]
+        return [
+            f
+            for f in self._cache.keys()
+            if self._cache[f][b'exists'] and self._existsinparent(f)
+        ]
 
     def added(self):
-        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
-                not self._existsinparent(f)]
+        return [
+            f
+            for f in self._cache.keys()
+            if self._cache[f][b'exists'] and not self._existsinparent(f)
+        ]
 
     def removed(self):
-        return [f for f in self._cache.keys() if
-                not self._cache[f]['exists'] and self._existsinparent(f)]
+        return [
+            f
+            for f in self._cache.keys()
+            if not self._cache[f][b'exists'] and self._existsinparent(f)
+        ]
 
     def p1copies(self):
         copies = self._wrappedctx.p1copies().copy()
@@ -1915,8 +2189,8 @@
         for f in self._cache.keys():
             if not narrowmatch(f):
                 continue
-            copies.pop(f, None) # delete if it exists
-            source = self._cache[f]['copied']
+            copies.pop(f, None)  # delete if it exists
+            source = self._cache[f][b'copied']
             if source:
                 copies[f] = source
         return copies
@@ -1927,8 +2201,8 @@
         for f in self._cache.keys():
             if not narrowmatch(f):
                 continue
-            copies.pop(f, None) # delete if it exists
-            source = self._cache[f]['copied']
+            copies.pop(f, None)  # delete if it exists
+            source = self._cache[f][b'copied']
             if source:
                 copies[f] = source
         return copies
@@ -1938,33 +2212,39 @@
 
     def filedate(self, path):
         if self.isdirty(path):
-            return self._cache[path]['date']
+            return self._cache[path][b'date']
         else:
             return self._wrappedctx[path].date()
 
     def markcopied(self, path, origin):
-        self._markdirty(path, exists=True, date=self.filedate(path),
-                        flags=self.flags(path), copied=origin)
+        self._markdirty(
+            path,
+            exists=True,
+            date=self.filedate(path),
+            flags=self.flags(path),
+            copied=origin,
+        )
 
     def copydata(self, path):
         if self.isdirty(path):
-            return self._cache[path]['copied']
+            return self._cache[path][b'copied']
         else:
             return None
 
     def flags(self, path):
         if self.isdirty(path):
-            if self._cache[path]['exists']:
-                return self._cache[path]['flags']
+            if self._cache[path][b'exists']:
+                return self._cache[path][b'flags']
             else:
-                raise error.ProgrammingError("No such file or directory: %s" %
-                                             self._path)
+                raise error.ProgrammingError(
+                    b"No such file or directory: %s" % self._path
+                )
         else:
             return self._wrappedctx[path].flags()
 
     def __contains__(self, key):
         if key in self._cache:
-            return self._cache[key]['exists']
+            return self._cache[key][b'exists']
         return key in self.p1()
 
     def _existsinparent(self, path):
@@ -1984,23 +2264,26 @@
         IMM, we'll never check that a path is actually writable -- e.g., because
         it adds `a/foo`, but `a` is actually a file in the other commit.
         """
+
         def fail(path, component):
             # p1() is the base and we're receiving "writes" for p2()'s
             # files.
-            if 'l' in self.p1()[component].flags():
-                raise error.Abort("error: %s conflicts with symlink %s "
-                                  "in %d." % (path, component,
-                                              self.p1().rev()))
+            if b'l' in self.p1()[component].flags():
+                raise error.Abort(
+                    b"error: %s conflicts with symlink %s "
+                    b"in %d." % (path, component, self.p1().rev())
+                )
             else:
-                raise error.Abort("error: '%s' conflicts with file '%s' in "
-                                  "%d." % (path, component,
-                                           self.p1().rev()))
+                raise error.Abort(
+                    b"error: '%s' conflicts with file '%s' in "
+                    b"%d." % (path, component, self.p1().rev())
+                )
 
         # Test that each new directory to be created to write this path from p2
         # is not a file in p1.
-        components = path.split('/')
+        components = path.split(b'/')
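+        # check every proper prefix of the path (b'', b'a', b'a/b', ...);
+        # if any prefix already exists in this context, the write conflicts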
         for i in pycompat.xrange(len(components)):
-            component = "/".join(components[0:i])
+            component = b"/".join(components[0:i])
             if component in self:
                 fail(path, component)
 
@@ -2016,27 +2299,28 @@
             mfiles = [m for m in mfiles if m in self]
             if not mfiles:
                 return
-            raise error.Abort("error: file '%s' cannot be written because "
-                              " '%s/' is a directory in %s (containing %d "
-                              "entries: %s)"
-                              % (path, path, self.p1(), len(mfiles),
-                                 ', '.join(mfiles)))
-
-    def write(self, path, data, flags='', **kwargs):
+            raise error.Abort(
+                b"error: file '%s' cannot be written because "
+                b" '%s/' is a directory in %s (containing %d "
+                b"entries: %s)"
+                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
+            )
+
+    def write(self, path, data, flags=b'', **kwargs):
         if data is None:
-            raise error.ProgrammingError("data must be non-None")
+            raise error.ProgrammingError(b"data must be non-None")
         self._auditconflicts(path)
-        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
-                        flags=flags)
+        self._markdirty(
+            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
+        )
 
     def setflags(self, path, l, x):
-        flag = ''
+        flag = b''
         if l:
-            flag = 'l'
+            flag = b'l'
         elif x:
-            flag = 'x'
-        self._markdirty(path, exists=True, date=dateutil.makedate(),
-                        flags=flag)
+            flag = b'x'
+        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
 
     def remove(self, path):
         self._markdirty(path, exists=False)
@@ -2048,32 +2332,43 @@
         if self.isdirty(path):
             # If this path exists and is a symlink, "follow" it by calling
             # exists on the destination path.
-            if (self._cache[path]['exists'] and
-                        'l' in self._cache[path]['flags']):
-                return self.exists(self._cache[path]['data'].strip())
+            if (
+                self._cache[path][b'exists']
+                and b'l' in self._cache[path][b'flags']
+            ):
+                return self.exists(self._cache[path][b'data'].strip())
             else:
-                return self._cache[path]['exists']
+                return self._cache[path][b'exists']
 
         return self._existsinparent(path)
 
     def lexists(self, path):
         """lexists returns True if the path exists"""
         if self.isdirty(path):
-            return self._cache[path]['exists']
+            return self._cache[path][b'exists']
 
         return self._existsinparent(path)
 
     def size(self, path):
         if self.isdirty(path):
-            if self._cache[path]['exists']:
-                return len(self._cache[path]['data'])
+            if self._cache[path][b'exists']:
+                return len(self._cache[path][b'data'])
             else:
-                raise error.ProgrammingError("No such file or directory: %s" %
-                                             self._path)
+                raise error.ProgrammingError(
+                    b"No such file or directory: %s" % self._path
+                )
         return self._wrappedctx[path].size()
 
-    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
-                 user=None, editor=None):
+    def tomemctx(
+        self,
+        text,
+        branch=None,
+        extra=None,
+        date=None,
+        parents=None,
+        user=None,
+        editor=None,
+    ):
         """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
         committed.
 
@@ -2093,19 +2388,35 @@
             parents = (self._repo[parents[0]], self._repo[parents[1]])
 
         files = self.files()
+
         def getfile(repo, memctx, path):
-            if self._cache[path]['exists']:
-                return memfilectx(repo, memctx, path,
-                                  self._cache[path]['data'],
-                                  'l' in self._cache[path]['flags'],
-                                  'x' in self._cache[path]['flags'],
-                                  self._cache[path]['copied'])
+            if self._cache[path][b'exists']:
+                return memfilectx(
+                    repo,
+                    memctx,
+                    path,
+                    self._cache[path][b'data'],
+                    b'l' in self._cache[path][b'flags'],
+                    b'x' in self._cache[path][b'flags'],
+                    self._cache[path][b'copied'],
+                )
             else:
                 # Returning None, but including the path in `files`, is
                 # necessary for memctx to register a deletion.
                 return None
-        return memctx(self._repo, parents, text, files, getfile, date=date,
-                      extra=extra, user=user, branch=branch, editor=editor)
+
+        return memctx(
+            self._repo,
+            parents,
+            text,
+            files,
+            getfile,
+            date=date,
+            extra=extra,
+            user=user,
+            branch=branch,
+            editor=editor,
+        )
 
     def isdirty(self, path):
         return path in self._cache
@@ -2130,15 +2441,19 @@
         # This won't be perfect, but can help performance significantly when
         # using things like remotefilelog.
         scmutil.prefetchfiles(
-            self.repo(), [self.p1().rev()],
-            scmutil.matchfiles(self.repo(), self._cache.keys()))
+            self.repo(),
+            [self.p1().rev()],
+            scmutil.matchfiles(self.repo(), self._cache.keys()),
+        )
 
         for path in self._cache.keys():
             cache = self._cache[path]
             try:
                 underlying = self._wrappedctx[path]
-                if (underlying.data() == cache['data'] and
-                            underlying.flags() == cache['flags']):
+                if (
+                    underlying.data() == cache[b'data']
+                    and underlying.flags() == cache[b'flags']
+                ):
                     keys.append(path)
             except error.ManifestLookupError:
                 # Path not in the underlying manifest (created).
@@ -2148,36 +2463,38 @@
             del self._cache[path]
         return keys
 
-    def _markdirty(self, path, exists, data=None, date=None, flags='',
-        copied=None):
+    def _markdirty(
+        self, path, exists, data=None, date=None, flags=b'', copied=None
+    ):
         # data not provided, let's see if we already have some; if not, let's
         # grab it from our underlying context, so that we always have data if
         # the file is marked as existing.
         if exists and data is None:
             oldentry = self._cache.get(path) or {}
-            data = oldentry.get('data')
+            data = oldentry.get(b'data')
             if data is None:
                 data = self._wrappedctx[path].data()
 
         self._cache[path] = {
-            'exists': exists,
-            'data': data,
-            'date': date,
-            'flags': flags,
-            'copied': copied,
+            b'exists': exists,
+            b'data': data,
+            b'date': date,
+            b'flags': flags,
+            b'copied': copied,
         }
 
     def filectx(self, path, filelog=None):
-        return overlayworkingfilectx(self._repo, path, parent=self,
-                                     filelog=filelog)
+        return overlayworkingfilectx(
+            self._repo, path, parent=self, filelog=filelog
+        )
+
 
 class overlayworkingfilectx(committablefilectx):
     """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
     cache, which can be flushed through later by calling ``flush()``."""
 
     def __init__(self, repo, path, filelog=None, parent=None):
-        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
-                                                    parent)
+        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
         self._repo = repo
         self._parent = parent
         self._path = path
@@ -2227,6 +2544,7 @@
     def clearunknown(self):
         pass
 
+
 class workingcommitctx(workingctx):
     """A workingcommitctx object makes access to data related to
     the revision being committed convenient.
@@ -2234,10 +2552,13 @@
     This hides changes in the working directory, if they aren't
     committed in this context.
     """
-    def __init__(self, repo, changes,
-                 text="", user=None, date=None, extra=None):
-        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
-                                               changes)
+
+    def __init__(
+        self, repo, changes, text=b"", user=None, date=None, extra=None
+    ):
+        super(workingcommitctx, self).__init__(
+            repo, text, user, date, extra, changes
+        )
 
     def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
         """Return matched files only in ``self._status``
@@ -2249,10 +2570,15 @@
             clean = [f for f in self._manifest if f not in self._changedset]
         else:
             clean = []
-        return scmutil.status([f for f in self._status.modified if match(f)],
-                              [f for f in self._status.added if match(f)],
-                              [f for f in self._status.removed if match(f)],
-                              [], [], [], clean)
+        return scmutil.status(
+            [f for f in self._status.modified if match(f)],
+            [f for f in self._status.added if match(f)],
+            [f for f in self._status.removed if match(f)],
+            [],
+            [],
+            [],
+            clean,
+        )
 
     @propertycache
     def _changedset(self):
@@ -2263,6 +2589,7 @@
         changed.update(self._status.removed)
         return changed
 
+
 def makecachingfilectxfn(func):
     """Create a filectxfn that caches based on the path.
 
@@ -2279,36 +2606,54 @@
 
     return getfilectx
 
+
 def memfilefromctx(ctx):
     """Given a context return a memfilectx for ctx[path]
 
     This is a convenience method for building a memctx based on another
     context.
     """
+
     def getfilectx(repo, memctx, path):
         fctx = ctx[path]
         copysource = fctx.copysource()
-        return memfilectx(repo, memctx, path, fctx.data(),
-                          islink=fctx.islink(), isexec=fctx.isexec(),
-                          copysource=copysource)
+        return memfilectx(
+            repo,
+            memctx,
+            path,
+            fctx.data(),
+            islink=fctx.islink(),
+            isexec=fctx.isexec(),
+            copysource=copysource,
+        )
 
     return getfilectx
 
+
 def memfilefrompatch(patchstore):
     """Given a patch (e.g. patchstore object) return a memfilectx
 
     This is a convenience method for building a memctx based on a patchstore.
     """
+
     def getfilectx(repo, memctx, path):
         data, mode, copysource = patchstore.getfile(path)
         if data is None:
             return None
         islink, isexec = mode
-        return memfilectx(repo, memctx, path, data, islink=islink,
-                          isexec=isexec, copysource=copysource)
+        return memfilectx(
+            repo,
+            memctx,
+            path,
+            data,
+            islink=islink,
+            isexec=isexec,
+            copysource=copysource,
+        )
 
     return getfilectx
 
+
 class memctx(committablectx):
     """Use memctx to perform in-memory commits via localrepo.commitctx().
 
@@ -2342,10 +2687,22 @@
     # this field to determine what to do in filectxfn.
     _returnnoneformissingfiles = True
 
-    def __init__(self, repo, parents, text, files, filectxfn, user=None,
-                 date=None, extra=None, branch=None, editor=False):
-        super(memctx, self).__init__(repo, text, user, date, extra,
-                                     branch=branch)
+    def __init__(
+        self,
+        repo,
+        parents,
+        text,
+        files,
+        filectxfn,
+        user=None,
+        date=None,
+        extra=None,
+        branch=None,
+        editor=False,
+    ):
+        super(memctx, self).__init__(
+            repo, text, user, date, extra, branch=branch
+        )
         self._rev = None
         self._node = None
         parents = [(p or nullid) for p in parents]
@@ -2424,13 +2781,23 @@
 
         return scmutil.status(modified, added, removed, [], [], [], [])
 
+
 class memfilectx(committablefilectx):
     """memfilectx represents an in-memory file to commit.
 
     See memctx and committablefilectx for more details.
     """
-    def __init__(self, repo, changectx, path, data, islink=False,
-                 isexec=False, copysource=None):
+
+    def __init__(
+        self,
+        repo,
+        changectx,
+        path,
+        data,
+        islink=False,
+        isexec=False,
+        copysource=None,
+    ):
         """
         path is the normalized file path relative to repository root.
         data is the file content as a string.
@@ -2441,11 +2808,11 @@
         super(memfilectx, self).__init__(repo, path, None, changectx)
         self._data = data
         if islink:
-            self._flags = 'l'
+            self._flags = b'l'
         elif isexec:
-            self._flags = 'x'
+            self._flags = b'x'
         else:
-            self._flags = ''
+            self._flags = b''
         self._copysource = copysource
 
     def copysource(self):
@@ -2482,8 +2849,18 @@
     dateutil.parsedate() and defaults to current date, extra is a dictionary of
     metadata or is left empty.
     """
-    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
-                 date=None, extra=None, editor=False):
+
+    def __init__(
+        self,
+        repo,
+        originalctx,
+        parents=None,
+        text=None,
+        user=None,
+        date=None,
+        extra=None,
+        editor=False,
+    ):
         if text is None:
             text = originalctx.description()
         super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
@@ -2504,11 +2881,15 @@
         # manifests of our commit parents
         mp1, mp2 = self.manifestctx().parents
         if p1 != nullid and p1.manifestnode() != mp1:
-            raise RuntimeError(r"can't reuse the manifest: its p1 "
-                               r"doesn't match the new ctx p1")
+            raise RuntimeError(
+                r"can't reuse the manifest: its p1 "
+                r"doesn't match the new ctx p1"
+            )
         if p2 != nullid and p2.manifestnode() != mp2:
-            raise RuntimeError(r"can't reuse the manifest: "
-                               r"its p2 doesn't match the new ctx p2")
+            raise RuntimeError(
+                r"can't reuse the manifest: "
+                r"its p2 doesn't match the new ctx p2"
+            )
 
         self._files = originalctx.files()
         self.substate = {}
@@ -2562,10 +2943,12 @@
 
         return scmutil.status(modified, added, removed, [], [], [], [])
 
+
 class arbitraryfilectx(object):
     """Allows you to use filectx-like functions on a file in an arbitrary
     location on disk, possibly not in the working directory.
     """
+
     def __init__(self, path, repo=None):
         # Repo is optional because contrib/simplemerge uses this class.
         self._repo = repo
@@ -2574,7 +2957,7 @@
     def cmp(self, fctx):
         # filecmp follows symlinks whereas `cmp` should not, so skip the fast
         # path if either side is a symlink.
-        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
+        symlinks = b'l' in self.flags() or b'l' in fctx.flags()
         if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
             # Add a fast-path for merge if both sides are disk-backed.
             # Note that filecmp uses the opposite return values (True if same)
@@ -2586,13 +2969,13 @@
         return self._path
 
     def flags(self):
-        return ''
+        return b''
 
     def data(self):
         return util.readfile(self._path)
 
     def decodeddata(self):
-        with open(self._path, "rb") as f:
+        with open(self._path, b"rb") as f:
             return f.read()
 
     def remove(self):
@@ -2600,5 +2983,5 @@
 
     def write(self, data, flags, **kwargs):
         assert not flags
-        with open(self._path, "wb") as f:
+        with open(self._path, b"wb") as f:
             f.write(data)
--- a/mercurial/copies.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/copies.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,104 +8,26 @@
 from __future__ import absolute_import
 
 import collections
-import heapq
 import os
 
 from .i18n import _
 
+
+from .revlogutils.flagutil import REVIDX_SIDEDATA
+
 from . import (
+    error,
     match as matchmod,
     node,
     pathutil,
+    pycompat,
     util,
 )
-from .utils import (
-    stringutil,
-)
 
-def _findlimit(repo, ctxa, ctxb):
-    """
-    Find the last revision that needs to be checked to ensure that a full
-    transitive closure for file copies can be properly calculated.
-    Generally, this means finding the earliest revision number that's an
-    ancestor of a or b but not both, except when a or b is a direct descendent
-    of the other, in which case we can return the minimum revnum of a and b.
-    """
-
-    # basic idea:
-    # - mark a and b with different sides
-    # - if a parent's children are all on the same side, the parent is
-    #   on that side, otherwise it is on no side
-    # - walk the graph in topological order with the help of a heap;
-    #   - add unseen parents to side map
-    #   - clear side of any parent that has children on different sides
-    #   - track number of interesting revs that might still be on a side
-    #   - track the lowest interesting rev seen
-    #   - quit when interesting revs is zero
-
-    cl = repo.changelog
-    wdirparents = None
-    a = ctxa.rev()
-    b = ctxb.rev()
-    if a is None:
-        wdirparents = (ctxa.p1(), ctxa.p2())
-        a = node.wdirrev
-    if b is None:
-        assert not wdirparents
-        wdirparents = (ctxb.p1(), ctxb.p2())
-        b = node.wdirrev
-
-    side = {a: -1, b: 1}
-    visit = [-a, -b]
-    heapq.heapify(visit)
-    interesting = len(visit)
-    limit = node.wdirrev
+from .revlogutils import sidedata as sidedatamod
 
-    while interesting:
-        r = -heapq.heappop(visit)
-        if r == node.wdirrev:
-            parents = [pctx.rev() for pctx in wdirparents]
-        else:
-            parents = cl.parentrevs(r)
-        if parents[1] == node.nullrev:
-            parents = parents[:1]
-        for p in parents:
-            if p not in side:
-                # first time we see p; add it to visit
-                side[p] = side[r]
-                if side[p]:
-                    interesting += 1
-                heapq.heappush(visit, -p)
-            elif side[p] and side[p] != side[r]:
-                # p was interesting but now we know better
-                side[p] = 0
-                interesting -= 1
-        if side[r]:
-            limit = r # lowest rev visited
-            interesting -= 1
+from .utils import stringutil
 
-    # Consider the following flow (see test-commit-amend.t under issue4405):
-    # 1/ File 'a0' committed
-    # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
-    # 3/ Move back to first commit
-    # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
-    # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
-    #
-    # During the amend in step five, we will be in this state:
-    #
-    # @  3 temporary amend commit for a1-amend
-    # |
-    # o  2 a1-amend
-    # |
-    # | o  1 a1
-    # |/
-    # o  0 a0
-    #
-    # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
-    # yet the filelog has the copy information in rev 1 and we will not look
-    # back far enough unless we also look at the a and b as candidates.
-    # This only occurs when a is a descendent of b or visa-versa.
-    return min(limit, a, b)
 
 def _filter(src, dst, t):
     """filters out invalid copies after chaining"""
@@ -140,19 +62,22 @@
         elif k not in dst:
             del t[k]
 
+
 def _chain(a, b):
     """chain two sets of copies 'a' and 'b'"""
     t = a.copy()
-    for k, v in b.iteritems():
-        if v in t:
-            t[k] = t[v]
-        else:
-            t[k] = v
+    for k, v in pycompat.iteritems(b):
+        t[k] = t.get(v, v)
     return t
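
The simplified ``_chain`` collapses transitive renames in a single pass: for
each copy ``k <- v`` recorded in ``b``, it follows ``v`` through ``a`` when
possible and otherwise keeps ``v`` itself. A minimal standalone sketch of the
same logic, using plain dicts instead of repo state::

    def chain(a, b):
        # overlay the newer copies 'b' on top of the older copies 'a'
        t = dict(a)
        for k, v in b.items():
            # follow v through 'a' when it is itself a copy destination
            t[k] = t.get(v, v)
        return t

    # 'mid' came from 'old', then 'new' from 'mid', so 'new' maps to 'old'
    assert chain({'mid': 'old'}, {'new': 'mid'}) == {'mid': 'old', 'new': 'old'}

The real function still relies on ``_filter`` above to drop entries that are
invalid after chaining.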
 
-def _tracefile(fctx, am, basemf, limit):
+
+def _tracefile(fctx, am, basemf):
     """return file context that is the ancestor of fctx present in ancestor
-    manifest am, stopping after the first ancestor lower than limit"""
+    manifest am
+
+    Note: we used to try to stop after a given limit; however, checking
+    whether that limit was reached turned out to be very expensive, so we
+    are better off disabling that feature."""
 
     for f in fctx.ancestors():
         path = f.path()
@@ -160,17 +85,17 @@
             return path
         if basemf and basemf.get(path, None) == f.filenode():
             return path
-        if not f.isintroducedafter(limit):
-            return None
+
 
 def _dirstatecopies(repo, match=None):
     ds = repo.dirstate
     c = ds.copies().copy()
     for k in list(c):
-        if ds[k] not in 'anm' or (match and not match(k)):
+        if ds[k] not in b'anm' or (match and not match(k)):
             del c[k]
     return c
 
+
 def _computeforwardmissing(a, b, match=None):
     """Computes which files are in b but not a.
     This is its own function so extensions can easily wrap this call to see what
@@ -180,10 +105,15 @@
     mb = b.manifest()
     return mb.filesnotin(ma, match=match)
 
+
 def usechangesetcentricalgo(repo):
     """Checks if we should use changeset-centric copy algorithms"""
-    return (repo.ui.config('experimental', 'copies.read-from') in
-            ('changeset-only', 'compatibility'))
+    if repo.filecopiesmode == b'changeset-sidedata':
+        return True
+    readfrom = repo.ui.config(b'experimental', b'copies.read-from')
+    changesetsource = (b'changeset-only', b'compatibility')
+    return readfrom in changesetsource
+
 
 def _committedforwardcopies(a, b, base, match):
     """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
@@ -194,14 +124,10 @@
     if usechangesetcentricalgo(repo):
         return _changesetforwardcopies(a, b, match)
 
-    debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
+    debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
     dbg = repo.ui.debug
     if debug:
-        dbg('debug.copies:    looking into rename from %s to %s\n'
-            % (a, b))
-    limit = _findlimit(repo, a, b)
-    if debug:
-        dbg('debug.copies:      search limit: %d\n' % limit)
+        dbg(b'debug.copies:    looking into rename from %s to %s\n' % (a, b))
     am = a.manifest()
     basemf = None if base is None else base.manifest()
 
@@ -225,34 +151,113 @@
     ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
 
     if debug:
-        dbg('debug.copies:      missing files to search: %d\n' % len(missing))
+        dbg(b'debug.copies:      missing files to search: %d\n' % len(missing))
 
     for f in sorted(missing):
         if debug:
-            dbg('debug.copies:        tracing file: %s\n' % f)
+            dbg(b'debug.copies:        tracing file: %s\n' % f)
         fctx = b[f]
         fctx._ancestrycontext = ancestrycontext
 
         if debug:
             start = util.timer()
-        opath = _tracefile(fctx, am, basemf, limit)
+        opath = _tracefile(fctx, am, basemf)
         if opath:
             if debug:
-                dbg('debug.copies:          rename of: %s\n' % opath)
+                dbg(b'debug.copies:          rename of: %s\n' % opath)
             cm[f] = opath
         if debug:
-            dbg('debug.copies:          time: %f seconds\n'
-                % (util.timer() - start))
+            dbg(
+                b'debug.copies:          time: %f seconds\n'
+                % (util.timer() - start)
+            )
     return cm
 
+
+def _revinfogetter(repo):
+    """return a function that return multiple data given a <rev>"i
+
+    * p1: revision number of first parent
+    * p2: revision number of first parent
+    * p1copies: mapping of copies from p1
+    * p2copies: mapping of copies from p2
+    * removed: a list of removed files
+    """
+    cl = repo.changelog
+    parents = cl.parentrevs
+
+    if repo.filecopiesmode == b'changeset-sidedata':
+        changelogrevision = cl.changelogrevision
+        flags = cl.flags
+
+        # A small cache to avoid doing the work twice for merges
+        #
+        # In the vast majority of cases, if we ask for information about a
+        # revision for one parent, we'll later ask for it for the other. So it
+        # makes sense to keep the information around when reaching the first
+        # parent of a merge and to drop it once it has been provided for the
+        # second parent.
+        #
+        # There are cases where only one parent of the merge will be walked.
+        # This happens when the "destination" of the copy tracing is a
+        # descendant of a new root that is not common with the "source". In
+        # that case, we will only walk through merge parents that are
+        # descendants of changesets common to "source" and "destination".
+        #
+        # With the current implementation, if such changesets carry copy
+        # information, we'll keep them in memory until the end of
+        # _changesetforwardcopies. We don't expect the case to be frequent
+        # enough to matter.
+        #
+        # In addition, it would be possible to reach a pathological case where
+        # many first parents are met before any second parent is reached. In
+        # that case the cache could grow. If this ever becomes an issue, one
+        # can safely introduce a maximum cache size. This would trade extra
+        # CPU/IO time to save memory.
+        merge_caches = {}
+
+        def revinfo(rev):
+            p1, p2 = parents(rev)
+            if flags(rev) & REVIDX_SIDEDATA:
+                e = merge_caches.pop(rev, None)
+                if e is not None:
+                    return e
+                c = changelogrevision(rev)
+                p1copies = c.p1copies
+                p2copies = c.p2copies
+                removed = c.filesremoved
+                if p1 != node.nullrev and p2 != node.nullrev:
+                    # XXX in some cases we over-cache; ignore for now
+                    merge_caches[rev] = (p1, p2, p1copies, p2copies, removed)
+            else:
+                p1copies = {}
+                p2copies = {}
+                removed = ()
+            return p1, p2, p1copies, p2copies, removed
+
+    else:
+
+        def revinfo(rev):
+            p1, p2 = parents(rev)
+            ctx = repo[rev]
+            p1copies, p2copies = ctx._copies
+            removed = ctx.filesremoved()
+            return p1, p2, p1copies, p2copies, removed
+
+    return revinfo
+
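
The caching pattern above is worth seeing in isolation: the first query for a
merge revision parks the computed tuple, and the anticipated second query
(from the other parent) pops it, so the cache stays small in the common case.
A hypothetical toy version, where ``compute`` stands in for
``changelogrevision``::

    merge_caches = {}

    def cached(rev, compute):
        # second visit for this merge: consume and drop the parked entry
        e = merge_caches.pop(rev, None)
        if e is not None:
            return e
        e = compute(rev)
        # park the result for the expected visit from the other parent
        merge_caches[rev] = e
        return e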
+
 def _changesetforwardcopies(a, b, match):
     if a.rev() in (node.nullrev, b.rev()):
         return {}
 
-    repo = a.repo()
+    repo = a.repo().unfiltered()
     children = {}
+    revinfo = _revinfogetter(repo)
+
     cl = repo.changelog
     missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
+    mrset = set(missingrevs)
+    roots = set()
     for r in missingrevs:
         for p in cl.parentrevs(r):
             if p == node.nullrev:
@@ -261,58 +266,75 @@
                 children[p] = [r]
             else:
                 children[p].append(r)
+            if p not in mrset:
+                roots.add(p)
+    if not roots:
+        # no common revision to track copies from
+        return {}
+    min_root = min(roots)
 
-    roots = set(children) - set(missingrevs)
-    # 'work' contains 3-tuples of a (revision number, parent number, copies).
-    # The parent number is only used for knowing which parent the copies dict
-    # came from.
-    # NOTE: To reduce costly copying the 'copies' dicts, we reuse the same
-    # instance for *one* of the child nodes (the last one). Once an instance
-    # has been put on the queue, it is thus no longer safe to modify it.
-    # Conversely, it *is* safe to modify an instance popped off the queue.
-    work = [(r, 1, {}) for r in roots]
-    heapq.heapify(work)
+    from_head = set(
+        cl.reachableroots(min_root, [b.rev()], list(roots), includepath=True)
+    )
+
+    iterrevs = set(from_head)
+    iterrevs &= mrset
+    iterrevs.update(roots)
+    iterrevs.remove(b.rev())
+    all_copies = {r: {} for r in roots}
     alwaysmatch = match.always()
-    while work:
-        r, i1, copies = heapq.heappop(work)
-        if work and work[0][0] == r:
-            # We are tracing copies from both parents
-            r, i2, copies2 = heapq.heappop(work)
-            for dst, src in copies2.items():
+    for r in sorted(iterrevs):
+        copies = all_copies.pop(r)
+        for i, c in enumerate(children[r]):
+            p1, p2, p1copies, p2copies, removed = revinfo(c)
+            if r == p1:
+                parent = 1
+                childcopies = p1copies
+            else:
+                assert r == p2
+                parent = 2
+                childcopies = p2copies
+            if not alwaysmatch:
+                childcopies = {
+                    dst: src for dst, src in childcopies.items() if match(dst)
+                }
+            newcopies = copies
+            if childcopies:
+                newcopies = _chain(newcopies, childcopies)
+                # _chain makes a copy; we can avoid doing so in some
+                # simple/linear cases.
+                assert newcopies is not copies
+            for f in removed:
+                if f in newcopies:
+                    if newcopies is copies:
+                        # copy on write to avoid affecting potential other
+                        # branches. When there are no other branches, this
+                        # could be avoided.
+                        newcopies = copies.copy()
+                    del newcopies[f]
+            othercopies = all_copies.get(c)
+            if othercopies is None:
+                all_copies[c] = newcopies
+            else:
+                # we are the second parent to work on c; we need to merge our
+                # work with the other parent's.
+                #
                 # Unlike when copies are stored in the filelog, we consider
                 # it a copy even if the destination already existed on the
                 # other branch. It's simply too expensive to check if the
                 # file existed in the manifest.
-                if dst not in copies:
-                    # If it was copied on the p1 side, leave it as copied from
-                    # that side, even if it was also copied on the p2 side.
-                    copies[dst] = copies2[dst]
-        if r == b.rev():
-            return copies
-        for i, c in enumerate(children[r]):
-            childctx = repo[c]
-            if r == childctx.p1().rev():
-                parent = 1
-                childcopies = childctx.p1copies()
-            else:
-                assert r == childctx.p2().rev()
-                parent = 2
-                childcopies = childctx.p2copies()
-            if not alwaysmatch:
-                childcopies = {dst: src for dst, src in childcopies.items()
-                               if match(dst)}
-            # Copy the dict only if later iterations will also need it
-            if i != len(children[r]) - 1:
-                newcopies = copies.copy()
-            else:
-                newcopies = copies
-            if childcopies:
-                newcopies = _chain(newcopies, childcopies)
-            for f in childctx.filesremoved():
-                if f in newcopies:
-                    del newcopies[f]
-            heapq.heappush(work, (c, parent, newcopies))
-    assert False
+                #
+                # In case of conflict, parent 1 takes precedence over parent 2.
+                # This is an arbitrary choice made anew when implementing
+                # changeset-based copies. It was made without regard to
+                # potential filelog-related behavior.
+                if parent == 1:
+                    othercopies.update(newcopies)
+                else:
+                    newcopies.update(othercopies)
+                    all_copies[c] = newcopies
+    return all_copies[b.rev()]
+
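
The rewritten walk drops the old heap entirely: every candidate revision is
visited exactly once, in ascending order, which guarantees that a revision's
accumulated copies are final before any of its children reads them. A
stripped-down sketch of the propagation under toy ``children`` and
``childcopies`` maps (the real code additionally arbitrates merge conflicts in
favor of p1 and prunes removed files)::

    def propagate(roots, iterrevs, children, childcopies, target):
        # copies accumulated so far, keyed by pending revision
        all_copies = {r: {} for r in roots}
        for r in sorted(iterrevs):  # parents always precede children
            copies = all_copies.pop(r)
            for c in children.get(r, ()):
                newcopies = dict(copies)
                for dst, src in childcopies.get(c, {}).items():
                    # chain through the parent's copies, as _chain does
                    newcopies[dst] = newcopies.get(src, src)
                all_copies.setdefault(c, newcopies)
        return all_copies.get(target, {})

    children = {0: [1], 1: [2]}
    childcopies = {1: {'b': 'a'}, 2: {'c': 'b'}}
    assert propagate([0], [0, 1], children, childcopies, 2) == {'b': 'a', 'c': 'a'}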
 
 def _forwardcopies(a, b, base=None, match=None):
     """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
@@ -326,11 +348,12 @@
         # combine copies from dirstate if necessary
         copies = _chain(cm, _dirstatecopies(b._repo, match))
     else:
-        copies  = _committedforwardcopies(a, b, base, match)
+        copies = _committedforwardcopies(a, b, base, match)
     return copies
 
+
 def _backwardrenames(a, b, match):
-    if a._repo.ui.config('experimental', 'copytrace') == 'off':
+    if a._repo.ui.config(b'experimental', b'copytrace') == b'off':
         return {}
 
     # Even though we're not taking copies into account, 1:n rename situations
@@ -341,7 +364,7 @@
     # to filter the source instead.
     f = _forwardcopies(b, a)
     r = {}
-    for k, v in sorted(f.iteritems()):
+    for k, v in sorted(pycompat.iteritems(f)):
         if match and not match(v):
             continue
         # remove copies
@@ -350,38 +373,43 @@
         r[v] = k
     return r
 
+
 def pathcopies(x, y, match=None):
     """find {dst@y: src@x} copy mapping for directed compare"""
     repo = x._repo
-    debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
+    debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
     if debug:
-        repo.ui.debug('debug.copies: searching copies from %s to %s\n'
-                      % (x, y))
+        repo.ui.debug(
+            b'debug.copies: searching copies from %s to %s\n' % (x, y)
+        )
     if x == y or not x or not y:
         return {}
     a = y.ancestor(x)
     if a == x:
         if debug:
-            repo.ui.debug('debug.copies: search mode: forward\n')
+            repo.ui.debug(b'debug.copies: search mode: forward\n')
         if y.rev() is None and x == y.p1():
             # short-circuit to avoid issues with merge states
             return _dirstatecopies(repo, match)
         copies = _forwardcopies(x, y, match=match)
     elif a == y:
         if debug:
-            repo.ui.debug('debug.copies: search mode: backward\n')
+            repo.ui.debug(b'debug.copies: search mode: backward\n')
         copies = _backwardrenames(x, y, match=match)
     else:
         if debug:
-            repo.ui.debug('debug.copies: search mode: combined\n')
+            repo.ui.debug(b'debug.copies: search mode: combined\n')
         base = None
         if a.rev() != node.nullrev:
             base = x
-        copies = _chain(_backwardrenames(x, a, match=match),
-                        _forwardcopies(a, y, base, match=match))
+        copies = _chain(
+            _backwardrenames(x, a, match=match),
+            _forwardcopies(a, y, base, match=match),
+        )
     _filter(x, y, copies)
     return copies
 
+
 def mergecopies(repo, c1, c2, base):
     """
     Finds moves and copies between context c1 and c2 that are relevant for
@@ -438,7 +466,7 @@
     if c2.node() is None and c1.node() == repo.dirstate.p1():
         return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}
 
-    copytracing = repo.ui.config('experimental', 'copytrace')
+    copytracing = repo.ui.config(b'experimental', b'copytrace')
     if stringutil.parsebool(copytracing) is False:
         # stringutil.parsebool() returns None when it is unable to parse the
         # value, so we should rely on making sure copytracing is on such cases
@@ -451,7 +479,7 @@
     # Copy trace disabling is explicitly below the node == p1 logic above
     # because the logic above is required for a simple copy to be kept across a
     # rebase.
-    if copytracing == 'heuristics':
+    if copytracing == b'heuristics':
         # Do full copytracing if only non-public revisions are involved as
         # that will be fast enough and will also cover the copies which could
         # be missed by heuristics
@@ -461,6 +489,7 @@
     else:
         return _fullcopytracing(repo, c1, c2, base)
 
+
 def _isfullcopytraceable(repo, c1, base):
     """ Checks that if base, source and destination are all no-public branches,
     if yes let's use the full copytrace algorithm for increased capabilities
@@ -473,14 +502,17 @@
     if c1.rev() is None:
         c1 = c1.p1()
     if c1.mutable() and base.mutable():
-        sourcecommitlimit = repo.ui.configint('experimental',
-                                              'copytrace.sourcecommitlimit')
-        commits = len(repo.revs('%d::%d', base.rev(), c1.rev()))
+        sourcecommitlimit = repo.ui.configint(
+            b'experimental', b'copytrace.sourcecommitlimit'
+        )
+        commits = len(repo.revs(b'%d::%d', base.rev(), c1.rev()))
         return commits < sourcecommitlimit
     return False
 
-def _checksinglesidecopies(src, dsts1, m1, m2, mb, c2, base,
-                           copy, renamedelete):
+
+def _checksinglesidecopies(
+    src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
+):
     if src not in m2:
         # deleted on side 2
         if src not in m1:
@@ -496,6 +528,7 @@
                 # "both created" case in manifestmerge otherwise)
                 copy[dst] = src
 
+
 def _fullcopytracing(repo, c1, c2, base):
     """ The full copytracing algorithm which finds all the new files that were
     added from merge base up to the top commit and for each file it checks if
@@ -536,7 +569,7 @@
                 # to 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c'
                 # and 'd' and deletes 'a'.
                 if dsts1 & dsts2:
-                    for dst in (dsts1 & dsts2):
+                    for dst in dsts1 & dsts2:
                         copy[dst] = src
                 else:
                     diverge[src] = sorted(dsts1 | dsts2)
@@ -544,18 +577,20 @@
                 # copied on both sides
                 dsts1 = set(dsts1)
                 dsts2 = set(dsts2)
-                for dst in (dsts1 & dsts2):
+                for dst in dsts1 & dsts2:
                     copy[dst] = src
             # TODO: Handle cases where it was renamed on one side and copied
             # on the other side
         elif dsts1:
             # copied/renamed only on side 1
-            _checksinglesidecopies(src, dsts1, m1, m2, mb, c2, base,
-                                   copy, renamedelete)
+            _checksinglesidecopies(
+                src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
+            )
         elif dsts2:
             # copied/renamed only on side 2
-            _checksinglesidecopies(src, dsts2, m2, m1, mb, c1, base,
-                                   copy, renamedelete)
+            _checksinglesidecopies(
+                src, dsts2, m2, m1, mb, c1, base, copy, renamedelete
+            )
 
     renamedeleteset = set()
     divergeset = set()
@@ -570,11 +605,11 @@
     u1 = sorted(addedinm1 - addedinm2)
     u2 = sorted(addedinm2 - addedinm1)
 
-    header = "  unmatched files in %s"
+    header = b"  unmatched files in %s"
     if u1:
-        repo.ui.debug("%s:\n   %s\n" % (header % 'local', "\n   ".join(u1)))
+        repo.ui.debug(b"%s:\n   %s\n" % (header % b'local', b"\n   ".join(u1)))
     if u2:
-        repo.ui.debug("%s:\n   %s\n" % (header % 'other', "\n   ".join(u2)))
+        repo.ui.debug(b"%s:\n   %s\n" % (header % b'other', b"\n   ".join(u2)))
 
     fullcopy = copies1.copy()
     fullcopy.update(copies2)
@@ -582,21 +617,24 @@
         return copy, {}, diverge, renamedelete, {}
 
     if repo.ui.debugflag:
-        repo.ui.debug("  all copies found (* = to merge, ! = divergent, "
-                      "% = renamed and deleted):\n")
+        repo.ui.debug(
+            b"  all copies found (* = to merge, ! = divergent, "
+            b"% = renamed and deleted):\n"
+        )
         for f in sorted(fullcopy):
-            note = ""
+            note = b""
             if f in copy:
-                note += "*"
+                note += b"*"
             if f in divergeset:
-                note += "!"
+                note += b"!"
             if f in renamedeleteset:
-                note += "%"
-            repo.ui.debug("   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
-                                                              note))
+                note += b"%"
+            repo.ui.debug(
+                b"   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f, note)
+            )
     del divergeset
 
-    repo.ui.debug("  checking for directory renames\n")
+    repo.ui.debug(b"  checking for directory renames\n")
 
     # generate a directory move map
     d1, d2 = c1.dirs(), c2.dirs()
@@ -605,7 +643,7 @@
 
     # examine each file copy for a potential directory move, which is
     # when all the files in a directory are moved to a new directory
-    for dst, src in fullcopy.iteritems():
+    for dst, src in pycompat.iteritems(fullcopy):
         dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
         if dsrc in invalid:
             # already seen to be uninteresting
@@ -631,11 +669,12 @@
     if not dirmove:
         return copy, {}, diverge, renamedelete, {}
 
-    dirmove = {k + "/": v + "/" for k, v in dirmove.iteritems()}
+    dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)}
 
     for d in dirmove:
-        repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
-                      (d, dirmove[d]))
+        repo.ui.debug(
+            b"   discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])
+        )
 
     movewithdir = {}
     # check unaccounted nonoverlapping files against directory moves
@@ -644,15 +683,18 @@
             for d in dirmove:
                 if f.startswith(d):
                     # new file added in a directory that was moved, move it
-                    df = dirmove[d] + f[len(d):]
+                    df = dirmove[d] + f[len(d) :]
                     if df not in copy:
                         movewithdir[f] = df
-                        repo.ui.debug(("   pending file src: '%s' -> "
-                                       "dst: '%s'\n") % (f, df))
+                        repo.ui.debug(
+                            b"   pending file src: '%s' -> dst: '%s'\n"
+                            % (f, df)
+                        )
                     break
 
     return copy, movewithdir, diverge, renamedelete, dirmove
 
+
 def _heuristicscopytracing(repo, c1, c2, base):
     """ Fast copytracing using filename heuristics
 
@@ -687,23 +729,25 @@
 
     changedfiles = set()
     m1 = c1.manifest()
-    if not repo.revs('%d::%d', base.rev(), c2.rev()):
+    if not repo.revs(b'%d::%d', base.rev(), c2.rev()):
         # If base is not in c2 branch, we switch to fullcopytracing
-        repo.ui.debug("switching to full copytracing as base is not "
-                      "an ancestor of c2\n")
+        repo.ui.debug(
+            b"switching to full copytracing as base is not "
+            b"an ancestor of c2\n"
+        )
         return _fullcopytracing(repo, c1, c2, base)
 
     ctx = c2
     while ctx != base:
         if len(ctx.parents()) == 2:
             # To keep things simple let's not handle merges
-            repo.ui.debug("switching to full copytracing because of merges\n")
+            repo.ui.debug(b"switching to full copytracing because of merges\n")
             return _fullcopytracing(repo, c1, c2, base)
         changedfiles.update(ctx.files())
         ctx = ctx.p1()
 
     cp = _forwardcopies(base, c2)
-    for dst, src in cp.iteritems():
+    for dst, src in pycompat.iteritems(cp):
         if src in m1:
             copies[dst] = src
 
@@ -735,13 +779,18 @@
             f2 = c2.filectx(f)
             # we can have a lot of candidates, which can slow down the
             # heuristics; a config value limits the number of candidate moves
             # to check
-            maxcandidates = repo.ui.configint('experimental',
-                                              'copytrace.movecandidateslimit')
+            maxcandidates = repo.ui.configint(
+                b'experimental', b'copytrace.movecandidateslimit'
+            )
 
             if len(movecandidates) > maxcandidates:
-                repo.ui.status(_("skipping copytracing for '%s', more "
-                                 "candidates than the limit: %d\n")
-                               % (f, len(movecandidates)))
+                repo.ui.status(
+                    _(
+                        b"skipping copytracing for '%s', more "
+                        b"candidates than the limit: %d\n"
+                    )
+                    % (f, len(movecandidates))
+                )
                 continue
 
             for candidate in movecandidates:
@@ -754,6 +803,7 @@
 
     return copies, {}, {}, {}, {}
 
+
 def _related(f1, f2):
     """return True if f1 and f2 filectx have a common ancestor
 
@@ -764,7 +814,7 @@
     """
 
     if f1 == f2:
-        return True # a match
+        return True  # a match
 
     g1, g2 = f1.ancestors(), f2.ancestors()
     try:
@@ -781,11 +831,12 @@
                 f1 = next(g1)
             elif f2r > f1r:
                 f2 = next(g2)
-            else: # f1 and f2 point to files in the same linkrev
-                return f1 == f2 # true if they point to the same file
+            else:  # f1 and f2 point to files in the same linkrev
+                return f1 == f2  # true if they point to the same file
     except StopIteration:
         return False
 
+
 def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None):
     """reproduce copies from fromrev to rev in the dirstate
 
@@ -795,17 +846,169 @@
     copies between fromrev and rev.
     """
     exclude = {}
-    ctraceconfig = repo.ui.config('experimental', 'copytrace')
+    ctraceconfig = repo.ui.config(b'experimental', b'copytrace')
     bctrace = stringutil.parsebool(ctraceconfig)
-    if (skiprev is not None and
-        (ctraceconfig == 'heuristics' or bctrace or bctrace is None)):
+    if skiprev is not None and (
+        ctraceconfig == b'heuristics' or bctrace or bctrace is None
+    ):
         # copytrace='off' skips this line, but not the entire function because
         # the line below is O(size of the repo) during a rebase, while the rest
         # of the function is much faster (and is required for carrying copy
         # metadata across the rebase anyway).
         exclude = pathcopies(repo[fromrev], repo[skiprev])
-    for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
+    for dst, src in pycompat.iteritems(pathcopies(repo[fromrev], repo[rev])):
         if dst in exclude:
             continue
         if dst in wctx:
             wctx[dst].markcopied(src)
+
+
+def computechangesetfilesadded(ctx):
+    """return the list of files added in a changeset
+    """
+    added = []
+    for f in ctx.files():
+        if not any(f in p for p in ctx.parents()):
+            added.append(f)
+    return added
+
+
+def computechangesetfilesremoved(ctx):
+    """return the list of files removed in a changeset
+    """
+    removed = []
+    for f in ctx.files():
+        if f not in ctx:
+            removed.append(f)
+    return removed
+
+
+def computechangesetcopies(ctx):
+    """return the copies data for a changeset
+
+    The copies data are returned as a pair of dictionaries (p1copies, p2copies).
+
+    Each dictionary is of the form: `{newname: oldname}`
+    """
+    p1copies = {}
+    p2copies = {}
+    p1 = ctx.p1()
+    p2 = ctx.p2()
+    narrowmatch = ctx._repo.narrowmatch()
+    for dst in ctx.files():
+        if not narrowmatch(dst) or dst not in ctx:
+            continue
+        copied = ctx[dst].renamed()
+        if not copied:
+            continue
+        src, srcnode = copied
+        if src in p1 and p1[src].filenode() == srcnode:
+            p1copies[dst] = src
+        elif src in p2 and p2[src].filenode() == srcnode:
+            p2copies[dst] = src
+    return p1copies, p2copies
+
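As an illustration with invented values: a changeset that renamed ``a`` to
``b`` relative to its first parent, and recorded ``y`` as copied from ``x``
relative to its second parent, would come back as::

    p1copies, p2copies = {b'b': b'a'}, {b'y': b'x'}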
+
+def encodecopies(files, copies):
+    items = []
+    for i, dst in enumerate(files):
+        if dst in copies:
+            items.append(b'%d\0%s' % (i, copies[dst]))
+    if len(items) != len(copies):
+        raise error.ProgrammingError(
+            b'some copy targets missing from file list'
+        )
+    return b"\n".join(items)
+
+
+def decodecopies(files, data):
+    try:
+        copies = {}
+        if not data:
+            return copies
+        for l in data.split(b'\n'):
+            strindex, src = l.split(b'\0')
+            i = int(strindex)
+            dst = files[i]
+            copies[dst] = src
+        return copies
+    except (ValueError, IndexError):
+        # Perhaps someone had chosen the same key name (e.g. "p1copies") and
+        # used different syntax for the value.
+        return None
+
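The copies sidedata format stores, for each copy, the index of the destination
within the sorted file list plus the source path, as ``index\0source`` records
joined by newlines. A quick round trip, assuming the two helpers above::

    files = [b'dst1', b'dst2', b'kept']
    data = encodecopies(files, {b'dst2': b'src'})
    assert data == b'1\x00src'
    assert decodecopies(files, data) == {b'dst2': b'src'}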
+
+def encodefileindices(files, subset):
+    subset = set(subset)
+    indices = []
+    for i, f in enumerate(files):
+        if f in subset:
+            indices.append(b'%d' % i)
+    return b'\n'.join(indices)
+
+
+def decodefileindices(files, data):
+    try:
+        subset = []
+        if not data:
+            return subset
+        for strindex in data.split(b'\n'):
+            i = int(strindex)
+            if i < 0 or i >= len(files):
+                return None
+            subset.append(files[i])
+        return subset
+    except (ValueError, IndexError):
+        # Perhaps someone had chosen the same key name (e.g. "added") and
+        # used different syntax for the value.
+        return None
+
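``encodefileindices`` and ``decodefileindices`` use the same newline-joined
scheme but store bare indices into ``files``::

    files = [b'a', b'b', b'c']
    assert encodefileindices(files, [b'a', b'c']) == b'0\n2'
    assert decodefileindices(files, b'0\n2') == [b'a', b'c']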
+
+def _getsidedata(srcrepo, rev):
+    ctx = srcrepo[rev]
+    filescopies = computechangesetcopies(ctx)
+    filesadded = computechangesetfilesadded(ctx)
+    filesremoved = computechangesetfilesremoved(ctx)
+    sidedata = {}
+    if any([filescopies, filesadded, filesremoved]):
+        sortedfiles = sorted(ctx.files())
+        p1copies, p2copies = filescopies
+        p1copies = encodecopies(sortedfiles, p1copies)
+        p2copies = encodecopies(sortedfiles, p2copies)
+        filesadded = encodefileindices(sortedfiles, filesadded)
+        filesremoved = encodefileindices(sortedfiles, filesremoved)
+        if p1copies:
+            sidedata[sidedatamod.SD_P1COPIES] = p1copies
+        if p2copies:
+            sidedata[sidedatamod.SD_P2COPIES] = p2copies
+        if filesadded:
+            sidedata[sidedatamod.SD_FILESADDED] = filesadded
+        if filesremoved:
+            sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
+    return sidedata
+
+
+def getsidedataadder(srcrepo, destrepo):
+    def sidedatacompanion(revlog, rev):
+        sidedata = {}
+        if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
+            sidedata = _getsidedata(srcrepo, rev)
+        return False, (), sidedata
+
+    return sidedatacompanion
+
+
+def getsidedataremover(srcrepo, destrepo):
+    def sidedatacompanion(revlog, rev):
+        f = ()
+        if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
+            if revlog.flags(rev) & REVIDX_SIDEDATA:
+                f = (
+                    sidedatamod.SD_P1COPIES,
+                    sidedatamod.SD_P2COPIES,
+                    sidedatamod.SD_FILESADDED,
+                    sidedatamod.SD_FILESREMOVED,
+                )
+        return False, f, {}
+
+    return sidedatacompanion
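
Both factories return a ``sidedatacompanion`` callback with the same contract:
called with a revlog and a revision, it returns a ``(dropall, fieldstoremove,
sidedatatoadd)`` triple. An illustrative (not actual) transcript for a
changelog revision carrying copy metadata::

    adder = getsidedataadder(srcrepo, destrepo)
    remover = getsidedataremover(srcrepo, destrepo)
    # adder(cl, rev)   -> (False, (), {SD_P1COPIES: ..., SD_FILESADDED: ...})
    # remover(cl, rev) -> (False, (SD_P1COPIES, SD_P2COPIES,
    #                              SD_FILESADDED, SD_FILESREMOVED), {})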
--- a/mercurial/crecord.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/crecord.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,6 +16,10 @@
 import signal
 
 from .i18n import _
+from .pycompat import (
+    getattr,
+    open,
+)
 from . import (
     encoding,
     error,
@@ -24,56 +28,67 @@
     scmutil,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
+
 stringio = util.stringio
 
 # patch comments based on the git one
-diffhelptext = _("""# To remove '-' lines, make them ' ' lines (context).
+diffhelptext = _(
+    """# To remove '-' lines, make them ' ' lines (context).
 # To remove '+' lines, delete them.
 # Lines starting with # will be removed from the patch.
-""")
+"""
+)
 
-hunkhelptext = _("""#
+hunkhelptext = _(
+    """#
 # If the patch applies cleanly, the edited hunk will immediately be
 # added to the record list. If it does not apply cleanly, a rejects file
 # will be generated. You can use that when you try again. If all lines
 # of the hunk are removed, then the edit is aborted and the hunk is left
 # unchanged.
-""")
+"""
+)
 
-patchhelptext = _("""#
+patchhelptext = _(
+    """#
 # If the patch applies cleanly, the edited patch will immediately
 # be finalised. If it does not apply cleanly, rejects files will be
 # generated. You can use those when you try again.
-""")
+"""
+)
 
 try:
     import curses
+
     curses.error
 except ImportError:
     # I have no idea if wcurses works with crecord...
     try:
         import wcurses as curses
+
         curses.error
     except ImportError:
         # wcurses is not shipped on Windows by default, or python is not
         # compiled with curses
         curses = False
 
+
 class fallbackerror(error.Abort):
     """Error that indicates the client should try to fallback to text mode."""
+
     # Inherits from error.Abort so that existing behavior is preserved if the
     # calling code does not know how to fallback.
 
+
 def checkcurses(ui):
     """Return True if the user wants to use curses
 
     This method returns True if curses is found (and python is built with
     it) and the user has set the correct flag for the ui.
     """
-    return curses and ui.interface("chunkselector") == "curses"
+    return curses and ui.interface(b"chunkselector") == b"curses"
+
 
 class patchnode(object):
     """abstract class for patch graph nodes
@@ -81,14 +96,14 @@
     """
 
     def firstchild(self):
-        raise NotImplementedError("method must be implemented by subclass")
+        raise NotImplementedError(b"method must be implemented by subclass")
 
     def lastchild(self):
-        raise NotImplementedError("method must be implemented by subclass")
+        raise NotImplementedError(b"method must be implemented by subclass")
 
     def allchildren(self):
-        "Return a list of all of the direct children of this node"
-        raise NotImplementedError("method must be implemented by subclass")
+        b"Return a list of all of the direct children of this node"
+        raise NotImplementedError(b"method must be implemented by subclass")
 
     def nextsibling(self):
         """
@@ -96,7 +111,7 @@
         of different types between the current item and this closest item.
         If no such item exists, return None.
         """
-        raise NotImplementedError("method must be implemented by subclass")
+        raise NotImplementedError(b"method must be implemented by subclass")
 
     def prevsibling(self):
         """
@@ -104,10 +119,10 @@
         items of different types between the current item and this closest item.
         If no such item exists, return None.
         """
-        raise NotImplementedError("method must be implemented by subclass")
+        raise NotImplementedError(b"method must be implemented by subclass")
 
     def parentitem(self):
-        raise NotImplementedError("method must be implemented by subclass")
+        raise NotImplementedError(b"method must be implemented by subclass")
 
     def nextitem(self, skipfolded=True):
         """
@@ -152,7 +167,7 @@
                 # else return grandparent's next sibling (or None)
                 return self.parentitem().parentitem().nextsibling()
 
-            except AttributeError: # parent and/or grandparent was None
+            except AttributeError:  # parent and/or grandparent was None
                 return None
 
     def previtem(self):
@@ -167,11 +182,11 @@
         prevsibling = self.prevsibling()
         if prevsibling is not None:
             prevsiblinglastchild = prevsibling.lastchild()
-            if ((prevsiblinglastchild is not None) and
-                not prevsibling.folded):
+            if (prevsiblinglastchild is not None) and not prevsibling.folded:
                 prevsiblinglclc = prevsiblinglastchild.lastchild()
-                if ((prevsiblinglclc is not None) and
-                    not prevsiblinglastchild.folded):
+                if (
+                    prevsiblinglclc is not None
+                ) and not prevsiblinglastchild.folded:
                     return prevsiblinglclc
                 else:
                     return prevsiblinglastchild
@@ -181,16 +196,19 @@
         # try parent (or None)
         return self.parentitem()
 
-class patch(patchnode, list): # todo: rename patchroot
+
+class patch(patchnode, list):  # todo: rename patchroot
     """
     list of header objects representing the patch.
     """
+
     def __init__(self, headerlist):
         self.extend(headerlist)
         # add parent patch object reference to each header
         for header in self:
             header.patch = self
 
+
 class uiheader(patchnode):
     """patch header
 
@@ -246,28 +264,30 @@
         return None
 
     def firstchild(self):
-        "return the first child of this item, if one exists.  otherwise None."
+        b"return the first child of this item, if one exists.  otherwise None."
         if len(self.hunks) > 0:
             return self.hunks[0]
         else:
             return None
 
     def lastchild(self):
-        "return the last child of this item, if one exists.  otherwise None."
+        b"return the last child of this item, if one exists.  otherwise None."
         if len(self.hunks) > 0:
             return self.hunks[-1]
         else:
             return None
 
     def allchildren(self):
-        "return a list of all of the direct children of this node"
+        b"return a list of all of the direct children of this node"
         return self.hunks
 
     def __getattr__(self, name):
         return getattr(self.nonuiheader, name)
 
+
 class uihunkline(patchnode):
-    "represents a changed line in a hunk"
+    b"represents a changed line in a hunk"
+
     def __init__(self, linetext, hunk):
         self.linetext = linetext
         self.applied = True
@@ -284,7 +304,7 @@
         numlinesinhunk = len(self.hunk.changedlines)
         indexofthisline = self.hunk.changedlines.index(self)
 
-        if (indexofthisline < numlinesinhunk - 1):
+        if indexofthisline < numlinesinhunk - 1:
             nextline = self.hunk.changedlines[indexofthisline + 1]
             return nextline
         else:
@@ -299,21 +319,23 @@
             return None
 
     def parentitem(self):
-        "return the parent to the current item"
+        b"return the parent to the current item"
         return self.hunk
 
     def firstchild(self):
-        "return the first child of this item, if one exists.  otherwise None."
+        b"return the first child of this item, if one exists.  otherwise None."
         # hunk-lines don't have children
         return None
 
     def lastchild(self):
-        "return the last child of this item, if one exists.  otherwise None."
+        b"return the last child of this item, if one exists.  otherwise None."
         # hunk-lines don't have children
         return None
 
+
 class uihunk(patchnode):
     """ui patch hunk, wraps a hunk and keep track of ui behavior """
+
     maxcontext = 3
 
     def __init__(self, hunk, header):
@@ -335,7 +357,7 @@
         numhunksinheader = len(self.header.hunks)
         indexofthishunk = self.header.hunks.index(self)
 
-        if (indexofthishunk < numhunksinheader - 1):
+        if indexofthishunk < numhunksinheader - 1:
             nexthunk = self.header.hunks[indexofthishunk + 1]
             return nexthunk
         else:
@@ -350,42 +372,53 @@
             return None
 
     def parentitem(self):
-        "return the parent to the current item"
+        b"return the parent to the current item"
         return self.header
 
     def firstchild(self):
-        "return the first child of this item, if one exists.  otherwise None."
+        b"return the first child of this item, if one exists.  otherwise None."
         if len(self.changedlines) > 0:
             return self.changedlines[0]
         else:
             return None
 
     def lastchild(self):
-        "return the last child of this item, if one exists.  otherwise None."
+        b"return the last child of this item, if one exists.  otherwise None."
         if len(self.changedlines) > 0:
             return self.changedlines[-1]
         else:
             return None
 
     def allchildren(self):
-        "return a list of all of the direct children of this node"
+        b"return a list of all of the direct children of this node"
         return self.changedlines
 
     def countchanges(self):
         """changedlines -> (n+,n-)"""
-        add = len([l for l in self.changedlines if l.applied
-                    and l.prettystr().startswith('+')])
-        rem = len([l for l in self.changedlines if l.applied
-                    and l.prettystr().startswith('-')])
+        add = len(
+            [
+                l
+                for l in self.changedlines
+                if l.applied and l.prettystr().startswith(b'+')
+            ]
+        )
+        rem = len(
+            [
+                l
+                for l in self.changedlines
+                if l.applied and l.prettystr().startswith(b'-')
+            ]
+        )
         return add, rem
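
A minimal standalone sketch of the tally ``countchanges`` performs, with
plain ``(applied, text)`` tuples standing in for ``uihunkline`` objects
(the tuple layout is illustrative only)::

   # only lines whose applied flag is set contribute to the (n+, n-) counts
   changedlines = [(True, b'+added'), (True, b'-removed'), (False, b'+skipped')]
   add = len([t for ok, t in changedlines if ok and t.startswith(b'+')])
   rem = len([t for ok, t in changedlines if ok and t.startswith(b'-')])
   assert (add, rem) == (1, 1)
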
 
     def getfromtoline(self):
         # calculate the number of removed lines converted to context lines
         removedconvertedtocontext = self.originalremoved - self.removed
 
-        contextlen = (len(self.before) + len(self.after) +
-                      removedconvertedtocontext)
-        if self.after and self.after[-1] == '\\ No newline at end of file\n':
+        contextlen = (
+            len(self.before) + len(self.after) + removedconvertedtocontext
+        )
+        if self.after and self.after[-1] == b'\\ No newline at end of file\n':
             contextlen -= 1
         fromlen = contextlen + self.removed
         tolen = contextlen + self.added
@@ -403,9 +436,13 @@
             if tolen == 0 and toline > 0:
                 toline -= 1
 
-        fromtoline = '@@ -%d,%d +%d,%d @@%s\n' % (
-            fromline, fromlen, toline, tolen,
-            self.proc and (' ' + self.proc))
+        fromtoline = b'@@ -%d,%d +%d,%d @@%s\n' % (
+            fromline,
+            fromlen,
+            toline,
+            tolen,
+            self.proc and (b' ' + self.proc),
+        )
         return fromtoline
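
The header assembled above follows the unified-diff convention
``@@ -fromline,fromlen +toline,tolen @@``; a standalone sketch with
made-up numbers::

   fromline, fromlen, toline, tolen, proc = 3, 7, 3, 8, b''
   fromtoline = b'@@ -%d,%d +%d,%d @@%s\n' % (
       fromline, fromlen, toline, tolen, proc and (b' ' + proc))
   assert fromtoline == b'@@ -3,7 +3,8 @@\n'
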
 
     def write(self, fp):
@@ -420,10 +457,10 @@
             changedlinestr = changedline.prettystr()
             if changedline.applied:
                 hunklinelist.append(changedlinestr)
-            elif changedlinestr.startswith("-"):
-                hunklinelist.append(" " + changedlinestr[1:])
+            elif changedlinestr.startswith(b"-"):
+                hunklinelist.append(b" " + changedlinestr[1:])
 
-        fp.write(''.join(self.before + hunklinelist + self.after))
+        fp.write(b''.join(self.before + hunklinelist + self.after))
 
     pretty = write
 
@@ -468,17 +505,18 @@
         for line in self.changedlines:
             text = line.linetext
             if line.applied:
-                if text.startswith('+'):
+                if text.startswith(b'+'):
                     dels.append(text[1:])
-                elif text.startswith('-'):
+                elif text.startswith(b'-'):
                     adds.append(text[1:])
-            elif text.startswith('+'):
+            elif text.startswith(b'+'):
                 dels.append(text[1:])
                 adds.append(text[1:])
-        hunk = ['-%s' % l for l in dels] + ['+%s' % l for l in adds]
+        hunk = [b'-%s' % l for l in dels] + [b'+%s' % l for l in adds]
         h = self._hunk
-        return patchmod.recordhunk(h.header, h.toline, h.fromline, h.proc,
-                                   h.before, hunk, h.after)
+        return patchmod.recordhunk(
+            h.header, h.toline, h.fromline, h.proc, h.before, hunk, h.after
+        )
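
``reversehunk`` deliberately crosses the lists -- applied ``+`` lines
become deletions and applied ``-`` lines become additions -- and the
``recordhunk`` call above likewise swaps ``toline``/``fromline``, since
the reversed hunk is applied to the new text. A toy sketch::

   applied = [b'+one', b'-two']       # applied changed lines
   dels = [t[1:] for t in applied if t.startswith(b'+')]
   adds = [t[1:] for t in applied if t.startswith(b'-')]
   hunk = [b'-%s' % l for l in dels] + [b'+%s' % l for l in adds]
   assert hunk == [b'-one', b'+two']  # the original change, undone
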
 
     def __getattr__(self, name):
         return getattr(self._hunk, name)
@@ -486,6 +524,7 @@
     def __repr__(self):
         return r'<hunk %r@%d>' % (self.filename(), self.fromline)
 
+
 def filterpatch(ui, chunks, chunkselector, operation=None):
     """interactively filter patch chunks into applied-only chunks"""
     chunks = list(chunks)
@@ -502,8 +541,9 @@
     ret = chunkselector(ui, uiheaders, operation=operation)
     appliedhunklist = []
     for hdr in uiheaders:
-        if (hdr.applied and
-            (hdr.special() or len([h for h in hdr.hunks if h.applied]) > 0)):
+        if hdr.applied and (
+            hdr.special() or len([h for h in hdr.hunks if h.applied]) > 0
+        ):
             appliedhunklist.append(hdr)
             fixoffset = 0
             for hnk in hdr.hunks:
@@ -512,25 +552,26 @@
                     # adjust the 'to'-line offset of the hunk to be correct
                     # after de-activating some of the other hunks for this file
                     if fixoffset:
-                        #hnk = copy.copy(hnk) # necessary??
+                        # hnk = copy.copy(hnk) # necessary??
                         hnk.toline += fixoffset
                 else:
                     fixoffset += hnk.removed - hnk.added
 
     return (appliedhunklist, ret)
 
+
 def chunkselector(ui, headerlist, operation=None):
     """
     curses interface to get selection of chunks, and mark the applied flags
     of the chosen chunks.
     """
-    ui.write(_('starting interactive selection\n'))
+    ui.write(_(b'starting interactive selection\n'))
     chunkselector = curseschunkselector(headerlist, ui, operation)
     # This is required for ncurses to display non-ASCII characters in
     # default user locale encoding correctly.  --immerrr
     locale.setlocale(locale.LC_ALL, r'')
     origsigtstp = sentinel = object()
-    if util.safehasattr(signal, 'SIGTSTP'):
+    if util.safehasattr(signal, r'SIGTSTP'):
         origsigtstp = signal.getsignal(signal.SIGTSTP)
     try:
         curses.wrapper(chunkselector.main)
@@ -542,11 +583,14 @@
             signal.signal(signal.SIGTSTP, origsigtstp)
     return chunkselector.opts
 
+
 def testdecorator(testfn, f):
     def u(*args, **kwargs):
         return f(testfn, *args, **kwargs)
+
     return u
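
``testdecorator`` simply pre-binds ``testfn`` so the wrapped function can
be called with the ordinary chunk-selector signature; a self-contained
sketch (the filename is made up)::

   def testdecorator(testfn, f):
       def u(*args, **kwargs):
           return f(testfn, *args, **kwargs)
       return u

   def selector(testfn, ui, headerlist, operation=None):
       return (testfn, operation)

   bound = testdecorator('commands.txt', selector)
   assert bound(None, [], operation=b'apply') == ('commands.txt', b'apply')
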
 
+
 def testchunkselector(testfn, ui, headerlist, operation=None):
     """
     test interface to get selection of chunks, and mark the applied flags
@@ -557,26 +601,29 @@
     class dummystdscr(object):
         def clear(self):
             pass
+
         def refresh(self):
             pass
 
     chunkselector.stdscr = dummystdscr()
     if testfn and os.path.exists(testfn):
-        testf = open(testfn, 'rb')
-        testcommands = [x.rstrip('\n') for x in testf.readlines()]
+        testf = open(testfn, r'rb')
+        testcommands = [x.rstrip(b'\n') for x in testf.readlines()]
         testf.close()
         while True:
             if chunkselector.handlekeypressed(testcommands.pop(0), test=True):
                 break
     return chunkselector.opts
 
-_headermessages = { # {operation: text}
-    'apply': _('Select hunks to apply'),
-    'discard': _('Select hunks to discard'),
-    'keep': _('Select hunks to keep'),
-    None: _('Select hunks to record'),
+
+_headermessages = {  # {operation: text}
+    b'apply': _(b'Select hunks to apply'),
+    b'discard': _(b'Select hunks to discard'),
+    b'keep': _(b'Select hunks to keep'),
+    None: _(b'Select hunks to record'),
 }
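
The ``None`` key is the default used when no operation is named (plain
record); anything else is rejected by ``curseschunkselector.__init__``
below. A sketch with an inlined copy of the mapping::

   msgs = {b'apply': b'Select hunks to apply', None: b'Select hunks to record'}
   operation = None
   if operation not in msgs:
       raise ValueError('unexpected operation: %r' % operation)
   assert msgs[operation] == b'Select hunks to record'
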
 
+
 class curseschunkselector(object):
     def __init__(self, headerlist, ui, operation=None):
         # put the headers into a patch object
@@ -602,7 +649,7 @@
         # long as not explicitly set to a falsy value - especially,
         # when not set at all. This is to stay most compatible with
         # previous (color only) behaviour.
-        uicolor = stringutil.parsebool(self.ui.config('ui', 'color'))
+        uicolor = stringutil.parsebool(self.ui.config(b'ui', b'color'))
         self.usecolor = uicolor is not False
 
         # the currently selected header, hunk, or hunk-line
@@ -631,18 +678,17 @@
         # (used for determining when the selected item begins/ends)
         self.linesprintedtopadsofar = 0
 
-        # the first line of the pad which is visible on the screen
-        self.firstlineofpadtoprint = 0
-
         # stores optional text for a commit comment provided by the user
-        self.commenttext = ""
+        self.commenttext = b""
 
         # if the last 'toggle all' command caused all changes to be applied
         self.waslasttoggleallapplied = True
 
         # affects some ui text
         if operation not in _headermessages:
-            raise error.ProgrammingError('unexpected operation: %s' % operation)
+            raise error.ProgrammingError(
+                b'unexpected operation: %s' % operation
+            )
         self.operation = operation
 
     def uparrowevent(self):
@@ -691,7 +737,7 @@
         a hunk is currently selected, then select the next hunk, if one exists,
         or if not, the next header if one exists.
         """
-        #self.startprintline += 1 #debug
+        # self.startprintline += 1 #debug
         currentitem = self.currentselecteditem
 
         nextitem = currentitem.nextitem()
@@ -807,7 +853,7 @@
         self.currentselecteditem = currentitem
 
     def updatescroll(self):
-        "scroll the screen to fully show the currently-selected"
+        b"scroll the screen to fully show the currently-selected"
         selstart = self.selecteditemstartline
         selend = self.selecteditemendline
 
@@ -825,7 +871,7 @@
             self.scrolllines(selstart - padstartbuffered)
 
     def scrolllines(self, numlines):
-        "scroll the screen up (down) by numlines when numlines >0 (<0)."
+        b"scroll the screen up (down) by numlines when numlines >0 (<0)."
         self.firstlineofpadtoprint += numlines
         if self.firstlineofpadtoprint < 0:
             self.firstlineofpadtoprint = 0
@@ -872,19 +918,20 @@
             nosiblingsapplied = not (True in siblingappliedstatus)
 
             siblingspartialstatus = [hnk.partial for hnk in item.header.hunks]
-            somesiblingspartial = (True in siblingspartialstatus)
+            somesiblingspartial = True in siblingspartialstatus
 
-            #cases where applied or partial should be removed from header
+            # cases where applied or partial should be removed from header
 
             # if no 'sibling' hunks are applied (including this hunk)
             if nosiblingsapplied:
                 if not item.header.special():
                     item.header.applied = False
                     item.header.partial = False
-            else: # some/all parent siblings are applied
+            else:  # some/all parent siblings are applied
                 item.header.applied = True
-                item.header.partial = (somesiblingspartial or
-                                        not allsiblingsapplied)
+                item.header.partial = (
+                    somesiblingspartial or not allsiblingsapplied
+                )
 
         elif isinstance(item, uihunkline):
             siblingappliedstatus = [ln.applied for ln in item.hunk.changedlines]
@@ -898,18 +945,20 @@
             elif allsiblingsapplied:
                 item.hunk.applied = True
                 item.hunk.partial = False
-            else: # some siblings applied
+            else:  # some siblings applied
                 item.hunk.applied = True
                 item.hunk.partial = True
 
-            parentsiblingsapplied = [hnk.applied for hnk
-                                     in item.hunk.header.hunks]
+            parentsiblingsapplied = [
+                hnk.applied for hnk in item.hunk.header.hunks
+            ]
             noparentsiblingsapplied = not (True in parentsiblingsapplied)
             allparentsiblingsapplied = not (False in parentsiblingsapplied)
 
-            parentsiblingspartial = [hnk.partial for hnk
-                                     in item.hunk.header.hunks]
-            someparentsiblingspartial = (True in parentsiblingspartial)
+            parentsiblingspartial = [
+                hnk.partial for hnk in item.hunk.header.hunks
+            ]
+            someparentsiblingspartial = True in parentsiblingspartial
 
             # if all parent hunks are not applied, un-apply header
             if noparentsiblingsapplied:
@@ -917,14 +966,15 @@
                     item.hunk.header.applied = False
                     item.hunk.header.partial = False
             # set the applied and partial status of the header if needed
-            else: # some/all parent siblings are applied
+            else:  # some/all parent siblings are applied
                 item.hunk.header.applied = True
-                item.hunk.header.partial = (someparentsiblingspartial or
-                                            not allparentsiblingsapplied)
+                item.hunk.header.partial = (
+                    someparentsiblingspartial or not allparentsiblingsapplied
+                )
 
     def toggleall(self):
-        "toggle the applied flag of all items."
-        if self.waslasttoggleallapplied: # then unapply them this time
+        b"toggle the applied flag of all items."
+        if self.waslasttoggleallapplied:  # then unapply them this time
             for item in self.headerlist:
                 if item.applied:
                     self.toggleapply(item)
@@ -935,9 +985,11 @@
         self.waslasttoggleallapplied = not self.waslasttoggleallapplied
 
     def toggleallbetween(self):
-        "toggle applied on or off for all items in range [lastapplied,current]."
-        if (not self.lastapplieditem or
-            self.currentselecteditem == self.lastapplieditem):
+        b"toggle applied on or off for all items in range [lastapplied,current]."
+        if (
+            not self.lastapplieditem
+            or self.currentselecteditem == self.lastapplieditem
+        ):
             # Treat this like a normal 'x'/' '
             self.toggleapply()
             return
@@ -945,7 +997,7 @@
         startitem = self.lastapplieditem
         enditem = self.currentselecteditem
         # Verify that enditem is "after" startitem, otherwise swap them.
-        for direction in ['forward', 'reverse']:
+        for direction in [b'forward', b'reverse']:
             nextitem = startitem.nextitem()
             while nextitem and nextitem != enditem:
                 nextitem = nextitem.nextitem()
@@ -974,7 +1026,7 @@
             nextitem = nextitem.nextitem()
 
     def togglefolded(self, item=None, foldparent=False):
-        "toggle folded flag of specified item (defaults to currently selected)"
+        b"toggle folded flag of specified item (defaults to currently selected)"
         if item is None:
             item = self.currentselecteditem
         if foldparent or (isinstance(item, uiheader) and item.neverunfolded):
@@ -985,7 +1037,7 @@
                 item.neverunfolded = False
 
             # also fold any foldable children of the parent/current item
-            if isinstance(item, uiheader): # the original or 'new' item
+            if isinstance(item, uiheader):  # the original or 'new' item
                 for child in item.allchildren():
                     child.folded = not item.folded
 
@@ -1004,11 +1056,22 @@
         # turn tabs into spaces
         instr = instr.expandtabs(4)
         strwidth = encoding.colwidth(instr)
-        numspaces = (width - ((strwidth + xstart) % width))
-        return instr + " " * numspaces
+        numspaces = width - ((strwidth + xstart) % width)
+        return instr + b" " * numspaces
 
-    def printstring(self, window, text, fgcolor=None, bgcolor=None, pair=None,
-        pairname=None, attrlist=None, towin=True, align=True, showwhtspc=False):
+    def printstring(
+        self,
+        window,
+        text,
+        fgcolor=None,
+        bgcolor=None,
+        pair=None,
+        pairname=None,
+        attrlist=None,
+        towin=True,
+        align=True,
+        showwhtspc=False,
+    ):
         """
         print the string, text, with the specified colors and attributes, to
         the specified curses window object.
@@ -1030,8 +1093,11 @@
         # preprocess the text, converting tabs to spaces
         text = text.expandtabs(4)
         # strip \n, and convert control characters to ^[char] representation
-        text = re.sub(br'[\x00-\x08\x0a-\x1f]',
-                lambda m:'^' + chr(ord(m.group()) + 64), text.strip('\n'))
+        text = re.sub(
+            br'[\x00-\x08\x0a-\x1f]',
+            lambda m: b'^' + pycompat.bytechr(ord(m.group()) + 64),
+            text.strip(b'\n'),
+        )
 
         if pair is not None:
             colorpair = pair
@@ -1060,11 +1126,11 @@
                     colorpair |= textattr
 
         y, xstart = self.chunkpad.getyx()
-        t = "" # variable for counting lines printed
+        t = b""  # variable for counting lines printed
         # if requested, show trailing whitespace
         if showwhtspc:
             origlen = len(text)
-            text = text.rstrip(' \n') # tabs have already been expanded
+            text = text.rstrip(b' \n')  # tabs have already been expanded
             strippedlen = len(text)
             numtrailingspaces = origlen - strippedlen
 
@@ -1073,15 +1139,15 @@
         t += text
 
         if showwhtspc:
-                wscolorpair = colorpair | curses.A_REVERSE
-                if towin:
-                    for i in range(numtrailingspaces):
-                        window.addch(curses.ACS_CKBOARD, wscolorpair)
-                t += " " * numtrailingspaces
+            wscolorpair = colorpair | curses.A_REVERSE
+            if towin:
+                for i in range(numtrailingspaces):
+                    window.addch(curses.ACS_CKBOARD, wscolorpair)
+            t += b" " * numtrailingspaces
 
         if align:
             if towin:
-                extrawhitespace = self.alignstring("", window)
+                extrawhitespace = self.alignstring(b"", window)
                 window.addstr(extrawhitespace, colorpair)
             else:
                 # need to use t, since the x position hasn't incremented
@@ -1090,36 +1156,38 @@
 
         # is reset to 0 at the beginning of printitem()
 
-        linesprinted = (xstart + len(t)) / self.xscreensize
+        linesprinted = (xstart + len(t)) // self.xscreensize
         self.linesprintedtopadsofar += linesprinted
         return t
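
The ``//`` above matters on Python 3, where true division would make
``linesprinted`` a float and corrupt the pad-line bookkeeping; the
wrapped-line arithmetic, sketched with made-up sizes::

   xstart, rendered, screenwidth = 10, 150, 80
   linesprinted = (xstart + rendered) // screenwidth
   assert linesprinted == 2   # the rendered text wraps onto two pad lines
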
 
     def _getstatuslinesegments(self):
         """-> [str]. return segments"""
         selected = self.currentselecteditem.applied
-        spaceselect = _('space/enter: select')
-        spacedeselect = _('space/enter: deselect')
+        spaceselect = _(b'space/enter: select')
+        spacedeselect = _(b'space/enter: deselect')
         # Format the selected label into a place as long as the longer of the
         # two possible labels.  This may vary by language.
         spacelen = max(len(spaceselect), len(spacedeselect))
-        selectedlabel = '%-*s' % (spacelen,
-                                  spacedeselect if selected else spaceselect)
+        selectedlabel = b'%-*s' % (
+            spacelen,
+            spacedeselect if selected else spaceselect,
+        )
         segments = [
             _headermessages[self.operation],
-            '-',
-            _('[x]=selected **=collapsed'),
-            _('c: confirm'),
-            _('q: abort'),
-            _('arrow keys: move/expand/collapse'),
+            b'-',
+            _(b'[x]=selected **=collapsed'),
+            _(b'c: confirm'),
+            _(b'q: abort'),
+            _(b'arrow keys: move/expand/collapse'),
             selectedlabel,
-            _('?: help'),
+            _(b'?: help'),
         ]
         return segments
 
     def _getstatuslines(self):
         """() -> [str]. return short help used in the top status window"""
         if self.errorstr is not None:
-            lines = [self.errorstr, _('Press any key to continue')]
+            lines = [self.errorstr, _(b'Press any key to continue')]
         else:
             # wrap segments to lines
             segments = self._getstatuslinesegments()
@@ -1128,7 +1196,7 @@
             lastwidth = width
             for s in segments:
                 w = encoding.colwidth(s)
-                sep = ' ' * (1 + (s and s[0] not in '-['))
+                sep = b' ' * (1 + (s and s[0] not in b'-['))
                 if lastwidth + w + len(sep) >= width:
                     lines.append(s)
                     lastwidth = w
@@ -1149,7 +1217,7 @@
         # print out the status lines at the top
         try:
             for line in self._getstatuslines():
-                printstring(self.statuswin, line, pairname="legend")
+                printstring(self.statuswin, line, pairname=b"legend")
             self.statuswin.refresh()
         except curses.error:
             pass
@@ -1160,10 +1228,14 @@
         try:
             self.printitem()
             self.updatescroll()
-            self.chunkpad.refresh(self.firstlineofpadtoprint, 0,
-                                  self.numstatuslines, 0,
-                                  self.yscreensize - self.numstatuslines,
-                                  self.xscreensize)
+            self.chunkpad.refresh(
+                self.firstlineofpadtoprint,
+                0,
+                self.numstatuslines,
+                0,
+                self.yscreensize - self.numstatuslines,
+                self.xscreensize,
+            )
         except curses.error:
             pass
 
@@ -1176,48 +1248,51 @@
         # create checkbox string
         if item.applied:
             if not isinstance(item, uihunkline) and item.partial:
-                checkbox = "[~]"
+                checkbox = b"[~]"
             else:
-                checkbox = "[x]"
+                checkbox = b"[x]"
         else:
-            checkbox = "[ ]"
+            checkbox = b"[ ]"
 
         try:
             if item.folded:
-                checkbox += "**"
+                checkbox += b"**"
                 if isinstance(item, uiheader):
                     # one of "m", "a", or "d" (modified, added, deleted)
                     filestatus = item.changetype
 
-                    checkbox += filestatus + " "
+                    checkbox += filestatus + b" "
             else:
-                checkbox += "  "
+                checkbox += b"  "
                 if isinstance(item, uiheader):
                     # add two more spaces for headers
-                    checkbox += "  "
-        except AttributeError: # not foldable
-            checkbox += "  "
+                    checkbox += b"  "
+        except AttributeError:  # not foldable
+            checkbox += b"  "
 
         return checkbox
 
-    def printheader(self, header, selected=False, towin=True,
-                    ignorefolding=False):
+    def printheader(
+        self, header, selected=False, towin=True, ignorefolding=False
+    ):
         """
         print the header to the pad.  if towin is False, don't print
         anything to the window, but just return the string that would
         have been printed (used to count display lines).
         """
 
-        outstr = ""
+        outstr = b""
         text = header.prettystr()
         chunkindex = self.chunklist.index(header)
 
         if chunkindex != 0 and not header.folded:
             # add separating line before headers
-            outstr += self.printstring(self.chunkpad, '_' * self.xscreensize,
-                                       towin=towin, align=False)
+            outstr += self.printstring(
+                self.chunkpad, b'_' * self.xscreensize, towin=towin, align=False
+            )
         # select color-pair based on if the header is selected
-        colorpair = self.getcolorpair(name=selected and "selected" or "normal",
-                                      attrlist=[curses.A_BOLD])
+        colorpair = self.getcolorpair(
+            name=selected and b"selected" or b"normal", attrlist=[curses.A_BOLD]
+        )
 
         # print out each line of the chunk, expanding it to screen width
 
@@ -1225,46 +1300,53 @@
         indentnumchars = 0
         checkbox = self.getstatusprefixstring(header)
         if not header.folded or ignorefolding:
-            textlist = text.split("\n")
+            textlist = text.split(b"\n")
             linestr = checkbox + textlist[0]
         else:
             linestr = checkbox + header.filename()
-        outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
-                                   towin=towin)
+        outstr += self.printstring(
+            self.chunkpad, linestr, pair=colorpair, towin=towin
+        )
         if not header.folded or ignorefolding:
             if len(textlist) > 1:
                 for line in textlist[1:]:
-                    linestr = " "*(indentnumchars + len(checkbox)) + line
-                    outstr += self.printstring(self.chunkpad, linestr,
-                                               pair=colorpair, towin=towin)
+                    linestr = b" " * (indentnumchars + len(checkbox)) + line
+                    outstr += self.printstring(
+                        self.chunkpad, linestr, pair=colorpair, towin=towin
+                    )
 
         return outstr
 
-    def printhunklinesbefore(self, hunk, selected=False, towin=True,
-                             ignorefolding=False):
-        "includes start/end line indicator"
-        outstr = ""
+    def printhunklinesbefore(
+        self, hunk, selected=False, towin=True, ignorefolding=False
+    ):
+        b"includes start/end line indicator"
+        outstr = b""
         # where hunk is in list of siblings
         hunkindex = hunk.header.hunks.index(hunk)
 
         if hunkindex != 0:
             # add separating line before headers
-            outstr += self.printstring(self.chunkpad, ' '*self.xscreensize,
-                                       towin=towin, align=False)
+            outstr += self.printstring(
+                self.chunkpad, b' ' * self.xscreensize, towin=towin, align=False
+            )
 
-        colorpair = self.getcolorpair(name=selected and "selected" or "normal",
-                                      attrlist=[curses.A_BOLD])
+        colorpair = self.getcolorpair(
+            name=selected and b"selected" or b"normal", attrlist=[curses.A_BOLD]
+        )
 
         # print out from-to line with checkbox
         checkbox = self.getstatusprefixstring(hunk)
 
-        lineprefix = " "*self.hunkindentnumchars + checkbox
-        frtoline = "   " + hunk.getfromtoline().strip("\n")
+        lineprefix = b" " * self.hunkindentnumchars + checkbox
+        frtoline = b"   " + hunk.getfromtoline().strip(b"\n")
 
-        outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
-                                   align=False) # add uncolored checkbox/indent
-        outstr += self.printstring(self.chunkpad, frtoline, pair=colorpair,
-                                   towin=towin)
+        outstr += self.printstring(
+            self.chunkpad, lineprefix, towin=towin, align=False
+        )  # add uncolored checkbox/indent
+        outstr += self.printstring(
+            self.chunkpad, frtoline, pair=colorpair, towin=towin
+        )
 
         if hunk.folded and not ignorefolding:
             # skip remainder of output
@@ -1272,49 +1354,56 @@
 
         # print out lines of the chunk preceding changed-lines
         for line in hunk.before:
-            linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
+            linestr = (
+                b" " * (self.hunklineindentnumchars + len(checkbox)) + line
+            )
             outstr += self.printstring(self.chunkpad, linestr, towin=towin)
 
         return outstr
 
     def printhunklinesafter(self, hunk, towin=True, ignorefolding=False):
-        outstr = ""
+        outstr = b""
         if hunk.folded and not ignorefolding:
             return outstr
 
         # a bit superfluous, but to avoid hard-coding indent amount
         checkbox = self.getstatusprefixstring(hunk)
         for line in hunk.after:
-            linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
+            linestr = (
+                b" " * (self.hunklineindentnumchars + len(checkbox)) + line
+            )
             outstr += self.printstring(self.chunkpad, linestr, towin=towin)
 
         return outstr
 
     def printhunkchangedline(self, hunkline, selected=False, towin=True):
-        outstr = ""
+        outstr = b""
         checkbox = self.getstatusprefixstring(hunkline)
 
-        linestr = hunkline.prettystr().strip("\n")
+        linestr = hunkline.prettystr().strip(b"\n")
 
         # select color-pair based on whether line is an addition/removal
         if selected:
-            colorpair = self.getcolorpair(name="selected")
-        elif linestr.startswith("+"):
-            colorpair = self.getcolorpair(name="addition")
-        elif linestr.startswith("-"):
-            colorpair = self.getcolorpair(name="deletion")
-        elif linestr.startswith("\\"):
-            colorpair = self.getcolorpair(name="normal")
+            colorpair = self.getcolorpair(name=b"selected")
+        elif linestr.startswith(b"+"):
+            colorpair = self.getcolorpair(name=b"addition")
+        elif linestr.startswith(b"-"):
+            colorpair = self.getcolorpair(name=b"deletion")
+        elif linestr.startswith(b"\\"):
+            colorpair = self.getcolorpair(name=b"normal")
 
-        lineprefix = " "*self.hunklineindentnumchars + checkbox
-        outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
-                                   align=False) # add uncolored checkbox/indent
-        outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
-                                   towin=towin, showwhtspc=True)
+        lineprefix = b" " * self.hunklineindentnumchars + checkbox
+        outstr += self.printstring(
+            self.chunkpad, lineprefix, towin=towin, align=False
+        )  # add uncolored checkbox/indent
+        outstr += self.printstring(
+            self.chunkpad, linestr, pair=colorpair, towin=towin, showwhtspc=True
+        )
         return outstr
 
-    def printitem(self, item=None, ignorefolding=False, recursechildren=True,
-                  towin=True):
+    def printitem(
+        self, item=None, ignorefolding=False, recursechildren=True, towin=True
+    ):
         """
         use __printitem() to print the specified item.
         if item is not specified, then print the entire patch.
@@ -1327,12 +1416,13 @@
             self.linesprintedtopadsofar = 0
 
         outstr = []
-        self.__printitem(item, ignorefolding, recursechildren, outstr,
-                                  towin=towin)
-        return ''.join(outstr)
+        self.__printitem(
+            item, ignorefolding, recursechildren, outstr, towin=towin
+        )
+        return b''.join(outstr)
 
     def outofdisplayedarea(self):
-        y, _ = self.chunkpad.getyx() # cursor location
+        y, _ = self.chunkpad.getyx()  # cursor location
         # * 2 here works, but an optimization would be the max number of
         # consecutive non-selectable lines,
         # i.e. the max number of context lines for any hunk in the patch
@@ -1341,18 +1431,21 @@
         return y < miny or y > maxy
 
     def handleselection(self, item, recursechildren):
-        selected = (item is self.currentselecteditem)
+        selected = item is self.currentselecteditem
         if selected and recursechildren:
             # assumes line numbering starting from line 0
             self.selecteditemstartline = self.linesprintedtopadsofar
-            selecteditemlines = self.getnumlinesdisplayed(item,
-                                                          recursechildren=False)
-            self.selecteditemendline = (self.selecteditemstartline +
-                                        selecteditemlines - 1)
+            selecteditemlines = self.getnumlinesdisplayed(
+                item, recursechildren=False
+            )
+            self.selecteditemendline = (
+                self.selecteditemstartline + selecteditemlines - 1
+            )
         return selected
 
-    def __printitem(self, item, ignorefolding, recursechildren, outstr,
-                    towin=True):
+    def __printitem(
+        self, item, ignorefolding, recursechildren, outstr, towin=True
+    ):
         """
         recursive method for printing out patch/header/hunk/hunk-line data to
         screen.  also returns a string with all of the content of the displayed
@@ -1373,36 +1466,52 @@
         if isinstance(item, patch):
             if recursechildren:
                 for hdr in item:
-                    self.__printitem(hdr, ignorefolding,
-                            recursechildren, outstr, towin)
+                    self.__printitem(
+                        hdr, ignorefolding, recursechildren, outstr, towin
+                    )
         # todo: eliminate all isinstance() calls
         if isinstance(item, uiheader):
-            outstr.append(self.printheader(item, selected, towin=towin,
-                                       ignorefolding=ignorefolding))
+            outstr.append(
+                self.printheader(
+                    item, selected, towin=towin, ignorefolding=ignorefolding
+                )
+            )
             if recursechildren:
                 for hnk in item.hunks:
-                    self.__printitem(hnk, ignorefolding,
-                            recursechildren, outstr, towin)
-        elif (isinstance(item, uihunk) and
-              ((not item.header.folded) or ignorefolding)):
+                    self.__printitem(
+                        hnk, ignorefolding, recursechildren, outstr, towin
+                    )
+        elif isinstance(item, uihunk) and (
+            (not item.header.folded) or ignorefolding
+        ):
             # print the hunk data which comes before the changed-lines
-            outstr.append(self.printhunklinesbefore(item, selected, towin=towin,
-                                                ignorefolding=ignorefolding))
+            outstr.append(
+                self.printhunklinesbefore(
+                    item, selected, towin=towin, ignorefolding=ignorefolding
+                )
+            )
             if recursechildren:
                 for l in item.changedlines:
-                    self.__printitem(l, ignorefolding,
-                            recursechildren, outstr, towin)
-                outstr.append(self.printhunklinesafter(item, towin=towin,
-                                                ignorefolding=ignorefolding))
-        elif (isinstance(item, uihunkline) and
-              ((not item.hunk.folded) or ignorefolding)):
-            outstr.append(self.printhunkchangedline(item, selected,
-                towin=towin))
+                    self.__printitem(
+                        l, ignorefolding, recursechildren, outstr, towin
+                    )
+                outstr.append(
+                    self.printhunklinesafter(
+                        item, towin=towin, ignorefolding=ignorefolding
+                    )
+                )
+        elif isinstance(item, uihunkline) and (
+            (not item.hunk.folded) or ignorefolding
+        ):
+            outstr.append(
+                self.printhunkchangedline(item, selected, towin=towin)
+            )
 
         return outstr
 
-    def getnumlinesdisplayed(self, item=None, ignorefolding=False,
-                             recursechildren=True):
+    def getnumlinesdisplayed(
+        self, item=None, ignorefolding=False, recursechildren=True
+    ):
         """
         return the number of lines which would be displayed if the item were
         to be printed to the display.  the item will not be printed to the
@@ -1413,13 +1522,14 @@
         """
 
         # temporarily disable printing to windows by printstring
-        patchdisplaystring = self.printitem(item, ignorefolding,
-                                            recursechildren, towin=False)
+        patchdisplaystring = self.printitem(
+            item, ignorefolding, recursechildren, towin=False
+        )
         numlines = len(patchdisplaystring) // self.xscreensize
         return numlines
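
This length-based count only works because ``printstring`` pads every
rendered line to the full screen width, so string length maps exactly to
display lines; sketched::

   xscreensize = 80
   patchdisplaystring = b' ' * (3 * xscreensize)   # three fully padded lines
   assert len(patchdisplaystring) // xscreensize == 3
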
 
     def sigwinchhandler(self, n, frame):
-        "handle window resizing"
+        b"handle window resizing"
         try:
             curses.endwin()
             self.xscreensize, self.yscreensize = scmutil.termsize(self.ui)
@@ -1429,8 +1539,9 @@
         except curses.error:
             pass
 
-    def getcolorpair(self, fgcolor=None, bgcolor=None, name=None,
-                     attrlist=None):
+    def getcolorpair(
+        self, fgcolor=None, bgcolor=None, name=None, attrlist=None
+    ):
         """
         get a curses color pair, adding it to self.colorpairs if it is not
         already defined.  an optional string, name, can be passed as a shortcut
@@ -1460,14 +1571,15 @@
                 pairindex = len(self.colorpairs) + 1
                 if self.usecolor:
                     curses.init_pair(pairindex, fgcolor, bgcolor)
-                    colorpair = self.colorpairs[(fgcolor, bgcolor)] = (
-                        curses.color_pair(pairindex))
+                    colorpair = self.colorpairs[
+                        (fgcolor, bgcolor)
+                    ] = curses.color_pair(pairindex)
                     if name is not None:
                         self.colorpairnames[name] = curses.color_pair(pairindex)
                 else:
                     cval = 0
                     if name is not None:
-                        if name == 'selected':
+                        if name == b'selected':
                             cval = curses.A_REVERSE
                         self.colorpairnames[name] = cval
                     colorpair = self.colorpairs[(fgcolor, bgcolor)] = cval
@@ -1487,11 +1599,11 @@
         return colorpair
 
     def initcolorpair(self, *args, **kwargs):
-        "same as getcolorpair."
+        b"same as getcolorpair."
         self.getcolorpair(*args, **kwargs)
 
     def helpwindow(self):
-        "print a help window to the screen.  exit after any keypress."
+        b"print a help window to the screen.  exit after any keypress."
         helptext = _(
             """            [press any key to return to the patch-display]
 
@@ -1521,26 +1633,28 @@
                       c : confirm selected changes
                       r : review/edit and confirm selected changes
                       q : quit without confirming (no changes will be made)
-                      ? : help (what you're currently reading)""")
+                      ? : help (what you're currently reading)"""
+        )
 
         helpwin = curses.newwin(self.yscreensize, 0, 0, 0)
-        helplines = helptext.split("\n")
-        helplines = helplines + [" "]*(
-            self.yscreensize - self.numstatuslines - len(helplines) - 1)
+        helplines = helptext.split(b"\n")
+        helplines = helplines + [b" "] * (
+            self.yscreensize - self.numstatuslines - len(helplines) - 1
+        )
         try:
             for line in helplines:
-                self.printstring(helpwin, line, pairname="legend")
+                self.printstring(helpwin, line, pairname=b"legend")
         except curses.error:
             pass
         helpwin.refresh()
         try:
-            with self.ui.timeblockedsection('crecord'):
+            with self.ui.timeblockedsection(b'crecord'):
                 helpwin.getkey()
         except curses.error:
             pass
 
     def commitMessageWindow(self):
-        "Create a temporary commit message editing window on the screen."
+        b"Create a temporary commit message editing window on the screen."
 
         curses.raw()
         curses.def_prog_mode()
@@ -1548,7 +1662,7 @@
         self.commenttext = self.ui.edit(self.commenttext, self.ui.username())
         curses.cbreak()
         self.stdscr.refresh()
-        self.stdscr.keypad(1) # allow arrow-keys to continue to function
+        self.stdscr.keypad(1)  # allow arrow-keys to continue to function
 
     def handlefirstlineevent(self):
         """
@@ -1590,19 +1704,19 @@
         self.recenterdisplayedarea()
 
     def confirmationwindow(self, windowtext):
-        "display an informational window, then wait for and return a keypress."
+        b"display an informational window, then wait for and return a keypress."
 
         confirmwin = curses.newwin(self.yscreensize, 0, 0, 0)
         try:
-            lines = windowtext.split("\n")
+            lines = windowtext.split(b"\n")
             for line in lines:
-                self.printstring(confirmwin, line, pairname="selected")
+                self.printstring(confirmwin, line, pairname=b"selected")
         except curses.error:
             pass
         self.stdscr.refresh()
         confirmwin.refresh()
         try:
-            with self.ui.timeblockedsection('crecord'):
+            with self.ui.timeblockedsection(b'crecord'):
+                response = encoding.strtolocal(chr(self.stdscr.getch()))
         except ValueError:
             response = None
@@ -1613,7 +1727,7 @@
         """ask for 'y' to be pressed to confirm selected. return True if
         confirmed."""
         confirmtext = _(
-"""If you answer yes to the following, your currently chosen patch chunks
+            """If you answer yes to the following, your currently chosen patch chunks
 will be loaded into an editor. To modify the patch, make the changes in your
 editor and save. To accept the current patch as-is, close the editor without
 saving.
@@ -1622,12 +1736,13 @@
       failing to follow this rule will result in the commit aborting.
 
 are you sure you want to review/edit and confirm the selected changes [yn]?
-""")
-        with self.ui.timeblockedsection('crecord'):
+"""
+        )
+        with self.ui.timeblockedsection(b'crecord'):
             response = self.confirmationwindow(confirmtext)
         if response is None:
-            response = "n"
-        if response.lower().startswith("y"):
+            response = b"n"
+        if response.lower().startswith(b"y"):
             return True
         else:
             return False
@@ -1640,17 +1755,21 @@
         new changeset will be created (the normal commit behavior).
         """
 
-        if opts.get('amend') is None:
-            opts['amend'] = True
-            msg = _("Amend option is turned on -- committing the currently "
-                    "selected changes will not create a new changeset, but "
-                    "instead update the most recently committed changeset.\n\n"
-                    "Press any key to continue.")
-        elif opts.get('amend') is True:
-            opts['amend'] = None
-            msg = _("Amend option is turned off -- committing the currently "
-                    "selected changes will create a new changeset.\n\n"
-                    "Press any key to continue.")
+        if opts.get(b'amend') is None:
+            opts[b'amend'] = True
+            msg = _(
+                b"Amend option is turned on -- committing the currently "
+                b"selected changes will not create a new changeset, but "
+                b"instead update the most recently committed changeset.\n\n"
+                b"Press any key to continue."
+            )
+        elif opts.get(b'amend') is True:
+            opts[b'amend'] = None
+            msg = _(
+                b"Amend option is turned off -- committing the currently "
+                b"selected changes will create a new changeset.\n\n"
+                b"Press any key to continue."
+            )
         if not test:
             self.confirmationwindow(msg)
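
The option cycles between ``None`` (off) and ``True`` (on), so pressing
``a`` twice is a net no-op; sketched::

   opts = {}
   for expected in (True, None, True):
       if opts.get(b'amend') is None:
           opts[b'amend'] = True    # off -> on
       elif opts.get(b'amend') is True:
           opts[b'amend'] = None    # on -> off
       assert opts[b'amend'] is expected
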
 
@@ -1668,6 +1787,7 @@
         """
         edit the currently selected chunk
         """
+
         def updateui(self):
             self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
             self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
@@ -1678,12 +1798,12 @@
 
         def editpatchwitheditor(self, chunk):
             if chunk is None:
-                self.ui.write(_('cannot edit patch for whole file'))
-                self.ui.write("\n")
+                self.ui.write(_(b'cannot edit patch for whole file'))
+                self.ui.write(b"\n")
                 return None
             if chunk.header.binary():
-                self.ui.write(_('cannot edit patch for binary file'))
-                self.ui.write("\n")
+                self.ui.write(_(b'cannot edit patch for binary file'))
+                self.ui.write(b"\n")
                 return None
 
             # write the initial patch
@@ -1694,7 +1814,7 @@
 
             # start the editor and wait for it to complete
             try:
-                patch = self.ui.edit(patch.getvalue(), "", action="diff")
+                patch = self.ui.edit(patch.getvalue(), b"", action=b"diff")
             except error.Abort as exc:
                 self.errorstr = stringutil.forcebytestr(exc)
                 return None
@@ -1703,8 +1823,11 @@
                 self.stdscr.refresh()
 
             # remove comment lines
-            patch = [line + '\n' for line in patch.splitlines()
-                     if not line.startswith('#')]
+            patch = [
+                line + b'\n'
+                for line in patch.splitlines()
+                if not line.startswith(b'#')
+            ]
             return patchmod.parsepatch(patch)
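
``splitlines`` drops the line terminators, so a newline is re-added to
every kept line before the text is re-parsed; sketched::

   edited = b'# HG: this comment is dropped\n+new line\n context'
   patch = [l + b'\n' for l in edited.splitlines() if not l.startswith(b'#')]
   assert patch == [b'+new line\n', b' context\n']
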
 
         if item is None:
@@ -1728,7 +1851,7 @@
         header = item.header
         editedhunkindex = header.hunks.index(item)
         hunksbefore = header.hunks[:editedhunkindex]
-        hunksafter = header.hunks[editedhunkindex + 1:]
+        hunksafter = header.hunks[editedhunkindex + 1 :]
         newpatchheader = newpatches[0]
         newhunks = [uihunk(h, header) for h in newpatchheader.hunks]
         newadded = sum([h.added for h in newhunks])
@@ -1765,59 +1888,59 @@
         Return true to exit the main loop.
         """
         keypressed = pycompat.bytestr(keypressed)
-        if keypressed in ["k", "KEY_UP"]:
+        if keypressed in [b"k", b"KEY_UP"]:
             self.uparrowevent()
-        elif keypressed in ["K", "KEY_PPAGE"]:
+        elif keypressed in [b"K", b"KEY_PPAGE"]:
             self.uparrowshiftevent()
-        elif keypressed in ["j", "KEY_DOWN"]:
+        elif keypressed in [b"j", b"KEY_DOWN"]:
             self.downarrowevent()
-        elif keypressed in ["J", "KEY_NPAGE"]:
+        elif keypressed in [b"J", b"KEY_NPAGE"]:
             self.downarrowshiftevent()
-        elif keypressed in ["l", "KEY_RIGHT"]:
+        elif keypressed in [b"l", b"KEY_RIGHT"]:
             self.rightarrowevent()
-        elif keypressed in ["h", "KEY_LEFT"]:
+        elif keypressed in [b"h", b"KEY_LEFT"]:
             self.leftarrowevent()
-        elif keypressed in ["H", "KEY_SLEFT"]:
+        elif keypressed in [b"H", b"KEY_SLEFT"]:
             self.leftarrowshiftevent()
-        elif keypressed in ["q"]:
-            raise error.Abort(_('user quit'))
-        elif keypressed in ['a']:
+        elif keypressed in [b"q"]:
+            raise error.Abort(_(b'user quit'))
+        elif keypressed in [b'a']:
             self.toggleamend(self.opts, test)
-        elif keypressed in ["c"]:
+        elif keypressed in [b"c"]:
             return True
-        elif keypressed in ["r"]:
+        elif keypressed in [b"r"]:
             if self.reviewcommit():
-                self.opts['review'] = True
+                self.opts[b'review'] = True
                 return True
-        elif test and keypressed in ['R']:
-            self.opts['review'] = True
+        elif test and keypressed in [b'R']:
+            self.opts[b'review'] = True
             return True
-        elif keypressed in [' ', 'x']:
+        elif keypressed in [b' ', b'x']:
             self.toggleapply()
-        elif keypressed in ['\n', 'KEY_ENTER']:
+        elif keypressed in [b'\n', b'KEY_ENTER']:
             self.toggleapply()
             self.nextsametype(test=test)
-        elif keypressed in ['X']:
+        elif keypressed in [b'X']:
             self.toggleallbetween()
-        elif keypressed in ['A']:
+        elif keypressed in [b'A']:
             self.toggleall()
-        elif keypressed in ['e']:
+        elif keypressed in [b'e']:
             self.toggleedit(test=test)
-        elif keypressed in ["f"]:
+        elif keypressed in [b"f"]:
             self.togglefolded()
-        elif keypressed in ["F"]:
+        elif keypressed in [b"F"]:
             self.togglefolded(foldparent=True)
-        elif keypressed in ["m"]:
+        elif keypressed in [b"m"]:
             self.commitMessageWindow()
-        elif keypressed in ["g", "KEY_HOME"]:
+        elif keypressed in [b"g", b"KEY_HOME"]:
             self.handlefirstlineevent()
-        elif keypressed in ["G", "KEY_END"]:
+        elif keypressed in [b"G", b"KEY_END"]:
             self.handlelastlineevent()
-        elif keypressed in ["?"]:
+        elif keypressed in [b"?"]:
             self.helpwindow()
             self.stdscr.clear()
             self.stdscr.refresh()
-        elif curses.unctrl(keypressed) in ["^L"]:
+        elif curses.unctrl(keypressed) in [b"^L"]:
             # scroll the current line to the top of the screen, and redraw
             # everything
             self.scrolllines(self.selecteditemstartline)
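
The ``pycompat.bytestr`` call at the top of this method normalizes the
native ``str`` key names curses returns on Python 3, which is what keeps
the ``b''`` membership tests above portable; for these ASCII key names
that amounts to roughly::

   keypressed = 'KEY_UP'             # curses hands back a native str
   key = keypressed.encode('ascii')  # normalize to bytes for comparison
   assert key in [b'k', b'KEY_UP']
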
@@ -1830,9 +1953,8 @@
         """
 
         origsigwinch = sentinel = object()
-        if util.safehasattr(signal, 'SIGWINCH'):
-            origsigwinch = signal.signal(signal.SIGWINCH,
-                                         self.sigwinchhandler)
+        if util.safehasattr(signal, r'SIGWINCH'):
+            origsigwinch = signal.signal(signal.SIGWINCH, self.sigwinchhandler)
         try:
             return self._main(stdscr)
         finally:
@@ -1866,15 +1988,18 @@
 
         # available colors: black, blue, cyan, green, magenta, white, yellow
         # init_pair(color_id, foreground_color, background_color)
-        self.initcolorpair(None, None, name="normal")
-        self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_MAGENTA,
-                           name="selected")
-        self.initcolorpair(curses.COLOR_RED, None, name="deletion")
-        self.initcolorpair(curses.COLOR_GREEN, None, name="addition")
-        self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_BLUE, name="legend")
+        self.initcolorpair(None, None, name=b"normal")
+        self.initcolorpair(
+            curses.COLOR_WHITE, curses.COLOR_MAGENTA, name=b"selected"
+        )
+        self.initcolorpair(curses.COLOR_RED, None, name=b"deletion")
+        self.initcolorpair(curses.COLOR_GREEN, None, name=b"addition")
+        self.initcolorpair(
+            curses.COLOR_WHITE, curses.COLOR_BLUE, name=b"legend"
+        )
         # newwin([height, width,] begin_y, begin_x)
         self.statuswin = curses.newwin(self.numstatuslines, 0, 0, 0)
-        self.statuswin.keypad(1) # interpret arrow-key, etc. esc sequences
+        self.statuswin.keypad(1)  # interpret arrow-key, etc. esc sequences
 
         # figure out how much space to allocate for the chunk-pad which is
         # used for displaying the patch
@@ -1889,27 +2014,30 @@
             self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
         except curses.error:
             self.initexc = fallbackerror(
-                _('this diff is too large to be displayed'))
+                _(b'this diff is too large to be displayed')
+            )
             return
         # initialize selecteditemendline (initial start-line is 0)
         self.selecteditemendline = self.getnumlinesdisplayed(
-            self.currentselecteditem, recursechildren=False)
+            self.currentselecteditem, recursechildren=False
+        )
 
         while True:
             self.updatescreen()
             try:
-                with self.ui.timeblockedsection('crecord'):
+                with self.ui.timeblockedsection(b'crecord'):
                     keypressed = self.statuswin.getkey()
                 if self.errorstr is not None:
                     self.errorstr = None
                     continue
             except curses.error:
-                keypressed = "foobar"
+                keypressed = b"foobar"
             if self.handlekeypressed(keypressed):
                 break
 
-        if self.commenttext != "":
-            whitespaceremoved = re.sub(br"(?m)^\s.*(\n|$)", b"",
-                                       self.commenttext)
-            if whitespaceremoved != "":
-                self.opts['message'] = self.commenttext
+        if self.commenttext != b"":
+            whitespaceremoved = re.sub(
+                br"(?m)^\s.*(\n|$)", b"", self.commenttext
+            )
+            if whitespaceremoved != b"":
+                self.opts[b'message'] = self.commenttext
--- a/mercurial/dagop.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/dagop.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,12 +9,8 @@
 
 import heapq
 
-from .node import (
-    nullrev,
-)
-from .thirdparty import (
-    attr,
-)
+from .node import nullrev
+from .thirdparty import attr
 from . import (
     error,
     mdiff,
@@ -30,6 +26,7 @@
 # possible maximum depth between null and wdir()
 maxlogdepth = 0x80000000
 
+
 def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
     """Walk DAG using 'pfunc' from the given 'revs' nodes
 
@@ -46,7 +43,7 @@
     if stopdepth == 0:
         return
     if stopdepth < 0:
-        raise error.ProgrammingError('negative stopdepth')
+        raise error.ProgrammingError(b'negative stopdepth')
     if reverse:
         heapsign = -1  # max heap
     else:
@@ -72,7 +69,7 @@
                 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
         # rescan parents until curdepth >= startdepth because queued entries
         # of the same revision are iterated from the lowest depth
-        foundnew = (currev != lastrev)
+        foundnew = currev != lastrev
         if foundnew and curdepth >= startdepth:
             lastrev = currev
             yield currev
@@ -82,6 +79,7 @@
                 if prev != node.nullrev:
                     heapq.heappush(pendingheap, (heapsign * prev, pdepth))
 
+
 def filectxancestors(fctxs, followfirst=False):
     """Like filectx.ancestors(), but can walk from multiple files/revisions,
     and includes the given fctxs themselves
@@ -90,6 +88,7 @@
     """
     visit = {}
     visitheap = []
+
     def addvisit(fctx):
         rev = fctx.rev()
         if rev not in visit:
@@ -105,7 +104,7 @@
     for c in fctxs:
         addvisit(c)
     while visit:
-        currev = -heapq.heappop(visitheap)
+        currev = -(heapq.heappop(visitheap))
         curfctxs = visit.pop(currev)
         yield currev, curfctxs
         for c in curfctxs:
@@ -113,6 +112,7 @@
                 addvisit(parent)
     assert not visitheap
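
``heapq`` provides only a min-heap, so ``filectxancestors`` pushes
negated revision numbers and negates again on pop, walking revisions
from highest to lowest; sketched::

   import heapq

   visitheap = []
   for rev in (5, 12, 9):
       heapq.heappush(visitheap, -rev)
   assert -heapq.heappop(visitheap) == 12   # highest revision first
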
 
+
 def filerevancestors(fctxs, followfirst=False):
     """Like filectx.ancestors(), but can walk from multiple files/revisions,
     and includes the given fctxs themselves
@@ -122,17 +122,20 @@
     gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst))
     return generatorset(gen, iterasc=False)
 
+
 def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
     if followfirst:
         cut = 1
     else:
         cut = None
     cl = repo.changelog
+
     def plainpfunc(rev):
         try:
             return cl.parentrevs(rev)[:cut]
         except error.WdirUnsupported:
             return (pctx.rev() for pctx in repo[rev].parents()[:cut])
+
     if cutfunc is None:
         pfunc = plainpfunc
     else:
@@ -140,8 +143,10 @@
         revs = revs.filter(lambda rev: not cutfunc(rev))
     return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
 
-def revancestors(repo, revs, followfirst=False, startdepth=None,
-                 stopdepth=None, cutfunc=None):
+
+def revancestors(
+    repo, revs, followfirst=False, startdepth=None, stopdepth=None, cutfunc=None
+):
     r"""Like revlog.ancestors(), but supports additional options, includes
     the given revs themselves, and returns a smartset
 
@@ -163,10 +168,12 @@
         |/
         A
     """
-    gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
-                           cutfunc)
+    gen = _genrevancestors(
+        repo, revs, followfirst, startdepth, stopdepth, cutfunc
+    )
     return generatorset(gen, iterasc=False)
 
+
 def _genrevdescendants(repo, revs, followfirst):
     if followfirst:
         cut = 1
@@ -194,6 +201,7 @@
                     yield i
                     break
 
+
 def _builddescendantsmap(repo, startrev, followfirst):
     """Build map of 'rev -> child revs', offset from startrev"""
     cl = repo.changelog
@@ -207,13 +215,17 @@
             descmap[p2rev - startrev].append(currev)
     return descmap
 
+
 def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
     startrev = revs.min()
     descmap = _builddescendantsmap(repo, startrev, followfirst)
+
     def pfunc(rev):
         return descmap[rev - startrev]
+
     return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)
 
+
 def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
     """Like revlog.descendants() but supports additional options, includes
     the given revs themselves, and returns a smartset
@@ -224,10 +236,12 @@
     if startdepth is None and (stopdepth is None or stopdepth >= maxlogdepth):
         gen = _genrevdescendants(repo, revs, followfirst)
     else:
-        gen = _genrevdescendantsofdepth(repo, revs, followfirst,
-                                        startdepth, stopdepth)
+        gen = _genrevdescendantsofdepth(
+            repo, revs, followfirst, startdepth, stopdepth
+        )
     return generatorset(gen, iterasc=True)
 
+
 def descendantrevs(revs, revsfn, parentrevsfn):
     """Generate revision number descendants in revision order.
 
@@ -259,6 +273,7 @@
                 yield rev
                 break
 
+
 def _reachablerootspure(pfunc, minroot, roots, heads, includepath):
     """See revlog.reachableroots"""
     if not roots:
@@ -294,6 +309,7 @@
                 reached(rev)
     return reachable
 
+
 def reachableroots(repo, roots, heads, includepath=False):
     """See revlog.reachableroots"""
     if not roots:
@@ -306,6 +322,7 @@
     revs.sort()
     return revs
 
+
 def _changesrange(fctx1, fctx2, linerange2, diffopts):
     """Return `(diffinrange, linerange1)` where `diffinrange` is True
     if diff from fctx2 to fctx1 has changes in linerange2 and
@@ -313,9 +330,10 @@
     """
     blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
     filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
-    diffinrange = any(stype == '!' for _, stype in filteredblocks)
+    diffinrange = any(stype == b'!' for _, stype in filteredblocks)
     return diffinrange, linerange1
 
+
 def blockancestors(fctx, fromline, toline, followfirst=False):
     """Yield ancestors of `fctx` with respect to the block of lines within
     `fromline`-`toline` range.
@@ -349,6 +367,7 @@
         if inrange:
             yield c, linerange2
 
+
 def blockdescendants(fctx, fromline, toline):
     """Yield descendants of `fctx` with respect to the block of lines within
     `fromline`-`toline` range.
@@ -388,6 +407,7 @@
         if inrange:
             yield c, linerange1
 
+
 @attr.s(slots=True, frozen=True)
 class annotateline(object):
     fctx = attr.ib()
@@ -396,6 +416,7 @@
     skip = attr.ib(default=False)
     text = attr.ib(default=None)
 
+
 @attr.s(slots=True, frozen=True)
 class _annotatedfile(object):
     # list indexed by lineno - 1
@@ -405,16 +426,19 @@
     # full file content
     text = attr.ib()
 
+
 def _countlines(text):
-    if text.endswith("\n"):
-        return text.count("\n")
-    return text.count("\n") + int(bool(text))
+    if text.endswith(b"\n"):
+        return text.count(b"\n")
+    return text.count(b"\n") + int(bool(text))
+
 
 def _decoratelines(text, fctx):
     n = _countlines(text)
     linenos = pycompat.rangelist(1, n + 1)
     return _annotatedfile([fctx] * n, linenos, [False] * n, text)
 
+
 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
     r'''
     Given parent and child fctxes and annotate data for parents, for all lines
@@ -426,8 +450,10 @@
 
     See test-annotate.py for unit tests.
     '''
-    pblocks = [(parent, mdiff.allblocks(parent.text, child.text, opts=diffopts))
-               for parent in parents]
+    pblocks = [
+        (parent, mdiff.allblocks(parent.text, child.text, opts=diffopts))
+        for parent in parents
+    ]
 
     if skipchild:
         # Need to iterate over the blocks twice -- make it a list
@@ -438,7 +464,7 @@
         for (a1, a2, b1, b2), t in blocks:
             # Changed blocks ('!') or blocks made only of blank lines ('~')
             # belong to the child.
-            if t == '=':
+            if t == b'=':
                 child.fctxs[b1:b2] = parent.fctxs[a1:a2]
                 child.linenos[b1:b2] = parent.linenos[a1:a2]
                 child.skips[b1:b2] = parent.skips[a1:a2]
@@ -482,6 +508,7 @@
                         child.skips[bk] = True
     return child
 
+
 def annotate(base, parents, skiprevs=None, diffopts=None):
     """Core algorithm for filectx.annotate()
 
@@ -528,8 +555,9 @@
             skipchild = False
             if skiprevs is not None:
                 skipchild = f._changeid in skiprevs
-            curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
-                                 diffopts)
+            curr = _annotatepair(
+                [hist[p] for p in pl], f, curr, skipchild, diffopts
+            )
             for p in pl:
                 if needed[p] == 1:
                     del hist[p]
@@ -541,8 +569,11 @@
             del pcache[f]
 
     a = hist[base]
-    return [annotateline(*r) for r in zip(a.fctxs, a.linenos, a.skips,
-                                          mdiff.splitnewlines(a.text))]
+    return [
+        annotateline(*r)
+        for r in zip(a.fctxs, a.linenos, a.skips, mdiff.splitnewlines(a.text))
+    ]
+
 
 def toposort(revs, parentsfunc, firstbranch=()):
     """Yield revisions from heads to roots one (topo) branch at a time.
@@ -695,7 +726,7 @@
             #
             # we also update the <parents> set to include the parents of the
             # new nodes.
-            if rev == currentrev: # only display stuff in rev
+            if rev == currentrev:  # only display stuff in rev
                 gr[0].append(rev)
             gr[1].remove(rev)
             parents = [p for p in parentsfunc(rev) if p > node.nullrev]
@@ -742,6 +773,7 @@
         for r in g[0]:
             yield r
 
+
 def headrevs(revs, parentsfn):
     """Resolve the set of heads from a set of revisions.
 
@@ -764,6 +796,7 @@
     headrevs.difference_update(parents)
     return headrevs
 
+
 def headrevssubset(revsfn, parentrevsfn, startrev=None, stoprevs=None):
     """Returns the set of all revs that have no children with control.
 
@@ -800,6 +833,7 @@
 
     return heads
 
+
 def linearize(revs, parentsfn):
     """Linearize and topologically sort a list of revisions.
 
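The depth parameters threaded through the reformatted revancestors() and
_walkrevtree() calls above share one convention: startdepth is inclusive,
stopdepth is exclusive, and the starting revs sit at depth 0.  A minimal
sketch of that behavior, calling the private _walkrevtree() helper directly
with a made-up parent function (the toy DAG and pfunc below are illustrative
stand-ins for changelog.parentrevs(), not part of this change)::

   from mercurial import dagop

   # Toy DAG, rev -> parent revs; -1 stands in for nullrev.
   toydag = {0: [-1], 1: [0], 2: [1], 3: [1], 4: [2, 3]}

   def pfunc(rev):
       return toydag[rev]

   # Depth 0 is rev 4 itself; stopdepth=2 excludes the grandparents.
   # reverse=True walks from higher to lower revs, as revancestors() does.
   revs = dagop._walkrevtree(
       pfunc, [4], startdepth=0, stopdepth=2, reverse=True
   )
   print(list(revs))  # [4, 3, 2]
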
--- a/mercurial/dagparser.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/dagparser.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,9 +15,8 @@
     error,
     pycompat,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
+
 
 def parsedag(desc):
     '''parses a DAG from a concise textual description; generates events
@@ -186,17 +185,17 @@
     chiter = pycompat.iterbytestr(desc)
 
     def nextch():
-        return next(chiter, '\0')
+        return next(chiter, b'\0')
 
     def nextrun(c, allow):
-        s = ''
+        s = b''
         while c in allow:
             s += c
             c = nextch()
         return c, s
 
     def nextdelimited(c, limit, escape):
-        s = ''
+        s = b''
         while c != limit:
             if c == escape:
                 c = nextch()
@@ -205,93 +204,97 @@
         return nextch(), s
 
     def nextstring(c):
-        if c == '"':
-            return nextdelimited(nextch(), '"', '\\')
+        if c == b'"':
+            return nextdelimited(nextch(), b'"', b'\\')
         else:
             return nextrun(c, wordchars)
 
     c = nextch()
-    while c != '\0':
+    while c != b'\0':
         while c in pycompat.bytestr(string.whitespace):
             c = nextch()
-        if c == '.':
-            yield 'n', (r, [p1])
+        if c == b'.':
+            yield b'n', (r, [p1])
             p1 = r
             r += 1
             c = nextch()
-        elif c == '+':
+        elif c == b'+':
             c, digs = nextrun(nextch(), pycompat.bytestr(string.digits))
             n = int(digs)
             for i in pycompat.xrange(0, n):
-                yield 'n', (r, [p1])
+                yield b'n', (r, [p1])
                 p1 = r
                 r += 1
-        elif c in '*/':
-            if c == '*':
+        elif c in b'*/':
+            if c == b'*':
                 c = nextch()
             c, pref = nextstring(c)
             prefs = [pref]
-            while c == '/':
+            while c == b'/':
                 c, pref = nextstring(nextch())
                 prefs.append(pref)
             ps = [resolve(ref) for ref in prefs]
-            yield 'n', (r, ps)
+            yield b'n', (r, ps)
             p1 = r
             r += 1
-        elif c == '<':
+        elif c == b'<':
             c, ref = nextstring(nextch())
             p1 = resolve(ref)
-        elif c == ':':
+        elif c == b':':
             c, name = nextstring(nextch())
             labels[name] = p1
-            yield 'l', (p1, name)
-        elif c == '@':
+            yield b'l', (p1, name)
+        elif c == b'@':
             c, text = nextstring(nextch())
-            yield 'a', text
-        elif c == '!':
+            yield b'a', text
+        elif c == b'!':
             c = nextch()
-            if c == '!':
-                cmd = ''
+            if c == b'!':
+                cmd = b''
                 c = nextch()
-                while c not in '\n\r\0':
+                while c not in b'\n\r\0':
                     cmd += c
                     c = nextch()
-                yield 'C', cmd
+                yield b'C', cmd
             else:
                 c, cmd = nextstring(c)
-                yield 'c', cmd
-        elif c == '#':
-            while c not in '\n\r\0':
+                yield b'c', cmd
+        elif c == b'#':
+            while c not in b'\n\r\0':
                 c = nextch()
-        elif c == '$':
+        elif c == b'$':
             p1 = -1
             c = nextch()
-        elif c == '\0':
-            return # in case it was preceded by whitespace
+        elif c == b'\0':
+            return  # in case it was preceded by whitespace
         else:
-            s = ''
+            s = b''
             i = 0
-            while c != '\0' and i < 10:
+            while c != b'\0' and i < 10:
                 s += c
                 i += 1
                 c = nextch()
-            raise error.Abort(_('invalid character in dag description: '
-                               '%s...') % s)
+            raise error.Abort(
+                _(b'invalid character in dag description: %s...') % s
+            )
+
 
-def dagtextlines(events,
-                 addspaces=True,
-                 wraplabels=False,
-                 wrapannotations=False,
-                 wrapcommands=False,
-                 wrapnonlinear=False,
-                 usedots=False,
-                 maxlinewidth=70):
+def dagtextlines(
+    events,
+    addspaces=True,
+    wraplabels=False,
+    wrapannotations=False,
+    wrapcommands=False,
+    wrapnonlinear=False,
+    usedots=False,
+    maxlinewidth=70,
+):
     '''generates single lines for dagtext()'''
 
     def wrapstring(text):
-        if re.match("^[0-9a-z]*$", text):
+        if re.match(b"^[0-9a-z]*$", text):
             return text
-        return '"' + text.replace('\\', '\\\\').replace('"', '\"') + '"'
+        return b'"' + text.replace(b'\\', b'\\\\').replace(b'"', b'\\"') + b'"'
 
     def gen():
         labels = {}
@@ -299,19 +302,24 @@
         wantr = 0
         needroot = False
         for kind, data in events:
-            if kind == 'n':
+            if kind == b'n':
                 r, ps = data
 
                 # sanity check
                 if r != wantr:
-                    raise error.Abort(_("expected id %i, got %i") % (wantr, r))
+                    raise error.Abort(_(b"expected id %i, got %i") % (wantr, r))
                 if not ps:
                     ps = [-1]
                 else:
                     for p in ps:
                         if p >= r:
-                            raise error.Abort(_("parent id %i is larger than "
-                                               "current id %i") % (p, r))
+                            raise error.Abort(
+                                _(
+                                    b"parent id %i is larger than "
+                                    b"current id %i"
+                                )
+                                % (p, r)
+                            )
                 wantr += 1
 
                 # new root?
@@ -319,90 +327,96 @@
                 if len(ps) == 1 and ps[0] == -1:
                     if needroot:
                         if run:
-                            yield '+%d' % run
+                            yield b'+%d' % run
                             run = 0
                         if wrapnonlinear:
-                            yield '\n'
-                        yield '$'
+                            yield b'\n'
+                        yield b'$'
                         p1 = -1
                     else:
                         needroot = True
                 if len(ps) == 1 and ps[0] == p1:
                     if usedots:
-                        yield "."
+                        yield b"."
                     else:
                         run += 1
                 else:
                     if run:
-                        yield '+%d' % run
+                        yield b'+%d' % run
                         run = 0
                     if wrapnonlinear:
-                        yield '\n'
+                        yield b'\n'
                     prefs = []
                     for p in ps:
                         if p == p1:
-                            prefs.append('')
+                            prefs.append(b'')
                         elif p in labels:
                             prefs.append(labels[p])
                         else:
-                            prefs.append('%d' % (r - p))
-                    yield '*' + '/'.join(prefs)
+                            prefs.append(b'%d' % (r - p))
+                    yield b'*' + b'/'.join(prefs)
             else:
                 if run:
-                    yield '+%d' % run
+                    yield b'+%d' % run
                     run = 0
-                if kind == 'l':
+                if kind == b'l':
                     rid, name = data
                     labels[rid] = name
-                    yield ':' + name
+                    yield b':' + name
                     if wraplabels:
-                        yield '\n'
-                elif kind == 'c':
-                    yield '!' + wrapstring(data)
+                        yield b'\n'
+                elif kind == b'c':
+                    yield b'!' + wrapstring(data)
                     if wrapcommands:
-                        yield '\n'
-                elif kind == 'C':
-                    yield '!!' + data
-                    yield '\n'
-                elif kind == 'a':
+                        yield b'\n'
+                elif kind == b'C':
+                    yield b'!!' + data
+                    yield b'\n'
+                elif kind == b'a':
                     if wrapannotations:
-                        yield '\n'
-                    yield '@' + wrapstring(data)
-                elif kind == '#':
-                    yield '#' + data
-                    yield '\n'
+                        yield b'\n'
+                    yield b'@' + wrapstring(data)
+                elif kind == b'#':
+                    yield b'#' + data
+                    yield b'\n'
                 else:
-                    raise error.Abort(_("invalid event type in dag: "
-                                        "('%s', '%s')")
-                                      % (stringutil.escapestr(kind),
-                                         stringutil.escapestr(data)))
+                    raise error.Abort(
+                        _(b"invalid event type in dag: ('%s', '%s')")
+                        % (
+                            stringutil.escapestr(kind),
+                            stringutil.escapestr(data),
+                        )
+                    )
         if run:
-            yield '+%d' % run
+            yield b'+%d' % run
 
-    line = ''
+    line = b''
     for part in gen():
-        if part == '\n':
+        if part == b'\n':
             if line:
                 yield line
-                line = ''
+                line = b''
         else:
             if len(line) + len(part) >= maxlinewidth:
                 yield line
-                line = ''
-            elif addspaces and line and part != '.':
-                line += ' '
+                line = b''
+            elif addspaces and line and part != b'.':
+                line += b' '
             line += part
     if line:
         yield line
 
-def dagtext(dag,
-            addspaces=True,
-            wraplabels=False,
-            wrapannotations=False,
-            wrapcommands=False,
-            wrapnonlinear=False,
-            usedots=False,
-            maxlinewidth=70):
+
+def dagtext(
+    dag,
+    addspaces=True,
+    wraplabels=False,
+    wrapannotations=False,
+    wrapcommands=False,
+    wrapnonlinear=False,
+    usedots=False,
+    maxlinewidth=70,
+):
     '''generates lines of a textual representation for a dag event stream
 
     events should generate what parsedag() does, so:
@@ -480,11 +494,15 @@
         '+1 :f +1 :p2 *f */p2'
 
     '''
-    return "\n".join(dagtextlines(dag,
-                                  addspaces,
-                                  wraplabels,
-                                  wrapannotations,
-                                  wrapcommands,
-                                  wrapnonlinear,
-                                  usedots,
-                                  maxlinewidth))
+    return b"\n".join(
+        dagtextlines(
+            dag,
+            addspaces,
+            wraplabels,
+            wrapannotations,
+            wrapcommands,
+            wrapnonlinear,
+            usedots,
+            maxlinewidth,
+        )
+    )
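
With the byteification above, parsedag() consumes a bytes description and
yields byte-string event kinds, so Python 3 callers must compare against
b'n', b'l', and friends.  A small usage sketch (the DAG text is an arbitrary
example)::

   from mercurial import dagparser

   desc = b'+2 :base +3 <base +2'  # two linear runs plus a branch off 'base'
   for kind, data in dagparser.parsedag(desc):
       if kind == b'n':    # new node: (id, [parent ids])
           rev, parents = data
           print(rev, parents)
       elif kind == b'l':  # label: (id, name)
           print('label', data)

dagtext() performs the inverse transformation and, with the b"\n".join()
change above, likewise returns bytes.
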
--- a/mercurial/debugcommands.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/debugcommands.py	Mon Oct 21 11:09:48 2019 -0400
@@ -32,6 +32,10 @@
     nullrev,
     short,
 )
+from .pycompat import (
+    getattr,
+    open,
+)
 from . import (
     bundle2,
     changegroup,
@@ -88,15 +92,14 @@
     stringutil,
 )
 
-from .revlogutils import (
-    deltas as deltautil
-)
+from .revlogutils import deltas as deltautil
 
 release = lockmod.release
 
 command = registrar.command()
 
-@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
+
+@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
 def debugancestor(ui, repo, *args):
     """find the ancestor revision of two revisions in a given index"""
     if len(args) == 3:
@@ -105,32 +108,53 @@
         lookup = r.lookup
     elif len(args) == 2:
         if not repo:
-            raise error.Abort(_('there is no Mercurial repository here '
-                                '(.hg not found)'))
+            raise error.Abort(
+                _(b'there is no Mercurial repository here (.hg not found)')
+            )
         rev1, rev2 = args
         r = repo.changelog
         lookup = repo.lookup
     else:
-        raise error.Abort(_('either two or three arguments required'))
+        raise error.Abort(_(b'either two or three arguments required'))
     a = r.ancestor(lookup(rev1), lookup(rev2))
-    ui.write('%d:%s\n' % (r.rev(a), hex(a)))
-
-@command('debugapplystreamclonebundle', [], 'FILE')
+    ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
+
+
+@command(b'debugapplystreamclonebundle', [], b'FILE')
 def debugapplystreamclonebundle(ui, repo, fname):
     """apply a stream clone bundle file"""
     f = hg.openpath(ui, fname)
     gen = exchange.readbundle(ui, f, fname)
     gen.apply(repo)
 
-@command('debugbuilddag',
-    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
-    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
-    ('n', 'new-file', None, _('add new file at each rev'))],
-    _('[OPTION]... [TEXT]'))
-def debugbuilddag(ui, repo, text=None,
-                  mergeable_file=False,
-                  overwritten_file=False,
-                  new_file=False):
+
+@command(
+    b'debugbuilddag',
+    [
+        (
+            b'm',
+            b'mergeable-file',
+            None,
+            _(b'add single file mergeable changes'),
+        ),
+        (
+            b'o',
+            b'overwritten-file',
+            None,
+            _(b'add single file all revs overwrite'),
+        ),
+        (b'n', b'new-file', None, _(b'add new file at each rev')),
+    ],
+    _(b'[OPTION]... [TEXT]'),
+)
+def debugbuilddag(
+    ui,
+    repo,
+    text=None,
+    mergeable_file=False,
+    overwritten_file=False,
+    new_file=False,
+):
     """builds a repo with a given DAG from scratch in the current empty repo
 
     The description of the DAG is read from stdin if not given on the
@@ -164,38 +188,40 @@
     """
 
     if text is None:
-        ui.status(_("reading DAG from stdin\n"))
+        ui.status(_(b"reading DAG from stdin\n"))
         text = ui.fin.read()
 
     cl = repo.changelog
     if len(cl) > 0:
-        raise error.Abort(_('repository is not empty'))
+        raise error.Abort(_(b'repository is not empty'))
 
     # determine number of revs in DAG
     total = 0
     for type, data in dagparser.parsedag(text):
-        if type == 'n':
+        if type == b'n':
             total += 1
 
     if mergeable_file:
         linesperrev = 2
         # make a file with k lines per rev
-        initialmergedlines = ['%d' % i
-                              for i in pycompat.xrange(0, total * linesperrev)]
-        initialmergedlines.append("")
+        initialmergedlines = [
+            b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
+        ]
+        initialmergedlines.append(b"")
 
     tags = []
-    progress = ui.makeprogress(_('building'), unit=_('revisions'),
-                               total=total)
-    with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"):
+    progress = ui.makeprogress(
+        _(b'building'), unit=_(b'revisions'), total=total
+    )
+    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
         at = -1
-        atbranch = 'default'
+        atbranch = b'default'
         nodeids = []
         id = 0
         progress.update(id)
         for type, data in dagparser.parsedag(text):
-            if type == 'n':
-                ui.note(('node %s\n' % pycompat.bytestr(data)))
+            if type == b'n':
+                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                 id, ps = data
 
                 files = []
@@ -203,46 +229,48 @@
 
                 p2 = None
                 if mergeable_file:
-                    fn = "mf"
+                    fn = b"mf"
                     p1 = repo[ps[0]]
                     if len(ps) > 1:
                         p2 = repo[ps[1]]
                         pa = p1.ancestor(p2)
-                        base, local, other = [x[fn].data() for x in (pa, p1,
-                                                                     p2)]
+                        base, local, other = [
+                            x[fn].data() for x in (pa, p1, p2)
+                        ]
                         m3 = simplemerge.Merge3Text(base, local, other)
                         ml = [l.strip() for l in m3.merge_lines()]
-                        ml.append("")
+                        ml.append(b"")
                     elif at > 0:
-                        ml = p1[fn].data().split("\n")
+                        ml = p1[fn].data().split(b"\n")
                     else:
                         ml = initialmergedlines
-                    ml[id * linesperrev] += " r%i" % id
-                    mergedtext = "\n".join(ml)
+                    ml[id * linesperrev] += b" r%i" % id
+                    mergedtext = b"\n".join(ml)
                     files.append(fn)
                     filecontent[fn] = mergedtext
 
                 if overwritten_file:
-                    fn = "of"
+                    fn = b"of"
                     files.append(fn)
-                    filecontent[fn] = "r%i\n" % id
+                    filecontent[fn] = b"r%i\n" % id
 
                 if new_file:
-                    fn = "nf%i" % id
+                    fn = b"nf%i" % id
                     files.append(fn)
-                    filecontent[fn] = "r%i\n" % id
+                    filecontent[fn] = b"r%i\n" % id
                     if len(ps) > 1:
                         if not p2:
                             p2 = repo[ps[1]]
                         for fn in p2:
-                            if fn.startswith("nf"):
+                            if fn.startswith(b"nf"):
                                 files.append(fn)
                                 filecontent[fn] = p2[fn].data()
 
                 def fctxfn(repo, cx, path):
                     if path in filecontent:
-                        return context.memfilectx(repo, cx, path,
-                                                  filecontent[path])
+                        return context.memfilectx(
+                            repo, cx, path, filecontent[path]
+                        )
                     return None
 
                 if len(ps) == 0 or ps[0] < 0:
@@ -251,70 +279,89 @@
                     pars = [nodeids[ps[0]], None]
                 else:
                     pars = [nodeids[p] for p in ps]
-                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
-                                    date=(id, 0),
-                                    user="debugbuilddag",
-                                    extra={'branch': atbranch})
+                cx = context.memctx(
+                    repo,
+                    pars,
+                    b"r%i" % id,
+                    files,
+                    fctxfn,
+                    date=(id, 0),
+                    user=b"debugbuilddag",
+                    extra={b'branch': atbranch},
+                )
                 nodeid = repo.commitctx(cx)
                 nodeids.append(nodeid)
                 at = id
-            elif type == 'l':
+            elif type == b'l':
                 id, name = data
-                ui.note(('tag %s\n' % name))
-                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
-            elif type == 'a':
-                ui.note(('branch %s\n' % data))
+                ui.note((b'tag %s\n' % name))
+                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
+            elif type == b'a':
+                ui.note((b'branch %s\n' % data))
                 atbranch = data
             progress.update(id)
 
         if tags:
-            repo.vfs.write("localtags", "".join(tags))
+            repo.vfs.write(b"localtags", b"".join(tags))
+
 
 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
-    indent_string = ' ' * indent
+    indent_string = b' ' * indent
     if all:
-        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
-                 % indent_string)
+        ui.writenoi18n(
+            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
+            % indent_string
+        )
 
         def showchunks(named):
-            ui.write("\n%s%s\n" % (indent_string, named))
+            ui.write(b"\n%s%s\n" % (indent_string, named))
             for deltadata in gen.deltaiter():
                 node, p1, p2, cs, deltabase, delta, flags = deltadata
-                ui.write("%s%s %s %s %s %s %d\n" %
-                         (indent_string, hex(node), hex(p1), hex(p2),
-                          hex(cs), hex(deltabase), len(delta)))
+                ui.write(
+                    b"%s%s %s %s %s %s %d\n"
+                    % (
+                        indent_string,
+                        hex(node),
+                        hex(p1),
+                        hex(p2),
+                        hex(cs),
+                        hex(deltabase),
+                        len(delta),
+                    )
+                )
 
         chunkdata = gen.changelogheader()
-        showchunks("changelog")
+        showchunks(b"changelog")
         chunkdata = gen.manifestheader()
-        showchunks("manifest")
+        showchunks(b"manifest")
         for chunkdata in iter(gen.filelogheader, {}):
-            fname = chunkdata['filename']
+            fname = chunkdata[b'filename']
             showchunks(fname)
     else:
         if isinstance(gen, bundle2.unbundle20):
-            raise error.Abort(_('use debugbundle2 for this file'))
+            raise error.Abort(_(b'use debugbundle2 for this file'))
         chunkdata = gen.changelogheader()
         for deltadata in gen.deltaiter():
             node, p1, p2, cs, deltabase, delta, flags = deltadata
-            ui.write("%s%s\n" % (indent_string, hex(node)))
+            ui.write(b"%s%s\n" % (indent_string, hex(node)))
+
 
 def _debugobsmarkers(ui, part, indent=0, **opts):
     """display version and markers contained in 'data'"""
     opts = pycompat.byteskwargs(opts)
     data = part.read()
-    indent_string = ' ' * indent
+    indent_string = b' ' * indent
     try:
         version, markers = obsolete._readmarkers(data)
     except error.UnknownVersion as exc:
-        msg = "%sunsupported version: %s (%d bytes)\n"
+        msg = b"%sunsupported version: %s (%d bytes)\n"
         msg %= indent_string, exc.version, len(data)
         ui.write(msg)
     else:
-        msg = "%sversion: %d (%d bytes)\n"
+        msg = b"%sversion: %d (%d bytes)\n"
         msg %= indent_string, version, len(data)
         ui.write(msg)
-        fm = ui.formatter('debugobsolete', opts)
+        fm = ui.formatter(b'debugobsolete', opts)
         for rawmarker in sorted(markers):
             m = obsutil.marker(None, rawmarker)
             fm.startitem()
@@ -322,56 +369,65 @@
             cmdutil.showmarker(fm, m)
         fm.end()
 
+
 def _debugphaseheads(ui, data, indent=0):
     """display version and markers contained in 'data'"""
-    indent_string = ' ' * indent
+    indent_string = b' ' * indent
     headsbyphase = phases.binarydecode(data)
     for phase in phases.allphases:
         for head in headsbyphase[phase]:
             ui.write(indent_string)
-            ui.write('%s %s\n' % (hex(head), phases.phasenames[phase]))
+            ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
+
 
 def _quasirepr(thing):
     if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
-        return '{%s}' % (
-            b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing)))
+        return b'{%s}' % (
+            b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
+        )
     return pycompat.bytestr(repr(thing))
 
+
 def _debugbundle2(ui, gen, all=None, **opts):
     """lists the contents of a bundle2"""
     if not isinstance(gen, bundle2.unbundle20):
-        raise error.Abort(_('not a bundle2 file'))
-    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
+        raise error.Abort(_(b'not a bundle2 file'))
+    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
     parttypes = opts.get(r'part_type', [])
     for part in gen.iterparts():
         if parttypes and part.type not in parttypes:
             continue
-        msg = '%s -- %s (mandatory: %r)\n'
+        msg = b'%s -- %s (mandatory: %r)\n'
         ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
-        if part.type == 'changegroup':
-            version = part.params.get('version', '01')
-            cg = changegroup.getunbundler(version, part, 'UN')
+        if part.type == b'changegroup':
+            version = part.params.get(b'version', b'01')
+            cg = changegroup.getunbundler(version, part, b'UN')
             if not ui.quiet:
                 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
-        if part.type == 'obsmarkers':
+        if part.type == b'obsmarkers':
             if not ui.quiet:
                 _debugobsmarkers(ui, part, indent=4, **opts)
-        if part.type == 'phase-heads':
+        if part.type == b'phase-heads':
             if not ui.quiet:
                 _debugphaseheads(ui, part, indent=4)
 
-@command('debugbundle',
-        [('a', 'all', None, _('show all details')),
-         ('', 'part-type', [], _('show only the named part type')),
-         ('', 'spec', None, _('print the bundlespec of the bundle'))],
-        _('FILE'),
-        norepo=True)
+
+@command(
+    b'debugbundle',
+    [
+        (b'a', b'all', None, _(b'show all details')),
+        (b'', b'part-type', [], _(b'show only the named part type')),
+        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
+    ],
+    _(b'FILE'),
+    norepo=True,
+)
 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
     """lists the contents of a bundle"""
     with hg.openpath(ui, bundlepath) as f:
         if spec:
             spec = exchange.getbundlespec(ui, f)
-            ui.write('%s\n' % spec)
+            ui.write(b'%s\n' % spec)
             return
 
         gen = exchange.readbundle(ui, f, bundlepath)
@@ -379,26 +435,26 @@
             return _debugbundle2(ui, gen, all=all, **opts)
         _debugchangegroup(ui, gen, all=all, **opts)
 
-@command('debugcapabilities',
-        [], _('PATH'),
-        norepo=True)
+
+@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
 def debugcapabilities(ui, path, **opts):
     """lists the capabilities of a remote peer"""
     opts = pycompat.byteskwargs(opts)
     peer = hg.peer(ui, opts, path)
     caps = peer.capabilities()
-    ui.write(('Main capabilities:\n'))
+    ui.writenoi18n(b'Main capabilities:\n')
     for c in sorted(caps):
-        ui.write(('  %s\n') % c)
+        ui.write(b'  %s\n' % c)
     b2caps = bundle2.bundle2caps(peer)
     if b2caps:
-        ui.write(('Bundle2 capabilities:\n'))
-        for key, values in sorted(b2caps.iteritems()):
-            ui.write(('  %s\n') % key)
+        ui.writenoi18n(b'Bundle2 capabilities:\n')
+        for key, values in sorted(pycompat.iteritems(b2caps)):
+            ui.write(b'  %s\n' % key)
             for v in values:
-                ui.write(('    %s\n') % v)
-
-@command('debugcheckstate', [], '')
+                ui.write(b'    %s\n' % v)
+
+
+@command(b'debugcheckstate', [], b'')
 def debugcheckstate(ui, repo):
     """validate the correctness of the current dirstate"""
     parent1, parent2 = repo.dirstate.parents()
@@ -407,69 +463,75 @@
     errors = 0
     for f in repo.dirstate:
         state = repo.dirstate[f]
-        if state in "nr" and f not in m1:
-            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
+        if state in b"nr" and f not in m1:
+            ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
             errors += 1
-        if state in "a" and f in m1:
-            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
+        if state in b"a" and f in m1:
+            ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
             errors += 1
-        if state in "m" and f not in m1 and f not in m2:
-            ui.warn(_("%s in state %s, but not in either manifest\n") %
-                    (f, state))
+        if state in b"m" and f not in m1 and f not in m2:
+            ui.warn(
+                _(b"%s in state %s, but not in either manifest\n") % (f, state)
+            )
             errors += 1
     for f in m1:
         state = repo.dirstate[f]
-        if state not in "nrm":
-            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
+        if state not in b"nrm":
+            ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
             errors += 1
     if errors:
-        error = _(".hg/dirstate inconsistent with current parent's manifest")
-        raise error.Abort(error)
+        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
+        raise error.Abort(errstr)
 
-@command('debugcolor',
-        [('', 'style', None, _('show all configured styles'))],
-        'hg debugcolor')
+
+@command(
+    b'debugcolor',
+    [(b'', b'style', None, _(b'show all configured styles'))],
+    b'hg debugcolor',
+)
 def debugcolor(ui, repo, **opts):
     """show available color, effects or style"""
-    ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode))
+    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
     if opts.get(r'style'):
         return _debugdisplaystyle(ui)
     else:
         return _debugdisplaycolor(ui)
 
+
 def _debugdisplaycolor(ui):
     ui = ui.copy()
     ui._styles.clear()
     for effect in color._activeeffects(ui).keys():
         ui._styles[effect] = effect
     if ui._terminfoparams:
-        for k, v in ui.configitems('color'):
-            if k.startswith('color.'):
+        for k, v in ui.configitems(b'color'):
+            if k.startswith(b'color.'):
                 ui._styles[k] = k[6:]
-            elif k.startswith('terminfo.'):
+            elif k.startswith(b'terminfo.'):
                 ui._styles[k] = k[9:]
-    ui.write(_('available colors:\n'))
+    ui.write(_(b'available colors:\n'))
     # sort label with a '_' after the other to group '_background' entry.
-    items = sorted(ui._styles.items(),
-                   key=lambda i: ('_' in i[0], i[0], i[1]))
+    items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
     for colorname, label in items:
-        ui.write(('%s\n') % colorname, label=label)
+        ui.write(b'%s\n' % colorname, label=label)
+
 
 def _debugdisplaystyle(ui):
-    ui.write(_('available style:\n'))
+    ui.write(_(b'available style:\n'))
     if not ui._styles:
         return
     width = max(len(s) for s in ui._styles)
     for label, effects in sorted(ui._styles.items()):
-        ui.write('%s' % label, label=label)
+        ui.write(b'%s' % label, label=label)
         if effects:
             # 50
-            ui.write(': ')
-            ui.write(' ' * (max(0, width - len(label))))
-            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
-        ui.write('\n')
-
-@command('debugcreatestreamclonebundle', [], 'FILE')
+            ui.write(b': ')
+            ui.write(b' ' * (max(0, width - len(label))))
+            ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
+        ui.write(b'\n')
+
+
+@command(b'debugcreatestreamclonebundle', [], b'FILE')
 def debugcreatestreamclonebundle(ui, repo, fname):
     """create a stream clone bundle file
 
@@ -479,21 +541,30 @@
     # TODO we may want to turn this into an abort when this functionality
     # is moved into `hg bundle`.
     if phases.hassecret(repo):
-        ui.warn(_('(warning: stream clone bundle will contain secret '
-                  'revisions)\n'))
+        ui.warn(
+            _(
+                b'(warning: stream clone bundle will contain secret '
+                b'revisions)\n'
+            )
+        )
 
     requirements, gen = streamclone.generatebundlev1(repo)
     changegroup.writechunks(ui, gen, fname)
 
-    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
-
-@command('debugdag',
-    [('t', 'tags', None, _('use tags as labels')),
-    ('b', 'branches', None, _('annotate with branch names')),
-    ('', 'dots', None, _('use dots for runs')),
-    ('s', 'spaces', None, _('separate elements by spaces'))],
-    _('[OPTION]... [FILE [REV]...]'),
-    optionalrepo=True)
+    ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
+
+
+@command(
+    b'debugdag',
+    [
+        (b't', b'tags', None, _(b'use tags as labels')),
+        (b'b', b'branches', None, _(b'annotate with branch names')),
+        (b'', b'dots', None, _(b'use dots for runs')),
+        (b's', b'spaces', None, _(b'separate elements by spaces')),
+    ],
+    _(b'[OPTION]... [FILE [REV]...]'),
+    optionalrepo=True,
+)
 def debugdag(ui, repo, file_=None, *revs, **opts):
     """format the changelog or an index DAG as a concise textual description
 
@@ -505,15 +576,15 @@
     spaces = opts.get(r'spaces')
     dots = opts.get(r'dots')
     if file_:
-        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
-                             file_)
+        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
         revs = set((int(r) for r in revs))
+
         def events():
             for r in rlog:
-                yield 'n', (r, list(p for p in rlog.parentrevs(r)
-                                        if p != -1))
+                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                 if r in revs:
-                    yield 'l', (r, "r%i" % r)
+                    yield b'l', (r, b"r%i" % r)
+
     elif repo:
         cl = repo.changelog
         tags = opts.get(r'tags')
@@ -522,70 +593,81 @@
             labels = {}
             for l, n in repo.tags().items():
                 labels.setdefault(cl.rev(n), []).append(l)
+
         def events():
-            b = "default"
+            b = b"default"
             for r in cl:
                 if branches:
-                    newb = cl.read(cl.node(r))[5]['branch']
+                    newb = cl.read(cl.node(r))[5][b'branch']
                     if newb != b:
-                        yield 'a', newb
+                        yield b'a', newb
                         b = newb
-                yield 'n', (r, list(p for p in cl.parentrevs(r)
-                                        if p != -1))
+                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                 if tags:
                     ls = labels.get(r)
                     if ls:
                         for l in ls:
-                            yield 'l', (r, l)
+                            yield b'l', (r, l)
+
     else:
-        raise error.Abort(_('need repo for changelog dag'))
-
-    for line in dagparser.dagtextlines(events(),
-                                       addspaces=spaces,
-                                       wraplabels=True,
-                                       wrapannotations=True,
-                                       wrapnonlinear=dots,
-                                       usedots=dots,
-                                       maxlinewidth=70):
+        raise error.Abort(_(b'need repo for changelog dag'))
+
+    for line in dagparser.dagtextlines(
+        events(),
+        addspaces=spaces,
+        wraplabels=True,
+        wrapannotations=True,
+        wrapnonlinear=dots,
+        usedots=dots,
+        maxlinewidth=70,
+    ):
         ui.write(line)
-        ui.write("\n")
-
-@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
+        ui.write(b"\n")
+
+
+@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
 def debugdata(ui, repo, file_, rev=None, **opts):
     """dump the contents of a data file revision"""
     opts = pycompat.byteskwargs(opts)
-    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
+    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
         if rev is not None:
-            raise error.CommandError('debugdata', _('invalid arguments'))
+            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
         file_, rev = None, file_
     elif rev is None:
-        raise error.CommandError('debugdata', _('invalid arguments'))
-    r = cmdutil.openstorage(repo, 'debugdata', file_, opts)
+        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
+    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
     try:
-        ui.write(r.revision(r.lookup(rev), raw=True))
+        ui.write(r.rawdata(r.lookup(rev)))
     except KeyError:
-        raise error.Abort(_('invalid revision identifier %s') % rev)
-
-@command('debugdate',
-    [('e', 'extended', None, _('try extended date formats'))],
-    _('[-e] DATE [RANGE]'),
-    norepo=True, optionalrepo=True)
+        raise error.Abort(_(b'invalid revision identifier %s') % rev)
+
+
+@command(
+    b'debugdate',
+    [(b'e', b'extended', None, _(b'try extended date formats'))],
+    _(b'[-e] DATE [RANGE]'),
+    norepo=True,
+    optionalrepo=True,
+)
 def debugdate(ui, date, range=None, **opts):
     """parse and display a date"""
     if opts[r"extended"]:
         d = dateutil.parsedate(date, util.extendeddateformats)
     else:
         d = dateutil.parsedate(date)
-    ui.write(("internal: %d %d\n") % d)
-    ui.write(("standard: %s\n") % dateutil.datestr(d))
+    ui.writenoi18n(b"internal: %d %d\n" % d)
+    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
     if range:
         m = dateutil.matchdate(range)
-        ui.write(("match: %s\n") % m(d[0]))
-
-@command('debugdeltachain',
+        ui.writenoi18n(b"match: %s\n" % m(d[0]))
+
+
+@command(
+    b'debugdeltachain',
     cmdutil.debugrevlogopts + cmdutil.formatteropts,
-    _('-c|-m|FILE'),
-    optionalrepo=True)
+    _(b'-c|-m|FILE'),
+    optionalrepo=True,
+)
 def debugdeltachain(ui, repo, file_=None, **opts):
     """dump information about delta chains in a revlog
 
@@ -622,7 +704,7 @@
     The sparse read can be enabled with experimental.sparse-read = True
     """
     opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
+    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
     index = r.index
     start = r.start
     length = r.length
@@ -637,20 +719,20 @@
 
         if generaldelta:
             if e[3] == e[5]:
-                deltatype = 'p1'
+                deltatype = b'p1'
             elif e[3] == e[6]:
-                deltatype = 'p2'
+                deltatype = b'p2'
             elif e[3] == rev - 1:
-                deltatype = 'prev'
+                deltatype = b'prev'
             elif e[3] == rev:
-                deltatype = 'base'
+                deltatype = b'base'
             else:
-                deltatype = 'other'
+                deltatype = b'other'
         else:
             if e[3] == rev:
-                deltatype = 'base'
+                deltatype = b'base'
             else:
-                deltatype = 'prev'
+                deltatype = b'prev'
 
         chain = r._deltachain(rev)[0]
         for iterrev in chain:
@@ -659,14 +741,16 @@
 
         return compsize, uncompsize, deltatype, chain, chainsize
 
-    fm = ui.formatter('debugdeltachain', opts)
-
-    fm.plain('    rev  chain# chainlen     prev   delta       '
-             'size    rawsize  chainsize     ratio   lindist extradist '
-             'extraratio')
+    fm = ui.formatter(b'debugdeltachain', opts)
+
+    fm.plain(
+        b'    rev  chain# chainlen     prev   delta       '
+        b'size    rawsize  chainsize     ratio   lindist extradist '
+        b'extraratio'
+    )
     if withsparseread:
-        fm.plain('   readsize largestblk rddensity srchunks')
-    fm.plain('\n')
+        fm.plain(b'   readsize largestblk rddensity srchunks')
+    fm.plain(b'\n')
 
     chainbases = {}
     for rev in r:
@@ -693,18 +777,36 @@
             extraratio = extradist
 
         fm.startitem()
-        fm.write('rev chainid chainlen prevrev deltatype compsize '
-                 'uncompsize chainsize chainratio lindist extradist '
-                 'extraratio',
-                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
-                 rev, chainid, len(chain), prevrev, deltatype, comp,
-                 uncomp, chainsize, chainratio, lineardist, extradist,
-                 extraratio,
-                 rev=rev, chainid=chainid, chainlen=len(chain),
-                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
-                 uncompsize=uncomp, chainsize=chainsize,
-                 chainratio=chainratio, lindist=lineardist,
-                 extradist=extradist, extraratio=extraratio)
+        fm.write(
+            b'rev chainid chainlen prevrev deltatype compsize '
+            b'uncompsize chainsize chainratio lindist extradist '
+            b'extraratio',
+            b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
+            rev,
+            chainid,
+            len(chain),
+            prevrev,
+            deltatype,
+            comp,
+            uncomp,
+            chainsize,
+            chainratio,
+            lineardist,
+            extradist,
+            extraratio,
+            rev=rev,
+            chainid=chainid,
+            chainlen=len(chain),
+            prevrev=prevrev,
+            deltatype=deltatype,
+            compsize=comp,
+            uncompsize=uncomp,
+            chainsize=chainsize,
+            chainratio=chainratio,
+            lindist=lineardist,
+            extradist=extradist,
+            extraratio=extraratio,
+        )
         if withsparseread:
             readsize = 0
             largestblock = 0
@@ -724,21 +826,38 @@
             else:
                 readdensity = 1
 
-            fm.write('readsize largestblock readdensity srchunks',
-                     ' %10d %10d %9.5f %8d',
-                     readsize, largestblock, readdensity, srchunks,
-                     readsize=readsize, largestblock=largestblock,
-                     readdensity=readdensity, srchunks=srchunks)
-
-        fm.plain('\n')
+            fm.write(
+                b'readsize largestblock readdensity srchunks',
+                b' %10d %10d %9.5f %8d',
+                readsize,
+                largestblock,
+                readdensity,
+                srchunks,
+                readsize=readsize,
+                largestblock=largestblock,
+                readdensity=readdensity,
+                srchunks=srchunks,
+            )
+
+        fm.plain(b'\n')
 
     fm.end()
 
-@command('debugdirstate|debugstate',
-    [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
-     ('', 'dates', True, _('display the saved mtime')),
-     ('', 'datesort', None, _('sort by saved mtime'))],
-    _('[OPTION]...'))
+
+@command(
+    b'debugdirstate|debugstate',
+    [
+        (
+            b'',
+            b'nodates',
+            None,
+            _(b'do not display the saved mtime (DEPRECATED)'),
+        ),
+        (b'', b'dates', True, _(b'display the saved mtime')),
+        (b'', b'datesort', None, _(b'sort by saved mtime')),
+    ],
+    _(b'[OPTION]...'),
+)
 def debugstate(ui, repo, **opts):
     """show the contents of the current dirstate"""
 
@@ -748,74 +867,89 @@
     datesort = opts.get(r'datesort')
 
     if datesort:
-        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
+        keyfunc = lambda x: (x[1][3], x[0])  # sort by mtime, then by filename
     else:
-        keyfunc = None # sort by filename
-    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
+        keyfunc = None  # sort by filename
+    for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
         if ent[3] == -1:
-            timestr = 'unset               '
+            timestr = b'unset               '
         elif nodates:
-            timestr = 'set                 '
+            timestr = b'set                 '
         else:
-            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
-                                    time.localtime(ent[3]))
+            timestr = time.strftime(
+                r"%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
+            )
             timestr = encoding.strtolocal(timestr)
         if ent[1] & 0o20000:
-            mode = 'lnk'
+            mode = b'lnk'
         else:
-            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
-        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
+            mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
+        ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
     for f in repo.dirstate.copies():
-        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
-
-@command('debugdiscovery',
-    [('', 'old', None, _('use old-style discovery')),
-    ('', 'nonheads', None,
-     _('use old-style discovery with non-heads included')),
-    ('', 'rev', [], 'restrict discovery to this set of revs'),
-    ('', 'seed', '12323', 'specify the random seed use for discovery'),
-    ] + cmdutil.remoteopts,
-    _('[--rev REV] [OTHER]'))
-def debugdiscovery(ui, repo, remoteurl="default", **opts):
+        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
+
+
+@command(
+    b'debugdiscovery',
+    [
+        (b'', b'old', None, _(b'use old-style discovery')),
+        (
+            b'',
+            b'nonheads',
+            None,
+            _(b'use old-style discovery with non-heads included'),
+        ),
+        (b'', b'rev', [], b'restrict discovery to this set of revs'),
+        (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
+    ]
+    + cmdutil.remoteopts,
+    _(b'[--rev REV] [OTHER]'),
+)
+def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
     """runs the changeset discovery protocol in isolation"""
     opts = pycompat.byteskwargs(opts)
     remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
     remote = hg.peer(repo, opts, remoteurl)
-    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
+    ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
 
     # make sure tests are repeatable
-    random.seed(int(opts['seed']))
-
-
-
-    if opts.get('old'):
+    random.seed(int(opts[b'seed']))
+
+    if opts.get(b'old'):
+
         def doit(pushedrevs, remoteheads, remote=remote):
-            if not util.safehasattr(remote, 'branches'):
+            if not util.safehasattr(remote, b'branches'):
                 # enable in-client legacy support
                 remote = localrepo.locallegacypeer(remote.local())
-            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
-                                                                force=True)
+            common, _in, hds = treediscovery.findcommonincoming(
+                repo, remote, force=True
+            )
             common = set(common)
-            if not opts.get('nonheads'):
-                ui.write(("unpruned common: %s\n") %
-                         " ".join(sorted(short(n) for n in common)))
+            if not opts.get(b'nonheads'):
+                ui.writenoi18n(
+                    b"unpruned common: %s\n"
+                    % b" ".join(sorted(short(n) for n in common))
+                )
 
                 clnode = repo.changelog.node
-                common = repo.revs('heads(::%ln)', common)
+                common = repo.revs(b'heads(::%ln)', common)
                 common = {clnode(r) for r in common}
             return common, hds
+
     else:
+
         def doit(pushedrevs, remoteheads, remote=remote):
             nodes = None
             if pushedrevs:
                 revs = scmutil.revrange(repo, pushedrevs)
                 nodes = [repo[r].node() for r in revs]
-            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
-                                                            ancestorsof=nodes)
+            common, any, hds = setdiscovery.findcommonheads(
+                ui, repo, remote, ancestorsof=nodes
+            )
             return common, hds
 
     remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
-    localrevs = opts['rev']
+    localrevs = opts[b'rev']
     with util.timedcm('debug-discovery') as t:
         common, hds = doit(localrevs, remoterevs)
 
@@ -825,47 +959,48 @@
     lheads = set(repo.heads())
 
     data = {}
-    data['elapsed'] = t.elapsed
-    data['nb-common'] = len(common)
-    data['nb-common-local'] = len(common & lheads)
-    data['nb-common-remote'] = len(common & rheads)
-    data['nb-common-both'] = len(common & rheads & lheads)
-    data['nb-local'] = len(lheads)
-    data['nb-local-missing'] = data['nb-local'] - data['nb-common-local']
-    data['nb-remote'] = len(rheads)
-    data['nb-remote-unknown'] = data['nb-remote'] - data['nb-common-remote']
-    data['nb-revs'] = len(repo.revs('all()'))
-    data['nb-revs-common'] = len(repo.revs('::%ln', common))
-    data['nb-revs-missing'] = data['nb-revs'] - data['nb-revs-common']
+    data[b'elapsed'] = t.elapsed
+    data[b'nb-common'] = len(common)
+    data[b'nb-common-local'] = len(common & lheads)
+    data[b'nb-common-remote'] = len(common & rheads)
+    data[b'nb-common-both'] = len(common & rheads & lheads)
+    data[b'nb-local'] = len(lheads)
+    data[b'nb-local-missing'] = data[b'nb-local'] - data[b'nb-common-local']
+    data[b'nb-remote'] = len(rheads)
+    data[b'nb-remote-unknown'] = data[b'nb-remote'] - data[b'nb-common-remote']
+    data[b'nb-revs'] = len(repo.revs(b'all()'))
+    data[b'nb-revs-common'] = len(repo.revs(b'::%ln', common))
+    data[b'nb-revs-missing'] = data[b'nb-revs'] - data[b'nb-revs-common']
 
     # display discovery summary
-    ui.write(("elapsed time:  %(elapsed)f seconds\n") % data)
-    ui.write(("heads summary:\n"))
-    ui.write(("  total common heads:  %(nb-common)9d\n") % data)
-    ui.write(("    also local heads:  %(nb-common-local)9d\n") % data)
-    ui.write(("    also remote heads: %(nb-common-remote)9d\n") % data)
-    ui.write(("    both:              %(nb-common-both)9d\n") % data)
-    ui.write(("  local heads:         %(nb-local)9d\n") % data)
-    ui.write(("    common:            %(nb-common-local)9d\n") % data)
-    ui.write(("    missing:           %(nb-local-missing)9d\n") % data)
-    ui.write(("  remote heads:        %(nb-remote)9d\n") % data)
-    ui.write(("    common:            %(nb-common-remote)9d\n") % data)
-    ui.write(("    unknown:           %(nb-remote-unknown)9d\n") % data)
-    ui.write(("local changesets:      %(nb-revs)9d\n") % data)
-    ui.write(("  common:              %(nb-revs-common)9d\n") % data)
-    ui.write(("  missing:             %(nb-revs-missing)9d\n") % data)
+    ui.writenoi18n(b"elapsed time:  %(elapsed)f seconds\n" % data)
+    ui.writenoi18n(b"heads summary:\n")
+    ui.writenoi18n(b"  total common heads:  %(nb-common)9d\n" % data)
+    ui.writenoi18n(b"    also local heads:  %(nb-common-local)9d\n" % data)
+    ui.writenoi18n(b"    also remote heads: %(nb-common-remote)9d\n" % data)
+    ui.writenoi18n(b"    both:              %(nb-common-both)9d\n" % data)
+    ui.writenoi18n(b"  local heads:         %(nb-local)9d\n" % data)
+    ui.writenoi18n(b"    common:            %(nb-common-local)9d\n" % data)
+    ui.writenoi18n(b"    missing:           %(nb-local-missing)9d\n" % data)
+    ui.writenoi18n(b"  remote heads:        %(nb-remote)9d\n" % data)
+    ui.writenoi18n(b"    common:            %(nb-common-remote)9d\n" % data)
+    ui.writenoi18n(b"    unknown:           %(nb-remote-unknown)9d\n" % data)
+    ui.writenoi18n(b"local changesets:      %(nb-revs)9d\n" % data)
+    ui.writenoi18n(b"  common:              %(nb-revs-common)9d\n" % data)
+    ui.writenoi18n(b"  missing:             %(nb-revs-missing)9d\n" % data)
 
     if ui.verbose:
-        ui.write(("common heads: %s\n") %
-                 " ".join(sorted(short(n) for n in common)))
+        ui.writenoi18n(
+            b"common heads: %s\n" % b" ".join(sorted(short(n) for n in common))
+        )
+
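The summary counters above are plain set arithmetic over the three head
sets; a minimal standalone sketch with hypothetical heads::

    # hypothetical data; `common` is what discovery negotiated
    common = {b'a', b'b'}
    lheads = {b'b', b'c'}  # local heads
    rheads = {b'a', b'd'}  # remote heads

    nb_common_local = len(common & lheads)
    nb_common_remote = len(common & rheads)
    nb_local_missing = len(lheads) - nb_common_local

    assert nb_local_missing == 1  # head b'c' is unknown to the remote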
 
 _chunksize = 4 << 10
 
-@command('debugdownload',
-    [
-        ('o', 'output', '', _('path')),
-    ],
-    optionalrepo=True)
+
+@command(
+    b'debugdownload', [(b'o', b'output', b'', _(b'path')),], optionalrepo=True
+)
 def debugdownload(ui, repo, url, output=None, **opts):
     """download a resource using Mercurial logic and config
     """
@@ -873,7 +1008,7 @@
 
     dest = ui
     if output:
-        dest = open(output, "wb", _chunksize)
+        dest = open(output, b"wb", _chunksize)
     try:
         data = fh.read(_chunksize)
         while data:
@@ -883,121 +1018,163 @@
         if output:
             dest.close()
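
The loop above is a fixed-size chunked copy (``_chunksize`` is 4 KiB, so
arbitrarily large downloads never buffer more than one chunk); the same
pattern over ordinary file objects, as a runnable sketch::

    import io

    def copychunks(src, dst, chunksize=4 << 10):
        # read at most `chunksize` bytes per iteration until EOF
        data = src.read(chunksize)
        while data:
            dst.write(data)
            data = src.read(chunksize)

    out = io.BytesIO()
    copychunks(io.BytesIO(b'x' * 10000), out)
    assert out.getvalue() == b'x' * 10000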
 
-@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
+
+@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
 def debugextensions(ui, repo, **opts):
     '''show information about active extensions'''
     opts = pycompat.byteskwargs(opts)
     exts = extensions.extensions(ui)
     hgver = util.version()
-    fm = ui.formatter('debugextensions', opts)
+    fm = ui.formatter(b'debugextensions', opts)
     for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
         isinternal = extensions.ismoduleinternal(extmod)
         extsource = pycompat.fsencode(extmod.__file__)
         if isinternal:
             exttestedwith = []  # never expose magic string to users
         else:
-            exttestedwith = getattr(extmod, 'testedwith', '').split()
+            exttestedwith = getattr(extmod, 'testedwith', b'').split()
         extbuglink = getattr(extmod, 'buglink', None)
 
         fm.startitem()
 
         if ui.quiet or ui.verbose:
-            fm.write('name', '%s\n', extname)
+            fm.write(b'name', b'%s\n', extname)
         else:
-            fm.write('name', '%s', extname)
+            fm.write(b'name', b'%s', extname)
             if isinternal or hgver in exttestedwith:
-                fm.plain('\n')
+                fm.plain(b'\n')
             elif not exttestedwith:
-                fm.plain(_(' (untested!)\n'))
+                fm.plain(_(b' (untested!)\n'))
             else:
                 lasttestedversion = exttestedwith[-1]
-                fm.plain(' (%s!)\n' % lasttestedversion)
-
-        fm.condwrite(ui.verbose and extsource, 'source',
-                 _('  location: %s\n'), extsource or "")
+                fm.plain(b' (%s!)\n' % lasttestedversion)
+
+        fm.condwrite(
+            ui.verbose and extsource,
+            b'source',
+            _(b'  location: %s\n'),
+            extsource or b"",
+        )
 
         if ui.verbose:
-            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
+            fm.plain(_(b'  bundled: %s\n') % [b'no', b'yes'][isinternal])
         fm.data(bundled=isinternal)
 
-        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
-                     _('  tested with: %s\n'),
-                     fm.formatlist(exttestedwith, name='ver'))
-
-        fm.condwrite(ui.verbose and extbuglink, 'buglink',
-                 _('  bug reporting: %s\n'), extbuglink or "")
+        fm.condwrite(
+            ui.verbose and exttestedwith,
+            b'testedwith',
+            _(b'  tested with: %s\n'),
+            fm.formatlist(exttestedwith, name=b'ver'),
+        )
+
+        fm.condwrite(
+            ui.verbose and extbuglink,
+            b'buglink',
+            _(b'  bug reporting: %s\n'),
+            extbuglink or b"",
+        )
 
     fm.end()
 
-@command('debugfileset',
-    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
-     ('', 'all-files', False,
-      _('test files from all revisions and working directory')),
-     ('s', 'show-matcher', None,
-      _('print internal representation of matcher')),
-     ('p', 'show-stage', [],
-      _('print parsed tree at the given stage'), _('NAME'))],
-    _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
+
+@command(
+    b'debugfileset',
+    [
+        (
+            b'r',
+            b'rev',
+            b'',
+            _(b'apply the filespec on this revision'),
+            _(b'REV'),
+        ),
+        (
+            b'',
+            b'all-files',
+            False,
+            _(b'test files from all revisions and working directory'),
+        ),
+        (
+            b's',
+            b'show-matcher',
+            None,
+            _(b'print internal representation of matcher'),
+        ),
+        (
+            b'p',
+            b'show-stage',
+            [],
+            _(b'print parsed tree at the given stage'),
+            _(b'NAME'),
+        ),
+    ],
+    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
+)
 def debugfileset(ui, repo, expr, **opts):
     '''parse and apply a fileset specification'''
     from . import fileset
-    fileset.symbols # force import of fileset so we have predicates to optimize
+
+    fileset.symbols  # force import of fileset so we have predicates to optimize
     opts = pycompat.byteskwargs(opts)
-    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
+    ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
 
     stages = [
-        ('parsed', pycompat.identity),
-        ('analyzed', filesetlang.analyze),
-        ('optimized', filesetlang.optimize),
+        (b'parsed', pycompat.identity),
+        (b'analyzed', filesetlang.analyze),
+        (b'optimized', filesetlang.optimize),
     ]
     stagenames = set(n for n, f in stages)
 
     showalways = set()
-    if ui.verbose and not opts['show_stage']:
+    if ui.verbose and not opts[b'show_stage']:
         # show parsed tree by --verbose (deprecated)
-        showalways.add('parsed')
-    if opts['show_stage'] == ['all']:
+        showalways.add(b'parsed')
+    if opts[b'show_stage'] == [b'all']:
         showalways.update(stagenames)
     else:
-        for n in opts['show_stage']:
+        for n in opts[b'show_stage']:
             if n not in stagenames:
-                raise error.Abort(_('invalid stage name: %s') % n)
-        showalways.update(opts['show_stage'])
+                raise error.Abort(_(b'invalid stage name: %s') % n)
+        showalways.update(opts[b'show_stage'])
 
     tree = filesetlang.parse(expr)
     for n, f in stages:
         tree = f(tree)
         if n in showalways:
-            if opts['show_stage'] or n != 'parsed':
-                ui.write(("* %s:\n") % n)
-            ui.write(filesetlang.prettyformat(tree), "\n")
+            if opts[b'show_stage'] or n != b'parsed':
+                ui.write(b"* %s:\n" % n)
+            ui.write(filesetlang.prettyformat(tree), b"\n")
 
     files = set()
-    if opts['all_files']:
+    if opts[b'all_files']:
         for r in repo:
             c = repo[r]
             files.update(c.files())
             files.update(c.substate)
-    if opts['all_files'] or ctx.rev() is None:
+    if opts[b'all_files'] or ctx.rev() is None:
         wctx = repo[None]
-        files.update(repo.dirstate.walk(scmutil.matchall(repo),
-                                        subrepos=list(wctx.substate),
-                                        unknown=True, ignored=True))
+        files.update(
+            repo.dirstate.walk(
+                scmutil.matchall(repo),
+                subrepos=list(wctx.substate),
+                unknown=True,
+                ignored=True,
+            )
+        )
         files.update(wctx.substate)
     else:
         files.update(ctx.files())
         files.update(ctx.substate)
 
     m = ctx.matchfileset(expr)
-    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
-        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
+    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
+        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
     for f in sorted(files):
         if not m(f):
             continue
-        ui.write("%s\n" % f)
-
-@command('debugformat',
-         [] + cmdutil.formatteropts)
+        ui.write(b"%s\n" % f)
+
+
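The --show-stage machinery in debugfileset threads the parsed tree
through a list of named transforms, printing the intermediate tree after
any requested stage; the same shape in isolation, with identity
stand-ins for the real fileset passes::

    stages = [
        (b'parsed', lambda t: t),
        (b'analyzed', lambda t: (b'analyzed', t)),
        (b'optimized', lambda t: (b'optimized', t)),
    ]
    tree = b'FILESPEC'
    for name, f in stages:
        tree = f(tree)
    assert tree == (b'optimized', (b'analyzed', b'FILESPEC'))
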
+@command(b'debugformat', [] + cmdutil.formatteropts)
 def debugformat(ui, repo, **opts):
     """display format information about the current repository
 
@@ -1005,82 +1182,110 @@
     Mercurial default."""
     opts = pycompat.byteskwargs(opts)
     maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
-    maxvariantlength = max(len('format-variant'), maxvariantlength)
+    maxvariantlength = max(len(b'format-variant'), maxvariantlength)
 
     def makeformatname(name):
-        return '%s:' + (' ' * (maxvariantlength - len(name)))
-
-    fm = ui.formatter('debugformat', opts)
+        return b'%s:' + (b' ' * (maxvariantlength - len(name)))
+
+    fm = ui.formatter(b'debugformat', opts)
     if fm.isplain():
+
         def formatvalue(value):
-            if util.safehasattr(value, 'startswith'):
+            if util.safehasattr(value, b'startswith'):
                 return value
             if value:
-                return 'yes'
+                return b'yes'
             else:
-                return 'no'
+                return b'no'
+
     else:
         formatvalue = pycompat.identity
 
-    fm.plain('format-variant')
-    fm.plain(' ' * (maxvariantlength - len('format-variant')))
-    fm.plain(' repo')
+    fm.plain(b'format-variant')
+    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
+    fm.plain(b' repo')
     if ui.verbose:
-        fm.plain(' config default')
-    fm.plain('\n')
+        fm.plain(b' config default')
+    fm.plain(b'\n')
     for fv in upgrade.allformatvariant:
         fm.startitem()
         repovalue = fv.fromrepo(repo)
         configvalue = fv.fromconfig(repo)
 
         if repovalue != configvalue:
-            namelabel = 'formatvariant.name.mismatchconfig'
-            repolabel = 'formatvariant.repo.mismatchconfig'
+            namelabel = b'formatvariant.name.mismatchconfig'
+            repolabel = b'formatvariant.repo.mismatchconfig'
         elif repovalue != fv.default:
-            namelabel = 'formatvariant.name.mismatchdefault'
-            repolabel = 'formatvariant.repo.mismatchdefault'
+            namelabel = b'formatvariant.name.mismatchdefault'
+            repolabel = b'formatvariant.repo.mismatchdefault'
         else:
-            namelabel = 'formatvariant.name.uptodate'
-            repolabel = 'formatvariant.repo.uptodate'
-
-        fm.write('name', makeformatname(fv.name), fv.name,
-                 label=namelabel)
-        fm.write('repo', ' %3s', formatvalue(repovalue),
-                 label=repolabel)
+            namelabel = b'formatvariant.name.uptodate'
+            repolabel = b'formatvariant.repo.uptodate'
+
+        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
+        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
         if fv.default != configvalue:
-            configlabel = 'formatvariant.config.special'
+            configlabel = b'formatvariant.config.special'
         else:
-            configlabel = 'formatvariant.config.default'
-        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
-                     label=configlabel)
-        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
-                     label='formatvariant.default')
-        fm.plain('\n')
+            configlabel = b'formatvariant.config.default'
+        fm.condwrite(
+            ui.verbose,
+            b'config',
+            b' %6s',
+            formatvalue(configvalue),
+            label=configlabel,
+        )
+        fm.condwrite(
+            ui.verbose,
+            b'default',
+            b' %7s',
+            formatvalue(fv.default),
+            label=b'formatvariant.default',
+        )
+        fm.plain(b'\n')
     fm.end()
 
-@command('debugfsinfo', [], _('[PATH]'), norepo=True)
-def debugfsinfo(ui, path="."):
+
+@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
+def debugfsinfo(ui, path=b"."):
     """show information detected about current filesystem"""
-    ui.write(('path: %s\n') % path)
-    ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)'))
-    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
-    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
-    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
-    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
-    casesensitive = '(unknown)'
+    ui.writenoi18n(b'path: %s\n' % path)
+    ui.writenoi18n(
+        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
+    )
+    ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
+    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
+    ui.writenoi18n(
+        b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
+    )
+    ui.writenoi18n(
+        b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
+    )
+    casesensitive = b'(unknown)'
     try:
-        with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f:
-            casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
+        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
+            casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
     except OSError:
         pass
-    ui.write(('case-sensitive: %s\n') % casesensitive)
-
-@command('debuggetbundle',
-    [('H', 'head', [], _('id of head node'), _('ID')),
-    ('C', 'common', [], _('id of common node'), _('ID')),
-    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
-    _('REPO FILE [-H|-C ID]...'),
-    norepo=True)
+    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
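
The case-sensitivity probe above is empirical: create a temporary file,
then check whether a case-swapped name resolves to it. Roughly what
``util.fscasesensitive`` does, as a standalone sketch::

    import os
    import tempfile

    def fscasesensitive(path):
        d, base = os.path.split(path)
        swapped = base.swapcase()
        if swapped == base:
            return True  # no letters to flip; nothing to learn
        return not os.path.lexists(os.path.join(d, swapped))

    with tempfile.NamedTemporaryFile(prefix='.probe') as f:
        print('case-sensitive:', 'yes' if fscasesensitive(f.name) else 'no')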
+
+
+@command(
+    b'debuggetbundle',
+    [
+        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
+        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
+        (
+            b't',
+            b'type',
+            b'bzip2',
+            _(b'bundle compression type to use'),
+            _(b'TYPE'),
+        ),
+    ],
+    _(b'REPO FILE [-H|-C ID]...'),
+    norepo=True,
+)
 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
     """retrieves a bundle from a repo
 
@@ -1089,8 +1294,8 @@
     """
     opts = pycompat.byteskwargs(opts)
     repo = hg.peer(ui, opts, repopath)
-    if not repo.capable('getbundle'):
-        raise error.Abort("getbundle() not supported by target repository")
+    if not repo.capable(b'getbundle'):
+        raise error.Abort(b"getbundle() not supported by target repository")
     args = {}
     if common:
         args[r'common'] = [bin(s) for s in common]
@@ -1098,19 +1303,22 @@
         args[r'heads'] = [bin(s) for s in head]
     # TODO: get desired bundlecaps from command line.
     args[r'bundlecaps'] = None
-    bundle = repo.getbundle('debug', **args)
-
-    bundletype = opts.get('type', 'bzip2').lower()
-    btypes = {'none': 'HG10UN',
-              'bzip2': 'HG10BZ',
-              'gzip': 'HG10GZ',
-              'bundle2': 'HG20'}
+    bundle = repo.getbundle(b'debug', **args)
+
+    bundletype = opts.get(b'type', b'bzip2').lower()
+    btypes = {
+        b'none': b'HG10UN',
+        b'bzip2': b'HG10BZ',
+        b'gzip': b'HG10GZ',
+        b'bundle2': b'HG20',
+    }
     bundletype = btypes.get(bundletype)
     if bundletype not in bundle2.bundletypes:
-        raise error.Abort(_('unknown bundle type specified with --type'))
+        raise error.Abort(_(b'unknown bundle type specified with --type'))
     bundle2.writebundle(ui, bundle, bundlepath, bundletype)
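
The --type handling above maps the user-facing compression name to the
internal bundle format identifier and rejects unknown names; in
isolation::

    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }

    def resolvebundletype(name):
        bundletype = btypes.get(name.lower())
        if bundletype is None:
            raise ValueError('unknown bundle type specified with --type')
        return bundletype

    assert resolvebundletype(b'GZIP') == b'HG10GZ'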
 
-@command('debugignore', [], '[FILE]')
+
+@command(b'debugignore', [], b'[FILE]')
 def debugignore(ui, repo, *files, **opts):
     """display the combined ignore pattern and information about ignored files
 
@@ -1122,7 +1330,7 @@
     ignore = repo.dirstate._ignore
     if not files:
         # Show all the patterns
-        ui.write("%s\n" % pycompat.byterepr(ignore))
+        ui.write(b"%s\n" % pycompat.byterepr(ignore))
     else:
         m = scmutil.match(repo[None], pats=files)
         uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
@@ -1130,7 +1338,7 @@
             nf = util.normpath(f)
             ignored = None
             ignoredata = None
-            if nf != '.':
+            if nf != b'.':
                 if ignore(nf):
                     ignored = nf
                     ignoredata = repo.dirstate._ignorefileandline(nf)
@@ -1142,23 +1350,33 @@
                             break
             if ignored:
                 if ignored == nf:
-                    ui.write(_("%s is ignored\n") % uipathfn(f))
+                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                 else:
-                    ui.write(_("%s is ignored because of "
-                               "containing directory %s\n")
-                             % (uipathfn(f), ignored))
+                    ui.write(
+                        _(
+                            b"%s is ignored because of "
+                            b"containing directory %s\n"
+                        )
+                        % (uipathfn(f), ignored)
+                    )
                 ignorefile, lineno, line = ignoredata
-                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
-                         % (ignorefile, lineno, line))
+                ui.write(
+                    _(b"(ignore rule in %s, line %d: '%s')\n")
+                    % (ignorefile, lineno, line)
+                )
             else:
-                ui.write(_("%s is not ignored\n") % uipathfn(f))
-
-@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
-         _('-c|-m|FILE'))
+                ui.write(_(b"%s is not ignored\n") % uipathfn(f))
+
+
+@command(
+    b'debugindex',
+    cmdutil.debugrevlogopts + cmdutil.formatteropts,
+    _(b'-c|-m|FILE'),
+)
 def debugindex(ui, repo, file_=None, **opts):
     """dump index data for a storage primitive"""
     opts = pycompat.byteskwargs(opts)
-    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)
+    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
 
     if ui.debugflag:
         shortfn = hex
@@ -1170,10 +1388,11 @@
         idlen = len(shortfn(store.node(i)))
         break
 
-    fm = ui.formatter('debugindex', opts)
-    fm.plain(b'   rev linkrev %s %s p2\n' % (
-        b'nodeid'.ljust(idlen),
-        b'p1'.ljust(idlen)))
+    fm = ui.formatter(b'debugindex', opts)
+    fm.plain(
+        b'   rev linkrev %s %s p2\n'
+        % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
+    )
 
     for rev in store:
         node = store.node(rev)
@@ -1181,40 +1400,47 @@
 
         fm.startitem()
         fm.write(b'rev', b'%6d ', rev)
-        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
-        fm.write(b'node', '%s ', shortfn(node))
-        fm.write(b'p1', '%s ', shortfn(parents[0]))
-        fm.write(b'p2', '%s', shortfn(parents[1]))
+        fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
+        fm.write(b'node', b'%s ', shortfn(node))
+        fm.write(b'p1', b'%s ', shortfn(parents[0]))
+        fm.write(b'p2', b'%s', shortfn(parents[1]))
         fm.plain(b'\n')
 
     fm.end()
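
The header above pads the ``nodeid`` and ``p1`` columns to the width of
whatever hash form is in use (full hex with --debug, short otherwise) so
the rows line up; the alignment trick on its own::

    idlen = 12  # e.g. the width of a short node
    header = b'   rev linkrev %s %s p2\n' % (
        b'nodeid'.ljust(idlen),
        b'p1'.ljust(idlen),
    )
    # each padded column is idlen wide plus one separating space
    assert header.index(b'p1') - header.index(b'nodeid') == idlen + 1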
 
-@command('debugindexdot', cmdutil.debugrevlogopts,
-    _('-c|-m|FILE'), optionalrepo=True)
+
+@command(
+    b'debugindexdot',
+    cmdutil.debugrevlogopts,
+    _(b'-c|-m|FILE'),
+    optionalrepo=True,
+)
 def debugindexdot(ui, repo, file_=None, **opts):
     """dump an index DAG as a graphviz dot file"""
     opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
-    ui.write(("digraph G {\n"))
+    r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
+    ui.writenoi18n(b"digraph G {\n")
     for i in r:
         node = r.node(i)
         pp = r.parents(node)
-        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
+        ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
         if pp[1] != nullid:
-            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
-    ui.write("}\n")
-
-@command('debugindexstats', [])
+            ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
+    ui.write(b"}\n")
+
+
+@command(b'debugindexstats', [])
 def debugindexstats(ui, repo):
     """show stats related to the changelog index"""
     repo.changelog.shortest(nullid, 1)
     index = repo.changelog.index
-    if not util.safehasattr(index, 'stats'):
-        raise error.Abort(_('debugindexstats only works with native code'))
+    if not util.safehasattr(index, b'stats'):
+        raise error.Abort(_(b'debugindexstats only works with native code'))
     for k, v in sorted(index.stats().items()):
-        ui.write('%s: %d\n' % (k, v))
-
-@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
+        ui.write(b'%s: %d\n' % (k, v))
+
+
+@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
 def debuginstall(ui, **opts):
     '''test Mercurial installation
 
@@ -1224,63 +1450,94 @@
 
     problems = 0
 
-    fm = ui.formatter('debuginstall', opts)
+    fm = ui.formatter(b'debuginstall', opts)
     fm.startitem()
 
     # encoding
-    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
+    fm.write(b'encoding', _(b"checking encoding (%s)...\n"), encoding.encoding)
     err = None
     try:
         codecs.lookup(pycompat.sysstr(encoding.encoding))
     except LookupError as inst:
         err = stringutil.forcebytestr(inst)
         problems += 1
-    fm.condwrite(err, 'encodingerror', _(" %s\n"
-                 " (check that your locale is properly set)\n"), err)
+    fm.condwrite(
+        err,
+        b'encodingerror',
+        _(b" %s\n (check that your locale is properly set)\n"),
+        err,
+    )
 
     # Python
-    fm.write('pythonexe', _("checking Python executable (%s)\n"),
-             pycompat.sysexecutable or _("unknown"))
-    fm.write('pythonver', _("checking Python version (%s)\n"),
-             ("%d.%d.%d" % sys.version_info[:3]))
-    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
-             os.path.dirname(pycompat.fsencode(os.__file__)))
+    fm.write(
+        b'pythonexe',
+        _(b"checking Python executable (%s)\n"),
+        pycompat.sysexecutable or _(b"unknown"),
+    )
+    fm.write(
+        b'pythonver',
+        _(b"checking Python version (%s)\n"),
+        (b"%d.%d.%d" % sys.version_info[:3]),
+    )
+    fm.write(
+        b'pythonlib',
+        _(b"checking Python lib (%s)...\n"),
+        os.path.dirname(pycompat.fsencode(os.__file__)),
+    )
 
     security = set(sslutil.supportedprotocols)
     if sslutil.hassni:
-        security.add('sni')
-
-    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
-             fm.formatlist(sorted(security), name='protocol',
-                           fmt='%s', sep=','))
+        security.add(b'sni')
+
+    fm.write(
+        b'pythonsecurity',
+        _(b"checking Python security support (%s)\n"),
+        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
+    )
 
     # These are warnings, not errors. So don't increment problem count. This
     # may change in the future.
-    if 'tls1.2' not in security:
-        fm.plain(_('  TLS 1.2 not supported by Python install; '
-                   'network connections lack modern security\n'))
-    if 'sni' not in security:
-        fm.plain(_('  SNI not supported by Python install; may have '
-                   'connectivity issues with some servers\n'))
+    if b'tls1.2' not in security:
+        fm.plain(
+            _(
+                b'  TLS 1.2 not supported by Python install; '
+                b'network connections lack modern security\n'
+            )
+        )
+    if b'sni' not in security:
+        fm.plain(
+            _(
+                b'  SNI not supported by Python install; may have '
+                b'connectivity issues with some servers\n'
+            )
+        )
 
     # TODO print CA cert info
 
     # hg version
     hgver = util.version()
-    fm.write('hgver', _("checking Mercurial version (%s)\n"),
-             hgver.split('+')[0])
-    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
-             '+'.join(hgver.split('+')[1:]))
+    fm.write(
+        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
+    )
+    fm.write(
+        b'hgverextra',
+        _(b"checking Mercurial custom build (%s)\n"),
+        b'+'.join(hgver.split(b'+')[1:]),
+    )
 
     # compiled modules
-    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
-             policy.policy)
-    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
-             os.path.dirname(pycompat.fsencode(__file__)))
-
-    rustandc = policy.policy in ('rust+c', 'rust+c-allow')
+    fm.write(
+        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
+    )
+    fm.write(
+        b'hgmodules',
+        _(b"checking installed modules (%s)...\n"),
+        os.path.dirname(pycompat.fsencode(__file__)),
+    )
+
+    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
     rustext = rustandc  # for now, that's the only case
-    cext = policy.policy in ('c', 'allow') or rustandc
+    cext = policy.policy in (b'c', b'allow') or rustandc
     nopure = cext or rustext
     if nopure:
         err = None
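
The module-policy string decodes into which compiled backends must
import cleanly; the derivation above, restated as a small truth table::

    def backends(policy):
        rustandc = policy in (b'rust+c', b'rust+c-allow')
        cext = policy in (b'c', b'allow') or rustandc
        return {'rust': rustandc, 'c': cext}

    assert backends(b'rust+c') == {'rust': True, 'c': True}
    assert backends(b'c') == {'rust': False, 'c': True}
    assert backends(b'py') == {'rust': False, 'c': False}
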
@@ -1292,6 +1549,7 @@
                     mpatch,
                     osutil,
                 )
+
                 # quiet pyflakes
                 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
             if rustext:
@@ -1299,40 +1557,62 @@
                     ancestor,
                     dirstate,
                 )
-                dir(ancestor), dir(dirstate) # quiet pyflakes
+
+                dir(ancestor), dir(dirstate)  # quiet pyflakes
         except Exception as inst:
             err = stringutil.forcebytestr(inst)
             problems += 1
-        fm.condwrite(err, 'extensionserror', " %s\n", err)
+        fm.condwrite(err, b'extensionserror', b" %s\n", err)
 
     compengines = util.compengines._engines.values()
-    fm.write('compengines', _('checking registered compression engines (%s)\n'),
-             fm.formatlist(sorted(e.name() for e in compengines),
-                           name='compengine', fmt='%s', sep=', '))
-    fm.write('compenginesavail', _('checking available compression engines '
-                                   '(%s)\n'),
-             fm.formatlist(sorted(e.name() for e in compengines
-                                  if e.available()),
-                           name='compengine', fmt='%s', sep=', '))
+    fm.write(
+        b'compengines',
+        _(b'checking registered compression engines (%s)\n'),
+        fm.formatlist(
+            sorted(e.name() for e in compengines),
+            name=b'compengine',
+            fmt=b'%s',
+            sep=b', ',
+        ),
+    )
+    fm.write(
+        b'compenginesavail',
+        _(b'checking available compression engines (%s)\n'),
+        fm.formatlist(
+            sorted(e.name() for e in compengines if e.available()),
+            name=b'compengine',
+            fmt=b'%s',
+            sep=b', ',
+        ),
+    )
     wirecompengines = compression.compengines.supportedwireengines(
-        compression.SERVERROLE)
-    fm.write('compenginesserver', _('checking available compression engines '
-                                    'for wire protocol (%s)\n'),
-             fm.formatlist([e.name() for e in wirecompengines
-                            if e.wireprotosupport()],
-                           name='compengine', fmt='%s', sep=', '))
-    re2 = 'missing'
+        compression.SERVERROLE
+    )
+    fm.write(
+        b'compenginesserver',
+        _(
+            b'checking available compression engines '
+            b'for wire protocol (%s)\n'
+        ),
+        fm.formatlist(
+            [e.name() for e in wirecompengines if e.wireprotosupport()],
+            name=b'compengine',
+            fmt=b'%s',
+            sep=b', ',
+        ),
+    )
+    re2 = b'missing'
     if util._re2:
-        re2 = 'available'
-    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
+        re2 = b'available'
+    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
     fm.data(re2=bool(util._re2))
 
     # templates
     p = templater.templatepaths()
-    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
-    fm.condwrite(not p, '', _(" no template directories found\n"))
+    fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p))
+    fm.condwrite(not p, b'', _(b" no template directories found\n"))
     if p:
-        m = templater.templatepath("map-cmdline.default")
+        m = templater.templatepath(b"map-cmdline.default")
         if m:
             # template found, check if it is working
             err = None
@@ -1341,33 +1621,51 @@
             except Exception as inst:
                 err = stringutil.forcebytestr(inst)
                 p = None
-            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
+            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
         else:
             p = None
-        fm.condwrite(p, 'defaulttemplate',
-                     _("checking default template (%s)\n"), m)
-        fm.condwrite(not m, 'defaulttemplatenotfound',
-                     _(" template '%s' not found\n"), "default")
+        fm.condwrite(
+            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
+        )
+        fm.condwrite(
+            not m,
+            b'defaulttemplatenotfound',
+            _(b" template '%s' not found\n"),
+            b"default",
+        )
     if not p:
         problems += 1
-    fm.condwrite(not p, '',
-                 _(" (templates seem to have been installed incorrectly)\n"))
+    fm.condwrite(
+        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
+    )
 
     # editor
     editor = ui.geteditor()
     editor = util.expandpath(editor)
     editorbin = procutil.shellsplit(editor)[0]
-    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
+    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
     cmdpath = procutil.findexe(editorbin)
-    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
-                 _(" No commit editor set and can't find %s in PATH\n"
-                   " (specify a commit editor in your configuration"
-                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
-    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
-                 _(" Can't find editor '%s' in PATH\n"
-                   " (specify a commit editor in your configuration"
-                   " file)\n"), not cmdpath and editorbin)
-    if not cmdpath and editor != 'vi':
+    fm.condwrite(
+        not cmdpath and editor == b'vi',
+        b'vinotfound',
+        _(
+            b" No commit editor set and can't find %s in PATH\n"
+            b" (specify a commit editor in your configuration"
+            b" file)\n"
+        ),
+        not cmdpath and editor == b'vi' and editorbin,
+    )
+    fm.condwrite(
+        not cmdpath and editor != b'vi',
+        b'editornotfound',
+        _(
+            b" Can't find editor '%s' in PATH\n"
+            b" (specify a commit editor in your configuration"
+            b" file)\n"
+        ),
+        not cmdpath and editorbin,
+    )
+    if not cmdpath and editor != b'vi':
         problems += 1
 
     # check username
@@ -1379,22 +1677,39 @@
         err = stringutil.forcebytestr(e)
         problems += 1
 
-    fm.condwrite(username, 'username',  _("checking username (%s)\n"), username)
-    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
-        " (specify a username in your configuration file)\n"), err)
-
-    fm.condwrite(not problems, '',
-                 _("no problems detected\n"))
+    fm.condwrite(
+        username, b'username', _(b"checking username (%s)\n"), username
+    )
+    fm.condwrite(
+        err,
+        b'usernameerror',
+        _(
+            b"checking username...\n %s\n"
+            b" (specify a username in your configuration file)\n"
+        ),
+        err,
+    )
+
+    for name, mod in extensions.extensions():
+        handler = getattr(mod, 'debuginstall', None)
+        if handler is not None:
+            problems += handler(ui, fm)
+
+    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
     if not problems:
         fm.data(problems=problems)
-    fm.condwrite(problems, 'problems',
-                 _("%d problems detected,"
-                   " please check your install!\n"), problems)
+    fm.condwrite(
+        problems,
+        b'problems',
+        _(b"%d problems detected, please check your install!\n"),
+        problems,
+    )
     fm.end()
 
     return problems
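
The new loop above gives extensions a hook: any loaded extension may
define a module-level ``debuginstall(ui, fm)`` that runs its own checks
and returns the number of problems found, which is folded into the
command's exit status. A minimal extension-side sketch, with a
hypothetical check::

    # in an extension module
    def debuginstall(ui, fm):
        ok = True  # probe whatever the extension needs here
        fm.condwrite(not ok, b'extsetuperror', b' extension check failed\n')
        return 0 if ok else 1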
 
-@command('debugknown', [], _('REPO ID...'), norepo=True)
+
+@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
 def debugknown(ui, repopath, *ids, **opts):
     """test whether node ids are known to a repo
 
@@ -1403,24 +1718,38 @@
     """
     opts = pycompat.byteskwargs(opts)
     repo = hg.peer(ui, opts, repopath)
-    if not repo.capable('known'):
-        raise error.Abort("known() not supported by target repository")
+    if not repo.capable(b'known'):
+        raise error.Abort(b"known() not supported by target repository")
     flags = repo.known([bin(s) for s in ids])
-    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
-
-@command('debuglabelcomplete', [], _('LABEL...'))
+    ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))
+
+
+@command(b'debuglabelcomplete', [], _(b'LABEL...'))
 def debuglabelcomplete(ui, repo, *args):
     '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
     debugnamecomplete(ui, repo, *args)
 
-@command('debuglocks',
-         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
-          ('W', 'force-wlock', None,
-           _('free the working state lock (DANGEROUS)')),
-          ('s', 'set-lock', None, _('set the store lock until stopped')),
-          ('S', 'set-wlock', None,
-           _('set the working state lock until stopped'))],
-         _('[OPTION]...'))
+
+@command(
+    b'debuglocks',
+    [
+        (b'L', b'force-lock', None, _(b'free the store lock (DANGEROUS)')),
+        (
+            b'W',
+            b'force-wlock',
+            None,
+            _(b'free the working state lock (DANGEROUS)'),
+        ),
+        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
+        (
+            b'S',
+            b'set-wlock',
+            None,
+            _(b'set the working state lock until stopped'),
+        ),
+    ],
+    _(b'[OPTION]...'),
+)
 def debuglocks(ui, repo, **opts):
     """show or modify state of locks
 
@@ -1447,9 +1776,9 @@
     """
 
     if opts.get(r'force_lock'):
-        repo.svfs.unlink('lock')
+        repo.svfs.unlink(b'lock')
     if opts.get(r'force_wlock'):
-        repo.vfs.unlink('wlock')
+        repo.vfs.unlink(b'wlock')
     if opts.get(r'force_lock') or opts.get(r'force_wlock'):
         return 0
 
@@ -1459,14 +1788,14 @@
             try:
                 locks.append(repo.wlock(False))
             except error.LockHeld:
-                raise error.Abort(_('wlock is already held'))
+                raise error.Abort(_(b'wlock is already held'))
         if opts.get(r'set_lock'):
             try:
                 locks.append(repo.lock(False))
             except error.LockHeld:
-                raise error.Abort(_('lock is already held'))
+                raise error.Abort(_(b'lock is already held'))
         if len(locks):
-            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
+            ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
             return 0
     finally:
         release(*locks)
@@ -1489,32 +1818,45 @@
                 age = now - st[stat.ST_MTIME]
                 user = util.username(st.st_uid)
                 locker = vfs.readlock(name)
-                if ":" in locker:
-                    host, pid = locker.split(':')
+                if b":" in locker:
+                    host, pid = locker.split(b':')
                     if host == socket.gethostname():
-                        locker = 'user %s, process %s' % (user or b'None', pid)
+                        locker = b'user %s, process %s' % (user or b'None', pid)
                     else:
-                        locker = ('user %s, process %s, host %s'
-                                  % (user or b'None', pid, host))
-                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
+                        locker = b'user %s, process %s, host %s' % (
+                            user or b'None',
+                            pid,
+                            host,
+                        )
+                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                 return 1
             except OSError as e:
                 if e.errno != errno.ENOENT:
                     raise
 
-        ui.write(("%-6s free\n") % (name + ":"))
+        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
         return 0
 
-    held += report(repo.svfs, "lock", repo.lock)
-    held += report(repo.vfs, "wlock", repo.wlock)
+    held += report(repo.svfs, b"lock", repo.lock)
+    held += report(repo.vfs, b"wlock", repo.wlock)
 
     return held
 
-@command('debugmanifestfulltextcache', [
-        ('', 'clear', False, _('clear the cache')),
-        ('a', 'add', [], _('add the given manifest nodes to the cache'),
-         _('NODE'))
-    ], '')
+
+@command(
+    b'debugmanifestfulltextcache',
+    [
+        (b'', b'clear', False, _(b'clear the cache')),
+        (
+            b'a',
+            b'add',
+            [],
+            _(b'add the given manifest nodes to the cache'),
+            _(b'NODE'),
+        ),
+    ],
+    b'',
+)
 def debugmanifestfulltextcache(ui, repo, add=(), **opts):
     """show, clear or amend the contents of the manifest fulltext cache"""
 
@@ -1523,8 +1865,10 @@
         try:
             return r._fulltextcache
         except AttributeError:
-            msg = _("Current revlog implementation doesn't appear to have a "
-                    "manifest fulltext cache\n")
+            msg = _(
+                b"Current revlog implementation doesn't appear to have a "
+                b"manifest fulltext cache\n"
+            )
             raise error.Abort(msg)
 
     if opts.get(r'clear'):
@@ -1541,45 +1885,52 @@
                 try:
                     manifest = m[store.lookup(n)]
                 except error.LookupError as e:
-                    raise error.Abort(e, hint="Check your manifest node id")
+                    raise error.Abort(e, hint=b"Check your manifest node id")
                manifest.read()  # stores revision in cache too
             return
 
     cache = getcache()
     if not len(cache):
-        ui.write(_('cache empty\n'))
+        ui.write(_(b'cache empty\n'))
     else:
         ui.write(
-            _('cache contains %d manifest entries, in order of most to '
-              'least recent:\n') % (len(cache),))
+            _(
+                b'cache contains %d manifest entries, in order of most to '
+                b'least recent:\n'
+            )
+            % (len(cache),)
+        )
         totalsize = 0
         for nodeid in cache:
             # Use cache.peek() to not update the LRU order
             data = cache.peek(nodeid)
             size = len(data)
-            totalsize += size + 24   # 20 bytes nodeid, 4 bytes size
-            ui.write(_('id: %s, size %s\n') % (
-                hex(nodeid), util.bytecount(size)))
-        ondisk = cache._opener.stat('manifestfulltextcache').st_size
+            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
+            ui.write(
+                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
+            )
+        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
         ui.write(
-            _('total cache data size %s, on-disk %s\n') % (
-                util.bytecount(totalsize), util.bytecount(ondisk))
+            _(b'total cache data size %s, on-disk %s\n')
+            % (util.bytecount(totalsize), util.bytecount(ondisk))
         )
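
The display loop above deliberately reads entries with ``cache.peek()``
so that printing the cache does not reorder it; a toy LRU mapping
showing why that matters::

    from collections import OrderedDict

    class lrucache(OrderedDict):
        def get(self, key):
            self.move_to_end(key)  # access refreshes recency
            return self[key]

        def peek(self, key):
            return dict.__getitem__(self, key)  # no reordering

    c = lrucache([(b'n1', b'data1'), (b'n2', b'data2')])
    c.peek(b'n1')
    assert list(c) == [b'n1', b'n2']  # order untouched
    c.get(b'n1')
    assert list(c) == [b'n2', b'n1']  # get() moved n1 to most recent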
 
-@command('debugmergestate', [], '')
+
+@command(b'debugmergestate', [], b'')
 def debugmergestate(ui, repo, *args):
     """print merge state
 
     Use --verbose to print out information about whether v1 or v2 merge state
     was chosen."""
+
     def _hashornull(h):
         if h == nullhex:
-            return 'null'
+            return b'null'
         else:
             return h
 
     def printrecords(version):
-        ui.write(('* version %d records\n') % version)
+        ui.writenoi18n(b'* version %d records\n' % version)
         if version == 1:
             records = v1records
         else:
@@ -1587,51 +1938,64 @@
 
         for rtype, record in records:
             # pretty print some record types
-            if rtype == 'L':
-                ui.write(('local: %s\n') % record)
-            elif rtype == 'O':
-                ui.write(('other: %s\n') % record)
-            elif rtype == 'm':
-                driver, mdstate = record.split('\0', 1)
-                ui.write(('merge driver: %s (state "%s")\n')
-                         % (driver, mdstate))
-            elif rtype in 'FDC':
-                r = record.split('\0')
+            if rtype == b'L':
+                ui.writenoi18n(b'local: %s\n' % record)
+            elif rtype == b'O':
+                ui.writenoi18n(b'other: %s\n' % record)
+            elif rtype == b'm':
+                driver, mdstate = record.split(b'\0', 1)
+                ui.writenoi18n(
+                    b'merge driver: %s (state "%s")\n' % (driver, mdstate)
+                )
+            elif rtype in b'FDC':
+                r = record.split(b'\0')
                 f, state, hash, lfile, afile, anode, ofile = r[0:7]
                 if version == 1:
-                    onode = 'not stored in v1 format'
+                    onode = b'not stored in v1 format'
                     flags = r[7]
                 else:
                     onode, flags = r[7:9]
-                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
-                         % (f, rtype, state, _hashornull(hash)))
-                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
-                ui.write(('  ancestor path: %s (node %s)\n')
-                         % (afile, _hashornull(anode)))
-                ui.write(('  other path: %s (node %s)\n')
-                         % (ofile, _hashornull(onode)))
-            elif rtype == 'f':
-                filename, rawextras = record.split('\0', 1)
-                extras = rawextras.split('\0')
+                ui.writenoi18n(
+                    b'file: %s (record type "%s", state "%s", hash %s)\n'
+                    % (f, rtype, state, _hashornull(hash))
+                )
+                ui.writenoi18n(
+                    b'  local path: %s (flags "%s")\n' % (lfile, flags)
+                )
+                ui.writenoi18n(
+                    b'  ancestor path: %s (node %s)\n'
+                    % (afile, _hashornull(anode))
+                )
+                ui.writenoi18n(
+                    b'  other path: %s (node %s)\n'
+                    % (ofile, _hashornull(onode))
+                )
+            elif rtype == b'f':
+                filename, rawextras = record.split(b'\0', 1)
+                extras = rawextras.split(b'\0')
                 i = 0
                 extrastrings = []
                 while i < len(extras):
-                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
+                    extrastrings.append(b'%s = %s' % (extras[i], extras[i + 1]))
                     i += 2
 
-                ui.write(('file extras: %s (%s)\n')
-                         % (filename, ', '.join(extrastrings)))
-            elif rtype == 'l':
-                labels = record.split('\0', 2)
+                ui.writenoi18n(
+                    b'file extras: %s (%s)\n'
+                    % (filename, b', '.join(extrastrings))
+                )
+            elif rtype == b'l':
+                labels = record.split(b'\0', 2)
                 labels = [l for l in labels if len(l) > 0]
-                ui.write(('labels:\n'))
-                ui.write(('  local: %s\n' % labels[0]))
-                ui.write(('  other: %s\n' % labels[1]))
+                ui.writenoi18n(b'labels:\n')
+                ui.writenoi18n(b'  local: %s\n' % labels[0])
+                ui.writenoi18n(b'  other: %s\n' % labels[1])
                 if len(labels) > 2:
-                    ui.write(('  base:  %s\n' % labels[2]))
+                    ui.writenoi18n(b'  base:  %s\n' % labels[2])
             else:
-                ui.write(('unrecognized entry: %s\t%s\n')
-                         % (rtype, record.replace('\0', '\t')))
+                ui.writenoi18n(
+                    b'unrecognized entry: %s\t%s\n'
+                    % (rtype, record.replace(b'\0', b'\t'))
+                )
 
     # Avoid mergestate.read() since it may raise an exception for unsupported
     # merge state records. We shouldn't be doing this, but this is OK since this
@@ -1641,61 +2005,81 @@
     # sort so that reasonable information is on top
     v1records = ms._readrecordsv1()
     v2records = ms._readrecordsv2()
-    order = 'LOml'
+    order = b'LOml'
+
     def key(r):
         idx = order.find(r[0])
         if idx == -1:
             return (1, r[1])
         else:
             return (0, idx)
+
     v1records.sort(key=key)
     v2records.sort(key=key)
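
The key above sorts the handful of known record types first, in
``b'LOml'`` order, with everything else after, ordered by payload; for
example::

    order = b'LOml'

    def key(r):
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)

    records = [(b'm', b'drv'), (b'X', b'b'), (b'L', b'node'), (b'X', b'a')]
    result = [r[0] for r in sorted(records, key=key)]
    assert result == [b'L', b'm', b'X', b'X']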
 
     if not v1records and not v2records:
-        ui.write(('no merge state found\n'))
+        ui.writenoi18n(b'no merge state found\n')
     elif not v2records:
-        ui.note(('no version 2 merge state\n'))
+        ui.notenoi18n(b'no version 2 merge state\n')
         printrecords(1)
     elif ms._v1v2match(v1records, v2records):
-        ui.note(('v1 and v2 states match: using v2\n'))
+        ui.notenoi18n(b'v1 and v2 states match: using v2\n')
         printrecords(2)
     else:
-        ui.note(('v1 and v2 states mismatch: using v1\n'))
+        ui.notenoi18n(b'v1 and v2 states mismatch: using v1\n')
         printrecords(1)
         if ui.verbose:
             printrecords(2)
 
-@command('debugnamecomplete', [], _('NAME...'))
+
+@command(b'debugnamecomplete', [], _(b'NAME...'))
 def debugnamecomplete(ui, repo, *args):
     '''complete "names" - tags, open branch names, bookmark names'''
 
     names = set()
     # since we previously only listed open branches, we will handle that
     # specially (after this for loop)
-    for name, ns in repo.names.iteritems():
-        if name != 'branches':
+    for name, ns in pycompat.iteritems(repo.names):
+        if name != b'branches':
             names.update(ns.listnames(repo))
-    names.update(tag for (tag, heads, tip, closed)
-                 in repo.branchmap().iterbranches() if not closed)
+    names.update(
+        tag
+        for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
+        if not closed
+    )
     completions = set()
     if not args:
-        args = ['']
+        args = [b'']
     for a in args:
         completions.update(n for n in names if n.startswith(a))
-    ui.write('\n'.join(sorted(completions)))
-    ui.write('\n')
-
-@command('debugobsolete',
-        [('', 'flags', 0, _('markers flag')),
-         ('', 'record-parents', False,
-          _('record parent information for the precursor')),
-         ('r', 'rev', [], _('display markers relevant to REV')),
-         ('', 'exclusive', False, _('restrict display to markers only '
-                                    'relevant to REV')),
-         ('', 'index', False, _('display index of the marker')),
-         ('', 'delete', [], _('delete markers specified by indices')),
-        ] + cmdutil.commitopts2 + cmdutil.formatteropts,
-         _('[OBSOLETED [REPLACEMENT ...]]'))
+    ui.write(b'\n'.join(sorted(completions)))
+    ui.write(b'\n')
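
Name completion above is plain prefix matching over every namespace's
names plus the open branch names; reduced to its core::

    names = {b'default', b'stable', b'tip', b'v5.2'}
    args = [b'st', b'v']
    completions = set()
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    assert sorted(completions) == [b'stable', b'v5.2']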
+
+
+@command(
+    b'debugobsolete',
+    [
+        (b'', b'flags', 0, _(b'markers flag')),
+        (
+            b'',
+            b'record-parents',
+            False,
+            _(b'record parent information for the precursor'),
+        ),
+        (b'r', b'rev', [], _(b'display markers relevant to REV')),
+        (
+            b'',
+            b'exclusive',
+            False,
+            _(b'restrict display to markers only relevant to REV'),
+        ),
+        (b'', b'index', False, _(b'display index of the marker')),
+        (b'', b'delete', [], _(b'delete markers specified by indices')),
+    ]
+    + cmdutil.commitopts2
+    + cmdutil.formatteropts,
+    _(b'[OBSOLETED [REPLACEMENT ...]]'),
+)
 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
     """create arbitrary obsolete marker
 
@@ -1713,80 +2097,98 @@
                 raise TypeError()
             return n
         except TypeError:
-            raise error.Abort('changeset references must be full hexadecimal '
-                             'node identifiers')
-
-    if opts.get('delete'):
+            raise error.Abort(
+                b'changeset references must be full hexadecimal '
+                b'node identifiers'
+            )
+
+    if opts.get(b'delete'):
         indices = []
-        for v in opts.get('delete'):
+        for v in opts.get(b'delete'):
             try:
                 indices.append(int(v))
             except ValueError:
-                raise error.Abort(_('invalid index value: %r') % v,
-                                  hint=_('use integers for indices'))
+                raise error.Abort(
+                    _(b'invalid index value: %r') % v,
+                    hint=_(b'use integers for indices'),
+                )
 
         if repo.currenttransaction():
-            raise error.Abort(_('cannot delete obsmarkers in the middle '
-                                'of transaction.'))
+            raise error.Abort(
+                _(b'cannot delete obsmarkers in the middle of transaction.')
+            )
 
         with repo.lock():
             n = repair.deleteobsmarkers(repo.obsstore, indices)
-            ui.write(_('deleted %i obsolescence markers\n') % n)
+            ui.write(_(b'deleted %i obsolescence markers\n') % n)
 
         return
 
     if precursor is not None:
-        if opts['rev']:
-            raise error.Abort('cannot select revision when creating marker')
+        if opts[b'rev']:
+            raise error.Abort(b'cannot select revision when creating marker')
         metadata = {}
-        metadata['user'] = encoding.fromlocal(opts['user'] or ui.username())
+        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
         succs = tuple(parsenodeid(succ) for succ in successors)
         l = repo.lock()
         try:
-            tr = repo.transaction('debugobsolete')
+            tr = repo.transaction(b'debugobsolete')
             try:
-                date = opts.get('date')
+                date = opts.get(b'date')
                 if date:
                     date = dateutil.parsedate(date)
                 else:
                     date = None
                 prec = parsenodeid(precursor)
                 parents = None
-                if opts['record_parents']:
+                if opts[b'record_parents']:
                     if prec not in repo.unfiltered():
-                        raise error.Abort('cannot used --record-parents on '
-                                         'unknown changesets')
+                        raise error.Abort(
+                            b'cannot use --record-parents on '
+                            b'unknown changesets'
+                        )
                     parents = repo.unfiltered()[prec].parents()
                     parents = tuple(p.node() for p in parents)
-                repo.obsstore.create(tr, prec, succs, opts['flags'],
-                                     parents=parents, date=date,
-                                     metadata=metadata, ui=ui)
+                repo.obsstore.create(
+                    tr,
+                    prec,
+                    succs,
+                    opts[b'flags'],
+                    parents=parents,
+                    date=date,
+                    metadata=metadata,
+                    ui=ui,
+                )
                 tr.close()
             except ValueError as exc:
-                raise error.Abort(_('bad obsmarker input: %s') %
-                                  pycompat.bytestr(exc))
+                raise error.Abort(
+                    _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
+                )
             finally:
                 tr.release()
         finally:
             l.release()
     else:
-        if opts['rev']:
-            revs = scmutil.revrange(repo, opts['rev'])
+        if opts[b'rev']:
+            revs = scmutil.revrange(repo, opts[b'rev'])
             nodes = [repo[r].node() for r in revs]
-            markers = list(obsutil.getmarkers(repo, nodes=nodes,
-                                               exclusive=opts['exclusive']))
+            markers = list(
+                obsutil.getmarkers(
+                    repo, nodes=nodes, exclusive=opts[b'exclusive']
+                )
+            )
             markers.sort(key=lambda x: x._data)
         else:
             markers = obsutil.getmarkers(repo)
 
         markerstoiter = markers
         isrelevant = lambda m: True
-        if opts.get('rev') and opts.get('index'):
+        if opts.get(b'rev') and opts.get(b'index'):
             markerstoiter = obsutil.getmarkers(repo)
             markerset = set(markers)
             isrelevant = lambda m: m in markerset
 
-        fm = ui.formatter('debugobsolete', opts)
+        fm = ui.formatter(b'debugobsolete', opts)
         for i, m in enumerate(markerstoiter):
             if not isrelevant(m):
                 # marker can be irrelevant when we're iterating over a set
@@ -1798,38 +2200,49 @@
                 # are relevant to --rev value
                 continue
             fm.startitem()
-            ind = i if opts.get('index') else None
+            ind = i if opts.get(b'index') else None
             cmdutil.showmarker(fm, m, index=ind)
         fm.end()
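
When both ``--rev`` and ``--index`` are given, the loop above iterates
every marker and filters through a set-membership lambda instead of
enumerating the subset directly: the printed indices are only meaningful
against the unfiltered sequence. A standalone sketch of that pattern
(toy data, not the obsstore API)::

   # Indices are computed over the *full* sequence so they stay stable;
   # the subset only decides which rows are shown.
   def showselected(allitems, selected):
       selectedset = set(selected)            # O(1) membership test
       for i, item in enumerate(allitems):    # unfiltered numbering
           if item in selectedset:
               print('%d: %s' % (i, item))

   showselected(['a', 'b', 'c', 'd'], ['b', 'd'])
   # 1: b
   # 3: d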
 
-@command('debugp1copies',
-         [('r', 'rev', '', _('revision to debug'), _('REV'))],
-         _('[-r REV]'))
+
+@command(
+    b'debugp1copies',
+    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
+    _(b'[-r REV]'),
+)
 def debugp1copies(ui, repo, **opts):
     """dump copy information compared to p1"""
 
     opts = pycompat.byteskwargs(opts)
-    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
+    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
     for dst, src in ctx.p1copies().items():
-        ui.write('%s -> %s\n' % (src, dst))
-
-@command('debugp2copies',
-         [('r', 'rev', '', _('revision to debug'), _('REV'))],
-         _('[-r REV]'))
+        ui.write(b'%s -> %s\n' % (src, dst))
+
+
+@command(
+    b'debugp2copies',
+    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
+    _(b'[-r REV]'),
+)
 def debugp2copies(ui, repo, **opts):
     """dump copy information compared to p2"""
 
     opts = pycompat.byteskwargs(opts)
-    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
+    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
     for dst, src in ctx.p2copies().items():
-        ui.write('%s -> %s\n' % (src, dst))
-
-@command('debugpathcomplete',
-         [('f', 'full', None, _('complete an entire path')),
-          ('n', 'normal', None, _('show only normal files')),
-          ('a', 'added', None, _('show only added files')),
-          ('r', 'removed', None, _('show only removed files'))],
-         _('FILESPEC...'))
+        ui.write(b'%s -> %s\n' % (src, dst))
+
+
+@command(
+    b'debugpathcomplete',
+    [
+        (b'f', b'full', None, _(b'complete an entire path')),
+        (b'n', b'normal', None, _(b'show only normal files')),
+        (b'a', b'added', None, _(b'show only added files')),
+        (b'r', b'removed', None, _(b'show only removed files')),
+    ],
+    _(b'FILESPEC...'),
+)
 def debugpathcomplete(ui, repo, *specs, **opts):
     '''complete part or all of a tracked path
 
@@ -1846,19 +2259,19 @@
         if spec != repo.root and not spec.startswith(rootdir):
             return [], []
         if os.path.isdir(spec):
-            spec += '/'
-        spec = spec[len(rootdir):]
-        fixpaths = pycompat.ossep != '/'
+            spec += b'/'
+        spec = spec[len(rootdir) :]
+        fixpaths = pycompat.ossep != b'/'
         if fixpaths:
-            spec = spec.replace(pycompat.ossep, '/')
+            spec = spec.replace(pycompat.ossep, b'/')
         speclen = len(spec)
         fullpaths = opts[r'full']
         files, dirs = set(), set()
         adddir, addfile = dirs.add, files.add
-        for f, st in dirstate.iteritems():
+        for f, st in pycompat.iteritems(dirstate):
             if f.startswith(spec) and st[0] in acceptable:
                 if fixpaths:
-                    f = f.replace('/', pycompat.ossep)
+                    f = f.replace(b'/', pycompat.ossep)
                 if fullpaths:
                     addfile(f)
                     continue
@@ -1869,45 +2282,49 @@
                     addfile(f)
         return files, dirs
 
-    acceptable = ''
+    acceptable = b''
     if opts[r'normal']:
-        acceptable += 'nm'
+        acceptable += b'nm'
     if opts[r'added']:
-        acceptable += 'a'
+        acceptable += b'a'
     if opts[r'removed']:
-        acceptable += 'r'
+        acceptable += b'r'
     cwd = repo.getcwd()
     if not specs:
-        specs = ['.']
+        specs = [b'.']
 
     files, dirs = set(), set()
     for spec in specs:
-        f, d = complete(spec, acceptable or 'nmar')
+        f, d = complete(spec, acceptable or b'nmar')
         files.update(f)
         dirs.update(d)
     files.update(dirs)
-    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
-    ui.write('\n')
-
-@command('debugpathcopies',
-         cmdutil.walkopts,
-         'hg debugpathcopies REV1 REV2 [FILE]',
-         inferrepo=True)
+    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
+    ui.write(b'\n')
+
+
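The ``complete()`` helper above scans a dirstate-style mapping, keeping
full paths when ``--full`` is set and otherwise collapsing each match to
its next directory component. A simplified self-contained sketch of that
logic (a plain dict stands in for the dirstate; names are illustrative)::

   def complete(spec, entries):
       """Return (files, dirs) for tracked paths starting with spec."""
       files, dirs = set(), set()
       speclen = len(spec)
       for path in entries:
           if not path.startswith(spec):
               continue
           s = path.find('/', speclen)
           if s >= 0:
               dirs.add(path[:s])    # only the next path component
           else:
               files.add(path)       # a file directly under the prefix
       return files, dirs

   entries = {'src/a.py': 'n', 'src/lib/b.py': 'n', 'doc/x.txt': 'n'}
   print(complete('src/', entries))  # ({'src/a.py'}, {'src/lib'})
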
+@command(
+    b'debugpathcopies',
+    cmdutil.walkopts,
+    b'hg debugpathcopies REV1 REV2 [FILE]',
+    inferrepo=True,
+)
 def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
     """show copies between two revisions"""
     ctx1 = scmutil.revsingle(repo, rev1)
     ctx2 = scmutil.revsingle(repo, rev2)
     m = scmutil.match(ctx1, pats, opts)
     for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
-        ui.write('%s -> %s\n' % (src, dst))
-
-@command('debugpeer', [], _('PATH'), norepo=True)
+        ui.write(b'%s -> %s\n' % (src, dst))
+
+
+@command(b'debugpeer', [], _(b'PATH'), norepo=True)
 def debugpeer(ui, path):
     """establish a connection to a peer repository"""
     # Always enable peer request logging. Requires --debug to display
     # though.
     overrides = {
-        ('devel', 'debug.peer-request'): True,
+        (b'devel', b'debug.peer-request'): True,
     }
 
     with ui.configoverride(overrides):
@@ -1916,16 +2333,22 @@
         local = peer.local() is not None
         canpush = peer.canpush()
 
-        ui.write(_('url: %s\n') % peer.url())
-        ui.write(_('local: %s\n') % (_('yes') if local else _('no')))
-        ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no')))
-
-@command('debugpickmergetool',
-        [('r', 'rev', '', _('check for files in this revision'), _('REV')),
-         ('', 'changedelete', None, _('emulate merging change and delete')),
-        ] + cmdutil.walkopts + cmdutil.mergetoolopts,
-        _('[PATTERN]...'),
-        inferrepo=True)
+        ui.write(_(b'url: %s\n') % peer.url())
+        ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
+        ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))
+
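``ui.configoverride`` above applies a mapping keyed by ``(section, name)``
pairs for the duration of a ``with`` block. A self-contained sketch of
that save-apply-restore shape, assuming a plain dict in place of the real
ui configuration::

   from contextlib import contextmanager

   class config(dict):
       @contextmanager
       def override(self, overrides):
           # Save current values, apply the overrides, restore on exit.
           # (The real ui.configoverride also tracks keys that were not
           # set at all; this sketch simply restores them as None.)
           saved = {k: self.get(k) for k in overrides}
           self.update(overrides)
           try:
               yield self
           finally:
               self.update(saved)

   cfg = config({(b'devel', b'debug.peer-request'): False})
   with cfg.override({(b'devel', b'debug.peer-request'): True}):
       assert cfg[(b'devel', b'debug.peer-request')]
   assert not cfg[(b'devel', b'debug.peer-request')]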
+
+@command(
+    b'debugpickmergetool',
+    [
+        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
+        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
+    ]
+    + cmdutil.walkopts
+    + cmdutil.mergetoolopts,
+    _(b'[PATTERN]...'),
+    inferrepo=True,
+)
 def debugpickmergetool(ui, repo, *pats, **opts):
     """examine which merge tool is chosen for specified file
 
@@ -1970,36 +2393,41 @@
     """
     opts = pycompat.byteskwargs(opts)
     overrides = {}
-    if opts['tool']:
-        overrides[('ui', 'forcemerge')] = opts['tool']
-        ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))
-
-    with ui.configoverride(overrides, 'debugmergepatterns'):
-        hgmerge = encoding.environ.get("HGMERGE")
+    if opts[b'tool']:
+        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
+        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
+
+    with ui.configoverride(overrides, b'debugmergepatterns'):
+        hgmerge = encoding.environ.get(b"HGMERGE")
         if hgmerge is not None:
-            ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
-        uimerge = ui.config("ui", "merge")
+            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
+        uimerge = ui.config(b"ui", b"merge")
         if uimerge:
-            ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))
-
-        ctx = scmutil.revsingle(repo, opts.get('rev'))
+            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
+
+        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
         m = scmutil.match(ctx, pats, opts)
-        changedelete = opts['changedelete']
+        changedelete = opts[b'changedelete']
         for path in ctx.walk(m):
             fctx = ctx[path]
             try:
                 if not ui.debugflag:
                     ui.pushbuffer(error=True)
-                tool, toolpath = filemerge._picktool(repo, ui, path,
-                                                     fctx.isbinary(),
-                                                     'l' in fctx.flags(),
-                                                     changedelete)
+                tool, toolpath = filemerge._picktool(
+                    repo,
+                    ui,
+                    path,
+                    fctx.isbinary(),
+                    b'l' in fctx.flags(),
+                    changedelete,
+                )
             finally:
                 if not ui.debugflag:
                     ui.popbuffer()
-            ui.write(('%s = %s\n') % (path, tool))
-
-@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
+            ui.write(b'%s = %s\n' % (path, tool))
+
+
+@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
     '''access the pushkey key/value protocol
 
@@ -2013,47 +2441,69 @@
     if keyinfo:
         key, old, new = keyinfo
         with target.commandexecutor() as e:
-            r = e.callcommand('pushkey', {
-                'namespace': namespace,
-                'key': key,
-                'old': old,
-                'new': new,
-            }).result()
-
-        ui.status(pycompat.bytestr(r) + '\n')
+            r = e.callcommand(
+                b'pushkey',
+                {
+                    b'namespace': namespace,
+                    b'key': key,
+                    b'old': old,
+                    b'new': new,
+                },
+            ).result()
+
+        ui.status(pycompat.bytestr(r) + b'\n')
         return not r
     else:
-        for k, v in sorted(target.listkeys(namespace).iteritems()):
-            ui.write("%s\t%s\n" % (stringutil.escapestr(k),
-                                   stringutil.escapestr(v)))
-
-@command('debugpvec', [], _('A B'))
+        for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
+            ui.write(
+                b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
+            )
+
+
+@command(b'debugpvec', [], _(b'A B'))
 def debugpvec(ui, repo, a, b=None):
     ca = scmutil.revsingle(repo, a)
     cb = scmutil.revsingle(repo, b)
     pa = pvec.ctxpvec(ca)
     pb = pvec.ctxpvec(cb)
     if pa == pb:
-        rel = "="
+        rel = b"="
     elif pa > pb:
-        rel = ">"
+        rel = b">"
     elif pa < pb:
-        rel = "<"
+        rel = b"<"
     elif pa | pb:
-        rel = "|"
-    ui.write(_("a: %s\n") % pa)
-    ui.write(_("b: %s\n") % pb)
-    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
-    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
-             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
-              pa.distance(pb), rel))
-
-@command('debugrebuilddirstate|debugrebuildstate',
-    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
-     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
-                             'the working copy parent')),
+        rel = b"|"
+    ui.write(_(b"a: %s\n") % pa)
+    ui.write(_(b"b: %s\n") % pb)
+    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
+    ui.write(
+        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
+        % (
+            abs(pa._depth - pb._depth),
+            pvec._hamming(pa._vec, pb._vec),
+            pa.distance(pb),
+            rel,
+        )
+    )
+
+
+@command(
+    b'debugrebuilddirstate|debugrebuildstate',
+    [
+        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
+        (
+            b'',
+            b'minimal',
+            None,
+            _(
+                b'only rebuild files that are inconsistent with '
+                b'the working copy parent'
+            ),
+        ),
     ],
-    _('[-r REV]'))
+    _(b'[-r REV]'),
+)
 def debugrebuilddirstate(ui, repo, rev, **opts):
     """rebuild the dirstate as it would look like for the given revision
 
@@ -2081,47 +2531,58 @@
             dirstatefiles = set(dirstate)
             manifestonly = manifestfiles - dirstatefiles
             dsonly = dirstatefiles - manifestfiles
-            dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
+            dsnotadded = set(f for f in dsonly if dirstate[f] != b'a')
             changedfiles = manifestonly | dsnotadded
 
         dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
 
-@command('debugrebuildfncache', [], '')
+
+@command(b'debugrebuildfncache', [], b'')
 def debugrebuildfncache(ui, repo):
     """rebuild the fncache file"""
     repair.rebuildfncache(ui, repo)
 
-@command('debugrename',
-    [('r', 'rev', '', _('revision to debug'), _('REV'))],
-    _('[-r REV] [FILE]...'))
+
+@command(
+    b'debugrename',
+    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
+    _(b'[-r REV] [FILE]...'),
+)
 def debugrename(ui, repo, *pats, **opts):
     """dump rename information"""
 
     opts = pycompat.byteskwargs(opts)
-    ctx = scmutil.revsingle(repo, opts.get('rev'))
+    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
     m = scmutil.match(ctx, pats, opts)
     for abs in ctx.walk(m):
         fctx = ctx[abs]
         o = fctx.filelog().renamed(fctx.filenode())
         rel = repo.pathto(abs)
         if o:
-            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
+            ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
         else:
-            ui.write(_("%s not renamed\n") % rel)
-
-@command('debugrevlog', cmdutil.debugrevlogopts +
-    [('d', 'dump', False, _('dump index data'))],
-    _('-c|-m|FILE'),
-    optionalrepo=True)
+            ui.write(_(b"%s not renamed\n") % rel)
+
+
+@command(
+    b'debugrevlog',
+    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
+    _(b'-c|-m|FILE'),
+    optionalrepo=True,
+)
 def debugrevlog(ui, repo, file_=None, **opts):
     """show data and statistics about a revlog"""
     opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
-
-    if opts.get("dump"):
+    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
+
+    if opts.get(b"dump"):
         numrevs = len(r)
-        ui.write(("# rev p1rev p2rev start   end deltastart base   p1   p2"
-                 " rawsize totalsize compression heads chainlen\n"))
+        ui.write(
+            (
+                b"# rev p1rev p2rev start   end deltastart base   p1   p2"
+                b" rawsize totalsize compression heads chainlen\n"
+            )
+        )
         ts = 0
         heads = set()
 
@@ -2140,12 +2601,26 @@
                 compression = ts / r.end(rev)
             except ZeroDivisionError:
                 compression = 0
-            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
-                     "%11d %5d %8d\n" %
-                     (rev, p1, p2, r.start(rev), r.end(rev),
-                      r.start(dbase), r.start(cbase),
-                      r.start(p1), r.start(p2),
-                      rs, ts, compression, len(heads), clen))
+            ui.write(
+                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
+                b"%11d %5d %8d\n"
+                % (
+                    rev,
+                    p1,
+                    p2,
+                    r.start(rev),
+                    r.end(rev),
+                    r.start(dbase),
+                    r.start(cbase),
+                    r.start(p1),
+                    r.start(p2),
+                    rs,
+                    ts,
+                    compression,
+                    len(heads),
+                    clen,
+                )
+            )
         return 0
 
     v = r.version
@@ -2153,12 +2628,12 @@
     flags = []
     gdelta = False
     if v & revlog.FLAG_INLINE_DATA:
-        flags.append('inline')
+        flags.append(b'inline')
     if v & revlog.FLAG_GENERALDELTA:
         gdelta = True
-        flags.append('generaldelta')
+        flags.append(b'generaldelta')
     if not flags:
-        flags = ['(none)']
+        flags = [b'(none)']
 
     ### tracks merge vs single parent
     nummerges = 0
@@ -2260,14 +2735,14 @@
                     numother += 1
 
         # Obtain data on the raw chunks in the revlog.
-        if util.safehasattr(r, '_getsegmentforrevs'):
+        if util.safehasattr(r, b'_getsegmentforrevs'):
             segment = r._getsegmentforrevs(rev, rev)[1]
         else:
             segment = r._revlog._getsegmentforrevs(rev, rev)[1]
         if segment:
             chunktype = bytes(segment[0:1])
         else:
-            chunktype = 'empty'
+            chunktype = b'empty'
 
         if chunktype not in chunktypecounts:
             chunktypecounts[chunktype] = 0
@@ -2309,13 +2784,14 @@
     if totalsize:
         compratio = totalrawsize / totalsize
 
-    basedfmtstr = '%%%dd\n'
-    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
+    basedfmtstr = b'%%%dd\n'
+    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
 
     def dfmtstr(max):
         return basedfmtstr % len(str(max))
+
     def pcfmtstr(max, padding=0):
-        return basepcfmtstr % (len(str(max)), ' ' * padding)
+        return basepcfmtstr % (len(str(max)), b' ' * padding)
 
     def pcfmt(value, total):
         if total:
@@ -2323,106 +2799,141 @@
         else:
             return value, 100.0
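
``dfmtstr`` and ``pcfmtstr`` above are two-stage format strings: the
doubled ``%%`` survives the first interpolation (which bakes the column
width in) as a literal ``%`` for the second. A standalone sketch with
native strings for brevity::

   basedfmtstr = '%%%dd\n'
   basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

   def dfmtstr(maxvalue):
       # First stage: bake the column width into the format string.
       return basedfmtstr % len(str(maxvalue))

   def pcfmtstr(maxvalue, padding=0):
       return basepcfmtstr % (len(str(maxvalue)), ' ' * padding)

   def pcfmt(value, total):
       # Value plus its share of total, for the second interpolation.
       if total:
           return value, 100.0 * value / total
       return value, 100.0

   fmt = pcfmtstr(12345)                    # -> '%5d (%5.2f%%)\n'
   print(fmt % pcfmt(678, 12345), end='')   # '  678 ( 5.49%)'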
 
-    ui.write(('format : %d\n') % format)
-    ui.write(('flags  : %s\n') % ', '.join(flags))
-
-    ui.write('\n')
+    ui.writenoi18n(b'format : %d\n' % format)
+    ui.writenoi18n(b'flags  : %s\n' % b', '.join(flags))
+
+    ui.write(b'\n')
     fmt = pcfmtstr(totalsize)
     fmt2 = dfmtstr(totalsize)
-    ui.write(('revisions     : ') + fmt2 % numrevs)
-    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
-    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
-    ui.write(('revisions     : ') + fmt2 % numrevs)
-    ui.write(('    empty     : ') + fmt % pcfmt(numempty, numrevs))
-    ui.write(('                   text  : ')
-             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
-    ui.write(('                   delta : ')
-             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
-    ui.write(('    snapshot  : ') + fmt % pcfmt(numfull + numsemi, numrevs))
+    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
+    ui.writenoi18n(b'    merges    : ' + fmt % pcfmt(nummerges, numrevs))
+    ui.writenoi18n(
+        b'    normal    : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
+    )
+    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
+    ui.writenoi18n(b'    empty     : ' + fmt % pcfmt(numempty, numrevs))
+    ui.writenoi18n(
+        b'                   text  : '
+        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
+    )
+    ui.writenoi18n(
+        b'                   delta : '
+        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
+    )
+    ui.writenoi18n(
+        b'    snapshot  : ' + fmt % pcfmt(numfull + numsemi, numrevs)
+    )
     for depth in sorted(numsnapdepth):
-        ui.write(('      lvl-%-3d :       ' % depth)
-                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
-    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
-    ui.write(('revision size : ') + fmt2 % totalsize)
-    ui.write(('    snapshot  : ')
-             + fmt % pcfmt(fulltotal + semitotal, totalsize))
+        ui.write(
+            (b'      lvl-%-3d :       ' % depth)
+            + fmt % pcfmt(numsnapdepth[depth], numrevs)
+        )
+    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(numdeltas, numrevs))
+    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
+    ui.writenoi18n(
+        b'    snapshot  : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
+    )
     for depth in sorted(numsnapdepth):
-        ui.write(('      lvl-%-3d :       ' % depth)
-                 + fmt % pcfmt(snaptotal[depth], totalsize))
-    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))
+        ui.write(
+            (b'      lvl-%-3d :       ' % depth)
+            + fmt % pcfmt(snaptotal[depth], totalsize)
+        )
+    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(deltatotal, totalsize))
 
     def fmtchunktype(chunktype):
-        if chunktype == 'empty':
-            return '    %s     : ' % chunktype
+        if chunktype == b'empty':
+            return b'    %s     : ' % chunktype
         elif chunktype in pycompat.bytestr(string.ascii_letters):
-            return '    0x%s (%s)  : ' % (hex(chunktype), chunktype)
+            return b'    0x%s (%s)  : ' % (hex(chunktype), chunktype)
         else:
-            return '    0x%s      : ' % hex(chunktype)
-
-    ui.write('\n')
-    ui.write(('chunks        : ') + fmt2 % numrevs)
+            return b'    0x%s      : ' % hex(chunktype)
+
+    ui.write(b'\n')
+    ui.writenoi18n(b'chunks        : ' + fmt2 % numrevs)
     for chunktype in sorted(chunktypecounts):
         ui.write(fmtchunktype(chunktype))
         ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
-    ui.write(('chunks size   : ') + fmt2 % totalsize)
+    ui.writenoi18n(b'chunks size   : ' + fmt2 % totalsize)
     for chunktype in sorted(chunktypecounts):
         ui.write(fmtchunktype(chunktype))
         ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
 
-    ui.write('\n')
+    ui.write(b'\n')
     fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
-    ui.write(('avg chain length  : ') + fmt % avgchainlen)
-    ui.write(('max chain length  : ') + fmt % maxchainlen)
-    ui.write(('max chain reach   : ') + fmt % maxchainspan)
-    ui.write(('compression ratio : ') + fmt % compratio)
+    ui.writenoi18n(b'avg chain length  : ' + fmt % avgchainlen)
+    ui.writenoi18n(b'max chain length  : ' + fmt % maxchainlen)
+    ui.writenoi18n(b'max chain reach   : ' + fmt % maxchainspan)
+    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
 
     if format > 0:
-        ui.write('\n')
-        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
-                 % tuple(datasize))
-    ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
-             % tuple(fullsize))
-    ui.write(('inter-snapshot size (min/max/avg)    : %d / %d / %d\n')
-             % tuple(semisize))
+        ui.write(b'\n')
+        ui.writenoi18n(
+            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
+            % tuple(datasize)
+        )
+    ui.writenoi18n(
+        b'full revision size (min/max/avg)     : %d / %d / %d\n'
+        % tuple(fullsize)
+    )
+    ui.writenoi18n(
+        b'inter-snapshot size (min/max/avg)    : %d / %d / %d\n'
+        % tuple(semisize)
+    )
     for depth in sorted(snapsizedepth):
         if depth == 0:
             continue
-        ui.write(('    level-%-3d (min/max/avg)          : %d / %d / %d\n')
-                 % ((depth,) + tuple(snapsizedepth[depth])))
-    ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
-             % tuple(deltasize))
+        ui.writenoi18n(
+            b'    level-%-3d (min/max/avg)          : %d / %d / %d\n'
+            % ((depth,) + tuple(snapsizedepth[depth]))
+        )
+    ui.writenoi18n(
+        b'delta size (min/max/avg)             : %d / %d / %d\n'
+        % tuple(deltasize)
+    )
 
     if numdeltas > 0:
-        ui.write('\n')
+        ui.write(b'\n')
         fmt = pcfmtstr(numdeltas)
         fmt2 = pcfmtstr(numdeltas, 4)
-        ui.write(('deltas against prev  : ') + fmt % pcfmt(numprev, numdeltas))
+        ui.writenoi18n(
+            b'deltas against prev  : ' + fmt % pcfmt(numprev, numdeltas)
+        )
         if numprev > 0:
-            ui.write(('    where prev = p1  : ') + fmt2 % pcfmt(nump1prev,
-                                                              numprev))
-            ui.write(('    where prev = p2  : ') + fmt2 % pcfmt(nump2prev,
-                                                              numprev))
-            ui.write(('    other            : ') + fmt2 % pcfmt(numoprev,
-                                                              numprev))
+            ui.writenoi18n(
+                b'    where prev = p1  : ' + fmt2 % pcfmt(nump1prev, numprev)
+            )
+            ui.writenoi18n(
+                b'    where prev = p2  : ' + fmt2 % pcfmt(nump2prev, numprev)
+            )
+            ui.writenoi18n(
+                b'    other            : ' + fmt2 % pcfmt(numoprev, numprev)
+            )
         if gdelta:
-            ui.write(('deltas against p1    : ')
-                     + fmt % pcfmt(nump1, numdeltas))
-            ui.write(('deltas against p2    : ')
-                     + fmt % pcfmt(nump2, numdeltas))
-            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
-                                                             numdeltas))
-
-@command('debugrevlogindex', cmdutil.debugrevlogopts +
-    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
-    _('[-f FORMAT] -c|-m|FILE'),
-    optionalrepo=True)
+            ui.writenoi18n(
+                b'deltas against p1    : ' + fmt % pcfmt(nump1, numdeltas)
+            )
+            ui.writenoi18n(
+                b'deltas against p2    : ' + fmt % pcfmt(nump2, numdeltas)
+            )
+            ui.writenoi18n(
+                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
+            )
+
+
+@command(
+    b'debugrevlogindex',
+    cmdutil.debugrevlogopts
+    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
+    _(b'[-f FORMAT] -c|-m|FILE'),
+    optionalrepo=True,
+)
 def debugrevlogindex(ui, repo, file_=None, **opts):
     """dump the contents of a revlog index"""
     opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
-    format = opts.get('format', 0)
+    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
+    format = opts.get(b'format', 0)
     if format not in (0, 1):
-        raise error.Abort(_("unknown format %d") % format)
+        raise error.Abort(_(b"unknown format %d") % format)
 
     if ui.debugflag:
         shortfn = hex
@@ -2437,19 +2948,29 @@
 
     if format == 0:
         if ui.verbose:
-            ui.write(("   rev    offset  length linkrev"
-                     " %s %s p2\n") % ("nodeid".ljust(idlen),
-                                       "p1".ljust(idlen)))
+            ui.writenoi18n(
+                b"   rev    offset  length linkrev %s %s p2\n"
+                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
+            )
         else:
-            ui.write(("   rev linkrev %s %s p2\n") % (
-                "nodeid".ljust(idlen), "p1".ljust(idlen)))
+            ui.writenoi18n(
+                b"   rev linkrev %s %s p2\n"
+                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
+            )
     elif format == 1:
         if ui.verbose:
-            ui.write(("   rev flag   offset   length     size   link     p1"
-                      "     p2 %s\n") % "nodeid".rjust(idlen))
+            ui.writenoi18n(
+                (
+                    b"   rev flag   offset   length     size   link     p1"
+                    b"     p2 %s\n"
+                )
+                % b"nodeid".rjust(idlen)
+            )
         else:
-            ui.write(("   rev flag     size   link     p1     p2 %s\n") %
-                     "nodeid".rjust(idlen))
+            ui.writenoi18n(
+                b"   rev flag     size   link     p1     p2 %s\n"
+                % b"nodeid".rjust(idlen)
+            )
 
     for i in r:
         node = r.node(i)
@@ -2459,35 +2980,94 @@
             except Exception:
                 pp = [nullid, nullid]
             if ui.verbose:
-                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
-                        i, r.start(i), r.length(i), r.linkrev(i),
-                        shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
+                ui.write(
+                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
+                    % (
+                        i,
+                        r.start(i),
+                        r.length(i),
+                        r.linkrev(i),
+                        shortfn(node),
+                        shortfn(pp[0]),
+                        shortfn(pp[1]),
+                    )
+                )
             else:
-                ui.write("% 6d % 7d %s %s %s\n" % (
-                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
-                    shortfn(pp[1])))
+                ui.write(
+                    b"% 6d % 7d %s %s %s\n"
+                    % (
+                        i,
+                        r.linkrev(i),
+                        shortfn(node),
+                        shortfn(pp[0]),
+                        shortfn(pp[1]),
+                    )
+                )
         elif format == 1:
             pr = r.parentrevs(i)
             if ui.verbose:
-                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
-                        i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
-                        r.linkrev(i), pr[0], pr[1], shortfn(node)))
+                ui.write(
+                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
+                    % (
+                        i,
+                        r.flags(i),
+                        r.start(i),
+                        r.length(i),
+                        r.rawsize(i),
+                        r.linkrev(i),
+                        pr[0],
+                        pr[1],
+                        shortfn(node),
+                    )
+                )
             else:
-                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
-                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
-                    shortfn(node)))
-
-@command('debugrevspec',
-    [('', 'optimize', None,
-      _('print parsed tree after optimizing (DEPRECATED)')),
-     ('', 'show-revs', True, _('print list of result revisions (default)')),
-     ('s', 'show-set', None, _('print internal representation of result set')),
-     ('p', 'show-stage', [],
-      _('print parsed tree at the given stage'), _('NAME')),
-     ('', 'no-optimized', False, _('evaluate tree without optimization')),
-     ('', 'verify-optimized', False, _('verify optimized result')),
-     ],
-    ('REVSPEC'))
+                ui.write(
+                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
+                    % (
+                        i,
+                        r.flags(i),
+                        r.rawsize(i),
+                        r.linkrev(i),
+                        pr[0],
+                        pr[1],
+                        shortfn(node),
+                    )
+                )
+
+
+@command(
+    b'debugrevspec',
+    [
+        (
+            b'',
+            b'optimize',
+            None,
+            _(b'print parsed tree after optimizing (DEPRECATED)'),
+        ),
+        (
+            b'',
+            b'show-revs',
+            True,
+            _(b'print list of result revisions (default)'),
+        ),
+        (
+            b's',
+            b'show-set',
+            None,
+            _(b'print internal representation of result set'),
+        ),
+        (
+            b'p',
+            b'show-stage',
+            [],
+            _(b'print parsed tree at the given stage'),
+            _(b'NAME'),
+        ),
+        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
+        (b'', b'verify-optimized', False, _(b'verify optimized result')),
+    ],
+    b'REVSPEC',
+)
 def debugrevspec(ui, repo, expr, **opts):
     """parse and apply a revision specification
 
@@ -2501,39 +3081,42 @@
     one. Returns 1 if the optimized result differs.
     """
     opts = pycompat.byteskwargs(opts)
-    aliases = ui.configitems('revsetalias')
+    aliases = ui.configitems(b'revsetalias')
     stages = [
-        ('parsed', lambda tree: tree),
-        ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
-                                                           ui.warn)),
-        ('concatenated', revsetlang.foldconcat),
-        ('analyzed', revsetlang.analyze),
-        ('optimized', revsetlang.optimize),
+        (b'parsed', lambda tree: tree),
+        (
+            b'expanded',
+            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
+        ),
+        (b'concatenated', revsetlang.foldconcat),
+        (b'analyzed', revsetlang.analyze),
+        (b'optimized', revsetlang.optimize),
     ]
-    if opts['no_optimized']:
+    if opts[b'no_optimized']:
         stages = stages[:-1]
-    if opts['verify_optimized'] and opts['no_optimized']:
-        raise error.Abort(_('cannot use --verify-optimized with '
-                            '--no-optimized'))
+    if opts[b'verify_optimized'] and opts[b'no_optimized']:
+        raise error.Abort(
+            _(b'cannot use --verify-optimized with --no-optimized')
+        )
     stagenames = set(n for n, f in stages)
 
     showalways = set()
     showchanged = set()
-    if ui.verbose and not opts['show_stage']:
+    if ui.verbose and not opts[b'show_stage']:
         # show parsed tree by --verbose (deprecated)
-        showalways.add('parsed')
-        showchanged.update(['expanded', 'concatenated'])
-        if opts['optimize']:
-            showalways.add('optimized')
-    if opts['show_stage'] and opts['optimize']:
-        raise error.Abort(_('cannot use --optimize with --show-stage'))
-    if opts['show_stage'] == ['all']:
+        showalways.add(b'parsed')
+        showchanged.update([b'expanded', b'concatenated'])
+        if opts[b'optimize']:
+            showalways.add(b'optimized')
+    if opts[b'show_stage'] and opts[b'optimize']:
+        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
+    if opts[b'show_stage'] == [b'all']:
         showalways.update(stagenames)
     else:
-        for n in opts['show_stage']:
+        for n in opts[b'show_stage']:
             if n not in stagenames:
-                raise error.Abort(_('invalid stage name: %s') % n)
-        showalways.update(opts['show_stage'])
+                raise error.Abort(_(b'invalid stage name: %s') % n)
+        showalways.update(opts[b'show_stage'])
 
     treebystage = {}
     printedtree = None
@@ -2541,50 +3124,64 @@
     for n, f in stages:
         treebystage[n] = tree = f(tree)
         if n in showalways or (n in showchanged and tree != printedtree):
-            if opts['show_stage'] or n != 'parsed':
-                ui.write(("* %s:\n") % n)
-            ui.write(revsetlang.prettyformat(tree), "\n")
+            if opts[b'show_stage'] or n != b'parsed':
+                ui.write(b"* %s:\n" % n)
+            ui.write(revsetlang.prettyformat(tree), b"\n")
             printedtree = tree
 
-    if opts['verify_optimized']:
-        arevs = revset.makematcher(treebystage['analyzed'])(repo)
-        brevs = revset.makematcher(treebystage['optimized'])(repo)
-        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
-            ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
-            ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
+    if opts[b'verify_optimized']:
+        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
+        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
+        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
+            ui.writenoi18n(
+                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
+            )
+            ui.writenoi18n(
+                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
+            )
         arevs = list(arevs)
         brevs = list(brevs)
         if arevs == brevs:
             return 0
-        ui.write(('--- analyzed\n'), label='diff.file_a')
-        ui.write(('+++ optimized\n'), label='diff.file_b')
+        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
+        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
         sm = difflib.SequenceMatcher(None, arevs, brevs)
         for tag, alo, ahi, blo, bhi in sm.get_opcodes():
             if tag in (r'delete', r'replace'):
                 for c in arevs[alo:ahi]:
-                    ui.write('-%d\n' % c, label='diff.deleted')
+                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
             if tag in (r'insert', r'replace'):
                 for c in brevs[blo:bhi]:
-                    ui.write('+%d\n' % c, label='diff.inserted')
+                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
             if tag == r'equal':
                 for c in arevs[alo:ahi]:
-                    ui.write(' %d\n' % c)
+                    ui.write(b' %d\n' % c)
         return 1
 
     func = revset.makematcher(tree)
     revs = func(repo)
-    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
-        ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
-    if not opts['show_revs']:
+    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
+        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
+    if not opts[b'show_revs']:
         return
     for c in revs:
-        ui.write("%d\n" % c)
-
-@command('debugserve', [
-    ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
-    ('', 'logiofd', '', _('file descriptor to log server I/O to')),
-    ('', 'logiofile', '', _('file to log server I/O to')),
-], '')
+        ui.write(b"%d\n" % c)
+
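The loop over ``stages`` above threads the tree through each transform
while recording every intermediate result, which is what lets
``--show-stage`` and ``--verify-optimized`` inspect or compare stages
afterwards. A minimal sketch of that fold-with-history pattern (toy
string transforms, not the revset machinery)::

   stages = [
       ('parsed', lambda t: t),
       ('lowered', lambda t: t.lower()),
       ('optimized', lambda t: t.replace('  ', ' ')),
   ]

   def runstages(value, stages, show=()):
       bystage = {}
       for name, fn in stages:
           bystage[name] = value = fn(value)  # keep every intermediate
           if name in show:
               print('* %s:\n%s' % (name, value))
       return bystage

   trees = runstages('Heads()  AND  Public()', stages,
                     show=('optimized',))
   assert trees['parsed'] != trees['optimized']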
+
+@command(
+    b'debugserve',
+    [
+        (
+            b'',
+            b'sshstdio',
+            False,
+            _(b'run an SSH server bound to process handles'),
+        ),
+        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
+        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
+    ],
+    b'',
+)
 def debugserve(ui, repo, **opts):
     """run a server with advanced settings
 
@@ -2594,30 +3191,31 @@
     """
     opts = pycompat.byteskwargs(opts)
 
-    if not opts['sshstdio']:
-        raise error.Abort(_('only --sshstdio is currently supported'))
+    if not opts[b'sshstdio']:
+        raise error.Abort(_(b'only --sshstdio is currently supported'))
 
     logfh = None
 
-    if opts['logiofd'] and opts['logiofile']:
-        raise error.Abort(_('cannot use both --logiofd and --logiofile'))
-
-    if opts['logiofd']:
+    if opts[b'logiofd'] and opts[b'logiofile']:
+        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
+
+    if opts[b'logiofd']:
         # Line buffered because output is line based.
         try:
-            logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
+            logfh = os.fdopen(int(opts[b'logiofd']), r'ab', 1)
         except OSError as e:
             if e.errno != errno.ESPIPE:
                 raise
             # can't seek a pipe, so `ab` mode fails on py3
-            logfh = os.fdopen(int(opts['logiofd']), r'wb', 1)
-    elif opts['logiofile']:
-        logfh = open(opts['logiofile'], 'ab', 1)
+            logfh = os.fdopen(int(opts[b'logiofd']), r'wb', 1)
+    elif opts[b'logiofile']:
+        logfh = open(opts[b'logiofile'], b'ab', 1)
 
     s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
     s.serve_forever()
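
The ``--logiofd`` branch above opens the descriptor in append mode and
falls back to write mode when it turns out to be a pipe. A standalone
sketch of that fallback (a locally created pipe stands in for the
user-supplied descriptor)::

   import errno
   import os

   def openlogfd(fd):
       # Line oriented log, so ask for line buffering (bufsize=1).
       try:
           return os.fdopen(fd, 'ab', 1)
       except OSError as e:
           if e.errno != errno.ESPIPE:
               raise
           # Append mode implies a seek to the end, and pipes cannot
           # seek; older Python 3 releases raise ESPIPE here, in which
           # case plain write mode is equivalent for a pipe.
           return os.fdopen(fd, 'wb', 1)

   r, w = os.pipe()
   fh = openlogfd(w)
   fh.write(b'hello\n')
   fh.close()
   print(os.read(r, 16))  # b'hello\n'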
 
-@command('debugsetparents', [], _('REV1 [REV2]'))
+
+@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
 def debugsetparents(ui, repo, rev1, rev2=None):
     """manually set the parents of the current working directory
 
@@ -2630,12 +3228,41 @@
     """
 
     node1 = scmutil.revsingle(repo, rev1).node()
-    node2 = scmutil.revsingle(repo, rev2, 'null').node()
+    node2 = scmutil.revsingle(repo, rev2, b'null').node()
 
     with repo.wlock():
         repo.setparents(node1, node2)
 
-@command('debugssl', [], '[SOURCE]', optionalrepo=True)
+
+@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
+def debugsidedata(ui, repo, file_, rev=None, **opts):
+    """dump the side data for a cl/manifest/file revision
+
+    Use --verbose to dump the sidedata content."""
+    opts = pycompat.byteskwargs(opts)
+    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
+        if rev is not None:
+            raise error.CommandError(
+                b'debugsidedata', _(b'invalid arguments')
+            )
+        file_, rev = None, file_
+    elif rev is None:
+        raise error.CommandError(b'debugsidedata', _(b'invalid arguments'))
+    r = cmdutil.openstorage(repo, b'debugsidedata', file_, opts)
+    r = getattr(r, '_revlog', r)
+    try:
+        sidedata = r.sidedata(r.lookup(rev))
+    except KeyError:
+        raise error.Abort(_(b'invalid revision identifier %s') % rev)
+    if sidedata:
+        sidedata = list(sidedata.items())
+        sidedata.sort()
+        ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
+        for key, value in sidedata:
+            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
+            if ui.verbose:
+                ui.writenoi18n(b'  %s\n' % stringutil.pprint(value))
+
+
+@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
 def debugssl(ui, repo, source=None, **opts):
     '''test a secure connection to a server
 
@@ -2650,66 +3277,80 @@
     of the SSL error is likely another issue.
     '''
     if not pycompat.iswindows:
-        raise error.Abort(_('certificate chain building is only possible on '
-                            'Windows'))
+        raise error.Abort(
+            _(b'certificate chain building is only possible on Windows')
+        )
 
     if not source:
         if not repo:
-            raise error.Abort(_("there is no Mercurial repository here, and no "
-                                "server specified"))
-        source = "default"
+            raise error.Abort(
+                _(
+                    b"there is no Mercurial repository here, and no "
+                    b"server specified"
+                )
+            )
+        source = b"default"
 
     source, branches = hg.parseurl(ui.expandpath(source))
     url = util.url(source)
 
-    defaultport = {'https': 443, 'ssh': 22}
+    defaultport = {b'https': 443, b'ssh': 22}
     if url.scheme in defaultport:
         try:
             addr = (url.host, int(url.port or defaultport[url.scheme]))
         except ValueError:
-            raise error.Abort(_("malformed port number in URL"))
+            raise error.Abort(_(b"malformed port number in URL"))
     else:
-        raise error.Abort(_("only https and ssh connections are supported"))
+        raise error.Abort(_(b"only https and ssh connections are supported"))
 
     from . import win32
 
-    s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
-                        cert_reqs=ssl.CERT_NONE, ca_certs=None)
+    s = ssl.wrap_socket(
+        socket.socket(),
+        ssl_version=ssl.PROTOCOL_TLS,
+        cert_reqs=ssl.CERT_NONE,
+        ca_certs=None,
+    )
 
     try:
         s.connect(addr)
         cert = s.getpeercert(True)
 
-        ui.status(_('checking the certificate chain for %s\n') % url.host)
+        ui.status(_(b'checking the certificate chain for %s\n') % url.host)
 
         complete = win32.checkcertificatechain(cert, build=False)
 
         if not complete:
-            ui.status(_('certificate chain is incomplete, updating... '))
+            ui.status(_(b'certificate chain is incomplete, updating... '))
 
             if not win32.checkcertificatechain(cert):
-                ui.status(_('failed.\n'))
+                ui.status(_(b'failed.\n'))
             else:
-                ui.status(_('done.\n'))
+                ui.status(_(b'done.\n'))
         else:
-            ui.status(_('full certificate chain is available\n'))
+            ui.status(_(b'full certificate chain is available\n'))
     finally:
         s.close()
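
``ssl.wrap_socket`` with ``CERT_NONE`` above is the py2-compatible way to
grab the peer's raw DER certificate before handing it to the Windows
chain-building API. A hedged sketch of the same probe using the modern
``ssl.SSLContext`` interface (illustrative only, not the changeset's
code)::

   import socket
   import ssl

   def fetchpeercert(host, port=443):
       # Like debugssl, fetch the DER certificate without verifying it:
       # the point is to inspect the chain, not authenticate the peer.
       ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
       ctx.check_hostname = False
       ctx.verify_mode = ssl.CERT_NONE
       with socket.create_connection((host, port)) as sock:
           with ctx.wrap_socket(sock, server_hostname=host) as ssock:
               return ssock.getpeercert(binary_form=True)  # DER bytes

   # cert = fetchpeercert('example.com')  # requires network access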
 
-@command('debugsub',
-    [('r', 'rev', '',
-     _('revision to check'), _('REV'))],
-    _('[-r REV] [REV]'))
+
+@command(
+    b'debugsub',
+    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
+    _(b'[-r REV] [REV]'),
+)
 def debugsub(ui, repo, rev=None):
     ctx = scmutil.revsingle(repo, rev, None)
     for k, v in sorted(ctx.substate.items()):
-        ui.write(('path %s\n') % k)
-        ui.write((' source   %s\n') % v[0])
-        ui.write((' revision %s\n') % v[1])
-
-@command('debugsuccessorssets',
-    [('', 'closest', False, _('return closest successors sets only'))],
-    _('[REV]'))
+        ui.writenoi18n(b'path %s\n' % k)
+        ui.writenoi18n(b' source   %s\n' % v[0])
+        ui.writenoi18n(b' revision %s\n' % v[1])
+
+
+@command(
+    b'debugsuccessorssets',
+    [(b'', b'closest', False, _(b'return closest successors sets only'))],
+    _(b'[REV]'),
+)
 def debugsuccessorssets(ui, repo, *revs, **opts):
     """show set of successors for revision
 
@@ -2748,23 +3389,28 @@
     node2str = short
     for rev in scmutil.revrange(repo, revs):
         ctx = repo[rev]
-        ui.write('%s\n'% ctx2str(ctx))
-        for succsset in obsutil.successorssets(repo, ctx.node(),
-                                                closest=opts[r'closest'],
-                                                cache=cache):
+        ui.write(b'%s\n' % ctx2str(ctx))
+        for succsset in obsutil.successorssets(
+            repo, ctx.node(), closest=opts[r'closest'], cache=cache
+        ):
             if succsset:
-                ui.write('    ')
+                ui.write(b'    ')
                 ui.write(node2str(succsset[0]))
                 for node in succsset[1:]:
-                    ui.write(' ')
+                    ui.write(b' ')
                     ui.write(node2str(node))
-            ui.write('\n')
-
-@command('debugtemplate',
-    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
-     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
-    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
-    optionalrepo=True)
+            ui.write(b'\n')
+
+
+@command(
+    b'debugtemplate',
+    [
+        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
+        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
+    ],
+    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
+    optionalrepo=True,
+)
 def debugtemplate(ui, repo, tmpl, **opts):
     """parse and apply a template
 
@@ -2777,74 +3423,98 @@
     revs = None
     if opts[r'rev']:
         if repo is None:
-            raise error.RepoError(_('there is no Mercurial repository here '
-                                    '(.hg not found)'))
+            raise error.RepoError(
+                _(b'there is no Mercurial repository here (.hg not found)')
+            )
         revs = scmutil.revrange(repo, opts[r'rev'])
 
     props = {}
     for d in opts[r'define']:
         try:
-            k, v = (e.strip() for e in d.split('=', 1))
-            if not k or k == 'ui':
+            k, v = (e.strip() for e in d.split(b'=', 1))
+            if not k or k == b'ui':
                 raise ValueError
             props[k] = v
         except ValueError:
-            raise error.Abort(_('malformed keyword definition: %s') % d)
+            raise error.Abort(_(b'malformed keyword definition: %s') % d)
 
     if ui.verbose:
-        aliases = ui.configitems('templatealias')
+        aliases = ui.configitems(b'templatealias')
         tree = templater.parse(tmpl)
-        ui.note(templater.prettyformat(tree), '\n')
+        ui.note(templater.prettyformat(tree), b'\n')
         newtree = templater.expandaliases(tree, aliases)
         if newtree != tree:
-            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
+            ui.notenoi18n(
+                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
+            )
 
     if revs is None:
         tres = formatter.templateresources(ui, repo)
         t = formatter.maketemplater(ui, tmpl, resources=tres)
         if ui.verbose:
             kwds, funcs = t.symbolsuseddefault()
-            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
-            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
+            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
+            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
         ui.write(t.renderdefault(props))
     else:
         displayer = logcmdutil.maketemplater(ui, repo, tmpl)
         if ui.verbose:
             kwds, funcs = displayer.t.symbolsuseddefault()
-            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
-            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
+            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
+            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
         for r in revs:
             displayer.show(repo[r], **pycompat.strkwargs(props))
         displayer.close()
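
``--define`` parsing above funnels both failure modes -- no ``=``
separator and an empty or reserved key -- through a single ``ValueError``
so one abort message covers them. A standalone sketch (native strings;
the exception type is illustrative)::

   def parsedefines(defs, reserved=('ui',)):
       props = {}
       for d in defs:
           try:
               # Unpacking raises ValueError when '=' is missing, and
               # we raise it ourselves for empty/reserved keys: both
               # failure modes share one error path.
               k, v = (e.strip() for e in d.split('=', 1))
               if not k or k in reserved:
                   raise ValueError
               props[k] = v
           except ValueError:
               raise ValueError('malformed keyword definition: %s' % d)
       return props

   print(parsedefines(['author = alice', 'rev=42']))
   # {'author': 'alice', 'rev': '42'}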
 
-@command('debuguigetpass', [
-    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
-], _('[-p TEXT]'), norepo=True)
-def debuguigetpass(ui, prompt=''):
+
+@command(
+    b'debuguigetpass',
+    [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
+    _(b'[-p TEXT]'),
+    norepo=True,
+)
+def debuguigetpass(ui, prompt=b''):
     """show prompt to type password"""
     r = ui.getpass(prompt)
-    ui.write(('respose: %s\n') % r)
-
-@command('debuguiprompt', [
-    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
-], _('[-p TEXT]'), norepo=True)
-def debuguiprompt(ui, prompt=''):
+    ui.writenoi18n(b'response: %s\n' % r)
+
+
+@command(
+    b'debuguiprompt',
+    [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
+    _(b'[-p TEXT]'),
+    norepo=True,
+)
+def debuguiprompt(ui, prompt=b''):
     """show plain prompt"""
     r = ui.prompt(prompt)
-    ui.write(('response: %s\n') % r)
-
-@command('debugupdatecaches', [])
+    ui.writenoi18n(b'response: %s\n' % r)
+
+
+@command(b'debugupdatecaches', [])
 def debugupdatecaches(ui, repo, *pats, **opts):
     """warm all known caches in the repository"""
     with repo.wlock(), repo.lock():
         repo.updatecaches(full=True)
 
-@command('debugupgraderepo', [
-    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
-    ('', 'run', False, _('performs an upgrade')),
-    ('', 'backup', True, _('keep the old repository content around')),
-])
-def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True):
+
+@command(
+    b'debugupgraderepo',
+    [
+        (
+            b'o',
+            b'optimize',
+            [],
+            _(b'extra optimization to perform'),
+            _(b'NAME'),
+        ),
+        (b'', b'run', False, _(b'performs an upgrade')),
+        (b'', b'backup', True, _(b'keep the old repository content around')),
+        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
+        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
+    ],
+)
+def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
     """upgrade a repository to use different features
 
     If no arguments are specified, the repository is evaluated for upgrade
@@ -2862,65 +3532,95 @@
     rename some directories inside the ``.hg`` directory. On most machines, this
     should complete almost instantaneously and the chances of a consumer being
     unable to access the repository should be low.
+
+    By default, all revlogs will be upgraded. You can restrict this using
+    flags such as `--manifest`:
+
+      * `--manifest`: only optimize the manifest
+      * `--no-manifest`: optimize all revlogs but the manifest
+      * `--changelog`: optimize the changelog only
+      * `--no-changelog --no-manifest`: optimize filelogs only
     """
-    return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize,
-                               backup=backup)
-
-@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
-         inferrepo=True)
+    return upgrade.upgraderepo(
+        ui, repo, run=run, optimize=optimize, backup=backup, **opts
+    )
+
+
+@command(
+    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
+)
 def debugwalk(ui, repo, *pats, **opts):
     """show how files match on given patterns"""
     opts = pycompat.byteskwargs(opts)
     m = scmutil.match(repo[None], pats, opts)
     if ui.verbose:
-        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
+        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
     items = list(repo[None].walk(m))
     if not items:
         return
     f = lambda fn: fn
-    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
+    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
         f = lambda fn: util.normpath(fn)
-    fmt = 'f  %%-%ds  %%-%ds  %%s' % (
+    fmt = b'f  %%-%ds  %%-%ds  %%s' % (
         max([len(abs) for abs in items]),
-        max([len(repo.pathto(abs)) for abs in items]))
+        max([len(repo.pathto(abs)) for abs in items]),
+    )
     for abs in items:
-        line = fmt % (abs, f(repo.pathto(abs)), m.exact(abs) and 'exact' or '')
-        ui.write("%s\n" % line.rstrip())
-
-@command('debugwhyunstable', [], _('REV'))
+        line = fmt % (
+            abs,
+            f(repo.pathto(abs)),
+            m.exact(abs) and b'exact' or b'',
+        )
+        ui.write(b"%s\n" % line.rstrip())
+
+
+@command(b'debugwhyunstable', [], _(b'REV'))
 def debugwhyunstable(ui, repo, rev):
     """explain instabilities of a changeset"""
     for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
-        dnodes = ''
-        if entry.get('divergentnodes'):
-            dnodes = ' '.join('%s (%s)' % (ctx.hex(), ctx.phasestr())
-                              for ctx in entry['divergentnodes']) + ' '
-        ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
-                                    entry['reason'], entry['node']))
-
-@command('debugwireargs',
-    [('', 'three', '', 'three'),
-    ('', 'four', '', 'four'),
-    ('', 'five', '', 'five'),
-    ] + cmdutil.remoteopts,
-    _('REPO [OPTIONS]... [ONE [TWO]]'),
-    norepo=True)
+        dnodes = b''
+        if entry.get(b'divergentnodes'):
+            dnodes = (
+                b' '.join(
+                    b'%s (%s)' % (ctx.hex(), ctx.phasestr())
+                    for ctx in entry[b'divergentnodes']
+                )
+                + b' '
+            )
+        ui.write(
+            b'%s: %s%s %s\n'
+            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
+        )
+
+
+@command(
+    b'debugwireargs',
+    [
+        (b'', b'three', b'', b'three'),
+        (b'', b'four', b'', b'four'),
+        (b'', b'five', b'', b'five'),
+    ]
+    + cmdutil.remoteopts,
+    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
+    norepo=True,
+)
 def debugwireargs(ui, repopath, *vals, **opts):
     opts = pycompat.byteskwargs(opts)
     repo = hg.peer(ui, opts, repopath)
     for opt in cmdutil.remoteopts:
         del opts[opt[1]]
     args = {}
-    for k, v in opts.iteritems():
+    for k, v in pycompat.iteritems(opts):
         if v:
             args[k] = v
     args = pycompat.strkwargs(args)
     # run twice to check that we don't mess up the stream for the next command
     res1 = repo.debugwireargs(*vals, **args)
     res2 = repo.debugwireargs(*vals, **args)
-    ui.write("%s\n" % res1)
+    ui.write(b"%s\n" % res1)
     if res1 != res2:
-        ui.warn("%s\n" % res2)
+        ui.warn(b"%s\n" % res2)
+
 
 def _parsewirelangblocks(fh):
     activeaction = None
@@ -2948,7 +3648,7 @@
         # Else we start with an indent.
 
         if not activeaction:
-            raise error.Abort(_('indented line outside of block'))
+            raise error.Abort(_(b'indented line outside of block'))
 
         indent = len(line) - len(line.lstrip())
 
@@ -2963,16 +3663,29 @@
     if activeaction:
         yield activeaction, blocklines
 
-@command('debugwireproto',
+
+@command(
+    b'debugwireproto',
     [
-        ('', 'localssh', False, _('start an SSH server for this repo')),
-        ('', 'peer', '', _('construct a specific version of the peer')),
-        ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
-        ('', 'nologhandshake', False,
-         _('do not log I/O related to the peer handshake')),
-    ] + cmdutil.remoteopts,
-    _('[PATH]'),
-    optionalrepo=True)
+        (b'', b'localssh', False, _(b'start an SSH server for this repo')),
+        (b'', b'peer', b'', _(b'construct a specific version of the peer')),
+        (
+            b'',
+            b'noreadstderr',
+            False,
+            _(b'do not read from stderr of the remote'),
+        ),
+        (
+            b'',
+            b'nologhandshake',
+            False,
+            _(b'do not log I/O related to the peer handshake'),
+        ),
+    ]
+    + cmdutil.remoteopts,
+    _(b'[PATH]'),
+    optionalrepo=True,
+)
 def debugwireproto(ui, repo, path=None, **opts):
     """send wire protocol commands to a server
 
@@ -3156,19 +3869,25 @@
     """
     opts = pycompat.byteskwargs(opts)
 
-    if opts['localssh'] and not repo:
-        raise error.Abort(_('--localssh requires a repository'))
-
-    if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
-        raise error.Abort(_('invalid value for --peer'),
-                          hint=_('valid values are "raw", "ssh1", and "ssh2"'))
-
-    if path and opts['localssh']:
-        raise error.Abort(_('cannot specify --localssh with an explicit '
-                            'path'))
+    if opts[b'localssh'] and not repo:
+        raise error.Abort(_(b'--localssh requires a repository'))
+
+    if opts[b'peer'] and opts[b'peer'] not in (
+        b'raw',
+        b'http2',
+        b'ssh1',
+        b'ssh2',
+    ):
+        raise error.Abort(
+            _(b'invalid value for --peer'),
+            hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
+        )
+
+    if path and opts[b'localssh']:
+        raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
 
     if ui.interactive():
-        ui.write(_('(waiting for commands on stdin)\n'))
+        ui.write(_(b'(waiting for commands on stdin)\n'))
 
     blocks = list(_parsewirelangblocks(ui.fin))
 
@@ -3178,60 +3897,91 @@
     stderr = None
     opener = None
 
-    if opts['localssh']:
+    if opts[b'localssh']:
         # We start the SSH server in its own process so there is process
         # separation. This prevents a whole class of potential bugs around
         # shared state from interfering with server operation.
         args = procutil.hgcmd() + [
-            '-R', repo.root,
-            'debugserve', '--sshstdio',
+            b'-R',
+            repo.root,
+            b'debugserve',
+            b'--sshstdio',
         ]
-        proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
-                                stdin=subprocess.PIPE,
-                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                                bufsize=0)
+        proc = subprocess.Popen(
+            pycompat.rapply(procutil.tonativestr, args),
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            bufsize=0,
+        )
 
         stdin = proc.stdin
         stdout = proc.stdout
         stderr = proc.stderr
 
         # We turn the pipes into observers so we can log I/O.
-        if ui.verbose or opts['peer'] == 'raw':
-            stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
-                                               logdata=True)
-            stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
-                                                logdata=True)
-            stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
-                                                logdata=True)
+        if ui.verbose or opts[b'peer'] == b'raw':
+            stdin = util.makeloggingfileobject(
+                ui, proc.stdin, b'i', logdata=True
+            )
+            stdout = util.makeloggingfileobject(
+                ui, proc.stdout, b'o', logdata=True
+            )
+            stderr = util.makeloggingfileobject(
+                ui, proc.stderr, b'e', logdata=True
+            )
 
         # --localssh also implies the peer connection settings.
 
-        url = 'ssh://localserver'
-        autoreadstderr = not opts['noreadstderr']
-
-        if opts['peer'] == 'ssh1':
-            ui.write(_('creating ssh peer for wire protocol version 1\n'))
-            peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
-                                     None, autoreadstderr=autoreadstderr)
-        elif opts['peer'] == 'ssh2':
-            ui.write(_('creating ssh peer for wire protocol version 2\n'))
-            peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
-                                     None, autoreadstderr=autoreadstderr)
-        elif opts['peer'] == 'raw':
-            ui.write(_('using raw connection to peer\n'))
+        url = b'ssh://localserver'
+        autoreadstderr = not opts[b'noreadstderr']
+
+        if opts[b'peer'] == b'ssh1':
+            ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
+            peer = sshpeer.sshv1peer(
+                ui,
+                url,
+                proc,
+                stdin,
+                stdout,
+                stderr,
+                None,
+                autoreadstderr=autoreadstderr,
+            )
+        elif opts[b'peer'] == b'ssh2':
+            ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
+            peer = sshpeer.sshv2peer(
+                ui,
+                url,
+                proc,
+                stdin,
+                stdout,
+                stderr,
+                None,
+                autoreadstderr=autoreadstderr,
+            )
+        elif opts[b'peer'] == b'raw':
+            ui.write(_(b'using raw connection to peer\n'))
             peer = None
         else:
-            ui.write(_('creating ssh peer from handshake results\n'))
-            peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
-                                    autoreadstderr=autoreadstderr)
+            ui.write(_(b'creating ssh peer from handshake results\n'))
+            peer = sshpeer.makepeer(
+                ui,
+                url,
+                proc,
+                stdin,
+                stdout,
+                stderr,
+                autoreadstderr=autoreadstderr,
+            )
 
     elif path:
         # We bypass hg.peer() so we can proxy the sockets.
         # TODO consider not doing this because we skip
         # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
         u = util.url(path)
-        if u.scheme != 'http':
-            raise error.Abort(_('only http:// paths are currently supported'))
+        if u.scheme != b'http':
+            raise error.Abort(_(b'only http:// paths are currently supported'))
 
         url, authinfo = u.authinfo()
         openerargs = {
@@ -3240,14 +3990,13 @@
 
         # Turn pipes/sockets into observers so we can log I/O.
         if ui.verbose:
-            openerargs.update({
-                r'loggingfh': ui,
-                r'loggingname': b's',
-                r'loggingopts': {
-                    r'logdata': True,
-                    r'logdataapis': False,
-                },
-            })
+            openerargs.update(
+                {
+                    r'loggingfh': ui,
+                    r'loggingname': b's',
+                    r'loggingopts': {r'logdata': True, r'logdataapis': False,},
+                }
+            )
 
         if ui.debugflag:
             openerargs[r'loggingopts'][r'logdataapis'] = True
@@ -3255,82 +4004,94 @@
         # Don't send default headers when in raw mode. This allows us to
         # bypass most of the behavior of our URL handling code so we can
         # have near complete control over what's sent on the wire.
-        if opts['peer'] == 'raw':
+        if opts[b'peer'] == b'raw':
             openerargs[r'sendaccept'] = False
 
         opener = urlmod.opener(ui, authinfo, **openerargs)
 
-        if opts['peer'] == 'http2':
-            ui.write(_('creating http peer for wire protocol version 2\n'))
+        if opts[b'peer'] == b'http2':
+            ui.write(_(b'creating http peer for wire protocol version 2\n'))
             # We go through makepeer() because we need an API descriptor for
             # the peer instance to be useful.
-            with ui.configoverride({
-                ('experimental', 'httppeer.advertise-v2'): True}):
-                if opts['nologhandshake']:
+            with ui.configoverride(
+                {(b'experimental', b'httppeer.advertise-v2'): True}
+            ):
+                if opts[b'nologhandshake']:
                     ui.pushbuffer()
 
                 peer = httppeer.makepeer(ui, path, opener=opener)
 
-                if opts['nologhandshake']:
+                if opts[b'nologhandshake']:
                     ui.popbuffer()
 
             if not isinstance(peer, httppeer.httpv2peer):
-                raise error.Abort(_('could not instantiate HTTP peer for '
-                                    'wire protocol version 2'),
-                                  hint=_('the server may not have the feature '
-                                         'enabled or is not allowing this '
-                                         'client version'))
-
-        elif opts['peer'] == 'raw':
-            ui.write(_('using raw connection to peer\n'))
+                raise error.Abort(
+                    _(
+                        b'could not instantiate HTTP peer for '
+                        b'wire protocol version 2'
+                    ),
+                    hint=_(
+                        b'the server may not have the feature '
+                        b'enabled or is not allowing this '
+                        b'client version'
+                    ),
+                )
+
+        elif opts[b'peer'] == b'raw':
+            ui.write(_(b'using raw connection to peer\n'))
             peer = None
-        elif opts['peer']:
-            raise error.Abort(_('--peer %s not supported with HTTP peers') %
-                              opts['peer'])
+        elif opts[b'peer']:
+            raise error.Abort(
+                _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
+            )
         else:
             peer = httppeer.makepeer(ui, path, opener=opener)
 
         # We /could/ populate stdin/stdout with sock.makefile()...
     else:
-        raise error.Abort(_('unsupported connection configuration'))
+        raise error.Abort(_(b'unsupported connection configuration'))
 
     batchedcommands = None
 
     # Now perform actions based on the parsed wire language instructions.
     for action, lines in blocks:
-        if action in ('raw', 'raw+'):
+        if action in (b'raw', b'raw+'):
             if not stdin:
-                raise error.Abort(_('cannot call raw/raw+ on this peer'))
+                raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
 
             # Concatenate the data together.
-            data = ''.join(l.lstrip() for l in lines)
+            data = b''.join(l.lstrip() for l in lines)
             data = stringutil.unescapestr(data)
             stdin.write(data)
 
-            if action == 'raw+':
+            if action == b'raw+':
                 stdin.flush()
-        elif action == 'flush':
+        elif action == b'flush':
             if not stdin:
-                raise error.Abort(_('cannot call flush on this peer'))
+                raise error.Abort(_(b'cannot call flush on this peer'))
             stdin.flush()
-        elif action.startswith('command'):
+        elif action.startswith(b'command'):
             if not peer:
-                raise error.Abort(_('cannot send commands unless peer instance '
-                                    'is available'))
-
-            command = action.split(' ', 1)[1]
+                raise error.Abort(
+                    _(
+                        b'cannot send commands unless peer instance '
+                        b'is available'
+                    )
+                )
+
+            command = action.split(b' ', 1)[1]
 
             args = {}
             for line in lines:
                 # We need to allow empty values.
-                fields = line.lstrip().split(' ', 1)
+                fields = line.lstrip().split(b' ', 1)
                 if len(fields) == 1:
                     key = fields[0]
-                    value = ''
+                    value = b''
                 else:
                     key, value = fields
 
-                if value.startswith('eval:'):
+                if value.startswith(b'eval:'):
                     value = stringutil.evalpythonliteral(value[5:])
                 else:
                     value = stringutil.unescapestr(value)
@@ -3341,54 +4102,68 @@
                 batchedcommands.append((command, args))
                 continue
 
-            ui.status(_('sending %s command\n') % command)
-
-            if 'PUSHFILE' in args:
-                with open(args['PUSHFILE'], r'rb') as fh:
-                    del args['PUSHFILE']
-                    res, output = peer._callpush(command, fh,
-                                                 **pycompat.strkwargs(args))
-                    ui.status(_('result: %s\n') % stringutil.escapestr(res))
-                    ui.status(_('remote output: %s\n') %
-                              stringutil.escapestr(output))
+            ui.status(_(b'sending %s command\n') % command)
+
+            if b'PUSHFILE' in args:
+                with open(args[b'PUSHFILE'], r'rb') as fh:
+                    del args[b'PUSHFILE']
+                    res, output = peer._callpush(
+                        command, fh, **pycompat.strkwargs(args)
+                    )
+                    ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
+                    ui.status(
+                        _(b'remote output: %s\n') % stringutil.escapestr(output)
+                    )
             else:
                 with peer.commandexecutor() as e:
                     res = e.callcommand(command, args).result()
 
                 if isinstance(res, wireprotov2peer.commandresponse):
                     val = res.objects()
-                    ui.status(_('response: %s\n') %
-                              stringutil.pprint(val, bprefix=True, indent=2))
+                    ui.status(
+                        _(b'response: %s\n')
+                        % stringutil.pprint(val, bprefix=True, indent=2)
+                    )
                 else:
-                    ui.status(_('response: %s\n') %
-                              stringutil.pprint(res, bprefix=True, indent=2))
-
-        elif action == 'batchbegin':
+                    ui.status(
+                        _(b'response: %s\n')
+                        % stringutil.pprint(res, bprefix=True, indent=2)
+                    )
+
+        elif action == b'batchbegin':
             if batchedcommands is not None:
-                raise error.Abort(_('nested batchbegin not allowed'))
+                raise error.Abort(_(b'nested batchbegin not allowed'))
 
             batchedcommands = []
-        elif action == 'batchsubmit':
+        elif action == b'batchsubmit':
             # There is a batching API we could go through. But it would be
             # difficult to normalize requests into function calls. It is easier
             # to bypass this layer and normalize to commands + args.
-            ui.status(_('sending batch with %d sub-commands\n') %
-                      len(batchedcommands))
+            ui.status(
+                _(b'sending batch with %d sub-commands\n')
+                % len(batchedcommands)
+            )
             for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
-                ui.status(_('response #%d: %s\n') %
-                          (i, stringutil.escapestr(chunk)))
+                ui.status(
+                    _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
+                )
 
             batchedcommands = None
 
-        elif action.startswith('httprequest '):
+        elif action.startswith(b'httprequest '):
             if not opener:
-                raise error.Abort(_('cannot use httprequest without an HTTP '
-                                    'peer'))
-
-            request = action.split(' ', 2)
+                raise error.Abort(
+                    _(b'cannot use httprequest without an HTTP peer')
+                )
+
+            request = action.split(b' ', 2)
             if len(request) != 3:
-                raise error.Abort(_('invalid httprequest: expected format is '
-                                    '"httprequest <method> <path>'))
+                raise error.Abort(
+                    _(
+                        b'invalid httprequest: expected format is '
+                        b'"httprequest <method> <path>'
+                    )
+                )
 
             method, httppath = request[1:]
             headers = {}
@@ -3405,16 +4180,18 @@
                     continue
 
                 if line.startswith(b'BODYFILE '):
-                    with open(line.split(b' ', 1), 'rb') as fh:
+                    with open(line.split(b' ', 1)[1], r'rb') as fh:
                         body = fh.read()
                 elif line.startswith(b'frame '):
                     frame = wireprotoframing.makeframefromhumanstring(
-                        line[len(b'frame '):])
+                        line[len(b'frame ') :]
+                    )
 
                     frames.append(frame)
                 else:
-                    raise error.Abort(_('unknown argument to httprequest: %s') %
-                                      line)
+                    raise error.Abort(
+                        _(b'unknown argument to httprequest: %s') % line
+                    )
 
             url = path + httppath
 
@@ -3438,44 +4215,48 @@
 
             ct = res.headers.get(r'Content-Type')
             if ct == r'application/mercurial-cbor':
-                ui.write(_('cbor> %s\n') %
-                         stringutil.pprint(cborutil.decodeall(body),
-                                           bprefix=True,
-                                           indent=2))
-
-        elif action == 'close':
+                ui.write(
+                    _(b'cbor> %s\n')
+                    % stringutil.pprint(
+                        cborutil.decodeall(body), bprefix=True, indent=2
+                    )
+                )
+
+        elif action == b'close':
             peer.close()
-        elif action == 'readavailable':
+        elif action == b'readavailable':
             if not stdout or not stderr:
-                raise error.Abort(_('readavailable not available on this peer'))
+                raise error.Abort(
+                    _(b'readavailable not available on this peer')
+                )
 
             stdin.close()
             stdout.read()
             stderr.read()
 
-        elif action == 'readline':
+        elif action == b'readline':
             if not stdout:
-                raise error.Abort(_('readline not available on this peer'))
+                raise error.Abort(_(b'readline not available on this peer'))
             stdout.readline()
-        elif action == 'ereadline':
+        elif action == b'ereadline':
             if not stderr:
-                raise error.Abort(_('ereadline not available on this peer'))
+                raise error.Abort(_(b'ereadline not available on this peer'))
             stderr.readline()
-        elif action.startswith('read '):
-            count = int(action.split(' ', 1)[1])
+        elif action.startswith(b'read '):
+            count = int(action.split(b' ', 1)[1])
             if not stdout:
-                raise error.Abort(_('read not available on this peer'))
+                raise error.Abort(_(b'read not available on this peer'))
             stdout.read(count)
-        elif action.startswith('eread '):
-            count = int(action.split(' ', 1)[1])
+        elif action.startswith(b'eread '):
+            count = int(action.split(b' ', 1)[1])
             if not stderr:
-                raise error.Abort(_('eread not available on this peer'))
+                raise error.Abort(_(b'eread not available on this peer'))
             stderr.read(count)
         else:
-            raise error.Abort(_('unknown action: %s') % action)
+            raise error.Abort(_(b'unknown action: %s') % action)
 
     if batchedcommands is not None:
-        raise error.Abort(_('unclosed "batchbegin" request'))
+        raise error.Abort(_(b'unclosed "batchbegin" request'))
 
     if peer:
         peer.close()
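
The wire language consumed by ``debugwireproto`` is indentation-driven: an
unindented line names an action, and the indented lines beneath it form that
action's argument block. A minimal sketch of that parsing shape (the
``parseblocks`` helper and the sample input below are invented for
illustration; the real ``_parsewirelangblocks`` additionally normalizes the
indentation inside a block)::

    import io

    def parseblocks(fh):
        # An unindented line names an action; indented lines below it
        # form the action's argument block. Blank lines are skipped.
        action = None
        block = []
        for line in fh:
            line = line.rstrip()
            if not line:
                continue
            if line != line.lstrip():
                # Indented: must belong to the block of a prior action.
                if action is None:
                    raise ValueError('indented line outside of block')
                block.append(line.lstrip())
            else:
                # Unindented: emit the previous action, start a new one.
                if action is not None:
                    yield action, block
                action, block = line, []
        if action is not None:
            yield action, block

    fh = io.StringIO('command heads\n    key value\nflush\n')
    print(list(parseblocks(fh)))
    # -> [('command heads', ['key value']), ('flush', [])]
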
--- a/mercurial/destutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/destutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,13 +8,8 @@
 from __future__ import absolute_import
 
 from .i18n import _
-from . import (
-    bookmarks,
-    error,
-    obsutil,
-    scmutil,
-    stack
-)
+from . import bookmarks, error, obsutil, scmutil, stack
+
 
 def orphanpossibledestination(repo, rev):
     """Return all changesets that may be a new parent for orphan `rev`.
@@ -49,6 +44,7 @@
                         dest.add(dr)
     return dest
 
+
 def _destupdateobs(repo, clean):
     """decide of an update destination from obsolescence markers"""
     node = None
@@ -80,11 +76,12 @@
 
             # get the max revision for the given successors set,
             # i.e. the 'tip' of a set
-            node = repo.revs('max(%ln)', successors).first()
+            node = repo.revs(b'max(%ln)', successors).first()
             if bookmarks.isactivewdirparent(repo):
-                movemark = repo['.'].node()
+                movemark = repo[b'.'].node()
     return node, movemark, None
 
+
 def _destupdatebook(repo, clean):
     """decide on an update destination from active bookmark"""
     # we also move the active bookmark, if any
@@ -94,6 +91,7 @@
         node = repo._bookmarks[activemark]
     return node, movemark, activemark
 
+
 def _destupdatebranch(repo, clean):
     """decide on an update destination from current branch
 
@@ -104,22 +102,23 @@
     currentbranch = wc.branch()
 
     if clean:
-        currentbranch = repo['.'].branch()
+        currentbranch = repo[b'.'].branch()
 
     if currentbranch in repo.branchmap():
         heads = repo.branchheads(currentbranch)
         if heads:
-            node = repo.revs('max(.::(%ln))', heads).first()
+            node = repo.revs(b'max(.::(%ln))', heads).first()
         if bookmarks.isactivewdirparent(repo):
-            movemark = repo['.'].node()
-    elif currentbranch == 'default' and not wc.p1():
+            movemark = repo[b'.'].node()
+    elif currentbranch == b'default' and not wc.p1():
         # "null" parent belongs to "default" branch, but it doesn't exist, so
         # update to the tipmost non-closed branch head
-        node = repo.revs('max(head() and not closed())').first()
+        node = repo.revs(b'max(head() and not closed())').first()
     else:
-        node = repo['.'].node()
+        node = repo[b'.'].node()
     return node, movemark, None
 
+
 def _destupdatebranchfallback(repo, clean):
     """decide on an update destination from closed heads in current branch"""
     wc = repo[None]
@@ -128,27 +127,31 @@
     if currentbranch in repo.branchmap():
         # here, all descendant branch heads are closed
         heads = repo.branchheads(currentbranch, closed=True)
-        assert heads, "any branch has at least one head"
-        node = repo.revs('max(.::(%ln))', heads).first()
-        assert node is not None, ("any revision has at least "
-                                  "one descendant branch head")
+        assert heads, b"any branch has at least one head"
+        node = repo.revs(b'max(.::(%ln))', heads).first()
+        assert (
+            node is not None
+        ), b"any revision has at least one descendant branch head"
         if bookmarks.isactivewdirparent(repo):
-            movemark = repo['.'].node()
+            movemark = repo[b'.'].node()
     else:
         # here, no "default" branch, and all branches are closed
-        node = repo.lookup('tip')
-        assert node is not None, "'tip' exists even in empty repository"
+        node = repo.lookup(b'tip')
+        assert node is not None, b"'tip' exists even in empty repository"
     return node, movemark, None
 
+
 # order in which each step should be evaluated
 # steps are run until one finds a destination
-destupdatesteps = ['evolution', 'bookmark', 'branch', 'branchfallback']
+destupdatesteps = [b'evolution', b'bookmark', b'branch', b'branchfallback']
 # mapping to make it easier for extensions to override steps.
-destupdatestepmap = {'evolution': _destupdateobs,
-                     'bookmark': _destupdatebook,
-                     'branch': _destupdatebranch,
-                     'branchfallback': _destupdatebranchfallback,
-                     }
+destupdatestepmap = {
+    b'evolution': _destupdateobs,
+    b'bookmark': _destupdatebook,
+    b'branch': _destupdatebranch,
+    b'branchfallback': _destupdatebranchfallback,
+}
+
 
 def destupdate(repo, clean=False):
     """destination for bare update operation
@@ -170,102 +173,111 @@
 
     return rev, movemark, activemark
 
+
 msgdestmerge = {
     # too many matching divergent bookmarks
-    'toomanybookmarks':
-        {'merge':
-            (_("multiple matching bookmarks to merge -"
-               " please merge with an explicit rev or bookmark"),
-             _("run 'hg heads' to see all heads")),
-         'rebase':
-            (_("multiple matching bookmarks to rebase -"
-               " please rebase to an explicit rev or bookmark"),
-             _("run 'hg heads' to see all heads")),
-        },
+    b'toomanybookmarks': {
+        b'merge': (
+            _(
+                b"multiple matching bookmarks to merge -"
+                b" please merge with an explicit rev or bookmark"
+            ),
+            _(b"run 'hg heads' to see all heads, specify rev with -r"),
+        ),
+        b'rebase': (
+            _(
+                b"multiple matching bookmarks to rebase -"
+                b" please rebase to an explicit rev or bookmark"
+            ),
+            _(b"run 'hg heads' to see all heads, specify destination with -d"),
+        ),
+    },
     # no other matching divergent bookmark
-    'nootherbookmarks':
-        {'merge':
-            (_("no matching bookmark to merge - "
-               "please merge with an explicit rev or bookmark"),
-             _("run 'hg heads' to see all heads")),
-         'rebase':
-            (_("no matching bookmark to rebase - "
-               "please rebase to an explicit rev or bookmark"),
-             _("run 'hg heads' to see all heads")),
-        },
+    b'nootherbookmarks': {
+        b'merge': (
+            _(
+                b"no matching bookmark to merge - "
+                b"please merge with an explicit rev or bookmark"
+            ),
+            _(b"run 'hg heads' to see all heads, specify rev with -r"),
+        ),
+        b'rebase': (
+            _(
+                b"no matching bookmark to rebase - "
+                b"please rebase to an explicit rev or bookmark"
+            ),
+            _(b"run 'hg heads' to see all heads, specify destination with -d"),
+        ),
+    },
     # branch has too many unbookmarked heads, no obvious destination
-    'toomanyheads':
-        {'merge':
-            (_("branch '%s' has %d heads - please merge with an explicit rev"),
-             _("run 'hg heads .' to see heads")),
-         'rebase':
-            (_("branch '%s' has %d heads - please rebase to an explicit rev"),
-             _("run 'hg heads .' to see heads")),
-        },
+    b'toomanyheads': {
+        b'merge': (
+            _(b"branch '%s' has %d heads - please merge with an explicit rev"),
+            _(b"run 'hg heads .' to see heads, specify rev with -r"),
+        ),
+        b'rebase': (
+            _(b"branch '%s' has %d heads - please rebase to an explicit rev"),
+            _(b"run 'hg heads .' to see heads, specify destination with -d"),
+        ),
+    },
     # branch has no other unbookmarked heads
-    'bookmarkedheads':
-        {'merge':
-            (_("heads are bookmarked - please merge with an explicit rev"),
-             _("run 'hg heads' to see all heads")),
-         'rebase':
-            (_("heads are bookmarked - please rebase to an explicit rev"),
-             _("run 'hg heads' to see all heads")),
-        },
+    b'bookmarkedheads': {
+        b'merge': (
+            _(b"heads are bookmarked - please merge with an explicit rev"),
+            _(b"run 'hg heads' to see all heads, specify rev with -r"),
+        ),
+        b'rebase': (
+            _(b"heads are bookmarked - please rebase to an explicit rev"),
+            _(b"run 'hg heads' to see all heads, specify destination with -d"),
+        ),
+    },
     # branch has just a single head, but there are other branches
-    'nootherbranchheads':
-        {'merge':
-            (_("branch '%s' has one head - please merge with an explicit rev"),
-             _("run 'hg heads' to see all heads")),
-         'rebase':
-            (_("branch '%s' has one head - please rebase to an explicit rev"),
-             _("run 'hg heads' to see all heads")),
-        },
+    b'nootherbranchheads': {
+        b'merge': (
+            _(b"branch '%s' has one head - please merge with an explicit rev"),
+            _(b"run 'hg heads' to see all heads, specify rev with -r"),
+        ),
+        b'rebase': (
+            _(b"branch '%s' has one head - please rebase to an explicit rev"),
+            _(b"run 'hg heads' to see all heads, specify destination with -d"),
+        ),
+    },
     # repository has a single head
-    'nootherheads':
-        {'merge':
-            (_('nothing to merge'),
-            None),
-         'rebase':
-            (_('nothing to rebase'),
-            None),
-        },
+    b'nootherheads': {
+        b'merge': (_(b'nothing to merge'), None),
+        b'rebase': (_(b'nothing to rebase'), None),
+    },
     # repository has a single head and we are not on it
-    'nootherheadsbehind':
-        {'merge':
-            (_('nothing to merge'),
-             _("use 'hg update' instead")),
-         'rebase':
-            (_('nothing to rebase'),
-             _("use 'hg update' instead")),
-        },
+    b'nootherheadsbehind': {
+        b'merge': (_(b'nothing to merge'), _(b"use 'hg update' instead")),
+        b'rebase': (_(b'nothing to rebase'), _(b"use 'hg update' instead")),
+    },
     # We are not on a head
-    'notatheads':
-        {'merge':
-            (_('working directory not at a head revision'),
-             _("use 'hg update' or merge with an explicit revision")),
-         'rebase':
-            (_('working directory not at a head revision'),
-             _("use 'hg update' or rebase to an explicit revision"))
-        },
-    'emptysourceset':
-        {'merge':
-            (_('source set is empty'),
-             None),
-         'rebase':
-            (_('source set is empty'),
-             None),
-        },
-    'multiplebranchessourceset':
-        {'merge':
-            (_('source set is rooted in multiple branches'),
-             None),
-         'rebase':
-            (_('rebaseset is rooted in multiple named branches'),
-             _('specify an explicit destination with --dest')),
-        },
-    }
+    b'notatheads': {
+        b'merge': (
+            _(b'working directory not at a head revision'),
+            _(b"use 'hg update' or merge with an explicit revision"),
+        ),
+        b'rebase': (
+            _(b'working directory not at a head revision'),
+            _(b"use 'hg update' or rebase to an explicit revision"),
+        ),
+    },
+    b'emptysourceset': {
+        b'merge': (_(b'source set is empty'), None),
+        b'rebase': (_(b'source set is empty'), None),
+    },
+    b'multiplebranchessourceset': {
+        b'merge': (_(b'source set is rooted in multiple branches'), None),
+        b'rebase': (
+            _(b'rebaseset is rooted in multiple named branches'),
+            _(b'specify an explicit destination with --dest'),
+        ),
+    },
+}
 
-def _destmergebook(repo, action='merge', sourceset=None, destspace=None):
+
+def _destmergebook(repo, action=b'merge', sourceset=None, destspace=None):
     """find merge destination in the active bookmark case"""
     node = None
     bmheads = bookmarks.headsforactive(repo)
@@ -276,16 +288,18 @@
         else:
             node = bmheads[0]
     elif len(bmheads) > 2:
-        msg, hint = msgdestmerge['toomanybookmarks'][action]
+        msg, hint = msgdestmerge[b'toomanybookmarks'][action]
         raise error.ManyMergeDestAbort(msg, hint=hint)
     elif len(bmheads) <= 1:
-        msg, hint = msgdestmerge['nootherbookmarks'][action]
+        msg, hint = msgdestmerge[b'nootherbookmarks'][action]
         raise error.NoMergeDestAbort(msg, hint=hint)
     assert node is not None
     return node
 
-def _destmergebranch(repo, action='merge', sourceset=None, onheadcheck=True,
-                     destspace=None):
+
+def _destmergebranch(
+    repo, action=b'merge', sourceset=None, onheadcheck=True, destspace=None
+):
     """find merge destination based on branch heads"""
     node = None
 
@@ -293,36 +307,36 @@
         sourceset = [repo[repo.dirstate.p1()].rev()]
         branch = repo.dirstate.branch()
     elif not sourceset:
-        msg, hint = msgdestmerge['emptysourceset'][action]
+        msg, hint = msgdestmerge[b'emptysourceset'][action]
         raise error.NoMergeDestAbort(msg, hint=hint)
     else:
         branch = None
-        for ctx in repo.set('roots(%ld::%ld)', sourceset, sourceset):
+        for ctx in repo.set(b'roots(%ld::%ld)', sourceset, sourceset):
             if branch is not None and ctx.branch() != branch:
-                msg, hint = msgdestmerge['multiplebranchessourceset'][action]
+                msg, hint = msgdestmerge[b'multiplebranchessourceset'][action]
                 raise error.ManyMergeDestAbort(msg, hint=hint)
             branch = ctx.branch()
 
     bheads = repo.branchheads(branch)
-    onhead = repo.revs('%ld and %ln', sourceset, bheads)
+    onhead = repo.revs(b'%ld and %ln', sourceset, bheads)
     if onheadcheck and not onhead:
         # Case A: working copy is not on a head. (merge only)
         #
         # This is probably a user mistake. We bail out, pointing at 'hg update'
         if len(repo.heads()) <= 1:
-            msg, hint = msgdestmerge['nootherheadsbehind'][action]
+            msg, hint = msgdestmerge[b'nootherheadsbehind'][action]
         else:
-            msg, hint = msgdestmerge['notatheads'][action]
+            msg, hint = msgdestmerge[b'notatheads'][action]
         raise error.Abort(msg, hint=hint)
     # remove heads descendants of source from the set
-    bheads = list(repo.revs('%ln - (%ld::)', bheads, sourceset))
+    bheads = list(repo.revs(b'%ln - (%ld::)', bheads, sourceset))
     # filters out bookmarked heads
-    nbhs = list(repo.revs('%ld - bookmark()', bheads))
+    nbhs = list(repo.revs(b'%ld - bookmark()', bheads))
 
     if destspace is not None:
         # restrict search space
         # used in the 'hg pull --rebase' case, see issue 5214.
-        nbhs = list(repo.revs('%ld and %ld', destspace, nbhs))
+        nbhs = list(repo.revs(b'%ld and %ld', destspace, nbhs))
 
     if len(nbhs) > 1:
         # Case B: There is more than 1 other anonymous head
@@ -330,7 +344,7 @@
         # This means that there will be more than 1 candidate. This is
         # ambiguous. We abort, asking the user to pick an explicit destination
         # instead.
-        msg, hint = msgdestmerge['toomanyheads'][action]
+        msg, hint = msgdestmerge[b'toomanyheads'][action]
         msg %= (branch, len(bheads) + 1)
         raise error.ManyMergeDestAbort(msg, hint=hint)
     elif not nbhs:
@@ -339,24 +353,26 @@
         # This means that there is no natural candidate to merge with.
         # We abort, with various messages for various cases.
         if bheads:
-            msg, hint = msgdestmerge['bookmarkedheads'][action]
+            msg, hint = msgdestmerge[b'bookmarkedheads'][action]
         elif len(repo.heads()) > 1:
-            msg, hint = msgdestmerge['nootherbranchheads'][action]
+            msg, hint = msgdestmerge[b'nootherbranchheads'][action]
             msg %= branch
         elif not onhead:
             # if 'onheadcheck == False' (rebase case),
             # this was not caught in Case A.
-            msg, hint = msgdestmerge['nootherheadsbehind'][action]
+            msg, hint = msgdestmerge[b'nootherheadsbehind'][action]
         else:
-            msg, hint = msgdestmerge['nootherheads'][action]
+            msg, hint = msgdestmerge[b'nootherheads'][action]
         raise error.NoMergeDestAbort(msg, hint=hint)
     else:
         node = nbhs[0]
     assert node is not None
     return node
 
-def destmerge(repo, action='merge', sourceset=None, onheadcheck=True,
-              destspace=None):
+
+def destmerge(
+    repo, action=b'merge', sourceset=None, onheadcheck=True, destspace=None
+):
     """return the default destination for a merge
 
     (or raise exception about why it can't pick one)
@@ -366,23 +382,32 @@
     # destspace is here to work around issues with `hg pull --rebase` see
     # issue5214 for details
     if repo._activebookmark:
-        node = _destmergebook(repo, action=action, sourceset=sourceset,
-                              destspace=destspace)
+        node = _destmergebook(
+            repo, action=action, sourceset=sourceset, destspace=destspace
+        )
     else:
-        node = _destmergebranch(repo, action=action, sourceset=sourceset,
-                                onheadcheck=onheadcheck, destspace=destspace)
+        node = _destmergebranch(
+            repo,
+            action=action,
+            sourceset=sourceset,
+            onheadcheck=onheadcheck,
+            destspace=destspace,
+        )
     return repo[node].rev()
 
+
 def desthistedit(ui, repo):
     """Default base revision to edit for `hg histedit`."""
-    default = ui.config('histedit', 'defaultrev')
+    default = ui.config(b'histedit', b'defaultrev')
 
     if default is None:
         revs = stack.getstack(repo)
     elif default:
         revs = scmutil.revrange(repo, [default])
     else:
-        raise error.Abort(_("config option histedit.defaultrev can't be empty"))
+        raise error.Abort(
+            _(b"config option histedit.defaultrev can't be empty")
+        )
 
     if revs:
         # Take the first revision of the revset as the root
@@ -390,25 +415,28 @@
 
     return None
 
+
 def stackbase(ui, repo):
     revs = stack.getstack(repo)
     return revs.first() if revs else None
 
+
 def _statusotherbook(ui, repo):
     bmheads = bookmarks.headsforactive(repo)
     curhead = repo._bookmarks[repo._activebookmark]
-    if repo.revs('%n and parents()', curhead):
+    if repo.revs(b'%n and parents()', curhead):
         # we are on the active bookmark
         bmheads = [b for b in bmheads if curhead != b]
         if bmheads:
-            msg = _('%i other divergent bookmarks for "%s"\n')
+            msg = _(b'%i other divergent bookmarks for "%s"\n')
             ui.status(msg % (len(bmheads), repo._activebookmark))
 
+
 def _statusotherbranchheads(ui, repo):
     currentbranch = repo.dirstate.branch()
     allheads = repo.branchheads(currentbranch, closed=True)
     heads = repo.branchheads(currentbranch)
-    if repo.revs('%ln and parents()', allheads):
+    if repo.revs(b'%ln and parents()', allheads):
         # we are on a head, even though it might be closed
         #
         #  on closed otherheads
@@ -418,24 +446,38 @@
         #      x        0       there is only one non-closed branch head
         #               N       there are some non-closed branch heads
         #  ========= ==========
-        otherheads = repo.revs('%ln - parents()', heads)
-        if repo['.'].closesbranch():
-            ui.warn(_('no open descendant heads on branch "%s", '
-                        'updating to a closed head\n') %
-                      (currentbranch))
+        otherheads = repo.revs(b'%ln - parents()', heads)
+        if repo[b'.'].closesbranch():
+            ui.warn(
+                _(
+                    b'no open descendant heads on branch "%s", '
+                    b'updating to a closed head\n'
+                )
+                % currentbranch
+            )
             if otherheads:
-                ui.warn(_("(committing will reopen the head, "
-                            "use 'hg heads .' to see %i other heads)\n") %
-                          (len(otherheads)))
+                ui.warn(
+                    _(
+                        b"(committing will reopen the head, "
+                        b"use 'hg heads .' to see %i other heads)\n"
+                    )
+                    % (len(otherheads))
+                )
             else:
-                ui.warn(_('(committing will reopen branch "%s")\n') %
-                          (currentbranch))
+                ui.warn(
+                    _(b'(committing will reopen branch "%s")\n') % currentbranch
+                )
         elif otherheads:
-            curhead = repo['.']
-            ui.status(_('updated to "%s: %s"\n') % (curhead,
-                                    curhead.description().split('\n')[0]))
-            ui.status(_('%i other heads for branch "%s"\n') %
-                      (len(otherheads), currentbranch))
+            curhead = repo[b'.']
+            ui.status(
+                _(b'updated to "%s: %s"\n')
+                % (curhead, curhead.description().split(b'\n')[0])
+            )
+            ui.status(
+                _(b'%i other heads for branch "%s"\n')
+                % (len(otherheads), currentbranch)
+            )
+
 
 def statusotherdests(ui, repo):
     """Print message about other head"""
--- a/mercurial/diffhelper.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/diffhelper.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,6 +14,7 @@
     pycompat,
 )
 
+
 def addlines(fp, hunk, lena, lenb, a, b):
     """Read lines from fp into the hunk
 
@@ -30,38 +31,40 @@
         for i in pycompat.xrange(num):
             s = fp.readline()
             if not s:
-                raise error.ParseError(_('incomplete hunk'))
-            if s == "\\ No newline at end of file\n":
+                raise error.ParseError(_(b'incomplete hunk'))
+            if s == b"\\ No newline at end of file\n":
                 fixnewline(hunk, a, b)
                 continue
-            if s == '\n' or s == '\r\n':
+            if s == b'\n' or s == b'\r\n':
                 # Some patches may be missing the control char
                 # on empty lines. Supply a leading space.
-                s = ' ' + s
+                s = b' ' + s
             hunk.append(s)
-            if s.startswith('+'):
+            if s.startswith(b'+'):
                 b.append(s[1:])
-            elif s.startswith('-'):
+            elif s.startswith(b'-'):
                 a.append(s)
             else:
                 b.append(s[1:])
                 a.append(s)
 
+
 def fixnewline(hunk, a, b):
     """Fix up the last lines of a and b when the patch has no newline at EOF"""
     l = hunk[-1]
     # tolerate CRLF in last line
-    if l.endswith('\r\n'):
+    if l.endswith(b'\r\n'):
         hline = l[:-2]
     else:
         hline = l[:-1]
 
-    if hline.startswith((' ', '+')):
+    if hline.startswith((b' ', b'+')):
         b[-1] = hline[1:]
-    if hline.startswith((' ', '-')):
+    if hline.startswith((b' ', b'-')):
         a[-1] = hline
     hunk[-1] = hline
 
+
 def testhunk(a, b, bstart):
     """Compare the lines in a with the lines in b
 
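
``addlines`` distributes each hunk line between the two sides of the diff:
'+' lines go only to the new content ``b``, '-' lines stay only on the old
content ``a`` (keeping the control character), and context lines are copied
to both. A reduced model of that classification on a hand-written hunk (the
sample data is made up)::

    hunk = [b' unchanged\n', b'-removed\n', b'+added\n']
    a, b = [], []
    for s in hunk:
        if s.startswith(b'+'):
            b.append(s[1:])
        elif s.startswith(b'-'):
            a.append(s)
        else:
            # context line: old side keeps the leading control char
            a.append(s)
            b.append(s[1:])

    print(a)  # [b' unchanged\n', b'-removed\n']
    print(b)  # [b'unchanged\n', b'added\n']
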
--- a/mercurial/diffutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/diffutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,15 +16,33 @@
     pycompat,
 )
 
-def diffallopts(ui, opts=None, untrusted=False, section='diff',
-                configprefix=''):
+
+def diffallopts(
+    ui, opts=None, untrusted=False, section=b'diff', configprefix=b''
+):
     '''return diffopts with all features supported and parsed'''
-    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
-                           git=True, whitespace=True, formatchanging=True,
-                           configprefix=configprefix)
+    return difffeatureopts(
+        ui,
+        opts=opts,
+        untrusted=untrusted,
+        section=section,
+        git=True,
+        whitespace=True,
+        formatchanging=True,
+        configprefix=configprefix,
+    )
 
-def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
-                    whitespace=False, formatchanging=False, configprefix=''):
+
+def difffeatureopts(
+    ui,
+    opts=None,
+    untrusted=False,
+    section=b'diff',
+    git=False,
+    whitespace=False,
+    formatchanging=False,
+    configprefix=b'',
+):
     '''return diffopts with only opted-in features parsed
 
     Features:
@@ -33,6 +51,7 @@
     - formatchanging: options that will likely break or cause correctness issues
       with most diff parsers
     '''
+
     def get(key, name=None, getter=ui.configbool, forceplain=None):
         if opts:
             v = opts.get(key)
@@ -47,28 +66,30 @@
                 return v
         if forceplain is not None and ui.plain():
             return forceplain
-        return getter(section, configprefix + (name or key),
-                      untrusted=untrusted)
+        return getter(
+            section, configprefix + (name or key), untrusted=untrusted
+        )
 
     # core options, expected to be understood by every diff parser
     buildopts = {
-        'nodates': get('nodates'),
-        'showfunc': get('show_function', 'showfunc'),
-        'context': get('unified', getter=ui.config),
+        b'nodates': get(b'nodates'),
+        b'showfunc': get(b'show_function', b'showfunc'),
+        b'context': get(b'unified', getter=ui.config),
     }
-    buildopts['xdiff'] = ui.configbool('experimental', 'xdiff')
+    buildopts[b'xdiff'] = ui.configbool(b'experimental', b'xdiff')
 
     if git:
-        buildopts['git'] = get('git')
+        buildopts[b'git'] = get(b'git')
 
         # since this is in the experimental section, we need to call
         # ui.configbool directly
-        buildopts['showsimilarity'] = ui.configbool('experimental',
-                                                    'extendedheader.similarity')
+        buildopts[b'showsimilarity'] = ui.configbool(
+            b'experimental', b'extendedheader.similarity'
+        )
 
         # need to inspect the ui object instead of using get() since we want to
         # test for an int
-        hconf = ui.config('experimental', 'extendedheader.index')
+        hconf = ui.config(b'experimental', b'extendedheader.index')
         if hconf is not None:
             hlen = None
             try:
@@ -76,33 +97,40 @@
                 # word (e.g. short, full, none)
                 hlen = int(hconf)
                 if hlen < 0 or hlen > 40:
-                    msg = _("invalid length for extendedheader.index: '%d'\n")
+                    msg = _(b"invalid length for extendedheader.index: '%d'\n")
                     ui.warn(msg % hlen)
             except ValueError:
                 # default value
-                if hconf == 'short' or hconf == '':
+                if hconf == b'short' or hconf == b'':
                     hlen = 12
-                elif hconf == 'full':
+                elif hconf == b'full':
                     hlen = 40
-                elif hconf != 'none':
-                    msg = _("invalid value for extendedheader.index: '%s'\n")
+                elif hconf != b'none':
+                    msg = _(b"invalid value for extendedheader.index: '%s'\n")
                     ui.warn(msg % hconf)
             finally:
-                buildopts['index'] = hlen
+                buildopts[b'index'] = hlen
 
     if whitespace:
-        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
-        buildopts['ignorewsamount'] = get('ignore_space_change',
-                                          'ignorewsamount')
-        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
-                                            'ignoreblanklines')
-        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
+        buildopts[b'ignorews'] = get(b'ignore_all_space', b'ignorews')
+        buildopts[b'ignorewsamount'] = get(
+            b'ignore_space_change', b'ignorewsamount'
+        )
+        buildopts[b'ignoreblanklines'] = get(
+            b'ignore_blank_lines', b'ignoreblanklines'
+        )
+        buildopts[b'ignorewseol'] = get(b'ignore_space_at_eol', b'ignorewseol')
     if formatchanging:
-        buildopts['text'] = opts and opts.get('text')
-        binary = None if opts is None else opts.get('binary')
-        buildopts['nobinary'] = (not binary if binary is not None
-                                 else get('nobinary', forceplain=False))
-        buildopts['noprefix'] = get('noprefix', forceplain=False)
-        buildopts['worddiff'] = get('word_diff', 'word-diff', forceplain=False)
+        buildopts[b'text'] = opts and opts.get(b'text')
+        binary = None if opts is None else opts.get(b'binary')
+        buildopts[b'nobinary'] = (
+            not binary
+            if binary is not None
+            else get(b'nobinary', forceplain=False)
+        )
+        buildopts[b'noprefix'] = get(b'noprefix', forceplain=False)
+        buildopts[b'worddiff'] = get(
+            b'word_diff', b'word-diff', forceplain=False
+        )
 
     return mdiff.diffopts(**pycompat.strkwargs(buildopts))
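
The inner ``get()`` of ``difffeatureopts`` encodes a fixed lookup order: an
explicit command-line option wins whenever it is not None, and only then is
the configuration consulted. A stripped-down sketch of that fallback chain
(the ``ui.plain()``/``forceplain`` branch is omitted, and the option names
and config dict below are made up)::

    config = {('diff', 'showfunc'): True}

    def get(opts, key, name=None, default=False):
        # An opt explicitly set on the command line (even to False)
        # takes precedence; None means "not given", so fall through.
        if opts and opts.get(key) is not None:
            return opts[key]
        return config.get(('diff', name or key), default)

    print(get({'show_function': None}, 'show_function', 'showfunc'))   # True
    print(get({'show_function': False}, 'show_function', 'showfunc'))  # False
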
--- a/mercurial/dirstate.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/dirstate.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,6 +15,10 @@
 
 from .i18n import _
 from .node import nullid
+from .pycompat import delattr
+
+from hgdemandimport import tracing
+
 from . import (
     encoding,
     error,
@@ -27,25 +31,35 @@
     util,
 )
 
+from .interfaces import (
+    dirstate as intdirstate,
+    util as interfaceutil,
+)
+
 parsers = policy.importmod(r'parsers')
-dirstatemod = policy.importrust(r'dirstate', default=parsers)
+rustmod = policy.importrust(r'dirstate')
 
 propertycache = util.propertycache
 filecache = scmutil.filecache
-_rangemask = 0x7fffffff
+_rangemask = 0x7FFFFFFF
 
 dirstatetuple = parsers.dirstatetuple
 
+
 class repocache(filecache):
     """filecache for files in .hg/"""
+
     def join(self, obj, fname):
         return obj._opener.join(fname)
 
+
 class rootcache(filecache):
     """filecache for files in the repository root"""
+
     def join(self, obj, fname):
         return obj._join(fname)
 
+
 def _getfsnow(vfs):
     '''Get "now" timestamp on filesystem'''
     tmpfd, tmpname = vfs.mkstemp()
@@ -55,8 +69,9 @@
         os.close(tmpfd)
         vfs.unlink(tmpname)
 
+
+@interfaceutil.implementer(intdirstate.idirstate)
 class dirstate(object):
-
     def __init__(self, opener, ui, root, validate, sparsematchfn):
         '''Create a new dirstate object.
 
@@ -76,8 +91,8 @@
         self._ui = ui
         self._filecache = {}
         self._parentwriters = 0
-        self._filename = 'dirstate'
-        self._pendingfilename = '%s.pending' % self._filename
+        self._filename = b'dirstate'
+        self._pendingfilename = b'%s.pending' % self._filename
         self._plchangecallbacks = {}
         self._origpl = None
         self._updatedfiles = set()
@@ -129,14 +144,14 @@
         # cache to keep the lookup fast.)
         return self._sparsematchfn()
 
-    @repocache('branch')
+    @repocache(b'branch')
     def _branch(self):
         try:
-            return self._opener.read("branch").strip() or "default"
+            return self._opener.read(b"branch").strip() or b"default"
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
-            return "default"
+            return b"default"
 
     @property
     def _pl(self):
@@ -145,18 +160,18 @@
     def hasdir(self, d):
         return self._map.hastrackeddir(d)
 
-    @rootcache('.hgignore')
+    @rootcache(b'.hgignore')
     def _ignore(self):
         files = self._ignorefiles()
         if not files:
             return matchmod.never()
 
-        pats = ['include:%s' % f for f in files]
-        return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
+        pats = [b'include:%s' % f for f in files]
+        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
 
     @propertycache
     def _slash(self):
-        return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
+        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
 
     @propertycache
     def _checklink(self):
@@ -168,7 +183,7 @@
 
     @propertycache
     def _checkcase(self):
-        return not util.fscasesensitive(self._join('.hg'))
+        return not util.fscasesensitive(self._join(b'.hg'))
 
     def _join(self, f):
         # much faster than os.path.join()
@@ -177,34 +192,40 @@
 
     def flagfunc(self, buildfallback):
         if self._checklink and self._checkexec:
+
             def f(x):
                 try:
                     st = os.lstat(self._join(x))
                     if util.statislink(st):
-                        return 'l'
+                        return b'l'
                     if util.statisexec(st):
-                        return 'x'
+                        return b'x'
                 except OSError:
                     pass
-                return ''
+                return b''
+
             return f
 
         fallback = buildfallback()
         if self._checklink:
+
             def f(x):
                 if os.path.islink(self._join(x)):
-                    return 'l'
-                if 'x' in fallback(x):
-                    return 'x'
-                return ''
+                    return b'l'
+                if b'x' in fallback(x):
+                    return b'x'
+                return b''
+
             return f
         if self._checkexec:
+
             def f(x):
-                if 'l' in fallback(x):
-                    return 'l'
+                if b'l' in fallback(x):
+                    return b'l'
                 if util.isexec(self._join(x)):
-                    return 'x'
-                return ''
+                    return b'x'
+                return b''
+
             return f
         else:
             return fallback
@@ -212,7 +233,7 @@
     @propertycache
     def _cwd(self):
         # internal config: ui.forcecwd
-        forcecwd = self._ui.config('ui', 'forcecwd')
+        forcecwd = self._ui.config(b'ui', b'forcecwd')
         if forcecwd:
             return forcecwd
         return encoding.getcwd()
@@ -226,13 +247,13 @@
         '''
         cwd = self._cwd
         if cwd == self._root:
-            return ''
+            return b''
         # self._root ends with a path separator if self._root is '/' or 'C:\'
         rootsep = self._root
         if not util.endswithsep(rootsep):
             rootsep += pycompat.ossep
         if cwd.startswith(rootsep):
-            return cwd[len(rootsep):]
+            return cwd[len(rootsep) :]
         else:
             # we're outside the repo. return an absolute path.
             return cwd
@@ -255,7 +276,7 @@
           a  marked for addition
           ?  not tracked
         '''
-        return self._map.get(key, ("?",))[0]
+        return self._map.get(key, (b"?",))[0]
 
     def __contains__(self, key):
         return key in self._map
@@ -264,7 +285,7 @@
         return iter(sorted(self._map))
 
     def items(self):
-        return self._map.iteritems()
+        return pycompat.iteritems(self._map)
 
     iteritems = items
 
@@ -290,8 +311,10 @@
         See localrepo.setparents()
         """
         if self._parentwriters == 0:
-            raise ValueError("cannot set dirstate parent outside of "
-                             "dirstate.parentchange context manager")
+            raise ValueError(
+                b"cannot set dirstate parent outside of "
+                b"dirstate.parentchange context manager"
+            )
 
         self._dirty = True
         oldp2 = self._pl[1]
@@ -301,20 +324,21 @@
         copies = {}
         if oldp2 != nullid and p2 == nullid:
             candidatefiles = self._map.nonnormalset.union(
-                                self._map.otherparentset)
+                self._map.otherparentset
+            )
             for f in candidatefiles:
                 s = self._map.get(f)
                 if s is None:
                     continue
 
                 # Discard 'm' markers when moving away from a merge state
-                if s[0] == 'm':
+                if s[0] == b'm':
                     source = self._map.copymap.get(f)
                     if source:
                         copies[f] = source
                     self.normallookup(f)
                 # Also fix up otherparent markers
-                elif s[0] == 'n' and s[2] == -2:
+                elif s[0] == b'n' and s[2] == -2:
                     source = self._map.copymap.get(f)
                     if source:
                         copies[f] = source
@@ -323,17 +347,17 @@
 
     def setbranch(self, branch):
         self.__class__._branch.set(self, encoding.fromlocal(branch))
-        f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
+        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
         try:
-            f.write(self._branch + '\n')
+            f.write(self._branch + b'\n')
             f.close()
 
             # make sure filecache has the correct stat info for _branch after
             # replacing the underlying file
-            ce = self._filecache['_branch']
+            ce = self._filecache[b'_branch']
             if ce:
                 ce.refresh()
-        except: # re-raises
+        except:  # re-raises
             f.discard()
             raise
 
@@ -373,20 +397,22 @@
 
     def _addpath(self, f, state, mode, size, mtime):
         oldstate = self[f]
-        if state == 'a' or oldstate == 'r':
+        if state == b'a' or oldstate == b'r':
             scmutil.checkfilename(f)
             if self._map.hastrackeddir(f):
-                raise error.Abort(_('directory %r already in dirstate') %
-                                  pycompat.bytestr(f))
+                raise error.Abort(
+                    _(b'directory %r already in dirstate') % pycompat.bytestr(f)
+                )
             # shadows
             for d in util.finddirs(f):
                 if self._map.hastrackeddir(d):
                     break
                 entry = self._map.get(d)
-                if entry is not None and entry[0] != 'r':
+                if entry is not None and entry[0] != b'r':
                     raise error.Abort(
-                        _('file %r in dirstate clashes with %r') %
-                        (pycompat.bytestr(d), pycompat.bytestr(f)))
+                        _(b'file %r in dirstate clashes with %r')
+                        % (pycompat.bytestr(d), pycompat.bytestr(f))
+                    )
         self._dirty = True
         self._updatedfiles.add(f)
         self._map.addfile(f, oldstate, state, mode, size, mtime)
@@ -408,7 +434,7 @@
             mode = s.st_mode
             size = s.st_size
             mtime = s[stat.ST_MTIME]
-        self._addpath(f, 'n', mode, size & _rangemask, mtime & _rangemask)
+        self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
         self._map.copymap.pop(f, None)
         if f in self._map.nonnormalset:
             self._map.nonnormalset.remove(f)
@@ -426,7 +452,7 @@
             # being removed, restore that state.
             entry = self._map.get(f)
             if entry is not None:
-                if entry[0] == 'r' and entry[2] in (-1, -2):
+                if entry[0] == b'r' and entry[2] in (-1, -2):
                     source = self._map.copymap.get(f)
                     if entry[2] == -1:
                         self.merge(f)
@@ -435,27 +461,28 @@
                     if source:
                         self.copy(source, f)
                     return
-                if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
+                if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
                     return
-        self._addpath(f, 'n', 0, -1, -1)
+        self._addpath(f, b'n', 0, -1, -1)
         self._map.copymap.pop(f, None)
 
     def otherparent(self, f):
         '''Mark as coming from the other parent, always dirty.'''
         if self._pl[1] == nullid:
-            raise error.Abort(_("setting %r to other parent "
-                               "only allowed in merges") % f)
-        if f in self and self[f] == 'n':
+            raise error.Abort(
+                _(b"setting %r to other parent only allowed in merges") % f
+            )
+        if f in self and self[f] == b'n':
             # merge-like
-            self._addpath(f, 'm', 0, -2, -1)
+            self._addpath(f, b'm', 0, -2, -1)
         else:
             # add-like
-            self._addpath(f, 'n', 0, -2, -1)
+            self._addpath(f, b'n', 0, -2, -1)
         self._map.copymap.pop(f, None)
 
     def add(self, f):
         '''Mark a file added.'''
-        self._addpath(f, 'a', 0, -1, -1)
+        self._addpath(f, b'a', 0, -1, -1)
         self._map.copymap.pop(f, None)
 
     def remove(self, f):
@@ -467,9 +494,9 @@
             entry = self._map.get(f)
             if entry is not None:
                 # backup the previous state
-                if entry[0] == 'm': # merge
+                if entry[0] == b'm':  # merge
                     size = -1
-                elif entry[0] == 'n' and entry[2] == -2: # other parent
+                elif entry[0] == b'n' and entry[2] == -2:  # other parent
                     size = -2
                     self._map.otherparentset.add(f)
         self._updatedfiles.add(f)
@@ -496,21 +523,21 @@
             exists = os.path.lexists(os.path.join(self._root, path))
         if not exists:
             # Maybe a path component exists
-            if not ignoremissing and '/' in path:
-                d, f = path.rsplit('/', 1)
+            if not ignoremissing and b'/' in path:
+                d, f = path.rsplit(b'/', 1)
                 d = self._normalize(d, False, ignoremissing, None)
-                folded = d + "/" + f
+                folded = d + b"/" + f
             else:
                 # No path components, preserve original case
                 folded = path
         else:
             # recursively normalize leading directory components
             # against dirstate
-            if '/' in normed:
-                d, f = normed.rsplit('/', 1)
+            if b'/' in normed:
+                d, f = normed.rsplit(b'/', 1)
                 d = self._normalize(d, False, ignoremissing, True)
-                r = self._root + "/" + d
-                folded = d + "/" + util.fspath(f, r)
+                r = self._root + b"/" + d
+                folded = d + b"/" + util.fspath(f, r)
             else:
                 folded = util.fspath(normed, self._root)
             storemap[normed] = folded
@@ -524,8 +551,9 @@
             if isknown:
                 folded = path
             else:
-                folded = self._discoverpath(path, normed, ignoremissing, exists,
-                                            self._map.filefoldmap)
+                folded = self._discoverpath(
+                    path, normed, ignoremissing, exists, self._map.filefoldmap
+                )
         return folded
 
     def _normalize(self, path, isknown, ignoremissing=False, exists=None):
@@ -539,8 +567,9 @@
             else:
                 # store discovered result in dirfoldmap so that future
                 # normalizefile calls don't start matching directories
-                folded = self._discoverpath(path, normed, ignoremissing, exists,
-                                            self._map.dirfoldmap)
+                folded = self._discoverpath(
+                    path, normed, ignoremissing, exists, self._map.dirfoldmap
+                )
         return folded
 
     def normalize(self, path, isknown=False, ignoremissing=False):
@@ -619,11 +648,15 @@
             self._updatedfiles.clear()
 
             # delay writing in-memory changes out
-            tr.addfilegenerator('dirstate', (self._filename,),
-                                self._writedirstate, location='plain')
+            tr.addfilegenerator(
+                b'dirstate',
+                (self._filename,),
+                self._writedirstate,
+                location=b'plain',
+            )
             return
 
-        st = self._opener(filename, "w", atomictemp=True, checkambig=True)
+        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
         self._writedirstate(st)
 
     def addparentchangecallback(self, category, callback):
@@ -640,7 +673,9 @@
     def _writedirstate(self, st):
         # notify callbacks about parents change
         if self._origpl is not None and self._origpl != self._pl:
-            for c, callback in sorted(self._plchangecallbacks.iteritems()):
+            for c, callback in sorted(
+                pycompat.iteritems(self._plchangecallbacks)
+            ):
                 callback(self, self._origpl, self._pl)
             self._origpl = None
         # use the modification time of the newly created temporary file as the
@@ -649,20 +684,28 @@
 
         # enough 'delaywrite' prevents 'pack_dirstate' from dropping
         # timestamp of each entries in dirstate, because of 'now > mtime'
-        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
+        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
         if delaywrite > 0:
             # do we have any files to delay for?
-            for f, e in self._map.iteritems():
-                if e[0] == 'n' and e[3] == now:
-                    import time # to avoid useless import
+            items = pycompat.iteritems(self._map)
+            for f, e in items:
+                if e[0] == b'n' and e[3] == now:
+                    import time  # to avoid useless import
+
                     # rather than sleep n seconds, sleep until the next
                     # multiple of n seconds
                     clock = time.time()
                     start = int(clock) - (int(clock) % delaywrite)
                     end = start + delaywrite
                     time.sleep(end - clock)
-                    now = end # trust our estimate that the end is near now
+                    now = end  # trust our estimate that the end is near now
                     break
+            # the iterator may not be exhausted here, so delete it
+            # explicitly to release the reference held by the Rust
+            # implementation.
+            # TODO make the Rust implementation behave like Python,
+            # since this pattern requires a ref-counting GC.
+            del items
 
         self._map.write(st, now)
         self._lastnormaltime = 0
@@ -678,10 +721,10 @@
 
     def _ignorefiles(self):
         files = []
-        if os.path.exists(self._join('.hgignore')):
-            files.append(self._join('.hgignore'))
-        for name, path in self._ui.configitems("ui"):
-            if name == 'ignore' or name.startswith('ignore.'):
+        if os.path.exists(self._join(b'.hgignore')):
+            files.append(self._join(b'.hgignore'))
+        for name, path in self._ui.configitems(b"ui"):
+            if name == b'ignore' or name.startswith(b'ignore.'):
                 # we need to use os.path.join here rather than self._join
                 # because path is arbitrary and user-specified
                 files.append(os.path.join(self._rootdir, util.expandpath(path)))
@@ -692,20 +735,22 @@
         visited = set()
         while files:
             i = files.popleft()
-            patterns = matchmod.readpatternfile(i, self._ui.warn,
-                                                sourceinfo=True)
+            patterns = matchmod.readpatternfile(
+                i, self._ui.warn, sourceinfo=True
+            )
             for pattern, lineno, line in patterns:
-                kind, p = matchmod._patsplit(pattern, 'glob')
-                if kind == "subinclude":
+                kind, p = matchmod._patsplit(pattern, b'glob')
+                if kind == b"subinclude":
                     if p not in visited:
                         files.append(p)
                     continue
-                m = matchmod.match(self._root, '', [], [pattern],
-                                   warn=self._ui.warn)
+                m = matchmod.match(
+                    self._root, b'', [], [pattern], warn=self._ui.warn
+                )
                 if m(f):
                     return (i, lineno, line)
             visited.add(i)
-        return (None, -1, "")
+        return (None, -1, b"")
 
     def _walkexplicit(self, match, subrepos):
         '''Get stat data about the files explicitly specified by match.
@@ -718,18 +763,18 @@
           directories and that were not found.'''
 
         def badtype(mode):
-            kind = _('unknown')
+            kind = _(b'unknown')
             if stat.S_ISCHR(mode):
-                kind = _('character device')
+                kind = _(b'character device')
             elif stat.S_ISBLK(mode):
-                kind = _('block device')
+                kind = _(b'block device')
             elif stat.S_ISFIFO(mode):
-                kind = _('fifo')
+                kind = _(b'fifo')
             elif stat.S_ISSOCK(mode):
-                kind = _('socket')
+                kind = _(b'socket')
             elif stat.S_ISDIR(mode):
-                kind = _('directory')
-            return _('unsupported file type (type is %s)') % kind
+                kind = _(b'directory')
+            return _(b'unsupported file type (type is %s)') % kind
 
         matchedir = match.explicitdir
         badfn = match.bad
@@ -754,7 +799,7 @@
         subrepos.sort()
         i, j = 0, 0
         while i < len(files) and j < len(subrepos):
-            subpath = subrepos[j] + "/"
+            subpath = subrepos[j] + b"/"
             if files[i] < subpath:
                 i += 1
                 continue
@@ -762,13 +807,13 @@
                 del files[i]
             j += 1
 
-        if not files or '' in files:
-            files = ['']
+        if not files or b'' in files:
+            files = [b'']
             # constructing the foldmap is expensive, so don't do it for the
             # common case where files is ['']
             normalize = None
         results = dict.fromkeys(subrepos)
-        results['.hg'] = None
+        results[b'.hg'] = None
 
         for ff in files:
             if normalize:
@@ -794,10 +839,10 @@
                     badfn(ff, badtype(kind))
                     if nf in dmap:
                         results[nf] = None
-            except OSError as inst: # nf not found on disk - it is dirstate only
-                if nf in dmap: # does it exactly match a missing file?
+            except OSError as inst:  # nf not found on disk - it is dirstate only
+                if nf in dmap:  # does it exactly match a missing file?
                     results[nf] = None
-                else: # does it match a missing directory?
+                else:  # does it match a missing directory?
                     if self._map.hasdir(nf):
                         if matchedir:
                             matchedir(nf)
@@ -810,7 +855,7 @@
         # aren't filtered here because they will be tested later.
         if match.anypats():
             for f in list(results):
-                if f == '.hg' or f in subrepos:
+                if f == b'.hg' or f in subrepos:
                     # keep sentinel to disable further out-of-repo walks
                     continue
                 if not match(f):
@@ -823,7 +868,7 @@
         if match.isexact() and self._checkcase:
             normed = {}
 
-            for f, st in results.iteritems():
+            for f, st in pycompat.iteritems(results):
                 if st is None:
                     continue
 
@@ -836,11 +881,12 @@
 
                 paths.add(f)
 
-            for norm, paths in normed.iteritems():
+            for norm, paths in pycompat.iteritems(normed):
                 if len(paths) > 1:
                     for path in paths:
-                        folded = self._discoverpath(path, norm, True, None,
-                                                    self._map.dirfoldmap)
+                        folded = self._discoverpath(
+                            path, norm, True, None, self._map.dirfoldmap
+                        )
                         if path != folded:
                             results[path] = None
 
@@ -884,10 +930,10 @@
         join = self._join
 
         exact = skipstep3 = False
-        if match.isexact(): # match.exact
+        if match.isexact():  # match.exact
             exact = True
-            dirignore = util.always # skip step 2
-        elif match.prefix(): # match.match, no patterns
+            dirignore = util.always  # skip step 2
+        elif match.prefix():  # match.match, no patterns
             skipstep3 = True
 
         if not exact and self._checkcase:
@@ -908,21 +954,25 @@
         def traverse(work, alreadynormed):
             wadd = work.append
             while work:
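+                # record the remaining queue depth for tracing purposes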
+                tracing.counter('dirstate.walk work', len(work))
                 nd = work.pop()
                 visitentries = match.visitchildrenset(nd)
                 if not visitentries:
                     continue
-                if visitentries == 'this' or visitentries == 'all':
+                if visitentries == b'this' or visitentries == b'all':
                     visitentries = None
                 skip = None
-                if nd != '':
-                    skip = '.hg'
+                if nd != b'':
+                    skip = b'.hg'
                 try:
-                    entries = listdir(join(nd), stat=True, skip=skip)
+                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
+                        entries = listdir(join(nd), stat=True, skip=skip)
                 except OSError as inst:
                     if inst.errno in (errno.EACCES, errno.ENOENT):
-                        match.bad(self.pathto(nd),
-                                  encoding.strtolocal(inst.strerror))
+                        match.bad(
+                            self.pathto(nd), encoding.strtolocal(inst.strerror)
+                        )
                         continue
                     raise
                 for f, kind, st in entries:
@@ -940,10 +989,11 @@
                         # even though f might be a directory, we're only
                         # interested in comparing it to files currently in the
                         # dmap -- therefore normalizefile is enough
-                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
-                                           True)
+                        nf = normalizefile(
+                            nd and (nd + b"/" + f) or f, True, True
+                        )
                     else:
-                        nf = nd and (nd + "/" + f) or f
+                        nf = nd and (nd + b"/" + f) or f
                     if nf not in results:
                         if kind == dirkind:
                             if not ignore(nf):
@@ -956,8 +1006,9 @@
                             if nf in dmap:
                                 if matchalways or matchfn(nf):
                                     results[nf] = st
-                            elif ((matchalways or matchfn(nf))
-                                  and not ignore(nf)):
+                            elif (matchalways or matchfn(nf)) and not ignore(
+                                nf
+                            ):
                                 # unknown file -- normalize if necessary
                                 if not alreadynormed:
                                     nf = normalize(nf, False, True)
@@ -973,7 +1024,7 @@
 
         for s in subrepos:
             del results[s]
-        del results['.hg']
+        del results[b'.hg']
 
         # step 3: visit remaining files from dmap
         if not skipstep3 and not exact:
@@ -998,8 +1049,10 @@
                     # different case, don't add one for this, since that would
                     # make it appear as if the file exists under both names
                     # on disk.
-                    if (normalizefile and
-                        normalizefile(nf, True, True) in results):
+                    if (
+                        normalizefile
+                        and normalizefile(nf, True, True) in results
+                    ):
                         results[nf] = None
                     # Report ignored items in the dmap as long as they are not
                     # under a symlink directory.
@@ -1044,9 +1097,58 @@
 
         dmap = self._map
         dmap.preload()
+
+        use_rust = True
+        if rustmod is None:
+            use_rust = False
+        elif subrepos:
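+            # the Rust status fast path does not handle subrepositories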
+            use_rust = False
+        elif bool(listunknown):
+            # Pathauditor does not exist yet in Rust, so unknown files
+            # can't be trusted.
+            use_rust = False
+        elif self._ignorefiles() and listignored:
+            # Rust has no ignore mechanism yet, so don't use Rust for
+            # commands that need ignore.
+            use_rust = False
+        elif not match.always():
+            # matchers other than "always" have yet to be implemented
+            use_rust = False
+
+        if use_rust:
+            (
+                lookup,
+                modified,
+                added,
+                removed,
+                deleted,
+                unknown,
+                clean,
+            ) = rustmod.status(
+                dmap._rustmap,
+                self._rootdir,
+                match.files(),
+                bool(listclean),
+                self._lastnormaltime,
+                self._checkexec,
+            )
+
+            status = scmutil.status(
+                modified=modified,
+                added=added,
+                removed=removed,
+                deleted=deleted,
+                unknown=unknown,
+                ignored=ignored,
+                clean=clean,
+            )
+            return (lookup, status)
+
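+        # fall through to the Python implementation when Rust cannot be used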
         dcontains = dmap.__contains__
         dget = dmap.__getitem__
-        ladd = lookup.append            # aka "unsure"
+        ladd = lookup.append  # aka "unsure"
         madd = modified.append
         aadd = added.append
         uadd = unknown.append
@@ -1065,8 +1165,9 @@
         # - match.traversedir does something, because match.traversedir should
         #   be called for every dir in the working dir
         full = listclean or match.traversedir is not None
-        for fn, st in self.walk(match, subrepos, listunknown, listignored,
-                                full=full).iteritems():
+        for fn, st in pycompat.iteritems(
+            self.walk(match, subrepos, listunknown, listignored, full=full)
+        ):
             if not dcontains(fn):
                 if (listignored or mexact(fn)) and dirignore(fn):
                     if listignored:
@@ -1088,17 +1189,23 @@
             size = t[2]
             time = t[3]
 
-            if not st and state in "nma":
+            if not st and state in b"nma":
                 dadd(fn)
-            elif state == 'n':
-                if (size >= 0 and
-                    ((size != st.st_size and size != st.st_size & _rangemask)
-                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
-                    or size == -2 # other parent
-                    or fn in copymap):
+            elif state == b'n':
+                if (
+                    size >= 0
+                    and (
+                        (size != st.st_size and size != st.st_size & _rangemask)
+                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
+                    )
+                    or size == -2  # other parent
+                    or fn in copymap
+                ):
                     madd(fn)
-                elif (time != st[stat.ST_MTIME]
-                      and time != st[stat.ST_MTIME] & _rangemask):
+                elif (
+                    time != st[stat.ST_MTIME]
+                    and time != st[stat.ST_MTIME] & _rangemask
+                ):
                     ladd(fn)
                 elif st[stat.ST_MTIME] == lastnormaltime:
                     # fn may have just been marked as normal and it may have
@@ -1108,15 +1215,19 @@
                     ladd(fn)
                 elif listclean:
                     cadd(fn)
-            elif state == 'm':
+            elif state == b'm':
                 madd(fn)
-            elif state == 'a':
+            elif state == b'a':
                 aadd(fn)
-            elif state == 'r':
+            elif state == b'r':
                 radd(fn)
 
-        return (lookup, scmutil.status(modified, added, removed, deleted,
-                                       unknown, ignored, clean))
+        return (
+            lookup,
+            scmutil.status(
+                modified, added, removed, deleted, unknown, ignored, clean
+            ),
+        )
 
     def matches(self, match):
         '''
@@ -1151,26 +1262,34 @@
         # because the latter omits writing out if transaction is running.
         # output file will be used to create backup of dirstate at this point.
         if self._dirty or not self._opener.exists(filename):
-            self._writedirstate(self._opener(filename, "w", atomictemp=True,
-                                             checkambig=True))
+            self._writedirstate(
+                self._opener(filename, b"w", atomictemp=True, checkambig=True)
+            )
 
         if tr:
             # ensure that subsequent tr.writepending returns True for
             # changes written out above, even if dirstate is never
             # changed after this
-            tr.addfilegenerator('dirstate', (self._filename,),
-                                self._writedirstate, location='plain')
+            tr.addfilegenerator(
+                b'dirstate',
+                (self._filename,),
+                self._writedirstate,
+                location=b'plain',
+            )
 
             # ensure that pending file written above is unlinked at
             # failure, even if tr.writepending isn't invoked until the
             # end of this transaction
-            tr.registertmp(filename, location='plain')
+            tr.registertmp(filename, location=b'plain')
 
         self._opener.tryunlink(backupname)
         # hardlink backup is okay because _writedirstate is always called
         # with an "atomictemp=True" file.
-        util.copyfile(self._opener.join(filename),
-                      self._opener.join(backupname), hardlink=True)
+        util.copyfile(
+            self._opener.join(filename),
+            self._opener.join(backupname),
+            hardlink=True,
+        )
 
     def restorebackup(self, tr, backupname):
         '''Restore dirstate by backup file'''
@@ -1188,6 +1307,7 @@
         '''Clear backup file'''
         self._opener.unlink(backupname)
 
+
 class dirstatemap(object):
     """Map encapsulating the dirstate's contents.
 
@@ -1226,7 +1346,7 @@
         self._ui = ui
         self._opener = opener
         self._root = root
-        self._filename = 'dirstate'
+        self._filename = b'dirstate'
 
         self._parents = None
         self._dirtyparents = False
@@ -1250,15 +1370,15 @@
         self._map.clear()
         self.copymap.clear()
         self.setparents(nullid, nullid)
-        util.clearcachedproperty(self, "_dirs")
-        util.clearcachedproperty(self, "_alldirs")
-        util.clearcachedproperty(self, "filefoldmap")
-        util.clearcachedproperty(self, "dirfoldmap")
-        util.clearcachedproperty(self, "nonnormalset")
-        util.clearcachedproperty(self, "otherparentset")
+        util.clearcachedproperty(self, b"_dirs")
+        util.clearcachedproperty(self, b"_alldirs")
+        util.clearcachedproperty(self, b"filefoldmap")
+        util.clearcachedproperty(self, b"dirfoldmap")
+        util.clearcachedproperty(self, b"nonnormalset")
+        util.clearcachedproperty(self, b"otherparentset")
 
     def items(self):
-        return self._map.iteritems()
+        return pycompat.iteritems(self._map)
 
     # forward for python2,3 compat
     iteritems = items
@@ -1287,12 +1407,12 @@
 
     def addfile(self, f, oldstate, state, mode, size, mtime):
         """Add a tracked file to the dirstate."""
-        if oldstate in "?r" and r"_dirs" in self.__dict__:
+        if oldstate in b"?r" and r"_dirs" in self.__dict__:
             self._dirs.addpath(f)
-        if oldstate == "?" and r"_alldirs" in self.__dict__:
+        if oldstate == b"?" and r"_alldirs" in self.__dict__:
             self._alldirs.addpath(f)
         self._map[f] = dirstatetuple(state, mode, size, mtime)
-        if state != 'n' or mtime == -1:
+        if state != b'n' or mtime == -1:
             self.nonnormalset.add(f)
         if size == -2:
             self.otherparentset.add(f)
@@ -1305,14 +1425,14 @@
         the file's previous state.  In the future, we should refactor this
         to be more explicit about what that state is.
         """
-        if oldstate not in "?r" and r"_dirs" in self.__dict__:
+        if oldstate not in b"?r" and r"_dirs" in self.__dict__:
             self._dirs.delpath(f)
-        if oldstate == "?" and r"_alldirs" in self.__dict__:
+        if oldstate == b"?" and r"_alldirs" in self.__dict__:
             self._alldirs.addpath(f)
         if r"filefoldmap" in self.__dict__:
             normed = util.normcase(f)
             self.filefoldmap.pop(normed, None)
-        self._map[f] = dirstatetuple('r', 0, size, 0)
+        self._map[f] = dirstatetuple(b'r', 0, size, 0)
         self.nonnormalset.add(f)
 
     def dropfile(self, f, oldstate):
@@ -1322,7 +1442,7 @@
         """
         exists = self._map.pop(f, None) is not None
         if exists:
-            if oldstate != "r" and r"_dirs" in self.__dict__:
+            if oldstate != b"r" and r"_dirs" in self.__dict__:
                 self._dirs.delpath(f)
             if r"_alldirs" in self.__dict__:
                 self._alldirs.delpath(f)
@@ -1335,7 +1455,7 @@
     def clearambiguoustimes(self, files, now):
         for f in files:
             e = self.get(f)
-            if e is not None and e[0] == 'n' and e[3] == now:
+            if e is not None and e[0] == b'n' and e[3] == now:
                 self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
                 self.nonnormalset.add(f)
 
@@ -1346,10 +1466,10 @@
         except AttributeError:
             nonnorm = set()
             otherparent = set()
-            for fname, e in self._map.iteritems():
-                if e[0] != 'n' or e[3] == -1:
+            for fname, e in pycompat.iteritems(self._map):
+                if e[0] != b'n' or e[3] == -1:
                     nonnorm.add(fname)
-                if e[0] == 'n' and e[2] == -2:
+                if e[0] == b'n' and e[2] == -2:
                     otherparent.add(fname)
             return nonnorm, otherparent
 
@@ -1363,15 +1483,16 @@
         except AttributeError:
             pass
         else:
-            return makefilefoldmap(self._map, util.normcasespec,
-                                   util.normcasefallback)
+            return makefilefoldmap(
+                self._map, util.normcasespec, util.normcasefallback
+            )
 
         f = {}
         normcase = util.normcase
-        for name, s in self._map.iteritems():
-            if s[0] != 'r':
+        for name, s in pycompat.iteritems(self._map):
+            if s[0] != b'r':
                 f[normcase(name)] = name
-        f['.'] = '.' # prevents useless util.fspath() invocation
+        f[b'.'] = b'.'  # prevents useless util.fspath() invocation
         return f
 
     def hastrackeddir(self, d):
@@ -1390,7 +1511,7 @@
 
     @propertycache
     def _dirs(self):
-        return util.dirs(self._map, 'r')
+        return util.dirs(self._map, b'r')
 
     @propertycache
     def _alldirs(self):
@@ -1400,8 +1521,9 @@
         fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
         if self._pendingmode is not None and self._pendingmode != mode:
             fp.close()
-            raise error.Abort(_('working directory state may be '
-                                'changed parallelly'))
+            raise error.Abort(
+                _(b'working directory state may be changed in parallel')
+            )
         self._pendingmode = mode
         return fp
 
@@ -1415,7 +1537,7 @@
                 if err.errno != errno.ENOENT:
                     raise
                 # File doesn't exist, so the current state is empty
-                st = ''
+                st = b''
 
             l = len(st)
             if l == 40:
@@ -1423,8 +1545,9 @@
             elif l == 0:
                 self._parents = (nullid, nullid)
             else:
-                raise error.Abort(_('working directory state appears '
-                                    'damaged!'))
+                raise error.Abort(
+                    _(b'working directory state appears damaged!')
+                )
 
         return self._parents
 
@@ -1435,7 +1558,8 @@
     def read(self):
         # ignore HG_PENDING because identity is used only for writing
         self.identity = util.filestat.frompath(
-            self._opener.join(self._filename))
+            self._opener.join(self._filename)
+        )
 
         try:
             fp = self._opendirstatefile()
@@ -1450,7 +1574,7 @@
         if not st:
             return
 
-        if util.safehasattr(parsers, 'dict_new_presized'):
+        if util.safehasattr(parsers, b'dict_new_presized'):
             # Make an estimate of the number of files in the dirstate based on
             # its size. From a linear regression on a set of real-world repos,
             # all over 10,000 files, the size of a dirstate entry is 85
@@ -1475,7 +1599,7 @@
         # parsing the dirstate.
         #
         # (we cannot decorate the function directly since it is in a C module)
-        parse_dirstate = util.nogc(dirstatemod.parse_dirstate)
+        parse_dirstate = util.nogc(parsers.parse_dirstate)
         p = parse_dirstate(self._map, self.copymap, st)
         if not self._dirtyparents:
             self.setparents(*p)
@@ -1486,8 +1610,9 @@
         self.get = self._map.get
 
     def write(self, st, now):
-        st.write(dirstatemod.pack_dirstate(self._map, self.copymap,
-                                           self.parents(), now))
+        st.write(
+            parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
+        )
         st.close()
         self._dirtyparents = False
         self.nonnormalset, self.otherparentset = self.nonnormalentries()
@@ -1516,3 +1641,203 @@
         for name in self._dirs:
             f[normcase(name)] = name
         return f
+
+
+if rustmod is not None:
+
+    class dirstatemap(object):
+        def __init__(self, ui, opener, root):
+            self._ui = ui
+            self._opener = opener
+            self._root = root
+            self._filename = b'dirstate'
+            self._parents = None
+            self._dirtyparents = False
+
+            # for consistent view between _pl() and _read() invocations
+            self._pendingmode = None
+
+        def addfile(self, *args, **kwargs):
+            return self._rustmap.addfile(*args, **kwargs)
+
+        def removefile(self, *args, **kwargs):
+            return self._rustmap.removefile(*args, **kwargs)
+
+        def dropfile(self, *args, **kwargs):
+            return self._rustmap.dropfile(*args, **kwargs)
+
+        def clearambiguoustimes(self, *args, **kwargs):
+            return self._rustmap.clearambiguoustimes(*args, **kwargs)
+
+        def nonnormalentries(self):
+            return self._rustmap.nonnormalentries()
+
+        def get(self, *args, **kwargs):
+            return self._rustmap.get(*args, **kwargs)
+
+        @propertycache
+        def _rustmap(self):
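+            # the explicit assignment below shadows this propertycache,
+            # so the self.read() call can use self._rustmap without
+            # re-entering this function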
+            self._rustmap = rustmod.DirstateMap(self._root)
+            self.read()
+            return self._rustmap
+
+        @property
+        def copymap(self):
+            return self._rustmap.copymap()
+
+        def preload(self):
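+            # accessing the propertycache reads and parses the dirstate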
+            self._rustmap
+
+        def clear(self):
+            self._rustmap.clear()
+            self.setparents(nullid, nullid)
+            util.clearcachedproperty(self, b"_dirs")
+            util.clearcachedproperty(self, b"_alldirs")
+            util.clearcachedproperty(self, b"dirfoldmap")
+
+        def items(self):
+            return self._rustmap.items()
+
+        def keys(self):
+            return iter(self._rustmap)
+
+        def __contains__(self, key):
+            return key in self._rustmap
+
+        def __getitem__(self, item):
+            return self._rustmap[item]
+
+        def __len__(self):
+            return len(self._rustmap)
+
+        def __iter__(self):
+            return iter(self._rustmap)
+
+        # forward for python2,3 compat
+        iteritems = items
+
+        def _opendirstatefile(self):
+            fp, mode = txnutil.trypending(
+                self._root, self._opener, self._filename
+            )
+            if self._pendingmode is not None and self._pendingmode != mode:
+                fp.close()
+                raise error.Abort(
+                    _(b'working directory state may be changed in parallel')
+                )
+            self._pendingmode = mode
+            return fp
+
+        def setparents(self, p1, p2):
+            self._rustmap.setparents(p1, p2)
+            self._parents = (p1, p2)
+            self._dirtyparents = True
+
+        def parents(self):
+            if not self._parents:
+                try:
+                    fp = self._opendirstatefile()
+                    st = fp.read(40)
+                    fp.close()
+                except IOError as err:
+                    if err.errno != errno.ENOENT:
+                        raise
+                    # File doesn't exist, so the current state is empty
+                    st = b''
+
+                try:
+                    self._parents = self._rustmap.parents(st)
+                except ValueError:
+                    raise error.Abort(
+                        _(b'working directory state appears damaged!')
+                    )
+
+            return self._parents
+
+        def read(self):
+            # ignore HG_PENDING because identity is used only for writing
+            self.identity = util.filestat.frompath(
+                self._opener.join(self._filename)
+            )
+
+            try:
+                fp = self._opendirstatefile()
+                try:
+                    st = fp.read()
+                finally:
+                    fp.close()
+            except IOError as err:
+                if err.errno != errno.ENOENT:
+                    raise
+                return
+            if not st:
+                return
+
+            parse_dirstate = util.nogc(self._rustmap.read)
+            parents = parse_dirstate(st)
+            if parents and not self._dirtyparents:
+                self.setparents(*parents)
+
+            self.__contains__ = self._rustmap.__contains__
+            self.__getitem__ = self._rustmap.__getitem__
+            self.get = self._rustmap.get
+
+        def write(self, st, now):
+            parents = self.parents()
+            st.write(self._rustmap.write(parents[0], parents[1], now))
+            st.close()
+            self._dirtyparents = False
+
+        @propertycache
+        def filefoldmap(self):
+            """Returns a dictionary mapping normalized case paths to their
+            non-normalized versions.
+            """
+            return self._rustmap.filefoldmapasdict()
+
+        def hastrackeddir(self, d):
+            self._dirs  # Trigger Python's propertycache
+            return self._rustmap.hastrackeddir(d)
+
+        def hasdir(self, d):
+            self._dirs  # Trigger Python's propertycache
+            return self._rustmap.hasdir(d)
+
+        @propertycache
+        def _dirs(self):
+            return self._rustmap.getdirs()
+
+        @propertycache
+        def _alldirs(self):
+            return self._rustmap.getalldirs()
+
+        @propertycache
+        def identity(self):
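+            # accessing _rustmap triggers read(), which assigns an
+            # 'identity' instance attribute that shadows this
+            # propertycache afterwards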
+            self._rustmap
+            return self.identity
+
+        @property
+        def nonnormalset(self):
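+            # recomputed from the Rust map on every access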
+            nonnorm, otherparents = self._rustmap.nonnormalentries()
+            return nonnorm
+
+        @property
+        def otherparentset(self):
+            nonnorm, otherparents = self._rustmap.nonnormalentries()
+            return otherparents
+
+        @propertycache
+        def dirfoldmap(self):
+            f = {}
+            normcase = util.normcase
+            for name in self._dirs:
+                f[normcase(name)] = name
+            return f
--- a/mercurial/dirstateguard.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/dirstateguard.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,6 +15,7 @@
     util,
 )
 
+
 class dirstateguard(util.transactional):
     '''Restore dirstate at unexpected failure.
 
@@ -33,15 +34,17 @@
         self._repo = repo
         self._active = False
         self._closed = False
-        self._backupname = 'dirstate.backup.%s.%d' % (name, id(self))
-        self._narrowspecbackupname = ('narrowspec.backup.%s.%d' %
-                                      (name, id(self)))
+        self._backupname = b'dirstate.backup.%s.%d' % (name, id(self))
+        self._narrowspecbackupname = b'narrowspec.backup.%s.%d' % (
+            name,
+            id(self),
+        )
         repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)
         narrowspec.savewcbackup(repo, self._narrowspecbackupname)
         self._active = True
 
     def __del__(self):
-        if self._active: # still active
+        if self._active:  # still active
             # this may occur, even if this class is used correctly:
             # for example, releasing other resources like transaction
             # may raise exception before ``dirstateguard.release`` in
@@ -49,27 +52,33 @@
             self._abort()
 
     def close(self):
-        if not self._active: # already inactivated
-            msg = (_("can't close already inactivated backup: %s")
-                   % self._backupname)
+        if not self._active:  # already inactivated
+            msg = (
+                _(b"can't close already inactivated backup: %s")
+                % self._backupname
+            )
             raise error.Abort(msg)
 
-        self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
-                                         self._backupname)
+        self._repo.dirstate.clearbackup(
+            self._repo.currenttransaction(), self._backupname
+        )
         narrowspec.clearwcbackup(self._repo, self._narrowspecbackupname)
         self._active = False
         self._closed = True
 
     def _abort(self):
         narrowspec.restorewcbackup(self._repo, self._narrowspecbackupname)
-        self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
-                                           self._backupname)
+        self._repo.dirstate.restorebackup(
+            self._repo.currenttransaction(), self._backupname
+        )
         self._active = False
 
     def release(self):
         if not self._closed:
-            if not self._active: # already inactivated
-                msg = (_("can't release already inactivated backup: %s")
-                       % self._backupname)
+            if not self._active:  # already inactivated
+                msg = (
+                    _(b"can't release already inactivated backup: %s")
+                    % self._backupname
+                )
                 raise error.Abort(msg)
             self._abort()
--- a/mercurial/discovery.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/discovery.py	Mon Oct 21 11:09:48 2019 -0400
@@ -21,12 +21,14 @@
     branchmap,
     error,
     phases,
+    pycompat,
     scmutil,
     setdiscovery,
     treediscovery,
     util,
 )
 
+
 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
     """Return a tuple (common, anyincoming, heads) used to identify the common
     subset of nodes between repo and remote.
@@ -49,20 +51,25 @@
     extensions a good hook into outgoing.
     """
 
-    if not remote.capable('getbundle'):
+    if not remote.capable(b'getbundle'):
         return treediscovery.findcommonincoming(repo, remote, heads, force)
 
     if heads:
-        knownnode = repo.changelog.hasnode # no nodemap until it is filtered
+        knownnode = repo.changelog.hasnode  # no nodemap until it is filtered
         if all(knownnode(h) for h in heads):
             return (heads, False, heads)
 
-    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
-                                       abortwhenunrelated=not force,
-                                       ancestorsof=ancestorsof)
+    res = setdiscovery.findcommonheads(
+        repo.ui,
+        repo,
+        remote,
+        abortwhenunrelated=not force,
+        ancestorsof=ancestorsof,
+    )
     common, anyinc, srvheads = res
     return (list(common), anyinc, heads or list(srvheads))
 
+
 class outgoing(object):
     '''Represents the set of nodes present in a local repo but not in a
     (possibly) remote one.
@@ -78,8 +85,9 @@
     The sets are computed on demand from the heads, unless provided upfront
     by discovery.'''
 
-    def __init__(self, repo, commonheads=None, missingheads=None,
-                 missingroots=None):
+    def __init__(
+        self, repo, commonheads=None, missingheads=None, missingroots=None
+    ):
         # at least one of them must not be set
         assert None in (commonheads, missingroots)
         cl = repo.changelog
@@ -106,8 +114,9 @@
         self.excluded = []
 
     def _computecommonmissing(self):
-        sets = self._revlog.findcommonmissing(self.commonheads,
-                                              self.missingheads)
+        sets = self._revlog.findcommonmissing(
+            self.commonheads, self.missingheads
+        )
         self._common, self._missing = sets
 
     @util.propertycache
@@ -122,8 +131,10 @@
             self._computecommonmissing()
         return self._missing
 
-def findcommonoutgoing(repo, other, onlyheads=None, force=False,
-                       commoninc=None, portable=False):
+
+def findcommonoutgoing(
+    repo, other, onlyheads=None, force=False, commoninc=None, portable=False
+):
     '''Return an outgoing instance to identify the nodes present in repo but
     not in other.
 
@@ -141,18 +152,19 @@
 
     # get common set if not provided
     if commoninc is None:
-        commoninc = findcommonincoming(repo, other, force=force,
-                                       ancestorsof=onlyheads)
+        commoninc = findcommonincoming(
+            repo, other, force=force, ancestorsof=onlyheads
+        )
     og.commonheads, _any, _hds = commoninc
 
     # compute outgoing
-    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
+    mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
     if not mayexclude:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
-        og.missingheads = repo.filtered("served").heads()
-        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
+        og.missingheads = repo.filtered(b"served").heads()
+        og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
     else:
         # compute common, missing and exclude secret stuff
         sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
@@ -167,7 +179,7 @@
                 missing.append(node)
         if len(missing) == len(allmissing):
             missingheads = onlyheads
-        else: # update missing heads
+        else:  # update missing heads
             missingheads = phases.newheads(repo, onlyheads, excluded)
         og.missingheads = missingheads
     if portable:
@@ -183,6 +195,7 @@
 
     return og
 
+
 def _headssummary(pushop):
     """compute a summary of branch and heads status before and after push
 
@@ -210,11 +223,11 @@
         branches.add(ctx.branch())
 
     with remote.commandexecutor() as e:
-        remotemap = e.callcommand('branchmap', {}).result()
+        remotemap = e.callcommand(b'branchmap', {}).result()
 
-    knownnode = cl.hasnode # do not use nodemap until it is filtered
+    knownnode = cl.hasnode  # do not use nodemap until it is filtered
     # A. register remote heads of branches which are in outgoing set
-    for branch, heads in remotemap.iteritems():
+    for branch, heads in pycompat.iteritems(remotemap):
         # don't add head info about branches which we don't have locally
         if branch not in branches:
             continue
@@ -234,13 +247,15 @@
 
     # C. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
-    newmap = branchmap.remotebranchcache((branch, heads[1])
-                                 for branch, heads in headssum.iteritems()
-                                 if heads[0] is not None)
+    newmap = branchmap.remotebranchcache(
+        (branch, heads[1])
+        for branch, heads in pycompat.iteritems(headssum)
+        if heads[0] is not None
+    )
     newmap.update(repo, (ctx.rev() for ctx in missingctx))
-    for branch, newheads in newmap.iteritems():
+    for branch, newheads in pycompat.iteritems(newmap):
         headssum[branch][1][:] = newheads
-    for branch, items in headssum.iteritems():
+    for branch, items in pycompat.iteritems(headssum):
         for l in items:
             if l is not None:
                 l.sort()
@@ -252,27 +267,32 @@
         futureheads = set(torev(h) for h in outgoing.missingheads)
         futureheads |= set(torev(h) for h in outgoing.commonheads)
         allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
-        for branch, heads in sorted(headssum.iteritems()):
+        for branch, heads in sorted(pycompat.iteritems(headssum)):
             remoteheads, newheads, unsyncedheads, placeholder = heads
             result = _postprocessobsolete(pushop, allfuturecommon, newheads)
-            headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
-                                sorted(result[1]))
+            headssum[branch] = (
+                remoteheads,
+                sorted(result[0]),
+                unsyncedheads,
+                sorted(result[1]),
+            )
     return headssum
 
+
 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
     """Compute branchmapsummary for repo without branchmap support"""
 
     # 1-4b. old servers: Check for new topological heads.
     # Construct {old,new}map with branch = None (topological branch).
     # (code based on update)
-    knownnode = repo.changelog.hasnode # no nodemap until it is filtered
+    knownnode = repo.changelog.hasnode  # no nodemap until it is filtered
     oldheads = sorted(h for h in remoteheads if knownnode(h))
     # all nodes in outgoing.missing are children of either:
     # - an element of oldheads
     # - another element of outgoing.missing
     # - nullrev
     # This explains why the new head are very simple to compute.
-    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
+    r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
     newheads = sorted(c.node() for c in r)
     # set some unsynced head to issue the "unsynced changes" warning
     if inc:
@@ -281,6 +301,7 @@
         unsynced = []
     return {None: (oldheads, newheads, unsynced, [])}
 
+
 def _nowarnheads(pushop):
     # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
     repo = pushop.repo.unfiltered()
@@ -288,20 +309,22 @@
     localbookmarks = repo._bookmarks
 
     with remote.commandexecutor() as e:
-        remotebookmarks = e.callcommand('listkeys', {
-            'namespace': 'bookmarks',
-        }).result()
+        remotebookmarks = e.callcommand(
+            b'listkeys', {b'namespace': b'bookmarks',}
+        ).result()
 
     bookmarkedheads = set()
 
     # internal config: bookmarks.pushing
-    newbookmarks = [localbookmarks.expandname(b)
-                    for b in pushop.ui.configlist('bookmarks', 'pushing')]
+    newbookmarks = [
+        localbookmarks.expandname(b)
+        for b in pushop.ui.configlist(b'bookmarks', b'pushing')
+    ]
 
     for bm in localbookmarks:
         rnode = remotebookmarks.get(bm)
         if rnode and rnode in repo:
-            lctx, rctx = localbookmarks.changectx(bm), repo[rnode]
+            lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
             if bookmarks.validdest(repo, rctx, lctx):
                 bookmarkedheads.add(lctx.node())
         else:
@@ -310,6 +333,7 @@
 
     return bookmarkedheads
 
+
 def checkheads(pushop):
     """Check that a push won't add any outgoing head
 
@@ -333,28 +357,33 @@
         # remote is empty, nothing to check.
         return
 
-    if remote.capable('branchmap'):
+    if remote.capable(b'branchmap'):
         headssum = _headssummary(pushop)
     else:
         headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
     pushop.pushbranchmap = headssum
-    newbranches = [branch for branch, heads in headssum.iteritems()
-                   if heads[0] is None]
+    newbranches = [
+        branch
+        for branch, heads in pycompat.iteritems(headssum)
+        if heads[0] is None
+    ]
     # 1. Check for new branches on the remote.
     if newbranches and not newbranch:  # new branch requires --new-branch
-        branchnames = ', '.join(sorted(newbranches))
+        branchnames = b', '.join(sorted(newbranches))
         # Calculate how many of the new branches are closed branches
         closedbranches = set()
         for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
             if isclosed:
                 closedbranches.add(tag)
-        closedbranches = (closedbranches & set(newbranches))
+        closedbranches = closedbranches & set(newbranches)
         if closedbranches:
-            errmsg = (_("push creates new remote branches: %s (%d closed)!")
-                        % (branchnames, len(closedbranches)))
+            errmsg = _(b"push creates new remote branches: %s (%d closed)!") % (
+                branchnames,
+                len(closedbranches),
+            )
         else:
-            errmsg = (_("push creates new remote branches: %s!")% branchnames)
-        hint=_("use 'hg push --new-branch' to create new remote branches")
+            errmsg = _(b"push creates new remote branches: %s!") % branchnames
+        hint = _(b"use 'hg push --new-branch' to create new remote branches")
         raise error.Abort(errmsg, hint=hint)
 
     # 2. Find heads that we need not warn about
@@ -364,7 +393,7 @@
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     errormsg = None
-    for branch, heads in sorted(headssum.iteritems()):
+    for branch, heads in sorted(pycompat.iteritems(headssum)):
         remoteheads, newheads, unsyncedheads, discardedheads = heads
         # add unsynced data
         if remoteheads is None:
@@ -372,7 +401,7 @@
         else:
             oldhs = set(remoteheads)
         oldhs.update(unsyncedheads)
-        dhs = None # delta heads, the new heads on branch
+        dhs = None  # delta heads, the new heads on branch
         newhs = set(newheads)
         newhs.update(unsyncedheads)
         if unsyncedheads:
@@ -382,55 +411,75 @@
             else:
                 heads = scmutil.nodesummaries(repo, unsyncedheads)
             if heads is None:
-                repo.ui.status(_("remote has heads that are "
-                                 "not known locally\n"))
+                repo.ui.status(
+                    _(b"remote has heads that are not known locally\n")
+                )
             elif branch is None:
-                repo.ui.status(_("remote has heads that are "
-                                 "not known locally: %s\n") % heads)
+                repo.ui.status(
+                    _(b"remote has heads that are not known locally: %s\n")
+                    % heads
+                )
             else:
-                repo.ui.status(_("remote has heads on branch '%s' that are "
-                                 "not known locally: %s\n") % (branch, heads))
+                repo.ui.status(
+                    _(
+                        b"remote has heads on branch '%s' that are "
+                        b"not known locally: %s\n"
+                    )
+                    % (branch, heads)
+                )
         if remoteheads is None:
             if len(newhs) > 1:
                 dhs = list(newhs)
                 if errormsg is None:
-                    errormsg = (_("push creates new branch '%s' "
-                                  "with multiple heads") % (branch))
-                    hint = _("merge or"
-                             " see 'hg help push' for details about"
-                             " pushing new heads")
+                    errormsg = (
+                        _(b"push creates new branch '%s' with multiple heads")
+                        % branch
+                    )
+                    hint = _(
+                        b"merge or"
+                        b" see 'hg help push' for details about"
+                        b" pushing new heads"
+                    )
         elif len(newhs) > len(oldhs):
             # remove bookmarked or existing remote heads from the new heads list
             dhs = sorted(newhs - nowarnheads - oldhs)
         if dhs:
             if errormsg is None:
-                if branch not in ('default', None):
-                    errormsg = _("push creates new remote head %s "
-                                 "on branch '%s'!") % (short(dhs[0]), branch)
+                if branch not in (b'default', None):
+                    errormsg = _(
+                        b"push creates new remote head %s on branch '%s'!"
+                    ) % (short(dhs[0]), branch)
                 elif repo[dhs[0]].bookmarks():
-                    errormsg = _("push creates new remote head %s "
-                                 "with bookmark '%s'!") % (
-                                 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
+                    errormsg = _(
+                        b"push creates new remote head %s "
+                        b"with bookmark '%s'!"
+                    ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                 else:
-                    errormsg = _("push creates new remote head %s!"
-                                 ) % short(dhs[0])
+                    errormsg = _(b"push creates new remote head %s!") % short(
+                        dhs[0]
+                    )
                 if unsyncedheads:
-                    hint = _("pull and merge or"
-                             " see 'hg help push' for details about"
-                             " pushing new heads")
+                    hint = _(
+                        b"pull and merge or"
+                        b" see 'hg help push' for details about"
+                        b" pushing new heads"
+                    )
                 else:
-                    hint = _("merge or"
-                             " see 'hg help push' for details about"
-                             " pushing new heads")
+                    hint = _(
+                        b"merge or"
+                        b" see 'hg help push' for details about"
+                        b" pushing new heads"
+                    )
             if branch is None:
-                repo.ui.note(_("new remote heads:\n"))
+                repo.ui.note(_(b"new remote heads:\n"))
             else:
-                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
+                repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
             for h in dhs:
-                repo.ui.note((" %s\n") % short(h))
+                repo.ui.note(b" %s\n" % short(h))
     if errormsg:
         raise error.Abort(errormsg, hint=hint)
 
+
 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
     """post process the list of new heads with obsolescence information
 
@@ -453,22 +502,24 @@
     torev = unfi.changelog.nodemap.get
     public = phases.public
     getphase = unfi._phasecache.phase
-    ispublic = (lambda r: getphase(unfi, r) == public)
-    ispushed = (lambda n: torev(n) in futurecommon)
+    ispublic = lambda r: getphase(unfi, r) == public
+    ispushed = lambda n: torev(n) in futurecommon
     hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
     successorsmarkers = unfi.obsstore.successors
-    newhs = set() # final set of new heads
-    discarded = set() # new head of fully replaced branch
+    newhs = set()  # final set of new heads
+    discarded = set()  # new head of fully replaced branch
 
-    localcandidate = set() # candidate heads known locally
-    unknownheads = set() # candidate heads unknown locally
+    localcandidate = set()  # candidate heads known locally
+    unknownheads = set()  # candidate heads unknown locally
     for h in candidate_newhs:
         if h in unfi:
             localcandidate.add(h)
         else:
             if successorsmarkers.get(h) is not None:
-                msg = ('checkheads: remote head unknown locally has'
-                       ' local marker: %s\n')
+                msg = (
+                    b'checkheads: remote head unknown locally has'
+                    b' local marker: %s\n'
+                )
                 repo.ui.debug(msg % hex(h))
             unknownheads.add(h)
 
@@ -480,23 +531,26 @@
     while localcandidate:
         nh = localcandidate.pop()
         # run this check early to skip the evaluation of the whole branch
-        if (torev(nh) in futurecommon or ispublic(torev(nh))):
+        if torev(nh) in futurecommon or ispublic(torev(nh)):
             newhs.add(nh)
             continue
 
         # Get all revs/nodes on the branch exclusive to this head
         # (already filtered heads are "ignored"))
-        branchrevs = unfi.revs('only(%n, (%ln+%ln))',
-                               nh, localcandidate, newhs)
+        branchrevs = unfi.revs(
+            b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
+        )
         branchnodes = [tonode(r) for r in branchrevs]
 
         # The branch won't be hidden on the remote if
         # * any part of it is public,
         # * any part of it is considered part of the result by previous logic,
         # * if we have no markers to push to obsolete it.
-        if (any(ispublic(r) for r in branchrevs)
-                or any(torev(n) in futurecommon for n in branchnodes)
-                or any(not hasoutmarker(n) for n in branchnodes)):
+        if (
+            any(ispublic(r) for r in branchrevs)
+            or any(torev(n) in futurecommon for n in branchnodes)
+            or any(not hasoutmarker(n) for n in branchnodes)
+        ):
             newhs.add(nh)
         else:
             # note: there is a corner case if there is a merge in the branch.
@@ -507,6 +561,7 @@
     newhs |= unknownheads
     return newhs, discarded
 
+
 def pushingmarkerfor(obsstore, ispushed, node):
     """true if some markers are to be pushed for node
 
@@ -528,9 +583,9 @@
         markers = successorsmarkers.get(current, ())
         # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
         for m in markers:
-            nexts = m[1] # successors
-            if not nexts: # this is a prune marker
-                nexts = m[5] or () # parents
+            nexts = m[1]  # successors
+            if not nexts:  # this is a prune marker
+                nexts = m[5] or ()  # parents
             for n in nexts:
                 if n not in seen:
                     seen.add(n)
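
The ``pushingmarkerfor`` hunks above amount to an exhaustive walk of the
obsolescence graph: start from a node, repeatedly follow the successors
recorded in its markers (or the parents, for prune markers that record no
successors), and answer true as soon as the walk reaches a node covered by the
push. A standalone sketch of that walk, assuming ``successors`` is a plain
dict of node to markers in the ``(prec, succs, flag, meta, date, parents)``
layout named in the comment above::

   def walk_markers(successors, ispushed, node):
       # exhaustive search: follow successors markers from node until a
       # node covered by the push is found (parents for prune markers)
       stack = [node]
       seen = set(stack)
       while stack:
           current = stack.pop()
           if ispushed(current):
               return True
           for m in successors.get(current, ()):
               nexts = m[1]  # successors
               if not nexts:  # prune marker: follow parents instead
                   nexts = m[5] or ()
               for n in nexts:
                   if n not in seen:
                       seen.add(n)
                       stack.append(n)
       return False

   # toy marker: b'a' was rewritten as b'b'
   succmap = {b'a': [(b'a', (b'b',), 0, (), (0, 0), None)]}
   assert walk_markers(succmap, lambda n: n == b'b', b'a')
   assert not walk_markers(succmap, lambda n: n == b'c', b'a')
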
--- a/mercurial/dispatch.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/dispatch.py	Mon Oct 21 11:09:48 2019 -0400
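
The first hunk below shadows the builtin ``getattr`` with ``pycompat.getattr``
so attribute names may be passed as bytes, as later hunks do (for example
``getattr(signal, name, None)`` over ``b'SIGBREAK'``, ``b'SIGHUP'``,
``b'SIGTERM'``). A rough sketch of the idea behind such a shim on Python 3,
assuming a latin-1 ``sysstr`` helper; an illustration, not the exact pycompat
source::

   import builtins
   import functools

   def sysstr(s):
       # stand-in for pycompat.sysstr: bytes -> native str
       return s.decode('latin-1') if isinstance(s, bytes) else s

   def _wrapattrfunc(f):
       @functools.wraps(f)
       def wrapped(obj, name, *args):
           # decode a bytes attribute name before delegating to the builtin
           return f(obj, sysstr(name), *args)
       return wrapped

   getattr = _wrapattrfunc(builtins.getattr)
   hasattr = _wrapattrfunc(builtins.hasattr)

   assert getattr('abc', b'upper')() == 'ABC'
   assert hasattr([], b'append')
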
@@ -20,6 +20,7 @@
 
 
 from .i18n import _
+from .pycompat import getattr
 
 from hgdemandimport import tracing
 
@@ -48,9 +49,19 @@
     stringutil,
 )
 
+
 class request(object):
-    def __init__(self, args, ui=None, repo=None, fin=None, fout=None,
-                 ferr=None, fmsg=None, prereposetups=None):
+    def __init__(
+        self,
+        args,
+        ui=None,
+        repo=None,
+        fin=None,
+        fout=None,
+        ferr=None,
+        fmsg=None,
+        prereposetups=None,
+    ):
         self.args = args
         self.ui = ui
         self.repo = repo
@@ -80,17 +91,18 @@
                 func, args, kwargs = handlers.pop()
                 try:
                     func(*args, **kwargs)
-                except: # re-raises below
+                except:  # re-raises below
                     if exc is None:
                         exc = sys.exc_info()[1]
-                    self.ui.warn(('error in exit handlers:\n'))
+                    self.ui.warnnoi18n(b'error in exit handlers:\n')
                     self.ui.traceback(force=True)
         finally:
             if exc is not None:
                 raise exc
 
+
 def run():
-    "run the command in sys.argv"
+    b"run the command in sys.argv"
     initstdio()
     with tracing.log('parse args into request'):
         req = request(pycompat.sysargv[1:])
@@ -102,18 +114,19 @@
         status = -1
 
     # In all cases we try to flush stdio streams.
-    if util.safehasattr(req.ui, 'fout'):
+    if util.safehasattr(req.ui, b'fout'):
         try:
             req.ui.fout.flush()
         except IOError as e:
             err = e
             status = -1
 
-    if util.safehasattr(req.ui, 'ferr'):
+    if util.safehasattr(req.ui, b'ferr'):
         try:
             if err is not None and err.errno != errno.EPIPE:
-                req.ui.ferr.write('abort: %s\n' %
-                                  encoding.strtolocal(err.strerror))
+                req.ui.ferr.write(
+                    b'abort: %s\n' % encoding.strtolocal(err.strerror)
+                )
             req.ui.ferr.flush()
         # There's not much we can do about an I/O error here. So (possibly)
         # change the status code and move on.
@@ -123,7 +136,9 @@
     _silencestdio()
     sys.exit(status & 255)
 
+
 if pycompat.ispy3:
+
     def initstdio():
         pass
 
@@ -143,7 +158,10 @@
                 fp.close()
             except IOError:
                 pass
+
+
 else:
+
     def initstdio():
         for fp in (sys.stdin, sys.stdout, sys.stderr):
             procutil.setbinary(fp)
@@ -151,18 +169,21 @@
     def _silencestdio():
         pass
 
+
 def _getsimilar(symbols, value):
     sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
     # The cutoff for similarity here is pretty arbitrary. It should
     # probably be investigated and tweaked.
     return [s for s in symbols if sim(s) > 0.6]
 
+
 def _reportsimilar(write, similar):
     if len(similar) == 1:
-        write(_("(did you mean %s?)\n") % similar[0])
+        write(_(b"(did you mean %s?)\n") % similar[0])
     elif similar:
-        ss = ", ".join(sorted(similar))
-        write(_("(did you mean one of %s?)\n") % ss)
+        ss = b", ".join(sorted(similar))
+        write(_(b"(did you mean one of %s?)\n") % ss)
+
 
 def _formatparse(write, inst):
     similar = []
@@ -170,18 +191,22 @@
         # make sure to check fileset first, as revset can invoke fileset
         similar = _getsimilar(inst.symbols, inst.function)
     if len(inst.args) > 1:
-        write(_("hg: parse error at %s: %s\n") %
-              (pycompat.bytestr(inst.args[1]), inst.args[0]))
-        if inst.args[0].startswith(' '):
-            write(_("unexpected leading whitespace\n"))
+        write(
+            _(b"hg: parse error at %s: %s\n")
+            % (pycompat.bytestr(inst.args[1]), inst.args[0])
+        )
+        if inst.args[0].startswith(b' '):
+            write(_(b"unexpected leading whitespace\n"))
     else:
-        write(_("hg: parse error: %s\n") % inst.args[0])
+        write(_(b"hg: parse error: %s\n") % inst.args[0])
         _reportsimilar(write, similar)
     if inst.hint:
-        write(_("(%s)\n") % inst.hint)
+        write(_(b"(%s)\n") % inst.hint)
+
 
 def _formatargs(args):
-    return ' '.join(procutil.shellquote(a) for a in args)
+    return b' '.join(procutil.shellquote(a) for a in args)
+
 
 def dispatch(req):
     """run the command specified in req.args; returns an integer status code"""
@@ -197,8 +222,8 @@
             if not req.ui:
                 req.ui = uimod.ui.load()
             req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
-            if req.earlyoptions['traceback']:
-                req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
+            if req.earlyoptions[b'traceback']:
+                req.ui.setconfig(b'ui', b'traceback', b'on', b'--traceback')
 
             # set ui streams from the request
             if req.fin:
@@ -210,9 +235,9 @@
             if req.fmsg:
                 req.ui.fmsg = req.fmsg
         except error.Abort as inst:
-            ferr.write(_("abort: %s\n") % inst)
+            ferr.write(_(b"abort: %s\n") % inst)
             if inst.hint:
-                ferr.write(_("(%s)\n") % inst.hint)
+                ferr.write(_(b"(%s)\n") % inst.hint)
             return -1
         except error.ParseError as inst:
             _formatparse(ferr.write, inst)
@@ -224,16 +249,16 @@
         try:
             ret = _runcatch(req) or 0
         except error.ProgrammingError as inst:
-            req.ui.error(_('** ProgrammingError: %s\n') % inst)
+            req.ui.error(_(b'** ProgrammingError: %s\n') % inst)
             if inst.hint:
-                req.ui.error(_('** (%s)\n') % inst.hint)
+                req.ui.error(_(b'** (%s)\n') % inst.hint)
             raise
         except KeyboardInterrupt as inst:
             try:
                 if isinstance(inst, error.SignalInterrupt):
-                    msg = _("killed!\n")
+                    msg = _(b"killed!\n")
                 else:
-                    msg = _("interrupted!\n")
+                    msg = _(b"interrupted!\n")
                 req.ui.error(msg)
             except error.SignalInterrupt:
                 # maybe pager would quit without consuming all the output, and
@@ -247,13 +272,16 @@
             duration = util.timer() - starttime
             req.ui.flush()
             if req.ui.logblockedtimes:
-                req.ui._blockedtimes['command_duration'] = duration * 1000
-                req.ui.log('uiblocked', 'ui blocked ms\n',
-                           **pycompat.strkwargs(req.ui._blockedtimes))
+                req.ui._blockedtimes[b'command_duration'] = duration * 1000
+                req.ui.log(
+                    b'uiblocked',
+                    b'ui blocked ms\n',
+                    **pycompat.strkwargs(req.ui._blockedtimes)
+                )
             return_code = ret & 255
             req.ui.log(
-                "commandfinish",
-                "%s exited %d after %0.2f seconds\n",
+                b"commandfinish",
+                b"%s exited %d after %0.2f seconds\n",
                 msg,
                 return_code,
                 duration,
@@ -263,40 +291,47 @@
             )
             try:
                 req._runexithandlers()
-            except: # exiting, so no re-raises
+            except:  # exiting, so no re-raises
                 ret = ret or -1
         return ret
 
+
 def _runcatch(req):
     with tracing.log('dispatch._runcatch'):
+
         def catchterm(*args):
             raise error.SignalInterrupt
 
         ui = req.ui
         try:
-            for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
+            for name in b'SIGBREAK', b'SIGHUP', b'SIGTERM':
                 num = getattr(signal, name, None)
                 if num:
                     signal.signal(num, catchterm)
         except ValueError:
-            pass # happens if called in a thread
+            pass  # happens if called in a thread
 
         def _runcatchfunc():
             realcmd = None
             try:
                 cmdargs = fancyopts.fancyopts(
-                    req.args[:], commands.globalopts, {})
+                    req.args[:], commands.globalopts, {}
+                )
                 cmd = cmdargs[0]
                 aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
                 realcmd = aliases[0]
-            except (error.UnknownCommand, error.AmbiguousCommand,
-                    IndexError, getopt.GetoptError):
+            except (
+                error.UnknownCommand,
+                error.AmbiguousCommand,
+                IndexError,
+                getopt.GetoptError,
+            ):
                 # Don't handle this here. We know the command is
                 # invalid, but all we're worried about for now is that
                 # it's not a command that server operators expect to
                 # be safe to offer to users in a sandbox.
                 pass
-            if realcmd == 'serve' and '--stdio' in cmdargs:
+            if realcmd == b'serve' and b'--stdio' in cmdargs:
                 # We want to constrain 'hg serve --stdio' instances pretty
                 # closely, as many shared-ssh access tools want to grant
                 # access to run *only* 'hg -R $repo serve --stdio'. We
@@ -305,41 +340,42 @@
                 # shenanigans wherein a user does something like pass
                 # --debugger or --config=ui.debugger=1 as a repo
                 # name. This used to actually run the debugger.
-                if (len(req.args) != 4 or
-                    req.args[0] != '-R' or
-                    req.args[1].startswith('--') or
-                    req.args[2] != 'serve' or
-                    req.args[3] != '--stdio'):
+                if (
+                    len(req.args) != 4
+                    or req.args[0] != b'-R'
+                    or req.args[1].startswith(b'--')
+                    or req.args[2] != b'serve'
+                    or req.args[3] != b'--stdio'
+                ):
                     raise error.Abort(
-                        _('potentially unsafe serve --stdio invocation: %s') %
-                        (stringutil.pprint(req.args),))
+                        _(b'potentially unsafe serve --stdio invocation: %s')
+                        % (stringutil.pprint(req.args),)
+                    )
 
             try:
-                debugger = 'pdb'
-                debugtrace = {
-                    'pdb': pdb.set_trace
-                }
-                debugmortem = {
-                    'pdb': pdb.post_mortem
-                }
+                debugger = b'pdb'
+                debugtrace = {b'pdb': pdb.set_trace}
+                debugmortem = {b'pdb': pdb.post_mortem}
 
                 # read --config before doing anything else
                 # (e.g. to change trust settings for reading .hg/hgrc)
-                cfgs = _parseconfig(req.ui, req.earlyoptions['config'])
+                cfgs = _parseconfig(req.ui, req.earlyoptions[b'config'])
 
                 if req.repo:
                     # copy configs that were passed on the cmdline (--config) to
                     # the repo ui
                     for sec, name, val in cfgs:
-                        req.repo.ui.setconfig(sec, name, val, source='--config')
+                        req.repo.ui.setconfig(
+                            sec, name, val, source=b'--config'
+                        )
 
                 # developer config: ui.debugger
-                debugger = ui.config("ui", "debugger")
+                debugger = ui.config(b"ui", b"debugger")
                 debugmod = pdb
                 if not debugger or ui.plain():
                     # if we are in HGPLAIN mode, then disable custom debugging
-                    debugger = 'pdb'
-                elif req.earlyoptions['debugger']:
+                    debugger = b'pdb'
+                elif req.earlyoptions[b'debugger']:
                     # This import can be slow for fancy debuggers, so only
                     # do it when absolutely necessary, i.e. when actual
                     # debugging has been requested
@@ -347,34 +383,47 @@
                         try:
                             debugmod = __import__(debugger)
                         except ImportError:
-                            pass # Leave debugmod = pdb
+                            pass  # Leave debugmod = pdb
 
                 debugtrace[debugger] = debugmod.set_trace
                 debugmortem[debugger] = debugmod.post_mortem
 
                 # enter the debugger before command execution
-                if req.earlyoptions['debugger']:
-                    ui.warn(_("entering debugger - "
-                            "type c to continue starting hg or h for help\n"))
+                if req.earlyoptions[b'debugger']:
+                    ui.warn(
+                        _(
+                            b"entering debugger - "
+                            b"type c to continue starting hg or h for help\n"
+                        )
+                    )
 
-                    if (debugger != 'pdb' and
-                        debugtrace[debugger] == debugtrace['pdb']):
-                        ui.warn(_("%s debugger specified "
-                                  "but its module was not found\n") % debugger)
+                    if (
+                        debugger != b'pdb'
+                        and debugtrace[debugger] == debugtrace[b'pdb']
+                    ):
+                        ui.warn(
+                            _(
+                                b"%s debugger specified "
+                                b"but its module was not found\n"
+                            )
+                            % debugger
+                        )
                     with demandimport.deactivated():
                         debugtrace[debugger]()
                 try:
                     return _dispatch(req)
                 finally:
                     ui.flush()
-            except: # re-raises
+            except:  # re-raises
                 # enter the debugger when we hit an exception
-                if req.earlyoptions['debugger']:
+                if req.earlyoptions[b'debugger']:
                     traceback.print_exc()
                     debugmortem[debugger](sys.exc_info()[2])
                 raise
+
         return _callcatch(ui, _runcatchfunc)
 
+
 def _callcatch(ui, func):
     """like scmutil.callcatch but handles more high-level exceptions about
     config parsing and commands. besides, use handlecommandexception to handle
@@ -383,27 +432,30 @@
     try:
         return scmutil.callcatch(ui, func)
     except error.AmbiguousCommand as inst:
-        ui.warn(_("hg: command '%s' is ambiguous:\n    %s\n") %
-                (inst.args[0], " ".join(inst.args[1])))
+        ui.warn(
+            _(b"hg: command '%s' is ambiguous:\n    %s\n")
+            % (inst.args[0], b" ".join(inst.args[1]))
+        )
     except error.CommandError as inst:
         if inst.args[0]:
-            ui.pager('help')
+            ui.pager(b'help')
             msgbytes = pycompat.bytestr(inst.args[1])
-            ui.warn(_("hg %s: %s\n") % (inst.args[0], msgbytes))
+            ui.warn(_(b"hg %s: %s\n") % (inst.args[0], msgbytes))
             commands.help_(ui, inst.args[0], full=False, command=True)
         else:
-            ui.warn(_("hg: %s\n") % inst.args[1])
-            ui.warn(_("(use 'hg help -v' for a list of global options)\n"))
+            ui.warn(_(b"hg: %s\n") % inst.args[1])
+            ui.warn(_(b"(use 'hg help -v' for a list of global options)\n"))
     except error.ParseError as inst:
         _formatparse(ui.warn, inst)
         return -1
     except error.UnknownCommand as inst:
-        nocmdmsg = _("hg: unknown command '%s'\n") % inst.args[0]
+        nocmdmsg = _(b"hg: unknown command '%s'\n") % inst.args[0]
         try:
             # check if the command is in a disabled extension
             # (but don't check for extensions themselves)
-            formatted = help.formattedhelp(ui, commands, inst.args[0],
-                                           unknowncmd=True)
+            formatted = help.formattedhelp(
+                ui, commands, inst.args[0], unknowncmd=True
+            )
             ui.warn(nocmdmsg)
             ui.write(formatted)
         except (error.UnknownCommand, error.Abort):
@@ -416,7 +468,7 @@
                     suggested = True
             if not suggested:
                 ui.warn(nocmdmsg)
-                ui.warn(_("(use 'hg help' for a list of commands)\n"))
+                ui.warn(_(b"(use 'hg help' for a list of commands)\n"))
     except IOError:
         raise
     except KeyboardInterrupt:
@@ -427,27 +479,30 @@
 
     return -1
 
+
 def aliasargs(fn, givenargs):
     args = []
     # only care about alias 'args', ignore 'args' set by extensions.wrapfunction
-    if not util.safehasattr(fn, '_origfunc'):
+    if not util.safehasattr(fn, b'_origfunc'):
         args = getattr(fn, 'args', args)
     if args:
-        cmd = ' '.join(map(procutil.shellquote, args))
+        cmd = b' '.join(map(procutil.shellquote, args))
 
         nums = []
+
         def replacer(m):
             num = int(m.group(1)) - 1
             nums.append(num)
             if num < len(givenargs):
                 return givenargs[num]
-            raise error.Abort(_('too few arguments for command alias'))
+            raise error.Abort(_(b'too few arguments for command alias'))
+
         cmd = re.sub(br'\$(\d+|\$)', replacer, cmd)
-        givenargs = [x for i, x in enumerate(givenargs)
-                     if i not in nums]
+        givenargs = [x for i, x in enumerate(givenargs) if i not in nums]
         args = pycompat.shlexsplit(cmd)
     return args + givenargs
 
+
 def aliasinterpolate(name, args, cmd):
     '''interpolate args into cmd for shell aliases
 
@@ -455,36 +510,37 @@
     '''
     # util.interpolate can't deal with "$@" (with quotes) because it's only
     # built to match prefix + patterns.
-    replacemap = dict(('$%d' % (i + 1), arg) for i, arg in enumerate(args))
-    replacemap['$0'] = name
-    replacemap['$$'] = '$'
-    replacemap['$@'] = ' '.join(args)
+    replacemap = dict((b'$%d' % (i + 1), arg) for i, arg in enumerate(args))
+    replacemap[b'$0'] = name
+    replacemap[b'$$'] = b'$'
+    replacemap[b'$@'] = b' '.join(args)
     # Typical Unix shells interpolate "$@" (with quotes) as all the positional
     # parameters, separated out into words. Emulate the same behavior here by
     # quoting the arguments individually. POSIX shells will then typically
     # tokenize each argument into exactly one word.
-    replacemap['"$@"'] = ' '.join(procutil.shellquote(arg) for arg in args)
+    replacemap[b'"$@"'] = b' '.join(procutil.shellquote(arg) for arg in args)
     # escape '\$' for regex
-    regex = '|'.join(replacemap.keys()).replace('$', br'\$')
+    regex = b'|'.join(replacemap.keys()).replace(b'$', br'\$')
     r = re.compile(regex)
     return r.sub(lambda x: replacemap[x.group()], cmd)
 
+
 class cmdalias(object):
     def __init__(self, ui, name, definition, cmdtable, source):
         self.name = self.cmd = name
-        self.cmdname = ''
+        self.cmdname = b''
         self.definition = definition
         self.fn = None
         self.givenargs = []
         self.opts = []
-        self.help = ''
+        self.help = b''
         self.badalias = None
         self.unknowncmd = False
         self.source = source
 
         try:
             aliases, entry = cmdutil.findcmd(self.name, cmdtable)
-            for alias, e in cmdtable.iteritems():
+            for alias, e in pycompat.iteritems(cmdtable):
                 if e is entry:
                     self.cmd = alias
                     break
@@ -493,28 +549,35 @@
             self.shadows = False
 
         if not self.definition:
-            self.badalias = _("no definition for alias '%s'") % self.name
+            self.badalias = _(b"no definition for alias '%s'") % self.name
             return
 
-        if self.definition.startswith('!'):
+        if self.definition.startswith(b'!'):
             shdef = self.definition[1:]
             self.shell = True
+
             def fn(ui, *args):
-                env = {'HG_ARGS': ' '.join((self.name,) + args)}
+                env = {b'HG_ARGS': b' '.join((self.name,) + args)}
+
                 def _checkvar(m):
-                    if m.groups()[0] == '$':
+                    if m.groups()[0] == b'$':
                         return m.group()
                     elif int(m.groups()[0]) <= len(args):
                         return m.group()
                     else:
-                        ui.debug("No argument found for substitution "
-                                 "of %i variable in alias '%s' definition.\n"
-                                 % (int(m.groups()[0]), self.name))
-                        return ''
+                        ui.debug(
+                            b"No argument found for substitution "
+                            b"of %i variable in alias '%s' definition.\n"
+                            % (int(m.groups()[0]), self.name)
+                        )
+                        return b''
+
                 cmd = re.sub(br'\$(\d+|\$)', _checkvar, shdef)
                 cmd = aliasinterpolate(self.name, args, cmd)
-                return ui.system(cmd, environ=env,
-                                 blockedtag='alias_%s' % self.name)
+                return ui.system(
+                    cmd, environ=env, blockedtag=b'alias_%s' % self.name
+                )
+
             self.fn = fn
             self.alias = True
             self._populatehelp(ui, name, shdef, self.fn)
@@ -523,15 +586,17 @@
         try:
             args = pycompat.shlexsplit(self.definition)
         except ValueError as inst:
-            self.badalias = (_("error in definition for alias '%s': %s")
-                             % (self.name, stringutil.forcebytestr(inst)))
+            self.badalias = _(b"error in definition for alias '%s': %s") % (
+                self.name,
+                stringutil.forcebytestr(inst),
+            )
             return
         earlyopts, args = _earlysplitopts(args)
         if earlyopts:
-            self.badalias = (_("error in definition for alias '%s': %s may "
-                               "only be given on the command line")
-                             % (self.name, '/'.join(pycompat.ziplist(*earlyopts)
-                                                    [0])))
+            self.badalias = _(
+                b"error in definition for alias '%s': %s may "
+                b"only be given on the command line"
+            ) % (self.name, b'/'.join(pycompat.ziplist(*earlyopts)[0]))
             return
         self.cmdname = cmd = args.pop(0)
         self.givenargs = args
@@ -548,38 +613,43 @@
             self._populatehelp(ui, name, cmd, self.fn, cmdhelp)
 
         except error.UnknownCommand:
-            self.badalias = (_("alias '%s' resolves to unknown command '%s'")
-                             % (self.name, cmd))
+            self.badalias = _(
+                b"alias '%s' resolves to unknown command '%s'"
+            ) % (self.name, cmd,)
             self.unknowncmd = True
         except error.AmbiguousCommand:
-            self.badalias = (_("alias '%s' resolves to ambiguous command '%s'")
-                             % (self.name, cmd))
+            self.badalias = _(
+                b"alias '%s' resolves to ambiguous command '%s'"
+            ) % (self.name, cmd)
 
     def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None):
         # confine strings to be passed to i18n.gettext()
         cfg = {}
-        for k in ('doc', 'help', 'category'):
-            v = ui.config('alias', '%s:%s' % (name, k), None)
+        for k in (b'doc', b'help', b'category'):
+            v = ui.config(b'alias', b'%s:%s' % (name, k), None)
             if v is None:
                 continue
             if not encoding.isasciistr(v):
-                self.badalias = (_("non-ASCII character in alias definition "
-                                   "'%s:%s'") % (name, k))
+                self.badalias = _(
+                    b"non-ASCII character in alias definition '%s:%s'"
+                ) % (name, k)
                 return
             cfg[k] = v
 
-        self.help = cfg.get('help', defaulthelp or '')
-        if self.help and self.help.startswith("hg " + cmd):
+        self.help = cfg.get(b'help', defaulthelp or b'')
+        if self.help and self.help.startswith(b"hg " + cmd):
             # drop prefix in old-style help lines so hg shows the alias
-            self.help = self.help[4 + len(cmd):]
+            self.help = self.help[4 + len(cmd) :]
 
-        self.owndoc = 'doc' in cfg
-        doc = cfg.get('doc', pycompat.getdoc(fn))
+        self.owndoc = b'doc' in cfg
+        doc = cfg.get(b'doc', pycompat.getdoc(fn))
         if doc is not None:
             doc = pycompat.sysstr(doc)
         self.__doc__ = doc
 
-        self.helpcategory = cfg.get('category', registrar.command.CATEGORY_NONE)
+        self.helpcategory = cfg.get(
+            b'category', registrar.command.CATEGORY_NONE
+        )
 
     @property
     def args(self):
@@ -587,11 +657,15 @@
         return aliasargs(self.fn, args)
 
     def __getattr__(self, name):
-        adefaults = {r'norepo': True, r'intents': set(),
-                     r'optionalrepo': False, r'inferrepo': False}
+        adefaults = {
+            r'norepo': True,
+            r'intents': set(),
+            r'optionalrepo': False,
+            r'inferrepo': False,
+        }
         if name not in adefaults:
             raise AttributeError(name)
-        if self.badalias or util.safehasattr(self, 'shell'):
+        if self.badalias or util.safehasattr(self, b'shell'):
             return adefaults[name]
         return getattr(self.fn, name)
 
@@ -602,26 +676,32 @@
                 try:
                     # check if the command is in a disabled extension
                     cmd, ext = extensions.disabledcmd(ui, self.cmdname)[:2]
-                    hint = _("'%s' is provided by '%s' extension") % (cmd, ext)
+                    hint = _(b"'%s' is provided by '%s' extension") % (cmd, ext)
                 except error.UnknownCommand:
                     pass
             raise error.Abort(self.badalias, hint=hint)
         if self.shadows:
-            ui.debug("alias '%s' shadows command '%s'\n" %
-                     (self.name, self.cmdname))
+            ui.debug(
+                b"alias '%s' shadows command '%s'\n" % (self.name, self.cmdname)
+            )
 
-        ui.log('commandalias', "alias '%s' expands to '%s'\n",
-               self.name, self.definition)
-        if util.safehasattr(self, 'shell'):
+        ui.log(
+            b'commandalias',
+            b"alias '%s' expands to '%s'\n",
+            self.name,
+            self.definition,
+        )
+        if util.safehasattr(self, b'shell'):
             return self.fn(ui, *args, **opts)
         else:
             try:
                 return util.checksignature(self.fn)(ui, *args, **opts)
             except error.SignatureError:
-                args = ' '.join([self.cmdname] + self.args)
-                ui.debug("alias '%s' expands to '%s'\n" % (self.name, args))
+                args = b' '.join([self.cmdname] + self.args)
+                ui.debug(b"alias '%s' expands to '%s'\n" % (self.name, args))
                 raise
 
+
 class lazyaliasentry(object):
     """like a typical command entry (func, opts, help), but is lazy"""
 
@@ -635,8 +715,9 @@
 
     @util.propertycache
     def _aliasdef(self):
-        return cmdalias(self.ui, self.name, self.definition, self.cmdtable,
-                        self.source)
+        return cmdalias(
+            self.ui, self.name, self.definition, self.cmdtable, self.source
+        )
 
     def __getitem__(self, n):
         aliasdef = self._aliasdef
@@ -656,11 +737,12 @@
     def __len__(self):
         return 3
 
+
 def addaliases(ui, cmdtable):
     # aliases are processed after extensions have been loaded, so they
     # may use extension commands. Aliases can also use other alias definitions,
     # but only if they have been defined prior to the current definition.
-    for alias, definition in ui.configitems('alias', ignoresub=True):
+    for alias, definition in ui.configitems(b'alias', ignoresub=True):
         try:
             if cmdtable[alias].definition == definition:
                 continue
@@ -668,10 +750,11 @@
             # definition might not exist or it might not be a cmdalias
             pass
 
-        source = ui.configsource('alias', alias)
+        source = ui.configsource(b'alias', alias)
         entry = lazyaliasentry(ui, alias, definition, cmdtable, source)
         cmdtable[alias] = entry
 
+
 def _parse(ui, args):
     options = {}
     cmdoptions = {}
@@ -683,14 +766,17 @@
 
     if args:
         cmd, args = args[0], args[1:]
-        aliases, entry = cmdutil.findcmd(cmd, commands.table,
-                                         ui.configbool("ui", "strict"))
+        aliases, entry = cmdutil.findcmd(
+            cmd, commands.table, ui.configbool(b"ui", b"strict")
+        )
         cmd = aliases[0]
         args = aliasargs(entry[0], args)
-        defaults = ui.config("defaults", cmd)
+        defaults = ui.config(b"defaults", cmd)
         if defaults:
-            args = pycompat.maplist(
-                util.expandpath, pycompat.shlexsplit(defaults)) + args
+            args = (
+                pycompat.maplist(util.expandpath, pycompat.shlexsplit(defaults))
+                + args
+            )
         c = list(entry[1])
     else:
         cmd = None
@@ -713,57 +799,93 @@
 
     return (cmd, cmd and entry[0] or None, args, options, cmdoptions)
 
+
 def _parseconfig(ui, config):
     """parse the --config options from the command line"""
     configs = []
 
     for cfg in config:
         try:
-            name, value = [cfgelem.strip()
-                           for cfgelem in cfg.split('=', 1)]
-            section, name = name.split('.', 1)
+            name, value = [cfgelem.strip() for cfgelem in cfg.split(b'=', 1)]
+            section, name = name.split(b'.', 1)
             if not section or not name:
                 raise IndexError
-            ui.setconfig(section, name, value, '--config')
+            ui.setconfig(section, name, value, b'--config')
             configs.append((section, name, value))
         except (IndexError, ValueError):
-            raise error.Abort(_('malformed --config option: %r '
-                                '(use --config section.name=value)')
-                              % pycompat.bytestr(cfg))
+            raise error.Abort(
+                _(
+                    b'malformed --config option: %r '
+                    b'(use --config section.name=value)'
+                )
+                % pycompat.bytestr(cfg)
+            )
 
     return configs
 
+
 def _earlyparseopts(ui, args):
     options = {}
-    fancyopts.fancyopts(args, commands.globalopts, options,
-                        gnu=not ui.plain('strictflags'), early=True,
-                        optaliases={'repository': ['repo']})
+    fancyopts.fancyopts(
+        args,
+        commands.globalopts,
+        options,
+        gnu=not ui.plain(b'strictflags'),
+        early=True,
+        optaliases={b'repository': [b'repo']},
+    )
     return options
 
+
 def _earlysplitopts(args):
     """Split args into a list of possible early options and remainder args"""
-    shortoptions = 'R:'
+    shortoptions = b'R:'
     # TODO: perhaps 'debugger' should be included
-    longoptions = ['cwd=', 'repository=', 'repo=', 'config=']
-    return fancyopts.earlygetopt(args, shortoptions, longoptions,
-                                 gnu=True, keepsep=True)
+    longoptions = [b'cwd=', b'repository=', b'repo=', b'config=']
+    return fancyopts.earlygetopt(
+        args, shortoptions, longoptions, gnu=True, keepsep=True
+    )
+
 
 def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
     # run pre-hook, and abort if it fails
-    hook.hook(lui, repo, "pre-%s" % cmd, True, args=" ".join(fullargs),
-              pats=cmdpats, opts=cmdoptions)
+    hook.hook(
+        lui,
+        repo,
+        b"pre-%s" % cmd,
+        True,
+        args=b" ".join(fullargs),
+        pats=cmdpats,
+        opts=cmdoptions,
+    )
     try:
         ret = _runcommand(ui, options, cmd, d)
         # run post-hook, passing command result
-        hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
-                  result=ret, pats=cmdpats, opts=cmdoptions)
+        hook.hook(
+            lui,
+            repo,
+            b"post-%s" % cmd,
+            False,
+            args=b" ".join(fullargs),
+            result=ret,
+            pats=cmdpats,
+            opts=cmdoptions,
+        )
     except Exception:
         # run failure hook and re-raise
-        hook.hook(lui, repo, "fail-%s" % cmd, False, args=" ".join(fullargs),
-                  pats=cmdpats, opts=cmdoptions)
+        hook.hook(
+            lui,
+            repo,
+            b"fail-%s" % cmd,
+            False,
+            args=b" ".join(fullargs),
+            pats=cmdpats,
+            opts=cmdoptions,
+        )
         raise
     return ret
 
+
 def _getlocal(ui, rpath, wd=None):
     """Return (path, local ui object) for the given target path.
 
@@ -773,22 +895,25 @@
         try:
             wd = encoding.getcwd()
         except OSError as e:
-            raise error.Abort(_("error getting current working directory: %s") %
-                              encoding.strtolocal(e.strerror))
-    path = cmdutil.findrepo(wd) or ""
+            raise error.Abort(
+                _(b"error getting current working directory: %s")
+                % encoding.strtolocal(e.strerror)
+            )
+    path = cmdutil.findrepo(wd) or b""
     if not path:
         lui = ui
     else:
         lui = ui.copy()
-        lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
+        lui.readconfig(os.path.join(path, b".hg", b"hgrc"), path)
 
     if rpath:
         path = lui.expandpath(rpath)
         lui = ui.copy()
-        lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
+        lui.readconfig(os.path.join(path, b".hg", b"hgrc"), path)
 
     return path, lui
 
+
 def _checkshellalias(lui, ui, args):
     """Return the function to run the shell alias, if it is required"""
     options = {}
@@ -805,7 +930,7 @@
 
     cmd = args[0]
     try:
-        strict = ui.configbool("ui", "strict")
+        strict = ui.configbool(b"ui", b"strict")
         aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
     except (error.AmbiguousCommand, error.UnknownCommand):
         return
@@ -813,23 +938,25 @@
     cmd = aliases[0]
     fn = entry[0]
 
-    if cmd and util.safehasattr(fn, 'shell'):
+    if cmd and util.safehasattr(fn, b'shell'):
         # shell alias shouldn't receive early options which are consumed by hg
         _earlyopts, args = _earlysplitopts(args)
         d = lambda: fn(ui, *args[1:])
-        return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d,
-                                  [], {})
+        return lambda: runcommand(
+            lui, None, cmd, args[:1], ui, options, d, [], {}
+        )
+
 
 def _dispatch(req):
     args = req.args
     ui = req.ui
 
     # check for cwd
-    cwd = req.earlyoptions['cwd']
+    cwd = req.earlyoptions[b'cwd']
     if cwd:
         os.chdir(cwd)
 
-    rpath = req.earlyoptions['repository']
+    rpath = req.earlyoptions[b'repository']
     path, lui = _getlocal(ui, rpath)
 
     uis = {ui, lui}
@@ -837,18 +964,21 @@
     if req.repo:
         uis.add(req.repo.ui)
 
-    if (req.earlyoptions['verbose'] or req.earlyoptions['debug']
-            or req.earlyoptions['quiet']):
-        for opt in ('verbose', 'debug', 'quiet'):
+    if (
+        req.earlyoptions[b'verbose']
+        or req.earlyoptions[b'debug']
+        or req.earlyoptions[b'quiet']
+    ):
+        for opt in (b'verbose', b'debug', b'quiet'):
             val = pycompat.bytestr(bool(req.earlyoptions[opt]))
             for ui_ in uis:
-                ui_.setconfig('ui', opt, val, '--' + opt)
+                ui_.setconfig(b'ui', opt, val, b'--' + opt)
 
-    if req.earlyoptions['profile']:
+    if req.earlyoptions[b'profile']:
         for ui_ in uis:
-            ui_.setconfig('profiling', 'enabled', 'true', '--profile')
+            ui_.setconfig(b'profiling', b'enabled', b'true', b'--profile')
 
-    profile = lui.configbool('profiling', 'enabled')
+    profile = lui.configbool(b'profiling', b'enabled')
     with profiling.profile(lui, enabled=profile) as profiler:
         # Configure extensions in phases: uisetup, extsetup, cmdtable, and
         # reposetup
@@ -872,7 +1002,7 @@
             return shellaliasfn()
 
         # check for fallback encoding
-        fallback = lui.config('ui', 'fallbackencoding')
+        fallback = lui.config(b'ui', b'fallbackencoding')
         if fallback:
             encoding.fallbackencoding = fallback
 
@@ -882,72 +1012,86 @@
         # store the canonical command name in request object for later access
         req.canonical_command = cmd
 
-        if options["config"] != req.earlyoptions["config"]:
-            raise error.Abort(_("option --config may not be abbreviated!"))
-        if options["cwd"] != req.earlyoptions["cwd"]:
-            raise error.Abort(_("option --cwd may not be abbreviated!"))
-        if options["repository"] != req.earlyoptions["repository"]:
-            raise error.Abort(_(
-                "option -R has to be separated from other options (e.g. not "
-                "-qR) and --repository may only be abbreviated as --repo!"))
-        if options["debugger"] != req.earlyoptions["debugger"]:
-            raise error.Abort(_("option --debugger may not be abbreviated!"))
+        if options[b"config"] != req.earlyoptions[b"config"]:
+            raise error.Abort(_(b"option --config may not be abbreviated!"))
+        if options[b"cwd"] != req.earlyoptions[b"cwd"]:
+            raise error.Abort(_(b"option --cwd may not be abbreviated!"))
+        if options[b"repository"] != req.earlyoptions[b"repository"]:
+            raise error.Abort(
+                _(
+                    b"option -R has to be separated from other options (e.g. not "
+                    b"-qR) and --repository may only be abbreviated as --repo!"
+                )
+            )
+        if options[b"debugger"] != req.earlyoptions[b"debugger"]:
+            raise error.Abort(_(b"option --debugger may not be abbreviated!"))
         # don't validate --profile/--traceback, which can be enabled from now
 
-        if options["encoding"]:
-            encoding.encoding = options["encoding"]
-        if options["encodingmode"]:
-            encoding.encodingmode = options["encodingmode"]
-        if options["time"]:
+        if options[b"encoding"]:
+            encoding.encoding = options[b"encoding"]
+        if options[b"encodingmode"]:
+            encoding.encodingmode = options[b"encodingmode"]
+        if options[b"time"]:
+
             def get_times():
                 t = os.times()
                 if t[4] == 0.0:
                     # Windows leaves this as zero, so use time.clock()
                     t = (t[0], t[1], t[2], t[3], time.clock())
                 return t
+
             s = get_times()
+
             def print_time():
                 t = get_times()
                 ui.warn(
-                    _("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
-                    (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
+                    _(b"time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n")
+                    % (
+                        t[4] - s[4],
+                        t[0] - s[0],
+                        t[2] - s[2],
+                        t[1] - s[1],
+                        t[3] - s[3],
+                    )
+                )
+
             ui.atexit(print_time)
-        if options["profile"]:
+        if options[b"profile"]:
             profiler.start()
 
         # if abbreviated version of this were used, take them in account, now
-        if options['verbose'] or options['debug'] or options['quiet']:
-            for opt in ('verbose', 'debug', 'quiet'):
+        if options[b'verbose'] or options[b'debug'] or options[b'quiet']:
+            for opt in (b'verbose', b'debug', b'quiet'):
                 if options[opt] == req.earlyoptions[opt]:
                     continue
                 val = pycompat.bytestr(bool(options[opt]))
                 for ui_ in uis:
-                    ui_.setconfig('ui', opt, val, '--' + opt)
+                    ui_.setconfig(b'ui', opt, val, b'--' + opt)
 
-        if options['traceback']:
+        if options[b'traceback']:
             for ui_ in uis:
-                ui_.setconfig('ui', 'traceback', 'on', '--traceback')
+                ui_.setconfig(b'ui', b'traceback', b'on', b'--traceback')
 
-        if options['noninteractive']:
+        if options[b'noninteractive']:
             for ui_ in uis:
-                ui_.setconfig('ui', 'interactive', 'off', '-y')
+                ui_.setconfig(b'ui', b'interactive', b'off', b'-y')
 
-        if cmdoptions.get('insecure', False):
+        if cmdoptions.get(b'insecure', False):
             for ui_ in uis:
                 ui_.insecureconnections = True
 
         # setup color handling before pager, because setting up pager
         # might cause incorrect console information
-        coloropt = options['color']
+        coloropt = options[b'color']
         for ui_ in uis:
             if coloropt:
-                ui_.setconfig('ui', 'color', coloropt, '--color')
+                ui_.setconfig(b'ui', b'color', coloropt, b'--color')
             color.setup(ui_)
 
-        if stringutil.parsebool(options['pager']):
+        if stringutil.parsebool(options[b'pager']):
             # ui.pager() expects 'internal-always-' prefix in this case
-            ui.pager('internal-always-' + cmd)
-        elif options['pager'] != 'auto':
+            ui.pager(b'internal-always-' + cmd)
+        elif options[b'pager'] != b'auto':
             for ui_ in uis:
                 ui_.disablepager()
 
@@ -955,12 +1099,12 @@
         for ui_ in uis:
             extensions.populateui(ui_)
 
-        if options['version']:
+        if options[b'version']:
             return commands.version_(ui)
-        if options['help']:
+        if options[b'help']:
             return commands.help_(ui, cmd, command=cmd is not None)
         elif not cmd:
-            return commands.help_(ui, 'shortlist')
+            return commands.help_(ui, b'shortlist')
 
         repo = None
         cmdpats = args[:]
@@ -977,18 +1121,23 @@
                 repo.ui.fmsg = ui.fmsg
             else:
                 try:
-                    repo = hg.repository(ui, path=path,
-                                         presetupfuncs=req.prereposetups,
-                                         intents=func.intents)
+                    repo = hg.repository(
+                        ui,
+                        path=path,
+                        presetupfuncs=req.prereposetups,
+                        intents=func.intents,
+                    )
                     if not repo.local():
-                        raise error.Abort(_("repository '%s' is not local")
-                                          % path)
-                    repo.ui.setconfig("bundle", "mainreporoot", repo.root,
-                                      'repo')
+                        raise error.Abort(
+                            _(b"repository '%s' is not local") % path
+                        )
+                    repo.ui.setconfig(
+                        b"bundle", b"mainreporoot", repo.root, b'repo'
+                    )
                 except error.RequirementError:
                     raise
                 except error.RepoError:
-                    if rpath: # invalid -R path
+                    if rpath:  # invalid -R path
                         raise
                     if not func.optionalrepo:
                         if func.inferrepo and args and not path:
@@ -996,40 +1145,47 @@
                             repos = pycompat.maplist(cmdutil.findrepo, args)
                             guess = repos[0]
                             if guess and repos.count(guess) == len(repos):
-                                req.args = ['--repository', guess] + fullargs
-                                req.earlyoptions['repository'] = guess
+                                req.args = [b'--repository', guess] + fullargs
+                                req.earlyoptions[b'repository'] = guess
                                 return _dispatch(req)
                         if not path:
-                            raise error.RepoError(_("no repository found in"
-                                                    " '%s' (.hg not found)")
-                                                  % encoding.getcwd())
+                            raise error.RepoError(
+                                _(
+                                    b"no repository found in"
+                                    b" '%s' (.hg not found)"
+                                )
+                                % encoding.getcwd()
+                            )
                         raise
             if repo:
                 ui = repo.ui
-                if options['hidden']:
+                if options[b'hidden']:
                     repo = repo.unfiltered()
             args.insert(0, repo)
         elif rpath:
-            ui.warn(_("warning: --repository ignored\n"))
+            ui.warn(_(b"warning: --repository ignored\n"))
 
         msg = _formatargs(fullargs)
-        ui.log("command", '%s\n', msg)
+        ui.log(b"command", b'%s\n', msg)
         strcmdopt = pycompat.strkwargs(cmdoptions)
         d = lambda: util.checksignature(func)(ui, *args, **strcmdopt)
         try:
-            return runcommand(lui, repo, cmd, fullargs, ui, options, d,
-                              cmdpats, cmdoptions)
+            return runcommand(
+                lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions
+            )
         finally:
             if repo and repo != req.repo:
                 repo.close()
 
+
 def _runcommand(ui, options, cmd, cmdfunc):
     """Run a command function, possibly with profiling enabled."""
     try:
         with tracing.log("Running %s command" % cmd):
             return cmdfunc()
     except error.SignatureError:
-        raise error.CommandError(cmd, _('invalid arguments'))
+        raise error.CommandError(cmd, _(b'invalid arguments'))
+
 
 def _exceptionwarning(ui):
     """Produce a warning message for the current active exception"""
@@ -1042,16 +1198,18 @@
     # of date) will be clueful enough to notice the implausible
     # version number and try updating.
     ct = util.versiontuple(n=2)
-    worst = None, ct, ''
-    if ui.config('ui', 'supportcontact') is None:
+    worst = None, ct, b''
+    if ui.config(b'ui', b'supportcontact') is None:
         for name, mod in extensions.extensions():
             # 'testedwith' should be bytes, but not all extensions are ported
             # to py3 and we don't want UnicodeException because of that.
-            testedwith = stringutil.forcebytestr(getattr(mod, 'testedwith', ''))
-            report = getattr(mod, 'buglink', _('the extension author.'))
+            testedwith = stringutil.forcebytestr(
+                getattr(mod, 'testedwith', b'')
+            )
+            report = getattr(mod, 'buglink', _(b'the extension author.'))
             if not testedwith.strip():
                 # We found an untested extension. It's likely the culprit.
-                worst = name, 'unknown', report
+                worst = name, b'unknown', report
                 break
 
             # Never blame on extensions bundled with Mercurial.
@@ -1069,28 +1227,40 @@
     if worst[0] is not None:
         name, testedwith, report = worst
         if not isinstance(testedwith, (bytes, str)):
-            testedwith = '.'.join([stringutil.forcebytestr(c)
-                                   for c in testedwith])
-        warning = (_('** Unknown exception encountered with '
-                     'possibly-broken third-party extension %s\n'
-                     '** which supports versions %s of Mercurial.\n'
-                     '** Please disable %s and try your action again.\n'
-                     '** If that fixes the bug please report it to %s\n')
-                   % (name, testedwith, name, stringutil.forcebytestr(report)))
+            testedwith = b'.'.join(
+                [stringutil.forcebytestr(c) for c in testedwith]
+            )
+        warning = _(
+            b'** Unknown exception encountered with '
+            b'possibly-broken third-party extension %s\n'
+            b'** which supports versions %s of Mercurial.\n'
+            b'** Please disable %s and try your action again.\n'
+            b'** If that fixes the bug please report it to %s\n'
+        ) % (name, testedwith, name, stringutil.forcebytestr(report))
     else:
-        bugtracker = ui.config('ui', 'supportcontact')
+        bugtracker = ui.config(b'ui', b'supportcontact')
         if bugtracker is None:
-            bugtracker = _("https://mercurial-scm.org/wiki/BugTracker")
-        warning = (_("** unknown exception encountered, "
-                     "please report by visiting\n** ") + bugtracker + '\n')
-    sysversion = pycompat.sysbytes(sys.version).replace('\n', '')
-    warning += ((_("** Python %s\n") % sysversion) +
-                (_("** Mercurial Distributed SCM (version %s)\n") %
-                 util.version()) +
-                (_("** Extensions loaded: %s\n") %
-                 ", ".join([x[0] for x in extensions.extensions()])))
+            bugtracker = _(b"https://mercurial-scm.org/wiki/BugTracker")
+        warning = (
+            _(
+                b"** unknown exception encountered, "
+                b"please report by visiting\n** "
+            )
+            + bugtracker
+            + b'\n'
+        )
+    sysversion = pycompat.sysbytes(sys.version).replace(b'\n', b'')
+    warning += (
+        (_(b"** Python %s\n") % sysversion)
+        + (_(b"** Mercurial Distributed SCM (version %s)\n") % util.version())
+        + (
+            _(b"** Extensions loaded: %s\n")
+            % b", ".join([x[0] for x in extensions.extensions()])
+        )
+    )
     return warning
 
+
 def handlecommandexception(ui):
     """Produce a warning message for broken commands
 
@@ -1098,7 +1268,11 @@
     this function returns False, ignored otherwise.
     """
     warning = _exceptionwarning(ui)
-    ui.log("commandexception", "%s\n%s\n", warning,
-           pycompat.sysbytes(traceback.format_exc()))
+    ui.log(
+        b"commandexception",
+        b"%s\n%s\n",
+        warning,
+        pycompat.sysbytes(traceback.format_exc()),
+    )
     ui.warn(warning)
     return False  # re-raise the exception
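
The dispatch.py hunks above show the two tree-wide transformations this series applies: string literals feeding Mercurial's bytes-only APIs gain a b'' prefix, and long expressions are rewrapped into black's layout. At the native-str boundaries the code goes through pycompat helpers. A minimal sketch of what two of those helpers do, as simplified Python 3-only stand-ins rather than the real implementations in mercurial.pycompat:

    # Simplified stand-ins for pycompat helpers seen in the hunks above;
    # the real ones also support Python 2.
    def sysbytes(s):
        # native str -> bytes, e.g. for traceback.format_exc() output
        return s.encode('utf-8')

    def strkwargs(dic):
        # bytes keys -> native str keys, so byte-keyed command options
        # can be **-expanded into a Python function call
        return {k.decode('latin-1'): v for k, v in dic.items()}

    opts = {b'rev': b'tip', b'force': True}  # hypothetical options dict
    def fakecommand(ui, **kwargs):           # hypothetical command func
        return kwargs['rev']
    assert fakecommand(None, **strkwargs(opts)) == b'tip'

This is why the call site above wraps cmdoptions in pycompat.strkwargs() before handing it to the command function.
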
--- a/mercurial/encoding.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/encoding.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,15 +11,14 @@
 import os
 import unicodedata
 
+from .pycompat import getattr
 from . import (
     error,
     policy,
     pycompat,
 )
 
-from .pure import (
-    charencode as charencodepure,
-)
+from .pure import charencode as charencodepure
 
 charencode = policy.importmod(r'charencode')
 
@@ -36,11 +35,14 @@
 # These unicode characters are ignored by HFS+ (Apple Technote 1150,
 # "Unicode Subtleties"), so we need to ignore them in some places for
 # sanity.
-_ignore = [unichr(int(x, 16)).encode("utf-8") for x in
-           "200c 200d 200e 200f 202a 202b 202c 202d 202e "
-           "206a 206b 206c 206d 206e 206f feff".split()]
+_ignore = [
+    unichr(int(x, 16)).encode("utf-8")
+    for x in b"200c 200d 200e 200f 202a 202b 202c 202d 202e "
+    b"206a 206b 206c 206d 206e 206f feff".split()
+]
 # verify the next function will work
-assert all(i.startswith(("\xe2", "\xef")) for i in _ignore)
+assert all(i.startswith((b"\xe2", b"\xef")) for i in _ignore)
+
 
 def hfsignoreclean(s):
     """Remove codepoints ignored by HFS+ from s.
@@ -50,14 +52,15 @@
     >>> hfsignoreclean(u'.h\ufeffg'.encode('utf-8'))
     '.hg'
     """
-    if "\xe2" in s or "\xef" in s:
+    if b"\xe2" in s or b"\xef" in s:
         for c in _ignore:
-            s = s.replace(c, '')
+            s = s.replace(c, b'')
     return s
 
+
 # encoding.environ is provided read-only, which may not be used to modify
 # the process environment
-_nativeenviron = (not pycompat.ispy3 or os.supports_bytes_environ)
+_nativeenviron = not pycompat.ispy3 or os.supports_bytes_environ
 if not pycompat.ispy3:
     environ = os.environ  # re-exports
 elif _nativeenviron:
@@ -65,38 +68,44 @@
 else:
     # preferred encoding isn't known yet; use utf-8 to avoid unicode error
     # and recreate it once encoding is settled
-    environ = dict((k.encode(r'utf-8'), v.encode(r'utf-8'))
-                   for k, v in os.environ.items())  # re-exports
+    environ = dict(
+        (k.encode(r'utf-8'), v.encode(r'utf-8'))
+        for k, v in os.environ.items()  # re-exports
+    )
 
 _encodingrewrites = {
-    '646': 'ascii',
-    'ANSI_X3.4-1968': 'ascii',
+    b'646': b'ascii',
+    b'ANSI_X3.4-1968': b'ascii',
 }
 # cp65001 is a Windows variant of utf-8, which isn't supported on Python 2.
 # No idea if it should be rewritten to the canonical name 'utf-8' on Python 3.
 # https://bugs.python.org/issue13216
 if pycompat.iswindows and not pycompat.ispy3:
-    _encodingrewrites['cp65001'] = 'utf-8'
+    _encodingrewrites[b'cp65001'] = b'utf-8'
 
 try:
-    encoding = environ.get("HGENCODING")
+    encoding = environ.get(b"HGENCODING")
     if not encoding:
-        encoding = locale.getpreferredencoding().encode('ascii') or 'ascii'
+        encoding = locale.getpreferredencoding().encode('ascii') or b'ascii'
         encoding = _encodingrewrites.get(encoding, encoding)
 except locale.Error:
-    encoding = 'ascii'
-encodingmode = environ.get("HGENCODINGMODE", "strict")
-fallbackencoding = 'ISO-8859-1'
+    encoding = b'ascii'
+encodingmode = environ.get(b"HGENCODINGMODE", b"strict")
+fallbackencoding = b'ISO-8859-1'
+
 
 class localstr(bytes):
     '''This class allows strings that are unmodified to be
     round-tripped to the local encoding and back'''
+
     def __new__(cls, u, l):
         s = bytes.__new__(cls, l)
         s._utf8 = u
         return s
+
     def __hash__(self):
-        return hash(self._utf8) # avoid collisions in local string space
+        return hash(self._utf8)  # avoid collisions in local string space
+
 
 class safelocalstr(bytes):
     """Tagged string denoting it was previously an internal UTF-8 string,
@@ -108,6 +117,7 @@
     >>> assert safelocalstr(b'\\xc3') in {b'\\xc3': 0}
     """
 
+
 def tolocal(s):
     """
     Convert a string from internal UTF-8 to local encoding
@@ -149,7 +159,7 @@
         try:
             # make sure string is actually stored in UTF-8
             u = s.decode('UTF-8')
-            if encoding == 'UTF-8':
+            if encoding == b'UTF-8':
                 # fast path
                 return s
             r = u.encode(_sysstr(encoding), r"replace")
@@ -167,11 +177,12 @@
                     return safelocalstr(r)
                 return localstr(u.encode('UTF-8'), r)
             except UnicodeDecodeError:
-                u = s.decode("utf-8", "replace") # last ditch
+                u = s.decode("utf-8", "replace")  # last ditch
                 # can't round-trip
                 return u.encode(_sysstr(encoding), r"replace")
     except LookupError as k:
-        raise error.Abort(k, hint="please check your locale settings")
+        raise error.Abort(k, hint=b"please check your locale settings")
+
 
 def fromlocal(s):
     """
@@ -194,27 +205,34 @@
         u = s.decode(_sysstr(encoding), _sysstr(encodingmode))
         return u.encode("utf-8")
     except UnicodeDecodeError as inst:
-        sub = s[max(0, inst.start - 10):inst.start + 10]
-        raise error.Abort("decoding near '%s': %s!"
-                          % (sub, pycompat.bytestr(inst)))
+        sub = s[max(0, inst.start - 10) : inst.start + 10]
+        raise error.Abort(
+            b"decoding near '%s': %s!" % (sub, pycompat.bytestr(inst))
+        )
     except LookupError as k:
-        raise error.Abort(k, hint="please check your locale settings")
+        raise error.Abort(k, hint=b"please check your locale settings")
+
 
 def unitolocal(u):
     """Convert a unicode string to a byte string of local encoding"""
     return tolocal(u.encode('utf-8'))
 
+
 def unifromlocal(s):
     """Convert a byte string of local encoding to a unicode string"""
     return fromlocal(s).decode('utf-8')
 
+
 def unimethod(bytesfunc):
     """Create a proxy method that forwards __unicode__() and __str__() of
     Python 3 to __bytes__()"""
+
     def unifunc(obj):
         return unifromlocal(bytesfunc(obj))
+
     return unifunc
 
+
 # converter functions between native str and byte string. use these if the
 # character encoding is not aware (e.g. exception message) or is known to
 # be locale dependent (e.g. date formatting.)
@@ -230,8 +248,10 @@
 if not _nativeenviron:
     # now encoding and helper functions are available, recreate the environ
     # dict to be exported to other modules
-    environ = dict((tolocal(k.encode(r'utf-8')), tolocal(v.encode(r'utf-8')))
-                   for k, v in os.environ.items())  # re-exports
+    environ = dict(
+        (tolocal(k.encode(r'utf-8')), tolocal(v.encode(r'utf-8')))
+        for k, v in os.environ.items()  # re-exports
+    )
 
 if pycompat.ispy3:
     # os.getcwd() on Python 3 returns string, but it has os.getcwdb() which
@@ -246,20 +266,26 @@
     getcwd = os.getcwd  # re-exports
 
 # How to treat ambiguous-width characters. Set to 'wide' to treat as wide.
-_wide = _sysstr(environ.get("HGENCODINGAMBIGUOUS", "narrow") == "wide"
-                and "WFA" or "WF")
+_wide = _sysstr(
+    environ.get(b"HGENCODINGAMBIGUOUS", b"narrow") == b"wide"
+    and b"WFA"
+    or b"WF"
+)
+
 
 def colwidth(s):
-    "Find the column width of a string for display in the local encoding"
+    b"Find the column width of a string for display in the local encoding"
     return ucolwidth(s.decode(_sysstr(encoding), r'replace'))
 
+
 def ucolwidth(d):
-    "Find the column width of a Unicode string for display"
+    b"Find the column width of a Unicode string for display"
     eaw = getattr(unicodedata, 'east_asian_width', None)
     if eaw is not None:
         return sum([eaw(c) in _wide and 2 or 1 for c in d])
     return len(d)
 
+
 def getcols(s, start, c):
     '''Use colwidth to find a c-column substring of s starting at byte
     index start'''
@@ -268,7 +294,8 @@
         if colwidth(t) == c:
             return t
 
-def trim(s, width, ellipsis='', leftside=False):
+
+def trim(s, width, ellipsis=b'', leftside=False):
     """Trim string 's' to at most 'width' columns (including 'ellipsis').
 
     If 'leftside' is True, left side of string 's' is trimmed.
@@ -336,21 +363,21 @@
     try:
         u = s.decode(_sysstr(encoding))
     except UnicodeDecodeError:
-        if len(s) <= width: # trimming is not needed
+        if len(s) <= width:  # trimming is not needed
             return s
         width -= len(ellipsis)
-        if width <= 0: # no enough room even for ellipsis
-            return ellipsis[:width + len(ellipsis)]
+        if width <= 0:  # not enough room even for ellipsis
+            return ellipsis[: width + len(ellipsis)]
         if leftside:
             return ellipsis + s[-width:]
         return s[:width] + ellipsis
 
-    if ucolwidth(u) <= width: # trimming is not needed
+    if ucolwidth(u) <= width:  # trimming is not needed
         return s
 
     width -= len(ellipsis)
-    if width <= 0: # no enough room even for ellipsis
-        return ellipsis[:width + len(ellipsis)]
+    if width <= 0:  # not enough room even for ellipsis
+        return ellipsis[: width + len(ellipsis)]
 
     if leftside:
         uslice = lambda i: u[i:]
@@ -362,10 +389,11 @@
         usub = uslice(i)
         if ucolwidth(usub) <= width:
             return concat(usub.encode(_sysstr(encoding)))
-    return ellipsis # no enough room for multi-column characters
+    return ellipsis  # not enough room for multi-column characters
+
 
 def lower(s):
-    "best-effort encoding-aware case-folding of local string s"
+    b"best-effort encoding-aware case-folding of local string s"
     try:
         return asciilower(s)
     except UnicodeDecodeError:
@@ -378,20 +406,22 @@
 
         lu = u.lower()
         if u == lu:
-            return s # preserve localstring
+            return s  # preserve localstring
         return lu.encode(_sysstr(encoding))
     except UnicodeError:
-        return s.lower() # we don't know how to fold this except in ASCII
+        return s.lower()  # we don't know how to fold this except in ASCII
     except LookupError as k:
-        raise error.Abort(k, hint="please check your locale settings")
+        raise error.Abort(k, hint=b"please check your locale settings")
+
 
 def upper(s):
-    "best-effort encoding-aware case-folding of local string s"
+    b"best-effort encoding-aware case-folding of local string s"
     try:
         return asciiupper(s)
     except UnicodeDecodeError:
         return upperfallback(s)
 
+
 def upperfallback(s):
     try:
         if isinstance(s, localstr):
@@ -401,12 +431,13 @@
 
         uu = u.upper()
         if u == uu:
-            return s # preserve localstring
+            return s  # preserve localstring
         return uu.encode(_sysstr(encoding))
     except UnicodeError:
-        return s.upper() # we don't know how to fold this except in ASCII
+        return s.upper()  # we don't know how to fold this except in ASCII
     except LookupError as k:
-        raise error.Abort(k, hint="please check your locale settings")
+        raise error.Abort(k, hint=b"please check your locale settings")
+
 
 class normcasespecs(object):
     '''what a platform's normcase does to ASCII strings
@@ -419,10 +450,12 @@
     other: the fallback function should always be called
 
     This should be kept in sync with normcase_spec in util.h.'''
+
     lower = -1
     upper = 1
     other = 0
 
+
 def jsonescape(s, paranoid=False):
     '''returns a string suitable for JSON
 
@@ -475,6 +508,7 @@
         pass
     return charencodepure.jsonescapeu8fallback(u8chars, paranoid)
 
+
 # We need to decode/encode U+DCxx codes transparently since invalid UTF-8
 # bytes are mapped to that range.
 if pycompat.ispy3:
@@ -484,6 +518,7 @@
 
 _utf8len = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4]
 
+
 def getutf8char(s, pos):
     '''get the next full utf-8 character in the given string, starting at pos
 
@@ -492,15 +527,16 @@
     '''
 
     # find how many bytes to attempt decoding from first nibble
-    l = _utf8len[ord(s[pos:pos + 1]) >> 4]
-    if not l: # ascii
-        return s[pos:pos + 1]
+    l = _utf8len[ord(s[pos : pos + 1]) >> 4]
+    if not l:  # ascii
+        return s[pos : pos + 1]
 
-    c = s[pos:pos + l]
+    c = s[pos : pos + l]
     # validate with attempted decode
     c.decode("utf-8", _utf8strict)
     return c
 
+
 def toutf8b(s):
     '''convert a local, possibly-binary string into UTF-8b
 
@@ -542,7 +578,7 @@
         return fromlocal(s)
     elif isasciistr(s):
         return s
-    if "\xed" not in s:
+    if b"\xed" not in s:
         try:
             s.decode('utf-8', _utf8strict)
             return s
@@ -550,24 +586,25 @@
             pass
 
     s = pycompat.bytestr(s)
-    r = ""
+    r = b""
     pos = 0
     l = len(s)
     while pos < l:
         try:
             c = getutf8char(s, pos)
-            if "\xed\xb0\x80" <= c <= "\xed\xb3\xbf":
+            if b"\xed\xb0\x80" <= c <= b"\xed\xb3\xbf":
                 # have to re-escape existing U+DCxx characters
-                c = unichr(0xdc00 + ord(s[pos])).encode('utf-8', _utf8strict)
+                c = unichr(0xDC00 + ord(s[pos])).encode('utf-8', _utf8strict)
                 pos += 1
             else:
                 pos += len(c)
         except UnicodeDecodeError:
-            c = unichr(0xdc00 + ord(s[pos])).encode('utf-8', _utf8strict)
+            c = unichr(0xDC00 + ord(s[pos])).encode('utf-8', _utf8strict)
             pos += 1
         r += c
     return r
 
+
 def fromutf8b(s):
     '''Given a UTF-8b string, return a local, possibly-binary string.
 
@@ -594,7 +631,7 @@
     if isasciistr(s):
         return s
     # fast path - look for uDxxx prefixes in s
-    if "\xed" not in s:
+    if b"\xed" not in s:
         return s
 
     # We could do this with the unicode type but some Python builds
@@ -603,14 +640,14 @@
     # helper again to walk the string without "decoding" it.
 
     s = pycompat.bytestr(s)
-    r = ""
+    r = b""
     pos = 0
     l = len(s)
     while pos < l:
         c = getutf8char(s, pos)
         pos += len(c)
         # unescape U+DCxx characters
-        if "\xed\xb0\x80" <= c <= "\xed\xb3\xbf":
-            c = pycompat.bytechr(ord(c.decode("utf-8", _utf8strict)) & 0xff)
+        if b"\xed\xb0\x80" <= c <= b"\xed\xb3\xbf":
+            c = pycompat.bytechr(ord(c.decode("utf-8", _utf8strict)) & 0xFF)
         r += c
     return r
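
toutf8b() and fromutf8b() above implement the UTF-8b convention: any byte that is not valid UTF-8 is mapped into the lone-surrogate range (U+DC00 plus the byte value, hence the b"\xed\xb0\x80" to b"\xed\xb3\xbf" window) and mapped back on the way out, so arbitrary binary filenames round-trip losslessly. The explicit byte walking is needed because, as the comment above notes, the code must also run on Python 2 and on UTF-16 ("narrow") builds. On Python 3 alone, the same mapping can be sketched with the standard codec error handlers:

    # Python 3-only sketch of the UTF-8b mapping; not the portable
    # implementation above.
    def toutf8b_sketch(s):
        # invalid byte 0xNN becomes the UTF-8 encoding of U+DCNN
        return s.decode('utf-8', 'surrogateescape').encode(
            'utf-8', 'surrogatepass')

    def fromutf8b_sketch(s):
        # U+DCNN decodes back to the original byte 0xNN
        return s.decode('utf-8', 'surrogatepass').encode(
            'utf-8', 'surrogateescape')

    assert toutf8b_sketch(b'foo\xffbar') == b'foo\xed\xb3\xbfbar'
    assert fromutf8b_sketch(b'foo\xed\xb3\xbfbar') == b'foo\xffbar'
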
--- a/mercurial/error.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/error.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,6 +16,7 @@
 # Do not import anything but pycompat here, please
 from . import pycompat
 
+
 def _tobytes(exc):
     """Byte-stringify exception in the same way as BaseException_str()"""
     if not exc.args:
@@ -24,29 +25,43 @@
         return pycompat.bytestr(exc.args[0])
     return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)
 
+
 class Hint(object):
     """Mix-in to provide a hint of an error
 
     This should come first in the inheritance list to consume a hint and
     pass remaining arguments to the exception class.
     """
+
     def __init__(self, *args, **kw):
         self.hint = kw.pop(r'hint', None)
         super(Hint, self).__init__(*args, **kw)
 
+
 class StorageError(Hint, Exception):
     """Raised when an error occurs in a storage layer.
 
     Usually subclassed by a storage-specific exception.
     """
+
     __bytes__ = _tobytes
 
+
 class RevlogError(StorageError):
     __bytes__ = _tobytes
 
+
+class SidedataHashError(RevlogError):
+    def __init__(self, key, expected, got):
+        self.sidedatakey = key
+        self.expecteddigest = expected
+        self.actualdigest = got
+
+
 class FilteredIndexError(IndexError):
     __bytes__ = _tobytes
 
+
 class LookupError(RevlogError, KeyError):
     def __init__(self, name, index, message):
         self.name = name
@@ -56,8 +71,9 @@
         self.lookupmessage = message
         if isinstance(name, bytes) and len(name) == 20:
             from .node import short
+
             name = short(name)
-        RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
+        RevlogError.__init__(self, b'%s@%s: %s' % (index, name, message))
 
     def __bytes__(self):
         return RevlogError.__bytes__(self)
@@ -65,93 +81,125 @@
     def __str__(self):
         return RevlogError.__str__(self)
 
+
 class AmbiguousPrefixLookupError(LookupError):
     pass
 
+
 class FilteredLookupError(LookupError):
     pass
 
+
 class ManifestLookupError(LookupError):
     pass
 
+
 class CommandError(Exception):
     """Exception raised on errors in parsing the command line."""
+
     __bytes__ = _tobytes
 
+
 class InterventionRequired(Hint, Exception):
     """Exception raised when a command requires human intervention."""
+
     __bytes__ = _tobytes
 
+
 class Abort(Hint, Exception):
     """Raised if a command needs to print an error and exit."""
+
     __bytes__ = _tobytes
 
+
 class HookLoadError(Abort):
     """raised when loading a hook fails, aborting an operation
 
     Exists to allow more specialized catching."""
 
+
 class HookAbort(Abort):
     """raised when a validation hook fails, aborting an operation
 
     Exists to allow more specialized catching."""
 
+
 class ConfigError(Abort):
     """Exception raised when parsing config files"""
 
+
 class UpdateAbort(Abort):
     """Raised when an update is aborted for destination issue"""
 
+
 class MergeDestAbort(Abort):
     """Raised when an update is aborted for destination issues"""
 
+
 class NoMergeDestAbort(MergeDestAbort):
     """Raised when an update is aborted because there is nothing to merge"""
 
+
 class ManyMergeDestAbort(MergeDestAbort):
     """Raised when an update is aborted because destination is ambiguous"""
 
+
 class ResponseExpected(Abort):
     """Raised when an EOF is received for a prompt"""
+
     def __init__(self):
         from .i18n import _
-        Abort.__init__(self, _('response expected'))
+
+        Abort.__init__(self, _(b'response expected'))
+
 
 class OutOfBandError(Hint, Exception):
     """Exception raised when a remote repo reports failure"""
+
     __bytes__ = _tobytes
 
+
 class ParseError(Hint, Exception):
     """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
+
     __bytes__ = _tobytes
 
+
 class PatchError(Exception):
     __bytes__ = _tobytes
 
+
 class UnknownIdentifier(ParseError):
     """Exception raised when a {rev,file}set references an unknown identifier"""
 
     def __init__(self, function, symbols):
         from .i18n import _
-        ParseError.__init__(self, _("unknown identifier: %s") % function)
+
+        ParseError.__init__(self, _(b"unknown identifier: %s") % function)
         self.function = function
         self.symbols = symbols
 
+
 class RepoError(Hint, Exception):
     __bytes__ = _tobytes
 
+
 class RepoLookupError(RepoError):
     pass
 
+
 class FilteredRepoLookupError(RepoLookupError):
     pass
 
+
 class CapabilityError(RepoError):
     pass
 
+
 class RequirementError(RepoError):
     """Exception raised if .hg/requires has an unknown entry."""
 
+
 class StdioError(IOError):
     """Raised if I/O to stdout or stderr fails"""
 
@@ -160,15 +208,22 @@
 
     # no __bytes__() because error message is derived from the standard IOError
 
+
 class UnsupportedMergeRecords(Abort):
     def __init__(self, recordtypes):
         from .i18n import _
+
         self.recordtypes = sorted(recordtypes)
-        s = ' '.join(self.recordtypes)
+        s = b' '.join(self.recordtypes)
         Abort.__init__(
-            self, _('unsupported merge state records: %s') % s,
-            hint=_('see https://mercurial-scm.org/wiki/MergeStateRecords for '
-                   'more information'))
+            self,
+            _(b'unsupported merge state records: %s') % s,
+            hint=_(
+                b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
+                b'more information'
+            ),
+        )
+
 
 class UnknownVersion(Abort):
     """generic exception for aborting from an encounter with an unknown version
@@ -178,6 +233,7 @@
         self.version = version
         super(UnknownVersion, self).__init__(msg, hint=hint)
 
+
 class LockError(IOError):
     def __init__(self, errno, strerror, filename, desc):
         IOError.__init__(self, errno, strerror, filename)
@@ -185,41 +241,55 @@
 
     # no __bytes__() because error message is derived from the standard IOError
 
+
 class LockHeld(LockError):
     def __init__(self, errno, filename, desc, locker):
-        LockError.__init__(self, errno, 'Lock held', filename, desc)
+        LockError.__init__(self, errno, b'Lock held', filename, desc)
         self.locker = locker
 
+
 class LockUnavailable(LockError):
     pass
 
+
 # LockError is for errors while acquiring the lock -- this is unrelated
 class LockInheritanceContractViolation(RuntimeError):
     __bytes__ = _tobytes
 
+
 class ResponseError(Exception):
     """Raised to print an error with part of output and exit."""
+
     __bytes__ = _tobytes
 
+
 class UnknownCommand(Exception):
     """Exception raised if command is not in the command table."""
+
     __bytes__ = _tobytes
 
+
 class AmbiguousCommand(Exception):
     """Exception raised if command shortcut matches more than one command."""
+
     __bytes__ = _tobytes
 
+
 # derived from KeyboardInterrupt to simplify some breakout code
 class SignalInterrupt(KeyboardInterrupt):
     """Exception raised on SIGTERM and SIGHUP."""
 
+
 class SignatureError(Exception):
     __bytes__ = _tobytes
 
+
 class PushRaced(RuntimeError):
     """An exception raised during unbundling that indicate a push race"""
+
     __bytes__ = _tobytes
 
+
 class ProgrammingError(Hint, RuntimeError):
     """Raised if a mercurial (core or extension) developer made a mistake"""
 
@@ -232,22 +302,27 @@
 
     __bytes__ = _tobytes
 
+
 class WdirUnsupported(Exception):
     """An exception which is raised when 'wdir()' is not supported"""
+
     __bytes__ = _tobytes
 
+
 # bundle2 related errors
 class BundleValueError(ValueError):
     """error raised when bundle2 cannot be processed"""
+
     __bytes__ = _tobytes
 
+
 class BundleUnknownFeatureError(BundleValueError):
     def __init__(self, parttype=None, params=(), values=()):
         self.parttype = parttype
         self.params = params
         self.values = values
         if self.parttype is None:
-            msg = 'Stream Parameter'
+            msg = b'Stream Parameter'
         else:
             msg = parttype
         entries = self.params
@@ -259,20 +334,24 @@
                 if val is None:
                     entries.append(val)
                 else:
-                    entries.append("%s=%r" % (par, pycompat.maybebytestr(val)))
+                    entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
         if entries:
-            msg = '%s - %s' % (msg, ', '.join(entries))
+            msg = b'%s - %s' % (msg, b', '.join(entries))
         ValueError.__init__(self, msg)
 
+
 class ReadOnlyPartError(RuntimeError):
     """error raised when code tries to alter a part being generated"""
+
     __bytes__ = _tobytes
 
+
 class PushkeyFailed(Abort):
     """error raised when a pushkey part failed to update a value"""
 
-    def __init__(self, partid, namespace=None, key=None, new=None, old=None,
-                 ret=None):
+    def __init__(
+        self, partid, namespace=None, key=None, new=None, old=None, ret=None
+    ):
         self.partid = partid
         self.namespace = namespace
         self.key = key
@@ -280,8 +359,10 @@
         self.old = old
         self.ret = ret
         # no i18n expected to be processed into a better message
-        Abort.__init__(self, 'failed to update value for "%s/%s"'
-                       % (namespace, key))
+        Abort.__init__(
+            self, b'failed to update value for "%s/%s"' % (namespace, key)
+        )
+
 
 class CensoredNodeError(StorageError):
     """error raised when content verification fails on a censored node
@@ -291,9 +372,11 @@
 
     def __init__(self, filename, node, tombstone):
         from .node import short
-        StorageError.__init__(self, '%s:%s' % (filename, short(node)))
+
+        StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
         self.tombstone = tombstone
 
+
 class CensoredBaseError(StorageError):
     """error raised when a delta is rejected because its base is censored
 
@@ -302,28 +385,38 @@
     the delta may be applied by clones which have not censored the base.
     """
 
+
 class InvalidBundleSpecification(Exception):
     """error raised when a bundle specification is invalid.
 
     This is used for syntax errors as opposed to support errors.
     """
+
     __bytes__ = _tobytes
 
+
 class UnsupportedBundleSpecification(Exception):
     """error raised when a bundle specification is not supported."""
+
     __bytes__ = _tobytes
 
+
 class CorruptedState(Exception):
     """error raised when a command is not able to read its state from file"""
+
     __bytes__ = _tobytes
 
+
 class PeerTransportError(Abort):
     """Transport-level I/O error when communicating with a peer repo."""
 
+
 class InMemoryMergeConflictsError(Exception):
     """Exception raised when merge conflicts arose during an in-memory merge."""
+
     __bytes__ = _tobytes
 
+
 class WireprotoCommandError(Exception):
     """Represents an error during execution of a wire protocol command.
 
@@ -331,6 +424,7 @@
 
     The error is a formatter string and an optional iterable of arguments.
     """
+
     def __init__(self, message, args=None):
         self.message = message
         self.messageargs = args
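
Most of the error.py churn above is blank-line insertion (black wants two blank lines between top-level definitions) plus byte literals in messages. The one structural pattern worth noting is the Hint mix-in: it consumes an optional 'hint' keyword before delegating to the next class in the MRO, which is why it must come first in each exception's base list. A small usage sketch built on the classes above (message and hint values are illustrative):

    from mercurial import error

    try:
        raise error.Abort(b'decoding failed',
                          hint=b'please check your locale settings')
    except error.Abort as err:
        # the mix-in stored the hint; dispatch-level code, not the
        # exception itself, renders it to the user
        assert err.hint == b'please check your locale settings'
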
--- a/mercurial/exchange.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/exchange.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,14 +12,11 @@
 
 from .i18n import _
 from .node import (
-    bin,
     hex,
     nullid,
     nullrev,
 )
-from .thirdparty import (
-    attr,
-)
+from .thirdparty import attr
 from . import (
     bookmarks as bookmod,
     bundle2,
@@ -34,7 +31,6 @@
     phases,
     pushkey,
     pycompat,
-    repository,
     scmutil,
     sslutil,
     streamclone,
@@ -42,52 +38,56 @@
     util,
     wireprototypes,
 )
-from .utils import (
-    stringutil,
-)
+from .interfaces import repository
+from .utils import stringutil
 
 urlerr = util.urlerr
 urlreq = util.urlreq
 
-_NARROWACL_SECTION = 'narrowacl'
+_NARROWACL_SECTION = b'narrowacl'
 
 # Maps bundle version human names to changegroup versions.
-_bundlespeccgversions = {'v1': '01',
-                         'v2': '02',
-                         'packed1': 's1',
-                         'bundle2': '02', #legacy
-                        }
+_bundlespeccgversions = {
+    b'v1': b'01',
+    b'v2': b'02',
+    b'packed1': b's1',
+    b'bundle2': b'02',  # legacy
+}
 
 # Maps bundle version with content opts to choose which part to bundle
 _bundlespeccontentopts = {
-    'v1': {
-        'changegroup': True,
-        'cg.version': '01',
-        'obsolescence': False,
-        'phases': False,
-        'tagsfnodescache': False,
-        'revbranchcache': False
+    b'v1': {
+        b'changegroup': True,
+        b'cg.version': b'01',
+        b'obsolescence': False,
+        b'phases': False,
+        b'tagsfnodescache': False,
+        b'revbranchcache': False,
     },
-    'v2': {
-        'changegroup': True,
-        'cg.version': '02',
-        'obsolescence': False,
-        'phases': False,
-        'tagsfnodescache': True,
-        'revbranchcache': True
+    b'v2': {
+        b'changegroup': True,
+        b'cg.version': b'02',
+        b'obsolescence': False,
+        b'phases': False,
+        b'tagsfnodescache': True,
+        b'revbranchcache': True,
     },
-    'packed1' : {
-        'cg.version': 's1'
+    b'packed1': {b'cg.version': b's1'},
+}
+_bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
+
+_bundlespecvariants = {
+    b"streamv2": {
+        b"changegroup": False,
+        b"streamv2": True,
+        b"tagsfnodescache": False,
+        b"revbranchcache": False,
     }
 }
-_bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
-
-_bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
-                                    "tagsfnodescache": False,
-                                    "revbranchcache": False}}
 
 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
-_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
+_bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
+
 
 @attr.s
 class bundlespec(object):
@@ -98,6 +98,12 @@
     params = attr.ib()
     contentopts = attr.ib()
 
+
+def _sortedmarkers(markers):
+    # last item of marker tuple ('parents') may be None or a tuple
+    return sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
+
+
 def parsebundlespec(repo, spec, strict=True):
     """Parse a bundle string specification into parts.
 
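
For context, a bundle specification pairs a compression name with a format version, optionally followed by ';'-separated key=value parameters, e.g. 'gzip-v2' or 'none-v2;stream=v2' as produced later in this file. A stripped-down sketch of the parameter parsing done by parseparams() in the hunk below, omitting the urlreq.unquote() calls and error reporting:

    def parseparams_sketch(s):
        # '<version>;<key>=<value>;...' -> (version, params dict)
        if ';' not in s:
            return s, {}
        version, paramstr = s.split(';', 1)
        params = {}
        for p in paramstr.split(';'):
            key, value = p.split('=', 1)
            params[key] = value
        return version, params

    assert parseparams_sketch('v2;stream=v2') == ('v2', {'stream': 'v2'})
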
@@ -131,44 +137,54 @@
     Note: this function will likely eventually return a more complex data
     structure, including bundle2 part information.
     """
+
     def parseparams(s):
-        if ';' not in s:
+        if b';' not in s:
             return s, {}
 
         params = {}
-        version, paramstr = s.split(';', 1)
-
-        for p in paramstr.split(';'):
-            if '=' not in p:
+        version, paramstr = s.split(b';', 1)
+
+        for p in paramstr.split(b';'):
+            if b'=' not in p:
                 raise error.InvalidBundleSpecification(
-                    _('invalid bundle specification: '
-                      'missing "=" in parameter: %s') % p)
-
-            key, value = p.split('=', 1)
+                    _(
+                        b'invalid bundle specification: '
+                        b'missing "=" in parameter: %s'
+                    )
+                    % p
+                )
+
+            key, value = p.split(b'=', 1)
             key = urlreq.unquote(key)
             value = urlreq.unquote(value)
             params[key] = value
 
         return version, params
 
-
-    if strict and '-' not in spec:
+    if strict and b'-' not in spec:
         raise error.InvalidBundleSpecification(
-                _('invalid bundle specification; '
-                  'must be prefixed with compression: %s') % spec)
-
-    if '-' in spec:
-        compression, version = spec.split('-', 1)
+            _(
+                b'invalid bundle specification; '
+                b'must be prefixed with compression: %s'
+            )
+            % spec
+        )
+
+    if b'-' in spec:
+        compression, version = spec.split(b'-', 1)
 
         if compression not in util.compengines.supportedbundlenames:
             raise error.UnsupportedBundleSpecification(
-                    _('%s compression is not supported') % compression)
+                _(b'%s compression is not supported') % compression
+            )
 
         version, params = parseparams(version)
 
         if version not in _bundlespeccgversions:
             raise error.UnsupportedBundleSpecification(
-                    _('%s is not a recognized bundle version') % version)
+                _(b'%s is not a recognized bundle version') % version
+            )
     else:
         # Value could be just the compression or just the version, in which
         # case some defaults are assumed (but only when not in strict mode).
@@ -178,82 +194,90 @@
 
         if spec in util.compengines.supportedbundlenames:
             compression = spec
-            version = 'v1'
+            version = b'v1'
             # Generaldelta repos require v2.
-            if 'generaldelta' in repo.requirements:
-                version = 'v2'
+            if b'generaldelta' in repo.requirements:
+                version = b'v2'
             # Modern compression engines require v2.
             if compression not in _bundlespecv1compengines:
-                version = 'v2'
+                version = b'v2'
         elif spec in _bundlespeccgversions:
-            if spec == 'packed1':
-                compression = 'none'
+            if spec == b'packed1':
+                compression = b'none'
             else:
-                compression = 'bzip2'
+                compression = b'bzip2'
             version = spec
         else:
             raise error.UnsupportedBundleSpecification(
-                    _('%s is not a recognized bundle specification') % spec)
+                _(b'%s is not a recognized bundle specification') % spec
+            )
 
     # Bundle version 1 only supports a known set of compression engines.
-    if version == 'v1' and compression not in _bundlespecv1compengines:
+    if version == b'v1' and compression not in _bundlespecv1compengines:
         raise error.UnsupportedBundleSpecification(
-            _('compression engine %s is not supported on v1 bundles') %
-            compression)
+            _(b'compression engine %s is not supported on v1 bundles')
+            % compression
+        )
 
     # The specification for packed1 can optionally declare the data formats
     # required to apply it. If we see this metadata, compare against what the
     # repo supports and error if the bundle isn't compatible.
-    if version == 'packed1' and 'requirements' in params:
-        requirements = set(params['requirements'].split(','))
+    if version == b'packed1' and b'requirements' in params:
+        requirements = set(params[b'requirements'].split(b','))
         missingreqs = requirements - repo.supportedformats
         if missingreqs:
             raise error.UnsupportedBundleSpecification(
-                    _('missing support for repository features: %s') %
-                      ', '.join(sorted(missingreqs)))
+                _(b'missing support for repository features: %s')
+                % b', '.join(sorted(missingreqs))
+            )
 
     # Compute contentopts based on the version
     contentopts = _bundlespeccontentopts.get(version, {}).copy()
 
     # Process the variants
-    if "stream" in params and params["stream"] == "v2":
-        variant = _bundlespecvariants["streamv2"]
+    if b"stream" in params and params[b"stream"] == b"v2":
+        variant = _bundlespecvariants[b"streamv2"]
         contentopts.update(variant)
 
     engine = util.compengines.forbundlename(compression)
     compression, wirecompression = engine.bundletype()
     wireversion = _bundlespeccgversions[version]
 
-    return bundlespec(compression, wirecompression, version, wireversion,
-                      params, contentopts)
+    return bundlespec(
+        compression, wirecompression, version, wireversion, params, contentopts
+    )
+
 
 def readbundle(ui, fh, fname, vfs=None):
     header = changegroup.readexactly(fh, 4)
 
     alg = None
     if not fname:
-        fname = "stream"
-        if not header.startswith('HG') and header.startswith('\0'):
+        fname = b"stream"
+        if not header.startswith(b'HG') and header.startswith(b'\0'):
             fh = changegroup.headerlessfixup(fh, header)
-            header = "HG10"
-            alg = 'UN'
+            header = b"HG10"
+            alg = b'UN'
     elif vfs:
         fname = vfs.join(fname)
 
     magic, version = header[0:2], header[2:4]
 
-    if magic != 'HG':
-        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
-    if version == '10':
+    if magic != b'HG':
+        raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
+    if version == b'10':
         if alg is None:
             alg = changegroup.readexactly(fh, 2)
         return changegroup.cg1unpacker(fh, alg)
-    elif version.startswith('2'):
+    elif version.startswith(b'2'):
         return bundle2.getunbundler(ui, fh, magicstring=magic + version)
-    elif version == 'S1':
+    elif version == b'S1':
         return streamclone.streamcloneapplier(fh)
     else:
-        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
+        raise error.Abort(
+            _(b'%s: unknown bundle version %s') % (fname, version)
+        )
+
 
 def getbundlespec(ui, fh):
     """Infer the bundlespec from a bundle file handle.
@@ -261,6 +285,7 @@
     The input file handle is seeked and the original seek position is not
     restored.
     """
+
     def speccompression(alg):
         try:
             return util.compengines.forbundletype(alg).bundletype()[0]
@@ -270,49 +295,57 @@
     b = readbundle(ui, fh, None)
     if isinstance(b, changegroup.cg1unpacker):
         alg = b._type
-        if alg == '_truncatedBZ':
-            alg = 'BZ'
+        if alg == b'_truncatedBZ':
+            alg = b'BZ'
         comp = speccompression(alg)
         if not comp:
-            raise error.Abort(_('unknown compression algorithm: %s') % alg)
-        return '%s-v1' % comp
+            raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
+        return b'%s-v1' % comp
     elif isinstance(b, bundle2.unbundle20):
-        if 'Compression' in b.params:
-            comp = speccompression(b.params['Compression'])
+        if b'Compression' in b.params:
+            comp = speccompression(b.params[b'Compression'])
             if not comp:
-                raise error.Abort(_('unknown compression algorithm: %s') % comp)
+                raise error.Abort(
+                    _(b'unknown compression algorithm: %s') % comp
+                )
         else:
-            comp = 'none'
+            comp = b'none'
 
         version = None
         for part in b.iterparts():
-            if part.type == 'changegroup':
-                version = part.params['version']
-                if version in ('01', '02'):
-                    version = 'v2'
+            if part.type == b'changegroup':
+                version = part.params[b'version']
+                if version in (b'01', b'02'):
+                    version = b'v2'
                 else:
-                    raise error.Abort(_('changegroup version %s does not have '
-                                        'a known bundlespec') % version,
-                                      hint=_('try upgrading your Mercurial '
-                                              'client'))
-            elif part.type == 'stream2' and version is None:
+                    raise error.Abort(
+                        _(
+                            b'changegroup version %s does not have '
+                            b'a known bundlespec'
+                        )
+                        % version,
+                        hint=_(b'try upgrading your Mercurial client'),
+                    )
+            elif part.type == b'stream2' and version is None:
                 # A stream2 part requires to be part of a v2 bundle
-                requirements = urlreq.unquote(part.params['requirements'])
+                requirements = urlreq.unquote(part.params[b'requirements'])
                 splitted = requirements.split()
                 params = bundle2._formatrequirementsparams(splitted)
-                return 'none-v2;stream=v2;%s' % params
+                return b'none-v2;stream=v2;%s' % params
 
         if not version:
-            raise error.Abort(_('could not identify changegroup version in '
-                                'bundle'))
-
-        return '%s-%s' % (comp, version)
+            raise error.Abort(
+                _(b'could not identify changegroup version in bundle')
+            )
+
+        return b'%s-%s' % (comp, version)
     elif isinstance(b, streamclone.streamcloneapplier):
         requirements = streamclone.readbundle1header(fh)[2]
         formatted = bundle2._formatrequirementsparams(requirements)
-        return 'none-packed1;%s' % formatted
+        return b'none-packed1;%s' % formatted
     else:
-        raise error.Abort(_('unknown bundle type: %s') % b)
+        raise error.Abort(_(b'unknown bundle type: %s') % b)
+
 
 def _computeoutgoing(repo, heads, common):
     """Computes which revs are outgoing given a set of common
@@ -333,34 +366,41 @@
         heads = cl.heads()
     return discovery.outgoing(repo, common, heads)
 
+
 def _checkpublish(pushop):
     repo = pushop.repo
     ui = repo.ui
-    behavior = ui.config('experimental', 'auto-publish')
-    if pushop.publish or behavior not in ('warn', 'confirm', 'abort'):
+    behavior = ui.config(b'experimental', b'auto-publish')
+    if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
         return
-    remotephases = listkeys(pushop.remote, 'phases')
-    if not remotephases.get('publishing', False):
+    remotephases = listkeys(pushop.remote, b'phases')
+    if not remotephases.get(b'publishing', False):
         return
 
     if pushop.revs is None:
-        published = repo.filtered('served').revs('not public()')
+        published = repo.filtered(b'served').revs(b'not public()')
     else:
-        published = repo.revs('::%ln - public()', pushop.revs)
+        published = repo.revs(b'::%ln - public()', pushop.revs)
     if published:
-        if behavior == 'warn':
-            ui.warn(_('%i changesets about to be published\n')
-                    % len(published))
-        elif behavior == 'confirm':
-            if ui.promptchoice(_('push and publish %i changesets (yn)?'
-                                 '$$ &Yes $$ &No') % len(published)):
-                raise error.Abort(_('user quit'))
-        elif behavior == 'abort':
-            msg = _('push would publish %i changesets') % len(published)
-            hint = _("use --publish or adjust 'experimental.auto-publish'"
-                     " config")
+        if behavior == b'warn':
+            ui.warn(
+                _(b'%i changesets about to be published\n') % len(published)
+            )
+        elif behavior == b'confirm':
+            if ui.promptchoice(
+                _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
+                % len(published)
+            ):
+                raise error.Abort(_(b'user quit'))
+        elif behavior == b'abort':
+            msg = _(b'push would publish %i changesets') % len(published)
+            hint = _(
+                b"use --publish or adjust 'experimental.auto-publish'"
+                b" config"
+            )
             raise error.Abort(msg, hint=hint)
 
+
 def _forcebundle1(op):
     """return true if a pull/push must use bundle1
 
@@ -372,9 +412,10 @@
     # should be used.
     #
     # developer config: devel.legacy.exchange
-    exchange = ui.configlist('devel', 'legacy.exchange')
-    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
-    return forcebundle1 or not op.remote.capable('bundle2')
+    exchange = ui.configlist(b'devel', b'legacy.exchange')
+    forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
+    return forcebundle1 or not op.remote.capable(b'bundle2')
+
 
 class pushoperation(object):
     """A object that represent a single push operation
@@ -385,8 +426,17 @@
     discarded afterward.
     """
 
-    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
-                 bookmarks=(), publish=False, pushvars=None):
+    def __init__(
+        self,
+        repo,
+        remote,
+        force=False,
+        revs=None,
+        newbranch=False,
+        bookmarks=(),
+        publish=False,
+        pushvars=None,
+    ):
         # repo we push from
         self.repo = repo
         self.ui = repo.ui
@@ -439,7 +489,7 @@
         self.fallbackoutdatedphases = None
         # outgoing obsmarkers
         self.outobsmarkers = set()
-        # outgoing bookmarks
+        # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
         self.outbookmarks = []
         # transaction manager
         self.trmanager = None
@@ -482,9 +532,11 @@
         cheads = [node for node in self.revs if nm[node] in common]
         # and
         # * commonheads parents on missing
-        revset = unfi.set('%ln and parents(roots(%ln))',
-                         self.outgoing.commonheads,
-                         self.outgoing.missing)
+        revset = unfi.set(
+            b'%ln and parents(roots(%ln))',
+            self.outgoing.commonheads,
+            self.outgoing.missing,
+        )
         cheads.extend(c.node() for c in revset)
         return cheads
 
@@ -496,18 +548,34 @@
         else:
             return self.fallbackheads
 
+
 # mapping of message used when pushing bookmark
-bookmsgmap = {'update': (_("updating bookmark %s\n"),
-                         _('updating bookmark %s failed!\n')),
-              'export': (_("exporting bookmark %s\n"),
-                         _('exporting bookmark %s failed!\n')),
-              'delete': (_("deleting remote bookmark %s\n"),
-                         _('deleting remote bookmark %s failed!\n')),
-              }
-
-
-def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
-         publish=False, opargs=None):
+bookmsgmap = {
+    b'update': (
+        _(b"updating bookmark %s\n"),
+        _(b'updating bookmark %s failed!\n'),
+    ),
+    b'export': (
+        _(b"exporting bookmark %s\n"),
+        _(b'exporting bookmark %s failed!\n'),
+    ),
+    b'delete': (
+        _(b"deleting remote bookmark %s\n"),
+        _(b'deleting remote bookmark %s failed!\n'),
+    ),
+}
+
+
+def push(
+    repo,
+    remote,
+    force=False,
+    revs=None,
+    newbranch=False,
+    bookmarks=(),
+    publish=False,
+    opargs=None,
+):
     '''Push outgoing changesets (limited by revs) from a local
     repository to remote. Return an integer:
       - None means nothing to push
@@ -518,44 +586,62 @@
     '''
     if opargs is None:
         opargs = {}
-    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
-                           publish, **pycompat.strkwargs(opargs))
+    pushop = pushoperation(
+        repo,
+        remote,
+        force,
+        revs,
+        newbranch,
+        bookmarks,
+        publish,
+        **pycompat.strkwargs(opargs)
+    )
     if pushop.remote.local():
-        missing = (set(pushop.repo.requirements)
-                   - pushop.remote.local().supported)
+        missing = (
+            set(pushop.repo.requirements) - pushop.remote.local().supported
+        )
         if missing:
-            msg = _("required features are not"
-                    " supported in the destination:"
-                    " %s") % (', '.join(sorted(missing)))
+            msg = _(
+                b"required features are not"
+                b" supported in the destination:"
+                b" %s"
+            ) % (b', '.join(sorted(missing)))
             raise error.Abort(msg)
 
     if not pushop.remote.canpush():
-        raise error.Abort(_("destination does not support push"))
-
-    if not pushop.remote.capable('unbundle'):
-        raise error.Abort(_('cannot push: destination does not support the '
-                            'unbundle wire protocol command'))
+        raise error.Abort(_(b"destination does not support push"))
+
+    if not pushop.remote.capable(b'unbundle'):
+        raise error.Abort(
+            _(
+                b'cannot push: destination does not support the '
+                b'unbundle wire protocol command'
+            )
+        )
 
     # get lock as we might write phase data
     wlock = lock = None
     try:
         # bundle2 push may receive a reply bundle touching bookmarks
         # requiring the wlock. Take it now to ensure proper ordering.
-        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
-        if ((not _forcebundle1(pushop)) and
-            maypushback and
-            not bookmod.bookmarksinstore(repo)):
+        maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
+        if (
+            (not _forcebundle1(pushop))
+            and maypushback
+            and not bookmod.bookmarksinstore(repo)
+        ):
             wlock = pushop.repo.wlock()
         lock = pushop.repo.lock()
-        pushop.trmanager = transactionmanager(pushop.repo,
-                                              'push-response',
-                                              pushop.remote.url())
+        pushop.trmanager = transactionmanager(
+            pushop.repo, b'push-response', pushop.remote.url()
+        )
     except error.LockUnavailable as err:
         # source repo cannot be locked.
         # We do not abort the push, but just disable the local phase
         # synchronisation.
-        msg = ('cannot lock source repository: %s\n'
-               % stringutil.forcebytestr(err))
+        msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
+            err
+        )
         pushop.ui.debug(msg)
 
     with wlock or util.nullcontextmanager():
@@ -571,11 +657,12 @@
                 _pushobsolete(pushop)
                 _pushbookmark(pushop)
 
-    if repo.ui.configbool('experimental', 'remotenames'):
+    if repo.ui.configbool(b'experimental', b'remotenames'):
         logexchange.pullremotenames(repo, remote)
 
     return pushop
 
+
 # list of steps to perform discovery before push
 pushdiscoveryorder = []
 
@@ -584,6 +671,7 @@
 # This exists to help extensions wrap steps if necessary
 pushdiscoverymapping = {}
 
+
 def pushdiscovery(stepname):
     """decorator for function performing discovery before push
 
@@ -593,49 +681,65 @@
 
     You can only use this decorator for a new step, if you want to wrap a step
     from an extension, change the pushdiscovery dictionary directly."""
+
     def dec(func):
         assert stepname not in pushdiscoverymapping
         pushdiscoverymapping[stepname] = func
         pushdiscoveryorder.append(stepname)
         return func
+
     return dec
 
+
 def _pushdiscovery(pushop):
     """Run all discovery steps"""
     for stepname in pushdiscoveryorder:
         step = pushdiscoverymapping[stepname]
         step(pushop)
 
-@pushdiscovery('changeset')
+
+@pushdiscovery(b'changeset')
 def _pushdiscoverychangeset(pushop):
     """discover the changeset that need to be pushed"""
     fci = discovery.findcommonincoming
     if pushop.revs:
-        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
-                        ancestorsof=pushop.revs)
+        commoninc = fci(
+            pushop.repo,
+            pushop.remote,
+            force=pushop.force,
+            ancestorsof=pushop.revs,
+        )
     else:
         commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
     common, inc, remoteheads = commoninc
     fco = discovery.findcommonoutgoing
-    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
-                   commoninc=commoninc, force=pushop.force)
+    outgoing = fco(
+        pushop.repo,
+        pushop.remote,
+        onlyheads=pushop.revs,
+        commoninc=commoninc,
+        force=pushop.force,
+    )
     pushop.outgoing = outgoing
     pushop.remoteheads = remoteheads
     pushop.incoming = inc
 
-@pushdiscovery('phase')
+
+@pushdiscovery(b'phase')
 def _pushdiscoveryphase(pushop):
     """discover the phase that needs to be pushed
 
     (computed for both success and failure case for changesets push)"""
     outgoing = pushop.outgoing
     unfi = pushop.repo.unfiltered()
-    remotephases = listkeys(pushop.remote, 'phases')
-
-    if (pushop.ui.configbool('ui', '_usedassubrepo')
-        and remotephases    # server supports phases
-        and not pushop.outgoing.missing # no changesets to be pushed
-        and remotephases.get('publishing', False)):
+    remotephases = listkeys(pushop.remote, b'phases')
+
+    if (
+        pushop.ui.configbool(b'ui', b'_usedassubrepo')
+        and remotephases  # server supports phases
+        and not pushop.outgoing.missing  # no changesets to be pushed
+        and remotephases.get(b'publishing', False)
+    ):
         # When:
         # - this is a subrepo push
         # - and remote support phase
@@ -649,22 +753,25 @@
         pushop.fallbackoutdatedphases = []
         return
 
-    pushop.remotephases = phases.remotephasessummary(pushop.repo,
-                                                     pushop.fallbackheads,
-                                                     remotephases)
+    pushop.remotephases = phases.remotephasessummary(
+        pushop.repo, pushop.fallbackheads, remotephases
+    )
     droots = pushop.remotephases.draftroots
 
-    extracond = ''
+    extracond = b''
     if not pushop.remotephases.publishing:
-        extracond = ' and public()'
-    revset = 'heads((%%ln::%%ln) %s)' % extracond
+        extracond = b' and public()'
+    revset = b'heads((%%ln::%%ln) %s)' % extracond
     # Get the list of all revs draft on remote by public here.
     # XXX Beware that the revset breaks if droots is not strictly
     # XXX roots; we may want to ensure it is, but it is costly
     fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
     if not pushop.remotephases.publishing and pushop.publish:
-        future = list(unfi.set('%ln and (not public() or %ln::)',
-                               pushop.futureheads, droots))
+        future = list(
+            unfi.set(
+                b'%ln and (not public() or %ln::)', pushop.futureheads, droots
+            )
+        )
     elif not outgoing.missing:
         future = fallback
     else:
@@ -672,14 +779,16 @@
         #
         # should not be necessary for publishing server, but because of an
         # issue fixed in xxxxx we have to do it anyway.
-        fdroots = list(unfi.set('roots(%ln  + %ln::)',
-                       outgoing.missing, droots))
+        fdroots = list(
+            unfi.set(b'roots(%ln  + %ln::)', outgoing.missing, droots)
+        )
         fdroots = [f.node() for f in fdroots]
         future = list(unfi.set(revset, fdroots, pushop.futureheads))
     pushop.outdatedphases = future
     pushop.fallbackoutdatedphases = fallback
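
The doubled percent signs in the revset template above survive the first
formatting pass so that %ln remains available to the revset engine. A quick
worked example of the expansion (a sketch, not code from this change):

    extracond = b' and public()'
    revset = b'heads((%%ln::%%ln) %s)' % extracond
    # roughly b'heads((%ln::%ln) and public())'; the %ln placeholders
    # are filled later by unfi.set(revset, droots, pushop.fallbackheads)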
 
-@pushdiscovery('obsmarker')
+
+@pushdiscovery(b'obsmarker')
 def _pushdiscoveryobsmarkers(pushop):
     if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
         return
@@ -687,50 +796,41 @@
     if not pushop.repo.obsstore:
         return
 
-    if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
+    if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
         return
 
     repo = pushop.repo
     # very naive computation that can be quite expensive on big repos.
     # However, evolution is currently slow on them anyway.
-    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
+    nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
     pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
 
-@pushdiscovery('bookmarks')
+
+@pushdiscovery(b'bookmarks')
 def _pushdiscoverybookmarks(pushop):
     ui = pushop.ui
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
-    ui.debug("checking for updated bookmarks\n")
+    ui.debug(b"checking for updated bookmarks\n")
     ancestors = ()
     if pushop.revs:
         revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
         ancestors = repo.changelog.ancestors(revnums, inclusive=True)
 
-    remotebookmark = listkeys(remote, 'bookmarks')
-
-    explicit = {repo._bookmarks.expandname(bookmark)
-                for bookmark in pushop.bookmarks}
-
-    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
+    remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
+
+    explicit = {
+        repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
+    }
+
     comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
-
-    def safehex(x):
-        if x is None:
-            return x
-        return hex(x)
-
-    def hexifycompbookmarks(bookmarks):
-        return [(b, safehex(scid), safehex(dcid))
-                for (b, scid, dcid) in bookmarks]
-
-    comp = [hexifycompbookmarks(marks) for marks in comp]
     return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
 
+
 def _processcompared(pushop, pushed, explicit, remotebms, comp):
-    """take decision on bookmark to pull from the remote bookmark
-
-    Exist to help extensions who want to alter this behavior.
+    """take decision on bookmarks to push to the remote repo
+
+    Exists to help extensions alter this behavior.
     """
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
 
@@ -745,7 +845,7 @@
     for b, scid, dcid in addsrc:
         if b in explicit:
             explicit.remove(b)
-            pushop.outbookmarks.append((b, '', scid))
+            pushop.outbookmarks.append((b, b'', scid))
     # search for overwritten bookmark
     for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
         if b in explicit:
@@ -756,7 +856,7 @@
         if b in explicit:
             explicit.remove(b)
             # treat as "deleted locally"
-            pushop.outbookmarks.append((b, dcid, ''))
+            pushop.outbookmarks.append((b, dcid, b''))
     # identical bookmarks shouldn't get reported
     for b, scid, dcid in same:
         if b in explicit:
@@ -765,12 +865,18 @@
     if explicit:
         explicit = sorted(explicit)
         # we should probably list all of them
-        pushop.ui.warn(_('bookmark %s does not exist on the local '
-                         'or remote repository!\n') % explicit[0])
+        pushop.ui.warn(
+            _(
+                b'bookmark %s does not exist on the local '
+                b'or remote repository!\n'
+            )
+            % explicit[0]
+        )
         pushop.bkresult = 2
 
     pushop.outbookmarks.sort()
 
+
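The eight buckets unpacked from comparebookmarks() drive the decisions above.
A summary of how each bucket is handled, as a sketch rather than code from
this change:

    # illustrative summary of _processcompared's handling
    handling = {
        'addsrc': "local-only bookmark -> push with empty old value",
        'adddst': "remote-only bookmark -> push deletion (empty new value)",
        'advdst, diverge, differ': "remote moved -> explicit push overwrites",
        'same': "identical on both sides -> dropped from 'explicit'",
    }
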
 def _pushcheckoutgoing(pushop):
     outgoing = pushop.outgoing
     unfi = pushop.repo.unfiltered()
@@ -784,12 +890,14 @@
         # then, save the iteration
         if unfi.obsstore:
             # these messages are here for 80-char limit reasons
-            mso = _("push includes obsolete changeset: %s!")
-            mspd = _("push includes phase-divergent changeset: %s!")
-            mscd = _("push includes content-divergent changeset: %s!")
-            mst = {"orphan": _("push includes orphan changeset: %s!"),
-                   "phase-divergent": mspd,
-                   "content-divergent": mscd}
+            mso = _(b"push includes obsolete changeset: %s!")
+            mspd = _(b"push includes phase-divergent changeset: %s!")
+            mscd = _(b"push includes content-divergent changeset: %s!")
+            mst = {
+                b"orphan": _(b"push includes orphan changeset: %s!"),
+                b"phase-divergent": mspd,
+                b"content-divergent": mscd,
+            }
             # If we push when there is at least one
             # obsolete or unstable changeset in missing, at
             # least one of the missinghead will be obsolete or
@@ -806,6 +914,7 @@
         discovery.checkheads(pushop)
     return True
 
+
 # List of names of steps to perform for an outgoing bundle2, order matters.
 b2partsgenorder = []
 
@@ -814,6 +923,7 @@
 # This exists to help extensions wrap steps if necessary
 b2partsgenmapping = {}
 
+
 def b2partsgenerator(stepname, idx=None):
     """decorator for function generating bundle2 part
 
@@ -823,6 +933,7 @@
 
     You can only use this decorator for new steps; if you want to wrap a step
     from an extension, change the b2partsgenmapping dictionary directly."""
+
     def dec(func):
         assert stepname not in b2partsgenmapping
         b2partsgenmapping[stepname] = func
@@ -831,8 +942,10 @@
         else:
             b2partsgenorder.insert(idx, stepname)
         return func
+
     return dec
 
+
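Unlike the discovery registry, b2partsgenerator() takes an optional idx so a
part generator can be spliced in at a given position instead of appended; the
pushvars generator below uses idx=0 to run first. A hypothetical registration
(the names are illustrative, not part of this change):

    from mercurial import exchange

    @exchange.b2partsgenerator(b'my-part', idx=0)
    def _pushb2mypart(pushop, bundler):
        # idx=0 inserts the step at the front of b2partsgenorder;
        # returning a callable would register it as a reply handler
        return None
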
 def _pushb2ctxcheckheads(pushop, bundler):
     """Generate race condition checking parts
 
@@ -841,13 +954,15 @@
     # * 'force' does not check for push race,
     # * if we don't push anything, there is nothing to check.
     if not pushop.force and pushop.outgoing.missingheads:
-        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
+        allowunrelated = b'related' in bundler.capabilities.get(
+            b'checkheads', ()
+        )
         emptyremote = pushop.pushbranchmap is None
         if not allowunrelated or emptyremote:
-            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
+            bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
         else:
             affected = set()
-            for branch, heads in pushop.pushbranchmap.iteritems():
+            for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
                 remoteheads, newheads, unsyncedheads, discardedheads = heads
                 if remoteheads is not None:
                     remote = set(remoteheads)
@@ -855,38 +970,42 @@
                     affected |= remote - set(newheads)
             if affected:
                 data = iter(sorted(affected))
-                bundler.newpart('check:updated-heads', data=data)
+                bundler.newpart(b'check:updated-heads', data=data)
+
 
 def _pushing(pushop):
     """return True if we are pushing anything"""
-    return bool(pushop.outgoing.missing
-                or pushop.outdatedphases
-                or pushop.outobsmarkers
-                or pushop.outbookmarks)
-
-@b2partsgenerator('check-bookmarks')
+    return bool(
+        pushop.outgoing.missing
+        or pushop.outdatedphases
+        or pushop.outobsmarkers
+        or pushop.outbookmarks
+    )
+
+
+@b2partsgenerator(b'check-bookmarks')
 def _pushb2checkbookmarks(pushop, bundler):
     """insert bookmark move checking"""
     if not _pushing(pushop) or pushop.force:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)
-    hasbookmarkcheck = 'bookmarks' in b2caps
+    hasbookmarkcheck = b'bookmarks' in b2caps
     if not (pushop.outbookmarks and hasbookmarkcheck):
         return
     data = []
     for book, old, new in pushop.outbookmarks:
-        old = bin(old)
         data.append((book, old))
     checkdata = bookmod.binaryencode(data)
-    bundler.newpart('check:bookmarks', data=checkdata)
-
-@b2partsgenerator('check-phases')
+    bundler.newpart(b'check:bookmarks', data=checkdata)
+
+
+@b2partsgenerator(b'check-phases')
 def _pushb2checkphases(pushop, bundler):
     """insert phase move checking"""
     if not _pushing(pushop) or pushop.force:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)
-    hasphaseheads = 'heads' in b2caps.get('phases', ())
+    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
     if pushop.remotephases is not None and hasphaseheads:
         # check that the remote phase has not changed
         checks = [[] for p in phases.allphases]
@@ -896,17 +1015,18 @@
             for nodes in checks:
                 nodes.sort()
             checkdata = phases.binaryencode(checks)
-            bundler.newpart('check:phases', data=checkdata)
-
-@b2partsgenerator('changeset')
+            bundler.newpart(b'check:phases', data=checkdata)
+
+
+@b2partsgenerator(b'changeset')
 def _pushb2ctx(pushop, bundler):
     """handle changegroup push through bundle2
 
     addchangegroup result is stored in the ``pushop.cgresult`` attribute.
     """
-    if 'changesets' in pushop.stepsdone:
+    if b'changesets' in pushop.stepsdone:
         return
-    pushop.stepsdone.add('changesets')
+    pushop.stepsdone.add(b'changesets')
     # Send known heads to the server for race detection.
     if not _pushcheckoutgoing(pushop):
         return
@@ -915,133 +1035,150 @@
     _pushb2ctxcheckheads(pushop, bundler)
 
     b2caps = bundle2.bundle2caps(pushop.remote)
-    version = '01'
-    cgversions = b2caps.get('changegroup')
+    version = b'01'
+    cgversions = b2caps.get(b'changegroup')
     if cgversions:  # 3.1 and 3.2 ship with an empty value
-        cgversions = [v for v in cgversions
-                      if v in changegroup.supportedoutgoingversions(
-                          pushop.repo)]
+        cgversions = [
+            v
+            for v in cgversions
+            if v in changegroup.supportedoutgoingversions(pushop.repo)
+        ]
         if not cgversions:
-            raise error.Abort(_('no common changegroup version'))
+            raise error.Abort(_(b'no common changegroup version'))
         version = max(cgversions)
-    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
-                                      'push')
-    cgpart = bundler.newpart('changegroup', data=cgstream)
+    cgstream = changegroup.makestream(
+        pushop.repo, pushop.outgoing, version, b'push'
+    )
+    cgpart = bundler.newpart(b'changegroup', data=cgstream)
     if cgversions:
-        cgpart.addparam('version', version)
-    if 'treemanifest' in pushop.repo.requirements:
-        cgpart.addparam('treemanifest', '1')
+        cgpart.addparam(b'version', version)
+    if b'treemanifest' in pushop.repo.requirements:
+        cgpart.addparam(b'treemanifest', b'1')
+    if b'exp-sidedata-flag' in pushop.repo.requirements:
+        cgpart.addparam(b'exp-sidedata', b'1')
+
     def handlereply(op):
         """extract addchangegroup returns from server reply"""
         cgreplies = op.records.getreplies(cgpart.id)
-        assert len(cgreplies['changegroup']) == 1
-        pushop.cgresult = cgreplies['changegroup'][0]['return']
+        assert len(cgreplies[b'changegroup']) == 1
+        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
+
     return handlereply
 
-@b2partsgenerator('phase')
+
+@b2partsgenerator(b'phase')
 def _pushb2phases(pushop, bundler):
     """handle phase push through bundle2"""
-    if 'phases' in pushop.stepsdone:
+    if b'phases' in pushop.stepsdone:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)
     ui = pushop.repo.ui
 
-    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
-    haspushkey = 'pushkey' in b2caps
-    hasphaseheads = 'heads' in b2caps.get('phases', ())
+    legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
+    haspushkey = b'pushkey' in b2caps
+    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
 
     if hasphaseheads and not legacyphase:
         return _pushb2phaseheads(pushop, bundler)
     elif haspushkey:
         return _pushb2phasespushkey(pushop, bundler)
 
+
 def _pushb2phaseheads(pushop, bundler):
     """push phase information through a bundle2 - binary part"""
-    pushop.stepsdone.add('phases')
+    pushop.stepsdone.add(b'phases')
     if pushop.outdatedphases:
         updates = [[] for p in phases.allphases]
         updates[0].extend(h.node() for h in pushop.outdatedphases)
         phasedata = phases.binaryencode(updates)
-        bundler.newpart('phase-heads', data=phasedata)
+        bundler.newpart(b'phase-heads', data=phasedata)
+
 
 def _pushb2phasespushkey(pushop, bundler):
     """push phase information through a bundle2 - pushkey part"""
-    pushop.stepsdone.add('phases')
+    pushop.stepsdone.add(b'phases')
     part2node = []
 
     def handlefailure(pushop, exc):
         targetid = int(exc.partid)
         for partid, node in part2node:
             if partid == targetid:
-                raise error.Abort(_('updating %s to public failed') % node)
+                raise error.Abort(_(b'updating %s to public failed') % node)
 
     enc = pushkey.encode
     for newremotehead in pushop.outdatedphases:
-        part = bundler.newpart('pushkey')
-        part.addparam('namespace', enc('phases'))
-        part.addparam('key', enc(newremotehead.hex()))
-        part.addparam('old', enc('%d' % phases.draft))
-        part.addparam('new', enc('%d' % phases.public))
+        part = bundler.newpart(b'pushkey')
+        part.addparam(b'namespace', enc(b'phases'))
+        part.addparam(b'key', enc(newremotehead.hex()))
+        part.addparam(b'old', enc(b'%d' % phases.draft))
+        part.addparam(b'new', enc(b'%d' % phases.public))
         part2node.append((part.id, newremotehead))
         pushop.pkfailcb[part.id] = handlefailure
 
     def handlereply(op):
         for partid, node in part2node:
             partrep = op.records.getreplies(partid)
-            results = partrep['pushkey']
+            results = partrep[b'pushkey']
             assert len(results) <= 1
             msg = None
             if not results:
-                msg = _('server ignored update of %s to public!\n') % node
-            elif not int(results[0]['return']):
-                msg = _('updating %s to public failed!\n') % node
+                msg = _(b'server ignored update of %s to public!\n') % node
+            elif not int(results[0][b'return']):
+                msg = _(b'updating %s to public failed!\n') % node
             if msg is not None:
                 pushop.ui.warn(msg)
+
     return handlereply
 
-@b2partsgenerator('obsmarkers')
+
+@b2partsgenerator(b'obsmarkers')
 def _pushb2obsmarkers(pushop, bundler):
-    if 'obsmarkers' in pushop.stepsdone:
+    if b'obsmarkers' in pushop.stepsdone:
         return
     remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
     if obsolete.commonversion(remoteversions) is None:
         return
-    pushop.stepsdone.add('obsmarkers')
+    pushop.stepsdone.add(b'obsmarkers')
     if pushop.outobsmarkers:
-        markers = sorted(pushop.outobsmarkers)
+        markers = _sortedmarkers(pushop.outobsmarkers)
         bundle2.buildobsmarkerspart(bundler, markers)
 
-@b2partsgenerator('bookmarks')
+
+@b2partsgenerator(b'bookmarks')
 def _pushb2bookmarks(pushop, bundler):
     """handle bookmark push through bundle2"""
-    if 'bookmarks' in pushop.stepsdone:
+    if b'bookmarks' in pushop.stepsdone:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)
 
-    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
-    legacybooks = 'bookmarks' in legacy
-
-    if not legacybooks and 'bookmarks' in b2caps:
+    legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
+    legacybooks = b'bookmarks' in legacy
+
+    if not legacybooks and b'bookmarks' in b2caps:
         return _pushb2bookmarkspart(pushop, bundler)
-    elif 'pushkey' in b2caps:
+    elif b'pushkey' in b2caps:
         return _pushb2bookmarkspushkey(pushop, bundler)
 
+
 def _bmaction(old, new):
     """small utility for bookmark pushing"""
     if not old:
-        return 'export'
+        return b'export'
     elif not new:
-        return 'delete'
-    return 'update'
+        return b'delete'
+    return b'update'
+
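A falsy old value means the bookmark is new on the remote and a falsy new
value means it is being removed; anything else is a move. For example, with
illustrative 20-byte nodes:

    node1, node2 = b'\x11' * 20, b'\x22' * 20
    _bmaction(b'', node1)     # -> b'export' (create on the remote)
    _bmaction(node1, b'')     # -> b'delete' (remove from the remote)
    _bmaction(node1, node2)   # -> b'update'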
 
 def _abortonsecretctx(pushop, node, b):
     """abort if a given bookmark points to a secret changeset"""
     if node and pushop.repo[node].phase() == phases.secret:
-        raise error.Abort(_('cannot push bookmark %s as it points to a secret'
-                            ' changeset') % b)
+        raise error.Abort(
+            _(b'cannot push bookmark %s as it points to a secret changeset') % b
+        )
+
 
 def _pushb2bookmarkspart(pushop, bundler):
-    pushop.stepsdone.add('bookmarks')
+    pushop.stepsdone.add(b'bookmarks')
     if not pushop.outbookmarks:
         return
 
@@ -1049,11 +1186,10 @@
     data = []
     for book, old, new in pushop.outbookmarks:
         _abortonsecretctx(pushop, new, book)
-        new = bin(new)
         data.append((book, new))
         allactions.append((book, _bmaction(old, new)))
     checkdata = bookmod.binaryencode(data)
-    bundler.newpart('bookmarks', data=checkdata)
+    bundler.newpart(b'bookmarks', data=checkdata)
 
     def handlereply(op):
         ui = pushop.ui
@@ -1063,8 +1199,9 @@
 
     return handlereply
 
+
 def _pushb2bookmarkspushkey(pushop, bundler):
-    pushop.stepsdone.add('bookmarks')
+    pushop.stepsdone.add(b'bookmarks')
     part2book = []
     enc = pushkey.encode
 
@@ -1078,16 +1215,16 @@
 
     for book, old, new in pushop.outbookmarks:
         _abortonsecretctx(pushop, new, book)
-        part = bundler.newpart('pushkey')
-        part.addparam('namespace', enc('bookmarks'))
-        part.addparam('key', enc(book))
-        part.addparam('old', enc(old))
-        part.addparam('new', enc(new))
-        action = 'update'
+        part = bundler.newpart(b'pushkey')
+        part.addparam(b'namespace', enc(b'bookmarks'))
+        part.addparam(b'key', enc(book))
+        part.addparam(b'old', enc(hex(old)))
+        part.addparam(b'new', enc(hex(new)))
+        action = b'update'
         if not old:
-            action = 'export'
+            action = b'export'
         elif not new:
-            action = 'delete'
+            action = b'delete'
         part2book.append((part.id, book, action))
         pushop.pkfailcb[part.id] = handlefailure
 
@@ -1095,53 +1232,59 @@
         ui = pushop.ui
         for partid, book, action in part2book:
             partrep = op.records.getreplies(partid)
-            results = partrep['pushkey']
+            results = partrep[b'pushkey']
             assert len(results) <= 1
             if not results:
-                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
+                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
             else:
-                ret = int(results[0]['return'])
+                ret = int(results[0][b'return'])
                 if ret:
                     ui.status(bookmsgmap[action][0] % book)
                 else:
                     ui.warn(bookmsgmap[action][1] % book)
                     if pushop.bkresult is not None:
                         pushop.bkresult = 1
+
     return handlereply
 
-@b2partsgenerator('pushvars', idx=0)
+
+@b2partsgenerator(b'pushvars', idx=0)
 def _getbundlesendvars(pushop, bundler):
     '''send shellvars via bundle2'''
     pushvars = pushop.pushvars
     if pushvars:
         shellvars = {}
         for raw in pushvars:
-            if '=' not in raw:
-                msg = ("unable to parse variable '%s', should follow "
-                        "'KEY=VALUE' or 'KEY=' format")
+            if b'=' not in raw:
+                msg = (
+                    b"unable to parse variable '%s', should follow "
+                    b"'KEY=VALUE' or 'KEY=' format"
+                )
                 raise error.Abort(msg % raw)
-            k, v = raw.split('=', 1)
+            k, v = raw.split(b'=', 1)
             shellvars[k] = v
 
-        part = bundler.newpart('pushvars')
-
-        for key, value in shellvars.iteritems():
+        part = bundler.newpart(b'pushvars')
+
+        for key, value in pycompat.iteritems(shellvars):
             part.addparam(key, value, mandatory=False)
 
+
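The parsing above accepts an empty value but rejects a missing '='; the
variables then travel as bundle2 part parameters, which server-side hooks
conventionally see as HG_USERVAR_* environment variables. A small worked
example of the split:

    raw = b'DEBUG=1'
    k, v = raw.split(b'=', 1)    # -> (b'DEBUG', b'1')
    raw = b'DEBUG='
    k, v = raw.split(b'=', 1)    # -> (b'DEBUG', b''), empty value allowed
    # b'DEBUG' (no '=') raises error.Abort in the loop above
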
 def _pushbundle2(pushop):
     """push data to the remote using bundle2
 
     The only currently supported type of data is changegroup but this will
     evolve in the future."""
     bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
-    pushback = (pushop.trmanager
-                and pushop.ui.configbool('experimental', 'bundle2.pushback'))
+    pushback = pushop.trmanager and pushop.ui.configbool(
+        b'experimental', b'bundle2.pushback'
+    )
 
     # create reply capability
-    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
-                                                      allowpushback=pushback,
-                                                      role='client'))
-    bundler.newpart('replycaps', data=capsblob)
+    capsblob = bundle2.encodecaps(
+        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
+    )
+    bundler.newpart(b'replycaps', data=capsblob)
     replyhandlers = []
     for partgenname in b2partsgenorder:
         partgen = b2partsgenmapping[partgenname]
@@ -1155,25 +1298,28 @@
     try:
         try:
             with pushop.remote.commandexecutor() as e:
-                reply = e.callcommand('unbundle', {
-                    'bundle': stream,
-                    'heads': ['force'],
-                    'url': pushop.remote.url(),
-                }).result()
+                reply = e.callcommand(
+                    b'unbundle',
+                    {
+                        b'bundle': stream,
+                        b'heads': [b'force'],
+                        b'url': pushop.remote.url(),
+                    },
+                ).result()
         except error.BundleValueError as exc:
-            raise error.Abort(_('missing support for %s') % exc)
+            raise error.Abort(_(b'missing support for %s') % exc)
         try:
             trgetter = None
             if pushback:
                 trgetter = pushop.trmanager.transaction
             op = bundle2.processbundle(pushop.repo, reply, trgetter)
         except error.BundleValueError as exc:
-            raise error.Abort(_('missing support for %s') % exc)
+            raise error.Abort(_(b'missing support for %s') % exc)
         except bundle2.AbortFromPart as exc:
-            pushop.ui.status(_('remote: %s\n') % exc)
+            pushop.ui.status(_(b'remote: %s\n') % exc)
             if exc.hint is not None:
-                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
-            raise error.Abort(_('push failed on remote'))
+                pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
+            raise error.Abort(_(b'push failed on remote'))
     except error.PushkeyFailed as exc:
         partid = int(exc.partid)
         if partid not in pushop.pkfailcb:
@@ -1182,31 +1328,40 @@
     for rephand in replyhandlers:
         rephand(op)
 
+
 def _pushchangeset(pushop):
     """Make the actual push of changeset bundle to remote repo"""
-    if 'changesets' in pushop.stepsdone:
+    if b'changesets' in pushop.stepsdone:
         return
-    pushop.stepsdone.add('changesets')
+    pushop.stepsdone.add(b'changesets')
     if not _pushcheckoutgoing(pushop):
         return
 
     # Should have verified this in push().
-    assert pushop.remote.capable('unbundle')
+    assert pushop.remote.capable(b'unbundle')
 
     pushop.repo.prepushoutgoinghooks(pushop)
     outgoing = pushop.outgoing
     # TODO: get bundlecaps from remote
     bundlecaps = None
     # create a changegroup from local
-    if pushop.revs is None and not (outgoing.excluded
-                            or pushop.repo.changelog.filteredrevs):
+    if pushop.revs is None and not (
+        outgoing.excluded or pushop.repo.changelog.filteredrevs
+    ):
         # push everything,
         # use the fast path, no race possible on push
-        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
-                fastpath=True, bundlecaps=bundlecaps)
+        cg = changegroup.makechangegroup(
+            pushop.repo,
+            outgoing,
+            b'01',
+            b'push',
+            fastpath=True,
+            bundlecaps=bundlecaps,
+        )
     else:
-        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
-                                        'push', bundlecaps=bundlecaps)
+        cg = changegroup.makechangegroup(
+            pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
+        )
 
     # apply changegroup to remote
     # local repo finds heads on server, finds out what
@@ -1214,23 +1369,25 @@
     # finds it has different heads (someone else won
     # commit/push race), server aborts.
     if pushop.force:
-        remoteheads = ['force']
+        remoteheads = [b'force']
     else:
         remoteheads = pushop.remoteheads
     # ssh: return remote's addchangegroup()
     # http: return remote's addchangegroup() or 0 for error
-    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
-                                        pushop.repo.url())
+    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
+
 
 def _pushsyncphase(pushop):
     """synchronise phase information locally and remotely"""
     cheads = pushop.commonheads
     # even when we don't push, exchanging phase data is useful
-    remotephases = listkeys(pushop.remote, 'phases')
-    if (pushop.ui.configbool('ui', '_usedassubrepo')
-        and remotephases    # server supports phases
-        and pushop.cgresult is None # nothing was pushed
-        and remotephases.get('publishing', False)):
+    remotephases = listkeys(pushop.remote, b'phases')
+    if (
+        pushop.ui.configbool(b'ui', b'_usedassubrepo')
+        and remotephases  # server supports phases
+        and pushop.cgresult is None  # nothing was pushed
+        and remotephases.get(b'publishing', False)
+    ):
         # When:
         # - this is a subrepo push
         # - and remote support phase
@@ -1240,55 +1397,58 @@
         # We drop the possible phase synchronisation done by
         # courtesy to publish changesets possibly locally draft
         # on the remote.
-        remotephases = {'publishing': 'True'}
-    if not remotephases: # old server or public only reply from non-publishing
+        remotephases = {b'publishing': b'True'}
+    if not remotephases:  # old server or public only reply from non-publishing
         _localphasemove(pushop, cheads)
         # don't push any phase data as there is nothing to push
     else:
-        ana = phases.analyzeremotephases(pushop.repo, cheads,
-                                         remotephases)
+        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
         pheads, droots = ana
         ### Apply remote phase on local
-        if remotephases.get('publishing', False):
+        if remotephases.get(b'publishing', False):
             _localphasemove(pushop, cheads)
-        else: # publish = False
+        else:  # publish = False
             _localphasemove(pushop, pheads)
             _localphasemove(pushop, cheads, phases.draft)
         ### Apply local phase on remote
 
         if pushop.cgresult:
-            if 'phases' in pushop.stepsdone:
+            if b'phases' in pushop.stepsdone:
                 # phases already pushed though bundle2
                 return
             outdated = pushop.outdatedphases
         else:
             outdated = pushop.fallbackoutdatedphases
 
-        pushop.stepsdone.add('phases')
+        pushop.stepsdone.add(b'phases')
 
         # filter heads already turned public by the push
         outdated = [c for c in outdated if c.node() not in pheads]
         # fallback to independent pushkey command
         for newremotehead in outdated:
             with pushop.remote.commandexecutor() as e:
-                r = e.callcommand('pushkey', {
-                    'namespace': 'phases',
-                    'key': newremotehead.hex(),
-                    'old': '%d' % phases.draft,
-                    'new': '%d' % phases.public
-                }).result()
+                r = e.callcommand(
+                    b'pushkey',
+                    {
+                        b'namespace': b'phases',
+                        b'key': newremotehead.hex(),
+                        b'old': b'%d' % phases.draft,
+                        b'new': b'%d' % phases.public,
+                    },
+                ).result()
 
             if not r:
-                pushop.ui.warn(_('updating %s to public failed!\n')
-                               % newremotehead)
+                pushop.ui.warn(
+                    _(b'updating %s to public failed!\n') % newremotehead
+                )
+
 
 def _localphasemove(pushop, nodes, phase=phases.public):
     """move <nodes> to <phase> in the local source repo"""
     if pushop.trmanager:
-        phases.advanceboundary(pushop.repo,
-                               pushop.trmanager.transaction(),
-                               phase,
-                               nodes)
+        phases.advanceboundary(
+            pushop.repo, pushop.trmanager.transaction(), phase, nodes
+        )
     else:
         # repo is not locked, do not change any phases!
         # Informs the user that phases should have been moved when
@@ -1296,50 +1456,61 @@
         actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
         phasestr = phases.phasenames[phase]
         if actualmoves:
-            pushop.ui.status(_('cannot lock source repo, skipping '
-                               'local %s phase update\n') % phasestr)
+            pushop.ui.status(
+                _(
+                    b'cannot lock source repo, skipping '
+                    b'local %s phase update\n'
+                )
+                % phasestr
+            )
+
 
 def _pushobsolete(pushop):
     """utility function to push obsolete markers to a remote"""
-    if 'obsmarkers' in pushop.stepsdone:
+    if b'obsmarkers' in pushop.stepsdone:
         return
     repo = pushop.repo
     remote = pushop.remote
-    pushop.stepsdone.add('obsmarkers')
+    pushop.stepsdone.add(b'obsmarkers')
     if pushop.outobsmarkers:
-        pushop.ui.debug('try to push obsolete markers to remote\n')
+        pushop.ui.debug(b'try to push obsolete markers to remote\n')
         rslts = []
-        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
+        markers = _sortedmarkers(pushop.outobsmarkers)
+        remotedata = obsolete._pushkeyescape(markers)
         for key in sorted(remotedata, reverse=True):
             # reverse sort to ensure we end with dump0
             data = remotedata[key]
-            rslts.append(remote.pushkey('obsolete', key, '', data))
+            rslts.append(remote.pushkey(b'obsolete', key, b'', data))
         if [r for r in rslts if not r]:
-            msg = _('failed to push some obsolete markers!\n')
+            msg = _(b'failed to push some obsolete markers!\n')
             repo.ui.warn(msg)
 
+
 def _pushbookmark(pushop):
     """Update bookmark position on remote"""
-    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
+    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
         return
-    pushop.stepsdone.add('bookmarks')
+    pushop.stepsdone.add(b'bookmarks')
     ui = pushop.ui
     remote = pushop.remote
 
     for b, old, new in pushop.outbookmarks:
-        action = 'update'
+        action = b'update'
         if not old:
-            action = 'export'
+            action = b'export'
         elif not new:
-            action = 'delete'
+            action = b'delete'
 
         with remote.commandexecutor() as e:
-            r = e.callcommand('pushkey', {
-                'namespace': 'bookmarks',
-                'key': b,
-                'old': old,
-                'new': new,
-            }).result()
+            r = e.callcommand(
+                b'pushkey',
+                {
+                    b'namespace': b'bookmarks',
+                    b'key': b,
+                    b'old': hex(old),
+                    b'new': hex(new),
+                },
+            ).result()
 
         if r:
             ui.status(bookmsgmap[action][0] % b)
@@ -1349,6 +1520,7 @@
             if pushop.bkresult is not None:
                 pushop.bkresult = 1
 
+
 class pulloperation(object):
     """A object that represent a single pull operation
 
@@ -1358,9 +1530,19 @@
     afterward.
     """
 
-    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
-                 remotebookmarks=None, streamclonerequested=None,
-                 includepats=None, excludepats=None, depth=None):
+    def __init__(
+        self,
+        repo,
+        remote,
+        heads=None,
+        force=False,
+        bookmarks=(),
+        remotebookmarks=None,
+        streamclonerequested=None,
+        includepats=None,
+        excludepats=None,
+        depth=None,
+    ):
         # repo we pull into
         self.repo = repo
         # repo we pull from
@@ -1368,8 +1550,9 @@
         # revision we try to pull (None is "all")
         self.heads = heads
         # bookmarks pulled explicitly
-        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
-                                  for bookmark in bookmarks]
+        self.explicitbookmarks = [
+            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
+        ]
         # do we force pull?
         self.force = force
         # whether a streaming clone was requested
@@ -1427,11 +1610,13 @@
         # deprecated; talk to trmanager directly
         return self.trmanager.transaction()
 
+
 class transactionmanager(util.transactional):
     """An object to manage the life cycle of a transaction
 
     It creates the transaction on demand and calls the appropriate hooks when
     closing the transaction."""
+
     def __init__(self, repo, source, url):
         self.repo = repo
         self.source = source
@@ -1441,10 +1626,10 @@
     def transaction(self):
         """Return an open transaction object, constructing if necessary"""
         if not self._tr:
-            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
+            trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
             self._tr = self.repo.transaction(trname)
-            self._tr.hookargs['source'] = self.source
-            self._tr.hookargs['url'] = self.url
+            self._tr.hookargs[b'source'] = self.source
+            self._tr.hookargs[b'url'] = self.url
         return self._tr
 
     def close(self):
@@ -1457,9 +1642,11 @@
         if self._tr is not None:
             self._tr.release()
 
+
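Because it subclasses util.transactional, a transactionmanager is normally
driven as a context manager, which is how pull() uses it further down. A
sketch of the pattern:

    # sketch of the usage pattern; error handling omitted
    trmanager = transactionmanager(repo, b'pull', remote.url())
    with trmanager:
        tr = trmanager.transaction()  # created lazily, hook args attached
        # ... apply incoming data under tr ...
    # close() runs on success, release() on failure
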
 def listkeys(remote, namespace):
     with remote.commandexecutor() as e:
-        return e.callcommand('listkeys', {'namespace': namespace}).result()
+        return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
+
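listkeys() wraps the wire-protocol command of the same name in the
commandexecutor API. For the b'phases' namespace the reply follows the usual
pushkey convention (the values shown are illustrative):

    remotephases = listkeys(remote, b'phases')
    # e.g. {b'publishing': b'True'} for a publishing server, plus
    # hex-node -> phase entries describing draft roots otherwise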
 
 def _fullpullbundle2(repo, pullop):
     # The server may send a partial reply, i.e. when inlining
@@ -1473,14 +1660,17 @@
     # markers can hide heads.
     unfi = repo.unfiltered()
     unficl = unfi.changelog
+
     def headsofdiff(h1, h2):
         """Returns heads(h1 % h2)"""
-        res = unfi.set('heads(%ln %% %ln)', h1, h2)
+        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
         return set(ctx.node() for ctx in res)
+
     def headsofunion(h1, h2):
         """Returns heads((h1 + h2) - null)"""
-        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
+        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
         return set(ctx.node() for ctx in res)
+
     while True:
         old_heads = unficl.heads()
         clstart = len(unficl)
@@ -1499,9 +1689,19 @@
         pullop.common = headsofunion(new_heads, pullop.common)
         pullop.rheads = set(pullop.rheads) - pullop.common
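
In the helpers above, the revset operator '%' is shorthand for only(), so
heads(h1 % h2) selects the heads among ancestors of h1 that are not ancestors
of h2. The two spellings below are equivalent (a sketch, with %% escaping the
literal operator):

    unfi.set(b'heads(%ln %% %ln)', h1, h2)
    unfi.set(b'heads(only(%ln, %ln))', h1, h2)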
 
-def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
-         streamclonerequested=None, includepats=None, excludepats=None,
-         depth=None):
+
+def pull(
+    repo,
+    remote,
+    heads=None,
+    force=False,
+    bookmarks=(),
+    opargs=None,
+    streamclonerequested=None,
+    includepats=None,
+    excludepats=None,
+    depth=None,
+):
     """Fetch repository data from a remote.
 
     This is the main function used to retrieve data from a remote repository.
@@ -1542,28 +1742,37 @@
     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)
 
-    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
-                           streamclonerequested=streamclonerequested,
-                           includepats=includepats, excludepats=excludepats,
-                           depth=depth,
-                           **pycompat.strkwargs(opargs))
+    pullop = pulloperation(
+        repo,
+        remote,
+        heads,
+        force,
+        bookmarks=bookmarks,
+        streamclonerequested=streamclonerequested,
+        includepats=includepats,
+        excludepats=excludepats,
+        depth=depth,
+        **pycompat.strkwargs(opargs)
+    )
 
     peerlocal = pullop.remote.local()
     if peerlocal:
         missing = set(peerlocal.requirements) - pullop.repo.supported
         if missing:
-            msg = _("required features are not"
-                    " supported in the destination:"
-                    " %s") % (', '.join(sorted(missing)))
+            msg = _(
+                b"required features are not"
+                b" supported in the destination:"
+                b" %s"
+            ) % (b', '.join(sorted(missing)))
             raise error.Abort(msg)
 
-    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
+    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
     wlock = util.nullcontextmanager()
     if not bookmod.bookmarksinstore(repo):
         wlock = repo.wlock()
     with wlock, repo.lock(), pullop.trmanager:
         # Use the modern wire protocol, if available.
-        if remote.capable('command-changesetdata'):
+        if remote.capable(b'command-changesetdata'):
             exchangev2.pull(pullop)
         else:
             # This should ideally be in _pullbundle2(). However, it needs to run
@@ -1579,11 +1788,12 @@
             _pullobsolete(pullop)
 
     # storing remotenames
-    if repo.ui.configbool('experimental', 'remotenames'):
+    if repo.ui.configbool(b'experimental', b'remotenames'):
         logexchange.pullremotenames(repo, remote)
 
     return pullop
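
A minimal sketch of driving pull() directly, assuming a local repository at
the current directory and an illustrative remote URL:

    from mercurial import exchange, hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'.')
    other = hg.peer(repo, {}, b'https://example.com/repo')
    pullop = exchange.pull(repo, other, heads=None, force=False)
    # pullop.cgresult is 0 when no changes were found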
 
+
 # list of steps to perform discovery before pull
 pulldiscoveryorder = []
 
@@ -1592,6 +1802,7 @@
 # This exists to help extensions wrap steps if necessary
 pulldiscoverymapping = {}
 
+
 def pulldiscovery(stepname):
     """decorator for function performing discovery before pull
 
@@ -1601,20 +1812,24 @@
 
     You can only use this decorator for a new step; if you want to wrap a step
     from an extension, change the pulldiscoverymapping dictionary directly."""
+
     def dec(func):
         assert stepname not in pulldiscoverymapping
         pulldiscoverymapping[stepname] = func
         pulldiscoveryorder.append(stepname)
         return func
+
     return dec
 
+
 def _pulldiscovery(pullop):
     """Run all discovery steps"""
     for stepname in pulldiscoveryorder:
         step = pulldiscoverymapping[stepname]
         step(pullop)
 
-@pulldiscovery('b1:bookmarks')
+
+@pulldiscovery(b'b1:bookmarks')
 def _pullbookmarkbundle1(pullop):
     """fetch bookmark data in bundle1 case
 
@@ -1622,24 +1837,23 @@
     discovery to reduce the chance and impact of race conditions."""
     if pullop.remotebookmarks is not None:
         return
-    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
+    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
         # all known bundle2 servers now support listkeys, but lets be nice with
         # new implementation.
         return
-    books = listkeys(pullop.remote, 'bookmarks')
+    books = listkeys(pullop.remote, b'bookmarks')
     pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
 
 
-@pulldiscovery('changegroup')
+@pulldiscovery(b'changegroup')
 def _pulldiscoverychangegroup(pullop):
     """discovery phase for the pull
 
     Currently this handles changeset discovery only; it will handle all
     discovery at some point."""
-    tmp = discovery.findcommonincoming(pullop.repo,
-                                       pullop.remote,
-                                       heads=pullop.heads,
-                                       force=pullop.force)
+    tmp = discovery.findcommonincoming(
+        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
+    )
     common, fetch, rheads = tmp
     nm = pullop.repo.unfiltered().changelog.nodemap
     if fetch and rheads:
@@ -1663,11 +1877,12 @@
     pullop.fetch = fetch
     pullop.rheads = rheads
 
+
 def _pullbundle2(pullop):
     """pull data using bundle2
 
     For now, the only supported data type is changegroup."""
-    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
+    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
 
     # make ui easier to access
     ui = pullop.repo.ui
@@ -1677,188 +1892,207 @@
     streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
 
     # declare pull perimeters
-    kwargs['common'] = pullop.common
-    kwargs['heads'] = pullop.heads or pullop.rheads
+    kwargs[b'common'] = pullop.common
+    kwargs[b'heads'] = pullop.heads or pullop.rheads
 
     # check server supports narrow and then adding includepats and excludepats
     servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
     if servernarrow and pullop.includepats:
-        kwargs['includepats'] = pullop.includepats
+        kwargs[b'includepats'] = pullop.includepats
     if servernarrow and pullop.excludepats:
-        kwargs['excludepats'] = pullop.excludepats
+        kwargs[b'excludepats'] = pullop.excludepats
 
     if streaming:
-        kwargs['cg'] = False
-        kwargs['stream'] = True
-        pullop.stepsdone.add('changegroup')
-        pullop.stepsdone.add('phases')
+        kwargs[b'cg'] = False
+        kwargs[b'stream'] = True
+        pullop.stepsdone.add(b'changegroup')
+        pullop.stepsdone.add(b'phases')
 
     else:
         # pulling changegroup
-        pullop.stepsdone.add('changegroup')
-
-        kwargs['cg'] = pullop.fetch
-
-        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
-        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
-        if (not legacyphase and hasbinaryphase):
-            kwargs['phases'] = True
-            pullop.stepsdone.add('phases')
-
-        if 'listkeys' in pullop.remotebundle2caps:
-            if 'phases' not in pullop.stepsdone:
-                kwargs['listkeys'] = ['phases']
+        pullop.stepsdone.add(b'changegroup')
+
+        kwargs[b'cg'] = pullop.fetch
+
+        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
+        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
+        if not legacyphase and hasbinaryphase:
+            kwargs[b'phases'] = True
+            pullop.stepsdone.add(b'phases')
+
+        if b'listkeys' in pullop.remotebundle2caps:
+            if b'phases' not in pullop.stepsdone:
+                kwargs[b'listkeys'] = [b'phases']
 
     bookmarksrequested = False
-    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
-    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
+    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
+    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
 
     if pullop.remotebookmarks is not None:
-        pullop.stepsdone.add('request-bookmarks')
-
-    if ('request-bookmarks' not in pullop.stepsdone
+        pullop.stepsdone.add(b'request-bookmarks')
+
+    if (
+        b'request-bookmarks' not in pullop.stepsdone
         and pullop.remotebookmarks is None
-        and not legacybookmark and hasbinarybook):
-        kwargs['bookmarks'] = True
+        and not legacybookmark
+        and hasbinarybook
+    ):
+        kwargs[b'bookmarks'] = True
         bookmarksrequested = True
 
-    if 'listkeys' in pullop.remotebundle2caps:
-        if 'request-bookmarks' not in pullop.stepsdone:
+    if b'listkeys' in pullop.remotebundle2caps:
+        if b'request-bookmarks' not in pullop.stepsdone:
             # make sure to always include bookmark data when migrating
             # `hg incoming --bundle` to using this function.
-            pullop.stepsdone.add('request-bookmarks')
-            kwargs.setdefault('listkeys', []).append('bookmarks')
+            pullop.stepsdone.add(b'request-bookmarks')
+            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
 
     # If this is a full pull / clone and the server supports the clone bundles
     # feature, tell the server whether we attempted a clone bundle. The
     # presence of this flag indicates the client supports clone bundles. This
     # will enable the server to treat clients that support clone bundles
     # differently from those that don't.
-    if (pullop.remote.capable('clonebundles')
-        and pullop.heads is None and list(pullop.common) == [nullid]):
-        kwargs['cbattempted'] = pullop.clonebundleattempted
+    if (
+        pullop.remote.capable(b'clonebundles')
+        and pullop.heads is None
+        and list(pullop.common) == [nullid]
+    ):
+        kwargs[b'cbattempted'] = pullop.clonebundleattempted
 
     if streaming:
-        pullop.repo.ui.status(_('streaming all changes\n'))
+        pullop.repo.ui.status(_(b'streaming all changes\n'))
     elif not pullop.fetch:
-        pullop.repo.ui.status(_("no changes found\n"))
+        pullop.repo.ui.status(_(b"no changes found\n"))
         pullop.cgresult = 0
     else:
         if pullop.heads is None and list(pullop.common) == [nullid]:
-            pullop.repo.ui.status(_("requesting all changes\n"))
+            pullop.repo.ui.status(_(b"requesting all changes\n"))
     if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
         remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
         if obsolete.commonversion(remoteversions) is not None:
-            kwargs['obsmarkers'] = True
-            pullop.stepsdone.add('obsmarkers')
+            kwargs[b'obsmarkers'] = True
+            pullop.stepsdone.add(b'obsmarkers')
     _pullbundle2extraprepare(pullop, kwargs)
 
     with pullop.remote.commandexecutor() as e:
         args = dict(kwargs)
-        args['source'] = 'pull'
-        bundle = e.callcommand('getbundle', args).result()
+        args[b'source'] = b'pull'
+        bundle = e.callcommand(b'getbundle', args).result()
 
         try:
-            op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
-                                         source='pull')
-            op.modes['bookmarks'] = 'records'
+            op = bundle2.bundleoperation(
+                pullop.repo, pullop.gettransaction, source=b'pull'
+            )
+            op.modes[b'bookmarks'] = b'records'
             bundle2.processbundle(pullop.repo, bundle, op=op)
         except bundle2.AbortFromPart as exc:
-            pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
-            raise error.Abort(_('pull failed on remote'), hint=exc.hint)
+            pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
+            raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
         except error.BundleValueError as exc:
-            raise error.Abort(_('missing support for %s') % exc)
+            raise error.Abort(_(b'missing support for %s') % exc)
 
     if pullop.fetch:
         pullop.cgresult = bundle2.combinechangegroupresults(op)
 
     # processing phases change
-    for namespace, value in op.records['listkeys']:
-        if namespace == 'phases':
+    for namespace, value in op.records[b'listkeys']:
+        if namespace == b'phases':
             _pullapplyphases(pullop, value)
 
     # processing bookmark update
     if bookmarksrequested:
         books = {}
-        for record in op.records['bookmarks']:
-            books[record['bookmark']] = record["node"]
+        for record in op.records[b'bookmarks']:
+            books[record[b'bookmark']] = record[b"node"]
         pullop.remotebookmarks = books
     else:
-        for namespace, value in op.records['listkeys']:
-            if namespace == 'bookmarks':
+        for namespace, value in op.records[b'listkeys']:
+            if namespace == b'bookmarks':
                 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
 
     # bookmark data were either already there or pulled in the bundle
     if pullop.remotebookmarks is not None:
         _pullbookmarks(pullop)
 
+
 def _pullbundle2extraprepare(pullop, kwargs):
     """hook function so that extensions can extend the getbundle call"""
 
+
 def _pullchangeset(pullop):
     """pull changeset from unbundle into the local repo"""
     # We delay opening the transaction as late as possible so we don't
     # open a transaction for nothing, and don't break a future, useful
     # rollback call
-    if 'changegroup' in pullop.stepsdone:
+    if b'changegroup' in pullop.stepsdone:
         return
-    pullop.stepsdone.add('changegroup')
+    pullop.stepsdone.add(b'changegroup')
     if not pullop.fetch:
-        pullop.repo.ui.status(_("no changes found\n"))
+        pullop.repo.ui.status(_(b"no changes found\n"))
         pullop.cgresult = 0
         return
     tr = pullop.gettransaction()
     if pullop.heads is None and list(pullop.common) == [nullid]:
-        pullop.repo.ui.status(_("requesting all changes\n"))
-    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
+        pullop.repo.ui.status(_(b"requesting all changes\n"))
+    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
         # issue1320, avoid a race if remote changed after discovery
         pullop.heads = pullop.rheads
 
-    if pullop.remote.capable('getbundle'):
+    if pullop.remote.capable(b'getbundle'):
         # TODO: get bundlecaps from remote
-        cg = pullop.remote.getbundle('pull', common=pullop.common,
-                                     heads=pullop.heads or pullop.rheads)
+        cg = pullop.remote.getbundle(
+            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
+        )
     elif pullop.heads is None:
         with pullop.remote.commandexecutor() as e:
-            cg = e.callcommand('changegroup', {
-                'nodes': pullop.fetch,
-                'source': 'pull',
-            }).result()
-
-    elif not pullop.remote.capable('changegroupsubset'):
-        raise error.Abort(_("partial pull cannot be done because "
-                           "other repository doesn't support "
-                           "changegroupsubset."))
+            cg = e.callcommand(
+                b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
+            ).result()
+
+    elif not pullop.remote.capable(b'changegroupsubset'):
+        raise error.Abort(
+            _(
+                b"partial pull cannot be done because "
+                b"other repository doesn't support "
+                b"changegroupsubset."
+            )
+        )
     else:
         with pullop.remote.commandexecutor() as e:
-            cg = e.callcommand('changegroupsubset', {
-                'bases': pullop.fetch,
-                'heads': pullop.heads,
-                'source': 'pull',
-            }).result()
-
-    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
-                                   pullop.remote.url())
+            cg = e.callcommand(
+                b'changegroupsubset',
+                {
+                    b'bases': pullop.fetch,
+                    b'heads': pullop.heads,
+                    b'source': b'pull',
+                },
+            ).result()
+
+    bundleop = bundle2.applybundle(
+        pullop.repo, cg, tr, b'pull', pullop.remote.url()
+    )
     pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
 
+
 def _pullphase(pullop):
     # Get remote phases data from remote
-    if 'phases' in pullop.stepsdone:
+    if b'phases' in pullop.stepsdone:
         return
-    remotephases = listkeys(pullop.remote, 'phases')
+    remotephases = listkeys(pullop.remote, b'phases')
     _pullapplyphases(pullop, remotephases)
 
+
 def _pullapplyphases(pullop, remotephases):
     """apply phase movement from observed remote state"""
-    if 'phases' in pullop.stepsdone:
+    if b'phases' in pullop.stepsdone:
         return
-    pullop.stepsdone.add('phases')
-    publishing = bool(remotephases.get('publishing', False))
+    pullop.stepsdone.add(b'phases')
+    publishing = bool(remotephases.get(b'publishing', False))
     if remotephases and not publishing:
         # remote is new and non-publishing
-        pheads, _dr = phases.analyzeremotephases(pullop.repo,
-                                                 pullop.pulledsubset,
-                                                 remotephases)
+        pheads, _dr = phases.analyzeremotephases(
+            pullop.repo, pullop.pulledsubset, remotephases
+        )
         dheads = pullop.pulledsubset
     else:
         # Remote is old or publishing all common changesets
@@ -1883,17 +2117,23 @@
         tr = pullop.gettransaction()
         phases.advanceboundary(pullop.repo, tr, draft, dheads)
 
+
 def _pullbookmarks(pullop):
     """process the remote bookmark information to update the local one"""
-    if 'bookmarks' in pullop.stepsdone:
+    if b'bookmarks' in pullop.stepsdone:
         return
-    pullop.stepsdone.add('bookmarks')
+    pullop.stepsdone.add(b'bookmarks')
     repo = pullop.repo
     remotebookmarks = pullop.remotebookmarks
-    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
-                             pullop.remote.url(),
-                             pullop.gettransaction,
-                             explicit=pullop.explicitbookmarks)
+    bookmod.updatefromremote(
+        repo.ui,
+        repo,
+        remotebookmarks,
+        pullop.remote.url(),
+        pullop.gettransaction,
+        explicit=pullop.explicitbookmarks,
+    )
+
 
 def _pullobsolete(pullop):
     """utility function to pull obsolete markers from a remote
@@ -1903,18 +2143,18 @@
     a new transaction has been created (when applicable).
 
     Exists mostly to allow overriding for experimentation purposes"""
-    if 'obsmarkers' in pullop.stepsdone:
+    if b'obsmarkers' in pullop.stepsdone:
         return
-    pullop.stepsdone.add('obsmarkers')
+    pullop.stepsdone.add(b'obsmarkers')
     tr = None
     if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
-        pullop.repo.ui.debug('fetching remote obsolete markers\n')
-        remoteobs = listkeys(pullop.remote, 'obsolete')
-        if 'dump0' in remoteobs:
+        pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
+        remoteobs = listkeys(pullop.remote, b'obsolete')
+        if b'dump0' in remoteobs:
             tr = pullop.gettransaction()
             markers = []
             for key in sorted(remoteobs, reverse=True):
-                if key.startswith('dump'):
+                if key.startswith(b'dump'):
                     data = util.b85decode(remoteobs[key])
                     version, newmarks = obsolete._readmarkers(data)
                     markers += newmarks
@@ -1923,6 +2163,7 @@
             pullop.repo.invalidatevolatilesets()
     return tr
 
+
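The docstring above invites extensions to override this step. A minimal sketch of such an override, assuming the standard extensions.wrapfunction() hook and a purely illustrative debug message:

    from mercurial import exchange, extensions

    def _tracedpullobsolete(orig, pullop):
        # 'orig' is the wrapped exchange._pullobsolete
        pullop.repo.ui.debug(b'entering experimental obsmarker pull\n')
        return orig(pullop)

    def extsetup(ui):
        extensions.wrapfunction(exchange, '_pullobsolete', _tracedpullobsolete)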
 def applynarrowacl(repo, kwargs):
     """Apply narrow fetch access control.
 
@@ -1931,32 +2172,44 @@
     """
     ui = repo.ui
     # TODO this assumes existence of HTTP and is a layering violation.
-    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
+    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
     user_includes = ui.configlist(
-        _NARROWACL_SECTION, username + '.includes',
-        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
+        _NARROWACL_SECTION,
+        username + b'.includes',
+        ui.configlist(_NARROWACL_SECTION, b'default.includes'),
+    )
     user_excludes = ui.configlist(
-        _NARROWACL_SECTION, username + '.excludes',
-        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
+        _NARROWACL_SECTION,
+        username + b'.excludes',
+        ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
+    )
     if not user_includes:
-        raise error.Abort(_("{} configuration for user {} is empty")
-                          .format(_NARROWACL_SECTION, username))
+        raise error.Abort(
+            _(b"{} configuration for user {} is empty").format(
+                _NARROWACL_SECTION, username
+            )
+        )
 
     user_includes = [
-        'path:.' if p == '*' else 'path:' + p for p in user_includes]
+        b'path:.' if p == b'*' else b'path:' + p for p in user_includes
+    ]
     user_excludes = [
-        'path:.' if p == '*' else 'path:' + p for p in user_excludes]
+        b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
+    ]
 
     req_includes = set(kwargs.get(r'includepats', []))
     req_excludes = set(kwargs.get(r'excludepats', []))
 
     req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
-        req_includes, req_excludes, user_includes, user_excludes)
+        req_includes, req_excludes, user_includes, user_excludes
+    )
 
     if invalid_includes:
         raise error.Abort(
-            _("The following includes are not accessible for {}: {}")
-            .format(username, invalid_includes))
+            _(b"The following includes are not accessible for {}: {}").format(
+                username, invalid_includes
+            )
+        )
 
     new_args = {}
     new_args.update(kwargs)
@@ -1968,6 +2221,7 @@
 
     return new_args
 
+
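For context, the include/exclude lists read above come from a per-user ACL config section with a default fallback, and a bare '*' is normalized to the match-everything pattern. A standalone restatement of that normalization, with hypothetical pattern lists:

    def normalize(patterns):
        # a bare b'*' means everything; anything else is path-anchored
        return [b'path:.' if p == b'*' else b'path:' + p for p in patterns]

    assert normalize([b'*']) == [b'path:.']
    assert normalize([b'src', b'docs']) == [b'path:src', b'path:docs']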
 def _computeellipsis(repo, common, heads, known, match, depth=None):
     """Compute the shape of a narrowed DAG.
 
@@ -2026,15 +2280,21 @@
     def splithead(head):
         r1, r2, r3 = sorted(ellipsisroots[head])
         for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
-            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
-                            nr1, head, nr2, head)
+            mid = repo.revs(
+                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
+            )
             for j in mid:
                 if j == nr2:
                     return nr2, (nr1, nr2)
                 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                     return j, (nr1, nr2)
-        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
-                            'roots: %d %d %d') % (head, r1, r2, r3))
+        raise error.Abort(
+            _(
+                b'Failed to split up ellipsis node! head: %d, '
+                b'roots: %d %d %d'
+            )
+            % (head, r1, r2, r3)
+        )
 
     missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
     visit = reversed(missing)
@@ -2094,13 +2354,15 @@
             addroot(head, c)
     return visitnodes, relevant_nodes, ellipsisroots
 
+
 def caps20to10(repo, role):
     """return a set with appropriate options to use bundle20 during getbundle"""
-    caps = {'HG20'}
+    caps = {b'HG20'}
     capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
-    caps.add('bundle2=' + urlreq.quote(capsblob))
+    caps.add(b'bundle2=' + urlreq.quote(capsblob))
     return caps
 
+
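caps20to10() advertises bundle2 by embedding the percent-encoded capabilities blob in a single capability string. A rough illustration of the resulting shape, using a made-up blob in place of real bundle2.encodecaps() output:

    from urllib.parse import quote

    capsblob = b'HG20\nchangegroup=01,02'  # hypothetical encoded caps
    caps = {b'HG20'}
    caps.add(b'bundle2=' + quote(capsblob).encode('ascii'))
    # e.g. {b'HG20', b'bundle2=HG20%0Achangegroup%3D01%2C02'}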
 # List of names of steps to perform for a bundle2 for getbundle, order matters.
 getbundle2partsorder = []
 
@@ -2109,6 +2371,7 @@
 # This exists to help extensions wrap steps if necessary
 getbundle2partsmapping = {}
 
+
 def getbundle2partsgenerator(stepname, idx=None):
     """decorator for function generating bundle2 part for getbundle
 
@@ -2118,6 +2381,7 @@
 
     You can only use this decorator for new steps; if you want to wrap a step
     from an extension, modify the getbundle2partsmapping dictionary directly."""
+
     def dec(func):
         assert stepname not in getbundle2partsmapping
         getbundle2partsmapping[stepname] = func
@@ -2126,15 +2390,19 @@
         else:
             getbundle2partsorder.insert(idx, stepname)
         return func
+
     return dec
 
+
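A hypothetical use of this decorator from an extension, registering a new step named b'mydata' that attaches an optional part to outgoing bundles (the part name and payload are invented for illustration):

    @exchange.getbundle2partsgenerator(b'mydata')
    def _getbundlemydatapart(bundler, repo, source, bundlecaps=None,
                             b2caps=None, **kwargs):
        # advisory part: clients that don't know it can safely ignore it
        bundler.newpart(b'mydata', data=b'hello', mandatory=False)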
 def bundle2requested(bundlecaps):
     if bundlecaps is not None:
-        return any(cap.startswith('HG2') for cap in bundlecaps)
+        return any(cap.startswith(b'HG2') for cap in bundlecaps)
     return False
 
-def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
-                    **kwargs):
+
+def getbundlechunks(
+    repo, source, heads=None, common=None, bundlecaps=None, **kwargs
+):
     """Return chunks constituting a bundle's raw data.
 
     Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
@@ -2148,56 +2416,83 @@
     usebundle2 = bundle2requested(bundlecaps)
     # bundle10 case
     if not usebundle2:
-        if bundlecaps and not kwargs.get('cg', True):
-            raise ValueError(_('request for bundle10 must include changegroup'))
+        if bundlecaps and not kwargs.get(b'cg', True):
+            raise ValueError(
+                _(b'request for bundle10 must include changegroup')
+            )
 
         if kwargs:
-            raise ValueError(_('unsupported getbundle arguments: %s')
-                             % ', '.join(sorted(kwargs.keys())))
+            raise ValueError(
+                _(b'unsupported getbundle arguments: %s')
+                % b', '.join(sorted(kwargs.keys()))
+            )
         outgoing = _computeoutgoing(repo, heads, common)
-        info['bundleversion'] = 1
-        return info, changegroup.makestream(repo, outgoing, '01', source,
-                                            bundlecaps=bundlecaps)
+        info[b'bundleversion'] = 1
+        return (
+            info,
+            changegroup.makestream(
+                repo, outgoing, b'01', source, bundlecaps=bundlecaps
+            ),
+        )
 
     # bundle20 case
-    info['bundleversion'] = 2
+    info[b'bundleversion'] = 2
     b2caps = {}
     for bcaps in bundlecaps:
-        if bcaps.startswith('bundle2='):
-            blob = urlreq.unquote(bcaps[len('bundle2='):])
+        if bcaps.startswith(b'bundle2='):
+            blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
             b2caps.update(bundle2.decodecaps(blob))
     bundler = bundle2.bundle20(repo.ui, b2caps)
 
-    kwargs['heads'] = heads
-    kwargs['common'] = common
+    kwargs[b'heads'] = heads
+    kwargs[b'common'] = common
 
     for name in getbundle2partsorder:
         func = getbundle2partsmapping[name]
-        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
-             **pycompat.strkwargs(kwargs))
-
-    info['prefercompressed'] = bundler.prefercompressed
+        func(
+            bundler,
+            repo,
+            source,
+            bundlecaps=bundlecaps,
+            b2caps=b2caps,
+            **pycompat.strkwargs(kwargs)
+        )
+
+    info[b'prefercompressed'] = bundler.prefercompressed
 
     return info, bundler.getchunks()
 
-@getbundle2partsgenerator('stream2')
+
+@getbundle2partsgenerator(b'stream2')
 def _getbundlestream2(bundler, repo, *args, **kwargs):
     return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
 
-@getbundle2partsgenerator('changegroup')
-def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
-                              b2caps=None, heads=None, common=None, **kwargs):
+
+@getbundle2partsgenerator(b'changegroup')
+def _getbundlechangegrouppart(
+    bundler,
+    repo,
+    source,
+    bundlecaps=None,
+    b2caps=None,
+    heads=None,
+    common=None,
+    **kwargs
+):
     """add a changegroup part to the requested bundle"""
     if not kwargs.get(r'cg', True):
         return
 
-    version = '01'
-    cgversions = b2caps.get('changegroup')
+    version = b'01'
+    cgversions = b2caps.get(b'changegroup')
     if cgversions:  # 3.1 and 3.2 ship with an empty value
-        cgversions = [v for v in cgversions
-                      if v in changegroup.supportedoutgoingversions(repo)]
+        cgversions = [
+            v
+            for v in cgversions
+            if v in changegroup.supportedoutgoingversions(repo)
+        ]
         if not cgversions:
-            raise error.Abort(_('no common changegroup version'))
+            raise error.Abort(_(b'no common changegroup version'))
         version = max(cgversions)
 
     outgoing = _computeoutgoing(repo, heads, common)
@@ -2211,69 +2506,85 @@
     else:
         matcher = None
 
-    cgstream = changegroup.makestream(repo, outgoing, version, source,
-                                      bundlecaps=bundlecaps, matcher=matcher)
-
-    part = bundler.newpart('changegroup', data=cgstream)
+    cgstream = changegroup.makestream(
+        repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
+    )
+
+    part = bundler.newpart(b'changegroup', data=cgstream)
     if cgversions:
-        part.addparam('version', version)
-
-    part.addparam('nbchanges', '%d' % len(outgoing.missing),
-                  mandatory=False)
-
-    if 'treemanifest' in repo.requirements:
-        part.addparam('treemanifest', '1')
-
-    if (kwargs.get(r'narrow', False) and kwargs.get(r'narrow_acl', False)
-        and (include or exclude)):
+        part.addparam(b'version', version)
+
+    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
+
+    if b'treemanifest' in repo.requirements:
+        part.addparam(b'treemanifest', b'1')
+
+    if b'exp-sidedata-flag' in repo.requirements:
+        part.addparam(b'exp-sidedata', b'1')
+
+    if (
+        kwargs.get(r'narrow', False)
+        and kwargs.get(r'narrow_acl', False)
+        and (include or exclude)
+    ):
         # this is mandatory because otherwise ACL clients won't work
-        narrowspecpart = bundler.newpart('Narrow:responsespec')
-        narrowspecpart.data = '%s\0%s' % ('\n'.join(include),
-                                           '\n'.join(exclude))
-
-@getbundle2partsgenerator('bookmarks')
-def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
-                              b2caps=None, **kwargs):
+        narrowspecpart = bundler.newpart(b'Narrow:responsespec')
+        narrowspecpart.data = b'%s\0%s' % (
+            b'\n'.join(include),
+            b'\n'.join(exclude),
+        )
+
+
+@getbundle2partsgenerator(b'bookmarks')
+def _getbundlebookmarkpart(
+    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
+):
     """add a bookmark part to the requested bundle"""
     if not kwargs.get(r'bookmarks', False):
         return
-    if 'bookmarks' not in b2caps:
-        raise error.Abort(_('no common bookmarks exchange method'))
-    books  = bookmod.listbinbookmarks(repo)
+    if b'bookmarks' not in b2caps:
+        raise error.Abort(_(b'no common bookmarks exchange method'))
+    books = bookmod.listbinbookmarks(repo)
     data = bookmod.binaryencode(books)
     if data:
-        bundler.newpart('bookmarks', data=data)
-
-@getbundle2partsgenerator('listkeys')
-def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
-                            b2caps=None, **kwargs):
+        bundler.newpart(b'bookmarks', data=data)
+
+
+@getbundle2partsgenerator(b'listkeys')
+def _getbundlelistkeysparts(
+    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
+):
     """add parts containing listkeys namespaces to the requested bundle"""
     listkeys = kwargs.get(r'listkeys', ())
     for namespace in listkeys:
-        part = bundler.newpart('listkeys')
-        part.addparam('namespace', namespace)
+        part = bundler.newpart(b'listkeys')
+        part.addparam(b'namespace', namespace)
         keys = repo.listkeys(namespace).items()
         part.data = pushkey.encodekeys(keys)
 
-@getbundle2partsgenerator('obsmarkers')
-def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
-                            b2caps=None, heads=None, **kwargs):
+
+@getbundle2partsgenerator(b'obsmarkers')
+def _getbundleobsmarkerpart(
+    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
+):
     """add an obsolescence markers part to the requested bundle"""
     if kwargs.get(r'obsmarkers', False):
         if heads is None:
             heads = repo.heads()
-        subset = [c.node() for c in repo.set('::%ln', heads)]
+        subset = [c.node() for c in repo.set(b'::%ln', heads)]
         markers = repo.obsstore.relevantmarkers(subset)
-        markers = sorted(markers)
+        markers = _sortedmarkers(markers)
         bundle2.buildobsmarkerspart(bundler, markers)
 
-@getbundle2partsgenerator('phases')
-def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
-                            b2caps=None, heads=None, **kwargs):
+
+@getbundle2partsgenerator(b'phases')
+def _getbundlephasespart(
+    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
+):
     """add phase heads part to the requested bundle"""
     if kwargs.get(r'phases', False):
-        if not 'heads' in b2caps.get('phases'):
-            raise error.Abort(_('no common phases exchange method'))
+        if not b'heads' in b2caps.get(b'phases'):
+            raise error.Abort(_(b'no common phases exchange method'))
         if heads is None:
             heads = repo.heads()
 
@@ -2300,7 +2611,7 @@
             if draftheads:
                 publicheads = headsbyphase.get(phases.public, set())
 
-                revset = 'heads(only(%ln, %ln) and public())'
+                revset = b'heads(only(%ln, %ln) and public())'
                 extraheads = repo.revs(revset, draftheads, publicheads)
                 for r in extraheads:
                     headsbyphase[phases.public].add(node(r))
@@ -2312,12 +2623,20 @@
 
         # generate the actual part
         phasedata = phases.binaryencode(phasemapping)
-        bundler.newpart('phase-heads', data=phasedata)
-
-@getbundle2partsgenerator('hgtagsfnodes')
-def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
-                         b2caps=None, heads=None, common=None,
-                         **kwargs):
+        bundler.newpart(b'phase-heads', data=phasedata)
+
+
+@getbundle2partsgenerator(b'hgtagsfnodes')
+def _getbundletagsfnodes(
+    bundler,
+    repo,
+    source,
+    bundlecaps=None,
+    b2caps=None,
+    heads=None,
+    common=None,
+    **kwargs
+):
     """Transfer the .hgtags filenodes mapping.
 
     Only values for heads in this bundle will be transferred.
@@ -2328,16 +2647,24 @@
     # Don't send unless:
     # - changesets are being exchanged,
     # - the client supports it.
-    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
+    if not (kwargs.get(r'cg', True) and b'hgtagsfnodes' in b2caps):
         return
 
     outgoing = _computeoutgoing(repo, heads, common)
     bundle2.addparttagsfnodescache(repo, bundler, outgoing)
 
-@getbundle2partsgenerator('cache:rev-branch-cache')
-def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
-                             b2caps=None, heads=None, common=None,
-                             **kwargs):
+
+@getbundle2partsgenerator(b'cache:rev-branch-cache')
+def _getbundlerevbranchcache(
+    bundler,
+    repo,
+    source,
+    bundlecaps=None,
+    b2caps=None,
+    heads=None,
+    common=None,
+    **kwargs
+):
     """Transfer the rev-branch-cache mapping
 
     The payload is a series of data related to each branch
@@ -2352,28 +2679,36 @@
     # - changesets are being exchanged,
     # - the client supports it.
     # - narrow bundle isn't in play (not currently compatible).
-    if (not kwargs.get(r'cg', True)
-        or 'rev-branch-cache' not in b2caps
+    if (
+        not kwargs.get(r'cg', True)
+        or b'rev-branch-cache' not in b2caps
         or kwargs.get(r'narrow', False)
-        or repo.ui.has_section(_NARROWACL_SECTION)):
+        or repo.ui.has_section(_NARROWACL_SECTION)
+    ):
         return
 
     outgoing = _computeoutgoing(repo, heads, common)
     bundle2.addpartrevbranchcache(repo, bundler, outgoing)
 
+
 def check_heads(repo, their_heads, context):
     """check if the heads of a repo have been modified
 
     Used by peers for unbundling.
     """
     heads = repo.heads()
-    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
-    if not (their_heads == ['force'] or their_heads == heads or
-            their_heads == ['hashed', heads_hash]):
+    heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
+    if not (
+        their_heads == [b'force']
+        or their_heads == heads
+        or their_heads == [b'hashed', heads_hash]
+    ):
         # someone else committed/pushed/unbundled while we
         # were transferring data
-        raise error.PushRaced('repository changed while %s - '
-                              'please try again' % context)
+        raise error.PushRaced(
+            b'repository changed while %s - please try again' % context
+        )
+
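For reference, the `their_heads` value compared above can take three forms; the hashed form lets a client commit to the heads it saw without shipping them all. A self-contained sketch of how that digest is built, using fake 20-byte nodes:

    import hashlib

    heads = [b'\x11' * 20, b'\x22' * 20]  # fake changeset nodes
    digest = hashlib.sha1(b''.join(sorted(heads))).digest()
    their_heads = [b'hashed', digest]     # matches check_heads() above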
 
 def unbundle(repo, cg, heads, source, url):
     """Apply a bundle to a repo.
@@ -2389,60 +2724,73 @@
     lockandtr = [None, None, None]
     recordout = None
     # quick fix for output mismatch with bundle2 in 3.4
-    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
-    if url.startswith('remote:http:') or url.startswith('remote:https:'):
+    captureoutput = repo.ui.configbool(
+        b'experimental', b'bundle2-output-capture'
+    )
+    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
         captureoutput = True
     try:
         # note: outside bundle1, 'heads' is expected to be empty and this
         # 'check_heads' call will be a no-op
-        check_heads(repo, heads, 'uploading changes')
+        check_heads(repo, heads, b'uploading changes')
         # push can proceed
         if not isinstance(cg, bundle2.unbundle20):
             # legacy case: bundle1 (changegroup 01)
-            txnname = "\n".join([source, util.hidepassword(url)])
+            txnname = b"\n".join([source, util.hidepassword(url)])
             with repo.lock(), repo.transaction(txnname) as tr:
                 op = bundle2.applybundle(repo, cg, tr, source, url)
                 r = bundle2.combinechangegroupresults(op)
         else:
             r = None
             try:
+
                 def gettransaction():
                     if not lockandtr[2]:
                         if not bookmod.bookmarksinstore(repo):
                             lockandtr[0] = repo.wlock()
                         lockandtr[1] = repo.lock()
                         lockandtr[2] = repo.transaction(source)
-                        lockandtr[2].hookargs['source'] = source
-                        lockandtr[2].hookargs['url'] = url
-                        lockandtr[2].hookargs['bundle2'] = '1'
+                        lockandtr[2].hookargs[b'source'] = source
+                        lockandtr[2].hookargs[b'url'] = url
+                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                     return lockandtr[2]
 
                 # Do greedy locking by default until we're satisfied with lazy
                 # locking.
-                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
+                if not repo.ui.configbool(
+                    b'experimental', b'bundle2lazylocking'
+                ):
                     gettransaction()
 
-                op = bundle2.bundleoperation(repo, gettransaction,
-                                             captureoutput=captureoutput,
-                                             source='push')
+                op = bundle2.bundleoperation(
+                    repo,
+                    gettransaction,
+                    captureoutput=captureoutput,
+                    source=b'push',
+                )
                 try:
                     op = bundle2.processbundle(repo, cg, op=op)
                 finally:
                     r = op.reply
                     if captureoutput and r is not None:
                         repo.ui.pushbuffer(error=True, subproc=True)
+
                         def recordout(output):
-                            r.newpart('output', data=output, mandatory=False)
+                            r.newpart(b'output', data=output, mandatory=False)
+
                 if lockandtr[2] is not None:
                     lockandtr[2].close()
             except BaseException as exc:
                 exc.duringunbundle2 = True
                 if captureoutput and r is not None:
                     parts = exc._bundle2salvagedoutput = r.salvageoutput()
+
                     def recordout(output):
-                        part = bundle2.bundlepart('output', data=output,
-                                                  mandatory=False)
+                        part = bundle2.bundlepart(
+                            b'output', data=output, mandatory=False
+                        )
                         parts.append(part)
+
                 raise
     finally:
         lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
@@ -2450,13 +2798,14 @@
             recordout(repo.ui.popbuffer())
     return r
 
+
 def _maybeapplyclonebundle(pullop):
     """Apply a clone bundle from a remote, if possible."""
 
     repo = pullop.repo
     remote = pullop.remote
 
-    if not repo.ui.configbool('ui', 'clonebundles'):
+    if not repo.ui.configbool(b'ui', b'clonebundles'):
         return
 
     # Only run if local repo is empty.
@@ -2466,11 +2815,11 @@
     if pullop.heads:
         return
 
-    if not remote.capable('clonebundles'):
+    if not remote.capable(b'clonebundles'):
         return
 
     with remote.commandexecutor() as e:
-        res = e.callcommand('clonebundles', {}).result()
+        res = e.callcommand(b'clonebundles', {}).result()
 
     # If we call the wire protocol command, that's good enough to record the
     # attempt.
@@ -2478,12 +2827,17 @@
 
     entries = parseclonebundlesmanifest(repo, res)
     if not entries:
-        repo.ui.note(_('no clone bundles available on remote; '
-                       'falling back to regular clone\n'))
+        repo.ui.note(
+            _(
+                b'no clone bundles available on remote; '
+                b'falling back to regular clone\n'
+            )
+        )
         return
 
     entries = filterclonebundleentries(
-        repo, entries, streamclonerequested=pullop.streamclonerequested)
+        repo, entries, streamclonerequested=pullop.streamclonerequested
+    )
 
     if not entries:
         # There is a thundering herd concern here. However, if a server
@@ -2491,31 +2845,41 @@
         # they deserve what's coming. Furthermore, from a client's
         # perspective, no automatic fallback would mean not being able to
         # clone!
-        repo.ui.warn(_('no compatible clone bundles available on server; '
-                       'falling back to regular clone\n'))
-        repo.ui.warn(_('(you may want to report this to the server '
-                       'operator)\n'))
+        repo.ui.warn(
+            _(
+                b'no compatible clone bundles available on server; '
+                b'falling back to regular clone\n'
+            )
+        )
+        repo.ui.warn(
+            _(b'(you may want to report this to the server operator)\n')
+        )
         return
 
     entries = sortclonebundleentries(repo.ui, entries)
 
-    url = entries[0]['URL']
-    repo.ui.status(_('applying clone bundle from %s\n') % url)
+    url = entries[0][b'URL']
+    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
     if trypullbundlefromurl(repo.ui, repo, url):
-        repo.ui.status(_('finished applying clone bundle\n'))
+        repo.ui.status(_(b'finished applying clone bundle\n'))
     # Bundle failed.
     #
     # We abort by default to avoid the thundering herd of
     # clients flooding a server that was expecting expensive
     # clone load to be offloaded.
-    elif repo.ui.configbool('ui', 'clonebundlefallback'):
-        repo.ui.warn(_('falling back to normal clone\n'))
+    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
+        repo.ui.warn(_(b'falling back to normal clone\n'))
     else:
-        raise error.Abort(_('error applying bundle'),
-                          hint=_('if this error persists, consider contacting '
-                                 'the server operator or disable clone '
-                                 'bundles via '
-                                 '"--config ui.clonebundles=false"'))
+        raise error.Abort(
+            _(b'error applying bundle'),
+            hint=_(
+                b'if this error persists, consider contacting '
+                b'the server operator or disable clone '
+                b'bundles via '
+                b'"--config ui.clonebundles=false"'
+            ),
+        )
+
 
 def parseclonebundlesmanifest(repo, s):
     """Parses the raw text of a clone bundles manifest.
@@ -2528,9 +2892,9 @@
         fields = line.split()
         if not fields:
             continue
-        attrs = {'URL': fields[0]}
+        attrs = {b'URL': fields[0]}
         for rawattr in fields[1:]:
-            key, value = rawattr.split('=', 1)
+            key, value = rawattr.split(b'=', 1)
             key = urlreq.unquote(key)
             value = urlreq.unquote(value)
             attrs[key] = value
@@ -2538,11 +2902,11 @@
             # Parse BUNDLESPEC into components. This makes client-side
             # preferences easier to specify since you can prefer a single
             # component of the BUNDLESPEC.
-            if key == 'BUNDLESPEC':
+            if key == b'BUNDLESPEC':
                 try:
                     bundlespec = parsebundlespec(repo, value)
-                    attrs['COMPRESSION'] = bundlespec.compression
-                    attrs['VERSION'] = bundlespec.version
+                    attrs[b'COMPRESSION'] = bundlespec.compression
+                    attrs[b'VERSION'] = bundlespec.version
                 except error.InvalidBundleSpecification:
                     pass
                 except error.UnsupportedBundleSpecification:
@@ -2552,19 +2916,23 @@
 
     return m
 
+
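The manifest format parsed above is line oriented: a URL followed by space-separated, percent-encoded KEY=VALUE attributes. A made-up two-entry manifest, and the kind of structure the parser would derive from it (the COMPRESSION/VERSION split shown is an assumption about how the BUNDLESPEC decomposes):

    manifest = (
        b'https://example.com/full.hg BUNDLESPEC=zstd-v2 REQUIRESNI=true\n'
        b'https://example.com/stream.hg BUNDLESPEC=none-v2;stream=v2\n'
    )
    # parseclonebundlesmanifest(repo, manifest) would yield entries like
    # {b'URL': b'https://example.com/full.hg', b'BUNDLESPEC': b'zstd-v2',
    #  b'COMPRESSION': b'zstd', b'VERSION': b'v2', b'REQUIRESNI': b'true'}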
 def isstreamclonespec(bundlespec):
     # Stream clone v1
-    if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
+    if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
         return True
 
     # Stream clone v2
-    if (bundlespec.wirecompression == 'UN' and
-        bundlespec.wireversion == '02' and
-        bundlespec.contentopts.get('streamv2')):
+    if (
+        bundlespec.wirecompression == b'UN'
+        and bundlespec.wireversion == b'02'
+        and bundlespec.contentopts.get(b'streamv2')
+    ):
         return True
 
     return False
 
+
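isstreamclonespec() recognizes the two stream clone flavors. A self-contained restatement with a minimal stand-in for the parsed spec object (attribute names follow the function above):

    from collections import namedtuple

    Spec = namedtuple('Spec', 'wirecompression wireversion contentopts')

    def is_stream_spec(spec):
        if spec.wirecompression == b'UN' and spec.wireversion == b's1':
            return True  # stream clone v1
        return (spec.wirecompression == b'UN'
                and spec.wireversion == b'02'
                and bool(spec.contentopts.get(b'streamv2')))

    assert is_stream_spec(Spec(b'UN', b's1', {}))
    assert is_stream_spec(Spec(b'UN', b'02', {b'streamv2': True}))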
 def filterclonebundleentries(repo, entries, streamclonerequested=False):
     """Remove incompatible clone bundle manifest entries.
 
@@ -2577,7 +2945,7 @@
     """
     newentries = []
     for entry in entries:
-        spec = entry.get('BUNDLESPEC')
+        spec = entry.get(b'BUNDLESPEC')
         if spec:
             try:
                 bundlespec = parsebundlespec(repo, spec, strict=True)
@@ -2585,34 +2953,41 @@
                 # If a stream clone was requested, filter out non-streamclone
                 # entries.
                 if streamclonerequested and not isstreamclonespec(bundlespec):
-                    repo.ui.debug('filtering %s because not a stream clone\n' %
-                                  entry['URL'])
+                    repo.ui.debug(
+                        b'filtering %s because not a stream clone\n'
+                        % entry[b'URL']
+                    )
                     continue
 
             except error.InvalidBundleSpecification as e:
-                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
+                repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
                 continue
             except error.UnsupportedBundleSpecification as e:
-                repo.ui.debug('filtering %s because unsupported bundle '
-                              'spec: %s\n' % (
-                                  entry['URL'], stringutil.forcebytestr(e)))
+                repo.ui.debug(
+                    b'filtering %s because unsupported bundle '
+                    b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
+                )
                 continue
         # If we don't have a spec and requested a stream clone, we don't know
         # what the entry is, so don't attempt to apply it.
         elif streamclonerequested:
-            repo.ui.debug('filtering %s because cannot determine if a stream '
-                          'clone bundle\n' % entry['URL'])
+            repo.ui.debug(
+                b'filtering %s because cannot determine if a stream '
+                b'clone bundle\n' % entry[b'URL']
+            )
             continue
 
-        if 'REQUIRESNI' in entry and not sslutil.hassni:
-            repo.ui.debug('filtering %s because SNI not supported\n' %
-                          entry['URL'])
+        if b'REQUIRESNI' in entry and not sslutil.hassni:
+            repo.ui.debug(
+                b'filtering %s because SNI not supported\n' % entry[b'URL']
+            )
             continue
 
         newentries.append(entry)
 
     return newentries
 
+
 class clonebundleentry(object):
     """Represents an item in a clone bundles manifest.
 
@@ -2677,33 +3052,39 @@
     def __ne__(self, other):
         return self._cmp(other) != 0
 
+
 def sortclonebundleentries(ui, entries):
-    prefers = ui.configlist('ui', 'clonebundleprefers')
+    prefers = ui.configlist(b'ui', b'clonebundleprefers')
     if not prefers:
         return list(entries)
 
-    prefers = [p.split('=', 1) for p in prefers]
+    prefers = [p.split(b'=', 1) for p in prefers]
 
     items = sorted(clonebundleentry(v, prefers) for v in entries)
     return [i.value for i in items]
 
+
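Sorting honors the ui.clonebundleprefers list, where each item is an ATTR=VALUE pair split once on '='; entries whose attributes match earlier preferences sort first. A small sketch of the parsed form, with hypothetical values:

    prefers = [p.split(b'=', 1)
               for p in [b'COMPRESSION=zstd', b'VERSION=v2']]
    # -> [[b'COMPRESSION', b'zstd'], [b'VERSION', b'v2']]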
 def trypullbundlefromurl(ui, repo, url):
     """Attempt to apply a bundle from a URL."""
-    with repo.lock(), repo.transaction('bundleurl') as tr:
+    with repo.lock(), repo.transaction(b'bundleurl') as tr:
         try:
             fh = urlmod.open(ui, url)
-            cg = readbundle(ui, fh, 'stream')
+            cg = readbundle(ui, fh, b'stream')
 
             if isinstance(cg, streamclone.streamcloneapplier):
                 cg.apply(repo)
             else:
-                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
+                bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
             return True
         except urlerr.httperror as e:
-            ui.warn(_('HTTP error fetching bundle: %s\n') %
-                    stringutil.forcebytestr(e))
+            ui.warn(
+                _(b'HTTP error fetching bundle: %s\n')
+                % stringutil.forcebytestr(e)
+            )
         except urlerr.urlerror as e:
-            ui.warn(_('error fetching bundle: %s\n') %
-                    stringutil.forcebytestr(e.reason))
+            ui.warn(
+                _(b'error fetching bundle: %s\n')
+                % stringutil.forcebytestr(e.reason)
+            )
 
         return False
--- a/mercurial/exchangev2.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/exchangev2.py	Mon Oct 21 11:09:48 2019 -0400
@@ -22,9 +22,10 @@
     narrowspec,
     phases,
     pycompat,
-    repository,
     setdiscovery,
 )
+from .interfaces import repository
+
 
 def pull(pullop):
     """Pull using wire protocol version 2."""
@@ -38,7 +39,7 @@
     # incremental pull. This is somewhat hacky and is not nearly robust enough
     # for long-term usage.
     if usingrawchangelogandmanifest:
-        with repo.transaction('clone'):
+        with repo.transaction(b'clone'):
             _fetchrawstorefiles(repo, remote)
             repo.invalidate(clearfilecache=True)
 
@@ -46,11 +47,13 @@
 
     # We don't use the repo's narrow matcher here because the patterns passed
     # to exchange.pull() could be different.
-    narrowmatcher = narrowspec.match(repo.root,
-                                     # Empty maps to nevermatcher. So always
-                                     # set includes if missing.
-                                     pullop.includepats or {'path:.'},
-                                     pullop.excludepats)
+    narrowmatcher = narrowspec.match(
+        repo.root,
+        # Empty maps to nevermatcher. So always
+        # set includes if missing.
+        pullop.includepats or {b'path:.'},
+        pullop.excludepats,
+    )
 
     if pullop.includepats or pullop.excludepats:
         pathfilter = {}
@@ -63,7 +66,8 @@
 
     # Figure out what needs to be fetched.
     common, fetch, remoteheads = _pullchangesetdiscovery(
-        repo, remote, pullop.heads, abortwhenunrelated=pullop.force)
+        repo, remote, pullop.heads, abortwhenunrelated=pullop.force
+    )
 
     # And fetch the data.
     pullheads = pullop.heads or remoteheads
@@ -74,23 +78,32 @@
 
     # Ensure all new changesets are draft by default. If the repo is
     # publishing, the phase will be adjusted by the loop below.
-    if csetres['added']:
-        phases.registernew(repo, tr, phases.draft, csetres['added'])
+    if csetres[b'added']:
+        phases.registernew(repo, tr, phases.draft, csetres[b'added'])
 
     # And adjust the phase of all changesets accordingly.
     for phase in phases.phasenames:
-        if phase == b'secret' or not csetres['nodesbyphase'][phase]:
+        if phase == b'secret' or not csetres[b'nodesbyphase'][phase]:
             continue
 
-        phases.advanceboundary(repo, tr, phases.phasenames.index(phase),
-                               csetres['nodesbyphase'][phase])
+        phases.advanceboundary(
+            repo,
+            tr,
+            phases.phasenames.index(phase),
+            csetres[b'nodesbyphase'][phase],
+        )
 
     # Write bookmark updates.
-    bookmarks.updatefromremote(repo.ui, repo, csetres['bookmarks'],
-                               remote.url(), pullop.gettransaction,
-                               explicit=pullop.explicitbookmarks)
+    bookmarks.updatefromremote(
+        repo.ui,
+        repo,
+        csetres[b'bookmarks'],
+        remote.url(),
+        pullop.gettransaction,
+        explicit=pullop.explicitbookmarks,
+    )
 
-    manres = _fetchmanifests(repo, tr, remote, csetres['manifestnodes'])
+    manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes'])
 
     # We don't properly support shallow changesets and manifests yet. So we apply
     # depth limiting locally.
@@ -98,8 +111,9 @@
         relevantcsetnodes = set()
         clnode = repo.changelog.node
 
-        for rev in repo.revs(b'ancestors(%ln, %s)',
-                             pullheads, pullop.depth - 1):
+        for rev in repo.revs(
+            b'ancestors(%ln, %s)', pullheads, pullop.depth - 1
+        ):
             relevantcsetnodes.add(clnode(rev))
 
         csetrelevantfilter = lambda n: n in relevantcsetnodes
@@ -128,15 +142,24 @@
             manifestlinkrevs[mnode] = rev
 
     else:
-        csetsforfiles = [n for n in csetres['added'] if csetrelevantfilter(n)]
-        mnodesforfiles = manres['added']
-        manifestlinkrevs = manres['linkrevs']
+        csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)]
+        mnodesforfiles = manres[b'added']
+        manifestlinkrevs = manres[b'linkrevs']
 
     # Find all file nodes referenced by added manifests and fetch those
     # revisions.
     fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles)
-    _fetchfilesfromcsets(repo, tr, remote, pathfilter, fnodes, csetsforfiles,
-                         manifestlinkrevs, shallow=bool(pullop.depth))
+    _fetchfilesfromcsets(
+        repo,
+        tr,
+        remote,
+        pathfilter,
+        fnodes,
+        csetsforfiles,
+        manifestlinkrevs,
+        shallow=bool(pullop.depth),
+    )
+
 
 def _checkuserawstorefiledata(pullop):
     """Check whether we should use rawstorefiledata command to retrieve data."""
@@ -163,17 +186,19 @@
 
     return True
 
+
 def _fetchrawstorefiles(repo, remote):
     with remote.commandexecutor() as e:
-        objs = e.callcommand(b'rawstorefiledata', {
-            b'files': [b'changelog', b'manifestlog'],
-        }).result()
+        objs = e.callcommand(
+            b'rawstorefiledata', {b'files': [b'changelog', b'manifestlog']}
+        ).result()
 
         # First object is a summary of files data that follows.
         overall = next(objs)
 
-        progress = repo.ui.makeprogress(_('clone'), total=overall[b'totalsize'],
-                                        unit=_('bytes'))
+        progress = repo.ui.makeprogress(
+            _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes')
+        )
         with progress:
             progress.update(0)
 
@@ -186,14 +211,17 @@
 
                 for k in (b'location', b'path', b'size'):
                     if k not in filemeta:
-                        raise error.Abort(_(b'remote file data missing key: %s')
-                                          % k)
+                        raise error.Abort(
+                            _(b'remote file data missing key: %s') % k
+                        )
 
                 if filemeta[b'location'] == b'store':
                     vfs = repo.svfs
                 else:
-                    raise error.Abort(_(b'invalid location for raw file data: '
-                                        b'%s') % filemeta[b'location'])
+                    raise error.Abort(
+                        _(b'invalid location for raw file data: %s')
+                        % filemeta[b'location']
+                    )
 
                 bytesremaining = filemeta[b'size']
 
@@ -207,10 +235,13 @@
                         bytesremaining -= len(chunk)
 
                         if bytesremaining < 0:
-                            raise error.Abort(_(
-                                b'received invalid number of bytes for file '
-                                b'data; expected %d, got extra') %
-                                              filemeta[b'size'])
+                            raise error.Abort(
+                                _(
+                                    b'received invalid number of bytes for file '
+                                    b'data; expected %d, got extra'
+                                )
+                                % filemeta[b'size']
+                            )
 
                         progress.increment(step=len(chunk))
                         fh.write(chunk)
@@ -219,15 +250,25 @@
                             if chunk.islast:
                                 break
                         except AttributeError:
-                            raise error.Abort(_(
-                                b'did not receive indefinite length bytestring '
-                                b'for file data'))
+                            raise error.Abort(
+                                _(
+                                    b'did not receive indefinite length bytestring '
+                                    b'for file data'
+                                )
+                            )
 
                 if bytesremaining:
-                    raise error.Abort(_(b'received invalid number of bytes for'
-                                        b'file data; expected %d got %d') %
-                                      (filemeta[b'size'],
-                                       filemeta[b'size'] - bytesremaining))
+                    raise error.Abort(
+                        _(
+                            b'received invalid number of bytes for '
+                            b'file data; expected %d got %d'
+                        )
+                        % (
+                            filemeta[b'size'],
+                            filemeta[b'size'] - bytesremaining,
+                        )
+                    )
+
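The loop above enforces the size each file declares: every received chunk decrements a counter, and a mismatch in either direction aborts. The same bookkeeping in isolation:

    def consume(declared_size, chunks):
        remaining = declared_size
        for chunk in chunks:
            remaining -= len(chunk)
            if remaining < 0:
                raise ValueError('got more bytes than declared')
        if remaining:
            raise ValueError('got fewer bytes than declared')

    consume(6, [b'abc', b'def'])  # ok: exactly the declared size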
 
 def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):
     """Determine which changesets need to be pulled."""
@@ -240,7 +281,8 @@
     # TODO wire protocol version 2 is capable of more efficient discovery
     # than setdiscovery. Consider implementing something better.
     common, fetch, remoteheads = setdiscovery.findcommonheads(
-        repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated)
+        repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated
+    )
 
     common = set(common)
     remoteheads = set(remoteheads)
@@ -260,6 +302,7 @@
 
     return common, fetch, remoteheads
 
+
 def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads):
     # TODO consider adding a step here where we obtain the DAG shape first
     # (or ask the server to slice changesets into chunks for us) so that
@@ -267,22 +310,27 @@
     # resuming interrupted clones, higher server-side cache hit rates due
     # to smaller segments, etc.
     with remote.commandexecutor() as e:
-        objs = e.callcommand(b'changesetdata', {
-            b'revisions': [{
-                b'type': b'changesetdagrange',
-                b'roots': sorted(common),
-                b'heads': sorted(remoteheads),
-            }],
-            b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
-        }).result()
+        objs = e.callcommand(
+            b'changesetdata',
+            {
+                b'revisions': [
+                    {
+                        b'type': b'changesetdagrange',
+                        b'roots': sorted(common),
+                        b'heads': sorted(remoteheads),
+                    }
+                ],
+                b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
+            },
+        ).result()
 
         # The context manager waits on all response data when exiting. So
         # we need to remain in the context manager in order to stream data.
         return _processchangesetdata(repo, tr, objs)
 
+
 def _processchangesetdata(repo, tr, objs):
-    repo.hook('prechangegroup', throw=True,
-              **pycompat.strkwargs(tr.hookargs))
+    repo.hook(b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs))
 
     urepo = repo.unfiltered()
     cl = urepo.changelog
@@ -293,14 +341,14 @@
     # follows.
     meta = next(objs)
 
-    progress = repo.ui.makeprogress(_('changesets'),
-                                    unit=_('chunks'),
-                                    total=meta.get(b'totalitems'))
+    progress = repo.ui.makeprogress(
+        _(b'changesets'), unit=_(b'chunks'), total=meta.get(b'totalitems')
+    )
 
     manifestnodes = {}
 
     def linkrev(node):
-        repo.ui.debug('add changeset %s\n' % short(node))
+        repo.ui.debug(b'add changeset %s\n' % short(node))
         # Linkrev for changelog is always self.
         return len(cl)
 
@@ -358,18 +406,20 @@
                 0,
             )
 
-    added = cl.addgroup(iterrevisions(), linkrev, weakref.proxy(tr),
-                        addrevisioncb=onchangeset)
+    added = cl.addgroup(
+        iterrevisions(), linkrev, weakref.proxy(tr), addrevisioncb=onchangeset
+    )
 
     progress.complete()
 
     return {
-        'added': added,
-        'nodesbyphase': nodesbyphase,
-        'bookmarks': remotebookmarks,
-        'manifestnodes': manifestnodes,
+        b'added': added,
+        b'nodesbyphase': nodesbyphase,
+        b'bookmarks': remotebookmarks,
+        b'manifestnodes': manifestnodes,
     }
 
+
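The key changes in the return value above (and throughout this changeset) matter on Python 3, where str and bytes keys never compare equal, so a caller indexing with the other type silently misses. A two-line demonstration:

    res = {b'added': [1, 2]}
    assert res.get('added') is None  # str key: not found on Python 3
    assert res[b'added'] == [1, 2]   # bytes key: found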
 def _fetchmanifests(repo, tr, remote, manifestnodes):
     rootmanifest = repo.manifestlog.getstorage(b'')
 
@@ -379,7 +429,7 @@
     linkrevs = {}
     seen = set()
 
-    for clrev, node in sorted(manifestnodes.iteritems()):
+    for clrev, node in sorted(pycompat.iteritems(manifestnodes)):
         if node in seen:
             continue
 
@@ -427,13 +477,14 @@
                 basenode,
                 delta,
                 # Flags not yet supported.
-                0
+                0,
             )
 
             progress.increment()
 
-    progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
-                                    total=len(fetchnodes))
+    progress = repo.ui.makeprogress(
+        _(b'manifests'), unit=_(b'chunks'), total=len(fetchnodes)
+    )
 
     commandmeta = remote.apidescriptor[b'commands'][b'manifestdata']
     batchsize = commandmeta.get(b'recommendedbatchsize', 10000)
@@ -450,33 +501,40 @@
     added = []
 
     for i in pycompat.xrange(0, len(fetchnodes), batchsize):
-        batch = [node for node in fetchnodes[i:i + batchsize]]
+        batch = [node for node in fetchnodes[i : i + batchsize]]
         if not batch:
             continue
 
         with remote.commandexecutor() as e:
-            objs = e.callcommand(b'manifestdata', {
-                b'tree': b'',
-                b'nodes': batch,
-                b'fields': {b'parents', b'revision'},
-                b'haveparents': True,
-            }).result()
+            objs = e.callcommand(
+                b'manifestdata',
+                {
+                    b'tree': b'',
+                    b'nodes': batch,
+                    b'fields': {b'parents', b'revision'},
+                    b'haveparents': True,
+                },
+            ).result()
 
             # Chomp off header object.
             next(objs)
 
-            added.extend(rootmanifest.addgroup(
-                iterrevisions(objs, progress),
-                linkrevs.__getitem__,
-                weakref.proxy(tr)))
+            added.extend(
+                rootmanifest.addgroup(
+                    iterrevisions(objs, progress),
+                    linkrevs.__getitem__,
+                    weakref.proxy(tr),
+                )
+            )
 
     progress.complete()
 
     return {
-        'added': added,
-        'linkrevs': linkrevs,
+        b'added': added,
+        b'linkrevs': linkrevs,
     }
 
+
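_fetchmanifests pulls nodes in slices sized by the server's recommendedbatchsize. The slicing pattern on its own (the batch size here is just the code's fallback value):

    def batches(nodes, batchsize=10000):
        # yield fixed-size, non-empty slices of a node list
        for i in range(0, len(nodes), batchsize):
            batch = nodes[i : i + batchsize]
            if batch:
                yield batch

    assert list(batches(list(range(5)), 2)) == [[0, 1], [2, 3], [4]]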
 def _derivefilesfrommanifests(repo, matcher, manifestnodes):
     """Determine what file nodes are relevant given a set of manifest nodes.
 
@@ -487,7 +545,8 @@
     fnodes = collections.defaultdict(dict)
 
     progress = repo.ui.makeprogress(
-        _('scanning manifests'), total=len(manifestnodes))
+        _(b'scanning manifests'), total=len(manifestnodes)
+    )
 
     with progress:
         for manifestnode in manifestnodes:
@@ -509,8 +568,10 @@
 
     return fnodes
 
+
 def _fetchfiles(repo, tr, remote, fnodes, linkrevs):
     """Fetch file data from explicit file revisions."""
+
     def iterrevisions(objs, progress):
         for filerevision in objs:
             node = filerevision[b'node']
@@ -544,15 +605,17 @@
             progress.increment()
 
     progress = repo.ui.makeprogress(
-        _('files'), unit=_('chunks'),
-         total=sum(len(v) for v in fnodes.itervalues()))
+        _(b'files'),
+        unit=_(b'chunks'),
+        total=sum(len(v) for v in pycompat.itervalues(fnodes)),
+    )
 
     # TODO make batch size configurable
     batchsize = 10000
     fnodeslist = [x for x in sorted(fnodes.items())]
 
     for i in pycompat.xrange(0, len(fnodeslist), batchsize):
-        batch = [x for x in fnodeslist[i:i + batchsize]]
+        batch = [x for x in fnodeslist[i : i + batchsize]]
         if not batch:
             continue
 
@@ -561,16 +624,25 @@
             locallinkrevs = {}
 
             for path, nodes in batch:
-                fs.append((path, e.callcommand(b'filedata', {
-                    b'path': path,
-                    b'nodes': sorted(nodes),
-                    b'fields': {b'parents', b'revision'},
-                    b'haveparents': True,
-                })))
+                fs.append(
+                    (
+                        path,
+                        e.callcommand(
+                            b'filedata',
+                            {
+                                b'path': path,
+                                b'nodes': sorted(nodes),
+                                b'fields': {b'parents', b'revision'},
+                                b'haveparents': True,
+                            },
+                        ),
+                    )
+                )
 
                 locallinkrevs[path] = {
                     node: linkrevs[manifestnode]
-                    for node, manifestnode in nodes.iteritems()}
+                    for node, manifestnode in pycompat.iteritems(nodes)
+                }
 
             for path, f in fs:
                 objs = f.result()
@@ -582,10 +654,13 @@
                 store.addgroup(
                     iterrevisions(objs, progress),
                     locallinkrevs[path].__getitem__,
-                    weakref.proxy(tr))
+                    weakref.proxy(tr),
+                )
+
 
-def _fetchfilesfromcsets(repo, tr, remote, pathfilter, fnodes, csets,
-                         manlinkrevs, shallow=False):
+def _fetchfilesfromcsets(
+    repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False
+):
     """Fetch file data from explicit changeset revisions."""
 
     def iterrevisions(objs, remaining, progress):
@@ -629,8 +704,10 @@
             remaining -= 1
 
     progress = repo.ui.makeprogress(
-        _('files'), unit=_('chunks'),
-        total=sum(len(v) for v in fnodes.itervalues()))
+        _(b'files'),
+        unit=_(b'chunks'),
+        total=sum(len(v) for v in pycompat.itervalues(fnodes)),
+    )
 
     commandmeta = remote.apidescriptor[b'commands'][b'filesdata']
     batchsize = commandmeta.get(b'recommendedbatchsize', 50000)
@@ -651,16 +728,15 @@
         fields.add(b'linknode')
 
     for i in pycompat.xrange(0, len(csets), batchsize):
-        batch = [x for x in csets[i:i + batchsize]]
+        batch = [x for x in csets[i : i + batchsize]]
         if not batch:
             continue
 
         with remote.commandexecutor() as e:
             args = {
-                b'revisions': [{
-                    b'type': b'changesetexplicit',
-                    b'nodes': batch,
-                }],
+                b'revisions': [
+                    {b'type': b'changesetexplicit', b'nodes': batch}
+                ],
                 b'fields': fields,
                 b'haveparents': haveparents,
             }
@@ -682,7 +758,8 @@
 
                 linkrevs = {
                     fnode: manlinkrevs[mnode]
-                    for fnode, mnode in fnodes[path].iteritems()}
+                    for fnode, mnode in pycompat.iteritems(fnodes[path])
+                }
 
                 def getlinkrev(node):
                     if node in linkrevs:
@@ -690,8 +767,9 @@
                     else:
                         return clrev(node)
 
-                store.addgroup(iterrevisions(objs, header[b'totalitems'],
-                                             progress),
-                               getlinkrev,
-                               weakref.proxy(tr),
-                               maybemissingparents=shallow)
+                store.addgroup(
+                    iterrevisions(objs, header[b'totalitems'], progress),
+                    getlinkrev,
+                    weakref.proxy(tr),
+                    maybemissingparents=shallow,
+                )
--- a/mercurial/extensions.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/extensions.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,6 +18,11 @@
     _,
     gettext,
 )
+from .pycompat import (
+    getattr,
+    open,
+    setattr,
+)
 
 from . import (
     cmdutil,
@@ -27,33 +32,34 @@
     util,
 )
 
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
 _extensions = {}
 _disabledextensions = {}
 _aftercallbacks = {}
 _order = []
 _builtin = {
-    'hbisect',
-    'bookmarks',
-    'color',
-    'parentrevspec',
-    'progress',
-    'interhg',
-    'inotify',
-    'hgcia',
-    'shelve',
+    b'hbisect',
+    b'bookmarks',
+    b'color',
+    b'parentrevspec',
+    b'progress',
+    b'interhg',
+    b'inotify',
+    b'hgcia',
+    b'shelve',
 }
 
+
 def extensions(ui=None):
     if ui:
+
         def enabled(name):
-            for format in ['%s', 'hgext.%s']:
-                conf = ui.config('extensions', format % name)
-                if conf is not None and not conf.startswith('!'):
+            for format in [b'%s', b'hgext.%s']:
+                conf = ui.config(b'extensions', format % name)
+                if conf is not None and not conf.startswith(b'!'):
                     return True
+
     else:
         enabled = lambda name: True
     for name in _order:
@@ -61,22 +67,24 @@
         if module and enabled(name):
             yield name, module
 
+
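An extension counts as enabled when either extensions.<name> or extensions.hgext.<name> is set to a value not starting with '!'. The same check against a plain dict standing in for ui.config(), with a made-up configuration:

    conf = {b'extensions.rebase': b'', b'extensions.badext': b'!'}

    def enabled(name):
        for fmt in [b'%s', b'hgext.%s']:
            val = conf.get(b'extensions.' + (fmt % name))
            if val is not None and not val.startswith(b'!'):
                return True
        return False

    assert enabled(b'rebase')      # empty value still means enabled
    assert not enabled(b'badext')  # leading '!' disables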
 def find(name):
     '''return module with given extension name'''
     mod = None
     try:
         mod = _extensions[name]
     except KeyError:
-        for k, v in _extensions.iteritems():
-            if k.endswith('.' + name) or k.endswith('/' + name):
+        for k, v in pycompat.iteritems(_extensions):
+            if k.endswith(b'.' + name) or k.endswith(b'/' + name):
                 mod = v
                 break
     if not mod:
         raise KeyError(name)
     return mod
 
+
 def loadpath(path, module_name):
-    module_name = module_name.replace('.', '_')
+    module_name = module_name.replace(b'.', b'_')
     path = util.normpath(util.expandpath(path))
     module_name = pycompat.fsdecode(module_name)
     path = pycompat.fsdecode(path)
@@ -90,45 +98,54 @@
             return imp.load_source(module_name, path)
         except IOError as exc:
             if not exc.filename:
-                exc.filename = path # python does not fill this
+                exc.filename = path  # python does not fill this
             raise
 
+
 def _importh(name):
     """import and return the <name> module"""
     mod = __import__(pycompat.sysstr(name))
-    components = name.split('.')
+    components = name.split(b'.')
     for comp in components[1:]:
         mod = getattr(mod, comp)
     return mod
 
+
 def _importext(name, path=None, reportfunc=None):
     if path:
         # the module will be loaded in sys.modules
         # choose a unique name so that it doesn't
         # conflict with other modules
-        mod = loadpath(path, 'hgext.%s' % name)
+        mod = loadpath(path, b'hgext.%s' % name)
     else:
         try:
-            mod = _importh("hgext.%s" % name)
+            mod = _importh(b"hgext.%s" % name)
         except ImportError as err:
             if reportfunc:
-                reportfunc(err, "hgext.%s" % name, "hgext3rd.%s" % name)
+                reportfunc(err, b"hgext.%s" % name, b"hgext3rd.%s" % name)
             try:
-                mod = _importh("hgext3rd.%s" % name)
+                mod = _importh(b"hgext3rd.%s" % name)
             except ImportError as err:
                 if reportfunc:
-                    reportfunc(err, "hgext3rd.%s" % name, name)
+                    reportfunc(err, b"hgext3rd.%s" % name, name)
                 mod = _importh(name)
     return mod
 
+
 def _reportimporterror(ui, err, failed, next):
     # note: this ui.log happens before --debug is processed,
     #       Use --config ui.debug=1 to see them.
-    ui.log(b'extension', b'    - could not import %s (%s): trying %s\n',
-           failed, stringutil.forcebytestr(err), next)
-    if ui.debugflag and ui.configbool('devel', 'debug.extensions'):
+    ui.log(
+        b'extension',
+        b'    - could not import %s (%s): trying %s\n',
+        failed,
+        stringutil.forcebytestr(err),
+        next,
+    )
+    if ui.debugflag and ui.configbool(b'devel', b'debug.extensions'):
         ui.traceback()
 
+
 def _rejectunicode(name, xs):
     if isinstance(xs, (list, set, tuple)):
         for x in xs:
@@ -138,36 +155,49 @@
             _rejectunicode(name, k)
             _rejectunicode(b'%s.%s' % (name, stringutil.forcebytestr(k)), v)
     elif isinstance(xs, type(u'')):
-        raise error.ProgrammingError(b"unicode %r found in %s" % (xs, name),
-                                     hint="use b'' to make it byte string")
+        raise error.ProgrammingError(
+            b"unicode %r found in %s" % (xs, name),
+            hint=b"use b'' to make it byte string",
+        )
+
 
 # attributes set by registrar.command
-_cmdfuncattrs = ('norepo', 'optionalrepo', 'inferrepo')
+_cmdfuncattrs = (b'norepo', b'optionalrepo', b'inferrepo')
+
 
 def _validatecmdtable(ui, cmdtable):
     """Check if extension commands have required attributes"""
-    for c, e in cmdtable.iteritems():
+    for c, e in pycompat.iteritems(cmdtable):
         f = e[0]
         missing = [a for a in _cmdfuncattrs if not util.safehasattr(f, a)]
         if not missing:
             continue
         raise error.ProgrammingError(
-            'missing attributes: %s' % ', '.join(missing),
-            hint="use @command decorator to register '%s'" % c)
+            b'missing attributes: %s' % b', '.join(missing),
+            hint=b"use @command decorator to register '%s'" % c,
+        )
+
 
 def _validatetables(ui, mod):
     """Sanity check for loadable tables provided by extension module"""
-    for t in ['cmdtable', 'colortable', 'configtable']:
+    for t in [b'cmdtable', b'colortable', b'configtable']:
         _rejectunicode(t, getattr(mod, t, {}))
-    for t in ['filesetpredicate', 'internalmerge', 'revsetpredicate',
-              'templatefilter', 'templatefunc', 'templatekeyword']:
+    for t in [
+        b'filesetpredicate',
+        b'internalmerge',
+        b'revsetpredicate',
+        b'templatefilter',
+        b'templatefunc',
+        b'templatekeyword',
+    ]:
         o = getattr(mod, t, None)
         if o:
             _rejectunicode(t, o._table)
     _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
 
+
 def load(ui, name, path, loadingtime=None):
-    if name.startswith('hgext.') or name.startswith('hgext/'):
+    if name.startswith(b'hgext.') or name.startswith(b'hgext/'):
         shortname = name[6:]
     else:
         shortname = name
@@ -189,8 +219,10 @@
     # of Mercurial.
     minver = getattr(mod, 'minimumhgversion', None)
     if minver and util.versiontuple(minver, 2) > util.versiontuple(n=2):
-        msg = _('(third party extension %s requires version %s or newer '
-                'of Mercurial (current: %s); disabling)\n')
+        msg = _(
+            b'(third party extension %s requires version %s or newer '
+            b'of Mercurial (current: %s); disabling)\n'
+        )
         ui.warn(msg % (shortname, minver, util.version()))
         return
     ui.log(b'extension', b'    - validating extension tables: %s\n', shortname)
@@ -198,14 +230,16 @@
 
     _extensions[shortname] = mod
     _order.append(shortname)
-    ui.log(b'extension', b'    - invoking registered callbacks: %s\n',
-           shortname)
+    ui.log(
+        b'extension', b'    - invoking registered callbacks: %s\n', shortname
+    )
     with util.timedcm('callbacks extension %s', shortname) as stats:
         for fn in _aftercallbacks.get(shortname, []):
             fn(loaded=True)
     ui.log(b'extension', b'    > callbacks completed in %s\n', stats)
     return mod
 
+
 def _runuisetup(name, ui):
     uisetup = getattr(_extensions[name], 'uisetup', None)
     if uisetup:
@@ -214,10 +248,11 @@
         except Exception as inst:
             ui.traceback(force=True)
             msg = stringutil.forcebytestr(inst)
-            ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
+            ui.warn(_(b"*** failed to set up extension %s: %s\n") % (name, msg))
             return False
     return True
 
+
 def _runextsetup(name, ui):
     extsetup = getattr(_extensions[name], 'extsetup', None)
     if extsetup:
@@ -226,26 +261,33 @@
         except Exception as inst:
             ui.traceback(force=True)
             msg = stringutil.forcebytestr(inst)
-            ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
+            ui.warn(_(b"*** failed to set up extension %s: %s\n") % (name, msg))
             return False
     return True
 
+
 def loadall(ui, whitelist=None):
     loadingtime = collections.defaultdict(int)
-    result = ui.configitems("extensions")
+    result = ui.configitems(b"extensions")
     if whitelist is not None:
         result = [(k, v) for (k, v) in result if k in whitelist]
     newindex = len(_order)
-    ui.log(b'extension', b'loading %sextensions\n',
-           'additional ' if newindex else '')
+    ui.log(
+        b'extension',
+        b'loading %sextensions\n',
+        b'additional ' if newindex else b'',
+    )
     ui.log(b'extension', b'- processing %d entries\n', len(result))
     with util.timedcm('load all extensions') as stats:
         for (name, path) in result:
             if path:
-                if path[0:1] == '!':
+                if path[0:1] == b'!':
                     if name not in _disabledextensions:
-                        ui.log(b'extension',
-                               b'  - skipping disabled extension: %s\n', name)
+                        ui.log(
+                            b'extension',
+                            b'  - skipping disabled extension: %s\n',
+                            name,
+                        )
                     _disabledextensions[name] = path[1:]
                     continue
             try:
@@ -253,17 +295,25 @@
             except Exception as inst:
                 msg = stringutil.forcebytestr(inst)
                 if path:
-                    ui.warn(_("*** failed to import extension %s from %s: %s\n")
-                            % (name, path, msg))
+                    ui.warn(
+                        _(b"*** failed to import extension %s from %s: %s\n")
+                        % (name, path, msg)
+                    )
                 else:
-                    ui.warn(_("*** failed to import extension %s: %s\n")
-                            % (name, msg))
+                    ui.warn(
+                        _(b"*** failed to import extension %s: %s\n")
+                        % (name, msg)
+                    )
                 if isinstance(inst, error.Hint) and inst.hint:
-                    ui.warn(_("*** (%s)\n") % inst.hint)
+                    ui.warn(_(b"*** (%s)\n") % inst.hint)
                 ui.traceback()
 
-    ui.log(b'extension', b'> loaded %d extensions, total time %s\n',
-           len(_order) - newindex, stats)
+    ui.log(
+        b'extension',
+        b'> loaded %d extensions, total time %s\n',
+        len(_order) - newindex,
+        stats,
+    )
     # list of (objname, loadermod, loadername) tuple:
     # - objname is the name of an object in extension module,
     #   from which extra information is loaded
@@ -273,7 +323,7 @@
     #
     # This one is for the list of item that must be run before running any setup
     earlyextraloaders = [
-        ('configtable', configitems, 'loadconfigtable'),
+        (b'configtable', configitems, b'loadconfigtable'),
     ]
 
     ui.log(b'extension', b'- loading configtable attributes\n')
@@ -286,8 +336,11 @@
             ui.log(b'extension', b'  - running uisetup for %s\n', name)
             with util.timedcm('uisetup %s', name) as stats:
                 if not _runuisetup(name, ui):
-                    ui.log(b'extension',
-                           b'    - the %s extension uisetup failed\n', name)
+                    ui.log(
+                        b'extension',
+                        b'    - the %s extension uisetup failed\n',
+                        name,
+                    )
                     broken.add(name)
             ui.log(b'extension', b'  > uisetup for %s took %s\n', name, stats)
             loadingtime[name] += stats.elapsed
@@ -301,8 +354,11 @@
             ui.log(b'extension', b'  - running extsetup for %s\n', name)
             with util.timedcm('extsetup %s', name) as stats:
                 if not _runextsetup(name, ui):
-                    ui.log(b'extension',
-                           b'    - the %s extension extsetup failed\n', name)
+                    ui.log(
+                        b'extension',
+                        b'    - the %s extension extsetup failed\n',
+                        name,
+                    )
                     broken.add(name)
             ui.log(b'extension', b'  > extsetup for %s took %s\n', name, stats)
             loadingtime[name] += stats.elapsed
@@ -320,9 +376,11 @@
                 continue
 
             for fn in _aftercallbacks[shortname]:
-                ui.log(b'extension',
-                       b'  - extension %s not loaded, notify callbacks\n',
-                       shortname)
+                ui.log(
+                    b'extension',
+                    b'  - extension %s not loaded, notify callbacks\n',
+                    shortname,
+                )
                 fn(loaded=False)
     ui.log(b'extension', b'> remaining aftercallbacks completed in %s\n', stats)
 
@@ -350,38 +408,47 @@
     #   which takes (ui, extensionname, extraobj) arguments
     ui.log(b'extension', b'- loading extension registration objects\n')
     extraloaders = [
-        ('cmdtable', commands, 'loadcmdtable'),
-        ('colortable', color, 'loadcolortable'),
-        ('filesetpredicate', fileset, 'loadpredicate'),
-        ('internalmerge', filemerge, 'loadinternalmerge'),
-        ('revsetpredicate', revset, 'loadpredicate'),
-        ('templatefilter', templatefilters, 'loadfilter'),
-        ('templatefunc', templatefuncs, 'loadfunction'),
-        ('templatekeyword', templatekw, 'loadkeyword'),
+        (b'cmdtable', commands, b'loadcmdtable'),
+        (b'colortable', color, b'loadcolortable'),
+        (b'filesetpredicate', fileset, b'loadpredicate'),
+        (b'internalmerge', filemerge, b'loadinternalmerge'),
+        (b'revsetpredicate', revset, b'loadpredicate'),
+        (b'templatefilter', templatefilters, b'loadfilter'),
+        (b'templatefunc', templatefuncs, b'loadfunction'),
+        (b'templatekeyword', templatekw, b'loadkeyword'),
     ]
     with util.timedcm('load registration objects') as stats:
         _loadextra(ui, newindex, extraloaders)
-    ui.log(b'extension', b'> extension registration object loading took %s\n',
-           stats)
+    ui.log(
+        b'extension',
+        b'> extension registration object loading took %s\n',
+        stats,
+    )
 
     # Report per extension loading time (except reposetup)
     for name in sorted(loadingtime):
-        ui.log(b'extension', b'> extension %s take a total of %s to load\n',
-               name, util.timecount(loadingtime[name]))
+        ui.log(
+            b'extension',
+            b'> extension %s takes a total of %s to load\n',
+            name,
+            util.timecount(loadingtime[name]),
+        )
 
     ui.log(b'extension', b'extension loading complete\n')
 
+
 def _loadextra(ui, newindex, extraloaders):
     for name in _order[newindex:]:
         module = _extensions[name]
         if not module:
-            continue # loading this module failed
+            continue  # loading this module failed
 
         for objname, loadermod, loadername in extraloaders:
             extraobj = getattr(module, objname, None)
             if extraobj is not None:
                 getattr(loadermod, loadername)(ui, name, extraobj)
 
+
 def afterloaded(extension, callback):
     '''Run the specified function after a named extension is loaded.
 
@@ -397,11 +464,12 @@
 
     if extension in _extensions:
         # Report loaded as False if the extension is disabled
-        loaded = (_extensions[extension] is not None)
+        loaded = _extensions[extension] is not None
         callback(loaded=loaded)
     else:
         _aftercallbacks.setdefault(extension, []).append(callback)
 
+
 def populateui(ui):
     """Run extension hooks on the given ui to populate additional members,
     extend the class dynamically, etc.
@@ -418,8 +486,11 @@
             hook(ui)
         except Exception as inst:
             ui.traceback(force=True)
-            ui.warn(_('*** failed to populate ui by extension %s: %s\n')
-                    % (name, stringutil.forcebytestr(inst)))
+            ui.warn(
+                _(b'*** failed to populate ui by extension %s: %s\n')
+                % (name, stringutil.forcebytestr(inst))
+            )
+
 
 def bind(func, *args):
     '''Partial function application
@@ -429,10 +500,13 @@
 
           f(1, 2, bar=3) === bind(f, 1)(2, bar=3)'''
     assert callable(func)
+
     def closure(*a, **kw):
         return func(*(args + a), **kw)
+
     return closure
 
+
 def _updatewrapper(wrap, origfn, unboundwrapper):
     '''Copy and add some useful attributes to wrapper'''
     try:
@@ -445,6 +519,7 @@
     wrap._origfunc = origfn
     wrap._unboundwrapper = unboundwrapper
 
+
 def wrapcommand(table, command, wrapper, synopsis=None, docstring=None):
     '''Wrap the command named `command' in table
 
@@ -476,14 +551,15 @@
     '''
     assert callable(wrapper)
     aliases, entry = cmdutil.findcmd(command, table)
-    for alias, e in table.iteritems():
+    for alias, e in pycompat.iteritems(table):
         if e is entry:
             key = alias
             break
 
     origfn = entry[0]
-    wrap = functools.partial(util.checksignature(wrapper),
-                             util.checksignature(origfn))
+    wrap = functools.partial(
+        util.checksignature(wrapper), util.checksignature(origfn)
+    )
     _updatewrapper(wrap, origfn, wrapper)
     if docstring is not None:
         wrap.__doc__ += docstring
@@ -495,6 +571,7 @@
     table[key] = tuple(newentry)
     return entry
 
+
 def wrapfilecache(cls, propname, wrapper):
     """Wraps a filecache property.
 
@@ -506,14 +583,18 @@
         if propname in currcls.__dict__:
             origfn = currcls.__dict__[propname].func
             assert callable(origfn)
+
             def wrap(*args, **kwargs):
                 return wrapper(origfn, *args, **kwargs)
+
             currcls.__dict__[propname].func = wrap
             break
 
     if currcls is object:
-        raise AttributeError(r"type '%s' has no property '%s'" % (
-            cls, propname))
+        raise AttributeError(
+            r"type '%s' has no property '%s'" % (cls, propname)
+        )
+
 
 class wrappedfunction(object):
     '''context manager for temporarily wrapping a function'''
@@ -530,6 +611,7 @@
     def __exit__(self, exctype, excvalue, traceback):
         unwrapfunction(self._container, self._funcname, self._wrapper)
 
+
 def wrapfunction(container, funcname, wrapper):
     '''Wrap the function named funcname in container
 
@@ -579,6 +661,7 @@
     setattr(container, funcname, wrap)
     return origfn
 
+
 def unwrapfunction(container, funcname, wrapper=None):
     '''undo wrapfunction
 
@@ -599,6 +682,7 @@
         wrapfunction(container, funcname, w)
     return wrapper
 
+
 def getwrapperchain(container, funcname):
     '''get a chain of wrappers of a function
 
@@ -615,36 +699,40 @@
         fn = getattr(fn, '_origfunc', None)
     return result
 
+
 def _disabledpaths():
     '''find paths of disabled extensions. returns a dict of {name: path}'''
     import hgext
+
     extpath = os.path.dirname(
-        os.path.abspath(pycompat.fsencode(hgext.__file__)))
-    try: # might not be a filesystem path
+        os.path.abspath(pycompat.fsencode(hgext.__file__))
+    )
+    try:  # might not be a filesystem path
         files = os.listdir(extpath)
     except OSError:
         return {}
 
     exts = {}
     for e in files:
-        if e.endswith('.py'):
-            name = e.rsplit('.', 1)[0]
+        if e.endswith(b'.py'):
+            name = e.rsplit(b'.', 1)[0]
             path = os.path.join(extpath, e)
         else:
             name = e
-            path = os.path.join(extpath, e, '__init__.py')
+            path = os.path.join(extpath, e, b'__init__.py')
             if not os.path.exists(path):
                 continue
-        if name in exts or name in _order or name == '__init__':
+        if name in exts or name in _order or name == b'__init__':
             continue
         exts[name] = path
-    for name, path in _disabledextensions.iteritems():
+    for name, path in pycompat.iteritems(_disabledextensions):
         # If no path was provided for a disabled extension (e.g. "color=!"),
         # don't replace the path we already found by the scan above.
         if path:
             exts[name] = path
     return exts
 
+
 def _moduledoc(file):
     '''return the top-level python documentation for the given file
 
@@ -654,13 +742,13 @@
     result = []
 
     line = file.readline()
-    while line[:1] == '#' or not line.strip():
+    while line[:1] == b'#' or not line.strip():
         line = file.readline()
         if not line:
             break
 
     start = line[:3]
-    if start == '"""' or start == "'''":
+    if start == b'"""' or start == b"'''":
         line = line[3:]
         while line:
             if line.rstrip().endswith(start):
@@ -669,34 +757,39 @@
                     result.append(line)
                 break
             elif not line:
-                return None # unmatched delimiter
+                return None  # unmatched delimiter
             result.append(line)
             line = file.readline()
     else:
         return None
 
-    return ''.join(result)
+    return b''.join(result)
+
 
 def _disabledhelp(path):
     '''retrieve help synopsis of a disabled extension (without importing)'''
     try:
-        with open(path, 'rb') as src:
+        with open(path, b'rb') as src:
             doc = _moduledoc(src)
     except IOError:
         return
 
-    if doc: # extracting localized synopsis
+    if doc:  # extracting localized synopsis
         return gettext(doc)
     else:
-        return _('(no help text available)')
+        return _(b'(no help text available)')
+
 
 def disabled():
     '''find disabled extensions from hgext. returns a dict of {name: desc}'''
     try:
         from hgext import __index__
-        return dict((name, gettext(desc))
-                    for name, desc in __index__.docs.iteritems()
-                    if name not in _order)
+
+        return dict(
+            (name, gettext(desc))
+            for name, desc in pycompat.iteritems(__index__.docs)
+            if name not in _order
+        )
     except (ImportError, AttributeError):
         pass
 
@@ -705,17 +798,19 @@
         return {}
 
     exts = {}
-    for name, path in paths.iteritems():
+    for name, path in pycompat.iteritems(paths):
         doc = _disabledhelp(path)
         if doc:
             exts[name] = doc.splitlines()[0]
 
     return exts
 
+
 def disabledext(name):
     '''find a specific disabled extension from hgext. returns desc'''
     try:
         from hgext import __index__
+
         if name in _order:  # enabled
             return
         else:
@@ -727,6 +822,7 @@
     if name in paths:
         return _disabledhelp(paths[name])
 
+
 def _walkcommand(node):
     """Scan @command() decorators in the tree starting at node"""
     todo = collections.deque([node])
@@ -744,12 +840,13 @@
                 continue
             yield d
 
+
 def _disabledcmdtable(path):
     """Construct a dummy command table without loading the extension module
 
     This may raise IOError or SyntaxError.
     """
-    with open(path, 'rb') as src:
+    with open(path, b'rb') as src:
         root = ast.parse(src.read(), path)
     cmdtable = {}
     for node in _walkcommand(root):
@@ -765,6 +862,7 @@
         cmdtable[name] = (None, [], b'')
     return cmdtable
 
+
 def _finddisabledcmd(ui, cmd, name, path, strict):
     try:
         cmdtable = _disabledcmdtable(path)
@@ -783,6 +881,7 @@
     doc = _disabledhelp(path)
     return (cmd, name, doc)
 
+
 def disabledcmd(ui, cmd, strict=False):
     '''find cmd from disabled extensions without importing.
     returns (cmdname, extname, doc)'''
@@ -798,7 +897,7 @@
         ext = _finddisabledcmd(ui, cmd, cmd, path, strict=strict)
     if not ext:
         # otherwise, interrogate each extension until there's a match
-        for name, path in paths.iteritems():
+        for name, path in pycompat.iteritems(paths):
             ext = _finddisabledcmd(ui, cmd, name, path, strict=strict)
             if ext:
                 break
@@ -807,34 +906,39 @@
 
     raise error.UnknownCommand(cmd)
 
+
 def enabled(shortname=True):
     '''return a dict of {name: desc} of extensions'''
     exts = {}
     for ename, ext in extensions():
-        doc = (gettext(ext.__doc__) or _('(no help text available)'))
+        doc = gettext(ext.__doc__) or _(b'(no help text available)')
         if shortname:
-            ename = ename.split('.')[-1]
+            ename = ename.split(b'.')[-1]
         exts[ename] = doc.splitlines()[0].strip()
 
     return exts
 
+
 def notloaded():
     '''return short names of extensions that failed to load'''
-    return [name for name, mod in _extensions.iteritems() if mod is None]
+    return [
+        name for name, mod in pycompat.iteritems(_extensions) if mod is None
+    ]
+
 
 def moduleversion(module):
     '''return version information from given module as a string'''
-    if (util.safehasattr(module, 'getversion')
-          and callable(module.getversion)):
+    if util.safehasattr(module, b'getversion') and callable(module.getversion):
         version = module.getversion()
-    elif util.safehasattr(module, '__version__'):
+    elif util.safehasattr(module, b'__version__'):
         version = module.__version__
     else:
-        version = ''
+        version = b''
     if isinstance(version, (list, tuple)):
-        version = '.'.join(pycompat.bytestr(o) for o in version)
+        version = b'.'.join(pycompat.bytestr(o) for o in version)
     return version
 
+
 def ismoduleinternal(module):
     exttestedwith = getattr(module, 'testedwith', None)
-    return exttestedwith == "ships-with-hg-core"
+    return exttestedwith == b"ships-with-hg-core"
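
The extensions.py hunks above are dominated by two mechanical sweeps:
bytes literals (b'...') for config keys and messages, and
pycompat.iteritems(d) in place of d.iteritems(), which Python 3 removed.
A minimal sketch of the kind of dict-iteration shim this implies
(illustrative only; the real pycompat module may differ):

    import sys

    # Hedged sketch of a py2/py3 dict-iteration shim in the spirit of
    # pycompat.iteritems(); not the verbatim Mercurial implementation.
    if sys.version_info[0] >= 3:
        def iteritems(mapping):
            # Python 3: items() is already a lazy view.
            return mapping.items()
    else:
        def iteritems(mapping):
            # Python 2: iteritems() avoids materializing a full list.
            return mapping.iteritems()
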
--- a/mercurial/exthelper.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/exthelper.py	Mon Oct 21 11:09:48 2019 -0400
@@ -21,6 +21,7 @@
 
 from hgdemandimport import tracing
 
+
 class exthelper(object):
     """Helper for modular extension setup
 
@@ -105,7 +106,7 @@
         self._extcommandwrappers.extend(other._extcommandwrappers)
         self._functionwrappers.extend(other._functionwrappers)
         self.cmdtable.update(other.cmdtable)
-        for section, items in other.configtable.iteritems():
+        for section, items in pycompat.iteritems(other.configtable):
             if section in self.configtable:
                 self.configtable[section].update(items)
             else:
@@ -138,7 +139,7 @@
         for cont, funcname, wrapper in self._functionwrappers:
             extensions.wrapfunction(cont, funcname, wrapper)
         for c in self._uicallables:
-            with tracing.log(b'finaluisetup: %s', pycompat.sysbytes(repr(c))):
+            with tracing.log('finaluisetup: %s', repr(c)):
                 c(ui)
 
     def finaluipopulate(self, ui):
@@ -179,7 +180,7 @@
                     entry[1].append(opt)
 
         for c in self._extcallables:
-            with tracing.log(b'finalextsetup: %s', pycompat.sysbytes(repr(c))):
+            with tracing.log('finalextsetup: %s', repr(c)):
                 c(ui)
 
     def finalreposetup(self, ui, repo):
@@ -192,7 +193,7 @@
         - Changes to repo.__class__, repo.dirstate.__class__
         """
         for c in self._repocallables:
-            with tracing.log(b'finalreposetup: %s', pycompat.sysbytes(repr(c))):
+            with tracing.log('finalreposetup: %s', repr(c)):
                 c(ui, repo)
 
     def uisetup(self, call):
@@ -272,18 +273,20 @@
         else:
             for opt in opts:
                 if not isinstance(opt, tuple):
-                    raise error.ProgrammingError('opts must be list of tuples')
+                    raise error.ProgrammingError(b'opts must be list of tuples')
                 if len(opt) not in (4, 5):
-                    msg = 'each opt tuple must contain 4 or 5 values'
+                    msg = b'each opt tuple must contain 4 or 5 values'
                     raise error.ProgrammingError(msg)
 
         def dec(wrapper):
             if extension is None:
                 self._commandwrappers.append((command, wrapper, opts))
             else:
-                self._extcommandwrappers.append((extension, command, wrapper,
-                                                 opts))
+                self._extcommandwrappers.append(
+                    (extension, command, wrapper, opts)
+                )
             return wrapper
+
         return dec
 
     def wrapfunction(self, container, funcname):
@@ -300,7 +303,9 @@
                 ui.note('His head smashed in and his heart cut out')
                 return orig(*args, **kwargs)
         """
+
         def dec(wrapper):
             self._functionwrappers.append((container, funcname, wrapper))
             return wrapper
+
         return dec
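
Both wrapper-registering methods above return an inner dec function: the
outer call records where to hook, and dec later receives the wrapper and
stores it until the final*setup phases replay the registrations. A
self-contained sketch of that deferred-registration pattern (names are
illustrative, not the exthelper API):

    # Hedged sketch: registration is recorded at decoration time and
    # replayed by a later setup phase, as exthelper does.
    _pending = []

    def wrapfunction(container, funcname):
        def dec(wrapper):
            _pending.append((container, funcname, wrapper))
            return wrapper  # decorated function stays directly callable
        return dec

    class tools(object):
        @staticmethod
        def double(x):
            return 2 * x

    @wrapfunction(tools, 'double')
    def noisydouble(orig, x):
        print('double(%r) called' % (x,))
        return orig(x)

    # A setup phase would now walk _pending and install each wrapper.
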
--- a/mercurial/fancyopts.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/fancyopts.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,14 +19,15 @@
 # Set of flags to not apply boolean negation logic on
 nevernegate = {
     # avoid --no-noninteractive
-    'noninteractive',
+    b'noninteractive',
     # These two flags are special because they cause hg to do one
     # thing and then exit, and so aren't suitable for use in things
     # like aliases anyway.
-    'help',
-    'version',
+    b'help',
+    b'version',
 }
 
+
 def _earlyoptarg(arg, shortlist, namelist):
     """Check if the given arg is a valid unabbreviated option
 
@@ -76,18 +77,19 @@
     >>> opt(b'-:foo')
     ('', False, '', False)
     """
-    if arg.startswith('--'):
-        flag, eq, val = arg.partition('=')
+    if arg.startswith(b'--'):
+        flag, eq, val = arg.partition(b'=')
         if flag[2:] in namelist:
             return flag, bool(eq), val, False
-        if flag[2:] + '=' in namelist:
+        if flag[2:] + b'=' in namelist:
             return flag, bool(eq), val, True
-    elif arg.startswith('-') and arg != '-' and not arg.startswith('-:'):
+    elif arg.startswith(b'-') and arg != b'-' and not arg.startswith(b'-:'):
         flag, val = arg[:2], arg[2:]
         i = shortlist.find(flag[1:])
         if i >= 0:
-            return flag, bool(val), val, shortlist.startswith(':', i + 1)
-    return '', False, '', False
+            return flag, bool(val), val, shortlist.startswith(b':', i + 1)
+    return b'', False, b'', False
+
 
 def earlygetopt(args, shortlist, namelist, gnu=False, keepsep=False):
     """Parse options like getopt, but ignores unknown options and abbreviated
@@ -176,7 +178,7 @@
     pos = 0
     while pos < len(args):
         arg = args[pos]
-        if arg == '--':
+        if arg == b'--':
             pos += not keepsep
             break
         flag, hasval, val, takeval = _earlyoptarg(arg, shortlist, namelist)
@@ -202,6 +204,7 @@
     parsedargs.extend(args[pos:])
     return parsedopts, parsedargs
 
+
 class customopt(object):
     """Manage defaults and mutations for any type of opt."""
 
@@ -226,6 +229,7 @@
 
         On failure, abort can be called with a string error message."""
 
+
 class _simpleopt(customopt):
     def _isboolopt(self):
         return isinstance(self._defaultvalue, (bool, type(None)))
@@ -233,6 +237,7 @@
     def newstate(self, oldstate, newparam, abort):
         return newparam
 
+
 class _callableopt(customopt):
     def __init__(self, callablefn):
         self.callablefn = callablefn
@@ -241,6 +246,7 @@
     def newstate(self, oldstate, newparam, abort):
         return self.callablefn(newparam)
 
+
 class _listopt(customopt):
     def getdefaultvalue(self):
         return self._defaultvalue[:]
@@ -249,12 +255,14 @@
         oldstate.append(newparam)
         return oldstate
 
+
 class _intopt(customopt):
     def newstate(self, oldstate, newparam, abort):
         try:
             return int(newparam)
         except ValueError:
-            abort(_('expected int'))
+            abort(_(b'expected int'))
+
 
 def _defaultopt(default):
     """Returns a default opt implementation, given a default value."""
@@ -270,6 +278,7 @@
     else:
         return _simpleopt(default)
 
+
 def fancyopts(args, options, state, gnu=False, early=False, optaliases=None):
     """
     read args, parse options, and store options in state
@@ -301,7 +310,7 @@
     if optaliases is None:
         optaliases = {}
     namelist = []
-    shortlist = ''
+    shortlist = b''
     argmap = {}
     defmap = {}
     negations = {}
@@ -315,11 +324,11 @@
         # convert opts to getopt format
         onames = [name]
         onames.extend(optaliases.get(name, []))
-        name = name.replace('-', '_')
+        name = name.replace(b'-', b'_')
 
-        argmap['-' + short] = name
+        argmap[b'-' + short] = name
         for n in onames:
-            argmap['--' + n] = name
+            argmap[b'--' + n] = name
         defmap[name] = _defaultopt(default)
 
         # copy defaults to state
@@ -328,20 +337,20 @@
         # does it take a parameter?
         if not defmap[name]._isboolopt():
             if short:
-                short += ':'
-            onames = [n + '=' for n in onames]
+                short += b':'
+            onames = [n + b'=' for n in onames]
         elif name not in nevernegate:
             for n in onames:
-                if n.startswith('no-'):
+                if n.startswith(b'no-'):
                     insert = n[3:]
                 else:
-                    insert = 'no-' + n
+                    insert = b'no-' + n
                 # backout (as a practical example) has both --commit and
                 # --no-commit options, so we don't want to allow the
                 # negations of those flags.
                 if insert not in alllong:
-                    assert ('--' + n) not in negations
-                    negations['--' + insert] = '--' + n
+                    assert (b'--' + n) not in negations
+                    negations[b'--' + insert] = b'--' + n
                     namelist.append(insert)
         if short:
             shortlist += short
@@ -369,9 +378,13 @@
         if obj._isboolopt():
             state[name] = boolval
         else:
+
             def abort(s):
-                raise error.Abort(_('invalid value %r for option %s, %s')
-                                  % (pycompat.maybebytestr(val), opt, s))
+                raise error.Abort(
+                    _(b'invalid value %r for option %s, %s')
+                    % (pycompat.maybebytestr(val), opt, s)
+                )
+
             state[name] = defmap[name].newstate(state[name], val, abort)
 
     # return unparsed args
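
A hedged usage sketch of the parser above: each opt is a
(short, long, default, help) tuple, the default's type selects the opt
class (_simpleopt for booleans, _intopt for ints, _listopt for lists),
and boolean long options outside nevernegate automatically gain a --no-
negation. Details are illustrative and assume mercurial is importable:

    from mercurial import fancyopts

    options = [
        (b'v', b'verbose', False, b'enable additional output'),
        (b'l', b'limit', 10, b'maximum number of results'),
    ]
    state = {}
    rest = fancyopts.fancyopts(
        [b'--no-verbose', b'-l', b'5', b'file.txt'], options, state
    )
    # state -> {b'verbose': False, b'limit': 5}; rest -> [b'file.txt']
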
--- a/mercurial/filelog.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/filelog.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,20 +14,21 @@
 )
 from . import (
     error,
-    repository,
     revlog,
 )
-from .utils import (
-    interfaceutil,
-    storageutil,
+from .interfaces import (
+    repository,
+    util as interfaceutil,
 )
+from .utils import storageutil
+
 
 @interfaceutil.implementer(repository.ifilestorage)
 class filelog(object):
     def __init__(self, opener, path):
-        self._revlog = revlog.revlog(opener,
-                                     '/'.join(('data', path + '.i')),
-                                     censorable=True)
+        self._revlog = revlog.revlog(
+            opener, b'/'.join((b'data', path + b'.i')), censorable=True
+        )
         # Full name of the user visible file, relative to the repository root.
         # Used by LFS.
         self._revlog.filename = path
@@ -64,8 +65,9 @@
         return self._revlog.node(rev)
 
     def lookup(self, node):
-        return storageutil.fileidlookup(self._revlog, node,
-                                        self._revlog.indexfile)
+        return storageutil.fileidlookup(
+            self._revlog, node, self._revlog.indexfile
+        )
 
     def linkrev(self, rev):
         return self._revlog.linkrev(rev)
@@ -90,29 +92,66 @@
     def revision(self, node, _df=None, raw=False):
         return self._revlog.revision(node, _df=_df, raw=raw)
 
-    def emitrevisions(self, nodes, nodesorder=None,
-                      revisiondata=False, assumehaveparentrevisions=False,
-                      deltamode=repository.CG_DELTAMODE_STD):
+    def rawdata(self, node, _df=None):
+        return self._revlog.rawdata(node, _df=_df)
+
+    def emitrevisions(
+        self,
+        nodes,
+        nodesorder=None,
+        revisiondata=False,
+        assumehaveparentrevisions=False,
+        deltamode=repository.CG_DELTAMODE_STD,
+    ):
         return self._revlog.emitrevisions(
-            nodes, nodesorder=nodesorder, revisiondata=revisiondata,
+            nodes,
+            nodesorder=nodesorder,
+            revisiondata=revisiondata,
             assumehaveparentrevisions=assumehaveparentrevisions,
-            deltamode=deltamode)
+            deltamode=deltamode,
+        )
 
-    def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
-                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
-                    cachedelta=None):
-        return self._revlog.addrevision(revisiondata, transaction, linkrev,
-                                    p1, p2, node=node, flags=flags,
-                                    cachedelta=cachedelta)
+    def addrevision(
+        self,
+        revisiondata,
+        transaction,
+        linkrev,
+        p1,
+        p2,
+        node=None,
+        flags=revlog.REVIDX_DEFAULT_FLAGS,
+        cachedelta=None,
+    ):
+        return self._revlog.addrevision(
+            revisiondata,
+            transaction,
+            linkrev,
+            p1,
+            p2,
+            node=node,
+            flags=flags,
+            cachedelta=cachedelta,
+        )
 
-    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
-                 maybemissingparents=False):
+    def addgroup(
+        self,
+        deltas,
+        linkmapper,
+        transaction,
+        addrevisioncb=None,
+        maybemissingparents=False,
+    ):
         if maybemissingparents:
-            raise error.Abort(_('revlog storage does not support missing '
-                                'parents write mode'))
+            raise error.Abort(
+                _(
+                    b'revlog storage does not support missing '
+                    b'parents write mode'
+                )
+            )
 
-        return self._revlog.addgroup(deltas, linkmapper, transaction,
-                                     addrevisioncb=addrevisioncb)
+        return self._revlog.addgroup(
+            deltas, linkmapper, transaction, addrevisioncb=addrevisioncb
+        )
 
     def getstrippoint(self, minlink):
         return self._revlog.getstrippoint(minlink)
@@ -130,7 +169,7 @@
         return storageutil.filtermetadata(self.revision(node))
 
     def add(self, text, meta, transaction, link, p1=None, p2=None):
-        if meta or text.startswith('\1\n'):
+        if meta or text.startswith(b'\1\n'):
             text = storageutil.packmeta(meta, text)
         return self.addrevision(text, transaction, link, p1, p2)
 
@@ -160,13 +199,21 @@
     def verifyintegrity(self, state):
         return self._revlog.verifyintegrity(state)
 
-    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
-                    revisionscount=False, trackedsize=False,
-                    storedsize=False):
+    def storageinfo(
+        self,
+        exclusivefiles=False,
+        sharedfiles=False,
+        revisionscount=False,
+        trackedsize=False,
+        storedsize=False,
+    ):
         return self._revlog.storageinfo(
-            exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
-            revisionscount=revisionscount, trackedsize=trackedsize,
-            storedsize=storedsize)
+            exclusivefiles=exclusivefiles,
+            sharedfiles=sharedfiles,
+            revisionscount=revisionscount,
+            trackedsize=trackedsize,
+            storedsize=storedsize,
+        )
 
     # TODO these aren't part of the interface and aren't internal methods.
     # Callers should be fixed to not use them.
@@ -183,10 +230,11 @@
     # Used by repo upgrade.
     def clone(self, tr, destrevlog, **kwargs):
         if not isinstance(destrevlog, filelog):
-            raise error.ProgrammingError('expected filelog to clone()')
+            raise error.ProgrammingError(b'expected filelog to clone()')
 
         return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
 
+
 class narrowfilelog(filelog):
     """Filelog variation to be used with narrow stores."""
 
--- a/mercurial/filemerge.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/filemerge.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,6 +18,10 @@
     nullid,
     short,
 )
+from .pycompat import (
+    getattr,
+    open,
+)
 
 from . import (
     encoding,
@@ -40,14 +44,18 @@
     stringutil,
 )
 
+
 def _toolstr(ui, tool, part, *args):
-    return ui.config("merge-tools", tool + "." + part, *args)
+    return ui.config(b"merge-tools", tool + b"." + part, *args)
 
-def _toolbool(ui, tool, part,*args):
-    return ui.configbool("merge-tools", tool + "." + part, *args)
+
+def _toolbool(ui, tool, part, *args):
+    return ui.configbool(b"merge-tools", tool + b"." + part, *args)
+
 
 def _toollist(ui, tool, part):
-    return ui.configlist("merge-tools", tool + "." + part)
+    return ui.configlist(b"merge-tools", tool + b"." + part)
+
 
 internals = {}
 # Merge tools to document.
@@ -57,24 +65,27 @@
 
 # internal tool merge types
 nomerge = internaltool.nomerge
-mergeonly = internaltool.mergeonly # just the full merge, no premerge
-fullmerge = internaltool.fullmerge # both premerge and merge
+mergeonly = internaltool.mergeonly  # just the full merge, no premerge
+fullmerge = internaltool.fullmerge  # both premerge and merge
 
 # IMPORTANT: keep the last line of this prompt very short ("What do you want to
 # do?") because of issue6158, ideally to <40 English characters (to allow other
 # languages that may take more columns to still have a chance to fit in an
 # 80-column screen).
 _localchangedotherdeletedmsg = _(
-    "file '%(fd)s' was deleted in other%(o)s but was modified in local%(l)s.\n"
-    "You can use (c)hanged version, (d)elete, or leave (u)nresolved.\n"
-    "What do you want to do?"
-    "$$ &Changed $$ &Delete $$ &Unresolved")
+    b"file '%(fd)s' was deleted in other%(o)s but was modified in local%(l)s.\n"
+    b"You can use (c)hanged version, (d)elete, or leave (u)nresolved.\n"
+    b"What do you want to do?"
+    b"$$ &Changed $$ &Delete $$ &Unresolved"
+)
 
 _otherchangedlocaldeletedmsg = _(
-    "file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n"
-    "You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.\n"
-    "What do you want to do?"
-    "$$ &Changed $$ &Deleted $$ &Unresolved")
+    b"file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n"
+    b"You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.\n"
+    b"What do you want to do?"
+    b"$$ &Changed $$ &Deleted $$ &Unresolved"
+)
+
 
 class absentfilectx(object):
     """Represents a file that's ostensibly in a context but is actually not
@@ -82,6 +93,7 @@
 
     This is here because it's very specific to the filemerge code for now --
     other code is likely going to break with the values this returns."""
+
     def __init__(self, ctx, f):
         self._ctx = ctx
         self._f = f
@@ -99,17 +111,20 @@
         return nullid
 
     _customcmp = True
+
     def cmp(self, fctx):
         """compare with other file context
 
         returns True if different from fctx.
         """
-        return not (fctx.isabsent() and
-                    fctx.ctx() == self.ctx() and
-                    fctx.path() == self.path())
+        return not (
+            fctx.isabsent()
+            and fctx.ctx() == self.ctx()
+            and fctx.path() == self.path()
+        )
 
     def flags(self):
-        return ''
+        return b''
 
     def changectx(self):
         return self._ctx
@@ -120,34 +135,38 @@
     def isabsent(self):
         return True
 
+
 def _findtool(ui, tool):
     if tool in internals:
         return tool
-    cmd = _toolstr(ui, tool, "executable", tool)
-    if cmd.startswith('python:'):
+    cmd = _toolstr(ui, tool, b"executable", tool)
+    if cmd.startswith(b'python:'):
         return cmd
     return findexternaltool(ui, tool)
 
+
 def _quotetoolpath(cmd):
-    if cmd.startswith('python:'):
+    if cmd.startswith(b'python:'):
         return cmd
     return procutil.shellquote(cmd)
 
+
 def findexternaltool(ui, tool):
-    for kn in ("regkey", "regkeyalt"):
+    for kn in (b"regkey", b"regkeyalt"):
         k = _toolstr(ui, tool, kn)
         if not k:
             continue
-        p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
+        p = util.lookupreg(k, _toolstr(ui, tool, b"regname"))
         if p:
-            p = procutil.findexe(p + _toolstr(ui, tool, "regappend", ""))
+            p = procutil.findexe(p + _toolstr(ui, tool, b"regappend", b""))
             if p:
                 return p
-    exe = _toolstr(ui, tool, "executable", tool)
+    exe = _toolstr(ui, tool, b"executable", tool)
     return procutil.findexe(util.expandpath(exe))
 
+
 def _picktool(repo, ui, path, binary, symlink, changedelete):
-    strictcheck = ui.configbool('merge', 'strict-capability-check')
+    strictcheck = ui.configbool(b'merge', b'strict-capability-check')
 
     def hascapability(tool, capability, strict=False):
         if tool in internals:
@@ -160,33 +179,33 @@
     def check(tool, pat, symlink, binary, changedelete):
         tmsg = tool
         if pat:
-            tmsg = _("%s (for pattern %s)") % (tool, pat)
+            tmsg = _(b"%s (for pattern %s)") % (tool, pat)
         if not _findtool(ui, tool):
-            if pat: # explicitly requested tool deserves a warning
-                ui.warn(_("couldn't find merge tool %s\n") % tmsg)
-            else: # configured but non-existing tools are more silent
-                ui.note(_("couldn't find merge tool %s\n") % tmsg)
-        elif symlink and not hascapability(tool, "symlink", strictcheck):
-            ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
-        elif binary and not hascapability(tool, "binary", strictcheck):
-            ui.warn(_("tool %s can't handle binary\n") % tmsg)
+            if pat:  # explicitly requested tool deserves a warning
+                ui.warn(_(b"couldn't find merge tool %s\n") % tmsg)
+            else:  # configured but non-existing tools are more silent
+                ui.note(_(b"couldn't find merge tool %s\n") % tmsg)
+        elif symlink and not hascapability(tool, b"symlink", strictcheck):
+            ui.warn(_(b"tool %s can't handle symlinks\n") % tmsg)
+        elif binary and not hascapability(tool, b"binary", strictcheck):
+            ui.warn(_(b"tool %s can't handle binary\n") % tmsg)
         elif changedelete and not supportscd(tool):
             # the nomerge tools are the only tools that support change/delete
             # conflicts
             pass
-        elif not procutil.gui() and _toolbool(ui, tool, "gui"):
-            ui.warn(_("tool %s requires a GUI\n") % tmsg)
+        elif not procutil.gui() and _toolbool(ui, tool, b"gui"):
+            ui.warn(_(b"tool %s requires a GUI\n") % tmsg)
         else:
             return True
         return False
 
     # internal config: ui.forcemerge
     # forcemerge comes from command line arguments, highest priority
-    force = ui.config('ui', 'forcemerge')
+    force = ui.config(b'ui', b'forcemerge')
     if force:
         toolpath = _findtool(ui, force)
         if changedelete and not supportscd(toolpath):
-            return ":prompt", None
+            return b":prompt", None
         else:
             if toolpath:
                 return (force, _quotetoolpath(toolpath))
@@ -195,10 +214,10 @@
                 return (force, force)
 
     # HGMERGE takes next precedence
-    hgmerge = encoding.environ.get("HGMERGE")
+    hgmerge = encoding.environ.get(b"HGMERGE")
     if hgmerge:
         if changedelete and not supportscd(hgmerge):
-            return ":prompt", None
+            return b":prompt", None
         else:
             return (hgmerge, hgmerge)
 
@@ -207,39 +226,44 @@
     # whether binary capability should be checked strictly
     binarycap = binary and strictcheck
 
-    for pat, tool in ui.configitems("merge-patterns"):
-        mf = match.match(repo.root, '', [pat])
+    for pat, tool in ui.configitems(b"merge-patterns"):
+        mf = match.match(repo.root, b'', [pat])
         if mf(path) and check(tool, pat, symlink, binarycap, changedelete):
-            if binary and not hascapability(tool, "binary", strict=True):
-                ui.warn(_("warning: check merge-patterns configurations,"
-                          " if %r for binary file %r is unintentional\n"
-                          "(see 'hg help merge-tools'"
-                          " for binary files capability)\n")
-                        % (pycompat.bytestr(tool), pycompat.bytestr(path)))
+            if binary and not hascapability(tool, b"binary", strict=True):
+                ui.warn(
+                    _(
+                        b"warning: check merge-patterns configurations,"
+                        b" if %r for binary file %r is unintentional\n"
+                        b"(see 'hg help merge-tools'"
+                        b" for binary files capability)\n"
+                    )
+                    % (pycompat.bytestr(tool), pycompat.bytestr(path))
+                )
             toolpath = _findtool(ui, tool)
             return (tool, _quotetoolpath(toolpath))
 
     # then merge tools
     tools = {}
     disabled = set()
-    for k, v in ui.configitems("merge-tools"):
-        t = k.split('.')[0]
+    for k, v in ui.configitems(b"merge-tools"):
+        t = k.split(b'.')[0]
         if t not in tools:
-            tools[t] = int(_toolstr(ui, t, "priority"))
-        if _toolbool(ui, t, "disabled"):
+            tools[t] = int(_toolstr(ui, t, b"priority"))
+        if _toolbool(ui, t, b"disabled"):
             disabled.add(t)
     names = tools.keys()
-    tools = sorted([(-p, tool) for tool, p in tools.items()
-                    if tool not in disabled])
-    uimerge = ui.config("ui", "merge")
+    tools = sorted(
+        [(-p, tool) for tool, p in tools.items() if tool not in disabled]
+    )
+    uimerge = ui.config(b"ui", b"merge")
     if uimerge:
         # external tools defined in uimerge won't be able to handle
         # change/delete conflicts
         if check(uimerge, path, symlink, binary, changedelete):
             if uimerge not in names and not changedelete:
                 return (uimerge, uimerge)
-            tools.insert(0, (None, uimerge)) # highest priority
-    tools.append((None, "hgmerge")) # the old default, if found
+            tools.insert(0, (None, uimerge))  # highest priority
+    tools.append((None, b"hgmerge"))  # the old default, if found
     for p, t in tools:
         if check(t, None, symlink, binary, changedelete):
             toolpath = _findtool(ui, t)
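
The sort above relies on negated priorities: sorted() over
(-priority, tool) pairs yields the highest-priority tool first, with
ties broken alphabetically. A small illustration:

    # Illustration of the (-priority, name) ordering for merge-tools.
    tools = {b'kdiff3': 100, b'meld': 100, b'vimdiff': 0}
    ordered = sorted((-p, t) for t, p in tools.items())
    # -> [(-100, b'kdiff3'), (-100, b'meld'), (0, b'vimdiff')]
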
@@ -249,25 +273,27 @@
     if symlink or binary or changedelete:
         if not changedelete and len(tools):
             # any tool is rejected by capability for symlink or binary
-            ui.warn(_("no tool found to merge %s\n") % path)
-        return ":prompt", None
-    return ":merge", None
+            ui.warn(_(b"no tool found to merge %s\n") % path)
+        return b":prompt", None
+    return b":merge", None
+
 
 def _eoltype(data):
-    "Guess the EOL type of a file"
-    if '\0' in data: # binary
+    b"Guess the EOL type of a file"
+    if b'\0' in data:  # binary
         return None
-    if '\r\n' in data: # Windows
-        return '\r\n'
-    if '\r' in data: # Old Mac
-        return '\r'
-    if '\n' in data: # UNIX
-        return '\n'
-    return None # unknown
+    if b'\r\n' in data:  # Windows
+        return b'\r\n'
+    if b'\r' in data:  # Old Mac
+        return b'\r'
+    if b'\n' in data:  # UNIX
+        return b'\n'
+    return None  # unknown
+
 
 def _matcheol(file, back):
-    "Convert EOL markers in a file to match origfile"
-    tostyle = _eoltype(back.data()) # No repo.wread filters?
+    b"Convert EOL markers in a file to match origfile"
+    tostyle = _eoltype(back.data())  # No repo.wread filters?
     if tostyle:
         data = util.readfile(file)
         style = _eoltype(data)
@@ -276,7 +302,8 @@
             if newdata != data:
                 util.writefile(file, newdata)
 
-@internaltool('prompt', nomerge)
+
+@internaltool(b'prompt', nomerge)
 def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
     """Asks the user which of the local `p1()` or the other `p2()` version to
     keep as the merged version."""
@@ -287,52 +314,54 @@
     # Avoid prompting during an in-memory merge since it doesn't support merge
     # conflicts.
     if fcd.changectx().isinmemory():
-        raise error.InMemoryMergeConflictsError('in-memory merge does not '
-                                                'support file conflicts')
+        raise error.InMemoryMergeConflictsError(
+            b'in-memory merge does not support file conflicts'
+        )
 
     prompts = partextras(labels)
-    prompts['fd'] = uipathfn(fd)
+    prompts[b'fd'] = uipathfn(fd)
     try:
         if fco.isabsent():
-            index = ui.promptchoice(
-                _localchangedotherdeletedmsg % prompts, 2)
-            choice = ['local', 'other', 'unresolved'][index]
+            index = ui.promptchoice(_localchangedotherdeletedmsg % prompts, 2)
+            choice = [b'local', b'other', b'unresolved'][index]
         elif fcd.isabsent():
-            index = ui.promptchoice(
-                _otherchangedlocaldeletedmsg % prompts, 2)
-            choice = ['other', 'local', 'unresolved'][index]
+            index = ui.promptchoice(_otherchangedlocaldeletedmsg % prompts, 2)
+            choice = [b'other', b'local', b'unresolved'][index]
         else:
             # IMPORTANT: keep the last line of this prompt ("What do you want to
             # do?") very short, see comment next to _localchangedotherdeletedmsg
             # at the top of the file for details.
             index = ui.promptchoice(
-                _("file '%(fd)s' needs to be resolved.\n"
-                  "You can keep (l)ocal%(l)s, take (o)ther%(o)s, or leave "
-                  "(u)nresolved.\n"
-                  "What do you want to do?"
-                  "$$ &Local $$ &Other $$ &Unresolved") % prompts, 2)
-            choice = ['local', 'other', 'unresolved'][index]
+                _(
+                    b"file '%(fd)s' needs to be resolved.\n"
+                    b"You can keep (l)ocal%(l)s, take (o)ther%(o)s, or leave "
+                    b"(u)nresolved.\n"
+                    b"What do you want to do?"
+                    b"$$ &Local $$ &Other $$ &Unresolved"
+                )
+                % prompts,
+                2,
+            )
+            choice = [b'local', b'other', b'unresolved'][index]
 
-        if choice == 'other':
-            return _iother(repo, mynode, orig, fcd, fco, fca, toolconf,
-                           labels)
-        elif choice == 'local':
-            return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf,
-                           labels)
-        elif choice == 'unresolved':
-            return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf,
-                          labels)
+        if choice == b'other':
+            return _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
+        elif choice == b'local':
+            return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
+        elif choice == b'unresolved':
+            return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
     except error.ResponseExpected:
-        ui.write("\n")
-        return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf,
-                      labels)
+        ui.write(b"\n")
+        return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
 
-@internaltool('local', nomerge)
+
+@internaltool(b'local', nomerge)
 def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
     """Uses the local `p1()` version of files as the merged version."""
     return 0, fcd.isabsent()
 
-@internaltool('other', nomerge)
+
+@internaltool(b'other', nomerge)
 def _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
     """Uses the other `p2()` version of files as the merged version."""
     if fco.isabsent():
@@ -344,7 +373,8 @@
         deleted = False
     return 0, deleted
 
-@internaltool('fail', nomerge)
+
+@internaltool(b'fail', nomerge)
 def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
     """
     Rather than attempting to merge files that were modified on both
@@ -355,6 +385,7 @@
         _underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
     return 1, False
 
+
 def _underlyingfctxifabsent(filectx):
     """Sometimes when resolving, our fcd is actually an absentfilectx, but
     we want to write to it (to do the resolve). This helper returns the
@@ -365,6 +396,7 @@
     else:
         return filectx
 
+
 def _premerge(repo, fcd, fco, fca, toolconf, files, labels=None):
     tool, toolpath, binary, symlink, scriptfn = toolconf
     if symlink or fcd.isabsent() or fco.isabsent():
@@ -373,47 +405,57 @@
 
     ui = repo.ui
 
-    validkeep = ['keep', 'keep-merge3']
+    validkeep = [b'keep', b'keep-merge3']
 
     # do we attempt to simplemerge first?
     try:
-        premerge = _toolbool(ui, tool, "premerge", not binary)
+        premerge = _toolbool(ui, tool, b"premerge", not binary)
     except error.ConfigError:
-        premerge = _toolstr(ui, tool, "premerge", "").lower()
+        premerge = _toolstr(ui, tool, b"premerge", b"").lower()
         if premerge not in validkeep:
-            _valid = ', '.join(["'" + v + "'" for v in validkeep])
-            raise error.ConfigError(_("%s.premerge not valid "
-                                      "('%s' is neither boolean nor %s)") %
-                                    (tool, premerge, _valid))
+            _valid = b', '.join([b"'" + v + b"'" for v in validkeep])
+            raise error.ConfigError(
+                _(b"%s.premerge not valid ('%s' is neither boolean nor %s)")
+                % (tool, premerge, _valid)
+            )
 
     if premerge:
-        if premerge == 'keep-merge3':
+        if premerge == b'keep-merge3':
             if not labels:
                 labels = _defaultconflictlabels
             if len(labels) < 3:
-                labels.append('base')
+                labels.append(b'base')
         r = simplemerge.simplemerge(ui, fcd, fca, fco, quiet=True, label=labels)
         if not r:
-            ui.debug(" premerge successful\n")
+            ui.debug(b" premerge successful\n")
             return 0
         if premerge not in validkeep:
             # restore from backup and try again
             _restorebackup(fcd, back)
-    return 1 # continue merging
+    return 1  # continue merging
+
 
 def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf):
     tool, toolpath, binary, symlink, scriptfn = toolconf
     uipathfn = scmutil.getuipathfn(repo)
     if symlink:
-        repo.ui.warn(_('warning: internal %s cannot merge symlinks '
-                       'for %s\n') % (tool, uipathfn(fcd.path())))
+        repo.ui.warn(
+            _(b'warning: internal %s cannot merge symlinks for %s\n')
+            % (tool, uipathfn(fcd.path()))
+        )
         return False
     if fcd.isabsent() or fco.isabsent():
-        repo.ui.warn(_('warning: internal %s cannot merge change/delete '
-                       'conflict for %s\n') % (tool, uipathfn(fcd.path())))
+        repo.ui.warn(
+            _(
+                b'warning: internal %s cannot merge change/delete '
+                b'conflict for %s\n'
+            )
+            % (tool, uipathfn(fcd.path()))
+        )
         return False
     return True
 
+
 def _merge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, mode):
     """
     Uses the internal non-interactive simple merge algorithm for merging
@@ -425,35 +467,55 @@
     r = simplemerge.simplemerge(ui, fcd, fca, fco, label=labels, mode=mode)
     return True, r, False
 
-@internaltool('union', fullmerge,
-              _("warning: conflicts while merging %s! "
-                "(edit, then use 'hg resolve --mark')\n"),
-              precheck=_mergecheck)
+
+@internaltool(
+    b'union',
+    fullmerge,
+    _(
+        b"warning: conflicts while merging %s! "
+        b"(edit, then use 'hg resolve --mark')\n"
+    ),
+    precheck=_mergecheck,
+)
 def _iunion(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
     """
     Uses the internal non-interactive simple merge algorithm for merging
     files. It will use both left and right sides for conflict regions.
     No markers are inserted."""
-    return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
-                  files, labels, 'union')
+    return _merge(
+        repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'union'
+    )
+
 
-@internaltool('merge', fullmerge,
-              _("warning: conflicts while merging %s! "
-                "(edit, then use 'hg resolve --mark')\n"),
-              precheck=_mergecheck)
+@internaltool(
+    b'merge',
+    fullmerge,
+    _(
+        b"warning: conflicts while merging %s! "
+        b"(edit, then use 'hg resolve --mark')\n"
+    ),
+    precheck=_mergecheck,
+)
 def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
     """
     Uses the internal non-interactive simple merge algorithm for merging
     files. It will fail if there are any conflicts and leave markers in
     the partially merged file. Markers will have two sections, one for each
     side of the merge."""
-    return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
-                  files, labels, 'merge')
+    return _merge(
+        repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'merge'
+    )
+
 
-@internaltool('merge3', fullmerge,
-              _("warning: conflicts while merging %s! "
-                "(edit, then use 'hg resolve --mark')\n"),
-              precheck=_mergecheck)
+@internaltool(
+    b'merge3',
+    fullmerge,
+    _(
+        b"warning: conflicts while merging %s! "
+        b"(edit, then use 'hg resolve --mark')\n"
+    ),
+    precheck=_mergecheck,
+)
 def _imerge3(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
     """
     Uses the internal non-interactive simple merge algorithm for merging
@@ -463,39 +525,59 @@
     if not labels:
         labels = _defaultconflictlabels
     if len(labels) < 3:
-        labels.append('base')
+        labels.append(b'base')
     return _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels)
 
-def _imergeauto(repo, mynode, orig, fcd, fco, fca, toolconf, files,
-                labels=None, localorother=None):
+
+def _imergeauto(
+    repo,
+    mynode,
+    orig,
+    fcd,
+    fco,
+    fca,
+    toolconf,
+    files,
+    labels=None,
+    localorother=None,
+):
     """
     Generic driver for _imergelocal and _imergeother
     """
     assert localorother is not None
-    r = simplemerge.simplemerge(repo.ui, fcd, fca, fco, label=labels,
-                                localorother=localorother)
+    r = simplemerge.simplemerge(
+        repo.ui, fcd, fca, fco, label=labels, localorother=localorother
+    )
     return True, r
 
-@internaltool('merge-local', mergeonly, precheck=_mergecheck)
+
+@internaltool(b'merge-local', mergeonly, precheck=_mergecheck)
 def _imergelocal(*args, **kwargs):
     """
     Like :merge, but resolve all conflicts non-interactively in favor
     of the local `p1()` changes."""
-    success, status = _imergeauto(localorother='local', *args, **kwargs)
+    success, status = _imergeauto(localorother=b'local', *args, **kwargs)
     return success, status, False
 
-@internaltool('merge-other', mergeonly, precheck=_mergecheck)
+
+@internaltool(b'merge-other', mergeonly, precheck=_mergecheck)
 def _imergeother(*args, **kwargs):
     """
     Like :merge, but resolve all conflicts non-interactively in favor
     of the other `p2()` changes."""
-    success, status = _imergeauto(localorother='other', *args, **kwargs)
+    success, status = _imergeauto(localorother=b'other', *args, **kwargs)
     return success, status, False
 
-@internaltool('tagmerge', mergeonly,
-              _("automatic tag merging of %s failed! "
-                "(use 'hg resolve --tool :merge' or another merge "
-                "tool of your choice)\n"))
+
+@internaltool(
+    b'tagmerge',
+    mergeonly,
+    _(
+        b"automatic tag merging of %s failed! "
+        b"(use 'hg resolve --tool :merge' or another merge "
+        b"tool of your choice)\n"
+    ),
+)
 def _itagmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
     """
     Uses the internal tag merge algorithm (experimental).
@@ -503,7 +585,8 @@
     success, status = tagmerge.merge(repo, fcd, fco, fca)
     return success, status, False
 
-@internaltool('dump', fullmerge, binary=True, symlink=True)
+
+@internaltool(b'dump', fullmerge, binary=True, symlink=True)
 def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
     """
     Creates three versions of the files to merge, containing the
@@ -520,23 +603,27 @@
     fd = fcd.path()
 
     from . import context
-    if isinstance(fcd, context.overlayworkingfilectx):
-        raise error.InMemoryMergeConflictsError('in-memory merge does not '
-                                                'support the :dump tool.')
 
-    util.writefile(a + ".local", fcd.decodeddata())
-    repo.wwrite(fd + ".other", fco.data(), fco.flags())
-    repo.wwrite(fd + ".base", fca.data(), fca.flags())
+    if isinstance(fcd, context.overlayworkingfilectx):
+        raise error.InMemoryMergeConflictsError(
+            b'in-memory merge does not support the :dump tool.'
+        )
+
+    util.writefile(a + b".local", fcd.decodeddata())
+    repo.wwrite(fd + b".other", fco.data(), fco.flags())
+    repo.wwrite(fd + b".base", fca.data(), fca.flags())
     return False, 1, False
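
For illustration, the three sidecar files that ``:dump`` leaves next to the
merged file (the path below is invented)::

    fd = b'src/foo.c'
    sidecars = [fd + s for s in (b'.local', b'.other', b'.base')]
    assert sidecars == [
        b'src/foo.c.local',
        b'src/foo.c.other',
        b'src/foo.c.base',
    ]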
 
-@internaltool('forcedump', mergeonly, binary=True, symlink=True)
-def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files,
-                labels=None):
+
+@internaltool(b'forcedump', mergeonly, binary=True, symlink=True)
+def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
     """
     Creates three versions of the files, the same as :dump, but omits
     premerge.
     """
-    return _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files,
-                labels=labels)
+    return _idump(
+        repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=labels
+    )
+
 
 def _xmergeimm(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
     # In-memory merge simply raises an exception on all external merge tools,
@@ -547,36 +634,52 @@
     # file -- we can't leave a merge state. (Copy to somewhere in the .hg/
     # directory and tell the user how to get it is my best idea, but it's
     # clunky.)
-    raise error.InMemoryMergeConflictsError('in-memory merge does not support '
-                                            'external merge tools')
+    raise error.InMemoryMergeConflictsError(
+        b'in-memory merge does not support external merge tools'
+    )
+
 
 def _describemerge(ui, repo, mynode, fcl, fcb, fco, env, toolpath, args):
-    tmpl = ui.config('ui', 'pre-merge-tool-output-template')
+    tmpl = ui.config(b'ui', b'pre-merge-tool-output-template')
     if not tmpl:
         return
 
     mappingdict = templateutil.mappingdict
-    props = {'ctx': fcl.changectx(),
-             'node': hex(mynode),
-             'path': fcl.path(),
-             'local': mappingdict({'ctx': fcl.changectx(),
-                                   'fctx': fcl,
-                                   'node': hex(mynode),
-                                   'name': _('local'),
-                                   'islink': 'l' in fcl.flags(),
-                                   'label': env['HG_MY_LABEL']}),
-             'base': mappingdict({'ctx': fcb.changectx(),
-                                  'fctx': fcb,
-                                  'name': _('base'),
-                                  'islink': 'l' in fcb.flags(),
-                                  'label': env['HG_BASE_LABEL']}),
-             'other': mappingdict({'ctx': fco.changectx(),
-                                   'fctx': fco,
-                                   'name': _('other'),
-                                   'islink': 'l' in fco.flags(),
-                                   'label': env['HG_OTHER_LABEL']}),
-             'toolpath': toolpath,
-             'toolargs': args}
+    props = {
+        b'ctx': fcl.changectx(),
+        b'node': hex(mynode),
+        b'path': fcl.path(),
+        b'local': mappingdict(
+            {
+                b'ctx': fcl.changectx(),
+                b'fctx': fcl,
+                b'node': hex(mynode),
+                b'name': _(b'local'),
+                b'islink': b'l' in fcl.flags(),
+                b'label': env[b'HG_MY_LABEL'],
+            }
+        ),
+        b'base': mappingdict(
+            {
+                b'ctx': fcb.changectx(),
+                b'fctx': fcb,
+                b'name': _(b'base'),
+                b'islink': b'l' in fcb.flags(),
+                b'label': env[b'HG_BASE_LABEL'],
+            }
+        ),
+        b'other': mappingdict(
+            {
+                b'ctx': fco.changectx(),
+                b'fctx': fco,
+                b'name': _(b'other'),
+                b'islink': b'l' in fco.flags(),
+                b'label': env[b'HG_OTHER_LABEL'],
+            }
+        ),
+        b'toolpath': toolpath,
+        b'toolargs': args,
+    }
 
     # TODO: make all of this something that can be specified on a per-tool basis
     tmpl = templater.unquotestring(tmpl)
@@ -584,87 +687,112 @@
     # Not using cmdutil.rendertemplate here since it causes errors importing
     # things for us to import cmdutil.
     tres = formatter.templateresources(ui, repo)
-    t = formatter.maketemplater(ui, tmpl, defaults=templatekw.keywords,
-                                resources=tres)
+    t = formatter.maketemplater(
+        ui, tmpl, defaults=templatekw.keywords, resources=tres
+    )
     ui.status(t.renderdefault(props))
 
+
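The ``props`` mapping built above supplies the keywords available to
``ui.pre-merge-tool-output-template``. A standalone sketch of the idea,
substituting plain ``%``-formatting for Mercurial's templater (template
text and paths are invented)::

    props = {b'path': b'src/foo.c', b'toolpath': b'/usr/bin/kdiff3'}
    tmpl = b'running merge tool %(toolpath)s for file %(path)s\n'
    out = tmpl % props
    assert out == b'running merge tool /usr/bin/kdiff3 for file src/foo.c\n'
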
 def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
     tool, toolpath, binary, symlink, scriptfn = toolconf
     uipathfn = scmutil.getuipathfn(repo)
     if fcd.isabsent() or fco.isabsent():
-        repo.ui.warn(_('warning: %s cannot merge change/delete conflict '
-                       'for %s\n') % (tool, uipathfn(fcd.path())))
+        repo.ui.warn(
+            _(b'warning: %s cannot merge change/delete conflict for %s\n')
+            % (tool, uipathfn(fcd.path()))
+        )
         return False, 1, None
     unused, unused, unused, back = files
     localpath = _workingpath(repo, fcd)
-    args = _toolstr(repo.ui, tool, "args")
+    args = _toolstr(repo.ui, tool, b"args")
 
-    with _maketempfiles(repo, fco, fca, repo.wvfs.join(back.path()),
-                        "$output" in args) as temppaths:
+    with _maketempfiles(
+        repo, fco, fca, repo.wvfs.join(back.path()), b"$output" in args
+    ) as temppaths:
         basepath, otherpath, localoutputpath = temppaths
-        outpath = ""
+        outpath = b""
         mylabel, otherlabel = labels[:2]
         if len(labels) >= 3:
             baselabel = labels[2]
         else:
-            baselabel = 'base'
-        env = {'HG_FILE': fcd.path(),
-               'HG_MY_NODE': short(mynode),
-               'HG_OTHER_NODE': short(fco.changectx().node()),
-               'HG_BASE_NODE': short(fca.changectx().node()),
-               'HG_MY_ISLINK': 'l' in fcd.flags(),
-               'HG_OTHER_ISLINK': 'l' in fco.flags(),
-               'HG_BASE_ISLINK': 'l' in fca.flags(),
-               'HG_MY_LABEL': mylabel,
-               'HG_OTHER_LABEL': otherlabel,
-               'HG_BASE_LABEL': baselabel,
-               }
+            baselabel = b'base'
+        env = {
+            b'HG_FILE': fcd.path(),
+            b'HG_MY_NODE': short(mynode),
+            b'HG_OTHER_NODE': short(fco.changectx().node()),
+            b'HG_BASE_NODE': short(fca.changectx().node()),
+            b'HG_MY_ISLINK': b'l' in fcd.flags(),
+            b'HG_OTHER_ISLINK': b'l' in fco.flags(),
+            b'HG_BASE_ISLINK': b'l' in fca.flags(),
+            b'HG_MY_LABEL': mylabel,
+            b'HG_OTHER_LABEL': otherlabel,
+            b'HG_BASE_LABEL': baselabel,
+        }
         ui = repo.ui
 
-        if "$output" in args:
+        if b"$output" in args:
             # read input from backup, write to original
             outpath = localpath
             localpath = localoutputpath
-        replace = {'local': localpath, 'base': basepath, 'other': otherpath,
-                   'output': outpath, 'labellocal': mylabel,
-                   'labelother': otherlabel, 'labelbase': baselabel}
+        replace = {
+            b'local': localpath,
+            b'base': basepath,
+            b'other': otherpath,
+            b'output': outpath,
+            b'labellocal': mylabel,
+            b'labelother': otherlabel,
+            b'labelbase': baselabel,
+        }
         args = util.interpolate(
-            br'\$', replace, args,
-            lambda s: procutil.shellquote(util.localpath(s)))
-        if _toolbool(ui, tool, "gui"):
-            repo.ui.status(_('running merge tool %s for file %s\n') %
-                           (tool, uipathfn(fcd.path())))
+            br'\$',
+            replace,
+            args,
+            lambda s: procutil.shellquote(util.localpath(s)),
+        )
+        if _toolbool(ui, tool, b"gui"):
+            repo.ui.status(
+                _(b'running merge tool %s for file %s\n')
+                % (tool, uipathfn(fcd.path()))
+            )
         if scriptfn is None:
-            cmd = toolpath + ' ' + args
-            repo.ui.debug('launching merge tool: %s\n' % cmd)
+            cmd = toolpath + b' ' + args
+            repo.ui.debug(b'launching merge tool: %s\n' % cmd)
             _describemerge(ui, repo, mynode, fcd, fca, fco, env, toolpath, args)
-            r = ui.system(cmd, cwd=repo.root, environ=env,
-                          blockedtag='mergetool')
+            r = ui.system(
+                cmd, cwd=repo.root, environ=env, blockedtag=b'mergetool'
+            )
         else:
-            repo.ui.debug('launching python merge script: %s:%s\n' %
-                          (toolpath, scriptfn))
+            repo.ui.debug(
+                b'launching python merge script: %s:%s\n' % (toolpath, scriptfn)
+            )
             r = 0
             try:
                 # avoid cycle cmdutil->merge->filemerge->extensions->cmdutil
                 from . import extensions
-                mod = extensions.loadpath(toolpath, 'hgmerge.%s' % tool)
+
+                mod = extensions.loadpath(toolpath, b'hgmerge.%s' % tool)
             except Exception:
-                raise error.Abort(_("loading python merge script failed: %s") %
-                                  toolpath)
+                raise error.Abort(
+                    _(b"loading python merge script failed: %s") % toolpath
+                )
             mergefn = getattr(mod, scriptfn, None)
             if mergefn is None:
-                raise error.Abort(_("%s does not have function: %s") %
-                                  (toolpath, scriptfn))
+                raise error.Abort(
+                    _(b"%s does not have function: %s") % (toolpath, scriptfn)
+                )
             argslist = procutil.shellsplit(args)
             # avoid cycle cmdutil->merge->filemerge->hook->extensions->cmdutil
             from . import hook
-            ret, raised = hook.pythonhook(ui, repo, "merge", toolpath,
-                                          mergefn, {'args': argslist}, True)
+
+            ret, raised = hook.pythonhook(
+                ui, repo, b"merge", toolpath, mergefn, {b'args': argslist}, True
+            )
             if raised:
                 r = 1
-        repo.ui.debug('merge tool returned: %d\n' % r)
+        repo.ui.debug(b'merge tool returned: %d\n' % r)
         return True, r, False
 
+
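A standalone sketch of the ``$local``/``$base``/``$other``/``$output``
substitution performed above; the real code goes through
``util.interpolate`` with shell quoting, which this simplified stand-in
skips (paths are invented)::

    import re

    replace = {
        b'local': b'foo.c.orig',
        b'base': b'/tmp/foo~base.c',
        b'other': b'/tmp/foo~other.c',
        b'output': b'foo.c',
    }
    pat = re.compile(b'\\$(' + b'|'.join(replace) + b')')
    args = pat.sub(lambda m: replace[m.group(1)],
                   b'$local $base $other -o $output')
    assert args == b'foo.c.orig /tmp/foo~base.c /tmp/foo~other.c -o foo.c'
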
 def _formatconflictmarker(ctx, template, label, pad):
     """Applies the given template to the ctx, prefixed by the label.
 
@@ -674,19 +802,21 @@
     if ctx.node() is None:
         ctx = ctx.p1()
 
-    props = {'ctx': ctx}
+    props = {b'ctx': ctx}
     templateresult = template.renderdefault(props)
 
-    label = ('%s:' % label).ljust(pad + 1)
-    mark = '%s %s' % (label, templateresult)
+    label = (b'%s:' % label).ljust(pad + 1)
+    mark = b'%s %s' % (label, templateresult)
 
     if mark:
-        mark = mark.splitlines()[0] # split for safety
+        mark = mark.splitlines()[0]  # split for safety
 
     # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
     return stringutil.ellipsis(mark, 80 - 8)
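
A runnable sketch of the marker layout computed above: labels are padded so
the templated descriptions line up, and the result is clipped to 72 columns
to leave room for the 8-character ``'<<<<<<< '`` prefix (revision text is
invented)::

    labels = [b'local', b'other', b'base']
    pad = max(len(l) for l in labels)
    for label, desc in [(b'local', b'42:ff32ac12 tip'), (b'base', b'40:1f0a3b5c')]:
        mark = b'%s %s' % ((b'%s:' % label).ljust(pad + 1), desc)
        print(mark[:72].decode())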
 
-_defaultconflictlabels = ['local', 'other']
+
+_defaultconflictlabels = [b'local', b'other']
+
 
 def _formatlabels(repo, fcd, fco, fca, labels, tool=None):
     """Formats the given labels using the conflict marker template.
@@ -698,22 +828,26 @@
     ca = fca.changectx()
 
     ui = repo.ui
-    template = ui.config('ui', 'mergemarkertemplate')
+    template = ui.config(b'ui', b'mergemarkertemplate')
     if tool is not None:
-        template = _toolstr(ui, tool, 'mergemarkertemplate', template)
+        template = _toolstr(ui, tool, b'mergemarkertemplate', template)
     template = templater.unquotestring(template)
     tres = formatter.templateresources(ui, repo)
-    tmpl = formatter.maketemplater(ui, template, defaults=templatekw.keywords,
-                                   resources=tres)
+    tmpl = formatter.maketemplater(
+        ui, template, defaults=templatekw.keywords, resources=tres
+    )
 
     pad = max(len(l) for l in labels)
 
-    newlabels = [_formatconflictmarker(cd, tmpl, labels[0], pad),
-                 _formatconflictmarker(co, tmpl, labels[1], pad)]
+    newlabels = [
+        _formatconflictmarker(cd, tmpl, labels[0], pad),
+        _formatconflictmarker(co, tmpl, labels[1], pad),
+    ]
     if len(labels) > 2:
         newlabels.append(_formatconflictmarker(ca, tmpl, labels[2], pad))
     return newlabels
 
+
 def partextras(labels):
     """Return a dictionary of extra labels for use in prompts to the user
 
@@ -721,20 +855,22 @@
     """
     if labels is None:
         return {
-            "l": "",
-            "o": "",
+            b"l": b"",
+            b"o": b"",
         }
 
     return {
-        "l": " [%s]" % labels[0],
-        "o": " [%s]" % labels[1],
+        b"l": b" [%s]" % labels[0],
+        b"o": b" [%s]" % labels[1],
     }
 
+
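``partextras()`` feeds prompt strings written with ``%(l)s``/``%(o)s``
placeholders; a runnable sketch with invented labels::

    labels = [b'working copy', b'merge rev']
    extras = {b'l': b' [%s]' % labels[0], b'o': b' [%s]' % labels[1]}
    prompt = b'keep (l)ocal%(l)s or take (o)ther%(o)s?' % extras
    assert prompt == b'keep (l)ocal [working copy] or take (o)ther [merge rev]?'
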
 def _restorebackup(fcd, back):
     # TODO: Add a workingfilectx.write(otherfilectx) path so we can use
     # util.copy here instead.
     fcd.write(back.data(), fcd.flags())
 
+
 def _makebackup(repo, ui, wctx, fcd, premerge):
     """Makes and returns a filectx-like object for ``fcd``'s backup file.
 
@@ -751,14 +887,16 @@
     # TODO: Break this import cycle somehow. (filectx -> ctx -> fileset ->
     # merge -> filemerge). (I suspect the fileset import is the weakest link)
     from . import context
+
     back = scmutil.backuppath(ui, repo, fcd.path())
-    inworkingdir = (back.startswith(repo.wvfs.base) and not
-        back.startswith(repo.vfs.base))
+    inworkingdir = back.startswith(repo.wvfs.base) and not back.startswith(
+        repo.vfs.base
+    )
     if isinstance(fcd, context.overlayworkingfilectx) and inworkingdir:
         # If the backup file is to be in the working directory, and we're
         # merging in-memory, we must redirect the backup to the memory context
         # so we don't disturb the working directory.
-        relpath = back[len(repo.wvfs.base) + 1:]
+        relpath = back[len(repo.wvfs.base) + 1 :]
         if premerge:
             wctx[relpath].write(fcd.data(), fcd.flags())
         return wctx[relpath]
@@ -777,6 +915,7 @@
         # the backup context regardless of where it lives.
         return context.arbitraryfilectx(back, repo=repo)
 
+
 @contextlib.contextmanager
 def _maketempfiles(repo, fco, fca, localpath, uselocalpath):
     """Writes out `fco` and `fca` as temporary files, and (if uselocalpath)
@@ -784,20 +923,20 @@
     use them.
     """
     tmproot = None
-    tmprootprefix = repo.ui.config('experimental', 'mergetempdirprefix')
+    tmprootprefix = repo.ui.config(b'experimental', b'mergetempdirprefix')
     if tmprootprefix:
         tmproot = pycompat.mkdtemp(prefix=tmprootprefix)
 
     def maketempfrompath(prefix, path):
         fullbase, ext = os.path.splitext(path)
-        pre = "%s~%s" % (os.path.basename(fullbase), prefix)
+        pre = b"%s~%s" % (os.path.basename(fullbase), prefix)
         if tmproot:
             name = os.path.join(tmproot, pre)
             if ext:
                 name += ext
             f = open(name, r"wb")
         else:
-            fd, name = pycompat.mkstemp(prefix=pre + '.', suffix=ext)
+            fd, name = pycompat.mkstemp(prefix=pre + b'.', suffix=ext)
             f = os.fdopen(fd, r"wb")
         return f, name
 
@@ -808,16 +947,16 @@
         f.close()
         return name
 
-    b = tempfromcontext("base", fca)
-    c = tempfromcontext("other", fco)
+    b = tempfromcontext(b"base", fca)
+    c = tempfromcontext(b"other", fco)
     d = localpath
     if uselocalpath:
         # We start off with this being the backup filename, so remove the .orig
         # to make syntax-highlighting more likely.
-        if d.endswith('.orig'):
+        if d.endswith(b'.orig'):
             d, _ = os.path.splitext(d)
-        f, d = maketempfrompath("local", d)
-        with open(localpath, 'rb') as src:
+        f, d = maketempfrompath(b"local", d)
+        with open(localpath, b'rb') as src:
             f.write(src.read())
         f.close()
 
@@ -834,6 +973,7 @@
             if d and uselocalpath:
                 util.unlink(d)
 
+
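A standalone sketch of the temporary-file naming used above; keeping the
original extension as the suffix makes it more likely that a merge tool
will syntax-highlight the inputs correctly (the path is invented)::

    import os
    import tempfile

    def maketempfrompath(prefix, path):
        fullbase, ext = os.path.splitext(path)
        pre = '%s~%s' % (os.path.basename(fullbase), prefix)
        fd, name = tempfile.mkstemp(prefix=pre + '.', suffix=ext)
        os.close(fd)
        return name

    name = maketempfrompath('base', '/repo/src/foo.c')
    print(os.path.basename(name))  # e.g. foo~base.w1kfmd2q.c
    os.unlink(name)
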
 def _filemerge(premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
     """perform a 3-way merge in the working directory
 
@@ -847,7 +987,7 @@
     Returns whether the merge is complete, the return value of the merge, and
     a boolean indicating whether the file was deleted from disk."""
 
-    if not fco.cmp(fcd): # files identical?
+    if not fco.cmp(fcd):  # files identical?
         return True, None, False
 
     ui = repo.ui
@@ -855,30 +995,37 @@
     uipathfn = scmutil.getuipathfn(repo)
     fduipath = uipathfn(fd)
     binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
-    symlink = 'l' in fcd.flags() + fco.flags()
+    symlink = b'l' in fcd.flags() + fco.flags()
     changedelete = fcd.isabsent() or fco.isabsent()
     tool, toolpath = _picktool(repo, ui, fd, binary, symlink, changedelete)
     scriptfn = None
-    if tool in internals and tool.startswith('internal:'):
+    if tool in internals and tool.startswith(b'internal:'):
         # normalize to new-style names (':merge' etc)
-        tool = tool[len('internal'):]
-    if toolpath and toolpath.startswith('python:'):
+        tool = tool[len(b'internal') :]
+    if toolpath and toolpath.startswith(b'python:'):
         invalidsyntax = False
-        if toolpath.count(':') >= 2:
-            script, scriptfn = toolpath[7:].rsplit(':', 1)
+        if toolpath.count(b':') >= 2:
+            script, scriptfn = toolpath[7:].rsplit(b':', 1)
             if not scriptfn:
                 invalidsyntax = True
             # missing :callable can lead to splitting on a Windows drive letter
-            if '\\' in scriptfn or '/' in scriptfn:
+            if b'\\' in scriptfn or b'/' in scriptfn:
                 invalidsyntax = True
         else:
             invalidsyntax = True
         if invalidsyntax:
-            raise error.Abort(_("invalid 'python:' syntax: %s") % toolpath)
+            raise error.Abort(_(b"invalid 'python:' syntax: %s") % toolpath)
         toolpath = script
-    ui.debug("picked tool '%s' for %s (binary %s symlink %s changedelete %s)\n"
-             % (tool, fduipath, pycompat.bytestr(binary),
-                pycompat.bytestr(symlink), pycompat.bytestr(changedelete)))
+    ui.debug(
+        b"picked tool '%s' for %s (binary %s symlink %s changedelete %s)\n"
+        % (
+            tool,
+            fduipath,
+            pycompat.bytestr(binary),
+            pycompat.bytestr(symlink),
+            pycompat.bytestr(changedelete),
+        )
+    )
 
     if tool in internals:
         func = internals[tool]
@@ -892,7 +1039,7 @@
         else:
             func = _xmerge
         mergetype = fullmerge
-        onfailure = _("merging %s failed!\n")
+        onfailure = _(b"merging %s failed!\n")
         precheck = None
         isexternal = True
 
@@ -904,20 +1051,21 @@
 
     if premerge:
         if orig != fco.path():
-            ui.status(_("merging %s and %s to %s\n") %
-                      (uipathfn(orig), uipathfn(fco.path()), fduipath))
+            ui.status(
+                _(b"merging %s and %s to %s\n")
+                % (uipathfn(orig), uipathfn(fco.path()), fduipath)
+            )
         else:
-            ui.status(_("merging %s\n") % fduipath)
+            ui.status(_(b"merging %s\n") % fduipath)
 
-    ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))
+    ui.debug(b"my %s other %s ancestor %s\n" % (fcd, fco, fca))
 
-    if precheck and not precheck(repo, mynode, orig, fcd, fco, fca,
-                                 toolconf):
+    if precheck and not precheck(repo, mynode, orig, fcd, fco, fca, toolconf):
         if onfailure:
             if wctx.isinmemory():
-                raise error.InMemoryMergeConflictsError('in-memory merge does '
-                                                        'not support merge '
-                                                        'conflicts')
+                raise error.InMemoryMergeConflictsError(
+                    b'in-memory merge does not support merge conflicts'
+                )
             ui.warn(onfailure % fduipath)
         return True, 1, False
 
@@ -925,18 +1073,19 @@
     files = (None, None, None, back)
     r = 1
     try:
-        internalmarkerstyle = ui.config('ui', 'mergemarkers')
+        internalmarkerstyle = ui.config(b'ui', b'mergemarkers')
         if isexternal:
-            markerstyle = _toolstr(ui, tool, 'mergemarkers')
+            markerstyle = _toolstr(ui, tool, b'mergemarkers')
         else:
             markerstyle = internalmarkerstyle
 
         if not labels:
             labels = _defaultconflictlabels
         formattedlabels = labels
-        if markerstyle != 'basic':
-            formattedlabels = _formatlabels(repo, fcd, fco, fca, labels,
-                                            tool=tool)
+        if markerstyle != b'basic':
+            formattedlabels = _formatlabels(
+                repo, fcd, fco, fca, labels, tool=tool
+            )
 
         if premerge and mergetype == fullmerge:
             # conflict markers generated by premerge will use 'detailed'
@@ -946,21 +1095,32 @@
             # in conflict markers if premerge is 'keep' or 'keep-merge3'.
             premergelabels = labels
             labeltool = None
-            if markerstyle != 'basic':
+            if markerstyle != b'basic':
                 # respect 'tool's mergemarkertemplate (which defaults to
                 # ui.mergemarkertemplate)
                 labeltool = tool
-            if internalmarkerstyle != 'basic' or markerstyle != 'basic':
-                premergelabels = _formatlabels(repo, fcd, fco, fca,
-                                               premergelabels, tool=labeltool)
+            if internalmarkerstyle != b'basic' or markerstyle != b'basic':
+                premergelabels = _formatlabels(
+                    repo, fcd, fco, fca, premergelabels, tool=labeltool
+                )
 
-            r = _premerge(repo, fcd, fco, fca, toolconf, files,
-                          labels=premergelabels)
+            r = _premerge(
+                repo, fcd, fco, fca, toolconf, files, labels=premergelabels
+            )
             # complete if premerge successful (r is 0)
             return not r, r, False
 
-        needcheck, r, deleted = func(repo, mynode, orig, fcd, fco, fca,
-                                     toolconf, files, labels=formattedlabels)
+        needcheck, r, deleted = func(
+            repo,
+            mynode,
+            orig,
+            fcd,
+            fco,
+            fca,
+            toolconf,
+            files,
+            labels=formattedlabels,
+        )
 
         if needcheck:
             r = _check(repo, r, ui, tool, fcd, files)
@@ -968,9 +1128,11 @@
         if r:
             if onfailure:
                 if wctx.isinmemory():
-                    raise error.InMemoryMergeConflictsError('in-memory merge '
-                                                            'does not support '
-                                                            'merge conflicts')
+                    raise error.InMemoryMergeConflictsError(
+                        b'in-memory merge '
+                        b'does not support '
+                        b'merge conflicts'
+                    )
                 ui.warn(onfailure % fduipath)
             _onfilemergefailure(ui)
 
@@ -979,92 +1141,121 @@
         if not r and back is not None:
             back.remove()
 
+
 def _haltmerge():
-    msg = _('merge halted after failed merge (see hg resolve)')
+    msg = _(b'merge halted after failed merge (see hg resolve)')
     raise error.InterventionRequired(msg)
 
+
 def _onfilemergefailure(ui):
-    action = ui.config('merge', 'on-failure')
-    if action == 'prompt':
-        msg = _('continue merge operation (yn)?' '$$ &Yes $$ &No')
+    action = ui.config(b'merge', b'on-failure')
+    if action == b'prompt':
+        msg = _(b'continue merge operation (yn)?$$ &Yes $$ &No')
         if ui.promptchoice(msg, 0) == 1:
             _haltmerge()
-    if action == 'halt':
+    if action == b'halt':
         _haltmerge()
     # default action is 'continue', in which case we neither prompt nor halt
 
+
 def hasconflictmarkers(data):
-    return bool(re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", data,
-                          re.MULTILINE))
+    return bool(
+        re.search(b"^(<<<<<<< .*|=======|>>>>>>> .*)$", data, re.MULTILINE)
+    )
+
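A quick standalone check of the marker regexp above against synthetic merge
output::

    import re

    def hasconflictmarkers(data):
        return bool(
            re.search(b"^(<<<<<<< .*|=======|>>>>>>> .*)$", data, re.MULTILINE)
        )

    assert hasconflictmarkers(b"<<<<<<< local\na\n=======\nb\n>>>>>>> other\n")
    assert not hasconflictmarkers(b"plain text\n")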
 
 def _check(repo, r, ui, tool, fcd, files):
     fd = fcd.path()
     uipathfn = scmutil.getuipathfn(repo)
     unused, unused, unused, back = files
 
-    if not r and (_toolbool(ui, tool, "checkconflicts") or
-                  'conflicts' in _toollist(ui, tool, "check")):
+    if not r and (
+        _toolbool(ui, tool, b"checkconflicts")
+        or b'conflicts' in _toollist(ui, tool, b"check")
+    ):
         if hasconflictmarkers(fcd.data()):
             r = 1
 
     checked = False
-    if 'prompt' in _toollist(ui, tool, "check"):
+    if b'prompt' in _toollist(ui, tool, b"check"):
         checked = True
-        if ui.promptchoice(_("was merge of '%s' successful (yn)?"
-                             "$$ &Yes $$ &No") % uipathfn(fd), 1):
+        if ui.promptchoice(
+            _(b"was merge of '%s' successful (yn)?$$ &Yes $$ &No")
+            % uipathfn(fd),
+            1,
+        ):
             r = 1
 
-    if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
-                                  'changed' in
-                                  _toollist(ui, tool, "check")):
+    if (
+        not r
+        and not checked
+        and (
+            _toolbool(ui, tool, b"checkchanged")
+            or b'changed' in _toollist(ui, tool, b"check")
+        )
+    ):
         if back is not None and not fcd.cmp(back):
-            if ui.promptchoice(_(" output file %s appears unchanged\n"
-                                 "was merge successful (yn)?"
-                                 "$$ &Yes $$ &No") % uipathfn(fd), 1):
+            if ui.promptchoice(
+                _(
+                    b" output file %s appears unchanged\n"
+                    b"was merge successful (yn)?"
+                    b"$$ &Yes $$ &No"
+                )
+                % uipathfn(fd),
+                1,
+            ):
                 r = 1
 
-    if back is not None and _toolbool(ui, tool, "fixeol"):
+    if back is not None and _toolbool(ui, tool, b"fixeol"):
         _matcheol(_workingpath(repo, fcd), back)
 
     return r
 
+
 def _workingpath(repo, ctx):
     return repo.wjoin(ctx.path())
 
+
 def premerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
-    return _filemerge(True, repo, wctx, mynode, orig, fcd, fco, fca,
-                      labels=labels)
+    return _filemerge(
+        True, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
+    )
+
 
 def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
-    return _filemerge(False, repo, wctx, mynode, orig, fcd, fco, fca,
-                      labels=labels)
+    return _filemerge(
+        False, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
+    )
+
 
 def loadinternalmerge(ui, extname, registrarobj):
     """Load internal merge tool from specified registrarobj
     """
-    for name, func in registrarobj._table.iteritems():
-        fullname = ':' + name
+    for name, func in pycompat.iteritems(registrarobj._table):
+        fullname = b':' + name
         internals[fullname] = func
-        internals['internal:' + name] = func
+        internals[b'internal:' + name] = func
         internalsdoc[fullname] = func
 
         capabilities = sorted([k for k, v in func.capabilities.items() if v])
         if capabilities:
-            capdesc = "    (actual capabilities: %s)" % ', '.join(capabilities)
-            func.__doc__ = (func.__doc__ +
-                            pycompat.sysstr("\n\n%s" % capdesc))
+            capdesc = b"    (actual capabilities: %s)" % b', '.join(
+                capabilities
+            )
+            func.__doc__ = func.__doc__ + pycompat.sysstr(b"\n\n%s" % capdesc)
 
     # to put i18n comments into hg.pot for automatically generated texts
 
     # i18n: "binary" and "symlink" are keywords
     # i18n: this text is added automatically
-    _("    (actual capabilities: binary, symlink)")
+    _(b"    (actual capabilities: binary, symlink)")
     # i18n: "binary" is keyword
     # i18n: this text is added automatically
-    _("    (actual capabilities: binary)")
+    _(b"    (actual capabilities: binary)")
     # i18n: "symlink" is keyword
     # i18n: this text is added automatically
-    _("    (actual capabilities: symlink)")
+    _(b"    (actual capabilities: symlink)")
+
 
 # load built-in merge tools explicitly to setup internalsdoc
 loadinternalmerge(None, None, internaltool)
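
A standalone sketch of the aliasing performed by ``loadinternalmerge()``:
every registered tool becomes reachable both as ``:name`` and as the
old-style ``internal:name``::

    table = {b'merge': object(), b'union': object()}
    internals = {}
    for name, func in table.items():
        internals[b':' + name] = func
        internals[b'internal:' + name] = func
    assert internals[b':merge'] is internals[b'internal:merge']
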
--- a/mercurial/fileset.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/fileset.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,6 +11,7 @@
 import re
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     error,
     filesetlang,
@@ -21,9 +22,7 @@
     scmutil,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
 # common weight constants
 _WEIGHT_CHECK_FILENAME = filesetlang.WEIGHT_CHECK_FILENAME
@@ -38,49 +37,67 @@
 getpattern = filesetlang.getpattern
 getargs = filesetlang.getargs
 
+
 def getmatch(mctx, x):
     if not x:
-        raise error.ParseError(_("missing argument"))
+        raise error.ParseError(_(b"missing argument"))
     return methods[x[0]](mctx, *x[1:])
 
+
 def getmatchwithstatus(mctx, x, hint):
-    keys = set(getstring(hint, 'status hint must be a string').split())
+    keys = set(getstring(hint, b'status hint must be a string').split())
     return getmatch(mctx.withstatus(keys), x)
 
+
 def stringmatch(mctx, x):
     return mctx.matcher([x])
 
+
 def kindpatmatch(mctx, x, y):
-    return stringmatch(mctx, _getkindpat(x, y, matchmod.allpatternkinds,
-                                         _("pattern must be a string")))
+    return stringmatch(
+        mctx,
+        _getkindpat(
+            x, y, matchmod.allpatternkinds, _(b"pattern must be a string")
+        ),
+    )
+
 
 def patternsmatch(mctx, *xs):
     allkinds = matchmod.allpatternkinds
-    patterns = [getpattern(x, allkinds, _("pattern must be a string"))
-                for x in xs]
+    patterns = [
+        getpattern(x, allkinds, _(b"pattern must be a string")) for x in xs
+    ]
     return mctx.matcher(patterns)
 
+
 def andmatch(mctx, x, y):
     xm = getmatch(mctx, x)
     ym = getmatch(mctx.narrowed(xm), y)
     return matchmod.intersectmatchers(xm, ym)
 
+
 def ormatch(mctx, *xs):
     ms = [getmatch(mctx, x) for x in xs]
     return matchmod.unionmatcher(ms)
 
+
 def notmatch(mctx, x):
     m = getmatch(mctx, x)
-    return mctx.predicate(lambda f: not m(f), predrepr=('<not %r>', m))
+    return mctx.predicate(lambda f: not m(f), predrepr=(b'<not %r>', m))
+
 
 def minusmatch(mctx, x, y):
     xm = getmatch(mctx, x)
     ym = getmatch(mctx.narrowed(xm), y)
     return matchmod.differencematcher(xm, ym)
 
+
 def listmatch(mctx, *xs):
-    raise error.ParseError(_("can't use a list in this context"),
-                           hint=_('see \'hg help "filesets.x or y"\''))
+    raise error.ParseError(
+        _(b"can't use a list in this context"),
+        hint=_(b'see \'hg help "filesets.x or y"\''),
+    )
+
 
 def func(mctx, a, b):
     funcname = getsymbol(a)
@@ -92,6 +109,7 @@
     syms = [s for (s, fn) in symbols.items() if keep(fn)]
     raise error.UnknownIdentifier(funcname, syms)
 
+
 # symbols are callable like:
 #  fun(mctx, x)
 # with:
@@ -101,164 +119,190 @@
 
 predicate = registrar.filesetpredicate(symbols)
 
-@predicate('modified()', callstatus=True, weight=_WEIGHT_STATUS)
+
+@predicate(b'modified()', callstatus=True, weight=_WEIGHT_STATUS)
 def modified(mctx, x):
     """File that is modified according to :hg:`status`.
     """
     # i18n: "modified" is a keyword
-    getargs(x, 0, 0, _("modified takes no arguments"))
+    getargs(x, 0, 0, _(b"modified takes no arguments"))
     s = set(mctx.status().modified)
-    return mctx.predicate(s.__contains__, predrepr='modified')
+    return mctx.predicate(s.__contains__, predrepr=b'modified')
 
-@predicate('added()', callstatus=True, weight=_WEIGHT_STATUS)
+
+@predicate(b'added()', callstatus=True, weight=_WEIGHT_STATUS)
 def added(mctx, x):
     """File that is added according to :hg:`status`.
     """
     # i18n: "added" is a keyword
-    getargs(x, 0, 0, _("added takes no arguments"))
+    getargs(x, 0, 0, _(b"added takes no arguments"))
     s = set(mctx.status().added)
-    return mctx.predicate(s.__contains__, predrepr='added')
+    return mctx.predicate(s.__contains__, predrepr=b'added')
 
-@predicate('removed()', callstatus=True, weight=_WEIGHT_STATUS)
+
+@predicate(b'removed()', callstatus=True, weight=_WEIGHT_STATUS)
 def removed(mctx, x):
     """File that is removed according to :hg:`status`.
     """
     # i18n: "removed" is a keyword
-    getargs(x, 0, 0, _("removed takes no arguments"))
+    getargs(x, 0, 0, _(b"removed takes no arguments"))
     s = set(mctx.status().removed)
-    return mctx.predicate(s.__contains__, predrepr='removed')
+    return mctx.predicate(s.__contains__, predrepr=b'removed')
 
-@predicate('deleted()', callstatus=True, weight=_WEIGHT_STATUS)
+
+@predicate(b'deleted()', callstatus=True, weight=_WEIGHT_STATUS)
 def deleted(mctx, x):
     """Alias for ``missing()``.
     """
     # i18n: "deleted" is a keyword
-    getargs(x, 0, 0, _("deleted takes no arguments"))
+    getargs(x, 0, 0, _(b"deleted takes no arguments"))
     s = set(mctx.status().deleted)
-    return mctx.predicate(s.__contains__, predrepr='deleted')
+    return mctx.predicate(s.__contains__, predrepr=b'deleted')
 
-@predicate('missing()', callstatus=True, weight=_WEIGHT_STATUS)
+
+@predicate(b'missing()', callstatus=True, weight=_WEIGHT_STATUS)
 def missing(mctx, x):
     """File that is missing according to :hg:`status`.
     """
     # i18n: "missing" is a keyword
-    getargs(x, 0, 0, _("missing takes no arguments"))
+    getargs(x, 0, 0, _(b"missing takes no arguments"))
     s = set(mctx.status().deleted)
-    return mctx.predicate(s.__contains__, predrepr='deleted')
+    return mctx.predicate(s.__contains__, predrepr=b'deleted')
 
-@predicate('unknown()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
+
+@predicate(b'unknown()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
 def unknown(mctx, x):
     """File that is unknown according to :hg:`status`."""
     # i18n: "unknown" is a keyword
-    getargs(x, 0, 0, _("unknown takes no arguments"))
+    getargs(x, 0, 0, _(b"unknown takes no arguments"))
     s = set(mctx.status().unknown)
-    return mctx.predicate(s.__contains__, predrepr='unknown')
+    return mctx.predicate(s.__contains__, predrepr=b'unknown')
 
-@predicate('ignored()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
+
+@predicate(b'ignored()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
 def ignored(mctx, x):
     """File that is ignored according to :hg:`status`."""
     # i18n: "ignored" is a keyword
-    getargs(x, 0, 0, _("ignored takes no arguments"))
+    getargs(x, 0, 0, _(b"ignored takes no arguments"))
     s = set(mctx.status().ignored)
-    return mctx.predicate(s.__contains__, predrepr='ignored')
+    return mctx.predicate(s.__contains__, predrepr=b'ignored')
 
-@predicate('clean()', callstatus=True, weight=_WEIGHT_STATUS)
+
+@predicate(b'clean()', callstatus=True, weight=_WEIGHT_STATUS)
 def clean(mctx, x):
     """File that is clean according to :hg:`status`.
     """
     # i18n: "clean" is a keyword
-    getargs(x, 0, 0, _("clean takes no arguments"))
+    getargs(x, 0, 0, _(b"clean takes no arguments"))
     s = set(mctx.status().clean)
-    return mctx.predicate(s.__contains__, predrepr='clean')
+    return mctx.predicate(s.__contains__, predrepr=b'clean')
 
-@predicate('tracked()')
+
+@predicate(b'tracked()')
 def tracked(mctx, x):
     """File that is under Mercurial control."""
     # i18n: "tracked" is a keyword
-    getargs(x, 0, 0, _("tracked takes no arguments"))
-    return mctx.predicate(mctx.ctx.__contains__, predrepr='tracked')
+    getargs(x, 0, 0, _(b"tracked takes no arguments"))
+    return mctx.predicate(mctx.ctx.__contains__, predrepr=b'tracked')
 
-@predicate('binary()', weight=_WEIGHT_READ_CONTENTS)
+
+@predicate(b'binary()', weight=_WEIGHT_READ_CONTENTS)
 def binary(mctx, x):
     """File that appears to be binary (contains NUL bytes).
     """
     # i18n: "binary" is a keyword
-    getargs(x, 0, 0, _("binary takes no arguments"))
-    return mctx.fpredicate(lambda fctx: fctx.isbinary(),
-                           predrepr='binary', cache=True)
+    getargs(x, 0, 0, _(b"binary takes no arguments"))
+    return mctx.fpredicate(
+        lambda fctx: fctx.isbinary(), predrepr=b'binary', cache=True
+    )
 
-@predicate('exec()')
+
+@predicate(b'exec()')
 def exec_(mctx, x):
     """File that is marked as executable.
     """
     # i18n: "exec" is a keyword
-    getargs(x, 0, 0, _("exec takes no arguments"))
+    getargs(x, 0, 0, _(b"exec takes no arguments"))
     ctx = mctx.ctx
-    return mctx.predicate(lambda f: ctx.flags(f) == 'x', predrepr='exec')
+    return mctx.predicate(lambda f: ctx.flags(f) == b'x', predrepr=b'exec')
 
-@predicate('symlink()')
+
+@predicate(b'symlink()')
 def symlink(mctx, x):
     """File that is marked as a symlink.
     """
     # i18n: "symlink" is a keyword
-    getargs(x, 0, 0, _("symlink takes no arguments"))
+    getargs(x, 0, 0, _(b"symlink takes no arguments"))
     ctx = mctx.ctx
-    return mctx.predicate(lambda f: ctx.flags(f) == 'l', predrepr='symlink')
+    return mctx.predicate(lambda f: ctx.flags(f) == b'l', predrepr=b'symlink')
 
-@predicate('resolved()', weight=_WEIGHT_STATUS)
+
+@predicate(b'resolved()', weight=_WEIGHT_STATUS)
 def resolved(mctx, x):
     """File that is marked resolved according to :hg:`resolve -l`.
     """
     # i18n: "resolved" is a keyword
-    getargs(x, 0, 0, _("resolved takes no arguments"))
+    getargs(x, 0, 0, _(b"resolved takes no arguments"))
     if mctx.ctx.rev() is not None:
         return mctx.never()
     ms = merge.mergestate.read(mctx.ctx.repo())
-    return mctx.predicate(lambda f: f in ms and ms[f] == 'r',
-                          predrepr='resolved')
+    return mctx.predicate(
+        lambda f: f in ms and ms[f] == b'r', predrepr=b'resolved'
+    )
 
-@predicate('unresolved()', weight=_WEIGHT_STATUS)
+
+@predicate(b'unresolved()', weight=_WEIGHT_STATUS)
 def unresolved(mctx, x):
     """File that is marked unresolved according to :hg:`resolve -l`.
     """
     # i18n: "unresolved" is a keyword
-    getargs(x, 0, 0, _("unresolved takes no arguments"))
+    getargs(x, 0, 0, _(b"unresolved takes no arguments"))
     if mctx.ctx.rev() is not None:
         return mctx.never()
     ms = merge.mergestate.read(mctx.ctx.repo())
-    return mctx.predicate(lambda f: f in ms and ms[f] == 'u',
-                          predrepr='unresolved')
+    return mctx.predicate(
+        lambda f: f in ms and ms[f] == b'u', predrepr=b'unresolved'
+    )
 
-@predicate('hgignore()', weight=_WEIGHT_STATUS)
+
+@predicate(b'hgignore()', weight=_WEIGHT_STATUS)
 def hgignore(mctx, x):
     """File that matches the active .hgignore pattern.
     """
     # i18n: "hgignore" is a keyword
-    getargs(x, 0, 0, _("hgignore takes no arguments"))
+    getargs(x, 0, 0, _(b"hgignore takes no arguments"))
     return mctx.ctx.repo().dirstate._ignore
 
-@predicate('portable()', weight=_WEIGHT_CHECK_FILENAME)
+
+@predicate(b'portable()', weight=_WEIGHT_CHECK_FILENAME)
 def portable(mctx, x):
     """File that has a portable name. (This doesn't include filenames with case
     collisions.)
     """
     # i18n: "portable" is a keyword
-    getargs(x, 0, 0, _("portable takes no arguments"))
-    return mctx.predicate(lambda f: util.checkwinfilename(f) is None,
-                          predrepr='portable')
+    getargs(x, 0, 0, _(b"portable takes no arguments"))
+    return mctx.predicate(
+        lambda f: util.checkwinfilename(f) is None, predrepr=b'portable'
+    )
 
-@predicate('grep(regex)', weight=_WEIGHT_READ_CONTENTS)
+
+@predicate(b'grep(regex)', weight=_WEIGHT_READ_CONTENTS)
 def grep(mctx, x):
     """File contains the given regular expression.
     """
     try:
         # i18n: "grep" is a keyword
-        r = re.compile(getstring(x, _("grep requires a pattern")))
+        r = re.compile(getstring(x, _(b"grep requires a pattern")))
     except re.error as e:
-        raise error.ParseError(_('invalid match pattern: %s') %
-                               stringutil.forcebytestr(e))
-    return mctx.fpredicate(lambda fctx: r.search(fctx.data()),
-                           predrepr=('grep(%r)', r.pattern), cache=True)
+        raise error.ParseError(
+            _(b'invalid match pattern: %s') % stringutil.forcebytestr(e)
+        )
+    return mctx.fpredicate(
+        lambda fctx: r.search(fctx.data()),
+        predrepr=(b'grep(%r)', r.pattern),
+        cache=True,
+    )
+
 
 def _sizetomax(s):
     try:
@@ -266,34 +310,35 @@
         for k, v in util._sizeunits:
             if s.endswith(k):
                 # max(4k) = 5k - 1, max(4.5k) = 4.6k - 1
-                n = s[:-len(k)]
+                n = s[: -len(k)]
                 inc = 1.0
-                if "." in n:
-                    inc /= 10 ** len(n.split(".")[1])
+                if b"." in n:
+                    inc /= 10 ** len(n.split(b".")[1])
                 return int((float(n) + inc) * v) - 1
         # no extension, this is a precise value
         return int(s)
     except ValueError:
-        raise error.ParseError(_("couldn't parse size: %s") % s)
+        raise error.ParseError(_(b"couldn't parse size: %s") % s)
+
 
 def sizematcher(expr):
     """Return a function(size) -> bool from the ``size()`` expression"""
     expr = expr.strip()
-    if '-' in expr: # do we have a range?
-        a, b = expr.split('-', 1)
+    if b'-' in expr:  # do we have a range?
+        a, b = expr.split(b'-', 1)
         a = util.sizetoint(a)
         b = util.sizetoint(b)
         return lambda x: x >= a and x <= b
-    elif expr.startswith("<="):
+    elif expr.startswith(b"<="):
         a = util.sizetoint(expr[2:])
         return lambda x: x <= a
-    elif expr.startswith("<"):
+    elif expr.startswith(b"<"):
         a = util.sizetoint(expr[1:])
         return lambda x: x < a
-    elif expr.startswith(">="):
+    elif expr.startswith(b">="):
         a = util.sizetoint(expr[2:])
         return lambda x: x >= a
-    elif expr.startswith(">"):
+    elif expr.startswith(b">"):
         a = util.sizetoint(expr[1:])
         return lambda x: x > a
     else:
@@ -301,7 +346,8 @@
         b = _sizetomax(expr)
         return lambda x: x >= a and x <= b
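
A standalone sketch of the bare-value case handled by ``_sizetomax()``
above: ``size(4k)`` matches everything up to, but not including, 5k, and a
decimal digit narrows the bucket accordingly (the unit table is truncated
to kilobytes here)::

    sizeunits = ((b'k', 1024),)

    def sizetomax(s):
        for k, v in sizeunits:
            if s.endswith(k):
                n = s[: -len(k)]
                inc = 1.0
                if b"." in n:
                    inc /= 10 ** len(n.split(b".")[1])
                return int((float(n) + inc) * v) - 1
        return int(s)

    assert sizetomax(b'4k') == 5 * 1024 - 1
    assert sizetomax(b'4.5k') == int(4.6 * 1024) - 1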
 
-@predicate('size(expression)', weight=_WEIGHT_STATUS)
+
+@predicate(b'size(expression)', weight=_WEIGHT_STATUS)
 def size(mctx, x):
     """File size matches the given expression. Examples:
 
@@ -311,12 +357,14 @@
     - size('4k - 1MB') - files from 4096 bytes to 1048576 bytes
     """
     # i18n: "size" is a keyword
-    expr = getstring(x, _("size requires an expression"))
+    expr = getstring(x, _(b"size requires an expression"))
     m = sizematcher(expr)
-    return mctx.fpredicate(lambda fctx: m(fctx.size()),
-                           predrepr=('size(%r)', expr), cache=True)
+    return mctx.fpredicate(
+        lambda fctx: m(fctx.size()), predrepr=(b'size(%r)', expr), cache=True
+    )
 
-@predicate('encoding(name)', weight=_WEIGHT_READ_CONTENTS)
+
+@predicate(b'encoding(name)', weight=_WEIGHT_READ_CONTENTS)
 def encoding(mctx, x):
     """File can be successfully decoded with the given character
     encoding. May not be useful for encodings other than ASCII and
@@ -324,7 +372,7 @@
     """
 
     # i18n: "encoding" is a keyword
-    enc = getstring(x, _("encoding requires an encoding name"))
+    enc = getstring(x, _(b"encoding requires an encoding name"))
 
     def encp(fctx):
         d = fctx.data()
@@ -332,13 +380,14 @@
             d.decode(pycompat.sysstr(enc))
             return True
         except LookupError:
-            raise error.Abort(_("unknown encoding '%s'") % enc)
+            raise error.Abort(_(b"unknown encoding '%s'") % enc)
         except UnicodeDecodeError:
             return False
 
-    return mctx.fpredicate(encp, predrepr=('encoding(%r)', enc), cache=True)
+    return mctx.fpredicate(encp, predrepr=(b'encoding(%r)', enc), cache=True)
 
-@predicate('eol(style)', weight=_WEIGHT_READ_CONTENTS)
+
+@predicate(b'eol(style)', weight=_WEIGHT_READ_CONTENTS)
 def eol(mctx, x):
     """File contains newlines of the given style (dos, unix, mac). Binary
     files are excluded, files with mixed line endings match multiple
@@ -346,41 +395,46 @@
     """
 
     # i18n: "eol" is a keyword
-    enc = getstring(x, _("eol requires a style name"))
+    enc = getstring(x, _(b"eol requires a style name"))
 
     def eolp(fctx):
         if fctx.isbinary():
             return False
         d = fctx.data()
-        if (enc == 'dos' or enc == 'win') and '\r\n' in d:
+        if (enc == b'dos' or enc == b'win') and b'\r\n' in d:
             return True
-        elif enc == 'unix' and re.search('(?<!\r)\n', d):
+        elif enc == b'unix' and re.search(b'(?<!\r)\n', d):
             return True
-        elif enc == 'mac' and re.search('\r(?!\n)', d):
+        elif enc == b'mac' and re.search(b'\r(?!\n)', d):
             return True
         return False
-    return mctx.fpredicate(eolp, predrepr=('eol(%r)', enc), cache=True)
+
+    return mctx.fpredicate(eolp, predrepr=(b'eol(%r)', enc), cache=True)
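
A standalone check of the three line-ending tests used above; as the
docstring says, a file with mixed endings matches more than one style::

    import re

    def eolstyles(d):
        styles = []
        if b'\r\n' in d:
            styles.append(b'dos')
        if re.search(b'(?<!\r)\n', d):
            styles.append(b'unix')
        if re.search(b'\r(?!\n)', d):
            styles.append(b'mac')
        return styles

    assert eolstyles(b'a\r\nb\n') == [b'dos', b'unix']
    assert eolstyles(b'a\rb') == [b'mac']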
 
-@predicate('copied()')
+
+@predicate(b'copied()')
 def copied(mctx, x):
     """File that is recorded as being copied.
     """
     # i18n: "copied" is a keyword
-    getargs(x, 0, 0, _("copied takes no arguments"))
+    getargs(x, 0, 0, _(b"copied takes no arguments"))
+
     def copiedp(fctx):
         p = fctx.parents()
         return p and p[0].path() != fctx.path()
-    return mctx.fpredicate(copiedp, predrepr='copied', cache=True)
+
+    return mctx.fpredicate(copiedp, predrepr=b'copied', cache=True)
 
-@predicate('revs(revs, pattern)', weight=_WEIGHT_STATUS)
+
+@predicate(b'revs(revs, pattern)', weight=_WEIGHT_STATUS)
 def revs(mctx, x):
     """Evaluate set in the specified revisions. If the revset match multiple
     revs, this will return file matching pattern in any of the revision.
     """
     # i18n: "revs" is a keyword
-    r, x = getargs(x, 2, 2, _("revs takes two arguments"))
+    r, x = getargs(x, 2, 2, _(b"revs takes two arguments"))
     # i18n: "revs" is a keyword
-    revspec = getstring(r, _("first argument to revs must be a revision"))
+    revspec = getstring(r, _(b"first argument to revs must be a revision"))
     repo = mctx.ctx.repo()
     revs = scmutil.revrange(repo, [revspec])
 
@@ -395,7 +449,8 @@
         return matchers[0]
     return matchmod.unionmatcher(matchers)
 
-@predicate('status(base, rev, pattern)', weight=_WEIGHT_STATUS)
+
+@predicate(b'status(base, rev, pattern)', weight=_WEIGHT_STATUS)
 def status(mctx, x):
     """Evaluate predicate using status change between ``base`` and
     ``rev``. Examples:
@@ -404,13 +459,13 @@
     """
     repo = mctx.ctx.repo()
     # i18n: "status" is a keyword
-    b, r, x = getargs(x, 3, 3, _("status takes three arguments"))
+    b, r, x = getargs(x, 3, 3, _(b"status takes three arguments"))
     # i18n: "status" is a keyword
-    baseerr = _("first argument to status must be a revision")
+    baseerr = _(b"first argument to status must be a revision")
     baserevspec = getstring(b, baseerr)
     if not baserevspec:
         raise error.ParseError(baseerr)
-    reverr = _("second argument to status must be a revision")
+    reverr = _(b"second argument to status must be a revision")
     revspec = getstring(r, reverr)
     if not revspec:
         raise error.ParseError(reverr)
@@ -418,43 +473,52 @@
     mc = mctx.switch(basectx, ctx)
     return getmatch(mc, x)
 
-@predicate('subrepo([pattern])')
+
+@predicate(b'subrepo([pattern])')
 def subrepo(mctx, x):
     """Subrepositories whose paths match the given pattern.
     """
     # i18n: "subrepo" is a keyword
-    getargs(x, 0, 1, _("subrepo takes at most one argument"))
+    getargs(x, 0, 1, _(b"subrepo takes at most one argument"))
     ctx = mctx.ctx
     sstate = ctx.substate
     if x:
-        pat = getpattern(x, matchmod.allpatternkinds,
-                         # i18n: "subrepo" is a keyword
-                         _("subrepo requires a pattern or no arguments"))
+        pat = getpattern(
+            x,
+            matchmod.allpatternkinds,
+            # i18n: "subrepo" is a keyword
+            _(b"subrepo requires a pattern or no arguments"),
+        )
         fast = not matchmod.patkind(pat)
         if fast:
+
             def m(s):
-                return (s == pat)
+                return s == pat
+
         else:
-            m = matchmod.match(ctx.repo().root, '', [pat], ctx=ctx)
-        return mctx.predicate(lambda f: f in sstate and m(f),
-                              predrepr=('subrepo(%r)', pat))
+            m = matchmod.match(ctx.repo().root, b'', [pat], ctx=ctx)
+        return mctx.predicate(
+            lambda f: f in sstate and m(f), predrepr=(b'subrepo(%r)', pat)
+        )
     else:
-        return mctx.predicate(sstate.__contains__, predrepr='subrepo')
+        return mctx.predicate(sstate.__contains__, predrepr=b'subrepo')
+
 
 methods = {
-    'withstatus': getmatchwithstatus,
-    'string': stringmatch,
-    'symbol': stringmatch,
-    'kindpat': kindpatmatch,
-    'patterns': patternsmatch,
-    'and': andmatch,
-    'or': ormatch,
-    'minus': minusmatch,
-    'list': listmatch,
-    'not': notmatch,
-    'func': func,
+    b'withstatus': getmatchwithstatus,
+    b'string': stringmatch,
+    b'symbol': stringmatch,
+    b'kindpat': kindpatmatch,
+    b'patterns': patternsmatch,
+    b'and': andmatch,
+    b'or': ormatch,
+    b'minus': minusmatch,
+    b'list': listmatch,
+    b'not': notmatch,
+    b'func': func,
 }
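
# Editorial orientation note: getmatch() (defined earlier in this module)
# dispatches through the table above on the tree node's operator tag,
# approximately:
#
#     def getmatch(mctx, x):
#         if not x:
#             raise error.ParseError(_(b"missing argument"))
#         return methods[x[0]](mctx, *x[1:])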
 
+
 class matchctx(object):
     def __init__(self, basectx, ctx, badfn=None):
         self._basectx = basectx
@@ -484,10 +548,13 @@
         return mctx
 
     def _buildstatus(self, keys):
-        self._status = self._basectx.status(self.ctx, self._match,
-                                            listignored='ignored' in keys,
-                                            listclean='clean' in keys,
-                                            listunknown='unknown' in keys)
+        self._status = self._basectx.status(
+            self.ctx,
+            self._match,
+            listignored=b'ignored' in keys,
+            listclean=b'clean' in keys,
+            listunknown=b'unknown' in keys,
+        )
 
     def status(self):
         return self._status
@@ -499,8 +566,9 @@
         """Create a matcher to select files by predfn(filename)"""
         if cache:
             predfn = util.cachefunc(predfn)
-        return matchmod.predicatematcher(predfn, predrepr=predrepr,
-                                         badfn=self._badfn)
+        return matchmod.predicatematcher(
+            predfn, predrepr=predrepr, badfn=self._badfn
+        )
 
     def fpredicate(self, predfn, predrepr=None, cache=False):
         """Create a matcher to select files by predfn(fctx) at the current
@@ -510,6 +578,7 @@
         """
         ctx = self.ctx
         if ctx.rev() is None:
+
             def fctxpredfn(f):
                 try:
                     fctx = ctx[f]
@@ -523,23 +592,31 @@
                     return predfn(fctx)
                 except (IOError, OSError) as e:
                     # open()-ing a directory fails with EACCES on Windows
-                    if e.errno in (errno.ENOENT, errno.EACCES, errno.ENOTDIR,
-                                   errno.EISDIR):
+                    if e.errno in (
+                        errno.ENOENT,
+                        errno.EACCES,
+                        errno.ENOTDIR,
+                        errno.EISDIR,
+                    ):
                         return False
                     raise
+
         else:
+
             def fctxpredfn(f):
                 try:
                     fctx = ctx[f]
                 except error.LookupError:
                     return False
                 return predfn(fctx)
+
         return self.predicate(fctxpredfn, predrepr=predrepr, cache=cache)
 
     def never(self):
         """Create a matcher to select nothing"""
         return matchmod.never(badfn=self._badfn)
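
# Editorial sketch: predicate() wraps a plain filename test, while
# fpredicate() wraps a filectx test. A hypothetical size-based predicate:
#
#     def _bigfile(fctx):
#         return fctx.size() > 1000000
#     m = mctx.fpredicate(_bigfile, predrepr=b'bigfile', cache=True)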
 
+
 def match(ctx, expr, badfn=None):
     """Create a matcher for a single fileset expression"""
     tree = filesetlang.parse(expr)
@@ -552,8 +629,9 @@
 def loadpredicate(ui, extname, registrarobj):
     """Load fileset predicates from specified registrarobj
     """
-    for name, func in registrarobj._table.iteritems():
+    for name, func in pycompat.iteritems(registrarobj._table):
         symbols[name] = func
 
+
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = symbols.values()
--- a/mercurial/filesetlang.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/filesetlang.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,6 +8,7 @@
 from __future__ import absolute_import
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     error,
     parser,
@@ -23,41 +24,45 @@
 
 elements = {
     # token-type: binding-strength, primary, prefix, infix, suffix
-    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
-    ":": (15, None, None, ("kindpat", 15), None),
-    "-": (5, None, ("negate", 19), ("minus", 5), None),
-    "not": (10, None, ("not", 10), None, None),
-    "!": (10, None, ("not", 10), None, None),
-    "and": (5, None, None, ("and", 5), None),
-    "&": (5, None, None, ("and", 5), None),
-    "or": (4, None, None, ("or", 4), None),
-    "|": (4, None, None, ("or", 4), None),
-    "+": (4, None, None, ("or", 4), None),
-    ",": (2, None, None, ("list", 2), None),
-    ")": (0, None, None, None, None),
-    "symbol": (0, "symbol", None, None, None),
-    "string": (0, "string", None, None, None),
-    "end": (0, None, None, None, None),
+    b"(": (20, None, (b"group", 1, b")"), (b"func", 1, b")"), None),
+    b":": (15, None, None, (b"kindpat", 15), None),
+    b"-": (5, None, (b"negate", 19), (b"minus", 5), None),
+    b"not": (10, None, (b"not", 10), None, None),
+    b"!": (10, None, (b"not", 10), None, None),
+    b"and": (5, None, None, (b"and", 5), None),
+    b"&": (5, None, None, (b"and", 5), None),
+    b"or": (4, None, None, (b"or", 4), None),
+    b"|": (4, None, None, (b"or", 4), None),
+    b"+": (4, None, None, (b"or", 4), None),
+    b",": (2, None, None, (b"list", 2), None),
+    b")": (0, None, None, None, None),
+    b"symbol": (0, b"symbol", None, None, None),
+    b"string": (0, b"string", None, None, None),
+    b"end": (0, None, None, None, None),
 }
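
# Editorial note on reading the table above: the b"-" entry
# (5, None, (b"negate", 19), (b"minus", 5), None) means binding strength 5,
# no primary role, a prefix form parsed as (b"negate", <operand>) with the
# operand bound at strength 19, an infix form (b"minus", <lhs>, <rhs>), and
# no suffix form. So, approximately:
#
#     parse(b'a and not b')
#     # (b'and', (b'symbol', b'a'), (b'not', (b'symbol', b'b')))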
 
-keywords = {'and', 'or', 'not'}
+keywords = {b'and', b'or', b'not'}
 
 symbols = {}
 
-globchars = ".*{}[]?/\\_"
+globchars = b".*{}[]?/\\_"
+
 
 def tokenize(program):
     pos, l = 0, len(program)
     program = pycompat.bytestr(program)
     while pos < l:
         c = program[pos]
-        if c.isspace(): # skip inter-token whitespace
+        if c.isspace():  # skip inter-token whitespace
             pass
-        elif c in "(),-:|&+!": # handle simple operators
+        elif c in b"(),-:|&+!":  # handle simple operators
             yield (c, None, pos)
-        elif (c in '"\'' or c == 'r' and
-              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
-            if c == 'r':
+        elif (
+            c in b'"\''
+            or c == b'r'
+            and program[pos : pos + 2] in (b"r'", b'r"')
+        ):  # handle quoted strings
+            if c == b'r':
                 pos += 1
                 c = program[pos]
                 decode = lambda x: x
@@ -65,111 +70,120 @@
                 decode = parser.unescapestr
             pos += 1
             s = pos
-            while pos < l: # find closing quote
+            while pos < l:  # find closing quote
                 d = program[pos]
-                if d == '\\': # skip over escaped characters
+                if d == b'\\':  # skip over escaped characters
                     pos += 2
                     continue
                 if d == c:
-                    yield ('string', decode(program[s:pos]), s)
+                    yield (b'string', decode(program[s:pos]), s)
                     break
                 pos += 1
             else:
-                raise error.ParseError(_("unterminated string"), s)
+                raise error.ParseError(_(b"unterminated string"), s)
         elif c.isalnum() or c in globchars or ord(c) > 127:
             # gather up a symbol/keyword
             s = pos
             pos += 1
-            while pos < l: # find end of symbol
+            while pos < l:  # find end of symbol
                 d = program[pos]
                 if not (d.isalnum() or d in globchars or ord(d) > 127):
                     break
                 pos += 1
             sym = program[s:pos]
-            if sym in keywords: # operator keywords
+            if sym in keywords:  # operator keywords
                 yield (sym, None, s)
             else:
-                yield ('symbol', sym, s)
+                yield (b'symbol', sym, s)
             pos -= 1
         else:
-            raise error.ParseError(_("syntax error"), pos)
+            raise error.ParseError(_(b"syntax error"), pos)
         pos += 1
-    yield ('end', None, pos)
+    yield (b'end', None, pos)
+
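# Editorial example of the token stream produced above (the third element is
# the byte offset into the program):
#
#     list(tokenize(b'clean() - *.o'))
#     # [(b'symbol', b'clean', 0), (b'(', None, 5), (b')', None, 6),
#     #  (b'-', None, 8), (b'symbol', b'*.o', 10), (b'end', None, 13)]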
 
 def parse(expr):
     p = parser.parser(elements)
     tree, pos = p.parse(tokenize(expr))
     if pos != len(expr):
-        raise error.ParseError(_("invalid token"), pos)
-    return parser.simplifyinfixops(tree, {'list', 'or'})
+        raise error.ParseError(_(b"invalid token"), pos)
+    return parser.simplifyinfixops(tree, {b'list', b'or'})
+
 
 def getsymbol(x):
-    if x and x[0] == 'symbol':
+    if x and x[0] == b'symbol':
         return x[1]
-    raise error.ParseError(_('not a symbol'))
+    raise error.ParseError(_(b'not a symbol'))
+
 
 def getstring(x, err):
-    if x and (x[0] == 'string' or x[0] == 'symbol'):
+    if x and (x[0] == b'string' or x[0] == b'symbol'):
         return x[1]
     raise error.ParseError(err)
 
+
 def getkindpat(x, y, allkinds, err):
     kind = getsymbol(x)
     pat = getstring(y, err)
     if kind not in allkinds:
-        raise error.ParseError(_("invalid pattern kind: %s") % kind)
-    return '%s:%s' % (kind, pat)
+        raise error.ParseError(_(b"invalid pattern kind: %s") % kind)
+    return b'%s:%s' % (kind, pat)
+
 
 def getpattern(x, allkinds, err):
-    if x and x[0] == 'kindpat':
+    if x and x[0] == b'kindpat':
         return getkindpat(x[1], x[2], allkinds, err)
     return getstring(x, err)
 
+
 def getlist(x):
     if not x:
         return []
-    if x[0] == 'list':
+    if x[0] == b'list':
         return list(x[1:])
     return [x]
 
+
 def getargs(x, min, max, err):
     l = getlist(x)
     if len(l) < min or len(l) > max:
         raise error.ParseError(err)
     return l
 
+
 def _analyze(x):
     if x is None:
         return x
 
     op = x[0]
-    if op in {'string', 'symbol'}:
+    if op in {b'string', b'symbol'}:
         return x
-    if op == 'kindpat':
+    if op == b'kindpat':
         getsymbol(x[1])  # kind must be a symbol
         t = _analyze(x[2])
         return (op, x[1], t)
-    if op == 'group':
+    if op == b'group':
         return _analyze(x[1])
-    if op == 'negate':
-        raise error.ParseError(_("can't use negate operator in this context"))
-    if op == 'not':
+    if op == b'negate':
+        raise error.ParseError(_(b"can't use negate operator in this context"))
+    if op == b'not':
         t = _analyze(x[1])
         return (op, t)
-    if op == 'and':
+    if op == b'and':
         ta = _analyze(x[1])
         tb = _analyze(x[2])
         return (op, ta, tb)
-    if op == 'minus':
-        return _analyze(('and', x[1], ('not', x[2])))
-    if op in {'list', 'or'}:
+    if op == b'minus':
+        return _analyze((b'and', x[1], (b'not', x[2])))
+    if op in {b'list', b'or'}:
         ts = tuple(_analyze(y) for y in x[1:])
         return (op,) + ts
-    if op == 'func':
+    if op == b'func':
         getsymbol(x[1])  # function name must be a symbol
         ta = _analyze(x[2])
         return (op, x[1], ta)
-    raise error.ProgrammingError('invalid operator %r' % op)
+    raise error.ProgrammingError(b'invalid operator %r' % op)
+
 
 def _insertstatushints(x):
     """Insert hint nodes where status should be calculated (first path)
@@ -184,35 +198,36 @@
         return (), x
 
     op = x[0]
-    if op in {'string', 'symbol', 'kindpat'}:
+    if op in {b'string', b'symbol', b'kindpat'}:
         return (), x
-    if op == 'not':
+    if op == b'not':
         h, t = _insertstatushints(x[1])
         return h, (op, t)
-    if op == 'and':
+    if op == b'and':
         ha, ta = _insertstatushints(x[1])
         hb, tb = _insertstatushints(x[2])
         hr = ha + hb
         if ha and hb:
-            return hr, ('withstatus', (op, ta, tb), ('string', ' '.join(hr)))
+            return hr, (b'withstatus', (op, ta, tb), (b'string', b' '.join(hr)))
         return hr, (op, ta, tb)
-    if op == 'or':
+    if op == b'or':
         hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
         hr = sum(hs, ())
         if sum(bool(h) for h in hs) > 1:
-            return hr, ('withstatus', (op,) + ts, ('string', ' '.join(hr)))
+            return hr, (b'withstatus', (op,) + ts, (b'string', b' '.join(hr)))
         return hr, (op,) + ts
-    if op == 'list':
+    if op == b'list':
         hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
         return sum(hs, ()), (op,) + ts
-    if op == 'func':
+    if op == b'func':
         f = getsymbol(x[1])
         # don't propagate 'ha' across a function boundary
         ha, ta = _insertstatushints(x[2])
         if getattr(symbols.get(f), '_callstatus', False):
-            return (f,), ('withstatus', (op, x[1], ta), ('string', f))
+            return (f,), (b'withstatus', (op, x[1], ta), (b'string', f))
         return (), (op, x[1], ta)
-    raise error.ProgrammingError('invalid operator %r' % op)
+    raise error.ProgrammingError(b'invalid operator %r' % op)
+
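# Editorial illustration: for b'added() and removed()' both operands report
# status hints, so the pass above wraps the b'and' node as
# (b'withstatus', (b'and', ...), (b'string', b'added removed')); redundant
# inner hints are then dropped by _mergestatushints() below.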
 
 def _mergestatushints(x, instatus):
     """Remove redundant status hint nodes (second path)
@@ -223,29 +238,30 @@
         return x
 
     op = x[0]
-    if op == 'withstatus':
+    if op == b'withstatus':
         if instatus:
             # drop redundant hint node
             return _mergestatushints(x[1], instatus)
         t = _mergestatushints(x[1], instatus=True)
         return (op, t, x[2])
-    if op in {'string', 'symbol', 'kindpat'}:
+    if op in {b'string', b'symbol', b'kindpat'}:
         return x
-    if op == 'not':
+    if op == b'not':
         t = _mergestatushints(x[1], instatus)
         return (op, t)
-    if op == 'and':
+    if op == b'and':
         ta = _mergestatushints(x[1], instatus)
         tb = _mergestatushints(x[2], instatus)
         return (op, ta, tb)
-    if op in {'list', 'or'}:
+    if op in {b'list', b'or'}:
         ts = tuple(_mergestatushints(y, instatus) for y in x[1:])
         return (op,) + ts
-    if op == 'func':
+    if op == b'func':
         # don't propagate 'instatus' across a function boundary
         ta = _mergestatushints(x[2], instatus=False)
         return (op, x[1], ta)
-    raise error.ProgrammingError('invalid operator %r' % op)
+    raise error.ProgrammingError(b'invalid operator %r' % op)
+
 
 def analyze(x):
     """Transform raw parsed tree to evaluatable tree which can be fed to
@@ -258,65 +274,70 @@
     _h, t = _insertstatushints(t)
     return _mergestatushints(t, instatus=False)
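
# Editorial example: b'minus' is rewritten into and/not form during analysis,
# so approximately:
#
#     analyze(parse(b'a - b'))
#     # (b'and', (b'symbol', b'a'), (b'not', (b'symbol', b'b')))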
 
+
 def _optimizeandops(op, ta, tb):
-    if tb is not None and tb[0] == 'not':
-        return ('minus', ta, tb[1])
+    if tb is not None and tb[0] == b'not':
+        return (b'minus', ta, tb[1])
     return (op, ta, tb)
 
+
 def _optimizeunion(xs):
     # collect string patterns so they can be compiled into a single regexp
     ws, ts, ss = [], [], []
     for x in xs:
         w, t = _optimize(x)
-        if t is not None and t[0] in {'string', 'symbol', 'kindpat'}:
+        if t is not None and t[0] in {b'string', b'symbol', b'kindpat'}:
             ss.append(t)
             continue
         ws.append(w)
         ts.append(t)
     if ss:
         ws.append(WEIGHT_CHECK_FILENAME)
-        ts.append(('patterns',) + tuple(ss))
+        ts.append((b'patterns',) + tuple(ss))
     return ws, ts
 
+
 def _optimize(x):
     if x is None:
         return 0, x
 
     op = x[0]
-    if op == 'withstatus':
+    if op == b'withstatus':
         w, t = _optimize(x[1])
         return w, (op, t, x[2])
-    if op in {'string', 'symbol'}:
+    if op in {b'string', b'symbol'}:
         return WEIGHT_CHECK_FILENAME, x
-    if op == 'kindpat':
+    if op == b'kindpat':
         w, t = _optimize(x[2])
         return w, (op, x[1], t)
-    if op == 'not':
+    if op == b'not':
         w, t = _optimize(x[1])
         return w, (op, t)
-    if op == 'and':
+    if op == b'and':
         wa, ta = _optimize(x[1])
         wb, tb = _optimize(x[2])
         if wa <= wb:
             return wa, _optimizeandops(op, ta, tb)
         else:
             return wb, _optimizeandops(op, tb, ta)
-    if op == 'or':
+    if op == b'or':
         ws, ts = _optimizeunion(x[1:])
         if len(ts) == 1:
-            return ws[0], ts[0] # 'or' operation is fully optimized out
-        ts = tuple(it[1] for it in sorted(enumerate(ts),
-                                          key=lambda it: ws[it[0]]))
+            return ws[0], ts[0]  # 'or' operation is fully optimized out
+        ts = tuple(
+            it[1] for it in sorted(enumerate(ts), key=lambda it: ws[it[0]])
+        )
         return max(ws), (op,) + ts
-    if op == 'list':
+    if op == b'list':
         ws, ts = zip(*(_optimize(y) for y in x[1:]))
         return sum(ws), (op,) + ts
-    if op == 'func':
+    if op == b'func':
         f = getsymbol(x[1])
         w = getattr(symbols.get(f), '_weight', 1)
         wa, ta = _optimize(x[2])
         return w + wa, (op, x[1], ta)
-    raise error.ProgrammingError('invalid operator %r' % op)
+    raise error.ProgrammingError(b'invalid operator %r' % op)
+
 
 def optimize(x):
     """Reorder/rewrite evaluatable tree for optimization
@@ -326,5 +347,6 @@
     _w, t = _optimize(x)
     return t
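
# Editorial example: b'and' operands are reordered so that the cheaper test
# runs first; in b'added() and *.py' the filename check for *.py (weight
# WEIGHT_CHECK_FILENAME) is hoisted ahead of the status-based added()
# predicate, whose weight comes from its _weight attribute.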
 
+
 def prettyformat(tree):
-    return parser.prettyformat(tree, ('string', 'symbol'))
+    return parser.prettyformat(tree, (b'string', b'symbol'))
--- a/mercurial/formatter.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/formatter.py	Mon Oct 21 11:09:48 2019 -0400
@@ -116,9 +116,7 @@
     hex,
     short,
 )
-from .thirdparty import (
-    attr,
-)
+from .thirdparty import attr
 
 from . import (
     error,
@@ -137,6 +135,7 @@
 
 pickle = util.pickle
 
+
 class _nullconverter(object):
     '''convert non-primitive data types to be processed by formatter'''
 
@@ -147,23 +146,27 @@
     def wrapnested(data, tmpl, sep):
         '''wrap nested data by appropriate type'''
         return data
+
     @staticmethod
     def formatdate(date, fmt):
         '''convert date tuple to appropriate format'''
         # timestamp can be float, but the canonical form should be int
         ts, tz = date
         return (int(ts), tz)
+
     @staticmethod
     def formatdict(data, key, value, fmt, sep):
         '''convert dict or key-value pairs to appropriate dict format'''
         # use plain dict instead of util.sortdict so that data can be
         # serialized as a builtin dict in pickle output
         return dict(data)
+
     @staticmethod
     def formatlist(data, name, fmt, sep):
         '''convert iterable to appropriate list format'''
         return list(data)
 
+
 class baseformatter(object):
     def __init__(self, ui, topic, opts, converter):
         self._ui = ui
@@ -173,90 +176,114 @@
         self._item = None
         # function to convert node to string suitable for this output
         self.hexfunc = hex
+
     def __enter__(self):
         return self
+
     def __exit__(self, exctype, excvalue, traceback):
         if exctype is None:
             self.end()
+
     def _showitem(self):
         '''show a formatted item once all data is collected'''
+
     def startitem(self):
         '''begin an item in the format list'''
         if self._item is not None:
             self._showitem()
         self._item = {}
-    def formatdate(self, date, fmt='%a %b %d %H:%M:%S %Y %1%2'):
+
+    def formatdate(self, date, fmt=b'%a %b %d %H:%M:%S %Y %1%2'):
         '''convert date tuple to appropriate format'''
         return self._converter.formatdate(date, fmt)
-    def formatdict(self, data, key='key', value='value', fmt=None, sep=' '):
+
+    def formatdict(self, data, key=b'key', value=b'value', fmt=None, sep=b' '):
         '''convert dict or key-value pairs to appropriate dict format'''
         return self._converter.formatdict(data, key, value, fmt, sep)
-    def formatlist(self, data, name, fmt=None, sep=' '):
+
+    def formatlist(self, data, name, fmt=None, sep=b' '):
         '''convert iterable to appropriate list format'''
         # name is a mandatory argument for now, but it could become optional
         # if we had a default template keyword, e.g. {item}
         return self._converter.formatlist(data, name, fmt, sep)
+
     def context(self, **ctxs):
         '''insert context objects to be used to render template keywords'''
         ctxs = pycompat.byteskwargs(ctxs)
-        assert all(k in {'repo', 'ctx', 'fctx'} for k in ctxs)
+        assert all(k in {b'repo', b'ctx', b'fctx'} for k in ctxs)
         if self._converter.storecontext:
             # populate missing resources in fctx -> ctx -> repo order
-            if 'fctx' in ctxs and 'ctx' not in ctxs:
-                ctxs['ctx'] = ctxs['fctx'].changectx()
-            if 'ctx' in ctxs and 'repo' not in ctxs:
-                ctxs['repo'] = ctxs['ctx'].repo()
+            if b'fctx' in ctxs and b'ctx' not in ctxs:
+                ctxs[b'ctx'] = ctxs[b'fctx'].changectx()
+            if b'ctx' in ctxs and b'repo' not in ctxs:
+                ctxs[b'repo'] = ctxs[b'ctx'].repo()
             self._item.update(ctxs)
+
     def datahint(self):
         '''set of field names to be referenced'''
         return set()
+
     def data(self, **data):
         '''insert data into item that's not shown in default output'''
         data = pycompat.byteskwargs(data)
         self._item.update(data)
+
     def write(self, fields, deftext, *fielddata, **opts):
         '''do default text output while assigning data to item'''
         fieldkeys = fields.split()
         assert len(fieldkeys) == len(fielddata), (fieldkeys, fielddata)
         self._item.update(zip(fieldkeys, fielddata))
+
     def condwrite(self, cond, fields, deftext, *fielddata, **opts):
         '''do conditional write (primarily for plain formatter)'''
         fieldkeys = fields.split()
         assert len(fieldkeys) == len(fielddata)
         self._item.update(zip(fieldkeys, fielddata))
+
     def plain(self, text, **opts):
         '''show raw text for non-templated mode'''
+
     def isplain(self):
         '''check for plain formatter usage'''
         return False
-    def nested(self, field, tmpl=None, sep=''):
+
+    def nested(self, field, tmpl=None, sep=b''):
         '''sub formatter to store nested data in the specified field'''
         data = []
         self._item[field] = self._converter.wrapnested(data, tmpl, sep)
         return _nestedformatter(self._ui, self._converter, data)
+
     def end(self):
         '''end output for the formatter'''
         if self._item is not None:
             self._showitem()
 
+
 def nullformatter(ui, topic, opts):
     '''formatter that prints nothing'''
     return baseformatter(ui, topic, opts, converter=_nullconverter)
 
+
 class _nestedformatter(baseformatter):
     '''build sub items and store them in the parent formatter'''
+
     def __init__(self, ui, converter, data):
-        baseformatter.__init__(self, ui, topic='', opts={}, converter=converter)
+        baseformatter.__init__(
+            self, ui, topic=b'', opts={}, converter=converter
+        )
         self._data = data
+
     def _showitem(self):
         self._data.append(self._item)
 
+
 def _iteritems(data):
     '''iterate key-value pairs in stable order'''
     if isinstance(data, dict):
-        return sorted(data.iteritems())
+        return sorted(pycompat.iteritems(data))
     return data
 
+
 class _plainconverter(object):
     '''convert non-primitive data types to text'''
 
@@ -264,31 +291,37 @@
 
     @staticmethod
     def wrapnested(data, tmpl, sep):
-        raise error.ProgrammingError('plainformatter should never be nested')
+        raise error.ProgrammingError(b'plainformatter should never be nested')
+
     @staticmethod
     def formatdate(date, fmt):
         '''stringify date tuple in the given format'''
         return dateutil.datestr(date, fmt)
+
     @staticmethod
     def formatdict(data, key, value, fmt, sep):
         '''stringify key-value pairs separated by sep'''
         prefmt = pycompat.identity
         if fmt is None:
-            fmt = '%s=%s'
+            fmt = b'%s=%s'
             prefmt = pycompat.bytestr
-        return sep.join(fmt % (prefmt(k), prefmt(v))
-                        for k, v in _iteritems(data))
+        return sep.join(
+            fmt % (prefmt(k), prefmt(v)) for k, v in _iteritems(data)
+        )
+
     @staticmethod
     def formatlist(data, name, fmt, sep):
         '''stringify iterable separated by sep'''
         prefmt = pycompat.identity
         if fmt is None:
-            fmt = '%s'
+            fmt = b'%s'
             prefmt = pycompat.bytestr
         return sep.join(fmt % prefmt(e) for e in data)
 
+
 class plainformatter(baseformatter):
     '''the default text output scheme'''
+
     def __init__(self, ui, out, topic, opts):
         baseformatter.__init__(self, ui, topic, opts, _plainconverter)
         if ui.debugflag:
@@ -299,86 +332,109 @@
             self._write = ui.write
         else:
             self._write = lambda s, **opts: out.write(s)
+
     def startitem(self):
         pass
+
     def data(self, **data):
         pass
+
     def write(self, fields, deftext, *fielddata, **opts):
         self._write(deftext % fielddata, **opts)
+
     def condwrite(self, cond, fields, deftext, *fielddata, **opts):
         '''do conditional write'''
         if cond:
             self._write(deftext % fielddata, **opts)
+
     def plain(self, text, **opts):
         self._write(text, **opts)
+
     def isplain(self):
         return True
-    def nested(self, field, tmpl=None, sep=''):
+
+    def nested(self, field, tmpl=None, sep=b''):
         # nested data will be directly written to ui
         return self
+
     def end(self):
         pass
 
+
 class debugformatter(baseformatter):
     def __init__(self, ui, out, topic, opts):
         baseformatter.__init__(self, ui, topic, opts, _nullconverter)
         self._out = out
-        self._out.write("%s = [\n" % self._topic)
+        self._out.write(b"%s = [\n" % self._topic)
+
     def _showitem(self):
-        self._out.write('    %s,\n'
-                        % stringutil.pprint(self._item, indent=4, level=1))
+        self._out.write(
+            b'    %s,\n' % stringutil.pprint(self._item, indent=4, level=1)
+        )
+
     def end(self):
         baseformatter.end(self)
-        self._out.write("]\n")
+        self._out.write(b"]\n")
+
 
 class pickleformatter(baseformatter):
     def __init__(self, ui, out, topic, opts):
         baseformatter.__init__(self, ui, topic, opts, _nullconverter)
         self._out = out
         self._data = []
+
     def _showitem(self):
         self._data.append(self._item)
+
     def end(self):
         baseformatter.end(self)
         self._out.write(pickle.dumps(self._data))
 
+
 class cborformatter(baseformatter):
     '''serialize items as an indefinite-length CBOR array'''
+
     def __init__(self, ui, out, topic, opts):
         baseformatter.__init__(self, ui, topic, opts, _nullconverter)
         self._out = out
         self._out.write(cborutil.BEGIN_INDEFINITE_ARRAY)
+
     def _showitem(self):
         self._out.write(b''.join(cborutil.streamencode(self._item)))
+
     def end(self):
         baseformatter.end(self)
         self._out.write(cborutil.BREAK)
 
+
 class jsonformatter(baseformatter):
     def __init__(self, ui, out, topic, opts):
         baseformatter.__init__(self, ui, topic, opts, _nullconverter)
         self._out = out
-        self._out.write("[")
+        self._out.write(b"[")
         self._first = True
+
     def _showitem(self):
         if self._first:
             self._first = False
         else:
-            self._out.write(",")
+            self._out.write(b",")
 
-        self._out.write("\n {\n")
+        self._out.write(b"\n {\n")
         first = True
         for k, v in sorted(self._item.items()):
             if first:
                 first = False
             else:
-                self._out.write(",\n")
+                self._out.write(b",\n")
             u = templatefilters.json(v, paranoid=False)
-            self._out.write('  "%s": %s' % (k, u))
-        self._out.write("\n }")
+            self._out.write(b'  "%s": %s' % (k, u))
+        self._out.write(b"\n }")
+
     def end(self):
         baseformatter.end(self)
-        self._out.write("\n]\n")
+        self._out.write(b"\n]\n")
+
 
 class _templateconverter(object):
     '''convert non-primitive data types to be processed by templater'''
@@ -389,45 +445,60 @@
     def wrapnested(data, tmpl, sep):
         '''wrap nested data by templatable type'''
         return templateutil.mappinglist(data, tmpl=tmpl, sep=sep)
+
     @staticmethod
     def formatdate(date, fmt):
         '''return date tuple'''
         return templateutil.date(date)
+
     @staticmethod
     def formatdict(data, key, value, fmt, sep):
         '''build an object that can be evaluated as either a plain string or a dict'''
         data = util.sortdict(_iteritems(data))
+
         def f():
             yield _plainconverter.formatdict(data, key, value, fmt, sep)
-        return templateutil.hybriddict(data, key=key, value=value, fmt=fmt,
-                                       gen=f)
+
+        return templateutil.hybriddict(
+            data, key=key, value=value, fmt=fmt, gen=f
+        )
+
     @staticmethod
     def formatlist(data, name, fmt, sep):
         '''build an object that can be evaluated as either a plain string or a list'''
         data = list(data)
+
         def f():
             yield _plainconverter.formatlist(data, name, fmt, sep)
+
         return templateutil.hybridlist(data, name=name, fmt=fmt, gen=f)
 
+
 class templateformatter(baseformatter):
-    def __init__(self, ui, out, topic, opts):
+    def __init__(self, ui, out, topic, opts, spec, overridetemplates=None):
         baseformatter.__init__(self, ui, topic, opts, _templateconverter)
         self._out = out
-        spec = lookuptemplate(ui, topic, opts.get('template', ''))
         self._tref = spec.ref
-        self._t = loadtemplater(ui, spec, defaults=templatekw.keywords,
-                                resources=templateresources(ui),
-                                cache=templatekw.defaulttempl)
-        self._parts = templatepartsmap(spec, self._t,
-                                       ['docheader', 'docfooter', 'separator'])
+        self._t = loadtemplater(
+            ui,
+            spec,
+            defaults=templatekw.keywords,
+            resources=templateresources(ui),
+            cache=templatekw.defaulttempl,
+        )
+        if overridetemplates:
+            self._t.cache.update(overridetemplates)
+        self._parts = templatepartsmap(
+            spec, self._t, [b'docheader', b'docfooter', b'separator']
+        )
         self._counter = itertools.count()
-        self._renderitem('docheader', {})
+        self._renderitem(b'docheader', {})
 
     def _showitem(self):
         item = self._item.copy()
-        item['index'] = index = next(self._counter)
+        item[b'index'] = index = next(self._counter)
         if index > 0:
-            self._renderitem('separator', {})
+            self._renderitem(b'separator', {})
         self._renderitem(self._tref, item)
 
     def _renderitem(self, part, item):
@@ -446,13 +517,16 @@
 
     def end(self):
         baseformatter.end(self)
-        self._renderitem('docfooter', {})
+        self._renderitem(b'docfooter', {})
+
 
 @attr.s(frozen=True)
 class templatespec(object):
     ref = attr.ib()
     tmpl = attr.ib()
     mapfile = attr.ib()
+    refargs = attr.ib(default=None)
+
 
 def lookuptemplate(ui, topic, tmpl):
     """Find the template matching the given -T/--template spec 'tmpl'
@@ -460,6 +534,7 @@
     'tmpl' can be any of the following:
 
      - a literal template (e.g. '{rev}')
+     - a reference to a built-in template (i.e. formatter)
      - a map-file name or path (e.g. 'changelog')
      - a reference to [templates] in the config file
      - a path to raw template file
@@ -473,36 +548,51 @@
     available as well as aliases in [templatealias].
     """
 
+    if not tmpl:
+        return templatespec(None, None, None)
+
     # looks like a literal template?
-    if '{' in tmpl:
-        return templatespec('', tmpl, None)
+    if b'{' in tmpl:
+        return templatespec(b'', tmpl, None)
+
+    # a reference to built-in (formatter) template
+    if tmpl in {b'cbor', b'json', b'pickle', b'debug'}:
+        return templatespec(tmpl, None, None)
+
+    # a function-style reference to built-in template
+    func, fsep, ftail = tmpl.partition(b'(')
+    if func in {b'cbor', b'json'} and fsep and ftail.endswith(b')'):
+        templater.parseexpr(tmpl)  # make sure syntax errors are confined
+        return templatespec(func, None, None, refargs=ftail[:-1])
 
     # perhaps a stock style?
     if not os.path.split(tmpl)[0]:
-        mapname = (templater.templatepath('map-cmdline.' + tmpl)
-                   or templater.templatepath(tmpl))
+        mapname = templater.templatepath(
+            b'map-cmdline.' + tmpl
+        ) or templater.templatepath(tmpl)
         if mapname and os.path.isfile(mapname):
             return templatespec(topic, None, mapname)
 
     # perhaps it's a reference to [templates]
-    if ui.config('templates', tmpl):
+    if ui.config(b'templates', tmpl):
         return templatespec(tmpl, None, None)
 
-    if tmpl == 'list':
-        ui.write(_("available styles: %s\n") % templater.stylelist())
-        raise error.Abort(_("specify a template"))
+    if tmpl == b'list':
+        ui.write(_(b"available styles: %s\n") % templater.stylelist())
+        raise error.Abort(_(b"specify a template"))
 
     # perhaps it's a path to a map or a template
-    if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
+    if (b'/' in tmpl or b'\\' in tmpl) and os.path.isfile(tmpl):
         # is it a mapfile for a style?
-        if os.path.basename(tmpl).startswith("map-"):
+        if os.path.basename(tmpl).startswith(b"map-"):
             return templatespec(topic, None, os.path.realpath(tmpl))
-        with util.posixfile(tmpl, 'rb') as f:
+        with util.posixfile(tmpl, b'rb') as f:
             tmpl = f.read()
-        return templatespec('', tmpl, None)
+        return templatespec(b'', tmpl, None)
 
     # constant string?
-    return templatespec('', tmpl, None)
+    return templatespec(b'', tmpl, None)
+
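# Editorial examples of the spec forms above ('ui' is an assumed ui object):
#
#     lookuptemplate(ui, b'files', b'{rev}\n')     # literal: tmpl is set
#     lookuptemplate(ui, b'files', b'json')        # built-in: ref=b'json'
#     lookuptemplate(ui, b'files', b'json(path)')  # ref=b'json', refargs=b'path'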
 
 def templatepartsmap(spec, t, partnames):
     """Create a mapping of {part: ref}"""
@@ -511,53 +601,62 @@
         partsmap.update((p, p) for p in partnames if p in t)
     elif spec.ref:
         for part in partnames:
-            ref = '%s:%s' % (spec.ref, part)  # select config sub-section
+            ref = b'%s:%s' % (spec.ref, part)  # select config sub-section
             if ref in t:
                 partsmap[part] = ref
     return partsmap
 
+
 def loadtemplater(ui, spec, defaults=None, resources=None, cache=None):
     """Create a templater from either a literal template or loading from
     a map file"""
     assert not (spec.tmpl and spec.mapfile)
     if spec.mapfile:
         frommapfile = templater.templater.frommapfile
-        return frommapfile(spec.mapfile, defaults=defaults, resources=resources,
-                           cache=cache)
-    return maketemplater(ui, spec.tmpl, defaults=defaults, resources=resources,
-                         cache=cache)
+        return frommapfile(
+            spec.mapfile, defaults=defaults, resources=resources, cache=cache
+        )
+    return maketemplater(
+        ui, spec.tmpl, defaults=defaults, resources=resources, cache=cache
+    )
+
 
 def maketemplater(ui, tmpl, defaults=None, resources=None, cache=None):
     """Create a templater from a string template 'tmpl'"""
-    aliases = ui.configitems('templatealias')
-    t = templater.templater(defaults=defaults, resources=resources,
-                            cache=cache, aliases=aliases)
-    t.cache.update((k, templater.unquotestring(v))
-                   for k, v in ui.configitems('templates'))
+    aliases = ui.configitems(b'templatealias')
+    t = templater.templater(
+        defaults=defaults, resources=resources, cache=cache, aliases=aliases
+    )
+    t.cache.update(
+        (k, templater.unquotestring(v)) for k, v in ui.configitems(b'templates')
+    )
     if tmpl:
-        t.cache[''] = tmpl
+        t.cache[b''] = tmpl
     return t
 
+
 # marker to denote a resource to be loaded on demand based on mapping values
 # (e.g. (ctx, path) -> fctx)
 _placeholder = object()
 
+
 class templateresources(templater.resourcemapper):
     """Resource mapper designed for the default templatekw and function"""
 
     def __init__(self, ui, repo=None):
         self._resmap = {
-            'cache': {},  # for templatekw/funcs to store reusable data
-            'repo': repo,
-            'ui': ui,
+            b'cache': {},  # for templatekw/funcs to store reusable data
+            b'repo': repo,
+            b'ui': ui,
         }
 
     def availablekeys(self, mapping):
-        return {k for k in self.knownkeys()
-                if self._getsome(mapping, k) is not None}
+        return {
+            k for k in self.knownkeys() if self._getsome(mapping, k) is not None
+        }
 
     def knownkeys(self):
-        return {'cache', 'ctx', 'fctx', 'repo', 'revcache', 'ui'}
+        return {b'cache', b'ctx', b'fctx', b'repo', b'revcache', b'ui'}
 
     def lookup(self, mapping, key):
         if key not in self.knownkeys():
@@ -570,16 +669,16 @@
     def populatemap(self, context, origmapping, newmapping):
         mapping = {}
         if self._hasnodespec(newmapping):
-            mapping['revcache'] = {}  # per-ctx cache
+            mapping[b'revcache'] = {}  # per-ctx cache
         if self._hasnodespec(origmapping) and self._hasnodespec(newmapping):
-            orignode = templateutil.runsymbol(context, origmapping, 'node')
-            mapping['originalnode'] = orignode
+            orignode = templateutil.runsymbol(context, origmapping, b'node')
+            mapping[b'originalnode'] = orignode
         # put a marker to override 'ctx'/'fctx' in the mapping if any, and
         # flag its existence to be reported by availablekeys()
-        if 'ctx' not in newmapping and self._hasliteral(newmapping, 'node'):
-            mapping['ctx'] = _placeholder
-        if 'fctx' not in newmapping and self._hasliteral(newmapping, 'path'):
-            mapping['fctx'] = _placeholder
+        if b'ctx' not in newmapping and self._hasliteral(newmapping, b'node'):
+            mapping[b'ctx'] = _placeholder
+        if b'fctx' not in newmapping and self._hasliteral(newmapping, b'path'):
+            mapping[b'fctx'] = _placeholder
         return mapping
 
     def _getsome(self, mapping, key):
@@ -601,67 +700,122 @@
 
     def _hasnodespec(self, mapping):
         """Test if context revision is set or unset in the given mapping"""
-        return 'node' in mapping or 'ctx' in mapping
+        return b'node' in mapping or b'ctx' in mapping
 
     def _loadctx(self, mapping):
-        repo = self._getsome(mapping, 'repo')
-        node = self._getliteral(mapping, 'node')
+        repo = self._getsome(mapping, b'repo')
+        node = self._getliteral(mapping, b'node')
         if repo is None or node is None:
             return
         try:
             return repo[node]
         except error.RepoLookupError:
-            return None # maybe hidden/non-existent node
+            return None  # maybe hidden/non-existent node
 
     def _loadfctx(self, mapping):
-        ctx = self._getsome(mapping, 'ctx')
-        path = self._getliteral(mapping, 'path')
+        ctx = self._getsome(mapping, b'ctx')
+        path = self._getliteral(mapping, b'path')
         if ctx is None or path is None:
             return None
         try:
             return ctx[path]
         except error.LookupError:
-            return None # maybe removed file?
+            return None  # maybe removed file?
 
     _loadermap = {
-        'ctx': _loadctx,
-        'fctx': _loadfctx,
+        b'ctx': _loadctx,
+        b'fctx': _loadfctx,
     }
 
+
+def _internaltemplateformatter(
+    ui,
+    out,
+    topic,
+    opts,
+    spec,
+    tmpl,
+    docheader=b'',
+    docfooter=b'',
+    separator=b'',
+):
+    """Build template formatter that handles customizable built-in templates
+    such as -Tjson(...)"""
+    templates = {spec.ref: tmpl}
+    if docheader:
+        templates[b'%s:docheader' % spec.ref] = docheader
+    if docfooter:
+        templates[b'%s:docfooter' % spec.ref] = docfooter
+    if separator:
+        templates[b'%s:separator' % spec.ref] = separator
+    return templateformatter(
+        ui, out, topic, opts, spec, overridetemplates=templates
+    )
+
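# Editorial sketch: for -Tjson(rev, node), formatter() below arrives here
# with spec.ref == b'json' and spec.refargs == b'rev, node', so the override
# templates registered above are roughly:
#
#     {b'json': b'{dict(rev, node)|json}',
#      b'json:docheader': b'[\n ',
#      b'json:docfooter': b'\n]\n',
#      b'json:separator': b',\n '}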
+
 def formatter(ui, out, topic, opts):
-    template = opts.get("template", "")
-    if template == "cbor":
+    spec = lookuptemplate(ui, topic, opts.get(b'template', b''))
+    if spec.ref == b"cbor" and spec.refargs is not None:
+        return _internaltemplateformatter(
+            ui,
+            out,
+            topic,
+            opts,
+            spec,
+            tmpl=b'{dict(%s)|cbor}' % spec.refargs,
+            docheader=cborutil.BEGIN_INDEFINITE_ARRAY,
+            docfooter=cborutil.BREAK,
+        )
+    elif spec.ref == b"cbor":
         return cborformatter(ui, out, topic, opts)
-    elif template == "json":
+    elif spec.ref == b"json" and spec.refargs is not None:
+        return _internaltemplateformatter(
+            ui,
+            out,
+            topic,
+            opts,
+            spec,
+            tmpl=b'{dict(%s)|json}' % spec.refargs,
+            docheader=b'[\n ',
+            docfooter=b'\n]\n',
+            separator=b',\n ',
+        )
+    elif spec.ref == b"json":
         return jsonformatter(ui, out, topic, opts)
-    elif template == "pickle":
+    elif spec.ref == b"pickle":
+        assert spec.refargs is None, r'function-style not supported'
         return pickleformatter(ui, out, topic, opts)
-    elif template == "debug":
+    elif spec.ref == b"debug":
+        assert spec.refargs is None, r'function-style not supported'
         return debugformatter(ui, out, topic, opts)
-    elif template != "":
-        return templateformatter(ui, out, topic, opts)
+    elif spec.ref or spec.tmpl or spec.mapfile:
+        assert spec.refargs is None, r'function-style not supported'
+        return templateformatter(ui, out, topic, opts, spec)
     # developer config: ui.formatdebug
-    elif ui.configbool('ui', 'formatdebug'):
+    elif ui.configbool(b'ui', b'formatdebug'):
         return debugformatter(ui, out, topic, opts)
     # deprecated config: ui.formatjson
-    elif ui.configbool('ui', 'formatjson'):
+    elif ui.configbool(b'ui', b'formatjson'):
         return jsonformatter(ui, out, topic, opts)
     return plainformatter(ui, out, topic, opts)
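
# Editorial usage sketch ('ui' and the opts dict are assumed); baseformatter
# supports the 'with' statement, so end() runs on normal exit:
#
#     with formatter(ui, ui, b'files', {b'template': b'json(path)'}) as fm:
#         fm.startitem()
#         fm.data(path=b'a.txt')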
 
+
 @contextlib.contextmanager
 def openformatter(ui, filename, topic, opts):
     """Create a formatter that writes outputs to the specified file
 
     Must be invoked using the 'with' statement.
     """
-    with util.posixfile(filename, 'wb') as out:
+    with util.posixfile(filename, b'wb') as out:
         with formatter(ui, out, topic, opts) as fm:
             yield fm
 
+
 @contextlib.contextmanager
 def _neverending(fm):
     yield fm
 
+
 def maybereopen(fm, filename):
     """Create a formatter backed by file if filename specified, else return
     the given formatter
--- a/mercurial/graphmod.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/graphmod.py	Mon Oct 21 11:09:48 2019 -0400
@@ -27,15 +27,16 @@
     util,
 )
 
-CHANGESET = 'C'
-PARENT = 'P'
-GRANDPARENT = 'G'
-MISSINGPARENT = 'M'
+CHANGESET = b'C'
+PARENT = b'P'
+GRANDPARENT = b'G'
+MISSINGPARENT = b'M'
 # Style of line to draw. None signals a line that ends and is removed at this
 # point. A number prefix means only the last N characters of the current block
 # will use that style, the rest will use the PARENT style. With a - sign
 # (making N negative), all but the first N characters use that style instead.
-EDGES = {PARENT: '|', GRANDPARENT: ':', MISSINGPARENT: None}
+EDGES = {PARENT: b'|', GRANDPARENT: b':', MISSINGPARENT: None}
+
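# Editorial illustration of the prefix rule above: a style of b'2~' (e.g. set
# via the experimental graphstyle configuration) would draw the last two
# characters of the block with b'~' and the rest with the PARENT style.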
 
 def dagwalker(repo, revs):
     """cset DAG generator yielding (id, CHANGESET, ctx, [parentinfo]) tuples
@@ -57,8 +58,11 @@
         # augment the lists with markers, to inform graph drawing code about
         # what kind of edge to draw between nodes.
         pset = set(p.rev() for p in ctx.parents() if p.rev() in revs)
-        mpars = [p.rev() for p in ctx.parents()
-                 if p.rev() != nullrev and p.rev() not in pset]
+        mpars = [
+            p.rev()
+            for p in ctx.parents()
+            if p.rev() != nullrev and p.rev() not in pset
+        ]
         parents = [(PARENT, p) for p in sorted(pset)]
 
         for mpar in mpars:
@@ -68,8 +72,9 @@
                 # through all revs (issue4782)
                 if not isinstance(revs, smartset.baseset):
                     revs = smartset.baseset(revs)
-                gp = gpcache[mpar] = sorted(set(dagop.reachableroots(
-                    repo, revs, [mpar])))
+                gp = gpcache[mpar] = sorted(
+                    set(dagop.reachableroots(repo, revs, [mpar]))
+                )
             if not gp:
                 parents.append((MISSINGPARENT, mpar))
                 pset.add(mpar)
@@ -79,6 +84,7 @@
 
         yield (ctx.rev(), CHANGESET, ctx, parents)
 
+
 def nodes(repo, nodes):
     """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples
 
@@ -88,10 +94,12 @@
     include = set(nodes)
     for node in nodes:
         ctx = repo[node]
-        parents = set((PARENT, p.rev()) for p in ctx.parents()
-                      if p.node() in include)
+        parents = set(
+            (PARENT, p.rev()) for p in ctx.parents() if p.node() in include
+        )
         yield (ctx.rev(), CHANGESET, ctx, sorted(parents))
 
+
 def colored(dag, repo):
     """annotates a DAG with colored edge information
 
@@ -110,18 +118,19 @@
     newcolor = 1
     config = {}
 
-    for key, val in repo.ui.configitems('graph'):
-        if '.' in key:
-            branch, setting = key.rsplit('.', 1)
+    for key, val in repo.ui.configitems(b'graph'):
+        if b'.' in key:
+            branch, setting = key.rsplit(b'.', 1)
             # Validation
-            if setting == "width" and val.isdigit():
+            if setting == b"width" and val.isdigit():
                 config.setdefault(branch, {})[setting] = int(val)
-            elif setting == "color" and val.isalnum():
+            elif setting == b"color" and val.isalnum():
                 config.setdefault(branch, {})[setting] = val
 
     if config:
         getconf = util.lrucachefunc(
-            lambda rev: config.get(repo[rev].branch(), {}))
+            lambda rev: config.get(repo[rev].branch(), {})
+        )
     else:
         getconf = lambda rev: {}
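
# Editorial example of the [graph] config section parsed above (the branch
# name is hypothetical; width must be an integer, color alphanumeric):
#
#     [graph]
#     default.width = 3
#     default.color = FF0000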
 
@@ -129,7 +138,7 @@
 
         # Compute seen and next
         if cur not in seen:
-            seen.append(cur) # new head
+            seen.append(cur)  # new head
             colors[cur] = newcolor
             newcolor += 1
 
@@ -139,7 +148,7 @@
 
         # Add parents to next
         addparents = [p for pt, p in parents if p not in next]
-        next[col:col + 1] = addparents
+        next[col : col + 1] = addparents
 
         # Set colors for the parents
         for i, p in enumerate(addparents):
@@ -154,25 +163,36 @@
         for ecol, eid in enumerate(seen):
             if eid in next:
                 bconf = getconf(eid)
-                edges.append((
-                    ecol, next.index(eid), colors[eid],
-                    bconf.get('width', -1),
-                    bconf.get('color', '')))
+                edges.append(
+                    (
+                        ecol,
+                        next.index(eid),
+                        colors[eid],
+                        bconf.get(b'width', -1),
+                        bconf.get(b'color', b''),
+                    )
+                )
             elif eid == cur:
                 for ptype, p in parents:
                     bconf = getconf(p)
-                    edges.append((
-                        ecol, next.index(p), color,
-                        bconf.get('width', -1),
-                        bconf.get('color', '')))
+                    edges.append(
+                        (
+                            ecol,
+                            next.index(p),
+                            color,
+                            bconf.get(b'width', -1),
+                            bconf.get(b'color', b''),
+                        )
+                    )
 
         # Yield and move on
         yield (cur, type, data, (col, color), edges)
         seen = next
 
+
 def asciiedges(type, char, state, rev, parents):
     """adds edge info to changelog DAG walk suitable for ascii()"""
-    seen = state['seen']
+    seen = state[b'seen']
     if rev not in seen:
         seen.append(rev)
     nodeidx = seen.index(rev)
@@ -187,12 +207,12 @@
             knownparents.append(parent)
         else:
             newparents.append(parent)
-            state['edges'][parent] = state['styles'].get(ptype, '|')
+            state[b'edges'][parent] = state[b'styles'].get(ptype, b'|')
 
     ncols = len(seen)
     width = 1 + ncols * 2
     nextseen = seen[:]
-    nextseen[nodeidx:nodeidx + 1] = newparents
+    nextseen[nodeidx : nodeidx + 1] = newparents
     edges = [(nodeidx, nextseen.index(p)) for p in knownparents]
 
     seen[:] = nextseen
@@ -206,7 +226,7 @@
         nmorecols = 1
         width += 2
         yield (type, char, width, (nodeidx, edges, ncols, nmorecols))
-        char = '\\'
+        char = b'\\'
         nodeidx += 1
         ncols += 1
         edges = []
@@ -220,50 +240,53 @@
     if nmorecols > 0:
         width += 2
     # remove current node from edge characters, no longer needed
-    state['edges'].pop(rev, None)
+    state[b'edges'].pop(rev, None)
     yield (type, char, width, (nodeidx, edges, ncols, nmorecols))
 
+
 def _fixlongrightedges(edges):
     for (i, (start, end)) in enumerate(edges):
         if end > start:
             edges[i] = (start, end + 1)
 
-def _getnodelineedgestail(
-        echars, idx, pidx, ncols, coldiff, pdiff, fix_tail):
+
+def _getnodelineedgestail(echars, idx, pidx, ncols, coldiff, pdiff, fix_tail):
     if fix_tail and coldiff == pdiff and coldiff != 0:
         # Still going in the same non-vertical direction.
         if coldiff == -1:
             start = max(idx + 1, pidx)
-            tail = echars[idx * 2:(start - 1) * 2]
-            tail.extend(["/", " "] * (ncols - start))
+            tail = echars[idx * 2 : (start - 1) * 2]
+            tail.extend([b"/", b" "] * (ncols - start))
             return tail
         else:
-            return ["\\", " "] * (ncols - idx - 1)
+            return [b"\\", b" "] * (ncols - idx - 1)
     else:
-        remainder = (ncols - idx - 1)
-        return echars[-(remainder * 2):] if remainder > 0 else []
+        remainder = ncols - idx - 1
+        return echars[-(remainder * 2) :] if remainder > 0 else []
+
 
 def _drawedges(echars, edges, nodeline, interline):
     for (start, end) in edges:
         if start == end + 1:
-            interline[2 * end + 1] = "/"
+            interline[2 * end + 1] = b"/"
         elif start == end - 1:
-            interline[2 * start + 1] = "\\"
+            interline[2 * start + 1] = b"\\"
         elif start == end:
             interline[2 * start] = echars[2 * start]
         else:
             if 2 * end >= len(nodeline):
                 continue
-            nodeline[2 * end] = "+"
+            nodeline[2 * end] = b"+"
             if start > end:
                 (start, end) = (end, start)
             for i in range(2 * start + 1, 2 * end):
-                if nodeline[i] != "+":
-                    nodeline[i] = "-"
+                if nodeline[i] != b"+":
+                    nodeline[i] = b"-"
+
 
 def _getpaddingline(echars, idx, ncols, edges):
     # all edges up to the current node
-    line = echars[:idx * 2]
+    line = echars[: idx * 2]
     # an edge for the current node, if there is one
     if (idx, idx - 1) in edges or (idx, idx) in edges:
         # (idx, idx - 1)      (idx, idx)
@@ -272,15 +295,16 @@
         # | | X |           | X | |
         # | |/ /            | |/ /
         # | | |             | | |
-        line.extend(echars[idx * 2:(idx + 1) * 2])
+        line.extend(echars[idx * 2 : (idx + 1) * 2])
     else:
-        line.extend([' ', ' '])
+        line.extend([b' ', b' '])
     # all edges to the right of the current node
     remainder = ncols - idx - 1
     if remainder > 0:
-        line.extend(echars[-(remainder * 2):])
+        line.extend(echars[-(remainder * 2) :])
     return line
 
+
 def _drawendinglines(lines, extra, edgemap, seen, state):
     """Draw ending lines for missing parent edges
 
@@ -298,7 +322,7 @@
     while edgechars and edgechars[-1] is None:
         edgechars.pop()
     shift_size = max((edgechars.count(None) * 2) - 1, 0)
-    minlines = 3 if not state['graphshorten'] else 2
+    minlines = 3 if not state[b'graphshorten'] else 2
     while len(lines) < minlines + shift_size:
         lines.append(extra[:])
 
@@ -314,17 +338,17 @@
         targets = list(range(first_empty, first_empty + len(toshift) * 2, 2))
         positions = toshift[:]
         for line in lines[-shift_size:]:
-            line[first_empty:] = [' '] * (len(line) - first_empty)
+            line[first_empty:] = [b' '] * (len(line) - first_empty)
             for i in range(len(positions)):
                 pos = positions[i] - 1
                 positions[i] = max(pos, targets[i])
-                line[pos] = '/' if pos > targets[i] else extra[toshift[i]]
+                line[pos] = b'/' if pos > targets[i] else extra[toshift[i]]
 
-    map = {1: '|', 2: '~'} if not state['graphshorten'] else {1: '~'}
+    map = {1: b'|', 2: b'~'} if not state[b'graphshorten'] else {1: b'~'}
     for i, line in enumerate(lines):
         if None not in line:
             continue
-        line[:] = [c or map.get(i, ' ') for c in line]
+        line[:] = [c or map.get(i, b' ') for c in line]
 
     # remove edges that ended
     remove = [p for p, c in edgemap.items() if c is None]
@@ -332,17 +356,19 @@
         del edgemap[parent]
         seen.remove(parent)
 
+
 def asciistate():
     """returns the initial value for the "state" argument to ascii()"""
     return {
-        'seen': [],
-        'edges': {},
-        'lastcoldiff': 0,
-        'lastindex': 0,
-        'styles': EDGES.copy(),
-        'graphshorten': False,
+        b'seen': [],
+        b'edges': {},
+        b'lastcoldiff': 0,
+        b'lastindex': 0,
+        b'styles': EDGES.copy(),
+        b'graphshorten': False,
     }
 
+
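# Editorial sketch of the usual driver loop over this API (caller names
# assumed; 'text' is the list of log lines for the changeset):
#
#     state = asciistate()
#     for rev, gtype, ctx, parents in dagwalker(repo, revs):
#         for gtype, char, width, coldata in asciiedges(
#             gtype, b'o', state, rev, parents
#         ):
#             ascii(ui, state, gtype, char, text, coldata)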
 def outputgraph(ui, graph):
     """outputs an ASCII graph of a DAG
 
@@ -357,7 +383,8 @@
     without needing to mimic all of the edge-fixup logic in ascii()
     """
     for (ln, logstr) in graph:
-        ui.write((ln + logstr).rstrip() + "\n")
+        ui.write((ln + logstr).rstrip() + b"\n")
+
 
 def ascii(ui, state, type, char, text, coldata):
     """prints an ASCII graph of the DAG
@@ -382,11 +409,11 @@
     idx, edges, ncols, coldiff = coldata
     assert -2 < coldiff < 2
 
-    edgemap, seen = state['edges'], state['seen']
+    edgemap, seen = state[b'edges'], state[b'seen']
     # Be tolerant of history issues; make sure we have at least ncols + coldiff
     # elements to work with. See test-glog.t for broken history test cases.
-    echars = [c for p in seen for c in (edgemap.get(p, '|'), ' ')]
-    echars.extend(('|', ' ') * max(ncols + coldiff - len(seen), 0))
+    echars = [c for p in seen for c in (edgemap.get(p, b'|'), b' ')]
+    echars.extend((b'|', b' ') * max(ncols + coldiff - len(seen), 0))
 
     if coldiff == -1:
         # Transform
@@ -404,8 +431,9 @@
     #     |  / /         |   | |  # <--- padding line
     #     o | |          |  / /
     #                    o | |
-    add_padding_line = (len(text) > 2 and coldiff == -1 and
-                        [x for (x, y) in edges if x + 1 < y])
+    add_padding_line = (
+        len(text) > 2 and coldiff == -1 and [x for (x, y) in edges if x + 1 < y]
+    )
 
     # fix_nodeline_tail says whether to rewrite
     #
@@ -417,28 +445,35 @@
     fix_nodeline_tail = len(text) <= 2 and not add_padding_line
 
     # nodeline is the line containing the node character (typically o)
-    nodeline = echars[:idx * 2]
-    nodeline.extend([char, " "])
+    nodeline = echars[: idx * 2]
+    nodeline.extend([char, b" "])
 
     nodeline.extend(
         _getnodelineedgestail(
-            echars, idx, state['lastindex'], ncols, coldiff,
-            state['lastcoldiff'], fix_nodeline_tail))
+            echars,
+            idx,
+            state[b'lastindex'],
+            ncols,
+            coldiff,
+            state[b'lastcoldiff'],
+            fix_nodeline_tail,
+        )
+    )
 
     # shift_interline is the line containing the non-vertical
     # edges between this entry and the next
-    shift_interline = echars[:idx * 2]
+    shift_interline = echars[: idx * 2]
     for i in pycompat.xrange(2 + coldiff):
-        shift_interline.append(' ')
+        shift_interline.append(b' ')
     count = ncols - idx - 1
     if coldiff == -1:
         for i in pycompat.xrange(count):
-            shift_interline.extend(['/', ' '])
+            shift_interline.extend([b'/', b' '])
     elif coldiff == 0:
-        shift_interline.extend(echars[(idx + 1) * 2:ncols * 2])
+        shift_interline.extend(echars[(idx + 1) * 2 : ncols * 2])
     else:
         for i in pycompat.xrange(count):
-            shift_interline.extend(['\\', ' '])
+            shift_interline.extend([b'\\', b' '])
 
     # draw edges from the current node to its parents
     _drawedges(echars, edges, nodeline, shift_interline)
@@ -450,7 +485,7 @@
 
     # If the 'graphshorten' config option is set, only draw shift_interline
     # when there is any non-vertical flow in the graph.
-    if state['graphshorten']:
+    if state[b'graphshorten']:
         if any(c in br'\/' for c in shift_interline if c):
             lines.append(shift_interline)
     # Else, 'graphshorten' is not set, so always draw shift_interline.
@@ -459,7 +494,7 @@
 
     # make sure that there are as many graph lines as there are
     # log strings
-    extra_interline = echars[:(ncols + coldiff) * 2]
+    extra_interline = echars[: (ncols + coldiff) * 2]
     if len(lines) < len(text):
         while len(lines) < len(text):
             lines.append(extra_interline[:])
@@ -467,13 +502,15 @@
     _drawendinglines(lines, extra_interline, edgemap, seen, state)
 
     while len(text) < len(lines):
-        text.append("")
+        text.append(b"")
 
     # print lines
     indentation_level = max(ncols, ncols + coldiff)
-    lines = ["%-*s " % (2 * indentation_level, "".join(line)) for line in lines]
+    lines = [
+        b"%-*s " % (2 * indentation_level, b"".join(line)) for line in lines
+    ]
     outputgraph(ui, zip(lines, text))
 
     # ... and start over
-    state['lastcoldiff'] = coldiff
-    state['lastindex'] = idx
+    state[b'lastcoldiff'] = coldiff
+    state[b'lastindex'] = idx
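The mechanical b'' prefixing above is what keeps this module working on
Python 3, where str and bytes are distinct dict keys. A minimal, standalone
sketch of the failure mode being avoided (hypothetical names, not part of
this changeset)::

    # Build the state dict with bytes keys, as asciistate() now does.
    state = {b'graphshorten': False}

    assert b'graphshorten' in state  # bytes lookup: found
    # On Python 3 the str literal is a *different* key, so a mixed
    # lookup silently misses instead of raising:
    assert 'graphshorten' not in state

    # On Python 2, b'...' and '...' are the same type, so either lookup
    # works; prefixing every literal keeps one code base correct on both.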
--- a/mercurial/hbisect.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hbisect.py	Mon Oct 21 11:09:48 2019 -0400
@@ -17,9 +17,8 @@
     hex,
     short,
 )
-from . import (
-    error,
-)
+from . import error
+
 
 def bisect(repo, state):
     """find the next node (if any) for testing during a bisect search.
@@ -35,29 +34,34 @@
     repo = repo.unfiltered()
     changelog = repo.changelog
     clparents = changelog.parentrevs
-    skip = {changelog.rev(n) for n in state['skip']}
+    skip = {changelog.rev(n) for n in state[b'skip']}
 
     def buildancestors(bad, good):
         badrev = min([changelog.rev(n) for n in bad])
         ancestors = collections.defaultdict(lambda: None)
-        for rev in repo.revs("descendants(%ln) - ancestors(%ln)", good, good):
+        for rev in repo.revs(b"descendants(%ln) - ancestors(%ln)", good, good):
             ancestors[rev] = []
         if ancestors[badrev] is None:
             return badrev, None
         return badrev, ancestors
 
     good = False
-    badrev, ancestors = buildancestors(state['bad'], state['good'])
-    if not ancestors: # looking for bad to good transition?
+    badrev, ancestors = buildancestors(state[b'bad'], state[b'good'])
+    if not ancestors:  # looking for bad to good transition?
         good = True
-        badrev, ancestors = buildancestors(state['good'], state['bad'])
+        badrev, ancestors = buildancestors(state[b'good'], state[b'bad'])
     bad = changelog.node(badrev)
-    if not ancestors: # now we're confused
-        if (len(state['bad']) == 1 and len(state['good']) == 1 and
-            state['bad'] != state['good']):
-            raise error.Abort(_("starting revisions are not directly related"))
-        raise error.Abort(_("inconsistent state, %d:%s is good and bad")
-                         % (badrev, short(bad)))
+    if not ancestors:  # now we're confused
+        if (
+            len(state[b'bad']) == 1
+            and len(state[b'good']) == 1
+            and state[b'bad'] != state[b'good']
+        ):
+            raise error.Abort(_(b"starting revisions are not directly related"))
+        raise error.Abort(
+            _(b"inconsistent state, %d:%s is good and bad")
+            % (badrev, short(bad))
+        )
 
     # build children dict
     children = {}
@@ -97,16 +101,16 @@
         a = ancestors[rev] or [rev]
         ancestors[rev] = None
 
-        x = len(a) # number of ancestors
-        y = tot - x # number of non-ancestors
-        value = min(x, y) # how good is this test?
+        x = len(a)  # number of ancestors
+        y = tot - x  # number of non-ancestors
+        value = min(x, y)  # how good is this test?
         if value > best_len and rev not in skip:
             best_len = value
             best_rev = rev
-            if value == perfect: # found a perfect candidate? quit early
+            if value == perfect:  # found a perfect candidate? quit early
                 break
 
-        if y < perfect and rev not in skip: # all downhill from here?
+        if y < perfect and rev not in skip:  # all downhill from here?
             # poison children
             poison.update(children.get(rev, []))
             continue
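The x/y/value bookkeeping above is the heart of the candidate search:
whichever way a test turns out, at least min(x, y) revisions are ruled out,
so the best candidate maximizes that floor. A standalone sketch
(hypothetical helper; assumes, as in this function, that a perfect test
halves the candidate set)::

    def score(num_ancestors, total):
        # min(x, y) = revisions eliminated in the worst case
        return min(num_ancestors, total - num_ancestors)

    tot = 10
    perfect = tot // 2
    assert score(5, tot) == perfect  # halves the search space: quit early
    assert score(1, tot) == 1        # poor test: may rule out a single rev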
@@ -122,54 +126,59 @@
 
     return ([best_node], tot, good)
 
+
 def extendrange(repo, state, nodes, good):
     # bisect is incomplete when it ends on a merge node and
     # one of the parents was not checked.
     parents = repo[nodes[0]].parents()
     if len(parents) > 1:
         if good:
-            side = state['bad']
+            side = state[b'bad']
         else:
-            side = state['good']
+            side = state[b'good']
         num = len(set(i.node() for i in parents) & set(side))
         if num == 1:
             return parents[0].ancestor(parents[1])
     return None
 
+
 def load_state(repo):
-    state = {'current': [], 'good': [], 'bad': [], 'skip': []}
-    for l in repo.vfs.tryreadlines("bisect.state"):
+    state = {b'current': [], b'good': [], b'bad': [], b'skip': []}
+    for l in repo.vfs.tryreadlines(b"bisect.state"):
         kind, node = l[:-1].split()
         node = repo.unfiltered().lookup(node)
         if kind not in state:
-            raise error.Abort(_("unknown bisect kind %s") % kind)
+            raise error.Abort(_(b"unknown bisect kind %s") % kind)
         state[kind].append(node)
     return state
 
 
 def save_state(repo, state):
-    f = repo.vfs("bisect.state", "w", atomictemp=True)
+    f = repo.vfs(b"bisect.state", b"w", atomictemp=True)
     with repo.wlock():
         for kind in sorted(state):
             for node in state[kind]:
-                f.write("%s %s\n" % (kind, hex(node)))
+                f.write(b"%s %s\n" % (kind, hex(node)))
         f.close()
 
+
 def resetstate(repo):
     """remove any bisect state from the repository"""
-    if repo.vfs.exists("bisect.state"):
-        repo.vfs.unlink("bisect.state")
+    if repo.vfs.exists(b"bisect.state"):
+        repo.vfs.unlink(b"bisect.state")
+
 
 def checkstate(state):
     """check we have both 'good' and 'bad' to define a range
 
     Raise an Abort exception otherwise."""
-    if state['good'] and state['bad']:
+    if state[b'good'] and state[b'bad']:
         return True
-    if not state['good']:
-        raise error.Abort(_('cannot bisect (no known good revisions)'))
+    if not state[b'good']:
+        raise error.Abort(_(b'cannot bisect (no known good revisions)'))
     else:
-        raise error.Abort(_('cannot bisect (no known bad revisions)'))
+        raise error.Abort(_(b'cannot bisect (no known bad revisions)'))
+
 
 def get(repo, status):
     """
@@ -184,7 +193,7 @@
     - ``current``            : the cset currently being bisected
     """
     state = load_state(repo)
-    if status in ('good', 'bad', 'skip', 'current'):
+    if status in (b'good', b'bad', b'skip', b'current'):
         return map(repo.unfiltered().changelog.rev, state[status])
     else:
         # In the following sets, we do *not* call 'bisect()' with more
@@ -195,102 +204,116 @@
         # 'range' is all csets that make up the bisection:
         #   - have a good ancestor and a bad descendant, or conversely
         # that's because the bisection can go either way
-        range = '( bisect(bad)::bisect(good) | bisect(good)::bisect(bad) )'
+        range = b'( bisect(bad)::bisect(good) | bisect(good)::bisect(bad) )'
 
-        _t = repo.revs('bisect(good)::bisect(bad)')
+        _t = repo.revs(b'bisect(good)::bisect(bad)')
         # The sets of topologically good or bad csets
         if len(_t) == 0:
             # Goods are topologically after bads
-            goods = 'bisect(good)::'    # Pruned good csets
-            bads  = '::bisect(bad)'     # Pruned bad csets
+            goods = b'bisect(good)::'  # Pruned good csets
+            bads = b'::bisect(bad)'  # Pruned bad csets
         else:
             # Goods are topologically before bads
-            goods = '::bisect(good)'    # Pruned good csets
-            bads  = 'bisect(bad)::'     # Pruned bad csets
+            goods = b'::bisect(good)'  # Pruned good csets
+            bads = b'bisect(bad)::'  # Pruned bad csets
 
         # 'pruned' is all csets whose fate is already known: good, bad, skip
-        skips = 'bisect(skip)'                 # Pruned skipped csets
-        pruned = '( (%s) | (%s) | (%s) )' % (goods, bads, skips)
+        skips = b'bisect(skip)'  # Pruned skipped csets
+        pruned = b'( (%s) | (%s) | (%s) )' % (goods, bads, skips)
 
         # 'untested' is all csets that are in 'range', but not in 'pruned'
-        untested = '( (%s) - (%s) )' % (range, pruned)
+        untested = b'( (%s) - (%s) )' % (range, pruned)
 
         # 'ignored' is all csets that were not used during the bisection
         # due to DAG topology, but may nevertheless have had an impact.
         # E.g., a branch merged between bads and goods, but whose branch-
         # point is outside of the range.
-        iba = '::bisect(bad) - ::bisect(good)'  # Ignored bads' ancestors
-        iga = '::bisect(good) - ::bisect(bad)'  # Ignored goods' ancestors
-        ignored = '( ( (%s) | (%s) ) - (%s) )' % (iba, iga, range)
+        iba = b'::bisect(bad) - ::bisect(good)'  # Ignored bads' ancestors
+        iga = b'::bisect(good) - ::bisect(bad)'  # Ignored goods' ancestors
+        ignored = b'( ( (%s) | (%s) ) - (%s) )' % (iba, iga, range)
 
-        if status == 'range':
+        if status == b'range':
             return repo.revs(range)
-        elif status == 'pruned':
+        elif status == b'pruned':
             return repo.revs(pruned)
-        elif status == 'untested':
+        elif status == b'untested':
             return repo.revs(untested)
-        elif status == 'ignored':
+        elif status == b'ignored':
             return repo.revs(ignored)
-        elif status == "goods":
+        elif status == b"goods":
             return repo.revs(goods)
-        elif status == "bads":
+        elif status == b"bads":
             return repo.revs(bads)
         else:
-            raise error.ParseError(_('invalid bisect state'))
+            raise error.ParseError(_(b'invalid bisect state'))
+
 
 def label(repo, node):
     rev = repo.changelog.rev(node)
 
     # Try explicit sets
-    if rev in get(repo, 'good'):
+    if rev in get(repo, b'good'):
         # i18n: bisect changeset status
-        return _('good')
-    if rev in get(repo, 'bad'):
+        return _(b'good')
+    if rev in get(repo, b'bad'):
         # i18n: bisect changeset status
-        return _('bad')
-    if rev in get(repo, 'skip'):
+        return _(b'bad')
+    if rev in get(repo, b'skip'):
         # i18n: bisect changeset status
-        return _('skipped')
-    if rev in get(repo, 'untested') or rev in get(repo, 'current'):
+        return _(b'skipped')
+    if rev in get(repo, b'untested') or rev in get(repo, b'current'):
         # i18n: bisect changeset status
-        return _('untested')
-    if rev in get(repo, 'ignored'):
+        return _(b'untested')
+    if rev in get(repo, b'ignored'):
         # i18n: bisect changeset status
-        return _('ignored')
+        return _(b'ignored')
 
     # Try implicit sets
-    if rev in get(repo, 'goods'):
+    if rev in get(repo, b'goods'):
         # i18n: bisect changeset status
-        return _('good (implicit)')
-    if rev in get(repo, 'bads'):
+        return _(b'good (implicit)')
+    if rev in get(repo, b'bads'):
         # i18n: bisect changeset status
-        return _('bad (implicit)')
+        return _(b'bad (implicit)')
 
     return None
 
+
 def printresult(ui, repo, state, displayer, nodes, good):
     repo = repo.unfiltered()
     if len(nodes) == 1:
         # narrowed it down to a single revision
         if good:
-            ui.write(_("The first good revision is:\n"))
+            ui.write(_(b"The first good revision is:\n"))
         else:
-            ui.write(_("The first bad revision is:\n"))
+            ui.write(_(b"The first bad revision is:\n"))
         displayer.show(repo[nodes[0]])
         extendnode = extendrange(repo, state, nodes, good)
         if extendnode is not None:
-            ui.write(_('Not all ancestors of this changeset have been'
-                       ' checked.\nUse bisect --extend to continue the '
-                       'bisection from\nthe common ancestor, %s.\n')
-                     % extendnode)
+            ui.write(
+                _(
+                    b'Not all ancestors of this changeset have been'
+                    b' checked.\nUse bisect --extend to continue the '
+                    b'bisection from\nthe common ancestor, %s.\n'
+                )
+                % extendnode
+            )
     else:
         # multiple possible revisions
         if good:
-            ui.write(_("Due to skipped revisions, the first "
-                    "good revision could be any of:\n"))
+            ui.write(
+                _(
+                    b"Due to skipped revisions, the first "
+                    b"good revision could be any of:\n"
+                )
+            )
         else:
-            ui.write(_("Due to skipped revisions, the first "
-                    "bad revision could be any of:\n"))
+            ui.write(
+                _(
+                    b"Due to skipped revisions, the first "
+                    b"bad revision could be any of:\n"
+                )
+            )
         for n in nodes:
             displayer.show(repo[n])
     displayer.close()
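The named sets computed in get() above are plain string algebra over revset
expressions; nothing is resolved until the final repo.revs() call parses and
evaluates the template. A standalone sketch of the composition, mirroring
the expressions built above (assuming goods are topologically before bads)::

    range_ = b'( bisect(bad)::bisect(good) | bisect(good)::bisect(bad) )'
    goods = b'::bisect(good)'   # pruned good csets
    bads = b'bisect(bad)::'     # pruned bad csets
    skips = b'bisect(skip)'     # pruned skipped csets

    pruned = b'( (%s) | (%s) | (%s) )' % (goods, bads, skips)
    untested = b'( (%s) - (%s) )' % (range_, pruned)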
--- a/mercurial/help.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/help.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,6 +16,7 @@
     _,
     gettext,
 )
+from .pycompat import getattr
 from . import (
     cmdutil,
     encoding,
@@ -34,23 +35,19 @@
     ui as uimod,
     util,
 )
-from .hgweb import (
-    webcommands,
-)
-from .utils import (
-    compression,
-)
+from .hgweb import webcommands
+from .utils import compression
 
 _exclkeywords = {
-    "(ADVANCED)",
-    "(DEPRECATED)",
-    "(EXPERIMENTAL)",
+    b"(ADVANCED)",
+    b"(DEPRECATED)",
+    b"(EXPERIMENTAL)",
     # i18n: "(ADVANCED)" is a keyword, must be translated consistently
-    _("(ADVANCED)"),
+    _(b"(ADVANCED)"),
     # i18n: "(DEPRECATED)" is a keyword, must be translated consistently
-    _("(DEPRECATED)"),
+    _(b"(DEPRECATED)"),
     # i18n: "(EXPERIMENTAL)" is a keyword, must be translated consistently
-    _("(EXPERIMENTAL)"),
+    _(b"(EXPERIMENTAL)"),
 }
 
 # The order in which command categories will be displayed.
@@ -64,7 +61,7 @@
     registrar.command.CATEGORY_CHANGE_MANAGEMENT,
     registrar.command.CATEGORY_CHANGE_ORGANIZATION,
     registrar.command.CATEGORY_FILE_CONTENTS,
-    registrar.command.CATEGORY_CHANGE_NAVIGATION ,
+    registrar.command.CATEGORY_CHANGE_NAVIGATION,
     registrar.command.CATEGORY_WORKING_DIRECTORY,
     registrar.command.CATEGORY_IMPORT_EXPORT,
     registrar.command.CATEGORY_MAINTENANCE,
@@ -76,30 +73,28 @@
 # Human-readable category names. These are translated.
 # Extensions with custom categories should add their names here.
 CATEGORY_NAMES = {
-    registrar.command.CATEGORY_REPO_CREATION: 'Repository creation',
-    registrar.command.CATEGORY_REMOTE_REPO_MANAGEMENT:
-        'Remote repository management',
-    registrar.command.CATEGORY_COMMITTING: 'Change creation',
-    registrar.command.CATEGORY_CHANGE_NAVIGATION: 'Change navigation',
-    registrar.command.CATEGORY_CHANGE_MANAGEMENT: 'Change manipulation',
-    registrar.command.CATEGORY_CHANGE_ORGANIZATION: 'Change organization',
-    registrar.command.CATEGORY_WORKING_DIRECTORY:
-        'Working directory management',
-    registrar.command.CATEGORY_FILE_CONTENTS: 'File content management',
-    registrar.command.CATEGORY_IMPORT_EXPORT: 'Change import/export',
-    registrar.command.CATEGORY_MAINTENANCE: 'Repository maintenance',
-    registrar.command.CATEGORY_HELP: 'Help',
-    registrar.command.CATEGORY_MISC: 'Miscellaneous commands',
-    registrar.command.CATEGORY_NONE: 'Uncategorized commands',
+    registrar.command.CATEGORY_REPO_CREATION: b'Repository creation',
+    registrar.command.CATEGORY_REMOTE_REPO_MANAGEMENT: b'Remote repository management',
+    registrar.command.CATEGORY_COMMITTING: b'Change creation',
+    registrar.command.CATEGORY_CHANGE_NAVIGATION: b'Change navigation',
+    registrar.command.CATEGORY_CHANGE_MANAGEMENT: b'Change manipulation',
+    registrar.command.CATEGORY_CHANGE_ORGANIZATION: b'Change organization',
+    registrar.command.CATEGORY_WORKING_DIRECTORY: b'Working directory management',
+    registrar.command.CATEGORY_FILE_CONTENTS: b'File content management',
+    registrar.command.CATEGORY_IMPORT_EXPORT: b'Change import/export',
+    registrar.command.CATEGORY_MAINTENANCE: b'Repository maintenance',
+    registrar.command.CATEGORY_HELP: b'Help',
+    registrar.command.CATEGORY_MISC: b'Miscellaneous commands',
+    registrar.command.CATEGORY_NONE: b'Uncategorized commands',
 }
 
 # Topic categories.
-TOPIC_CATEGORY_IDS = 'ids'
-TOPIC_CATEGORY_OUTPUT = 'output'
-TOPIC_CATEGORY_CONFIG = 'config'
-TOPIC_CATEGORY_CONCEPTS = 'concepts'
-TOPIC_CATEGORY_MISC = 'misc'
-TOPIC_CATEGORY_NONE = 'none'
+TOPIC_CATEGORY_IDS = b'ids'
+TOPIC_CATEGORY_OUTPUT = b'output'
+TOPIC_CATEGORY_CONFIG = b'config'
+TOPIC_CATEGORY_CONCEPTS = b'concepts'
+TOPIC_CATEGORY_MISC = b'misc'
+TOPIC_CATEGORY_NONE = b'none'
 
 # The order in which topic categories will be displayed.
 # Extensions with custom categories should insert them into this list
@@ -116,35 +111,46 @@
 
 # Human-readable topic category names. These are translated.
 TOPIC_CATEGORY_NAMES = {
-    TOPIC_CATEGORY_IDS: 'Mercurial identifiers',
-    TOPIC_CATEGORY_OUTPUT: 'Mercurial output',
-    TOPIC_CATEGORY_CONFIG: 'Mercurial configuration',
-    TOPIC_CATEGORY_CONCEPTS: 'Concepts',
-    TOPIC_CATEGORY_MISC: 'Miscellaneous',
-    TOPIC_CATEGORY_NONE: 'Uncategorized topics',
+    TOPIC_CATEGORY_IDS: b'Mercurial identifiers',
+    TOPIC_CATEGORY_OUTPUT: b'Mercurial output',
+    TOPIC_CATEGORY_CONFIG: b'Mercurial configuration',
+    TOPIC_CATEGORY_CONCEPTS: b'Concepts',
+    TOPIC_CATEGORY_MISC: b'Miscellaneous',
+    TOPIC_CATEGORY_NONE: b'Uncategorized topics',
 }
 
+
 def listexts(header, exts, indent=1, showdeprecated=False):
     '''return a text listing of the given extensions'''
     rst = []
     if exts:
-        for name, desc in sorted(exts.iteritems()):
+        for name, desc in sorted(pycompat.iteritems(exts)):
             if not showdeprecated and any(w in desc for w in _exclkeywords):
                 continue
-            rst.append('%s:%s: %s\n' % (' ' * indent, name, desc))
+            rst.append(b'%s:%s: %s\n' % (b' ' * indent, name, desc))
     if rst:
-        rst.insert(0, '\n%s\n\n' % header)
+        rst.insert(0, b'\n%s\n\n' % header)
     return rst
 
+
 def extshelp(ui):
-    rst = loaddoc('extensions')(ui).splitlines(True)
-    rst.extend(listexts(
-        _('enabled extensions:'), extensions.enabled(), showdeprecated=True))
-    rst.extend(listexts(_('disabled extensions:'), extensions.disabled(),
-                        showdeprecated=ui.verbose))
-    doc = ''.join(rst)
+    rst = loaddoc(b'extensions')(ui).splitlines(True)
+    rst.extend(
+        listexts(
+            _(b'enabled extensions:'), extensions.enabled(), showdeprecated=True
+        )
+    )
+    rst.extend(
+        listexts(
+            _(b'disabled extensions:'),
+            extensions.disabled(),
+            showdeprecated=ui.verbose,
+        )
+    )
+    doc = b''.join(rst)
     return doc
 
+
 def optrst(header, options, verbose):
     data = []
     multioccur = False
@@ -153,17 +159,17 @@
             shortopt, longopt, default, desc, optlabel = option
         else:
             shortopt, longopt, default, desc = option
-            optlabel = _("VALUE") # default label
+            optlabel = _(b"VALUE")  # default label
 
         if not verbose and any(w in desc for w in _exclkeywords):
             continue
 
-        so = ''
+        so = b''
         if shortopt:
-            so = '-' + shortopt
-        lo = '--' + longopt
+            so = b'-' + shortopt
+        lo = b'--' + longopt
         if default is True:
-            lo = '--[no-]' + longopt
+            lo = b'--[no-]' + longopt
 
         if isinstance(default, fancyopts.customopt):
             default = default.getdefaultvalue()
@@ -174,32 +180,34 @@
             # then convert it to bytes.
             defaultstr = pycompat.bytestr(default)
             if default is True:
-                defaultstr = _("on")
-            desc += _(" (default: %s)") % defaultstr
+                defaultstr = _(b"on")
+            desc += _(b" (default: %s)") % defaultstr
 
         if isinstance(default, list):
-            lo += " %s [+]" % optlabel
+            lo += b" %s [+]" % optlabel
             multioccur = True
         elif (default is not None) and not isinstance(default, bool):
-            lo += " %s" % optlabel
+            lo += b" %s" % optlabel
 
         data.append((so, lo, desc))
 
     if multioccur:
-        header += (_(" ([+] can be repeated)"))
+        header += _(b" ([+] can be repeated)")
 
-    rst = ['\n%s:\n\n' % header]
+    rst = [b'\n%s:\n\n' % header]
     rst.extend(minirst.maketable(data, 1))
 
-    return ''.join(rst)
+    return b''.join(rst)
+
 
 def indicateomitted(rst, omitted, notomitted=None):
-    rst.append('\n\n.. container:: omitted\n\n    %s\n\n' % omitted)
+    rst.append(b'\n\n.. container:: omitted\n\n    %s\n\n' % omitted)
     if notomitted:
-        rst.append('\n\n.. container:: notomitted\n\n    %s\n\n' % notomitted)
+        rst.append(b'\n\n.. container:: notomitted\n\n    %s\n\n' % notomitted)
+
 
 def filtercmd(ui, cmd, func, kw, doc):
-    if not ui.debugflag and cmd.startswith("debug") and kw != "debug":
+    if not ui.debugflag and cmd.startswith(b"debug") and kw != b"debug":
         # Debug command, and the user is not looking for those.
         return True
     if not ui.verbose:
@@ -212,16 +220,18 @@
         if doc and any(w in doc for w in _exclkeywords):
             # Documentation has excluded keywords.
             return True
-    if kw == "shortlist" and not getattr(func, 'helpbasic', False):
+    if kw == b"shortlist" and not getattr(func, 'helpbasic', False):
         # We're presenting the short list but the command is not basic.
         return True
-    if ui.configbool('help', 'hidden-command.%s' % cmd):
+    if ui.configbool(b'help', b'hidden-command.%s' % cmd):
         # Configuration explicitly hides the command.
         return True
     return False
 
+
 def filtertopic(ui, topic):
-    return ui.configbool('help', 'hidden-topic.%s' % topic, False)
+    return ui.configbool(b'help', b'hidden-topic.%s' % topic, False)
+
 
 def topicmatch(ui, commands, kw):
     """Return help topics matching kw.
@@ -230,30 +240,35 @@
     one of topics, commands, extensions, or extensioncommands.
     """
     kw = encoding.lower(kw)
+
     def lowercontains(container):
         return kw in encoding.lower(container)  # translated in helptable
-    results = {'topics': [],
-               'commands': [],
-               'extensions': [],
-               'extensioncommands': [],
-               }
+
+    results = {
+        b'topics': [],
+        b'commands': [],
+        b'extensions': [],
+        b'extensioncommands': [],
+    }
     for topic in helptable:
         names, header, doc = topic[0:3]
         # Old extensions may use a str as doc.
-        if (sum(map(lowercontains, names))
+        if (
+            sum(map(lowercontains, names))
             or lowercontains(header)
-            or (callable(doc) and lowercontains(doc(ui)))):
+            or (callable(doc) and lowercontains(doc(ui)))
+        ):
             name = names[0]
             if not filtertopic(ui, name):
-                results['topics'].append((names[0], header))
-    for cmd, entry in commands.table.iteritems():
+                results[b'topics'].append((names[0], header))
+    for cmd, entry in pycompat.iteritems(commands.table):
         if len(entry) == 3:
             summary = entry[2]
         else:
-            summary = ''
+            summary = b''
         # translate docs *before* searching there
         func = entry[0]
-        docs = _(pycompat.getdoc(func)) or ''
+        docs = _(pycompat.getdoc(func)) or b''
         if kw in cmd or lowercontains(summary) or lowercontains(docs):
             doclines = docs.splitlines()
             if doclines:
@@ -261,22 +276,23 @@
             cmdname = cmdutil.parsealiases(cmd)[0]
             if filtercmd(ui, cmdname, func, kw, docs):
                 continue
-            results['commands'].append((cmdname, summary))
+            results[b'commands'].append((cmdname, summary))
     for name, docs in itertools.chain(
-        extensions.enabled(False).iteritems(),
-        extensions.disabled().iteritems()):
+        pycompat.iteritems(extensions.enabled(False)),
+        pycompat.iteritems(extensions.disabled()),
+    ):
         if not docs:
             continue
-        name = name.rpartition('.')[-1]
+        name = name.rpartition(b'.')[-1]
         if lowercontains(name) or lowercontains(docs):
             # extension docs are already translated
-            results['extensions'].append((name, docs.splitlines()[0]))
+            results[b'extensions'].append((name, docs.splitlines()[0]))
         try:
-            mod = extensions.load(ui, name, '')
+            mod = extensions.load(ui, name, b'')
         except ImportError:
             # debug message would be printed in extensions.load()
             continue
-        for cmd, entry in getattr(mod, 'cmdtable', {}).iteritems():
+        for cmd, entry in pycompat.iteritems(getattr(mod, 'cmdtable', {})):
             if kw in cmd or (len(entry) > 2 and lowercontains(entry[2])):
                 cmdname = cmdutil.parsealiases(cmd)[0]
                 func = entry[0]
@@ -284,20 +300,21 @@
                 if cmddoc:
                     cmddoc = gettext(cmddoc).splitlines()[0]
                 else:
-                    cmddoc = _('(no help text available)')
+                    cmddoc = _(b'(no help text available)')
                 if filtercmd(ui, cmdname, func, kw, cmddoc):
                     continue
-                results['extensioncommands'].append((cmdname, cmddoc))
+                results[b'extensioncommands'].append((cmdname, cmddoc))
     return results
 
+
 def loaddoc(topic, subdir=None):
     """Return a delayed loader for help/topic.txt."""
 
     def loader(ui):
-        docdir = os.path.join(util.datapath, 'help')
+        docdir = os.path.join(util.datapath, b'help')
         if subdir:
             docdir = os.path.join(docdir, subdir)
-        path = os.path.join(docdir, topic + ".txt")
+        path = os.path.join(docdir, topic + b".txt")
         doc = gettext(util.readfile(path))
         for rewriter in helphooks.get(topic, []):
             doc = rewriter(ui, topic, doc)
@@ -305,109 +322,241 @@
 
     return loader
 
-internalstable = sorted([
-    (['bundle2'], _('Bundle2'),
-     loaddoc('bundle2', subdir='internals')),
-    (['bundles'], _('Bundles'),
-     loaddoc('bundles', subdir='internals')),
-    (['cbor'], _('CBOR'),
-     loaddoc('cbor', subdir='internals')),
-    (['censor'], _('Censor'),
-     loaddoc('censor', subdir='internals')),
-    (['changegroups'], _('Changegroups'),
-     loaddoc('changegroups', subdir='internals')),
-    (['config'], _('Config Registrar'),
-     loaddoc('config', subdir='internals')),
-    (['extensions', 'extension'], _('Extension API'),
-     loaddoc('extensions', subdir='internals')),
-    (['mergestate'], _('Mergestate'),
-     loaddoc('mergestate', subdir='internals')),
-    (['requirements'], _('Repository Requirements'),
-     loaddoc('requirements', subdir='internals')),
-    (['revlogs'], _('Revision Logs'),
-     loaddoc('revlogs', subdir='internals')),
-    (['wireprotocol'], _('Wire Protocol'),
-     loaddoc('wireprotocol', subdir='internals')),
-    (['wireprotocolrpc'], _('Wire Protocol RPC'),
-     loaddoc('wireprotocolrpc', subdir='internals')),
-    (['wireprotocolv2'], _('Wire Protocol Version 2'),
-     loaddoc('wireprotocolv2', subdir='internals')),
-])
+
+internalstable = sorted(
+    [
+        ([b'bundle2'], _(b'Bundle2'), loaddoc(b'bundle2', subdir=b'internals')),
+        ([b'bundles'], _(b'Bundles'), loaddoc(b'bundles', subdir=b'internals')),
+        ([b'cbor'], _(b'CBOR'), loaddoc(b'cbor', subdir=b'internals')),
+        ([b'censor'], _(b'Censor'), loaddoc(b'censor', subdir=b'internals')),
+        (
+            [b'changegroups'],
+            _(b'Changegroups'),
+            loaddoc(b'changegroups', subdir=b'internals'),
+        ),
+        (
+            [b'config'],
+            _(b'Config Registrar'),
+            loaddoc(b'config', subdir=b'internals'),
+        ),
+        (
+            [b'extensions', b'extension'],
+            _(b'Extension API'),
+            loaddoc(b'extensions', subdir=b'internals'),
+        ),
+        (
+            [b'mergestate'],
+            _(b'Mergestate'),
+            loaddoc(b'mergestate', subdir=b'internals'),
+        ),
+        (
+            [b'requirements'],
+            _(b'Repository Requirements'),
+            loaddoc(b'requirements', subdir=b'internals'),
+        ),
+        (
+            [b'revlogs'],
+            _(b'Revision Logs'),
+            loaddoc(b'revlogs', subdir=b'internals'),
+        ),
+        (
+            [b'wireprotocol'],
+            _(b'Wire Protocol'),
+            loaddoc(b'wireprotocol', subdir=b'internals'),
+        ),
+        (
+            [b'wireprotocolrpc'],
+            _(b'Wire Protocol RPC'),
+            loaddoc(b'wireprotocolrpc', subdir=b'internals'),
+        ),
+        (
+            [b'wireprotocolv2'],
+            _(b'Wire Protocol Version 2'),
+            loaddoc(b'wireprotocolv2', subdir=b'internals'),
+        ),
+    ]
+)
+
 
 def internalshelp(ui):
     """Generate the index for the "internals" topic."""
-    lines = ['To access a subtopic, use "hg help internals.{subtopic-name}"\n',
-             '\n']
+    lines = [
+        b'To access a subtopic, use "hg help internals.{subtopic-name}"\n',
+        b'\n',
+    ]
     for names, header, doc in internalstable:
-        lines.append(' :%s: %s\n' % (names[0], header))
+        lines.append(b' :%s: %s\n' % (names[0], header))
 
-    return ''.join(lines)
+    return b''.join(lines)
+
 
-helptable = sorted([
-    (['bundlespec'], _("Bundle File Formats"), loaddoc('bundlespec'),
-     TOPIC_CATEGORY_CONCEPTS),
-    (['color'], _("Colorizing Outputs"), loaddoc('color'),
-     TOPIC_CATEGORY_OUTPUT),
-    (["config", "hgrc"], _("Configuration Files"), loaddoc('config'),
-     TOPIC_CATEGORY_CONFIG),
-    (['deprecated'], _("Deprecated Features"), loaddoc('deprecated'),
-     TOPIC_CATEGORY_MISC),
-    (["dates"], _("Date Formats"), loaddoc('dates'), TOPIC_CATEGORY_OUTPUT),
-    (["flags"], _("Command-line flags"), loaddoc('flags'),
-     TOPIC_CATEGORY_CONFIG),
-    (["patterns"], _("File Name Patterns"), loaddoc('patterns'),
-     TOPIC_CATEGORY_IDS),
-    (['environment', 'env'], _('Environment Variables'),
-     loaddoc('environment'), TOPIC_CATEGORY_CONFIG),
-    (['revisions', 'revs', 'revsets', 'revset', 'multirevs', 'mrevs'],
-      _('Specifying Revisions'), loaddoc('revisions'), TOPIC_CATEGORY_IDS),
-    (['filesets', 'fileset'], _("Specifying File Sets"), loaddoc('filesets'),
-     TOPIC_CATEGORY_IDS),
-    (['diffs'], _('Diff Formats'), loaddoc('diffs'), TOPIC_CATEGORY_OUTPUT),
-    (['merge-tools', 'mergetools', 'mergetool'], _('Merge Tools'),
-     loaddoc('merge-tools'), TOPIC_CATEGORY_CONFIG),
-    (['templating', 'templates', 'template', 'style'], _('Template Usage'),
-     loaddoc('templates'), TOPIC_CATEGORY_OUTPUT),
-    (['urls'], _('URL Paths'), loaddoc('urls'), TOPIC_CATEGORY_IDS),
-    (["extensions"], _("Using Additional Features"), extshelp,
-     TOPIC_CATEGORY_CONFIG),
-    (["subrepos", "subrepo"], _("Subrepositories"), loaddoc('subrepos'),
-     TOPIC_CATEGORY_CONCEPTS),
-    (["hgweb"], _("Configuring hgweb"), loaddoc('hgweb'),
-     TOPIC_CATEGORY_CONFIG),
-    (["glossary"], _("Glossary"), loaddoc('glossary'), TOPIC_CATEGORY_CONCEPTS),
-    (["hgignore", "ignore"], _("Syntax for Mercurial Ignore Files"),
-     loaddoc('hgignore'), TOPIC_CATEGORY_IDS),
-    (["phases"], _("Working with Phases"), loaddoc('phases'),
-     TOPIC_CATEGORY_CONCEPTS),
-    (['scripting'], _('Using Mercurial from scripts and automation'),
-     loaddoc('scripting'), TOPIC_CATEGORY_MISC),
-    (['internals'], _("Technical implementation topics"), internalshelp,
-     TOPIC_CATEGORY_MISC),
-    (['pager'], _("Pager Support"), loaddoc('pager'), TOPIC_CATEGORY_CONFIG),
-])
+helptable = sorted(
+    [
+        (
+            [b'bundlespec'],
+            _(b"Bundle File Formats"),
+            loaddoc(b'bundlespec'),
+            TOPIC_CATEGORY_CONCEPTS,
+        ),
+        (
+            [b'color'],
+            _(b"Colorizing Outputs"),
+            loaddoc(b'color'),
+            TOPIC_CATEGORY_OUTPUT,
+        ),
+        (
+            [b"config", b"hgrc"],
+            _(b"Configuration Files"),
+            loaddoc(b'config'),
+            TOPIC_CATEGORY_CONFIG,
+        ),
+        (
+            [b'deprecated'],
+            _(b"Deprecated Features"),
+            loaddoc(b'deprecated'),
+            TOPIC_CATEGORY_MISC,
+        ),
+        (
+            [b"dates"],
+            _(b"Date Formats"),
+            loaddoc(b'dates'),
+            TOPIC_CATEGORY_OUTPUT,
+        ),
+        (
+            [b"flags"],
+            _(b"Command-line flags"),
+            loaddoc(b'flags'),
+            TOPIC_CATEGORY_CONFIG,
+        ),
+        (
+            [b"patterns"],
+            _(b"File Name Patterns"),
+            loaddoc(b'patterns'),
+            TOPIC_CATEGORY_IDS,
+        ),
+        (
+            [b'environment', b'env'],
+            _(b'Environment Variables'),
+            loaddoc(b'environment'),
+            TOPIC_CATEGORY_CONFIG,
+        ),
+        (
+            [
+                b'revisions',
+                b'revs',
+                b'revsets',
+                b'revset',
+                b'multirevs',
+                b'mrevs',
+            ],
+            _(b'Specifying Revisions'),
+            loaddoc(b'revisions'),
+            TOPIC_CATEGORY_IDS,
+        ),
+        (
+            [b'filesets', b'fileset'],
+            _(b"Specifying File Sets"),
+            loaddoc(b'filesets'),
+            TOPIC_CATEGORY_IDS,
+        ),
+        (
+            [b'diffs'],
+            _(b'Diff Formats'),
+            loaddoc(b'diffs'),
+            TOPIC_CATEGORY_OUTPUT,
+        ),
+        (
+            [b'merge-tools', b'mergetools', b'mergetool'],
+            _(b'Merge Tools'),
+            loaddoc(b'merge-tools'),
+            TOPIC_CATEGORY_CONFIG,
+        ),
+        (
+            [b'templating', b'templates', b'template', b'style'],
+            _(b'Template Usage'),
+            loaddoc(b'templates'),
+            TOPIC_CATEGORY_OUTPUT,
+        ),
+        ([b'urls'], _(b'URL Paths'), loaddoc(b'urls'), TOPIC_CATEGORY_IDS),
+        (
+            [b"extensions"],
+            _(b"Using Additional Features"),
+            extshelp,
+            TOPIC_CATEGORY_CONFIG,
+        ),
+        (
+            [b"subrepos", b"subrepo"],
+            _(b"Subrepositories"),
+            loaddoc(b'subrepos'),
+            TOPIC_CATEGORY_CONCEPTS,
+        ),
+        (
+            [b"hgweb"],
+            _(b"Configuring hgweb"),
+            loaddoc(b'hgweb'),
+            TOPIC_CATEGORY_CONFIG,
+        ),
+        (
+            [b"glossary"],
+            _(b"Glossary"),
+            loaddoc(b'glossary'),
+            TOPIC_CATEGORY_CONCEPTS,
+        ),
+        (
+            [b"hgignore", b"ignore"],
+            _(b"Syntax for Mercurial Ignore Files"),
+            loaddoc(b'hgignore'),
+            TOPIC_CATEGORY_IDS,
+        ),
+        (
+            [b"phases"],
+            _(b"Working with Phases"),
+            loaddoc(b'phases'),
+            TOPIC_CATEGORY_CONCEPTS,
+        ),
+        (
+            [b'scripting'],
+            _(b'Using Mercurial from scripts and automation'),
+            loaddoc(b'scripting'),
+            TOPIC_CATEGORY_MISC,
+        ),
+        (
+            [b'internals'],
+            _(b"Technical implementation topics"),
+            internalshelp,
+            TOPIC_CATEGORY_MISC,
+        ),
+        (
+            [b'pager'],
+            _(b"Pager Support"),
+            loaddoc(b'pager'),
+            TOPIC_CATEGORY_CONFIG,
+        ),
+    ]
+)
 
 # Maps topics with sub-topics to a list of their sub-topics.
 subtopics = {
-    'internals': internalstable,
+    b'internals': internalstable,
 }
 
 # Map topics to lists of callables taking the current topic help and
 # returning the updated version.
 helphooks = {}
 
+
 def addtopichook(topic, rewriter):
     helphooks.setdefault(topic, []).append(rewriter)
 
+
 def makeitemsdoc(ui, topic, doc, marker, items, dedent=False):
     """Extract docstring from the items key to function mapping, build a
     single documentation block and use it to overwrite the marker in doc.
     """
     entries = []
     for name in sorted(items):
-        text = (pycompat.getdoc(items[name]) or '').rstrip()
-        if (not text
-            or not ui.verbose and any(w in text for w in _exclkeywords)):
+        text = (pycompat.getdoc(items[name]) or b'').rstrip()
+        if not text or not ui.verbose and any(w in text for w in _exclkeywords):
             continue
         text = gettext(text)
         if dedent:
@@ -417,45 +566,66 @@
         doclines = [(lines[0])]
         for l in lines[1:]:
             # Stop once we find some Python doctest
-            if l.strip().startswith('>>>'):
+            if l.strip().startswith(b'>>>'):
                 break
             if dedent:
                 doclines.append(l.rstrip())
             else:
-                doclines.append('  ' + l.strip())
-        entries.append('\n'.join(doclines))
-    entries = '\n\n'.join(entries)
+                doclines.append(b'  ' + l.strip())
+        entries.append(b'\n'.join(doclines))
+    entries = b'\n\n'.join(entries)
     return doc.replace(marker, entries)
 
+
 def addtopicsymbols(topic, marker, symbols, dedent=False):
     def add(ui, topic, doc):
         return makeitemsdoc(ui, topic, doc, marker, symbols, dedent=dedent)
+
     addtopichook(topic, add)
 
-addtopicsymbols('bundlespec', '.. bundlecompressionmarker',
-                compression.bundlecompressiontopics())
-addtopicsymbols('filesets', '.. predicatesmarker', fileset.symbols)
-addtopicsymbols('merge-tools', '.. internaltoolsmarker',
-                filemerge.internalsdoc)
-addtopicsymbols('revisions', '.. predicatesmarker', revset.symbols)
-addtopicsymbols('templates', '.. keywordsmarker', templatekw.keywords)
-addtopicsymbols('templates', '.. filtersmarker', templatefilters.filters)
-addtopicsymbols('templates', '.. functionsmarker', templatefuncs.funcs)
-addtopicsymbols('hgweb', '.. webcommandsmarker', webcommands.commands,
-                dedent=True)
+
+addtopicsymbols(
+    b'bundlespec',
+    b'.. bundlecompressionmarker',
+    compression.bundlecompressiontopics(),
+)
+addtopicsymbols(b'filesets', b'.. predicatesmarker', fileset.symbols)
+addtopicsymbols(
+    b'merge-tools', b'.. internaltoolsmarker', filemerge.internalsdoc
+)
+addtopicsymbols(b'revisions', b'.. predicatesmarker', revset.symbols)
+addtopicsymbols(b'templates', b'.. keywordsmarker', templatekw.keywords)
+addtopicsymbols(b'templates', b'.. filtersmarker', templatefilters.filters)
+addtopicsymbols(b'templates', b'.. functionsmarker', templatefuncs.funcs)
+addtopicsymbols(
+    b'hgweb', b'.. webcommandsmarker', webcommands.commands, dedent=True
+)
+
 
 def inserttweakrc(ui, topic, doc):
-    marker = '.. tweakdefaultsmarker'
+    marker = b'.. tweakdefaultsmarker'
     repl = uimod.tweakrc
+
     def sub(m):
         lines = [m.group(1) + s for s in repl.splitlines()]
-        return '\n'.join(lines)
+        return b'\n'.join(lines)
+
     return re.sub(br'( *)%s' % re.escape(marker), sub, doc)
 
-addtopichook('config', inserttweakrc)
+
+addtopichook(b'config', inserttweakrc)
+
 
-def help_(ui, commands, name, unknowncmd=False, full=True, subtopic=None,
-          fullname=None, **opts):
+def help_(
+    ui,
+    commands,
+    name,
+    unknowncmd=False,
+    full=True,
+    subtopic=None,
+    fullname=None,
+    **opts
+):
     '''
     Generate the help for 'name' as unformatted restructured text. If
     'name' is None, describe the commands available.
@@ -465,8 +635,9 @@
 
     def helpcmd(name, subtopic=None):
         try:
-            aliases, entry = cmdutil.findcmd(name, commands.table,
-                                             strict=unknowncmd)
+            aliases, entry = cmdutil.findcmd(
+                name, commands.table, strict=unknowncmd
+            )
         except error.AmbiguousCommand as inst:
             # py3 fix: except vars can't be used outside the scope of the
             # except block, nor can they be used inside a lambda; see Python issue 4617
@@ -479,7 +650,7 @@
 
         # check if it's an invalid alias and display its error if it is
         if getattr(entry[0], 'badalias', None):
-            rst.append(entry[0].badalias + '\n')
+            rst.append(entry[0].badalias + b'\n')
             if entry[0].unknowncmd:
                 try:
                     rst.extend(helpextcmd(entry[0].cmdname))
@@ -489,63 +660,75 @@
 
         # synopsis
         if len(entry) > 2:
-            if entry[2].startswith('hg'):
-                rst.append("%s\n" % entry[2])
+            if entry[2].startswith(b'hg'):
+                rst.append(b"%s\n" % entry[2])
             else:
-                rst.append('hg %s %s\n' % (aliases[0], entry[2]))
+                rst.append(b'hg %s %s\n' % (aliases[0], entry[2]))
         else:
-            rst.append('hg %s\n' % aliases[0])
+            rst.append(b'hg %s\n' % aliases[0])
         # aliases
         if full and not ui.quiet and len(aliases) > 1:
-            rst.append(_("\naliases: %s\n") % ', '.join(aliases[1:]))
-        rst.append('\n')
+            rst.append(_(b"\naliases: %s\n") % b', '.join(aliases[1:]))
+        rst.append(b'\n')
 
         # description
         doc = gettext(pycompat.getdoc(entry[0]))
         if not doc:
-            doc = _("(no help text available)")
-        if util.safehasattr(entry[0], 'definition'):  # aliased command
+            doc = _(b"(no help text available)")
+        if util.safehasattr(entry[0], b'definition'):  # aliased command
             source = entry[0].source
-            if entry[0].definition.startswith('!'):  # shell alias
-                doc = (_('shell alias for: %s\n\n%s\n\ndefined by: %s\n') %
-                       (entry[0].definition[1:], doc, source))
+            if entry[0].definition.startswith(b'!'):  # shell alias
+                doc = _(b'shell alias for: %s\n\n%s\n\ndefined by: %s\n') % (
+                    entry[0].definition[1:],
+                    doc,
+                    source,
+                )
             else:
-                doc = (_('alias for: hg %s\n\n%s\n\ndefined by: %s\n') %
-                       (entry[0].definition, doc, source))
+                doc = _(b'alias for: hg %s\n\n%s\n\ndefined by: %s\n') % (
+                    entry[0].definition,
+                    doc,
+                    source,
+                )
         doc = doc.splitlines(True)
         if ui.quiet or not full:
             rst.append(doc[0])
         else:
             rst.extend(doc)
-        rst.append('\n')
+        rst.append(b'\n')
 
         # check if this command shadows a non-trivial (multi-line)
         # extension help text
         try:
             mod = extensions.find(name)
-            doc = gettext(pycompat.getdoc(mod)) or ''
-            if '\n' in doc.strip():
-                msg = _("(use 'hg help -e %s' to show help for "
-                        "the %s extension)") % (name, name)
-                rst.append('\n%s\n' % msg)
+            doc = gettext(pycompat.getdoc(mod)) or b''
+            if b'\n' in doc.strip():
+                msg = _(
+                    b"(use 'hg help -e %s' to show help for "
+                    b"the %s extension)"
+                ) % (name, name)
+                rst.append(b'\n%s\n' % msg)
         except KeyError:
             pass
 
         # options
         if not ui.quiet and entry[1]:
-            rst.append(optrst(_("options"), entry[1], ui.verbose))
+            rst.append(optrst(_(b"options"), entry[1], ui.verbose))
 
         if ui.verbose:
-            rst.append(optrst(_("global options"),
-                              commands.globalopts, ui.verbose))
+            rst.append(
+                optrst(_(b"global options"), commands.globalopts, ui.verbose)
+            )
 
         if not ui.verbose:
             if not full:
-                rst.append(_("\n(use 'hg %s -h' to show more help)\n")
-                           % name)
+                rst.append(_(b"\n(use 'hg %s -h' to show more help)\n") % name)
             elif not ui.quiet:
-                rst.append(_('\n(some details hidden, use --verbose '
-                               'to show complete help)'))
+                rst.append(
+                    _(
+                        b'\n(some details hidden, use --verbose '
+                        b'to show complete help)'
+                    )
+                )
 
         return rst
 
@@ -556,10 +739,10 @@
         h = {}
         # Command -> string showing synonyms
         syns = {}
-        for c, e in commands.table.iteritems():
+        for c, e in pycompat.iteritems(commands.table):
             fs = cmdutil.parsealiases(c)
             f = fs[0]
-            syns[f] = ', '.join(fs)
+            syns[f] = b', '.join(fs)
             func = e[0]
             if select and not select(f):
                 continue
@@ -568,45 +751,48 @@
                 continue
             doc = gettext(doc)
             if not doc:
-                doc = _("(no help text available)")
+                doc = _(b"(no help text available)")
             h[f] = doc.splitlines()[0].rstrip()
 
             cat = getattr(func, 'helpcategory', None) or (
-                registrar.command.CATEGORY_NONE)
+                registrar.command.CATEGORY_NONE
+            )
             cats.setdefault(cat, []).append(f)
 
         rst = []
         if not h:
             if not ui.quiet:
-                rst.append(_('no commands defined\n'))
+                rst.append(_(b'no commands defined\n'))
             return rst
 
         # Output top header.
         if not ui.quiet:
-            if name == "shortlist":
-                rst.append(_('basic commands:\n\n'))
-            elif name == "debug":
-                rst.append(_('debug commands (internal and unsupported):\n\n'))
+            if name == b"shortlist":
+                rst.append(_(b'basic commands:\n\n'))
+            elif name == b"debug":
+                rst.append(_(b'debug commands (internal and unsupported):\n\n'))
             else:
-                rst.append(_('list of commands:\n'))
+                rst.append(_(b'list of commands:\n'))
 
         def appendcmds(cmds):
             cmds = sorted(cmds)
             for c in cmds:
                 if ui.verbose:
-                    rst.append(" :%s: %s\n" % (syns[c], h[c]))
+                    rst.append(b" :%s: %s\n" % (syns[c], h[c]))
                 else:
-                    rst.append(' :%s: %s\n' % (c, h[c]))
+                    rst.append(b' :%s: %s\n' % (c, h[c]))
 
-        if name in ('shortlist', 'debug'):
+        if name in (b'shortlist', b'debug'):
             # List without categories.
             appendcmds(h)
         else:
             # Check that all categories have an order.
             missing_order = set(cats.keys()) - set(CATEGORY_ORDER)
             if missing_order:
-                ui.develwarn('help categories missing from CATEGORY_ORDER: %s' %
-                             missing_order)
+                ui.develwarn(
+                    b'help categories missing from CATEGORY_ORDER: %s'
+                    % missing_order
+                )
 
             # List per category.
             for cat in CATEGORY_ORDER:
@@ -614,20 +800,23 @@
                 if catfns:
                     if len(cats) > 1:
                         catname = gettext(CATEGORY_NAMES[cat])
-                        rst.append("\n%s:\n" % catname)
-                    rst.append("\n")
+                        rst.append(b"\n%s:\n" % catname)
+                    rst.append(b"\n")
                     appendcmds(catfns)
 
         ex = opts.get
-        anyopts = (ex(r'keyword') or not (ex(r'command') or ex(r'extension')))
+        anyopts = ex(r'keyword') or not (ex(r'command') or ex(r'extension'))
         if not name and anyopts:
-            exts = listexts(_('enabled extensions:'), extensions.enabled(),
-                            showdeprecated=ui.verbose)
+            exts = listexts(
+                _(b'enabled extensions:'),
+                extensions.enabled(),
+                showdeprecated=ui.verbose,
+            )
             if exts:
-                rst.append('\n')
+                rst.append(b'\n')
                 rst.extend(exts)
 
-            rst.append(_("\nadditional help topics:\n"))
+            rst.append(_(b"\nadditional help topics:\n"))
             # Group commands by category.
             topiccats = {}
             for topic in helptable:
@@ -640,14 +829,16 @@
                 topicname = names[0]
                 if not filtertopic(ui, topicname):
                     topiccats.setdefault(category, []).append(
-                        (topicname, header))
+                        (topicname, header)
+                    )
 
             # Check that all categories have an order.
             missing_order = set(topiccats.keys()) - set(TOPIC_CATEGORY_ORDER)
             if missing_order:
                 ui.develwarn(
-                    'help categories missing from TOPIC_CATEGORY_ORDER: %s' %
-                    missing_order)
+                    b'help categories missing from TOPIC_CATEGORY_ORDER: %s'
+                    % missing_order
+                )
 
             # Output topics per category.
             for cat in TOPIC_CATEGORY_ORDER:
@@ -655,33 +846,51 @@
                 if topics:
                     if len(topiccats) > 1:
                         catname = gettext(TOPIC_CATEGORY_NAMES[cat])
-                        rst.append("\n%s:\n" % catname)
-                    rst.append("\n")
+                        rst.append(b"\n%s:\n" % catname)
+                    rst.append(b"\n")
                     for t, desc in topics:
-                        rst.append(" :%s: %s\n" % (t, desc))
+                        rst.append(b" :%s: %s\n" % (t, desc))
 
         if ui.quiet:
             pass
         elif ui.verbose:
-            rst.append('\n%s\n' % optrst(_("global options"),
-                                         commands.globalopts, ui.verbose))
-            if name == 'shortlist':
-                rst.append(_("\n(use 'hg help' for the full list "
-                             "of commands)\n"))
+            rst.append(
+                b'\n%s\n'
+                % optrst(_(b"global options"), commands.globalopts, ui.verbose)
+            )
+            if name == b'shortlist':
+                rst.append(
+                    _(b"\n(use 'hg help' for the full list of commands)\n")
+                )
         else:
-            if name == 'shortlist':
-                rst.append(_("\n(use 'hg help' for the full list of commands "
-                             "or 'hg -v' for details)\n"))
+            if name == b'shortlist':
+                rst.append(
+                    _(
+                        b"\n(use 'hg help' for the full list of commands "
+                        b"or 'hg -v' for details)\n"
+                    )
+                )
             elif name and not full:
-                rst.append(_("\n(use 'hg help %s' to show the full help "
-                             "text)\n") % name)
+                rst.append(
+                    _(b"\n(use 'hg help %s' to show the full help text)\n")
+                    % name
+                )
             elif name and syns and name in syns.keys():
-                rst.append(_("\n(use 'hg help -v -e %s' to show built-in "
-                             "aliases and global options)\n") % name)
+                rst.append(
+                    _(
+                        b"\n(use 'hg help -v -e %s' to show built-in "
+                        b"aliases and global options)\n"
+                    )
+                    % name
+                )
             else:
-                rst.append(_("\n(use 'hg help -v%s' to show built-in aliases "
-                             "and global options)\n")
-                           % (name and " " + name or ""))
+                rst.append(
+                    _(
+                        b"\n(use 'hg help -v%s' to show built-in aliases "
+                        b"and global options)\n"
+                    )
+                    % (name and b" " + name or b"")
+                )
         return rst
 
     def helptopic(name, subtopic=None):
@@ -706,19 +915,23 @@
 
         # description
         if not doc:
-            rst.append("    %s\n" % _("(no help text available)"))
+            rst.append(b"    %s\n" % _(b"(no help text available)"))
         if callable(doc):
-            rst += ["    %s\n" % l for l in doc(ui).splitlines()]
+            rst += [b"    %s\n" % l for l in doc(ui).splitlines()]
 
         if not ui.verbose:
-            omitted = _('(some details hidden, use --verbose'
-                         ' to show complete help)')
+            omitted = _(
+                b'(some details hidden, use --verbose'
+                b' to show complete help)'
+            )
             indicateomitted(rst, omitted)
 
         try:
             cmdutil.findcmd(name, commands.table)
-            rst.append(_("\nuse 'hg help -c %s' to see help for "
-                       "the %s command\n") % (name, name))
+            rst.append(
+                _(b"\nuse 'hg help -c %s' to see help for the %s command\n")
+                % (name, name)
+            )
         except error.UnknownCommand:
             pass
         return rst
@@ -726,25 +939,27 @@
     def helpext(name, subtopic=None):
         try:
             mod = extensions.find(name)
-            doc = gettext(pycompat.getdoc(mod)) or _('no help text available')
+            doc = gettext(pycompat.getdoc(mod)) or _(b'no help text available')
         except KeyError:
             mod = None
             doc = extensions.disabledext(name)
             if not doc:
                 raise error.UnknownCommand(name)
 
-        if '\n' not in doc:
-            head, tail = doc, ""
+        if b'\n' not in doc:
+            head, tail = doc, b""
         else:
-            head, tail = doc.split('\n', 1)
-        rst = [_('%s extension - %s\n\n') % (name.rpartition('.')[-1], head)]
+            head, tail = doc.split(b'\n', 1)
+        rst = [_(b'%s extension - %s\n\n') % (name.rpartition(b'.')[-1], head)]
         if tail:
             rst.extend(tail.splitlines(True))
-            rst.append('\n')
+            rst.append(b'\n')
 
         if not ui.verbose:
-            omitted = _('(some details hidden, use --verbose'
-                         ' to show complete help)')
+            omitted = _(
+                b'(some details hidden, use --verbose'
+                b' to show complete help)'
+            )
             indicateomitted(rst, omitted)
 
         if mod:
@@ -752,57 +967,70 @@
                 ct = mod.cmdtable
             except AttributeError:
                 ct = {}
-            modcmds = {c.partition('|')[0] for c in ct}
+            modcmds = {c.partition(b'|')[0] for c in ct}
             rst.extend(helplist(modcmds.__contains__))
         else:
-            rst.append(_("(use 'hg help extensions' for information on enabling"
-                       " extensions)\n"))
+            rst.append(
+                _(
+                    b"(use 'hg help extensions' for information on enabling"
+                    b" extensions)\n"
+                )
+            )
         return rst
 
     def helpextcmd(name, subtopic=None):
-        cmd, ext, doc = extensions.disabledcmd(ui, name,
-                                               ui.configbool('ui', 'strict'))
+        cmd, ext, doc = extensions.disabledcmd(
+            ui, name, ui.configbool(b'ui', b'strict')
+        )
         doc = doc.splitlines()[0]
 
-        rst = listexts(_("'%s' is provided by the following "
-                              "extension:") % cmd, {ext: doc}, indent=4,
-                       showdeprecated=True)
-        rst.append('\n')
-        rst.append(_("(use 'hg help extensions' for information on enabling "
-                   "extensions)\n"))
+        rst = listexts(
+            _(b"'%s' is provided by the following extension:") % cmd,
+            {ext: doc},
+            indent=4,
+            showdeprecated=True,
+        )
+        rst.append(b'\n')
+        rst.append(
+            _(
+                b"(use 'hg help extensions' for information on enabling "
+                b"extensions)\n"
+            )
+        )
         return rst
 
-
     rst = []
-    kw = opts.get('keyword')
+    kw = opts.get(b'keyword')
     if kw or name is None and any(opts[o] for o in opts):
-        matches = topicmatch(ui, commands, name or '')
+        matches = topicmatch(ui, commands, name or b'')
         helpareas = []
-        if opts.get('extension'):
-            helpareas += [('extensions', _('Extensions'))]
-        if opts.get('command'):
-            helpareas += [('commands', _('Commands'))]
+        if opts.get(b'extension'):
+            helpareas += [(b'extensions', _(b'Extensions'))]
+        if opts.get(b'command'):
+            helpareas += [(b'commands', _(b'Commands'))]
         if not helpareas:
-            helpareas = [('topics', _('Topics')),
-                         ('commands', _('Commands')),
-                         ('extensions', _('Extensions')),
-                         ('extensioncommands', _('Extension Commands'))]
+            helpareas = [
+                (b'topics', _(b'Topics')),
+                (b'commands', _(b'Commands')),
+                (b'extensions', _(b'Extensions')),
+                (b'extensioncommands', _(b'Extension Commands')),
+            ]
         for t, title in helpareas:
             if matches[t]:
-                rst.append('%s:\n\n' % title)
+                rst.append(b'%s:\n\n' % title)
                 rst.extend(minirst.maketable(sorted(matches[t]), 1))
-                rst.append('\n')
+                rst.append(b'\n')
         if not rst:
-            msg = _('no matches')
-            hint = _("try 'hg help' for a list of topics")
+            msg = _(b'no matches')
+            hint = _(b"try 'hg help' for a list of topics")
             raise error.Abort(msg, hint=hint)
-    elif name and name != 'shortlist':
+    elif name and name != b'shortlist':
         queries = []
         if unknowncmd:
             queries += [helpextcmd]
-        if opts.get('extension'):
+        if opts.get(b'extension'):
             queries += [helpext]
-        if opts.get('command'):
+        if opts.get(b'command'):
             queries += [helpcmd]
         if not queries:
             queries = (helptopic, helpcmd, helpext, helpextcmd)
@@ -824,19 +1052,21 @@
                     hintname = subtopic
                 else:
                     hintname = name
-                msg = _('no such help topic: %s') % formatname
-                hint = _("try 'hg help --keyword %s'") % hintname
+                msg = _(b'no such help topic: %s') % formatname
+                hint = _(b"try 'hg help --keyword %s'") % hintname
                 raise error.Abort(msg, hint=hint)
     else:
         # program name
         if not ui.quiet:
-            rst = [_("Mercurial Distributed SCM\n"), '\n']
+            rst = [_(b"Mercurial Distributed SCM\n"), b'\n']
         rst.extend(helplist(None, **pycompat.strkwargs(opts)))
 
-    return ''.join(rst)
+    return b''.join(rst)
+
 
-def formattedhelp(ui, commands, fullname, keep=None, unknowncmd=False,
-                  full=True, **opts):
+def formattedhelp(
+    ui, commands, fullname, keep=None, unknowncmd=False, full=True, **opts
+):
     """get help for a given topic (as a dotted name) as rendered rst
 
     Either returns the rendered help text or raises an exception.
@@ -844,30 +1074,38 @@
     if keep is None:
         keep = []
     else:
-        keep = list(keep) # make a copy so we can mutate this later
+        keep = list(keep)  # make a copy so we can mutate this later
 
     # <fullname> := <name>[.<subtopic>][.<section>]
     name = subtopic = section = None
     if fullname is not None:
-        nameparts = fullname.split('.')
+        nameparts = fullname.split(b'.')
         name = nameparts.pop(0)
         if nameparts and name in subtopics:
             subtopic = nameparts.pop(0)
         if nameparts:
-            section = encoding.lower('.'.join(nameparts))
+            section = encoding.lower(b'.'.join(nameparts))
 
-    textwidth = ui.configint('ui', 'textwidth')
+    textwidth = ui.configint(b'ui', b'textwidth')
     termwidth = ui.termwidth() - 2
     if textwidth <= 0 or termwidth < textwidth:
         textwidth = termwidth
-    text = help_(ui, commands, name, fullname=fullname,
-                 subtopic=subtopic, unknowncmd=unknowncmd, full=full, **opts)
+    text = help_(
+        ui,
+        commands,
+        name,
+        fullname=fullname,
+        subtopic=subtopic,
+        unknowncmd=unknowncmd,
+        full=full,
+        **opts
+    )
 
     blocks, pruned = minirst.parse(text, keep=keep)
-    if 'verbose' in pruned:
-        keep.append('omitted')
+    if b'verbose' in pruned:
+        keep.append(b'omitted')
     else:
-        keep.append('notomitted')
+        keep.append(b'notomitted')
     blocks, pruned = minirst.parse(text, keep=keep)
     if section:
         blocks = minirst.filtersections(blocks, section)
@@ -876,6 +1114,6 @@
     # to look for, or we could have simply failed to find "foo.bar"
     # because bar isn't a section of foo
     if section and not (blocks and name):
-        raise error.Abort(_("help section not found: %s") % fullname)
+        raise error.Abort(_(b"help section not found: %s") % fullname)
 
     return minirst.formatplain(blocks, textwidth)
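
For reference, the dotted-name handling in ``formattedhelp()`` above can be
read as this minimal sketch (the topic name is hypothetical, chosen only for
illustration; ``subtopics`` and ``encoding`` are the names already in scope
in mercurial/help.py)::

   # Hypothetical topic; any <name>[.<subtopic>][.<section>] splits the same.
   fullname = b'internals.changegroups.deltas'
   nameparts = fullname.split(b'.')
   name = nameparts.pop(0)              # b'internals'
   subtopic = section = None
   if nameparts and name in subtopics:  # name has a known subtopic table
       subtopic = nameparts.pop(0)      # b'changegroups'
   if nameparts:
       section = encoding.lower(b'.'.join(nameparts))  # b'deltas'
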
--- a/mercurial/help/config.txt	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/help/config.txt	Mon Oct 21 11:09:48 2019 -0400
@@ -442,6 +442,14 @@
     Show status of files in the working directory after successful commit.
     (default: False)
 
+``push.require-revs``
+    Require that the revisions to push are specified using one or more
+    mechanisms, such as naming them positionally on the command line, using
+    ``-r``, ``-b``, and/or ``-B`` on the command line, or using
+    ``paths.<path>:pushrev`` in the configuration. If this is enabled and no
+    revisions are specified, the command aborts.
+    (default: False)
+
 ``resolve.confirm``
     Confirm before performing action if no filename is passed.
     (default: False)
@@ -1853,7 +1861,8 @@
 
 ``update-timestamp``
     If true, updates the date and time of the changeset to the current time. It is only
-    applicable for hg amend in current version.
+    applicable for ``hg amend``, ``hg commit --amend``, and ``hg uncommit``
+    in the current version.
 
 ``storage``
 -----------
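
A minimal sketch of how a command might honor the new ``push.require-revs``
knob, assuming ``ui``, ``revs``, ``error``, and ``_`` are in scope as in the
code below (the abort wording is illustrative, not the shipped message)::

   # Hypothetical enforcement: abort when the knob is set and no
   # revisions were selected positionally, via -r/-b/-B, or via
   # paths.<path>:pushrev.
   if ui.configbool(b'push', b'require-revs') and not revs:
       raise error.Abort(
           _(b'no revisions specified to push'),
           hint=_(b"did you mean 'hg push -r .'?"),
       )
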
--- a/mercurial/hg.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hg.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,9 +15,8 @@
 import stat
 
 from .i18n import _
-from .node import (
-    nullid,
-)
+from .node import nullid
+from .pycompat import getattr
 
 from . import (
     bookmarks,
@@ -39,7 +38,6 @@
     node,
     phases,
     pycompat,
-    repository as repositorymod,
     scmutil,
     sshpeer,
     statichttprepo,
@@ -51,10 +49,13 @@
     vfs as vfsmod,
 )
 
+from .interfaces import repository as repositorymod
+
 release = lock.release
 
 # shared features
-sharedbookmarks = 'bookmarks'
+sharedbookmarks = b'bookmarks'
+
 
 def _local(path):
     path = util.expandpath(util.urllocalpath(path))
@@ -63,13 +64,15 @@
         isfile = os.path.isfile(path)
     # Python 2 raises TypeError, Python 3 ValueError.
     except (TypeError, ValueError) as e:
-        raise error.Abort(_('invalid path %s: %s') % (
-            path, pycompat.bytestr(e)))
+        raise error.Abort(
+            _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
+        )
 
     return isfile and bundlerepo or localrepo
 
+
 def addbranchrevs(lrepo, other, branches, revs):
-    peer = other.peer() # a courtesy to callers using a localrepo for other
+    peer = other.peer()  # a courtesy to callers using a localrepo for other
     hashbranch, branches = branches
     if not hashbranch and not branches:
         x = revs or None
@@ -83,19 +86,19 @@
     else:
         revs = []
 
-    if not peer.capable('branchmap'):
+    if not peer.capable(b'branchmap'):
         if branches:
-            raise error.Abort(_("remote branch lookup not supported"))
+            raise error.Abort(_(b"remote branch lookup not supported"))
         revs.append(hashbranch)
         return revs, revs[0]
 
     with peer.commandexecutor() as e:
-        branchmap = e.callcommand('branchmap', {}).result()
+        branchmap = e.callcommand(b'branchmap', {}).result()
 
     def primary(branch):
-        if branch == '.':
+        if branch == b'.':
             if not lrepo:
-                raise error.Abort(_("dirstate branch not accessible"))
+                raise error.Abort(_(b"dirstate branch not accessible"))
             branch = lrepo.dirstate.branch()
         if branch in branchmap:
             revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
@@ -105,12 +108,13 @@
 
     for branch in branches:
         if not primary(branch):
-            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
+            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
     if hashbranch:
         if not primary(hashbranch):
             revs.append(hashbranch)
     return revs, revs[0]
 
+
 def parseurl(path, branches=None):
     '''parse url#branch, returning (url, (branch, branches))'''
 
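Illustrative use of ``parseurl()``; the URL is made up::

   # Splits the #branch fragment off the URL.
   url, (branch, branches) = parseurl(b'https://example.com/repo#stable')
   # url == b'https://example.com/repo'
   # branch == b'stable', branches == []
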
@@ -121,29 +125,32 @@
         u.fragment = None
     return bytes(u), (branch, branches or [])
 
+
 schemes = {
-    'bundle': bundlerepo,
-    'union': unionrepo,
-    'file': _local,
-    'http': httppeer,
-    'https': httppeer,
-    'ssh': sshpeer,
-    'static-http': statichttprepo,
+    b'bundle': bundlerepo,
+    b'union': unionrepo,
+    b'file': _local,
+    b'http': httppeer,
+    b'https': httppeer,
+    b'ssh': sshpeer,
+    b'static-http': statichttprepo,
 }
 
+
 def _peerlookup(path):
     u = util.url(path)
-    scheme = u.scheme or 'file'
-    thing = schemes.get(scheme) or schemes['file']
+    scheme = u.scheme or b'file'
+    thing = schemes.get(scheme) or schemes[b'file']
     try:
         return thing(path)
     except TypeError:
         # we can't test callable(thing) because 'thing' can be an unloaded
         # module that implements __call__
-        if not util.safehasattr(thing, 'instance'):
+        if not util.safehasattr(thing, b'instance'):
             raise
         return thing
 
+
 def islocal(repo):
     '''return true if repo (or path pointing to repo) is local'''
     if isinstance(repo, bytes):
@@ -153,22 +160,27 @@
             return False
     return repo.local()
 
+
 def openpath(ui, path, sendaccept=True):
     '''open path with open if local, url.open if remote'''
     pathurl = util.url(path, parsequery=False, parsefragment=False)
     if pathurl.islocal():
-        return util.posixfile(pathurl.localpath(), 'rb')
+        return util.posixfile(pathurl.localpath(), b'rb')
     else:
         return url.open(ui, path, sendaccept=sendaccept)
 
+
 # a list of (ui, repo) functions called for wire peer initialization
 wirepeersetupfuncs = []
 
-def _peerorrepo(ui, path, create=False, presetupfuncs=None,
-                intents=None, createopts=None):
+
+def _peerorrepo(
+    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
+):
     """return a repository object for the specified path"""
-    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
-                                     createopts=createopts)
+    obj = _peerlookup(path).instance(
+        ui, path, create, intents=intents, createopts=createopts
+    )
     ui = getattr(obj, "ui", ui)
     for f in presetupfuncs or []:
         f(ui, obj)
@@ -180,30 +192,48 @@
             if hook:
                 with util.timedcm('reposetup %r', name) as stats:
                     hook(ui, obj)
-                ui.log(b'extension', b'  > reposetup for %s took %s\n',
-                       name, stats)
+                ui.log(
+                    b'extension', b'  > reposetup for %s took %s\n', name, stats
+                )
     ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
     if not obj.local():
         for f in wirepeersetupfuncs:
             f(ui, obj)
     return obj
 
-def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
-               createopts=None):
+
+def repository(
+    ui,
+    path=b'',
+    create=False,
+    presetupfuncs=None,
+    intents=None,
+    createopts=None,
+):
     """return a repository object for the specified path"""
-    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
-                       intents=intents, createopts=createopts)
+    peer = _peerorrepo(
+        ui,
+        path,
+        create,
+        presetupfuncs=presetupfuncs,
+        intents=intents,
+        createopts=createopts,
+    )
     repo = peer.local()
     if not repo:
-        raise error.Abort(_("repository '%s' is not local") %
-                         (path or peer.url()))
-    return repo.filtered('visible')
+        raise error.Abort(
+            _(b"repository '%s' is not local") % (path or peer.url())
+        )
+    return repo.filtered(b'visible')
+
 
 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
     '''return a repository peer for the specified path'''
     rui = remoteui(uiorrepo, opts)
-    return _peerorrepo(rui, path, create, intents=intents,
-                       createopts=createopts).peer()
+    return _peerorrepo(
+        rui, path, create, intents=intents, createopts=createopts
+    ).peer()
+
 
 def defaultdest(source):
     '''return default destination of clone if none is given
@@ -223,9 +253,10 @@
     '''
     path = util.url(source).path
     if not path:
-        return ''
+        return b''
     return os.path.basename(os.path.normpath(path))
 
+
 def sharedreposource(repo):
     """Returns repository object for source repository of a shared repo.
 
@@ -234,7 +265,7 @@
     if repo.sharedpath == repo.path:
         return None
 
-    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
+    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
         return repo.srcrepo
 
     # the sharedpath always ends in the .hg; we want the path to the repo
@@ -244,12 +275,20 @@
     repo.srcrepo = srcrepo
     return srcrepo
 
-def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
-          relative=False):
+
+def share(
+    ui,
+    source,
+    dest=None,
+    update=True,
+    bookmarks=True,
+    defaultpath=None,
+    relative=False,
+):
     '''create a shared repository'''
 
     if not islocal(source):
-        raise error.Abort(_('can only share local repositories'))
+        raise error.Abort(_(b'can only share local repositories'))
 
     if not dest:
         dest = defaultdest(source)
@@ -269,17 +308,23 @@
     if bookmarks:
         shareditems.add(sharedbookmarks)
 
-    r = repository(ui, dest, create=True, createopts={
-        'sharedrepo': srcrepo,
-        'sharedrelative': relative,
-        'shareditems': shareditems,
-    })
+    r = repository(
+        ui,
+        dest,
+        create=True,
+        createopts={
+            b'sharedrepo': srcrepo,
+            b'sharedrelative': relative,
+            b'shareditems': shareditems,
+        },
+    )
 
     postshare(srcrepo, r, defaultpath=defaultpath)
     r = repository(ui, dest)
     _postshareupdate(r, update, checkout=checkout)
     return r
 
+
 def unshare(ui, repo):
     """convert a shared repository to a normal one
 
@@ -299,11 +344,11 @@
         destlock = copystore(ui, repo, repo.path)
         with destlock or util.nullcontextmanager():
 
-            sharefile = repo.vfs.join('sharedpath')
-            util.rename(sharefile, sharefile + '.old')
+            sharefile = repo.vfs.join(b'sharedpath')
+            util.rename(sharefile, sharefile + b'.old')
 
-            repo.requirements.discard('shared')
-            repo.requirements.discard('relshared')
+            repo.requirements.discard(b'shared')
+            repo.requirements.discard(b'relshared')
             repo._writerequirements()
 
     # Removing share changes some fundamental properties of the repo instance.
@@ -313,7 +358,7 @@
 
     # TODO: figure out how to access subrepos that exist, but were previously
     #       removed from .hgsub
-    c = newrepo['.']
+    c = newrepo[b'.']
     subs = c.substate
     for s in sorted(subs):
         c.sub(s).unshare()
@@ -322,6 +367,7 @@
 
     return newrepo
 
+
 def postshare(sourcerepo, destrepo, defaultpath=None):
     """Called after a new shared repo is created.
 
@@ -331,15 +377,15 @@
     Extensions can wrap this function and write additional entries to
     destrepo/.hg/shared to indicate additional pieces of data to be shared.
     """
-    default = defaultpath or sourcerepo.ui.config('paths', 'default')
+    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
     if default:
-        template = ('[paths]\n'
-                    'default = %s\n')
-        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
+        template = b'[paths]\ndefault = %s\n'
+        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
     if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
         with destrepo.wlock():
             narrowspec.copytoworkingcopy(destrepo)
 
+
 def _postshareupdate(repo, update, checkout=None):
     """Maybe perform a working directory update after a shared repo is created.
 
@@ -348,10 +394,10 @@
     if not update:
         return
 
-    repo.ui.status(_("updating working directory\n"))
+    repo.ui.status(_(b"updating working directory\n"))
     if update is not True:
         checkout = update
-    for test in (checkout, 'default', 'tip'):
+    for test in (checkout, b'default', b'tip'):
         if test is None:
             continue
         try:
@@ -361,6 +407,7 @@
             continue
     _update(repo, uprev)
 
+
 def copystore(ui, srcrepo, destpath):
     '''copy files from store of srcrepo in destpath
 
@@ -369,38 +416,50 @@
     destlock = None
     try:
         hardlink = None
-        topic = _('linking') if hardlink else _('copying')
-        with ui.makeprogress(topic, unit=_('files')) as progress:
+        topic = _(b'linking') if hardlink else _(b'copying')
+        with ui.makeprogress(topic, unit=_(b'files')) as progress:
             num = 0
             srcpublishing = srcrepo.publishing()
             srcvfs = vfsmod.vfs(srcrepo.sharedpath)
             dstvfs = vfsmod.vfs(destpath)
             for f in srcrepo.store.copylist():
-                if srcpublishing and f.endswith('phaseroots'):
+                if srcpublishing and f.endswith(b'phaseroots'):
                     continue
                 dstbase = os.path.dirname(f)
                 if dstbase and not dstvfs.exists(dstbase):
                     dstvfs.mkdir(dstbase)
                 if srcvfs.exists(f):
-                    if f.endswith('data'):
+                    if f.endswith(b'data'):
                         # 'dstbase' may be empty (e.g. revlog format 0)
-                        lockfile = os.path.join(dstbase, "lock")
+                        lockfile = os.path.join(dstbase, b"lock")
                         # lock to avoid premature writing to the target
                         destlock = lock.lock(dstvfs, lockfile)
-                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
-                                                 hardlink, progress)
+                    hardlink, n = util.copyfiles(
+                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
+                    )
                     num += n
             if hardlink:
-                ui.debug("linked %d files\n" % num)
+                ui.debug(b"linked %d files\n" % num)
             else:
-                ui.debug("copied %d files\n" % num)
+                ui.debug(b"copied %d files\n" % num)
         return destlock
-    except: # re-raises
+    except:  # re-raises
         release(destlock)
         raise
 
-def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
-                   rev=None, update=True, stream=False):
+
+def clonewithshare(
+    ui,
+    peeropts,
+    sharepath,
+    source,
+    srcpeer,
+    dest,
+    pull=False,
+    rev=None,
+    update=True,
+    stream=False,
+):
     """Perform a clone using a shared repo.
 
     The store for the repository will be located at <sharepath>/.hg. The
@@ -410,18 +469,22 @@
     """
     revs = None
     if rev:
-        if not srcpeer.capable('lookup'):
-            raise error.Abort(_("src repository does not support "
-                               "revision lookup and so doesn't "
-                               "support clone by revision"))
+        if not srcpeer.capable(b'lookup'):
+            raise error.Abort(
+                _(
+                    b"src repository does not support "
+                    b"revision lookup and so doesn't "
+                    b"support clone by revision"
+                )
+            )
 
         # TODO this is batchable.
         remoterevs = []
         for r in rev:
             with srcpeer.commandexecutor() as e:
-                remoterevs.append(e.callcommand('lookup', {
-                    'key': r,
-                }).result())
+                remoterevs.append(
+                    e.callcommand(b'lookup', {b'key': r,}).result()
+                )
         revs = remoterevs
 
     # Obtain a lock before checking for or cloning the pooled repo otherwise
@@ -437,17 +500,28 @@
     poolvfs = vfsmod.vfs(pooldir)
     basename = os.path.basename(sharepath)
 
-    with lock.lock(poolvfs, '%s.lock' % basename):
+    with lock.lock(poolvfs, b'%s.lock' % basename):
         if os.path.exists(sharepath):
-            ui.status(_('(sharing from existing pooled repository %s)\n') %
-                      basename)
+            ui.status(
+                _(b'(sharing from existing pooled repository %s)\n') % basename
+            )
         else:
-            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
+            ui.status(
+                _(b'(sharing from new pooled repository %s)\n') % basename
+            )
             # Always use pull mode because hardlinks in share mode don't work
             # well. Never update because working copies aren't necessary in
             # share mode.
-            clone(ui, peeropts, source, dest=sharepath, pull=True,
-                  revs=rev, update=False, stream=stream)
+            clone(
+                ui,
+                peeropts,
+                source,
+                dest=sharepath,
+                pull=True,
+                revs=rev,
+                update=False,
+                stream=stream,
+            )
 
     # Resolve the value to put in [paths] section for the source.
     if islocal(source):
@@ -456,8 +530,14 @@
         defaultpath = source
 
     sharerepo = repository(ui, path=sharepath)
-    destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
-                     defaultpath=defaultpath)
+    destrepo = share(
+        ui,
+        sharerepo,
+        dest=dest,
+        update=False,
+        bookmarks=False,
+        defaultpath=defaultpath,
+    )
 
     # We need to perform a pull against the dest repo to fetch bookmarks
     # and other non-store data that isn't shared by default. In the case of
@@ -470,20 +550,34 @@
 
     return srcpeer, peer(ui, peeropts, dest)
 
+
 # Recomputing branch cache might be slow on big repos,
 # so just copy it
 def _copycache(srcrepo, dstcachedir, fname):
     """copy a cache from srcrepo to destcachedir (if it exists)"""
-    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
+    srcbranchcache = srcrepo.vfs.join(b'cache/%s' % fname)
     dstbranchcache = os.path.join(dstcachedir, fname)
     if os.path.exists(srcbranchcache):
         if not os.path.exists(dstcachedir):
             os.mkdir(dstcachedir)
         util.copyfile(srcbranchcache, dstbranchcache)
 
-def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
-          update=True, stream=False, branch=None, shareopts=None,
-          storeincludepats=None, storeexcludepats=None, depth=None):
+
+def clone(
+    ui,
+    peeropts,
+    source,
+    dest=None,
+    pull=False,
+    revs=None,
+    update=True,
+    stream=False,
+    branch=None,
+    shareopts=None,
+    storeincludepats=None,
+    storeexcludepats=None,
+    depth=None,
+):
     """Make a copy of an existing repository.
 
     Create a copy of an existing repository in a new directory.  The
@@ -539,7 +633,7 @@
         source, branches = parseurl(origsource, branch)
         srcpeer = peer(ui, peeropts, source)
     else:
-        srcpeer = source.peer() # in case we were called with a localrepo
+        srcpeer = source.peer()  # in case we were called with a localrepo
         branches = (None, branch or [])
         origsource = source = srcpeer.url()
     revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
@@ -547,7 +641,7 @@
     if dest is None:
         dest = defaultdest(source)
         if dest:
-            ui.status(_("destination directory: %s\n") % dest)
+            ui.status(_(b"destination directory: %s\n") % dest)
     else:
         dest = ui.expandpath(dest)
 
@@ -555,14 +649,14 @@
     source = util.urllocalpath(source)
 
     if not dest:
-        raise error.Abort(_("empty destination path is not valid"))
+        raise error.Abort(_(b"empty destination path is not valid"))
 
     destvfs = vfsmod.vfs(dest, expandpath=True)
     if destvfs.lexists():
         if not destvfs.isdir():
-            raise error.Abort(_("destination '%s' already exists") % dest)
+            raise error.Abort(_(b"destination '%s' already exists") % dest)
         elif destvfs.listdir():
-            raise error.Abort(_("destination '%s' is not empty") % dest)
+            raise error.Abort(_(b"destination '%s' is not empty") % dest)
 
     createopts = {}
     narrow = False
@@ -578,12 +672,12 @@
     if narrow:
         # Include everything by default if only exclusion patterns defined.
         if storeexcludepats and not storeincludepats:
-            storeincludepats = {'path:.'}
+            storeincludepats = {b'path:.'}
 
-        createopts['narrowfiles'] = True
+        createopts[b'narrowfiles'] = True
 
     if depth:
-        createopts['shallowfilestore'] = True
+        createopts[b'shallowfilestore'] = True
 
     if srcpeer.capable(b'lfs-serve'):
         # Repository creation honors the config if it disabled the extension, so
@@ -593,55 +687,82 @@
         # requirement is added.  If the extension is explicitly disabled but the
         # requirement is set, the clone aborts early, before transferring any
         # data.
-        createopts['lfs'] = True
+        createopts[b'lfs'] = True
 
-        if extensions.disabledext('lfs'):
-            ui.status(_('(remote is using large file support (lfs), but it is '
-                        'explicitly disabled in the local configuration)\n'))
+        if extensions.disabledext(b'lfs'):
+            ui.status(
+                _(
+                    b'(remote is using large file support (lfs), but it is '
+                    b'explicitly disabled in the local configuration)\n'
+                )
+            )
         else:
-            ui.status(_('(remote is using large file support (lfs); lfs will '
-                        'be enabled for this repository)\n'))
+            ui.status(
+                _(
+                    b'(remote is using large file support (lfs); lfs will '
+                    b'be enabled for this repository)\n'
+                )
+            )
 
     shareopts = shareopts or {}
-    sharepool = shareopts.get('pool')
-    sharenamemode = shareopts.get('mode')
+    sharepool = shareopts.get(b'pool')
+    sharenamemode = shareopts.get(b'mode')
     if sharepool and islocal(dest):
         sharepath = None
-        if sharenamemode == 'identity':
+        if sharenamemode == b'identity':
             # Resolve the name from the initial changeset in the remote
             # repository. This returns nullid when the remote is empty. It
             # raises RepoLookupError if revision 0 is filtered or otherwise
             # not available. If we fail to resolve, sharing is not enabled.
             try:
                 with srcpeer.commandexecutor() as e:
-                    rootnode = e.callcommand('lookup', {
-                        'key': '0',
-                    }).result()
+                    rootnode = e.callcommand(
+                        b'lookup', {b'key': b'0',}
+                    ).result()
 
                 if rootnode != node.nullid:
                     sharepath = os.path.join(sharepool, node.hex(rootnode))
                 else:
-                    ui.status(_('(not using pooled storage: '
-                                'remote appears to be empty)\n'))
+                    ui.status(
+                        _(
+                            b'(not using pooled storage: '
+                            b'remote appears to be empty)\n'
+                        )
+                    )
             except error.RepoLookupError:
-                ui.status(_('(not using pooled storage: '
-                            'unable to resolve identity of remote)\n'))
-        elif sharenamemode == 'remote':
+                ui.status(
+                    _(
+                        b'(not using pooled storage: '
+                        b'unable to resolve identity of remote)\n'
+                    )
+                )
+        elif sharenamemode == b'remote':
             sharepath = os.path.join(
-                sharepool, node.hex(hashlib.sha1(source).digest()))
+                sharepool, node.hex(hashlib.sha1(source).digest())
+            )
         else:
-            raise error.Abort(_('unknown share naming mode: %s') %
-                              sharenamemode)
+            raise error.Abort(
+                _(b'unknown share naming mode: %s') % sharenamemode
+            )
 
         # TODO this is a somewhat arbitrary restriction.
         if narrow:
-            ui.status(_('(pooled storage not supported for narrow clones)\n'))
+            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
             sharepath = None
 
         if sharepath:
-            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
-                                  dest, pull=pull, rev=revs, update=update,
-                                  stream=stream)
+            return clonewithshare(
+                ui,
+                peeropts,
+                sharepath,
+                source,
+                srcpeer,
+                dest,
+                pull=pull,
+                rev=revs,
+                update=update,
+                stream=stream,
+            )
 
     srclock = destlock = cleandir = None
     srcrepo = srcpeer.local()
@@ -654,8 +775,12 @@
             cleandir = dest
 
         copy = False
-        if (srcrepo and srcrepo.cancopy() and islocal(dest)
-            and not phases.hassecret(srcrepo)):
+        if (
+            srcrepo
+            and srcrepo.cancopy()
+            and islocal(dest)
+            and not phases.hassecret(srcrepo)
+        ):
             copy = not pull and not revs
 
         # TODO this is a somewhat arbitrary restriction.
@@ -673,8 +798,8 @@
                 copy = False
 
         if copy:
-            srcrepo.hook('preoutgoing', throw=True, source='clone')
-            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
+            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
+            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
             if not os.path.exists(dest):
                 util.makedirs(dest)
             else:
@@ -686,51 +811,63 @@
             except OSError as inst:
                 if inst.errno == errno.EEXIST:
                     cleandir = None
-                    raise error.Abort(_("destination '%s' already exists")
-                                     % dest)
+                    raise error.Abort(
+                        _(b"destination '%s' already exists") % dest
+                    )
                 raise
 
             destlock = copystore(ui, srcrepo, destpath)
             # copy bookmarks over
-            srcbookmarks = srcrepo.vfs.join('bookmarks')
-            dstbookmarks = os.path.join(destpath, 'bookmarks')
+            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
+            dstbookmarks = os.path.join(destpath, b'bookmarks')
             if os.path.exists(srcbookmarks):
                 util.copyfile(srcbookmarks, dstbookmarks)
 
-            dstcachedir = os.path.join(destpath, 'cache')
+            dstcachedir = os.path.join(destpath, b'cache')
             for cache in cacheutil.cachetocopy(srcrepo):
                 _copycache(srcrepo, dstcachedir, cache)
 
             # we need to re-init the repo after manually copying the data
             # into it
             destpeer = peer(srcrepo, peeropts, dest)
-            srcrepo.hook('outgoing', source='clone',
-                          node=node.hex(node.nullid))
+            srcrepo.hook(
+                b'outgoing', source=b'clone', node=node.hex(node.nullid)
+            )
         else:
             try:
                 # only pass ui when no srcrepo
-                destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
-                                createopts=createopts)
+                destpeer = peer(
+                    srcrepo or ui,
+                    peeropts,
+                    dest,
+                    create=True,
+                    createopts=createopts,
+                )
             except OSError as inst:
                 if inst.errno == errno.EEXIST:
                     cleandir = None
-                    raise error.Abort(_("destination '%s' already exists")
-                                     % dest)
+                    raise error.Abort(
+                        _(b"destination '%s' already exists") % dest
+                    )
                 raise
 
             if revs:
-                if not srcpeer.capable('lookup'):
-                    raise error.Abort(_("src repository does not support "
-                                       "revision lookup and so doesn't "
-                                       "support clone by revision"))
+                if not srcpeer.capable(b'lookup'):
+                    raise error.Abort(
+                        _(
+                            b"src repository does not support "
+                            b"revision lookup and so doesn't "
+                            b"support clone by revision"
+                        )
+                    )
 
                 # TODO this is batchable.
                 remoterevs = []
                 for rev in revs:
                     with srcpeer.commandexecutor() as e:
-                        remoterevs.append(e.callcommand('lookup', {
-                            'key': rev,
-                        }).result())
+                        remoterevs.append(
+                            e.callcommand(b'lookup', {b'key': rev,}).result()
+                        )
                 revs = remoterevs
 
                 checkout = revs[0]
@@ -745,53 +882,66 @@
 
                 u = util.url(abspath)
                 defaulturl = bytes(u)
-                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
+                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                 if not stream:
                     if pull:
                         stream = False
                     else:
                         stream = None
                 # internal config: ui.quietbookmarkmove
-                overrides = {('ui', 'quietbookmarkmove'): True}
-                with local.ui.configoverride(overrides, 'clone'):
-                    exchange.pull(local, srcpeer, revs,
-                                  streamclonerequested=stream,
-                                  includepats=storeincludepats,
-                                  excludepats=storeexcludepats,
-                                  depth=depth)
+                overrides = {(b'ui', b'quietbookmarkmove'): True}
+                with local.ui.configoverride(overrides, b'clone'):
+                    exchange.pull(
+                        local,
+                        srcpeer,
+                        revs,
+                        streamclonerequested=stream,
+                        includepats=storeincludepats,
+                        excludepats=storeexcludepats,
+                        depth=depth,
+                    )
             elif srcrepo:
                 # TODO lift restriction once exchange.push() accepts narrow
                 # push.
                 if narrow:
-                    raise error.Abort(_('narrow clone not available for '
-                                        'remote destinations'))
+                    raise error.Abort(
+                        _(
+                            b'narrow clone not available for '
+                            b'remote destinations'
+                        )
+                    )
 
-                exchange.push(srcrepo, destpeer, revs=revs,
-                              bookmarks=srcrepo._bookmarks.keys())
+                exchange.push(
+                    srcrepo,
+                    destpeer,
+                    revs=revs,
+                    bookmarks=srcrepo._bookmarks.keys(),
+                )
             else:
-                raise error.Abort(_("clone from remote to remote not supported")
-                                 )
+                raise error.Abort(
+                    _(b"clone from remote to remote not supported")
+                )
 
         cleandir = None
 
         destrepo = destpeer.local()
         if destrepo:
-            template = uimod.samplehgrcs['cloned']
+            template = uimod.samplehgrcs[b'cloned']
             u = util.url(abspath)
             u.passwd = None
             defaulturl = bytes(u)
-            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
-            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
+            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
+            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
 
-            if ui.configbool('experimental', 'remotenames'):
+            if ui.configbool(b'experimental', b'remotenames'):
                 logexchange.pullremotenames(destrepo, srcpeer)
 
             if update:
                 if update is not True:
                     with srcpeer.commandexecutor() as e:
-                        checkout = e.callcommand('lookup', {
-                            'key': update,
-                        }).result()
+                        checkout = e.callcommand(
+                            b'lookup', {b'key': update,}
+                        ).result()
 
                 uprev = None
                 status = None
@@ -812,22 +962,23 @@
                                 pass
                 if uprev is None:
                     try:
-                        uprev = destrepo._bookmarks['@']
-                        update = '@'
+                        uprev = destrepo._bookmarks[b'@']
+                        update = b'@'
                         bn = destrepo[uprev].branch()
-                        if bn == 'default':
-                            status = _("updating to bookmark @\n")
+                        if bn == b'default':
+                            status = _(b"updating to bookmark @\n")
                         else:
-                            status = (_("updating to bookmark @ on branch %s\n")
-                                      % bn)
+                            status = (
+                                _(b"updating to bookmark @ on branch %s\n") % bn
+                            )
                     except KeyError:
                         try:
-                            uprev = destrepo.branchtip('default')
+                            uprev = destrepo.branchtip(b'default')
                         except error.RepoLookupError:
-                            uprev = destrepo.lookup('tip')
+                            uprev = destrepo.lookup(b'tip')
                 if not status:
                     bn = destrepo[uprev].branch()
-                    status = _("updating to branch %s\n") % bn
+                    status = _(b"updating to branch %s\n") % bn
                 destrepo.ui.status(status)
                 _update(destrepo, uprev)
                 if update in destrepo._bookmarks:
@@ -840,13 +991,23 @@
             srcpeer.close()
     return srcpeer, destpeer
 
+
 def _showstats(repo, stats, quietempty=False):
     if quietempty and stats.isempty():
         return
-    repo.ui.status(_("%d files updated, %d files merged, "
-                     "%d files removed, %d files unresolved\n") % (
-                   stats.updatedcount, stats.mergedcount,
-                   stats.removedcount, stats.unresolvedcount))
+    repo.ui.status(
+        _(
+            b"%d files updated, %d files merged, "
+            b"%d files removed, %d files unresolved\n"
+        )
+        % (
+            stats.updatedcount,
+            stats.mergedcount,
+            stats.removedcount,
+            stats.unresolvedcount,
+        )
+    )
+
 
 def updaterepo(repo, node, overwrite, updatecheck=None):
     """Update the working directory to node.
@@ -854,32 +1015,49 @@
     When overwrite is set, changes are clobbered; otherwise they are merged
 
     returns stats (see pydoc mercurial.merge.applyupdates)"""
-    return mergemod.update(repo, node, branchmerge=False, force=overwrite,
-                           labels=['working copy', 'destination'],
-                           updatecheck=updatecheck)
+    return mergemod.update(
+        repo,
+        node,
+        branchmerge=False,
+        force=overwrite,
+        labels=[b'working copy', b'destination'],
+        updatecheck=updatecheck,
+    )
+
 
 def update(repo, node, quietempty=False, updatecheck=None):
     """update the working directory to node"""
     stats = updaterepo(repo, node, False, updatecheck=updatecheck)
     _showstats(repo, stats, quietempty)
     if stats.unresolvedcount:
-        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
+        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
     return stats.unresolvedcount > 0
 
+
 # naming conflict in clone()
 _update = update
 
+
 def clean(repo, node, show_stats=True, quietempty=False):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = updaterepo(repo, node, True)
-    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
+    repo.vfs.unlinkpath(b'graftstate', ignoremissing=True)
     if show_stats:
         _showstats(repo, stats, quietempty)
     return stats.unresolvedcount > 0
 
+
 # naming conflict in updatetotally()
 _clean = clean
 
+_VALID_UPDATECHECKS = {
+    mergemod.UPDATECHECK_ABORT,
+    mergemod.UPDATECHECK_NONE,
+    mergemod.UPDATECHECK_LINEAR,
+    mergemod.UPDATECHECK_NO_CONFLICT,
+}
+
+
 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
     """Update the working directory with extra care for non-file components
 
@@ -894,21 +1072,28 @@
     :clean: whether changes in the working directory can be discarded
     :updatecheck: how to deal with a dirty working directory
 
-    Valid values for updatecheck are (None => linear):
+    Valid values for updatecheck are the UPDATECHECK_* constants
+    defined in the merge module. Passing `None` will result in using the
+    configured default.
 
-     * abort: abort if the working directory is dirty
-     * none: don't check (merge working directory changes into destination)
-     * linear: check that update is linear before merging working directory
+     * ABORT: abort if the working directory is dirty
+     * NONE: don't check (merge working directory changes into destination)
+     * LINEAR: check that update is linear before merging working directory
                changes into destination
-     * noconflict: check that the update does not result in file merges
+     * NO_CONFLICT: check that the update does not result in file merges
 
     This returns whether conflict is detected at updating or not.
     """
     if updatecheck is None:
-        updatecheck = ui.config('commands', 'update.check')
-        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
+        updatecheck = ui.config(b'commands', b'update.check')
+        if updatecheck not in _VALID_UPDATECHECKS:
             # If not configured, or invalid value configured
-            updatecheck = 'linear'
+            updatecheck = mergemod.UPDATECHECK_LINEAR
+    if updatecheck not in _VALID_UPDATECHECKS:
+        raise ValueError(
+            r'Invalid updatecheck value %r (can accept %r)'
+            % (updatecheck, _VALID_UPDATECHECKS)
+        )
     with repo.wlock():
         movemarkfrom = None
         warndest = False
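
With this change callers pass the ``UPDATECHECK_*`` constants instead of
bare strings; a hedged example, assuming ``ui``, ``repo``, ``checkout``, and
``brev`` are already in scope::

   from mercurial import hg, merge as mergemod

   # Refuse the update if it would create file merges.
   hg.updatetotally(
       ui, repo, checkout, brev, updatecheck=mergemod.UPDATECHECK_NO_CONFLICT
   )
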
@@ -920,31 +1105,31 @@
         if clean:
             ret = _clean(repo, checkout)
         else:
-            if updatecheck == 'abort':
+            if updatecheck == mergemod.UPDATECHECK_ABORT:
                 cmdutil.bailifchanged(repo, merge=False)
-                updatecheck = 'none'
+                updatecheck = mergemod.UPDATECHECK_NONE
             ret = _update(repo, checkout, updatecheck=updatecheck)
 
         if not ret and movemarkfrom:
-            if movemarkfrom == repo['.'].node():
-                pass # no-op update
-            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
-                b = ui.label(repo._activebookmark, 'bookmarks.active')
-                ui.status(_("updating bookmark %s\n") % b)
+            if movemarkfrom == repo[b'.'].node():
+                pass  # no-op update
+            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
+                b = ui.label(repo._activebookmark, b'bookmarks.active')
+                ui.status(_(b"updating bookmark %s\n") % b)
             else:
                 # this can happen with a non-linear update
-                b = ui.label(repo._activebookmark, 'bookmarks')
-                ui.status(_("(leaving bookmark %s)\n") % b)
+                b = ui.label(repo._activebookmark, b'bookmarks')
+                ui.status(_(b"(leaving bookmark %s)\n") % b)
                 bookmarks.deactivate(repo)
         elif brev in repo._bookmarks:
             if brev != repo._activebookmark:
-                b = ui.label(brev, 'bookmarks.active')
-                ui.status(_("(activating bookmark %s)\n") % b)
+                b = ui.label(brev, b'bookmarks.active')
+                ui.status(_(b"(activating bookmark %s)\n") % b)
             bookmarks.activate(repo, brev)
         elif brev:
             if repo._activebookmark:
-                b = ui.label(repo._activebookmark, 'bookmarks')
-                ui.status(_("(leaving bookmark %s)\n") % b)
+                b = ui.label(repo._activebookmark, b'bookmarks')
+                ui.status(_(b"(leaving bookmark %s)\n") % b)
             bookmarks.deactivate(repo)
 
         if warndest:
@@ -952,23 +1137,42 @@
 
     return ret
 
-def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
-          abort=False):
+
+def merge(
+    repo,
+    node,
+    force=None,
+    remind=True,
+    mergeforce=False,
+    labels=None,
+    abort=False,
+):
     """Branch merge with node, resolving changes. Return true if any
     unresolved conflicts."""
     if abort:
         return abortmerge(repo.ui, repo)
 
-    stats = mergemod.update(repo, node, branchmerge=True, force=force,
-                            mergeforce=mergeforce, labels=labels)
+    stats = mergemod.update(
+        repo,
+        node,
+        branchmerge=True,
+        force=force,
+        mergeforce=mergeforce,
+        labels=labels,
+    )
     _showstats(repo, stats)
     if stats.unresolvedcount:
-        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
-                         "or 'hg merge --abort' to abandon\n"))
+        repo.ui.status(
+            _(
+                b"use 'hg resolve' to retry unresolved file merges "
+                b"or 'hg merge --abort' to abandon\n"
+            )
+        )
     elif remind:
-        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
+        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
     return stats.unresolvedcount > 0
 
+
 def abortmerge(ui, repo):
     ms = mergemod.mergestate.read(repo)
     if ms.active():
@@ -976,49 +1180,53 @@
         node = ms.localctx.hex()
     else:
         # there were no conflicts, mergestate was not stored
-        node = repo['.'].hex()
+        node = repo[b'.'].hex()
 
-    repo.ui.status(_("aborting the merge, updating back to"
-                     " %s\n") % node[:12])
+    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
     stats = mergemod.update(repo, node, branchmerge=False, force=True)
     _showstats(repo, stats)
     return stats.unresolvedcount > 0
 
-def _incoming(displaychlist, subreporecurse, ui, repo, source,
-        opts, buffered=False):
+
+def _incoming(
+    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
+):
     """
     Helper for incoming / gincoming.
     displaychlist gets called with
         (remoterepo, incomingchangesetlist, displayer) parameters,
     and is supposed to contain only code that can't be unified.
     """
-    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
+    source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
     other = peer(repo, opts, source)
-    ui.status(_('comparing with %s\n') % util.hidepassword(source))
-    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
+    ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
+    revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
 
     if revs:
         revs = [other.lookup(rev) for rev in revs]
-    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
-                                revs, opts["bundle"], opts["force"])
+    other, chlist, cleanupfn = bundlerepo.getremotechanges(
+        ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
+    )
     try:
         if not chlist:
-            ui.status(_("no changes found\n"))
+            ui.status(_(b"no changes found\n"))
             return subreporecurse()
-        ui.pager('incoming')
-        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
-                                                  buffered=buffered)
+        ui.pager(b'incoming')
+        displayer = logcmdutil.changesetdisplayer(
+            ui, other, opts, buffered=buffered
+        )
         displaychlist(other, chlist, displayer)
         displayer.close()
     finally:
         cleanupfn()
     subreporecurse()
-    return 0 # exit code is zero since we found incoming changes
+    return 0  # exit code is zero since we found incoming changes
+
 
 def incoming(ui, repo, source, opts):
     def subreporecurse():
         ret = 1
-        if opts.get('subrepos'):
+        if opts.get(b'subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
@@ -1027,44 +1235,50 @@
 
     def display(other, chlist, displayer):
         limit = logcmdutil.getlimit(opts)
-        if opts.get('newest_first'):
+        if opts.get(b'newest_first'):
             chlist.reverse()
         count = 0
         for n in chlist:
             if limit is not None and count >= limit:
                 break
             parents = [p for p in other.changelog.parents(n) if p != nullid]
-            if opts.get('no_merges') and len(parents) == 2:
+            if opts.get(b'no_merges') and len(parents) == 2:
                 continue
             count += 1
             displayer.show(other[n])
+
     return _incoming(display, subreporecurse, ui, repo, source, opts)
 
+
 def _outgoing(ui, repo, dest, opts):
-    path = ui.paths.getpath(dest, default=('default-push', 'default'))
+    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
     if not path:
-        raise error.Abort(_('default repository not configured!'),
-                hint=_("see 'hg help config.paths'"))
+        raise error.Abort(
+            _(b'default repository not configured!'),
+            hint=_(b"see 'hg help config.paths'"),
+        )
     dest = path.pushloc or path.loc
-    branches = path.branch, opts.get('branch') or []
+    branches = path.branch, opts.get(b'branch') or []
 
-    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
-    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
+    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
+    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
     if revs:
         revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
 
     other = peer(repo, opts, dest)
-    outgoing = discovery.findcommonoutgoing(repo, other, revs,
-                                            force=opts.get('force'))
+    outgoing = discovery.findcommonoutgoing(
+        repo, other, revs, force=opts.get(b'force')
+    )
     o = outgoing.missing
     if not o:
         scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
     return o, other
 
+
 def outgoing(ui, repo, dest, opts):
     def recurse():
         ret = 1
-        if opts.get('subrepos'):
+        if opts.get(b'subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
@@ -1077,23 +1291,24 @@
         cmdutil.outgoinghooks(ui, repo, other, opts, o)
         return recurse()
 
-    if opts.get('newest_first'):
+    if opts.get(b'newest_first'):
         o.reverse()
-    ui.pager('outgoing')
+    ui.pager(b'outgoing')
     displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
     count = 0
     for n in o:
         if limit is not None and count >= limit:
             break
         parents = [p for p in repo.changelog.parents(n) if p != nullid]
-        if opts.get('no_merges') and len(parents) == 2:
+        if opts.get(b'no_merges') and len(parents) == 2:
             continue
         count += 1
         displayer.show(repo[n])
     displayer.close()
     cmdutil.outgoinghooks(ui, repo, other, opts, o)
     recurse()
-    return 0 # exit code is zero since we found outgoing changes
+    return 0  # exit code is zero since we found outgoing changes
+
 
 def verify(repo, level=None):
     """verify the consistency of a repository"""
@@ -1104,63 +1319,71 @@
     # concern.
 
     # pathto() is needed for -R case
-    revs = repo.revs("filelog(%s)",
-                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
+    revs = repo.revs(
+        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
+    )
 
     if revs:
-        repo.ui.status(_('checking subrepo links\n'))
+        repo.ui.status(_(b'checking subrepo links\n'))
         for rev in revs:
             ctx = repo[rev]
             try:
                 for subpath in ctx.substate:
                     try:
-                        ret = (ctx.sub(subpath, allowcreate=False).verify()
-                               or ret)
+                        ret = (
+                            ctx.sub(subpath, allowcreate=False).verify() or ret
+                        )
                     except error.RepoError as e:
-                        repo.ui.warn(('%d: %s\n') % (rev, e))
+                        repo.ui.warn(b'%d: %s\n' % (rev, e))
             except Exception:
-                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
-                             node.short(ctx.node()))
+                repo.ui.warn(
+                    _(b'.hgsubstate is corrupt in revision %s\n')
+                    % node.short(ctx.node())
+                )
 
     return ret
 
+
 def remoteui(src, opts):
-    'build a remote ui from ui or repo and opts'
-    if util.safehasattr(src, 'baseui'): # looks like a repository
-        dst = src.baseui.copy() # drop repo-specific config
-        src = src.ui # copy target options from repo
-    else: # assume it's a global ui object
-        dst = src.copy() # keep all global options
+    """build a remote ui from ui or repo and opts"""
+    if util.safehasattr(src, b'baseui'):  # looks like a repository
+        dst = src.baseui.copy()  # drop repo-specific config
+        src = src.ui  # copy target options from repo
+    else:  # assume it's a global ui object
+        dst = src.copy()  # keep all global options
 
     # copy ssh-specific options
-    for o in 'ssh', 'remotecmd':
-        v = opts.get(o) or src.config('ui', o)
+    for o in b'ssh', b'remotecmd':
+        v = opts.get(o) or src.config(b'ui', o)
         if v:
-            dst.setconfig("ui", o, v, 'copied')
+            dst.setconfig(b"ui", o, v, b'copied')
 
     # copy bundle-specific options
-    r = src.config('bundle', 'mainreporoot')
+    r = src.config(b'bundle', b'mainreporoot')
     if r:
-        dst.setconfig('bundle', 'mainreporoot', r, 'copied')
+        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
 
     # copy selected local settings to the remote ui
-    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
+    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
         for key, val in src.configitems(sect):
-            dst.setconfig(sect, key, val, 'copied')
-    v = src.config('web', 'cacerts')
+            dst.setconfig(sect, key, val, b'copied')
+    v = src.config(b'web', b'cacerts')
     if v:
-        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
+        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
 
     return dst
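# Illustrative note (not part of the change): because of the copying above,
# --ssh/--remotecmd options and local [auth]/[hostsecurity] sections take
# effect on the ui object that hg.peer() hands to the remote side.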
 
+
 # Files of interest
 # Used to check if the repository has changed, by looking at the mtime
 # and size of these files.
-foi = [('spath', '00changelog.i'),
-       ('spath', 'phaseroots'), # ! phase can change content at the same size
-       ('spath', 'obsstore'),
-       ('path', 'bookmarks'), # ! bookmark can change content at the same size
-      ]
+foi = [
+    (b'spath', b'00changelog.i'),
+    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
+    (b'spath', b'obsstore'),
+    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
+]
+
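# A minimal sketch (illustrative only, not part of this change) of the
# change detection the files-of-interest list enables: snapshot
# (mtime, size) per file and compare snapshots between requests.
import os


def _foisnapshot(paths):
    """Return {path: (mtime, size)}, with None for missing files."""
    state = {}
    for p in paths:
        try:
            st = os.stat(p)
            state[p] = (st.st_mtime, st.st_size)
        except OSError:
            state[p] = None
    return state


def _foichanged(prev, paths):
    """True if any file of interest changed since the *prev* snapshot."""
    return prev != _foisnapshot(paths)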
 
 class cachedlocalrepo(object):
     """Holds a localrepository that can be cached and reused."""
--- a/mercurial/hgweb/__init__.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hgweb/__init__.py	Mon Oct 21 11:09:48 2019 -0400
@@ -17,9 +17,7 @@
     pycompat,
 )
 
-from ..utils import (
-    procutil,
-)
+from ..utils import procutil
 
 from . import (
     hgweb_mod,
@@ -27,6 +25,7 @@
     server,
 )
 
+
 def hgweb(config, name=None, baseui=None):
     '''create an hgweb wsgi object
 
@@ -40,16 +39,22 @@
 
     if isinstance(config, pycompat.unicode):
         raise error.ProgrammingError(
-            'Mercurial only supports encoded strings: %r' % config)
-    if ((isinstance(config, bytes) and not os.path.isdir(config)) or
-        isinstance(config, dict) or isinstance(config, list)):
+            b'Mercurial only supports encoded strings: %r' % config
+        )
+    if (
+        (isinstance(config, bytes) and not os.path.isdir(config))
+        or isinstance(config, dict)
+        or isinstance(config, list)
+    ):
         # create a multi-dir interface
         return hgwebdir_mod.hgwebdir(config, baseui=baseui)
     return hgweb_mod.hgweb(config, name=name, baseui=baseui)
 
+
 def hgwebdir(config, baseui=None):
     return hgwebdir_mod.hgwebdir(config, baseui=baseui)
 
+
 class httpservice(object):
     def __init__(self, ui, app, opts):
         self.ui = ui
@@ -60,15 +65,17 @@
         procutil.setsignalhandler()
         self.httpd = server.create_server(self.ui, self.app)
 
-        if (self.opts['port'] and
-            not self.ui.verbose and
-            not self.opts['print_url']):
+        if (
+            self.opts[b'port']
+            and not self.ui.verbose
+            and not self.opts[b'print_url']
+        ):
             return
 
         if self.httpd.prefix:
-            prefix = self.httpd.prefix.strip('/') + '/'
+            prefix = self.httpd.prefix.strip(b'/') + b'/'
         else:
-            prefix = ''
+            prefix = b''
 
         port = r':%d' % self.httpd.port
         if port == r':80':
@@ -77,34 +84,41 @@
         bindaddr = self.httpd.addr
         if bindaddr == r'0.0.0.0':
             bindaddr = r'*'
-        elif r':' in bindaddr: # IPv6
+        elif r':' in bindaddr:  # IPv6
             bindaddr = r'[%s]' % bindaddr
 
         fqaddr = self.httpd.fqaddr
         if r':' in fqaddr:
             fqaddr = r'[%s]' % fqaddr
 
-        url = 'http://%s%s/%s' % (
-            pycompat.sysbytes(fqaddr), pycompat.sysbytes(port), prefix)
-        if self.opts['print_url']:
-            self.ui.write('%s\n' % url)
+        url = b'http://%s%s/%s' % (
+            pycompat.sysbytes(fqaddr),
+            pycompat.sysbytes(port),
+            prefix,
+        )
+        if self.opts[b'print_url']:
+            self.ui.write(b'%s\n' % url)
         else:
-            if self.opts['port']:
+            if self.opts[b'port']:
                 write = self.ui.status
             else:
                 write = self.ui.write
-            write(_('listening at %s (bound to %s:%d)\n') %
-                  (url, pycompat.sysbytes(bindaddr), self.httpd.port))
+            write(
+                _(b'listening at %s (bound to %s:%d)\n')
+                % (url, pycompat.sysbytes(bindaddr), self.httpd.port)
+            )
         self.ui.flush()  # avoid buffering of status message
 
     def run(self):
         self.httpd.serve_forever()
 
+
 def createapp(baseui, repo, webconf):
     if webconf:
         return hgwebdir_mod.hgwebdir(webconf, baseui=baseui)
     else:
         if not repo:
-            raise error.RepoError(_("there is no Mercurial repository"
-                                    " here (.hg not found)"))
+            raise error.RepoError(
+                _(b"there is no Mercurial repository here (.hg not found)")
+            )
         return hgweb_mod.hgweb(repo, baseui=baseui)
--- a/mercurial/hgweb/common.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hgweb/common.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,6 +14,10 @@
 import os
 import stat
 
+from ..pycompat import (
+    getattr,
+    open,
+)
 from .. import (
     encoding,
     pycompat,
@@ -42,7 +46,8 @@
     Can be overridden by extensions to provide more complex authorization
     schemes.
     """
-    return userlist == ['*'] or username in userlist
+    return userlist == [b'*'] or username in userlist
+
 
 def checkauthz(hgweb, req, op):
     '''Check permission for operation based on request data (including
@@ -51,41 +56,42 @@
 
     user = req.remoteuser
 
-    deny_read = hgweb.configlist('web', 'deny_read')
+    deny_read = hgweb.configlist(b'web', b'deny_read')
     if deny_read and (not user or ismember(hgweb.repo.ui, user, deny_read)):
-        raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
+        raise ErrorResponse(HTTP_UNAUTHORIZED, b'read not authorized')
 
-    allow_read = hgweb.configlist('web', 'allow_read')
+    allow_read = hgweb.configlist(b'web', b'allow_read')
     if allow_read and (not ismember(hgweb.repo.ui, user, allow_read)):
-        raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
+        raise ErrorResponse(HTTP_UNAUTHORIZED, b'read not authorized')
 
-    if op == 'pull' and not hgweb.allowpull:
-        raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
-    elif op == 'pull' or op is None: # op is None for interface requests
+    if op == b'pull' and not hgweb.allowpull:
+        raise ErrorResponse(HTTP_UNAUTHORIZED, b'pull not authorized')
+    elif op == b'pull' or op is None:  # op is None for interface requests
         return
 
     # Allow LFS uploading via PUT requests
-    if op == 'upload':
-        if req.method != 'PUT':
-            msg = 'upload requires PUT request'
+    if op == b'upload':
+        if req.method != b'PUT':
+            msg = b'upload requires PUT request'
             raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
     # enforce that you can only push using POST requests
-    elif req.method != 'POST':
-        msg = 'push requires POST request'
+    elif req.method != b'POST':
+        msg = b'push requires POST request'
         raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
 
     # require ssl by default for pushing so that auth info cannot be
     # sniffed and replayed
-    if hgweb.configbool('web', 'push_ssl') and req.urlscheme != 'https':
-        raise ErrorResponse(HTTP_FORBIDDEN, 'ssl required')
+    if hgweb.configbool(b'web', b'push_ssl') and req.urlscheme != b'https':
+        raise ErrorResponse(HTTP_FORBIDDEN, b'ssl required')
 
-    deny = hgweb.configlist('web', 'deny_push')
+    deny = hgweb.configlist(b'web', b'deny_push')
     if deny and (not user or ismember(hgweb.repo.ui, user, deny)):
-        raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
+        raise ErrorResponse(HTTP_UNAUTHORIZED, b'push not authorized')
 
-    allow = hgweb.configlist('web', 'allow-push')
+    allow = hgweb.configlist(b'web', b'allow-push')
     if not (allow and ismember(hgweb.repo.ui, user, allow)):
-        raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
+        raise ErrorResponse(HTTP_UNAUTHORIZED, b'push not authorized')
+
 
 # Hooks for hgweb permission checks; extensions can add hooks here.
 # Each hook is invoked like this: hook(hgweb, request, operation),
@@ -106,6 +112,8 @@
         if headers is None:
             headers = []
         self.headers = headers
+        self.message = message
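        # Python 3 removed BaseException.message, so store it explicitly;
        # hgwebdir's error template rendering below depends on it.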
+
 
 class continuereader(object):
     """File object wrapper to handle HTTP 100-continue.
@@ -115,6 +123,7 @@
     response is sent. This should trigger the client into actually sending
     the request body.
     """
+
     def __init__(self, f, write):
         self.f = f
         self._write = write
@@ -123,21 +132,25 @@
     def read(self, amt=-1):
         if not self.continued:
             self.continued = True
-            self._write('HTTP/1.1 100 Continue\r\n\r\n')
+            self._write(b'HTTP/1.1 100 Continue\r\n\r\n')
         return self.f.read(amt)
 
     def __getattr__(self, attr):
-        if attr in ('close', 'readline', 'readlines', '__iter__'):
+        if attr in (r'close', r'readline', r'readlines', r'__iter__'):
             return getattr(self.f, attr)
         raise AttributeError
 
+
 def _statusmessage(code):
     responses = httpserver.basehttprequesthandler.responses
     return pycompat.bytesurl(
-        responses.get(code, (r'Error', r'Unknown error'))[0])
+        responses.get(code, (r'Error', r'Unknown error'))[0]
+    )
+
 
 def statusmessage(code, message=None):
-    return '%d %s' % (code, message or _statusmessage(code))
+    return b'%d %s' % (code, message or _statusmessage(code))
+
 
 def get_stat(spath, fn):
     """stat fn if it exists, spath otherwise"""
@@ -147,20 +160,26 @@
     else:
         return os.stat(spath)
 
+
 def get_mtime(spath):
-    return get_stat(spath, "00changelog.i")[stat.ST_MTIME]
+    return get_stat(spath, b"00changelog.i")[stat.ST_MTIME]
+
 
 def ispathsafe(path):
     """Determine if a path is safe to use for filesystem access."""
-    parts = path.split('/')
+    parts = path.split(b'/')
     for part in parts:
-        if (part in ('', pycompat.oscurdir, pycompat.ospardir) or
-            pycompat.ossep in part or
-            pycompat.osaltsep is not None and pycompat.osaltsep in part):
+        if (
+            part in (b'', pycompat.oscurdir, pycompat.ospardir)
+            or pycompat.ossep in part
+            or pycompat.osaltsep is not None
+            and pycompat.osaltsep in part
+        ):
             return False
 
     return True
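# Illustrative expectations for the check above (POSIX separator assumed):
#
#   ispathsafe(b'static/style.css')  -> True
#   ispathsafe(b'../etc/passwd')     -> False (b'..' component)
#   ispathsafe(b'a//b')              -> False (empty component)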
 
+
 def staticfile(directory, fname, res):
     """return a file inside directory with guessed Content-Type header
 
@@ -173,7 +192,7 @@
     if not ispathsafe(fname):
         return
 
-    fpath = os.path.join(*fname.split('/'))
+    fpath = os.path.join(*fname.split(b'/'))
     if isinstance(directory, str):
         directory = [directory]
     for d in directory:
@@ -183,21 +202,24 @@
     try:
         os.stat(path)
         ct = pycompat.sysbytes(
-            mimetypes.guess_type(pycompat.fsdecode(path))[0] or r"text/plain")
-        with open(path, 'rb') as fh:
+            mimetypes.guess_type(pycompat.fsdecode(path))[0] or r"text/plain"
+        )
+        with open(path, b'rb') as fh:
             data = fh.read()
 
-        res.headers['Content-Type'] = ct
+        res.headers[b'Content-Type'] = ct
         res.setbodybytes(data)
         return res
     except TypeError:
-        raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
+        raise ErrorResponse(HTTP_SERVER_ERROR, b'illegal filename')
     except OSError as err:
         if err.errno == errno.ENOENT:
             raise ErrorResponse(HTTP_NOT_FOUND)
         else:
-            raise ErrorResponse(HTTP_SERVER_ERROR,
-                                encoding.strtolocal(err.strerror))
+            raise ErrorResponse(
+                HTTP_SERVER_ERROR, encoding.strtolocal(err.strerror)
+            )
+
 
 def paritygen(stripecount, offset=0):
     """count parity of horizontal stripes for easier reading"""
@@ -215,15 +237,20 @@
             parity = 1 - parity
             count = 0
 
+
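# Hedged usage sketch (not part of the change): with stripecount=2 the
# generator above yields 0, 0, 1, 1, 0, 0, ... so templates can alternate
# row shading every two rows.
from itertools import islice

assert list(islice(paritygen(2), 8)) == [0, 0, 1, 1, 0, 0, 1, 1]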
 def get_contact(config):
     """Return repo contact information or empty string.
 
     web.contact is the primary source, but if that is not set, try
     ui.username or $EMAIL as a fallback to display something useful.
     """
-    return (config("web", "contact") or
-            config("ui", "username") or
-            encoding.environ.get("EMAIL") or "")
+    return (
+        config(b"web", b"contact")
+        or config(b"ui", b"username")
+        or encoding.environ.get(b"EMAIL")
+        or b""
+    )
+
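# Hedged configuration example for the fallback chain above:
#
#   [web]
#   contact = Jane Doe <jane@example.com>
#
# takes precedence over ui.username and the EMAIL environment variable.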
 
 def cspvalues(ui):
     """Obtain the Content-Security-Policy header and nonce value.
@@ -252,11 +279,11 @@
 
     # Don't allow untrusted CSP setting since it could disable protections
     # from a trusted/global source.
-    csp = ui.config('web', 'csp', untrusted=False)
+    csp = ui.config(b'web', b'csp', untrusted=False)
     nonce = None
 
-    if csp and '%nonce%' in csp:
-        nonce = base64.urlsafe_b64encode(uuid.uuid4().bytes).rstrip('=')
-        csp = csp.replace('%nonce%', nonce)
+    if csp and b'%nonce%' in csp:
+        nonce = base64.urlsafe_b64encode(uuid.uuid4().bytes).rstrip(b'=')
+        csp = csp.replace(b'%nonce%', nonce)
 
     return csp, nonce
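# Hedged example of a web.csp value handled above; %nonce% is replaced with
# a fresh per-request value:
#
#   [web]
#   csp = default-src 'self'; script-src 'self' 'nonce-%nonce%'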
--- a/mercurial/hgweb/hgweb_mod.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hgweb/hgweb_mod.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,6 +18,7 @@
     permhooks,
     statusmessage,
 )
+from ..pycompat import getattr
 
 from .. import (
     encoding,
@@ -45,39 +46,42 @@
     wsgicgi,
 )
 
+
 def getstyle(req, configfn, templatepath):
     styles = (
-        req.qsparams.get('style', None),
-        configfn('web', 'style'),
-        'paper',
+        req.qsparams.get(b'style', None),
+        configfn(b'web', b'style'),
+        b'paper',
     )
     return styles, templater.stylemap(styles, templatepath)
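# Resolution order above: the ?style= query parameter wins, then the
# web.style config value, then the b'paper' default.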
 
-def makebreadcrumb(url, prefix=''):
+
+def makebreadcrumb(url, prefix=b''):
     '''Return a 'URL breadcrumb' list
 
     A 'URL breadcrumb' is a list of URL-name pairs,
     corresponding to each of the path items on a URL.
     This can be used to create path navigation entries.
     '''
-    if url.endswith('/'):
+    if url.endswith(b'/'):
         url = url[:-1]
     if prefix:
-        url = '/' + prefix + url
+        url = b'/' + prefix + url
     relpath = url
-    if relpath.startswith('/'):
+    if relpath.startswith(b'/'):
         relpath = relpath[1:]
 
     breadcrumb = []
     urlel = url
-    pathitems = [''] + relpath.split('/')
+    pathitems = [b''] + relpath.split(b'/')
     for pathel in reversed(pathitems):
         if not pathel or not urlel:
             break
-        breadcrumb.append({'url': urlel, 'name': pathel})
+        breadcrumb.append({b'url': urlel, b'name': pathel})
         urlel = os.path.dirname(urlel)
     return templateutil.mappinglist(reversed(breadcrumb))
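# Illustrative result (hedged): makebreadcrumb(b'/a/b/c') returns a
# mappinglist of
#
#   [{b'url': b'/a',     b'name': b'a'},
#    {b'url': b'/a/b',   b'name': b'b'},
#    {b'url': b'/a/b/c', b'name': b'c'}]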
 
+
 class requestcontext(object):
     """Holds state/context for an individual request.
 
@@ -85,22 +89,23 @@
     is prone to race conditions. Instances of this class exist to hold
     mutable and race-free state for requests.
     """
+
     def __init__(self, app, repo, req, res):
         self.repo = repo
         self.reponame = app.reponame
         self.req = req
         self.res = res
 
-        self.maxchanges = self.configint('web', 'maxchanges')
-        self.stripecount = self.configint('web', 'stripes')
-        self.maxshortchanges = self.configint('web', 'maxshortchanges')
-        self.maxfiles = self.configint('web', 'maxfiles')
-        self.allowpull = self.configbool('web', 'allow-pull')
+        self.maxchanges = self.configint(b'web', b'maxchanges')
+        self.stripecount = self.configint(b'web', b'stripes')
+        self.maxshortchanges = self.configint(b'web', b'maxshortchanges')
+        self.maxfiles = self.configint(b'web', b'maxfiles')
+        self.allowpull = self.configbool(b'web', b'allow-pull')
 
         # we use untrusted=False to prevent a repo owner from using
         # web.templates in .hg/hgrc to get access to any file readable
         # by the user running the CGI script
-        self.templatepath = self.config('web', 'templates', untrusted=False)
+        self.templatepath = self.config(b'web', b'templates', untrusted=False)
 
         # This object is more expensive to build than simple config values.
         # It is shared across requests. The app will replace the object
@@ -113,20 +118,22 @@
 
     # Trust the settings from the .hg/hgrc files by default.
     def config(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.config(section, name, default,
-                                   untrusted=untrusted)
+        return self.repo.ui.config(section, name, default, untrusted=untrusted)
 
     def configbool(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.configbool(section, name, default,
-                                       untrusted=untrusted)
+        return self.repo.ui.configbool(
+            section, name, default, untrusted=untrusted
+        )
 
     def configint(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.configint(section, name, default,
-                                      untrusted=untrusted)
+        return self.repo.ui.configint(
+            section, name, default, untrusted=untrusted
+        )
 
     def configlist(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.configlist(section, name, default,
-                                       untrusted=untrusted)
+        return self.repo.ui.configlist(
+            section, name, default, untrusted=untrusted
+        )
 
     def archivelist(self, nodeid):
         return webutil.archivelist(self.repo.ui, nodeid)
@@ -134,60 +141,64 @@
     def templater(self, req):
         # determine scheme, port and server name
         # this is needed to create absolute urls
-        logourl = self.config('web', 'logourl')
-        logoimg = self.config('web', 'logoimg')
-        staticurl = (self.config('web', 'staticurl')
-                     or req.apppath.rstrip('/') + '/static/')
-        if not staticurl.endswith('/'):
-            staticurl += '/'
+        logourl = self.config(b'web', b'logourl')
+        logoimg = self.config(b'web', b'logoimg')
+        staticurl = (
+            self.config(b'web', b'staticurl')
+            or req.apppath.rstrip(b'/') + b'/static/'
+        )
+        if not staticurl.endswith(b'/'):
+            staticurl += b'/'
 
         # figure out which style to use
 
         vars = {}
-        styles, (style, mapfile) = getstyle(req, self.config,
-                                            self.templatepath)
+        styles, (style, mapfile) = getstyle(req, self.config, self.templatepath)
         if style == styles[0]:
-            vars['style'] = style
+            vars[b'style'] = style
 
-        sessionvars = webutil.sessionvars(vars, '?')
+        sessionvars = webutil.sessionvars(vars, b'?')
 
         if not self.reponame:
-            self.reponame = (self.config('web', 'name', '')
-                             or req.reponame
-                             or req.apppath
-                             or self.repo.root)
+            self.reponame = (
+                self.config(b'web', b'name', b'')
+                or req.reponame
+                or req.apppath
+                or self.repo.root
+            )
 
         filters = {}
         templatefilter = registrar.templatefilter(filters)
-        @templatefilter('websub', intype=bytes)
+
+        @templatefilter(b'websub', intype=bytes)
         def websubfilter(text):
             return templatefilters.websub(text, self.websubtable)
 
         # create the templater
         # TODO: export all keywords: defaults = templatekw.keywords.copy()
         defaults = {
-            'url': req.apppath + '/',
-            'logourl': logourl,
-            'logoimg': logoimg,
-            'staticurl': staticurl,
-            'urlbase': req.advertisedbaseurl,
-            'repo': self.reponame,
-            'encoding': encoding.encoding,
-            'sessionvars': sessionvars,
-            'pathdef': makebreadcrumb(req.apppath),
-            'style': style,
-            'nonce': self.nonce,
+            b'url': req.apppath + b'/',
+            b'logourl': logourl,
+            b'logoimg': logoimg,
+            b'staticurl': staticurl,
+            b'urlbase': req.advertisedbaseurl,
+            b'repo': self.reponame,
+            b'encoding': encoding.encoding,
+            b'sessionvars': sessionvars,
+            b'pathdef': makebreadcrumb(req.apppath),
+            b'style': style,
+            b'nonce': self.nonce,
         }
         templatekeyword = registrar.templatekeyword(defaults)
-        @templatekeyword('motd', requires=())
+
+        @templatekeyword(b'motd', requires=())
         def motd(context, mapping):
-            yield self.config('web', 'motd')
+            yield self.config(b'web', b'motd')
 
         tres = formatter.templateresources(self.repo.ui, self.repo)
-        tmpl = templater.templater.frommapfile(mapfile,
-                                               filters=filters,
-                                               defaults=defaults,
-                                               resources=tres)
+        tmpl = templater.templater.frommapfile(
+            mapfile, filters=filters, defaults=defaults, resources=tres
+        )
         return tmpl
 
     def sendtemplate(self, name, **kwargs):
@@ -196,6 +207,7 @@
         self.res.setbodygen(self.tmpl.generate(name, kwargs))
         return self.res.sendresponse()
 
+
 class hgweb(object):
     """HTTP server for individual repositories.
 
@@ -207,6 +219,7 @@
     Some servers are multi-threaded. On these servers, there may
     be multiple active threads inside __call__.
     """
+
     def __init__(self, repo, name=None, baseui=None):
         if isinstance(repo, bytes):
             if baseui:
@@ -220,23 +233,23 @@
             # we trust caller to give us a private copy
             r = repo
 
-        r.ui.setconfig('ui', 'report_untrusted', 'off', 'hgweb')
-        r.baseui.setconfig('ui', 'report_untrusted', 'off', 'hgweb')
-        r.ui.setconfig('ui', 'nontty', 'true', 'hgweb')
-        r.baseui.setconfig('ui', 'nontty', 'true', 'hgweb')
+        r.ui.setconfig(b'ui', b'report_untrusted', b'off', b'hgweb')
+        r.baseui.setconfig(b'ui', b'report_untrusted', b'off', b'hgweb')
+        r.ui.setconfig(b'ui', b'nontty', b'true', b'hgweb')
+        r.baseui.setconfig(b'ui', b'nontty', b'true', b'hgweb')
         # resolve file patterns relative to repo root
-        r.ui.setconfig('ui', 'forcecwd', r.root, 'hgweb')
-        r.baseui.setconfig('ui', 'forcecwd', r.root, 'hgweb')
+        r.ui.setconfig(b'ui', b'forcecwd', r.root, b'hgweb')
+        r.baseui.setconfig(b'ui', b'forcecwd', r.root, b'hgweb')
         # it's unlikely that we can replace signal handlers in WSGI server,
         # and mod_wsgi issues a big warning. a plain hgweb process (with no
         # threading) could replace signal handlers, but we don't bother
         # conditionally enabling it.
-        r.ui.setconfig('ui', 'signal-safe-lock', 'false', 'hgweb')
-        r.baseui.setconfig('ui', 'signal-safe-lock', 'false', 'hgweb')
+        r.ui.setconfig(b'ui', b'signal-safe-lock', b'false', b'hgweb')
+        r.baseui.setconfig(b'ui', b'signal-safe-lock', b'false', b'hgweb')
         # displaying bundling progress bar while serving feels wrong and may
         # break some wsgi implementations.
-        r.ui.setconfig('progress', 'disable', 'true', 'hgweb')
-        r.baseui.setconfig('progress', 'disable', 'true', 'hgweb')
+        r.ui.setconfig(b'progress', b'disable', b'true', b'hgweb')
+        r.baseui.setconfig(b'progress', b'disable', b'true', b'hgweb')
         self._repos = [hg.cachedlocalrepo(self._webifyrepo(r))]
         self._lastrepo = self._repos[0]
         hook.redirect(True)
@@ -282,10 +295,13 @@
         Modern servers should be using WSGI and should avoid this
         method, if possible.
         """
-        if not encoding.environ.get('GATEWAY_INTERFACE',
-                                    '').startswith("CGI/1."):
-            raise RuntimeError("This function is only intended to be "
-                               "called while running as a CGI script.")
+        if not encoding.environ.get(b'GATEWAY_INTERFACE', b'').startswith(
+            b"CGI/1."
+        ):
+            raise RuntimeError(
+                b"This function is only intended to be "
+                b"called while running as a CGI script."
+            )
         wsgicgi.launch(self)
 
     def __call__(self, env, respond):
@@ -305,7 +321,7 @@
         should be using instances of this class as the WSGI application.
         """
         with self._obtainrepo() as repo:
-            profile = repo.ui.configbool('profiling', 'enabled')
+            profile = repo.ui.configbool(b'profiling', b'enabled')
             with profiling.profile(repo.ui, enabled=profile):
                 for r in self._runwsgi(req, res, repo):
                     yield r
@@ -314,26 +330,28 @@
         rctx = requestcontext(self, repo, req, res)
 
         # This state is global across all threads.
-        encoding.encoding = rctx.config('web', 'encoding')
+        encoding.encoding = rctx.config(b'web', b'encoding')
         rctx.repo.ui.environ = req.rawenv
 
         if rctx.csp:
             # hgwebdir may have added CSP header. Since we generate our own,
             # replace it.
-            res.headers['Content-Security-Policy'] = rctx.csp
+            res.headers[b'Content-Security-Policy'] = rctx.csp
 
         # /api/* is reserved for various API implementations. Dispatch
         # accordingly. But URL paths can conflict with subrepos and virtual
         # repos in hgwebdir. So until we have a workaround for this, only
         # expose the URLs if the feature is enabled.
-        apienabled = rctx.repo.ui.configbool('experimental', 'web.apiserver')
+        apienabled = rctx.repo.ui.configbool(b'experimental', b'web.apiserver')
         if apienabled and req.dispatchparts and req.dispatchparts[0] == b'api':
-            wireprotoserver.handlewsgiapirequest(rctx, req, res,
-                                                 self.check_perm)
+            wireprotoserver.handlewsgiapirequest(
+                rctx, req, res, self.check_perm
+            )
             return res.sendresponse()
 
         handled = wireprotoserver.handlewsgirequest(
-            rctx, req, res, self.check_perm)
+            rctx, req, res, self.check_perm
+        )
         if handled:
             return res.sendresponse()
 
@@ -344,69 +362,70 @@
         if req.dispatchpath is not None:
             query = req.dispatchpath
         else:
-            query = req.querystring.partition('&')[0].partition(';')[0]
+            query = req.querystring.partition(b'&')[0].partition(b';')[0]
 
         # translate user-visible url structure to internal structure
 
-        args = query.split('/', 2)
-        if 'cmd' not in req.qsparams and args and args[0]:
+        args = query.split(b'/', 2)
+        if b'cmd' not in req.qsparams and args and args[0]:
             cmd = args.pop(0)
-            style = cmd.rfind('-')
+            style = cmd.rfind(b'-')
             if style != -1:
-                req.qsparams['style'] = cmd[:style]
-                cmd = cmd[style + 1:]
+                req.qsparams[b'style'] = cmd[:style]
+                cmd = cmd[style + 1 :]
 
             # avoid accepting e.g. style parameter as command
             if util.safehasattr(webcommands, cmd):
-                req.qsparams['cmd'] = cmd
+                req.qsparams[b'cmd'] = cmd
 
-            if cmd == 'static':
-                req.qsparams['file'] = '/'.join(args)
+            if cmd == b'static':
+                req.qsparams[b'file'] = b'/'.join(args)
             else:
                 if args and args[0]:
-                    node = args.pop(0).replace('%2F', '/')
-                    req.qsparams['node'] = node
+                    node = args.pop(0).replace(b'%2F', b'/')
+                    req.qsparams[b'node'] = node
                 if args:
-                    if 'file' in req.qsparams:
-                        del req.qsparams['file']
+                    if b'file' in req.qsparams:
+                        del req.qsparams[b'file']
                     for a in args:
-                        req.qsparams.add('file', a)
+                        req.qsparams.add(b'file', a)
 
-            ua = req.headers.get('User-Agent', '')
-            if cmd == 'rev' and 'mercurial' in ua:
-                req.qsparams['style'] = 'raw'
+            ua = req.headers.get(b'User-Agent', b'')
+            if cmd == b'rev' and b'mercurial' in ua:
+                req.qsparams[b'style'] = b'raw'
 
-            if cmd == 'archive':
-                fn = req.qsparams['node']
-                for type_, spec in webutil.archivespecs.iteritems():
+            if cmd == b'archive':
+                fn = req.qsparams[b'node']
+                for type_, spec in pycompat.iteritems(webutil.archivespecs):
                     ext = spec[2]
                     if fn.endswith(ext):
-                        req.qsparams['node'] = fn[:-len(ext)]
-                        req.qsparams['type'] = type_
+                        req.qsparams[b'node'] = fn[: -len(ext)]
+                        req.qsparams[b'type'] = type_
         else:
-            cmd = req.qsparams.get('cmd', '')
+            cmd = req.qsparams.get(b'cmd', b'')
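        # Illustrative translations performed above (hedged):
        #   /log/tip          -> cmd=b'log', node=b'tip'
        #   /file/tip/README  -> cmd=b'file', node=b'tip', file=b'README'
        #   /static/style.css -> cmd=b'static', file=b'style.css'
        #   /raw-rev/0        -> style=b'raw', cmd=b'rev', node=b'0'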
 
         # process the web interface request
 
         try:
             rctx.tmpl = rctx.templater(req)
-            ctype = rctx.tmpl.render('mimetype',
-                                     {'encoding': encoding.encoding})
+            ctype = rctx.tmpl.render(
+                b'mimetype', {b'encoding': encoding.encoding}
+            )
 
             # check read permissions non-static content
-            if cmd != 'static':
+            if cmd != b'static':
                 self.check_perm(rctx, req, None)
 
-            if cmd == '':
-                req.qsparams['cmd'] = rctx.tmpl.render('default', {})
-                cmd = req.qsparams['cmd']
+            if cmd == b'':
+                req.qsparams[b'cmd'] = rctx.tmpl.render(b'default', {})
+                cmd = req.qsparams[b'cmd']
 
             # Don't enable caching if using a CSP nonce because then it wouldn't
             # be a nonce.
-            if rctx.configbool('web', 'cache') and not rctx.nonce:
-                tag = 'W/"%d"' % self.mtime
-                if req.headers.get('If-None-Match') == tag:
-                    res.status = '304 Not Modified'
+            if rctx.configbool(b'web', b'cache') and not rctx.nonce:
+                tag = b'W/"%d"' % self.mtime
+                if req.headers.get(b'If-None-Match') == tag:
+                    res.status = b'304 Not Modified'
                     # Content-Type may be defined globally. It isn't valid on a
                     # 304, so discard it.
                     try:
@@ -414,49 +433,51 @@
                     except KeyError:
                         pass
                     # Response body not allowed on 304.
-                    res.setbodybytes('')
+                    res.setbodybytes(b'')
                     return res.sendresponse()
 
-                res.headers['ETag'] = tag
+                res.headers[b'ETag'] = tag
 
             if cmd not in webcommands.__all__:
-                msg = 'no such method: %s' % cmd
+                msg = b'no such method: %s' % cmd
                 raise ErrorResponse(HTTP_BAD_REQUEST, msg)
             else:
                 # Set some globals appropriate for web handlers. Commands can
                 # override easily enough.
-                res.status = '200 Script output follows'
-                res.headers['Content-Type'] = ctype
+                res.status = b'200 Script output follows'
+                res.headers[b'Content-Type'] = ctype
                 return getattr(webcommands, cmd)(rctx)
 
         except (error.LookupError, error.RepoLookupError) as err:
             msg = pycompat.bytestr(err)
-            if (util.safehasattr(err, 'name') and
-                not isinstance(err,  error.ManifestLookupError)):
-                msg = 'revision not found: %s' % err.name
+            if util.safehasattr(err, b'name') and not isinstance(
+                err, error.ManifestLookupError
+            ):
+                msg = b'revision not found: %s' % err.name
 
-            res.status = '404 Not Found'
-            res.headers['Content-Type'] = ctype
-            return rctx.sendtemplate('error', error=msg)
+            res.status = b'404 Not Found'
+            res.headers[b'Content-Type'] = ctype
+            return rctx.sendtemplate(b'error', error=msg)
         except (error.RepoError, error.StorageError) as e:
-            res.status = '500 Internal Server Error'
-            res.headers['Content-Type'] = ctype
-            return rctx.sendtemplate('error', error=pycompat.bytestr(e))
+            res.status = b'500 Internal Server Error'
+            res.headers[b'Content-Type'] = ctype
+            return rctx.sendtemplate(b'error', error=pycompat.bytestr(e))
         except error.Abort as e:
-            res.status = '403 Forbidden'
-            res.headers['Content-Type'] = ctype
-            return rctx.sendtemplate('error', error=pycompat.bytestr(e))
+            res.status = b'403 Forbidden'
+            res.headers[b'Content-Type'] = ctype
+            return rctx.sendtemplate(b'error', error=pycompat.bytestr(e))
         except ErrorResponse as e:
             for k, v in e.headers:
                 res.headers[k] = v
             res.status = statusmessage(e.code, pycompat.bytestr(e))
-            res.headers['Content-Type'] = ctype
-            return rctx.sendtemplate('error', error=pycompat.bytestr(e))
+            res.headers[b'Content-Type'] = ctype
+            return rctx.sendtemplate(b'error', error=pycompat.bytestr(e))
 
     def check_perm(self, rctx, req, op):
         for permhook in permhooks:
             permhook(rctx, req, op)
 
+
 def getwebview(repo):
     """The 'web.view' config controls changeset filter to hgweb. Possible
     values are ``served``, ``visible`` and ``all``. Default is ``served``.
@@ -469,10 +490,10 @@
     The option has been around undocumented since Mercurial 2.5, but no
     user ever asked about it. So we'd better keep it undocumented for now."""
     # experimental config: web.view
-    viewconfig = repo.ui.config('web', 'view', untrusted=True)
-    if viewconfig == 'all':
+    viewconfig = repo.ui.config(b'web', b'view', untrusted=True)
+    if viewconfig == b'all':
         return repo.unfiltered()
     elif viewconfig in repoview.filtertable:
         return repo.filtered(viewconfig)
     else:
-        return repo.filtered('served')
+        return repo.filtered(b'served')
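# Hedged usage example for the experimental knob parsed above:
#
#   [web]
#   view = visible
#
# The value must be b'all' or a name in repoview.filtertable (which includes
# b'visible' and b'served'); anything else falls back to b'served'.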
--- a/mercurial/hgweb/hgwebdir_mod.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hgweb/hgwebdir_mod.py	Mon Oct 21 11:09:48 2019 -0400
@@ -50,8 +50,10 @@
 )
 from ..utils import dateutil
 
+
 def cleannames(items):
-    return [(util.pconvert(name).strip('/'), path) for name, path in items]
+    return [(util.pconvert(name).strip(b'/'), path) for name, path in items]
+
 
 def findrepos(paths):
     repos = []
@@ -62,7 +64,7 @@
         # '*' will not search inside dirs with .hg (except .hg/patches),
         # '**' will search inside dirs with .hg (and thus also find subrepos).
         try:
-            recurse = {'*': False, '**': True}[roottail]
+            recurse = {b'*': False, b'**': True}[roottail]
         except KeyError:
             repos.append((prefix, root))
             continue
@@ -71,6 +73,7 @@
         repos.extend(urlrepos(prefix, roothead, paths))
     return repos
 
+
 def urlrepos(prefix, roothead, paths):
     """yield url paths and filesystem paths from a list of repo paths
 
@@ -82,8 +85,10 @@
     """
     for path in paths:
         path = os.path.normpath(path)
-        yield (prefix + '/' +
-               util.pconvert(path[len(roothead):]).lstrip('/')).strip('/'), path
+        yield (
+            prefix + b'/' + util.pconvert(path[len(roothead) :]).lstrip(b'/')
+        ).strip(b'/'), path
+
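# Hedged hgwebdir configuration example for the glob handling above:
#
#   [paths]
#   projects = /srv/hg/*     # one level deep; dirs with .hg become repos
#   everything = /srv/hg/**  # recurse, also finding nested repos/subrepos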
 
 def readallowed(ui, req):
     """Check allow_read and deny_read config options of a repo's ui object
@@ -96,34 +101,35 @@
 
     user = req.remoteuser
 
-    deny_read = ui.configlist('web', 'deny_read', untrusted=True)
+    deny_read = ui.configlist(b'web', b'deny_read', untrusted=True)
     if deny_read and (not user or ismember(ui, user, deny_read)):
         return False
 
-    allow_read = ui.configlist('web', 'allow_read', untrusted=True)
+    allow_read = ui.configlist(b'web', b'allow_read', untrusted=True)
     # by default, allow reading if no allow_read option has been set
     if not allow_read or ismember(ui, user, allow_read):
         return True
 
     return False
 
-def rawindexentries(ui, repos, req, subdir=''):
-    descend = ui.configbool('web', 'descend')
-    collapse = ui.configbool('web', 'collapse')
+
+def rawindexentries(ui, repos, req, subdir=b''):
+    descend = ui.configbool(b'web', b'descend')
+    collapse = ui.configbool(b'web', b'collapse')
     seenrepos = set()
     seendirs = set()
     for name, path in repos:
 
         if not name.startswith(subdir):
             continue
-        name = name[len(subdir):]
+        name = name[len(subdir) :]
         directory = False
 
-        if '/' in name:
+        if b'/' in name:
             if not descend:
                 continue
 
-            nameparts = name.split('/')
+            nameparts = name.split(b'/')
             rootname = nameparts[0]
 
             if not collapse:
@@ -137,10 +143,10 @@
                 name = rootname
 
                 # redefine the path to refer to the directory
-                discarded = '/'.join(nameparts[1:])
+                discarded = b'/'.join(nameparts[1:])
 
                 # remove name parts plus accompanying slash
-                path = path[:-len(discarded) - 1]
+                path = path[: -len(discarded) - 1]
 
                 try:
                     hg.repository(ui, path)
@@ -149,11 +155,11 @@
                     pass
 
         parts = [
-            req.apppath.strip('/'),
-            subdir.strip('/'),
-            name.strip('/'),
+            req.apppath.strip(b'/'),
+            subdir.strip(b'/'),
+            name.strip(b'/'),
         ]
-        url = '/' + '/'.join(p for p in parts if p) + '/'
+        url = b'/' + b'/'.join(p for p in parts if p) + b'/'
 
         # show either a directory entry or a repository
         if directory:
@@ -165,19 +171,20 @@
 
             # add '/' to the name to make it obvious that
             # the entry is a directory, not a regular repository
-            row = {'contact': "",
-                   'contact_sort': "",
-                   'name': name + '/',
-                   'name_sort': name,
-                   'url': url,
-                   'description': "",
-                   'description_sort': "",
-                   'lastchange': d,
-                   'lastchange_sort': d[1] - d[0],
-                   'archives': templateutil.mappinglist([]),
-                   'isdirectory': True,
-                   'labels': templateutil.hybridlist([], name='label'),
-                   }
+            row = {
+                b'contact': b"",
+                b'contact_sort': b"",
+                b'name': name + b'/',
+                b'name_sort': name,
+                b'url': url,
+                b'description': b"",
+                b'description_sort': b"",
+                b'lastchange': d,
+                b'lastchange_sort': d[1] - d[0],
+                b'archives': templateutil.mappinglist([]),
+                b'isdirectory': True,
+                b'labels': templateutil.hybridlist([], name=b'label'),
+            }
 
             seendirs.add(name)
             yield row
@@ -185,15 +192,15 @@
 
         u = ui.copy()
         try:
-            u.readconfig(os.path.join(path, '.hg', 'hgrc'))
+            u.readconfig(os.path.join(path, b'.hg', b'hgrc'))
         except Exception as e:
-            u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e))
+            u.warn(_(b'error reading %s/.hg/hgrc: %s\n') % (path, e))
             continue
 
         def get(section, name, default=uimod._unset):
             return u.config(section, name, default, untrusted=True)
 
-        if u.configbool("web", "hidden", untrusted=True):
+        if u.configbool(b"web", b"hidden", untrusted=True):
             continue
 
         if not readallowed(u, req):
@@ -203,10 +210,10 @@
         try:
             r = hg.repository(ui, path)
         except IOError:
-            u.warn(_('error accessing repository at %s\n') % path)
+            u.warn(_(b'error accessing repository at %s\n') % path)
             continue
         except error.RepoError:
-            u.warn(_('error accessing repository at %s\n') % path)
+            u.warn(_(b'error accessing repository at %s\n') % path)
             continue
         try:
             d = (get_mtime(r.spath), dateutil.makedate()[1])
@@ -214,46 +221,51 @@
             continue
 
         contact = get_contact(get)
-        description = get("web", "description")
+        description = get(b"web", b"description")
         seenrepos.add(name)
-        name = get("web", "name", name)
-        labels = u.configlist('web', 'labels', untrusted=True)
-        row = {'contact': contact or "unknown",
-               'contact_sort': contact.upper() or "unknown",
-               'name': name,
-               'name_sort': name,
-               'url': url,
-               'description': description or "unknown",
-               'description_sort': description.upper() or "unknown",
-               'lastchange': d,
-               'lastchange_sort': d[1] - d[0],
-               'archives': webutil.archivelist(u, "tip", url),
-               'isdirectory': None,
-               'labels': templateutil.hybridlist(labels, name='label'),
-               }
+        name = get(b"web", b"name", name)
+        labels = u.configlist(b'web', b'labels', untrusted=True)
+        row = {
+            b'contact': contact or b"unknown",
+            b'contact_sort': contact.upper() or b"unknown",
+            b'name': name,
+            b'name_sort': name,
+            b'url': url,
+            b'description': description or b"unknown",
+            b'description_sort': description.upper() or b"unknown",
+            b'lastchange': d,
+            b'lastchange_sort': d[1] - d[0],
+            b'archives': webutil.archivelist(u, b"tip", url),
+            b'isdirectory': None,
+            b'labels': templateutil.hybridlist(labels, name=b'label'),
+        }
 
         yield row
 
-def _indexentriesgen(context, ui, repos, req, stripecount, sortcolumn,
-                     descending, subdir):
+
+def _indexentriesgen(
+    context, ui, repos, req, stripecount, sortcolumn, descending, subdir
+):
     rows = rawindexentries(ui, repos, req, subdir=subdir)
 
     sortdefault = None, False
 
     if sortcolumn and sortdefault != (sortcolumn, descending):
-        sortkey = '%s_sort' % sortcolumn
-        rows = sorted(rows, key=lambda x: x[sortkey],
-                      reverse=descending)
+        sortkey = b'%s_sort' % sortcolumn
+        rows = sorted(rows, key=lambda x: x[sortkey], reverse=descending)
 
     for row, parity in zip(rows, paritygen(stripecount)):
-        row['parity'] = parity
+        row[b'parity'] = parity
         yield row
 
-def indexentries(ui, repos, req, stripecount, sortcolumn='',
-                 descending=False, subdir=''):
+
+def indexentries(
+    ui, repos, req, stripecount, sortcolumn=b'', descending=False, subdir=b''
+):
     args = (ui, repos, req, stripecount, sortcolumn, descending, subdir)
     return templateutil.mappinggenerator(_indexentriesgen, args=args)
 
+
 class hgwebdir(object):
     """HTTP server for multiple repositories.
 
@@ -262,6 +274,7 @@
 
     Instances are typically used as WSGI applications.
     """
+
     def __init__(self, conf, baseui=None):
         self.conf = conf
         self.baseui = baseui
@@ -276,34 +289,36 @@
 
     def refresh(self):
         if self.ui:
-            refreshinterval = self.ui.configint('web', 'refreshinterval')
+            refreshinterval = self.ui.configint(b'web', b'refreshinterval')
         else:
-            item = configitems.coreitems['web']['refreshinterval']
+            item = configitems.coreitems[b'web'][b'refreshinterval']
             refreshinterval = item.default
 
         # refreshinterval <= 0 means to always refresh.
-        if (refreshinterval > 0 and
-            self.lastrefresh + refreshinterval > time.time()):
+        if (
+            refreshinterval > 0
+            and self.lastrefresh + refreshinterval > time.time()
+        ):
             return
 
         if self.baseui:
             u = self.baseui.copy()
         else:
             u = uimod.ui.load()
-            u.setconfig('ui', 'report_untrusted', 'off', 'hgwebdir')
-            u.setconfig('ui', 'nontty', 'true', 'hgwebdir')
+            u.setconfig(b'ui', b'report_untrusted', b'off', b'hgwebdir')
+            u.setconfig(b'ui', b'nontty', b'true', b'hgwebdir')
             # displaying bundling progress bar while serving feels wrong and may
             # break some wsgi implementations.
-            u.setconfig('progress', 'disable', 'true', 'hgweb')
+            u.setconfig(b'progress', b'disable', b'true', b'hgweb')
 
         if not isinstance(self.conf, (dict, list, tuple)):
-            map = {'paths': 'hgweb-paths'}
+            map = {b'paths': b'hgweb-paths'}
             if not os.path.exists(self.conf):
-                raise error.Abort(_('config file %s not found!') % self.conf)
+                raise error.Abort(_(b'config file %s not found!') % self.conf)
             u.readconfig(self.conf, remap=map, trust=True)
             paths = []
-            for name, ignored in u.configitems('hgweb-paths'):
-                for path in u.configlist('hgweb-paths', name):
+            for name, ignored in u.configitems(b'hgweb-paths'):
+                for path in u.configlist(b'hgweb-paths', name):
                     paths.append((name, path))
         elif isinstance(self.conf, (list, tuple)):
             paths = self.conf
@@ -312,47 +327,52 @@
         extensions.populateui(u)
 
         repos = findrepos(paths)
-        for prefix, root in u.configitems('collections'):
+        for prefix, root in u.configitems(b'collections'):
             prefix = util.pconvert(prefix)
             for path in scmutil.walkrepos(root, followsym=True):
                 repo = os.path.normpath(path)
                 name = util.pconvert(repo)
                 if name.startswith(prefix):
-                    name = name[len(prefix):]
-                repos.append((name.lstrip('/'), repo))
+                    name = name[len(prefix) :]
+                repos.append((name.lstrip(b'/'), repo))
 
         self.repos = repos
         self.ui = u
-        encoding.encoding = self.ui.config('web', 'encoding')
-        self.style = self.ui.config('web', 'style')
-        self.templatepath = self.ui.config('web', 'templates', untrusted=False)
-        self.stripecount = self.ui.config('web', 'stripes')
+        encoding.encoding = self.ui.config(b'web', b'encoding')
+        self.style = self.ui.config(b'web', b'style')
+        self.templatepath = self.ui.config(
+            b'web', b'templates', untrusted=False
+        )
+        self.stripecount = self.ui.config(b'web', b'stripes')
         if self.stripecount:
             self.stripecount = int(self.stripecount)
-        prefix = self.ui.config('web', 'prefix')
-        if prefix.startswith('/'):
+        prefix = self.ui.config(b'web', b'prefix')
+        if prefix.startswith(b'/'):
             prefix = prefix[1:]
-        if prefix.endswith('/'):
+        if prefix.endswith(b'/'):
             prefix = prefix[:-1]
         self.prefix = prefix
         self.lastrefresh = time.time()
 
     def run(self):
-        if not encoding.environ.get('GATEWAY_INTERFACE',
-                                    '').startswith("CGI/1."):
-            raise RuntimeError("This function is only intended to be "
-                               "called while running as a CGI script.")
+        if not encoding.environ.get(b'GATEWAY_INTERFACE', b'').startswith(
+            b"CGI/1."
+        ):
+            raise RuntimeError(
+                b"This function is only intended to be "
+                b"called while running as a CGI script."
+            )
         wsgicgi.launch(self)
 
     def __call__(self, env, respond):
-        baseurl = self.ui.config('web', 'baseurl')
+        baseurl = self.ui.config(b'web', b'baseurl')
         req = requestmod.parserequestfromenv(env, altbaseurl=baseurl)
         res = requestmod.wsgiresponse(req, respond)
 
         return self.run_wsgi(req, res)
 
     def run_wsgi(self, req, res):
-        profile = self.ui.configbool('profiling', 'enabled')
+        profile = self.ui.configbool(b'profiling', b'enabled')
         with profiling.profile(self.ui, enabled=profile):
             try:
                 for r in self._runwsgi(req, res):
@@ -373,28 +393,28 @@
 
             csp, nonce = cspvalues(self.ui)
             if csp:
-                res.headers['Content-Security-Policy'] = csp
+                res.headers[b'Content-Security-Policy'] = csp
 
-            virtual = req.dispatchpath.strip('/')
+            virtual = req.dispatchpath.strip(b'/')
             tmpl = self.templater(req, nonce)
-            ctype = tmpl.render('mimetype', {'encoding': encoding.encoding})
+            ctype = tmpl.render(b'mimetype', {b'encoding': encoding.encoding})
 
             # Global defaults. These can be overridden by any handler.
-            res.status = '200 Script output follows'
-            res.headers['Content-Type'] = ctype
+            res.status = b'200 Script output follows'
+            res.headers[b'Content-Type'] = ctype
 
             # a static file
-            if virtual.startswith('static/') or 'static' in req.qsparams:
-                if virtual.startswith('static/'):
+            if virtual.startswith(b'static/') or b'static' in req.qsparams:
+                if virtual.startswith(b'static/'):
                     fname = virtual[7:]
                 else:
-                    fname = req.qsparams['static']
-                static = self.ui.config("web", "static", untrusted=False)
+                    fname = req.qsparams[b'static']
+                static = self.ui.config(b"web", b"static", untrusted=False)
                 if not static:
                     tp = self.templatepath or templater.templatepaths()
                     if isinstance(tp, str):
                         tp = [tp]
-                    static = [os.path.join(p, 'static') for p in tp]
+                    static = [os.path.join(p, b'static') for p in tp]
 
                 staticfile(static, fname, res)
                 return res.sendresponse()
@@ -403,13 +423,13 @@
 
             repos = dict(self.repos)
 
-            if (not virtual or virtual == 'index') and virtual not in repos:
+            if (not virtual or virtual == b'index') and virtual not in repos:
                 return self.makeindex(req, res, tmpl)
 
             # nested indexes and hgwebs
 
-            if virtual.endswith('/index') and virtual not in repos:
-                subdir = virtual[:-len('index')]
+            if virtual.endswith(b'/index') and virtual not in repos:
+                subdir = virtual[: -len(b'index')]
                 if any(r.startswith(subdir) for r in repos):
                     return self.makeindex(req, res, tmpl, subdir)
 
@@ -426,14 +446,18 @@
                     # repository path component.
                     uenv = req.rawenv
                     if pycompat.ispy3:
-                        uenv = {k.decode('latin1'): v for k, v in
-                                uenv.iteritems()}
+                        uenv = {
+                            k.decode('latin1'): v
+                            for k, v in pycompat.iteritems(uenv)
+                        }
                     req = requestmod.parserequestfromenv(
-                        uenv, reponame=virtualrepo,
-                        altbaseurl=self.ui.config('web', 'baseurl'),
+                        uenv,
+                        reponame=virtualrepo,
+                        altbaseurl=self.ui.config(b'web', b'baseurl'),
                         # Reuse wrapped body file object otherwise state
                         # tracking can get confused.
-                        bodyfh=req.bodyfh)
+                        bodyfh=req.bodyfh,
+                    )
                     try:
                         # ensure caller gets private copy of ui
                         repo = hg.repository(self.ui.copy(), real)
@@ -445,92 +469,110 @@
                         raise ErrorResponse(HTTP_SERVER_ERROR, bytes(inst))
 
             # browse subdirectories
-            subdir = virtual + '/'
+            subdir = virtual + b'/'
             if [r for r in repos if r.startswith(subdir)]:
                 return self.makeindex(req, res, tmpl, subdir)
 
             # prefixes not found
-            res.status = '404 Not Found'
-            res.setbodygen(tmpl.generate('notfound', {'repo': virtual}))
+            res.status = b'404 Not Found'
+            res.setbodygen(tmpl.generate(b'notfound', {b'repo': virtual}))
             return res.sendresponse()
 
         except ErrorResponse as e:
             res.status = statusmessage(e.code, pycompat.bytestr(e))
-            res.setbodygen(tmpl.generate('error', {'error': e.message or ''}))
+            res.setbodygen(
+                tmpl.generate(b'error', {b'error': e.message or b''})
+            )
             return res.sendresponse()
         finally:
             tmpl = None
 
-    def makeindex(self, req, res, tmpl, subdir=""):
+    def makeindex(self, req, res, tmpl, subdir=b""):
         self.refresh()
-        sortable = ["name", "description", "contact", "lastchange"]
+        sortable = [b"name", b"description", b"contact", b"lastchange"]
         sortcolumn, descending = None, False
-        if 'sort' in req.qsparams:
-            sortcolumn = req.qsparams['sort']
-            descending = sortcolumn.startswith('-')
+        if b'sort' in req.qsparams:
+            sortcolumn = req.qsparams[b'sort']
+            descending = sortcolumn.startswith(b'-')
             if descending:
                 sortcolumn = sortcolumn[1:]
             if sortcolumn not in sortable:
-                sortcolumn = ""
+                sortcolumn = b""
 
-        sort = [("sort_%s" % column,
-                 "%s%s" % ((not descending and column == sortcolumn)
-                            and "-" or "", column))
-                for column in sortable]
+        sort = [
+            (
+                b"sort_%s" % column,
+                b"%s%s"
+                % (
+                    (not descending and column == sortcolumn) and b"-" or b"",
+                    column,
+                ),
+            )
+            for column in sortable
+        ]
 
         self.refresh()
 
-        entries = indexentries(self.ui, self.repos, req,
-                               self.stripecount, sortcolumn=sortcolumn,
-                               descending=descending, subdir=subdir)
+        entries = indexentries(
+            self.ui,
+            self.repos,
+            req,
+            self.stripecount,
+            sortcolumn=sortcolumn,
+            descending=descending,
+            subdir=subdir,
+        )
 
         mapping = {
-            'entries': entries,
-            'subdir': subdir,
-            'pathdef': hgweb_mod.makebreadcrumb('/' + subdir, self.prefix),
-            'sortcolumn': sortcolumn,
-            'descending': descending,
+            b'entries': entries,
+            b'subdir': subdir,
+            b'pathdef': hgweb_mod.makebreadcrumb(b'/' + subdir, self.prefix),
+            b'sortcolumn': sortcolumn,
+            b'descending': descending,
         }
         mapping.update(sort)
-        res.setbodygen(tmpl.generate('index', mapping))
+        res.setbodygen(tmpl.generate(b'index', mapping))
         return res.sendresponse()
 
     def templater(self, req, nonce):
-
         def config(section, name, default=uimod._unset, untrusted=True):
             return self.ui.config(section, name, default, untrusted)
 
         vars = {}
-        styles, (style, mapfile) = hgweb_mod.getstyle(req, config,
-                                                      self.templatepath)
+        styles, (style, mapfile) = hgweb_mod.getstyle(
+            req, config, self.templatepath
+        )
         if style == styles[0]:
-            vars['style'] = style
+            vars[b'style'] = style
 
-        sessionvars = webutil.sessionvars(vars, '?')
-        logourl = config('web', 'logourl')
-        logoimg = config('web', 'logoimg')
-        staticurl = (config('web', 'staticurl')
-                     or req.apppath.rstrip('/') + '/static/')
-        if not staticurl.endswith('/'):
-            staticurl += '/'
+        sessionvars = webutil.sessionvars(vars, b'?')
+        logourl = config(b'web', b'logourl')
+        logoimg = config(b'web', b'logoimg')
+        staticurl = (
+            config(b'web', b'staticurl')
+            or req.apppath.rstrip(b'/') + b'/static/'
+        )
+        if not staticurl.endswith(b'/'):
+            staticurl += b'/'
 
         defaults = {
-            "encoding": encoding.encoding,
-            "url": req.apppath + '/',
-            "logourl": logourl,
-            "logoimg": logoimg,
-            "staticurl": staticurl,
-            "sessionvars": sessionvars,
-            "style": style,
-            "nonce": nonce,
+            b"encoding": encoding.encoding,
+            b"url": req.apppath + b'/',
+            b"logourl": logourl,
+            b"logoimg": logoimg,
+            b"staticurl": staticurl,
+            b"sessionvars": sessionvars,
+            b"style": style,
+            b"nonce": nonce,
         }
         templatekeyword = registrar.templatekeyword(defaults)
-        @templatekeyword('motd', requires=())
+
+        @templatekeyword(b'motd', requires=())
         def motd(context, mapping):
             if self.motd is not None:
                 yield self.motd
             else:
-                yield config('web', 'motd')
+                yield config(b'web', b'motd')
 
         tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
         return tmpl
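
The hunks above, and most that follow, combine two mechanical changes:
string literals gain a ``b''`` prefix for Python 3, and direct
``dict.iteritems()`` calls (removed in Python 3) are routed through
``pycompat.iteritems()``. A minimal sketch of such a shim, assuming only
the behavior visible in this diff rather than the real
``mercurial/pycompat.py``, would be::

   import sys

   if sys.version_info[0] >= 3:
       def iteritems(mapping):
           # Python 3 dropped iteritems(); items() already returns a
           # lazy view, so it is the direct replacement.
           return mapping.items()
   else:
       def iteritems(mapping):
           # Python 2 dicts offer iteritems(), which avoids building
           # the intermediate list that items() creates there.
           return mapping.iteritems()

   assert sorted(iteritems({b'a': 1, b'b': 2})) == [(b'a', 1), (b'b', 2)]

Selecting the function once at import time keeps every call site a plain
function call, which is why the diff can substitute it mechanically.
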
--- a/mercurial/hgweb/request.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hgweb/request.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,17 +8,16 @@
 
 from __future__ import absolute_import
 
-#import wsgiref.validate
+# import wsgiref.validate
 
-from ..thirdparty import (
-    attr,
-)
+from ..thirdparty import attr
 from .. import (
     error,
     pycompat,
     util,
 )
 
+
 class multidict(object):
     """A dict like object that can store multiple values for a key.
 
@@ -26,6 +25,7 @@
 
     This is inspired by WebOb's class of the same name.
     """
+
     def __init__(self):
         self._items = {}
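
A toy reduction of the multi-valued behavior described in the docstring
above; the real class also provides ``asbytes()``, which raises
``KeyError`` when a key holds several values, and ``asdictoflists()`` as
shown further down::

   class toymultidict(object):
       def __init__(self):
           self._items = {}

       def add(self, key, value):
           # Append rather than overwrite, so repeated query-string
           # parameters such as ?rev=a&rev=b are all retained.
           self._items.setdefault(key, []).append(value)

       def getall(self, key):
           return self._items.get(key, [])

   d = toymultidict()
   d.add(b'rev', b'a')
   d.add(b'rev', b'b')
   assert d.getall(b'rev') == [b'a', b'b']
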
 
@@ -69,12 +69,13 @@
         vals = self._items[key]
 
         if len(vals) > 1:
-            raise KeyError('multiple values for %r' % key)
+            raise KeyError(b'multiple values for %r' % key)
 
         return vals[0]
 
     def asdictoflists(self):
-        return {k: list(v) for k, v in self._items.iteritems()}
+        return {k: list(v) for k, v in pycompat.iteritems(self._items)}
+
 
 @attr.s(frozen=True)
 class parsedrequest(object):
@@ -124,6 +125,7 @@
     # WSGI environment dict, unmodified.
     rawenv = attr.ib()
 
+
 def parserequestfromenv(env, reponame=None, altbaseurl=None, bodyfh=None):
     """Parse URL components from environment variables.
 
@@ -153,22 +155,24 @@
     # We first validate that the incoming object conforms with the WSGI spec.
     # We only want to be dealing with spec-conforming WSGI implementations.
     # TODO enable this once we fix internal violations.
-    #wsgiref.validate.check_environ(env)
+    # wsgiref.validate.check_environ(env)
 
     # PEP-0333 states that environment keys and values are native strings
     # (bytes on Python 2 and str on Python 3). The code points for the Unicode
     # strings on Python 3 must be between \00000-\000FF. We deal with bytes
     # in Mercurial, so mass convert string keys and values to bytes.
     if pycompat.ispy3:
-        env = {k.encode('latin-1'): v for k, v in env.iteritems()}
-        env = {k: v.encode('latin-1') if isinstance(v, str) else v
-               for k, v in env.iteritems()}
+        env = {k.encode('latin-1'): v for k, v in pycompat.iteritems(env)}
+        env = {
+            k: v.encode('latin-1') if isinstance(v, str) else v
+            for k, v in pycompat.iteritems(env)
+        }
 
     # Some hosting solutions are emulating hgwebdir, and dispatching directly
     # to an hgweb instance using this environment variable.  This was always
     # checked prior to d7fd203e36cc; keep doing so to avoid breaking them.
     if not reponame:
-        reponame = env.get('REPO_NAME')
+        reponame = env.get(b'REPO_NAME')
 
     if altbaseurl:
         altbaseurl = util.url(altbaseurl)
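
The mass conversion in the hunk above leans on PEP 3333's rule that
environ strings on Python 3 are limited to code points U+0000..U+00FF,
so a latin-1 encode round-trips them to bytes losslessly. A standalone
restatement (Python 3 only; the helper name is illustrative)::

   def bytesenv(env):
       # Keys are always native str; values may be non-string objects
       # such as wsgi.version, which are passed through untouched.
       env = {k.encode('latin-1'): v for k, v in env.items()}
       return {
           k: v.encode('latin-1') if isinstance(v, str) else v
           for k, v in env.items()
       }

   env = bytesenv({'PATH_INFO': '/repo', 'wsgi.version': (1, 0)})
   assert env[b'PATH_INFO'] == b'/repo'
   assert env[b'wsgi.version'] == (1, 0)
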
@@ -177,111 +181,114 @@
     # the environment variables.
     # https://www.python.org/dev/peps/pep-0333/#url-reconstruction defines
     # how URLs are reconstructed.
-    fullurl = env['wsgi.url_scheme'] + '://'
+    fullurl = env[b'wsgi.url_scheme'] + b'://'
 
     if altbaseurl and altbaseurl.scheme:
-        advertisedfullurl = altbaseurl.scheme + '://'
+        advertisedfullurl = altbaseurl.scheme + b'://'
     else:
         advertisedfullurl = fullurl
 
     def addport(s, port):
-        if s.startswith('https://'):
-            if port != '443':
-                s += ':' + port
+        if s.startswith(b'https://'):
+            if port != b'443':
+                s += b':' + port
         else:
-            if port != '80':
-                s += ':' + port
+            if port != b'80':
+                s += b':' + port
 
         return s
 
-    if env.get('HTTP_HOST'):
-        fullurl += env['HTTP_HOST']
+    if env.get(b'HTTP_HOST'):
+        fullurl += env[b'HTTP_HOST']
     else:
-        fullurl += env['SERVER_NAME']
-        fullurl = addport(fullurl, env['SERVER_PORT'])
+        fullurl += env[b'SERVER_NAME']
+        fullurl = addport(fullurl, env[b'SERVER_PORT'])
 
     if altbaseurl and altbaseurl.host:
         advertisedfullurl += altbaseurl.host
 
         if altbaseurl.port:
             port = altbaseurl.port
-        elif altbaseurl.scheme == 'http' and not altbaseurl.port:
-            port = '80'
-        elif altbaseurl.scheme == 'https' and not altbaseurl.port:
-            port = '443'
+        elif altbaseurl.scheme == b'http' and not altbaseurl.port:
+            port = b'80'
+        elif altbaseurl.scheme == b'https' and not altbaseurl.port:
+            port = b'443'
         else:
-            port = env['SERVER_PORT']
+            port = env[b'SERVER_PORT']
 
         advertisedfullurl = addport(advertisedfullurl, port)
     else:
-        advertisedfullurl += env['SERVER_NAME']
-        advertisedfullurl = addport(advertisedfullurl, env['SERVER_PORT'])
+        advertisedfullurl += env[b'SERVER_NAME']
+        advertisedfullurl = addport(advertisedfullurl, env[b'SERVER_PORT'])
 
     baseurl = fullurl
     advertisedbaseurl = advertisedfullurl
 
-    fullurl += util.urlreq.quote(env.get('SCRIPT_NAME', ''))
-    fullurl += util.urlreq.quote(env.get('PATH_INFO', ''))
+    fullurl += util.urlreq.quote(env.get(b'SCRIPT_NAME', b''))
+    fullurl += util.urlreq.quote(env.get(b'PATH_INFO', b''))
 
     if altbaseurl:
-        path = altbaseurl.path or ''
-        if path and not path.startswith('/'):
-            path = '/' + path
+        path = altbaseurl.path or b''
+        if path and not path.startswith(b'/'):
+            path = b'/' + path
         advertisedfullurl += util.urlreq.quote(path)
     else:
-        advertisedfullurl += util.urlreq.quote(env.get('SCRIPT_NAME', ''))
+        advertisedfullurl += util.urlreq.quote(env.get(b'SCRIPT_NAME', b''))
 
-    advertisedfullurl += util.urlreq.quote(env.get('PATH_INFO', ''))
+    advertisedfullurl += util.urlreq.quote(env.get(b'PATH_INFO', b''))
 
-    if env.get('QUERY_STRING'):
-        fullurl += '?' + env['QUERY_STRING']
-        advertisedfullurl += '?' + env['QUERY_STRING']
+    if env.get(b'QUERY_STRING'):
+        fullurl += b'?' + env[b'QUERY_STRING']
+        advertisedfullurl += b'?' + env[b'QUERY_STRING']
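
The code above follows the URL reconstruction recipe from PEP 3333. A
condensed restatement in plain str, skipping the quoting and the
``altbaseurl`` branch that the real code also handles::

   def reconstruct(environ):
       url = environ['wsgi.url_scheme'] + '://'
       if environ.get('HTTP_HOST'):
           url += environ['HTTP_HOST']
       else:
           url += environ['SERVER_NAME']
           port = environ['SERVER_PORT']
           # Default ports are omitted from the reconstructed URL.
           if (environ['wsgi.url_scheme'], port) not in (
               ('http', '80'),
               ('https', '443'),
           ):
               url += ':' + port
       url += environ.get('SCRIPT_NAME', '') + environ.get('PATH_INFO', '')
       if environ.get('QUERY_STRING'):
           url += '?' + environ['QUERY_STRING']
       return url

   env = {
       'wsgi.url_scheme': 'https',
       'SERVER_NAME': 'hg.example.org',
       'SERVER_PORT': '443',
       'PATH_INFO': '/repo/rev/tip',
   }
   assert reconstruct(env) == 'https://hg.example.org/repo/rev/tip'
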
 
     # If ``reponame`` is defined, that must be a prefix on PATH_INFO
     # that represents the repository being dispatched to. When computing
     # the dispatch info, we ignore these leading path components.
 
     if altbaseurl:
-        apppath = altbaseurl.path or ''
-        if apppath and not apppath.startswith('/'):
-            apppath = '/' + apppath
+        apppath = altbaseurl.path or b''
+        if apppath and not apppath.startswith(b'/'):
+            apppath = b'/' + apppath
     else:
-        apppath = env.get('SCRIPT_NAME', '')
+        apppath = env.get(b'SCRIPT_NAME', b'')
 
     if reponame:
-        repoprefix = '/' + reponame.strip('/')
+        repoprefix = b'/' + reponame.strip(b'/')
 
-        if not env.get('PATH_INFO'):
-            raise error.ProgrammingError('reponame requires PATH_INFO')
+        if not env.get(b'PATH_INFO'):
+            raise error.ProgrammingError(b'reponame requires PATH_INFO')
 
-        if not env['PATH_INFO'].startswith(repoprefix):
-            raise error.ProgrammingError('PATH_INFO does not begin with repo '
-                                         'name: %s (%s)' % (env['PATH_INFO'],
-                                                            reponame))
+        if not env[b'PATH_INFO'].startswith(repoprefix):
+            raise error.ProgrammingError(
+                b'PATH_INFO does not begin with repo '
+                b'name: %s (%s)' % (env[b'PATH_INFO'], reponame)
+            )
 
-        dispatchpath = env['PATH_INFO'][len(repoprefix):]
+        dispatchpath = env[b'PATH_INFO'][len(repoprefix) :]
 
-        if dispatchpath and not dispatchpath.startswith('/'):
-            raise error.ProgrammingError('reponame prefix of PATH_INFO does '
-                                         'not end at path delimiter: %s (%s)' %
-                                         (env['PATH_INFO'], reponame))
+        if dispatchpath and not dispatchpath.startswith(b'/'):
+            raise error.ProgrammingError(
+                b'reponame prefix of PATH_INFO does '
+                b'not end at path delimiter: %s (%s)'
+                % (env[b'PATH_INFO'], reponame)
+            )
 
-        apppath = apppath.rstrip('/') + repoprefix
-        dispatchparts = dispatchpath.strip('/').split('/')
-        dispatchpath = '/'.join(dispatchparts)
+        apppath = apppath.rstrip(b'/') + repoprefix
+        dispatchparts = dispatchpath.strip(b'/').split(b'/')
+        dispatchpath = b'/'.join(dispatchparts)
 
-    elif 'PATH_INFO' in env:
-        if env['PATH_INFO'].strip('/'):
-            dispatchparts = env['PATH_INFO'].strip('/').split('/')
-            dispatchpath = '/'.join(dispatchparts)
+    elif b'PATH_INFO' in env:
+        if env[b'PATH_INFO'].strip(b'/'):
+            dispatchparts = env[b'PATH_INFO'].strip(b'/').split(b'/')
+            dispatchpath = b'/'.join(dispatchparts)
         else:
             dispatchparts = []
-            dispatchpath = ''
+            dispatchpath = b''
     else:
         dispatchparts = []
         dispatchpath = None
 
-    querystring = env.get('QUERY_STRING', '')
+    querystring = env.get(b'QUERY_STRING', b'')
 
     # We store as a list so we have ordering information. We also store as
     # a dict to facilitate fast lookup.
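
That dual storage is what ``multidict`` (above) provides: ordered
iteration plus fast lookup of possibly repeated parameters.
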
@@ -293,44 +300,51 @@
     # perform case normalization for us. We just rewrite underscore to dash
     # so keys match what likely went over the wire.
     headers = []
-    for k, v in env.iteritems():
-        if k.startswith('HTTP_'):
-            headers.append((k[len('HTTP_'):].replace('_', '-'), v))
+    for k, v in pycompat.iteritems(env):
+        if k.startswith(b'HTTP_'):
+            headers.append((k[len(b'HTTP_') :].replace(b'_', b'-'), v))
 
-    from . import wsgiheaders # avoid cycle
+    from . import wsgiheaders  # avoid cycle
+
     headers = wsgiheaders.Headers(headers)
 
     # This is kind of a lie because the HTTP header wasn't explicitly
     # sent. But for all intents and purposes it should be OK to lie about
     # this, since a consumer will use either value to determine how many
     # bytes are available to read.
-    if 'CONTENT_LENGTH' in env and 'HTTP_CONTENT_LENGTH' not in env:
-        headers['Content-Length'] = env['CONTENT_LENGTH']
+    if b'CONTENT_LENGTH' in env and b'HTTP_CONTENT_LENGTH' not in env:
+        headers[b'Content-Length'] = env[b'CONTENT_LENGTH']
 
-    if 'CONTENT_TYPE' in env and 'HTTP_CONTENT_TYPE' not in env:
-        headers['Content-Type'] = env['CONTENT_TYPE']
+    if b'CONTENT_TYPE' in env and b'HTTP_CONTENT_TYPE' not in env:
+        headers[b'Content-Type'] = env[b'CONTENT_TYPE']
 
     if bodyfh is None:
-        bodyfh = env['wsgi.input']
-        if 'Content-Length' in headers:
-            bodyfh = util.cappedreader(bodyfh,
-                                       int(headers['Content-Length'] or '0'))
+        bodyfh = env[b'wsgi.input']
+        if b'Content-Length' in headers:
+            bodyfh = util.cappedreader(
+                bodyfh, int(headers[b'Content-Length'] or b'0')
+            )
 
-    return parsedrequest(method=env['REQUEST_METHOD'],
-                         url=fullurl, baseurl=baseurl,
-                         advertisedurl=advertisedfullurl,
-                         advertisedbaseurl=advertisedbaseurl,
-                         urlscheme=env['wsgi.url_scheme'],
-                         remoteuser=env.get('REMOTE_USER'),
-                         remotehost=env.get('REMOTE_HOST'),
-                         apppath=apppath,
-                         dispatchparts=dispatchparts, dispatchpath=dispatchpath,
-                         reponame=reponame,
-                         querystring=querystring,
-                         qsparams=qsparams,
-                         headers=headers,
-                         bodyfh=bodyfh,
-                         rawenv=env)
+    return parsedrequest(
+        method=env[b'REQUEST_METHOD'],
+        url=fullurl,
+        baseurl=baseurl,
+        advertisedurl=advertisedfullurl,
+        advertisedbaseurl=advertisedbaseurl,
+        urlscheme=env[b'wsgi.url_scheme'],
+        remoteuser=env.get(b'REMOTE_USER'),
+        remotehost=env.get(b'REMOTE_HOST'),
+        apppath=apppath,
+        dispatchparts=dispatchparts,
+        dispatchpath=dispatchpath,
+        reponame=reponame,
+        querystring=querystring,
+        qsparams=qsparams,
+        headers=headers,
+        bodyfh=bodyfh,
+        rawenv=env,
+    )
+
 
 class offsettrackingwriter(object):
     """A file object like object that is append only and tracks write count.
@@ -345,6 +359,7 @@
     a WSGI ``start_response()`` function. Since ``write()`` is a callable and
     not a file object, it doesn't implement other file object methods.
     """
+
     def __init__(self, writefn):
         self._write = writefn
         self._offset = 0
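
A minimal sketch of the offset-tracking idea described in the docstring
above, with the WSGI ``write()`` callable stood in for by a list
append::

   class trackingwriter(object):
       def __init__(self, writefn):
           self._write = writefn
           self._offset = 0

       def write(self, s):
           self._write(s)
           # Record how many bytes went out so tell() can report a
           # file-like position even though the target is a callable.
           self._offset += len(s)

       def tell(self):
           return self._offset

   chunks = []
   w = trackingwriter(chunks.append)
   w.write(b'abc')
   w.write(b'de')
   assert w.tell() == 5 and chunks == [b'abc', b'de']
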
@@ -363,6 +378,7 @@
     def tell(self):
         return self._offset
 
+
 class wsgiresponse(object):
     """Represents a response to a WSGI request.
 
@@ -389,7 +405,8 @@
         self._startresponse = startresponse
 
         self.status = None
-        from . import wsgiheaders # avoid cycle
+        from . import wsgiheaders  # avoid cycle
+
         self.headers = wsgiheaders.Headers([])
 
         self._bodybytes = None
@@ -399,9 +416,12 @@
         self._bodywritefn = None
 
     def _verifybody(self):
-        if (self._bodybytes is not None or self._bodygen is not None
-            or self._bodywillwrite):
-            raise error.ProgrammingError('cannot define body multiple times')
+        if (
+            self._bodybytes is not None
+            or self._bodygen is not None
+            or self._bodywillwrite
+        ):
+            raise error.ProgrammingError(b'cannot define body multiple times')
 
     def setbodybytes(self, b):
         """Define the response body as static bytes.
@@ -410,7 +430,7 @@
         """
         self._verifybody()
         self._bodybytes = b
-        self.headers['Content-Length'] = '%d' % len(b)
+        self.headers[b'Content-Length'] = b'%d' % len(b)
 
     def setbodygen(self, gen):
         """Define the response body as a generator of bytes."""
@@ -443,16 +463,21 @@
         Calling this method multiple times is not allowed.
         """
         if self._started:
-            raise error.ProgrammingError('sendresponse() called multiple times')
+            raise error.ProgrammingError(
+                b'sendresponse() called multiple times'
+            )
 
         self._started = True
 
         if not self.status:
-            raise error.ProgrammingError('status line not defined')
+            raise error.ProgrammingError(b'status line not defined')
 
-        if (self._bodybytes is None and self._bodygen is None
-            and not self._bodywillwrite):
-            raise error.ProgrammingError('response body not defined')
+        if (
+            self._bodybytes is None
+            and self._bodygen is None
+            and not self._bodywillwrite
+        ):
+            raise error.ProgrammingError(b'response body not defined')
 
         # RFC 7232 Section 4.1 states that a 304 MUST generate one of
         # {Cache-Control, Content-Location, Date, ETag, Expires, Vary}
@@ -461,28 +486,38 @@
         # states that no response body can be issued. Content-Length can
         # be sent. But if it is present, it should be the size of the response
         # that wasn't transferred.
-        if self.status.startswith('304 '):
+        if self.status.startswith(b'304 '):
             # setbodybytes('') will set C-L to 0. This doesn't conform with the
             # spec. So remove it.
-            if self.headers.get('Content-Length') == '0':
-                del self.headers['Content-Length']
+            if self.headers.get(b'Content-Length') == b'0':
+                del self.headers[b'Content-Length']
 
             # Strictly speaking, this is too strict. But until it causes
             # problems, let's be strict.
-            badheaders = {k for k in self.headers.keys()
-                          if k.lower() not in ('date', 'etag', 'expires',
-                                               'cache-control',
-                                               'content-location',
-                                               'content-security-policy',
-                                               'vary')}
+            badheaders = {
+                k
+                for k in self.headers.keys()
+                if k.lower()
+                not in (
+                    b'date',
+                    b'etag',
+                    b'expires',
+                    b'cache-control',
+                    b'content-location',
+                    b'content-security-policy',
+                    b'vary',
+                )
+            }
             if badheaders:
                 raise error.ProgrammingError(
-                    'illegal header on 304 response: %s' %
-                    ', '.join(sorted(badheaders)))
+                    b'illegal header on 304 response: %s'
+                    % b', '.join(sorted(badheaders))
+                )
 
             if self._bodygen is not None or self._bodywillwrite:
-                raise error.ProgrammingError("must use setbodybytes('') with "
-                                             "304 responses")
+                raise error.ProgrammingError(
+                    b"must use setbodybytes('') with 304 responses"
+                )
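
Restated outside the class, the 304 policy enforced above amounts to an
allowlist check over header names; this sketch assumes exactly the
header set the diff shows::

   ALLOWED_ON_304 = {
       b'date',
       b'etag',
       b'expires',
       b'cache-control',
       b'content-location',
       b'content-security-policy',
       b'vary',
   }

   def check304headers(headers):
       # Anything outside the allowlist is treated as a programming
       # error on a 304 Not Modified response.
       bad = {k for k in headers if k.lower() not in ALLOWED_ON_304}
       if bad:
           raise ValueError(b', '.join(sorted(bad)))

   check304headers([b'ETag', b'Date'])  # passes silently
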
 
         # Various HTTP clients (notably httplib) won't read the HTTP response
         # until the HTTP request has been sent in full. If servers (us) send a
@@ -497,12 +532,12 @@
         # If the client sent Expect: 100-continue, we assume it is smart enough
         # to deal with the server sending a response before reading the request.
         # (httplib doesn't do this.)
-        if self._req.headers.get('Expect', '').lower() == '100-continue':
+        if self._req.headers.get(b'Expect', b'').lower() == b'100-continue':
             pass
         # Only tend to request methods that have bodies. Strictly speaking,
         # we should sniff for a body. But this is fine for our existing
         # WSGI applications.
-        elif self._req.method not in ('POST', 'PUT'):
+        elif self._req.method not in (b'POST', b'PUT'):
             pass
         else:
             # If we don't know how much data to read, there's no guarantee
@@ -522,7 +557,7 @@
                 drain = True
 
         if close:
-            self.headers['Connection'] = 'Close'
+            self.headers[b'Connection'] = b'Close'
 
         if drain:
             assert isinstance(self._req.bodyfh, util.cappedreader)
@@ -531,10 +566,11 @@
                 if not chunk:
                     break
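
The drain branch just above exists so that clients which refuse to read
a response before finishing their upload (httplib, notably) cannot
deadlock the connection. A self-contained sketch of the loop::

   import io

   def drainbody(fh, chunksize=32768):
       # Read and discard the remaining request body in bounded chunks
       # so the connection can be closed or reused cleanly.
       while True:
           chunk = fh.read(chunksize)
           if not chunk:
               break

   drainbody(io.BytesIO(b'x' * 100000))
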
 
-        strheaders = [(pycompat.strurl(k), pycompat.strurl(v)) for
-                      k, v in self.headers.items()]
-        write = self._startresponse(pycompat.sysstr(self.status),
-                                    strheaders)
+        strheaders = [
+            (pycompat.strurl(k), pycompat.strurl(v))
+            for k, v in self.headers.items()
+        ]
+        write = self._startresponse(pycompat.sysstr(self.status), strheaders)
 
         if self._bodybytes:
             yield self._bodybytes
@@ -550,7 +586,7 @@
         elif self._bodywillwrite:
             self._bodywritefn = write
         else:
-            error.ProgrammingError('do not know how to send body')
+            raise error.ProgrammingError(b'do not know how to send body')
 
     def getbodyfile(self):
         """Obtain a file object like object representing the response body.
@@ -563,20 +599,25 @@
         ``[]``.
         """
         if not self._bodywillwrite:
-            raise error.ProgrammingError('must call setbodywillwrite() first')
+            raise error.ProgrammingError(b'must call setbodywillwrite() first')
 
         if not self._started:
-            raise error.ProgrammingError('must call sendresponse() first; did '
-                                         'you remember to consume it since it '
-                                         'is a generator?')
+            raise error.ProgrammingError(
+                b'must call sendresponse() first; did '
+                b'you remember to consume it since it '
+                b'is a generator?'
+            )
 
         assert self._bodywritefn
         return offsettrackingwriter(self._bodywritefn)
 
+
 def wsgiapplication(app_maker):
     '''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir()
     can and should now be used as a WSGI application.'''
     application = app_maker()
+
     def run_wsgi(env, respond):
         return application(env, respond)
+
     return run_wsgi
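
For context, a legacy CGI script would use the shim above roughly as
follows; the repository path is illustrative::

   from mercurial.hgweb.hgweb_mod import hgweb
   from mercurial.hgweb.request import wsgiapplication

   def make_web_app():
       # Build the real application lazily, once per process.
       return hgweb(b'/path/to/repo')

   application = wsgiapplication(make_web_app)
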
--- a/mercurial/hgweb/server.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hgweb/server.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,6 +9,7 @@
 from __future__ import absolute_import
 
 import errno
+import importlib
 import os
 import socket
 import sys
@@ -16,6 +17,10 @@
 import wsgiref.validate
 
 from ..i18n import _
+from ..pycompat import (
+    getattr,
+    open,
+)
 
 from .. import (
     encoding,
@@ -29,9 +34,8 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
-from . import (
-    common,
-)
+from . import common
+
 
 def _splitURI(uri):
     """Return path and query that has been split from uri
@@ -45,20 +49,25 @@
         path, query = uri, r''
     return urlreq.unquote(path), query
 
+
 class _error_logger(object):
     def __init__(self, handler):
         self.handler = handler
+
     def flush(self):
         pass
+
     def write(self, str):
-        self.writelines(str.split('\n'))
+        self.writelines(str.split(b'\n'))
+
     def writelines(self, seq):
         for msg in seq:
             self.handler.log_error(r"HG error:  %s", encoding.strfromlocal(msg))
 
+
 class _httprequesthandler(httpservermod.basehttprequesthandler):
 
-    url_scheme = 'http'
+    url_scheme = b'http'
 
     @staticmethod
     def preparehttpserver(httpserver, ui):
@@ -69,10 +78,17 @@
         httpservermod.basehttprequesthandler.__init__(self, *args, **kargs)
 
     def _log_any(self, fp, format, *args):
-        fp.write(pycompat.sysbytes(
-            r"%s - - [%s] %s" % (self.client_address[0],
-                                 self.log_date_time_string(),
-                                 format % args)) + '\n')
+        fp.write(
+            pycompat.sysbytes(
+                r"%s - - [%s] %s"
+                % (
+                    self.client_address[0],
+                    self.log_date_time_string(),
+                    format % args,
+                )
+            )
+            + b'\n'
+        )
         fp.flush()
 
     def log_error(self, format, *args):
@@ -83,12 +99,17 @@
 
     def log_request(self, code=r'-', size=r'-'):
         xheaders = []
-        if util.safehasattr(self, 'headers'):
-            xheaders = [h for h in self.headers.items()
-                        if h[0].startswith(r'x-')]
-        self.log_message(r'"%s" %s %s%s',
-                         self.requestline, str(code), str(size),
-                         r''.join([r' %s:%s' % h for h in sorted(xheaders)]))
+        if util.safehasattr(self, b'headers'):
+            xheaders = [
+                h for h in self.headers.items() if h[0].startswith(r'x-')
+            ]
+        self.log_message(
+            r'"%s" %s %s%s',
+            self.requestline,
+            str(code),
+            str(size),
+            r''.join([r' %s:%s' % h for h in sorted(xheaders)]),
+        )
 
     def do_write(self):
         try:
@@ -103,15 +124,22 @@
         except Exception as e:
             # I/O below could raise another exception. So log the original
             # exception first to ensure it is recorded.
-            if not (isinstance(e, (OSError, socket.error))
-                    and e.errno == errno.ECONNRESET):
+            if not (
+                isinstance(e, (OSError, socket.error))
+                and e.errno == errno.ECONNRESET
+            ):
                 tb = r"".join(traceback.format_exception(*sys.exc_info()))
                 # We need a native-string newline to poke in the log
                 # message, because we won't get a newline when using an
                 # r-string. This is the easy way out.
                 newline = chr(10)
-                self.log_error(r"Exception happened during processing "
-                               r"request '%s':%s%s", self.path, newline, tb)
+                self.log_error(
+                    r"Exception happened during processing "
+                    r"request '%s':%s%s",
+                    self.path,
+                    newline,
+                    tb,
+                )
 
             self._start_response(r"500 Internal Server Error", [])
             self._write(b"Internal Server Error")
@@ -128,11 +156,11 @@
         path, query = _splitURI(self.path)
 
         # Ensure the slicing of path below is valid
-        if (path != self.server.prefix
-            and not path.startswith(self.server.prefix + b'/')):
-            self._start_response(pycompat.strurl(common.statusmessage(404)),
-                                 [])
-            if self.command == 'POST':
+        if path != self.server.prefix and not path.startswith(
+            self.server.prefix + b'/'
+        ):
+            self._start_response(pycompat.strurl(common.statusmessage(404)), [])
+            if self.command == b'POST':
                 # Paranoia: tell the client we're going to close the
                 # socket so they don't try and reuse a socket that
                 # might have a POST body waiting to confuse us. We do
@@ -150,7 +178,7 @@
         env[r'SERVER_PORT'] = str(self.server.server_port)
         env[r'REQUEST_URI'] = self.path
         env[r'SCRIPT_NAME'] = pycompat.sysstr(self.server.prefix)
-        env[r'PATH_INFO'] = pycompat.sysstr(path[len(self.server.prefix):])
+        env[r'PATH_INFO'] = pycompat.sysstr(path[len(self.server.prefix) :])
         env[r'REMOTE_HOST'] = self.client_address[0]
         env[r'REMOTE_ADDR'] = self.client_address[0]
         env[r'QUERY_STRING'] = query or r''
@@ -169,8 +197,11 @@
             length = self.headers.getheader(r'content-length')
         if length:
             env[r'CONTENT_LENGTH'] = length
-        for header in [h for h in self.headers.keys()
-                      if h.lower() not in (r'content-type', r'content-length')]:
+        for header in [
+            h
+            for h in self.headers.keys()
+            if h.lower() not in (r'content-type', r'content-length')
+        ]:
             hkey = r'HTTP_' + header.replace(r'-', r'_').upper()
             hval = self.headers.get(header)
             hval = hval.replace(r'\n', r'').strip()
@@ -179,16 +210,18 @@
         env[r'SERVER_PROTOCOL'] = self.request_version
         env[r'wsgi.version'] = (1, 0)
         env[r'wsgi.url_scheme'] = pycompat.sysstr(self.url_scheme)
-        if env.get(r'HTTP_EXPECT', '').lower() == '100-continue':
+        if env.get(r'HTTP_EXPECT', b'').lower() == b'100-continue':
             self.rfile = common.continuereader(self.rfile, self.wfile.write)
 
         env[r'wsgi.input'] = self.rfile
         env[r'wsgi.errors'] = _error_logger(self)
-        env[r'wsgi.multithread'] = isinstance(self.server,
-                                             socketserver.ThreadingMixIn)
-        if util.safehasattr(socketserver, 'ForkingMixIn'):
-            env[r'wsgi.multiprocess'] = isinstance(self.server,
-                                                   socketserver.ForkingMixIn)
+        env[r'wsgi.multithread'] = isinstance(
+            self.server, socketserver.ThreadingMixIn
+        )
+        if util.safehasattr(socketserver, b'ForkingMixIn'):
+            env[r'wsgi.multiprocess'] = isinstance(
+                self.server, socketserver.ForkingMixIn
+            )
         else:
             env[r'wsgi.multiprocess'] = False
 
@@ -208,8 +241,9 @@
 
     def send_headers(self):
         if not self.saved_status:
-            raise AssertionError("Sending headers before "
-                                 "start_response() called")
+            raise AssertionError(
+                b"Sending headers before start_response() called"
+            )
         saved_status = self.saved_status.split(None, 1)
         saved_status[0] = int(saved_status[0])
         self.send_response(*saved_status)
@@ -219,10 +253,11 @@
             self.send_header(*h)
             if h[0].lower() == r'content-length':
                 self.length = int(h[1])
-        if (self.length is None and
-            saved_status[0] != common.HTTP_NOT_MODIFIED):
-            self._chunked = (not self.close_connection and
-                             self.request_version == r'HTTP/1.1')
+        if self.length is None and saved_status[0] != common.HTTP_NOT_MODIFIED:
+            self._chunked = (
+                not self.close_connection
+                and self.request_version == r'HTTP/1.1'
+            )
             if self._chunked:
                 self.send_header(r'Transfer-Encoding', r'chunked')
             else:
@@ -236,28 +271,31 @@
         code = int(code)
         self.saved_status = http_status
         bad_headers = (r'connection', r'transfer-encoding')
-        self.saved_headers = [h for h in headers
-                              if h[0].lower() not in bad_headers]
+        self.saved_headers = [
+            h for h in headers if h[0].lower() not in bad_headers
+        ]
         return self._write
 
     def _write(self, data):
         if not self.saved_status:
-            raise AssertionError("data written before start_response() called")
+            raise AssertionError(b"data written before start_response() called")
         elif not self.sent_headers:
             self.send_headers()
         if self.length is not None:
             if len(data) > self.length:
-                raise AssertionError("Content-length header sent, but more "
-                                     "bytes than specified are being written.")
+                raise AssertionError(
+                    b"Content-length header sent, but more "
+                    b"bytes than specified are being written."
+                )
             self.length = self.length - len(data)
         elif self._chunked and data:
-            data = '%x\r\n%s\r\n' % (len(data), data)
+            data = b'%x\r\n%s\r\n' % (len(data), data)
         self.wfile.write(data)
         self.wfile.flush()
 
     def _done(self):
         if self._chunked:
-            self.wfile.write('0\r\n\r\n')
+            self.wfile.write(b'0\r\n\r\n')
             self.wfile.flush()
 
     def version_string(self):
@@ -265,53 +303,62 @@
             return encoding.strfromlocal(self.server.serverheader)
         return httpservermod.basehttprequesthandler.version_string(self)
 
+
 class _httprequesthandlerssl(_httprequesthandler):
     """HTTPS handler based on Python's ssl module"""
 
-    url_scheme = 'https'
+    url_scheme = b'https'
 
     @staticmethod
     def preparehttpserver(httpserver, ui):
         try:
             from .. import sslutil
+
             sslutil.modernssl
         except ImportError:
-            raise error.Abort(_("SSL support is unavailable"))
+            raise error.Abort(_(b"SSL support is unavailable"))
 
-        certfile = ui.config('web', 'certificate')
+        certfile = ui.config(b'web', b'certificate')
 
         # These config options are currently only meant for testing. Use
         # at your own risk.
-        cafile = ui.config('devel', 'servercafile')
-        reqcert = ui.configbool('devel', 'serverrequirecert')
+        cafile = ui.config(b'devel', b'servercafile')
+        reqcert = ui.configbool(b'devel', b'serverrequirecert')
 
-        httpserver.socket = sslutil.wrapserversocket(httpserver.socket,
-                                                     ui,
-                                                     certfile=certfile,
-                                                     cafile=cafile,
-                                                     requireclientcert=reqcert)
+        httpserver.socket = sslutil.wrapserversocket(
+            httpserver.socket,
+            ui,
+            certfile=certfile,
+            cafile=cafile,
+            requireclientcert=reqcert,
+        )
 
     def setup(self):
         self.connection = self.request
         self.rfile = self.request.makefile(r"rb", self.rbufsize)
         self.wfile = self.request.makefile(r"wb", self.wbufsize)
 
+
 try:
     import threading
-    threading.activeCount() # silence pyflakes and bypass demandimport
+
+    threading.activeCount()  # silence pyflakes and bypass demandimport
     _mixin = socketserver.ThreadingMixIn
 except ImportError:
-    if util.safehasattr(os, "fork"):
+    if util.safehasattr(os, b"fork"):
         _mixin = socketserver.ForkingMixIn
     else:
+
         class _mixin(object):
             pass
 
+
 def openlog(opt, default):
-    if opt and opt != '-':
-        return open(opt, 'ab')
+    if opt and opt != b'-':
+        return open(opt, b'ab')
     return default
 
+
 class MercurialHTTPServer(_mixin, httpservermod.httpserver, object):
 
     # SO_REUSEADDR has broken semantics on windows
@@ -325,36 +372,39 @@
 
         handler.preparehttpserver(self, ui)
 
-        prefix = ui.config('web', 'prefix')
+        prefix = ui.config(b'web', b'prefix')
         if prefix:
-            prefix = '/' + prefix.strip('/')
+            prefix = b'/' + prefix.strip(b'/')
         self.prefix = prefix
 
-        alog = openlog(ui.config('web', 'accesslog'), ui.fout)
-        elog = openlog(ui.config('web', 'errorlog'), ui.ferr)
+        alog = openlog(ui.config(b'web', b'accesslog'), ui.fout)
+        elog = openlog(ui.config(b'web', b'errorlog'), ui.ferr)
         self.accesslog = alog
         self.errorlog = elog
 
         self.addr, self.port = self.socket.getsockname()[0:2]
         self.fqaddr = socket.getfqdn(addr[0])
 
-        self.serverheader = ui.config('web', 'server-header')
+        self.serverheader = ui.config(b'web', b'server-header')
+
 
 class IPv6HTTPServer(MercurialHTTPServer):
     address_family = getattr(socket, 'AF_INET6', None)
+
     def __init__(self, *args, **kwargs):
         if self.address_family is None:
-            raise error.RepoError(_('IPv6 is not available on this system'))
+            raise error.RepoError(_(b'IPv6 is not available on this system'))
         super(IPv6HTTPServer, self).__init__(*args, **kwargs)
 
+
 def create_server(ui, app):
 
-    if ui.config('web', 'certificate'):
+    if ui.config(b'web', b'certificate'):
         handler = _httprequesthandlerssl
     else:
         handler = _httprequesthandler
 
-    if ui.configbool('web', 'ipv6'):
+    if ui.configbool(b'web', b'ipv6'):
         cls = IPv6HTTPServer
     else:
         cls = MercurialHTTPServer
@@ -362,6 +412,7 @@
     # ugly hack due to python issue5853 (for threaded use)
     try:
         import mimetypes
+
         mimetypes.init()
     except UnicodeDecodeError:
         # Python 2.x's mimetypes module attempts to decode strings
@@ -369,17 +420,23 @@
         # as ascii (clown fail), because the default Python Unicode
         # codec is hardcoded as ascii.
 
-        sys.argv # unwrap demand-loader so that reload() works
-        reload(sys) # resurrect sys.setdefaultencoding()
+        sys.argv  # unwrap demand-loader so that reload() works
+        # resurrect sys.setdefaultencoding()
+        try:
+            importlib.reload(sys)
+        except AttributeError:
+            reload(sys)
         oldenc = sys.getdefaultencoding()
-        sys.setdefaultencoding("latin1") # or any full 8-bit encoding
+        sys.setdefaultencoding(b"latin1")  # or any full 8-bit encoding
         mimetypes.init()
         sys.setdefaultencoding(oldenc)
 
-    address = ui.config('web', 'address')
-    port = util.getport(ui.config('web', 'port'))
+    address = ui.config(b'web', b'address')
+    port = util.getport(ui.config(b'web', b'port'))
     try:
         return cls(ui, app, (address, port), handler)
     except socket.error as inst:
-        raise error.Abort(_("cannot start server at '%s:%d': %s")
-                          % (address, port, encoding.strtolocal(inst.args[1])))
+        raise error.Abort(
+            _(b"cannot start server at '%s:%d': %s")
+            % (address, port, encoding.strtolocal(inst.args[1]))
+        )
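
One small behavior worth restating from the server hunks above: a
configured ``web.prefix`` is normalized to a single leading slash and no
trailing slash before request paths are matched against it::

   def normalizeprefix(prefix):
       if prefix:
           prefix = b'/' + prefix.strip(b'/')
       return prefix

   assert normalizeprefix(b'hg/') == b'/hg'
   assert normalizeprefix(b'/a/b/') == b'/a/b'
   assert normalizeprefix(b'') == b''
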
--- a/mercurial/hgweb/webcommands.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hgweb/webcommands.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,6 +14,7 @@
 
 from ..i18n import _
 from ..node import hex, short
+from ..pycompat import getattr
 
 from .common import (
     ErrorResponse,
@@ -39,17 +40,14 @@
     templateutil,
 )
 
-from ..utils import (
-    stringutil,
-)
+from ..utils import stringutil
 
-from . import (
-    webutil,
-)
+from . import webutil
 
 __all__ = []
 commands = {}
 
+
 class webcommand(object):
     """Decorator used to register a web command handler.
 
@@ -81,7 +79,8 @@
         commands[self.name] = func
         return func
 
-@webcommand('log')
+
+@webcommand(b'log')
 def log(web):
     """
     /log[/{revision}[/{path}]]
@@ -98,16 +97,17 @@
     file will be shown. This form is equivalent to the ``filelog`` handler.
     """
 
-    if web.req.qsparams.get('file'):
+    if web.req.qsparams.get(b'file'):
         return filelog(web)
     else:
         return changelog(web)
 
-@webcommand('rawfile')
+
+@webcommand(b'rawfile')
 def rawfile(web):
-    guessmime = web.configbool('web', 'guessmime')
+    guessmime = web.configbool(b'web', b'guessmime')
 
-    path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
+    path = webutil.cleanpath(web.repo, web.req.qsparams.get(b'file', b''))
     if not path:
         return manifest(web)
 
@@ -121,27 +121,31 @@
 
     path = fctx.path()
     text = fctx.data()
-    mt = 'application/binary'
+    mt = b'application/binary'
     if guessmime:
         mt = mimetypes.guess_type(pycompat.fsdecode(path))[0]
         if mt is None:
             if stringutil.binary(text):
-                mt = 'application/binary'
+                mt = b'application/binary'
             else:
-                mt = 'text/plain'
+                mt = b'text/plain'
         else:
             mt = pycompat.sysbytes(mt)
 
-    if mt.startswith('text/'):
-        mt += '; charset="%s"' % encoding.encoding
+    if mt.startswith(b'text/'):
+        mt += b'; charset="%s"' % encoding.encoding
 
-    web.res.headers['Content-Type'] = mt
-    filename = (path.rpartition('/')[-1]
-                .replace('\\', '\\\\').replace('"', '\\"'))
-    web.res.headers['Content-Disposition'] = 'inline; filename="%s"' % filename
+    web.res.headers[b'Content-Type'] = mt
+    filename = (
+        path.rpartition(b'/')[-1].replace(b'\\', b'\\\\').replace(b'"', b'\\"')
+    )
+    web.res.headers[b'Content-Disposition'] = (
+        b'inline; filename="%s"' % filename
+    )
     web.res.setbodybytes(text)
     return web.res.sendresponse()
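
The ``rawfile`` handler above picks a Content-Type in three steps: a
filename-based guess, then a binary sniff, then ``text/plain``. A sketch
in which ``isbinary()`` stands in for ``stringutil.binary()`` under the
assumption that a NUL byte marks binary data::

   import mimetypes

   def isbinary(data):
       # Stand-in for stringutil.binary(): data containing a NUL byte
       # is treated as binary.
       return bool(data and b'\0' in data)

   def guessmimetype(path, text):
       mt = mimetypes.guess_type(path)[0]
       if mt is None:
           mt = 'application/binary' if isbinary(text) else 'text/plain'
       return mt

   assert guessmimetype('readme.txt', b'hello') == 'text/plain'
   assert guessmimetype('blob', b'\0\1\2') == 'application/binary'
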
 
+
 def _filerevision(web, fctx):
     f = fctx.path()
     text = fctx.data()
@@ -151,18 +155,21 @@
     if stringutil.binary(text):
         mt = pycompat.sysbytes(
             mimetypes.guess_type(pycompat.fsdecode(f))[0]
-            or r'application/octet-stream')
-        text = '(binary:%s)' % mt
+            or r'application/octet-stream'
+        )
+        text = b'(binary:%s)' % mt
 
     def lines(context):
         for lineno, t in enumerate(text.splitlines(True)):
-            yield {"line": t,
-                   "lineid": "l%d" % (lineno + 1),
-                   "linenumber": "% 6d" % (lineno + 1),
-                   "parity": next(parity)}
+            yield {
+                b"line": t,
+                b"lineid": b"l%d" % (lineno + 1),
+                b"linenumber": b"% 6d" % (lineno + 1),
+                b"parity": next(parity),
+            }
 
     return web.sendtemplate(
-        'filerevision',
+        b'filerevision',
         file=f,
         path=webutil.up(f),
         text=templateutil.mappinggenerator(lines),
@@ -170,9 +177,11 @@
         rename=webutil.renamelink(fctx),
         permissions=fctx.manifest().flags(f),
         ishead=int(ishead),
-        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
+        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx))
+    )
 
-@webcommand('file')
+
+@webcommand(b'file')
 def file(web):
     """
     /file/{revision}[/{path}]
@@ -192,10 +201,10 @@
     If ``path`` is not defined, information about the root directory will
     be rendered.
     """
-    if web.req.qsparams.get('style') == 'raw':
+    if web.req.qsparams.get(b'style') == b'raw':
         return rawfile(web)
 
-    path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
+    path = webutil.cleanpath(web.repo, web.req.qsparams.get(b'file', b''))
     if not path:
         return manifest(web)
     try:
@@ -206,10 +215,11 @@
         except ErrorResponse:
             raise inst
 
+
 def _search(web):
-    MODE_REVISION = 'rev'
-    MODE_KEYWORD = 'keyword'
-    MODE_REVSET = 'revset'
+    MODE_REVISION = b'rev'
+    MODE_KEYWORD = b'keyword'
+    MODE_REVSET = b'revset'
 
     def revsearch(ctx):
         yield ctx
@@ -232,9 +242,11 @@
         for ctx in revgen():
             miss = 0
             for q in qw:
-                if not (q in lower(ctx.user()) or
-                        q in lower(ctx.description()) or
-                        q in lower(" ".join(ctx.files()))):
+                if not (
+                    q in lower(ctx.user())
+                    or q in lower(ctx.description())
+                    or q in lower(b" ".join(ctx.files()))
+                ):
                     miss = 1
                     break
             if miss:
@@ -247,9 +259,9 @@
             yield web.repo[r]
 
     searchfuncs = {
-        MODE_REVISION: (revsearch, 'exact revision search'),
-        MODE_KEYWORD: (keywordsearch, 'literal keyword search'),
-        MODE_REVSET: (revsetsearch, 'revset expression search'),
+        MODE_REVISION: (revsearch, b'exact revision search'),
+        MODE_KEYWORD: (keywordsearch, b'literal keyword search'),
+        MODE_REVSET: (revsetsearch, b'revset expression search'),
     }
 
     def getsearchmode(query):
@@ -262,7 +274,7 @@
         else:
             return MODE_REVISION, ctx
 
-        revdef = 'reverse(%s)' % query
+        revdef = b'reverse(%s)' % query
         try:
             tree = revsetlang.parse(revdef)
         except error.ParseError:
@@ -273,8 +285,10 @@
             # no revset syntax used
             return MODE_KEYWORD, query
 
-        if any((token, (value or '')[:3]) == ('string', 're:')
-               for token, value, pos in revsetlang.tokenize(revdef)):
+        if any(
+            (token, (value or b'')[:3]) == (b'string', b're:')
+            for token, value, pos in revsetlang.tokenize(revdef)
+        ):
             return MODE_KEYWORD, query
 
         funcsused = revsetlang.funcsused(tree)
@@ -282,16 +296,21 @@
             return MODE_KEYWORD, query
 
         try:
-            mfunc = revset.match(web.repo.ui, revdef,
-                                 lookup=revset.lookupfn(web.repo))
+            mfunc = revset.match(
+                web.repo.ui, revdef, lookup=revset.lookupfn(web.repo)
+            )
             revs = mfunc(web.repo)
             return MODE_REVSET, revs
             # ParseError: wrongly placed tokens, wrong arguments, etc
             # RepoLookupError: no such revision, e.g. in 'revision:'
             # Abort: bookmark/tag does not exist
             # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo
-        except (error.ParseError, error.RepoLookupError, error.Abort,
-                LookupError):
+        except (
+            error.ParseError,
+            error.RepoLookupError,
+            error.Abort,
+            LookupError,
+        ):
             return MODE_KEYWORD, query
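
To summarize the dispatch implemented by ``getsearchmode`` above: a
query is tried as a literal revision, then as a revset, and falls back
to keyword search whenever the revset path is unusable. A hypothetical
condensation, where each flag stands in for one of the real
parse/tokenize/funcsused checks::

   def choosemode(parses, usesregex, usesunsafefuncs):
       # parses:          revsetlang.parse() succeeded
       # usesregex:       a 're:' string token appeared in the query
       # usesunsafefuncs: revsetlang.funcsused() found a disallowed name
       if not parses or usesregex or usesunsafefuncs:
           return b'keyword'
       return b'revset'

   assert choosemode(True, False, False) == b'revset'
   assert choosemode(False, False, False) == b'keyword'
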
 
     def changelist(context):
@@ -300,41 +319,43 @@
         for ctx in searchfunc[0](funcarg):
             count += 1
             n = scmutil.binnode(ctx)
-            showtags = webutil.showtag(web.repo, 'changelogtag', n)
+            showtags = webutil.showtag(web.repo, b'changelogtag', n)
             files = webutil.listfilediffs(ctx.files(), n, web.maxfiles)
 
             lm = webutil.commonentry(web.repo, ctx)
-            lm.update({
-                'parity': next(parity),
-                'changelogtag': showtags,
-                'files': files,
-            })
+            lm.update(
+                {
+                    b'parity': next(parity),
+                    b'changelogtag': showtags,
+                    b'files': files,
+                }
+            )
             yield lm
 
             if count >= revcount:
                 break
 
-    query = web.req.qsparams['rev']
+    query = web.req.qsparams[b'rev']
     revcount = web.maxchanges
-    if 'revcount' in web.req.qsparams:
+    if b'revcount' in web.req.qsparams:
         try:
-            revcount = int(web.req.qsparams.get('revcount', revcount))
+            revcount = int(web.req.qsparams.get(b'revcount', revcount))
             revcount = max(revcount, 1)
-            web.tmpl.defaults['sessionvars']['revcount'] = revcount
+            web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount
         except ValueError:
             pass
 
-    lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
-    lessvars['revcount'] = max(revcount // 2, 1)
-    lessvars['rev'] = query
-    morevars = copy.copy(web.tmpl.defaults['sessionvars'])
-    morevars['revcount'] = revcount * 2
-    morevars['rev'] = query
+    lessvars = copy.copy(web.tmpl.defaults[b'sessionvars'])
+    lessvars[b'revcount'] = max(revcount // 2, 1)
+    lessvars[b'rev'] = query
+    morevars = copy.copy(web.tmpl.defaults[b'sessionvars'])
+    morevars[b'revcount'] = revcount * 2
+    morevars[b'rev'] = query
 
     mode, funcarg = getsearchmode(query)
 
-    if 'forcekw' in web.req.qsparams:
-        showforcekw = ''
+    if b'forcekw' in web.req.qsparams:
+        showforcekw = b''
         showunforcekw = searchfuncs[mode][1]
         mode = MODE_KEYWORD
         funcarg = query
@@ -342,28 +363,30 @@
         if mode != MODE_KEYWORD:
             showforcekw = searchfuncs[MODE_KEYWORD][1]
         else:
-            showforcekw = ''
-        showunforcekw = ''
+            showforcekw = b''
+        showunforcekw = b''
 
     searchfunc = searchfuncs[mode]
 
-    tip = web.repo['tip']
+    tip = web.repo[b'tip']
     parity = paritygen(web.stripecount)
 
     return web.sendtemplate(
-        'search',
+        b'search',
         query=query,
         node=tip.hex(),
-        symrev='tip',
-        entries=templateutil.mappinggenerator(changelist, name='searchentry'),
-        archives=web.archivelist('tip'),
+        symrev=b'tip',
+        entries=templateutil.mappinggenerator(changelist, name=b'searchentry'),
+        archives=web.archivelist(b'tip'),
         morevars=morevars,
         lessvars=lessvars,
         modedesc=searchfunc[1],
         showforcekw=showforcekw,
-        showunforcekw=showunforcekw)
+        showunforcekw=showunforcekw,
+    )
 
-@webcommand('changelog')
+
+@webcommand(b'changelog')
 def changelog(web, shortlog=False):
     """
     /changelog[/{revision}]
@@ -389,15 +412,15 @@
     For non-searches, the ``changelog`` template will be rendered.
     """
 
-    query = ''
-    if 'node' in web.req.qsparams:
+    query = b''
+    if b'node' in web.req.qsparams:
         ctx = webutil.changectx(web.repo, web.req)
         symrev = webutil.symrevorshortnode(web.req, ctx)
-    elif 'rev' in web.req.qsparams:
+    elif b'rev' in web.req.qsparams:
         return _search(web)
     else:
-        ctx = web.repo['tip']
-        symrev = 'tip'
+        ctx = web.repo[b'tip']
+        symrev = b'tip'
 
     def changelist(maxcount):
         revs = []
@@ -412,18 +435,18 @@
     else:
         revcount = web.maxchanges
 
-    if 'revcount' in web.req.qsparams:
+    if b'revcount' in web.req.qsparams:
         try:
-            revcount = int(web.req.qsparams.get('revcount', revcount))
+            revcount = int(web.req.qsparams.get(b'revcount', revcount))
             revcount = max(revcount, 1)
-            web.tmpl.defaults['sessionvars']['revcount'] = revcount
+            web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount
         except ValueError:
             pass
 
-    lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
-    lessvars['revcount'] = max(revcount // 2, 1)
-    morevars = copy.copy(web.tmpl.defaults['sessionvars'])
-    morevars['revcount'] = revcount * 2
+    lessvars = copy.copy(web.tmpl.defaults[b'sessionvars'])
+    lessvars[b'revcount'] = max(revcount // 2, 1)
+    morevars = copy.copy(web.tmpl.defaults[b'sessionvars'])
+    morevars[b'revcount'] = revcount * 2
 
     count = len(web.repo)
     pos = ctx.rev()
@@ -440,7 +463,7 @@
         nextentry = []
 
     return web.sendtemplate(
-        'shortlog' if shortlog else 'changelog',
+        b'shortlog' if shortlog else b'changelog',
         changenav=changenav,
         node=ctx.hex(),
         rev=pos,
@@ -449,13 +472,15 @@
         entries=templateutil.mappinglist(entries),
         latestentry=templateutil.mappinglist(latestentry),
         nextentry=templateutil.mappinglist(nextentry),
-        archives=web.archivelist('tip'),
+        archives=web.archivelist(b'tip'),
         revcount=revcount,
         morevars=morevars,
         lessvars=lessvars,
-        query=query)
+        query=query,
+    )
 
-@webcommand('shortlog')
+
+@webcommand(b'shortlog')
 def shortlog(web):
     """
     /shortlog
@@ -469,7 +494,8 @@
     """
     return changelog(web, shortlog=True)
 
-@webcommand('changeset')
+
+@webcommand(b'changeset')
 def changeset(web):
     """
     /changeset[/{revision}]
@@ -487,11 +513,11 @@
     """
     ctx = webutil.changectx(web.repo, web.req)
 
-    return web.sendtemplate(
-        'changeset',
-        **webutil.changesetentry(web, ctx))
+    return web.sendtemplate(b'changeset', **webutil.changesetentry(web, ctx))
+
 
-rev = webcommand('rev')(changeset)
+rev = webcommand(b'rev')(changeset)
+
 
 def decodepath(path):
     """Hook for mapping a path in the repository to a path in the
@@ -501,7 +527,8 @@
     the virtual file system presented by the manifest command below."""
     return path
 
-@webcommand('manifest')
+
+@webcommand(b'manifest')
 def manifest(web):
     """
     /manifest[/{revision}[/{path}]]
@@ -518,13 +545,13 @@
 
     The ``manifest`` template will be rendered for this handler.
     """
-    if 'node' in web.req.qsparams:
+    if b'node' in web.req.qsparams:
         ctx = webutil.changectx(web.repo, web.req)
         symrev = webutil.symrevorshortnode(web.req, ctx)
     else:
-        ctx = web.repo['tip']
-        symrev = 'tip'
-    path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
+        ctx = web.repo[b'tip']
+        symrev = b'tip'
+    path = webutil.cleanpath(web.repo, web.req.qsparams.get(b'file', b''))
     mf = ctx.manifest()
     node = scmutil.binnode(ctx)
 
@@ -532,12 +559,12 @@
     dirs = {}
     parity = paritygen(web.stripecount)
 
-    if path and path[-1:] != "/":
-        path += "/"
+    if path and path[-1:] != b"/":
+        path += b"/"
     l = len(path)
-    abspath = "/" + path
+    abspath = b"/" + path
 
-    for full, n in mf.iteritems():
+    for full, n in pycompat.iteritems(mf):
         # the virtual path (working copy path) used for the full
         # (repository) path
         f = decodepath(full)
@@ -545,33 +572,35 @@
         if f[:l] != path:
             continue
         remain = f[l:]
-        elements = remain.split('/')
+        elements = remain.split(b'/')
         if len(elements) == 1:
             files[remain] = full
         else:
-            h = dirs # need to retain ref to dirs (root)
+            h = dirs  # need to retain ref to dirs (root)
             for elem in elements[0:-1]:
                 if elem not in h:
                     h[elem] = {}
                 h = h[elem]
                 if len(h) > 1:
                     break
-            h[None] = None # denotes files present
+            h[None] = None  # denotes files present
 
     if mf and not files and not dirs:
-        raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)
+        raise ErrorResponse(HTTP_NOT_FOUND, b'path not found: ' + path)
 
     def filelist(context):
         for f in sorted(files):
             full = files[f]
 
             fctx = ctx.filectx(full)
-            yield {"file": full,
-                   "parity": next(parity),
-                   "basename": f,
-                   "date": fctx.date(),
-                   "size": fctx.size(),
-                   "permissions": mf.flags(full)}
+            yield {
+                b"file": full,
+                b"parity": next(parity),
+                b"basename": f,
+                b"date": fctx.date(),
+                b"size": fctx.size(),
+                b"permissions": mf.flags(full),
+            }
 
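For reference, the path-splitting loop above builds one nested dict per
directory level, with a ``None`` key marking that files live directly
underneath. A simplified stand-in (it keeps descending where the real
loop breaks early once a directory is known to be non-empty)::

    def build_tree(paths):
        files, dirs = {}, {}
        for path in paths:
            elements = path.split(b'/')
            if len(elements) == 1:
                files[path] = path
            else:
                h = dirs  # retain a reference to the root
                for elem in elements[:-1]:
                    h = h.setdefault(elem, {})
                h[None] = None  # denotes files present
        return files, dirs

    # build_tree([b'a', b'lib/x.py', b'lib/sub/y.py'])
    # -> ({b'a': b'a'}, {b'lib': {None: None, b'sub': {None: None}}})
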
     def dirlist(context):
         for d in sorted(dirs):
@@ -584,14 +613,16 @@
                     emptydirs.append(k)
                 h = v
 
-            path = "%s%s" % (abspath, d)
-            yield {"parity": next(parity),
-                   "path": path,
-                   "emptydirs": "/".join(emptydirs),
-                   "basename": d}
+            path = b"%s%s" % (abspath, d)
+            yield {
+                b"parity": next(parity),
+                b"path": path,
+                b"emptydirs": b"/".join(emptydirs),
+                b"basename": d,
+            }
 
     return web.sendtemplate(
-        'manifest',
+        b'manifest',
         symrev=symrev,
         path=abspath,
         up=webutil.up(abspath),
@@ -599,9 +630,11 @@
         fentries=templateutil.mappinggenerator(filelist),
         dentries=templateutil.mappinggenerator(dirlist),
         archives=web.archivelist(hex(node)),
-        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))
+        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))
+    )
 
-@webcommand('tags')
+
+@webcommand(b'tags')
 def tags(web):
     """
     /tags
@@ -619,24 +652,27 @@
     def entries(context, notip, latestonly):
         t = i
         if notip:
-            t = [(k, n) for k, n in i if k != "tip"]
+            t = [(k, n) for k, n in i if k != b"tip"]
         if latestonly:
             t = t[:1]
         for k, n in t:
-            yield {"parity": next(parity),
-                   "tag": k,
-                   "date": web.repo[n].date(),
-                   "node": hex(n)}
+            yield {
+                b"parity": next(parity),
+                b"tag": k,
+                b"date": web.repo[n].date(),
+                b"node": hex(n),
+            }
 
     return web.sendtemplate(
-        'tags',
+        b'tags',
         node=hex(web.repo.changelog.tip()),
         entries=templateutil.mappinggenerator(entries, args=(False, False)),
-        entriesnotip=templateutil.mappinggenerator(entries,
-                                                   args=(True, False)),
-        latestentry=templateutil.mappinggenerator(entries, args=(True, True)))
+        entriesnotip=templateutil.mappinggenerator(entries, args=(True, False)),
+        latestentry=templateutil.mappinggenerator(entries, args=(True, True)),
+    )
 
-@webcommand('bookmarks')
+
+@webcommand(b'bookmarks')
 def bookmarks(web):
     """
     /bookmarks
@@ -658,10 +694,12 @@
         if latestonly:
             t = i[:1]
         for k, n in t:
-            yield {"parity": next(parity),
-                   "bookmark": k,
-                   "date": web.repo[n].date(),
-                   "node": hex(n)}
+            yield {
+                b"parity": next(parity),
+                b"bookmark": k,
+                b"date": web.repo[n].date(),
+                b"node": hex(n),
+            }
 
     if i:
         latestrev = i[0][1]
@@ -670,13 +708,15 @@
     lastdate = web.repo[latestrev].date()
 
     return web.sendtemplate(
-        'bookmarks',
+        b'bookmarks',
         node=hex(web.repo.changelog.tip()),
-        lastchange=templateutil.mappinglist([{'date': lastdate}]),
+        lastchange=templateutil.mappinglist([{b'date': lastdate}]),
         entries=templateutil.mappinggenerator(entries, args=(False,)),
-        latestentry=templateutil.mappinggenerator(entries, args=(True,)))
+        latestentry=templateutil.mappinggenerator(entries, args=(True,)),
+    )
 
-@webcommand('branches')
+
+@webcommand(b'branches')
 def branches(web):
     """
     /branches
@@ -694,12 +734,14 @@
     latestentry = webutil.branchentries(web.repo, web.stripecount, 1)
 
     return web.sendtemplate(
-        'branches',
+        b'branches',
         node=hex(web.repo.changelog.tip()),
         entries=entries,
-        latestentry=latestentry)
+        latestentry=latestentry,
+    )
 
-@webcommand('summary')
+
+@webcommand(b'summary')
 def summary(web):
     """
     /summary
@@ -718,18 +760,18 @@
         parity = paritygen(web.stripecount)
         count = 0
         for k, n in i:
-            if k == "tip": # skip tip
+            if k == b"tip":  # skip tip
                 continue
 
             count += 1
-            if count > 10: # limit to 10 tags
+            if count > 10:  # limit to 10 tags
                 break
 
             yield {
-                'parity': next(parity),
-                'tag': k,
-                'node': hex(n),
-                'date': web.repo[n].date(),
+                b'parity': next(parity),
+                b'tag': k,
+                b'node': hex(n),
+                b'date': web.repo[n].date(),
             }
 
     def bookmarks(context):
@@ -738,52 +780,57 @@
         sortkey = lambda b: (web.repo[b[1]].rev(), b[0])
         marks = sorted(marks, key=sortkey, reverse=True)
         for k, n in marks[:10]:  # limit to 10 bookmarks
-            yield {'parity': next(parity),
-                   'bookmark': k,
-                   'date': web.repo[n].date(),
-                   'node': hex(n)}
+            yield {
+                b'parity': next(parity),
+                b'bookmark': k,
+                b'date': web.repo[n].date(),
+                b'node': hex(n),
+            }
 
     def changelist(context):
         parity = paritygen(web.stripecount, offset=start - end)
-        l = [] # build a list in forward order for efficiency
+        l = []  # build a list in forward order for efficiency
         revs = []
         if start < end:
             revs = web.repo.changelog.revs(start, end - 1)
         for i in revs:
             ctx = web.repo[i]
             lm = webutil.commonentry(web.repo, ctx)
-            lm['parity'] = next(parity)
+            lm[b'parity'] = next(parity)
             l.append(lm)
 
         for entry in reversed(l):
             yield entry
 
-    tip = web.repo['tip']
+    tip = web.repo[b'tip']
     count = len(web.repo)
     start = max(0, count - web.maxchanges)
     end = min(count, start + web.maxchanges)
 
-    desc = web.config("web", "description")
+    desc = web.config(b"web", b"description")
     if not desc:
-        desc = 'unknown'
-    labels = web.configlist('web', 'labels')
+        desc = b'unknown'
+    labels = web.configlist(b'web', b'labels')
 
     return web.sendtemplate(
-        'summary',
+        b'summary',
         desc=desc,
-        owner=get_contact(web.config) or 'unknown',
+        owner=get_contact(web.config) or b'unknown',
         lastchange=tip.date(),
-        tags=templateutil.mappinggenerator(tagentries, name='tagentry'),
+        tags=templateutil.mappinggenerator(tagentries, name=b'tagentry'),
         bookmarks=templateutil.mappinggenerator(bookmarks),
         branches=webutil.branchentries(web.repo, web.stripecount, 10),
-        shortlog=templateutil.mappinggenerator(changelist,
-                                               name='shortlogentry'),
+        shortlog=templateutil.mappinggenerator(
+            changelist, name=b'shortlogentry'
+        ),
         node=tip.hex(),
-        symrev='tip',
-        archives=web.archivelist('tip'),
-        labels=templateutil.hybridlist(labels, name='label'))
+        symrev=b'tip',
+        archives=web.archivelist(b'tip'),
+        labels=templateutil.hybridlist(labels, name=b'label'),
+    )
 
-@webcommand('filediff')
+
+@webcommand(b'filediff')
 def filediff(web):
     """
     /diff/{revision}/{path}
@@ -801,7 +848,7 @@
         fctx = webutil.filectx(web.repo, web.req)
     except LookupError:
         ctx = webutil.changectx(web.repo, web.req)
-        path = webutil.cleanpath(web.repo, web.req.qsparams['file'])
+        path = webutil.cleanpath(web.repo, web.req.qsparams[b'file'])
         if path not in ctx.files():
             raise
 
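The except branch above covers a path with no filelog entry at the
requested revision, most notably a file the changeset removes: such a
path still appears in ``ctx.files()``, so only genuinely unknown paths
re-raise. A hedged sketch of that control flow, reusing the webutil
helpers converted later in this changeset::

    def resolve_diff_target(web):
        # Sketch only, not hgweb code; the helper names come from
        # mercurial/hgweb/webutil.py as shown further down.
        try:
            fctx = webutil.filectx(web.repo, web.req)
            return fctx, fctx.changectx()
        except LookupError:
            ctx = webutil.changectx(web.repo, web.req)
            path = webutil.cleanpath(web.repo, web.req.qsparams[b'file'])
            if path not in ctx.files():
                raise  # path unknown to this changeset
            return None, ctx  # e.g. file removed by this changeset
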
@@ -810,9 +857,9 @@
         ctx = fctx.changectx()
     basectx = ctx.p1()
 
-    style = web.config('web', 'style')
-    if 'style' in web.req.qsparams:
-        style = web.req.qsparams['style']
+    style = web.config(b'web', b'style')
+    if b'style' in web.req.qsparams:
+        style = web.req.qsparams[b'style']
 
     diffs = webutil.diffs(web, ctx, basectx, [path], style)
     if fctx is not None:
@@ -823,16 +870,19 @@
         ctx = ctx
 
     return web.sendtemplate(
-        'filediff',
+        b'filediff',
         file=path,
         symrev=webutil.symrevorshortnode(web.req, ctx),
         rename=rename,
         diff=diffs,
-        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))
+        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))
+    )
+
 
-diff = webcommand('diff')(filediff)
+diff = webcommand(b'diff')(filediff)
 
-@webcommand('comparison')
+
+@webcommand(b'comparison')
 def comparison(web):
     """
     /comparison/{revision}/{path}
@@ -850,22 +900,23 @@
     The ``filecomparison`` template is rendered.
     """
     ctx = webutil.changectx(web.repo, web.req)
-    if 'file' not in web.req.qsparams:
-        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
-    path = webutil.cleanpath(web.repo, web.req.qsparams['file'])
+    if b'file' not in web.req.qsparams:
+        raise ErrorResponse(HTTP_NOT_FOUND, b'file not given')
+    path = webutil.cleanpath(web.repo, web.req.qsparams[b'file'])
 
-    parsecontext = lambda v: v == 'full' and -1 or int(v)
-    if 'context' in web.req.qsparams:
-        context = parsecontext(web.req.qsparams['context'])
+    parsecontext = lambda v: v == b'full' and -1 or int(v)
+    if b'context' in web.req.qsparams:
+        context = parsecontext(web.req.qsparams[b'context'])
     else:
-        context = parsecontext(web.config('web', 'comparisoncontext'))
+        context = parsecontext(web.config(b'web', b'comparisoncontext'))
 
     def filelines(f):
         if f.isbinary():
             mt = pycompat.sysbytes(
                 mimetypes.guess_type(pycompat.fsdecode(f.path()))[0]
-                or r'application/octet-stream')
-            return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
+                or r'application/octet-stream'
+            )
+            return [_(b'(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
         return f.data().splitlines()
 
     fctx = None
@@ -896,7 +947,7 @@
         ctx = ctx
 
     return web.sendtemplate(
-        'filecomparison',
+        b'filecomparison',
         file=path,
         symrev=webutil.symrevorshortnode(web.req, ctx),
         rename=rename,
@@ -905,9 +956,11 @@
         rightrev=rightrev,
         rightnode=hex(rightnode),
         comparison=comparison,
-        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))
+        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))
+    )
 
-@webcommand('annotate')
+
+@webcommand(b'annotate')
 def annotate(web):
     """
     /annotate/{revision}/{path}
@@ -934,14 +987,15 @@
     # TODO there are still redundant operations within basefilectx.parents()
     # and from the fctx.annotate() call itself that could be cached.
     parentscache = {}
+
     def parents(context, f):
         rev = f.rev()
         if rev not in parentscache:
             parentscache[rev] = []
             for p in f.parents():
                 entry = {
-                    'node': p.hex(),
-                    'rev': p.rev(),
+                    b'node': p.hex(),
+                    b'rev': p.rev(),
                 }
                 parentscache[rev].append(entry)
 
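The ``parentscache`` above exists because annotate yields one mapping
per line and many lines share a revision; computing the parent entries
once per revision avoids repeated ``parents()`` calls. As a standalone
sketch, with ``f`` assumed to be a filectx-like object exposing
``rev()`` and ``parents()``::

    parentscache = {}

    def cached_parents(f):
        rev = f.rev()
        if rev not in parentscache:
            parentscache[rev] = [
                {b'node': p.hex(), b'rev': p.rev()} for p in f.parents()
            ]
        return parentscache[rev]
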
@@ -952,9 +1006,15 @@
         if fctx.isbinary():
             mt = pycompat.sysbytes(
                 mimetypes.guess_type(pycompat.fsdecode(fctx.path()))[0]
-                or r'application/octet-stream')
-            lines = [dagop.annotateline(fctx=fctx.filectx(fctx.filerev()),
-                                        lineno=1, text='(binary:%s)' % mt)]
+                or r'application/octet-stream'
+            )
+            lines = [
+                dagop.annotateline(
+                    fctx=fctx.filectx(fctx.filerev()),
+                    lineno=1,
+                    text=b'(binary:%s)' % mt,
+                )
+            ]
         else:
             lines = webutil.annotate(web.req, fctx, web.repo.ui)
 
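Binary files cannot be annotated line by line, so the branch above
substitutes a single synthetic line carrying the guessed MIME type. The
fallback itself is plain stdlib behaviour::

    import mimetypes

    def guessed_type(path):
        # guess_type() returns (None, None) for unknown extensions,
        # hence the explicit octet-stream fallback.
        return mimetypes.guess_type(path)[0] or 'application/octet-stream'

    assert guessed_type('notes.txt') == 'text/plain'
    assert guessed_type('blob.xyz123') == 'application/octet-stream'
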
@@ -969,28 +1029,30 @@
             else:
                 blockhead = None
             previousrev = rev
-            yield {"parity": next(parity),
-                   "node": f.hex(),
-                   "rev": rev,
-                   "author": f.user(),
-                   "parents": templateutil.mappinggenerator(parents, args=(f,)),
-                   "desc": f.description(),
-                   "extra": f.extra(),
-                   "file": f.path(),
-                   "blockhead": blockhead,
-                   "blockparity": blockparity,
-                   "targetline": aline.lineno,
-                   "line": aline.text,
-                   "lineno": lineno + 1,
-                   "lineid": "l%d" % (lineno + 1),
-                   "linenumber": "% 6d" % (lineno + 1),
-                   "revdate": f.date()}
+            yield {
+                b"parity": next(parity),
+                b"node": f.hex(),
+                b"rev": rev,
+                b"author": f.user(),
+                b"parents": templateutil.mappinggenerator(parents, args=(f,)),
+                b"desc": f.description(),
+                b"extra": f.extra(),
+                b"file": f.path(),
+                b"blockhead": blockhead,
+                b"blockparity": blockparity,
+                b"targetline": aline.lineno,
+                b"line": aline.text,
+                b"lineno": lineno + 1,
+                b"lineid": b"l%d" % (lineno + 1),
+                b"linenumber": b"% 6d" % (lineno + 1),
+                b"revdate": f.date(),
+            }
 
-    diffopts = webutil.difffeatureopts(web.req, web.repo.ui, 'annotate')
+    diffopts = webutil.difffeatureopts(web.req, web.repo.ui, b'annotate')
     diffopts = {k: getattr(diffopts, k) for k in diffopts.defaults}
 
     return web.sendtemplate(
-        'fileannotate',
+        b'fileannotate',
         file=f,
         annotate=templateutil.mappinggenerator(annotate),
         path=webutil.up(f),
@@ -999,9 +1061,11 @@
         permissions=fctx.manifest().flags(f),
         ishead=int(ishead),
         diffopts=templateutil.hybriddict(diffopts),
-        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
+        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx))
+    )
 
-@webcommand('filelog')
+
+@webcommand(b'filelog')
 def filelog(web):
     """
     /filelog/{revision}/{path}
@@ -1020,14 +1084,14 @@
         f = fctx.path()
         fl = fctx.filelog()
     except error.LookupError:
-        f = webutil.cleanpath(web.repo, web.req.qsparams['file'])
+        f = webutil.cleanpath(web.repo, web.req.qsparams[b'file'])
         fl = web.repo.file(f)
         numrevs = len(fl)
-        if not numrevs: # file doesn't exist at all
+        if not numrevs:  # file doesn't exist at all
             raise
         rev = webutil.changectx(web.repo, web.req).rev()
         first = fl.linkrev(0)
-        if rev < first: # current rev is from before file existed
+        if rev < first:  # current rev is from before file existed
             raise
         frev = numrevs - 1
         while fl.linkrev(frev) > rev:
@@ -1035,50 +1099,61 @@
         fctx = web.repo.filectx(f, fl.linkrev(frev))
 
     revcount = web.maxshortchanges
-    if 'revcount' in web.req.qsparams:
+    if b'revcount' in web.req.qsparams:
         try:
-            revcount = int(web.req.qsparams.get('revcount', revcount))
+            revcount = int(web.req.qsparams.get(b'revcount', revcount))
             revcount = max(revcount, 1)
-            web.tmpl.defaults['sessionvars']['revcount'] = revcount
+            web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount
         except ValueError:
             pass
 
     lrange = webutil.linerange(web.req)
 
-    lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
-    lessvars['revcount'] = max(revcount // 2, 1)
-    morevars = copy.copy(web.tmpl.defaults['sessionvars'])
-    morevars['revcount'] = revcount * 2
+    lessvars = copy.copy(web.tmpl.defaults[b'sessionvars'])
+    lessvars[b'revcount'] = max(revcount // 2, 1)
+    morevars = copy.copy(web.tmpl.defaults[b'sessionvars'])
+    morevars[b'revcount'] = revcount * 2
 
-    patch = 'patch' in web.req.qsparams
+    patch = b'patch' in web.req.qsparams
     if patch:
-        lessvars['patch'] = morevars['patch'] = web.req.qsparams['patch']
-    descend = 'descend' in web.req.qsparams
+        lessvars[b'patch'] = morevars[b'patch'] = web.req.qsparams[b'patch']
+    descend = b'descend' in web.req.qsparams
     if descend:
-        lessvars['descend'] = morevars['descend'] = web.req.qsparams['descend']
+        lessvars[b'descend'] = morevars[b'descend'] = web.req.qsparams[
+            b'descend'
+        ]
 
     count = fctx.filerev() + 1
-    start = max(0, count - revcount) # first rev on this page
-    end = min(count, start + revcount) # last rev on this page
+    start = max(0, count - revcount)  # first rev on this page
+    end = min(count, start + revcount)  # last rev on this page
     parity = paritygen(web.stripecount, offset=start - end)
 
     repo = web.repo
     filelog = fctx.filelog()
-    revs = [filerev for filerev in filelog.revs(start, end - 1)
-            if filelog.linkrev(filerev) in repo]
+    revs = [
+        filerev
+        for filerev in filelog.revs(start, end - 1)
+        if filelog.linkrev(filerev) in repo
+    ]
     entries = []
 
-    diffstyle = web.config('web', 'style')
-    if 'style' in web.req.qsparams:
-        diffstyle = web.req.qsparams['style']
+    diffstyle = web.config(b'web', b'style')
+    if b'style' in web.req.qsparams:
+        diffstyle = web.req.qsparams[b'style']
 
     def diff(fctx, linerange=None):
         ctx = fctx.changectx()
         basectx = ctx.p1()
         path = fctx.path()
-        return webutil.diffs(web, ctx, basectx, [path], diffstyle,
-                             linerange=linerange,
-                             lineidprefix='%s-' % ctx.hex()[:12])
+        return webutil.diffs(
+            web,
+            ctx,
+            basectx,
+            [path],
+            diffstyle,
+            linerange=linerange,
+            lineidprefix=b'%s-' % ctx.hex()[:12],
+        )
 
     linerange = None
     if lrange is not None:
@@ -1097,19 +1172,21 @@
             # follow renames across filtered (not in range) revisions
             path = c.path()
             lm = webutil.commonentry(repo, c)
-            lm.update({
-                'parity': next(parity),
-                'filerev': c.rev(),
-                'file': path,
-                'diff': diffs,
-                'linerange': webutil.formatlinerange(*lr),
-                'rename': templateutil.mappinglist([]),
-            })
+            lm.update(
+                {
+                    b'parity': next(parity),
+                    b'filerev': c.rev(),
+                    b'file': path,
+                    b'diff': diffs,
+                    b'linerange': webutil.formatlinerange(*lr),
+                    b'rename': templateutil.mappinglist([]),
+                }
+            )
             entries.append(lm)
             if i == revcount:
                 break
-        lessvars['linerange'] = webutil.formatlinerange(*lrange)
-        morevars['linerange'] = lessvars['linerange']
+        lessvars[b'linerange'] = webutil.formatlinerange(*lrange)
+        morevars[b'linerange'] = lessvars[b'linerange']
     else:
         for i in revs:
             iterfctx = fctx.filectx(i)
@@ -1117,13 +1194,15 @@
             if patch:
                 diffs = diff(iterfctx)
             lm = webutil.commonentry(repo, iterfctx)
-            lm.update({
-                'parity': next(parity),
-                'filerev': i,
-                'file': f,
-                'diff': diffs,
-                'rename': webutil.renamelink(iterfctx),
-            })
+            lm.update(
+                {
+                    b'parity': next(parity),
+                    b'filerev': i,
+                    b'file': f,
+                    b'diff': diffs,
+                    b'rename': webutil.renamelink(iterfctx),
+                }
+            )
             entries.append(lm)
         entries.reverse()
         revnav = webutil.filerevnav(web.repo, fctx.path())
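
The start/end computation earlier in this handler always clamps to the
newest window of at most ``revcount`` file revisions; a worked check of
that arithmetic::

    def page_window(count, revcount):
        start = max(0, count - revcount)    # first rev on this page
        end = min(count, start + revcount)  # last rev on this page
        return start, end

    assert page_window(100, 10) == (90, 100)
    assert page_window(5, 10) == (0, 5)
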
@@ -1132,7 +1211,7 @@
     latestentry = entries[:1]
 
     return web.sendtemplate(
-        'filelog',
+        b'filelog',
         file=f,
         nav=nav,
         symrev=webutil.symrevorshortnode(web.req, fctx),
@@ -1144,9 +1223,11 @@
         revcount=revcount,
         morevars=morevars,
         lessvars=lessvars,
-        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
+        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx))
+    )
 
-@webcommand('archive')
+
+@webcommand(b'archive')
 def archive(web):
     """
     /archive/{revision}.{format}[/{path}]
@@ -1167,76 +1248,88 @@
     No template is used for this handler. Raw, binary content is generated.
     """
 
-    type_ = web.req.qsparams.get('type')
-    allowed = web.configlist("web", "allow-archive")
-    key = web.req.qsparams['node']
+    type_ = web.req.qsparams.get(b'type')
+    allowed = web.configlist(b"web", b"allow-archive")
+    key = web.req.qsparams[b'node']
 
     if type_ not in webutil.archivespecs:
-        msg = 'Unsupported archive type: %s' % stringutil.pprint(type_)
+        msg = b'Unsupported archive type: %s' % stringutil.pprint(type_)
         raise ErrorResponse(HTTP_NOT_FOUND, msg)
 
-    if not ((type_ in allowed or
-             web.configbool("web", "allow" + type_))):
-        msg = 'Archive type not allowed: %s' % type_
+    if not ((type_ in allowed or web.configbool(b"web", b"allow" + type_))):
+        msg = b'Archive type not allowed: %s' % type_
         raise ErrorResponse(HTTP_FORBIDDEN, msg)
 
-    reponame = re.sub(br"\W+", "-", os.path.basename(web.reponame))
+    reponame = re.sub(br"\W+", b"-", os.path.basename(web.reponame))
     cnode = web.repo.lookup(key)
     arch_version = key
-    if cnode == key or key == 'tip':
+    if cnode == key or key == b'tip':
         arch_version = short(cnode)
-    name = "%s-%s" % (reponame, arch_version)
+    name = b"%s-%s" % (reponame, arch_version)
 
     ctx = webutil.changectx(web.repo, web.req)
     pats = []
     match = scmutil.match(ctx, [])
-    file = web.req.qsparams.get('file')
+    file = web.req.qsparams.get(b'file')
     if file:
-        pats = ['path:' + file]
-        match = scmutil.match(ctx, pats, default='path')
+        pats = [b'path:' + file]
+        match = scmutil.match(ctx, pats, default=b'path')
         if pats:
             files = [f for f in ctx.manifest().keys() if match(f)]
             if not files:
-                raise ErrorResponse(HTTP_NOT_FOUND,
-                    'file(s) not found: %s' % file)
+                raise ErrorResponse(
+                    HTTP_NOT_FOUND, b'file(s) not found: %s' % file
+                )
 
     mimetype, artype, extension, encoding = webutil.archivespecs[type_]
 
-    web.res.headers['Content-Type'] = mimetype
-    web.res.headers['Content-Disposition'] = 'attachment; filename=%s%s' % (
-        name, extension)
+    web.res.headers[b'Content-Type'] = mimetype
+    web.res.headers[b'Content-Disposition'] = b'attachment; filename=%s%s' % (
+        name,
+        extension,
+    )
 
     if encoding:
-        web.res.headers['Content-Encoding'] = encoding
+        web.res.headers[b'Content-Encoding'] = encoding
 
     web.res.setbodywillwrite()
     if list(web.res.sendresponse()):
-        raise error.ProgrammingError('sendresponse() should not emit data '
-                                     'if writing later')
+        raise error.ProgrammingError(
+            b'sendresponse() should not emit data if writing later'
+        )
 
     bodyfh = web.res.getbodyfile()
 
-    archival.archive(web.repo, bodyfh, cnode, artype, prefix=name, match=match,
-                     subrepos=web.configbool("web", "archivesubrepos"))
+    archival.archive(
+        web.repo,
+        bodyfh,
+        cnode,
+        artype,
+        prefix=name,
+        match=match,
+        subrepos=web.configbool(b"web", b"archivesubrepos"),
+    )
 
     return []
 
-@webcommand('static')
+
+@webcommand(b'static')
 def static(web):
-    fname = web.req.qsparams['file']
+    fname = web.req.qsparams[b'file']
     # a repo owner may set web.static in .hg/hgrc to serve any file
     # readable by the user running the CGI script
-    static = web.config("web", "static", untrusted=False)
+    static = web.config(b"web", b"static", untrusted=False)
     if not static:
         tp = web.templatepath or templater.templatepaths()
         if isinstance(tp, str):
             tp = [tp]
-        static = [os.path.join(p, 'static') for p in tp]
+        static = [os.path.join(p, b'static') for p in tp]
 
     staticfile(static, fname, web.res)
     return web.res.sendresponse()
 
-@webcommand('graph')
+
+@webcommand(b'graph')
 def graph(web):
     """
     /graph[/{revision}]
@@ -1260,32 +1353,32 @@
     This handler will render the ``graph`` template.
     """
 
-    if 'node' in web.req.qsparams:
+    if b'node' in web.req.qsparams:
         ctx = webutil.changectx(web.repo, web.req)
         symrev = webutil.symrevorshortnode(web.req, ctx)
     else:
-        ctx = web.repo['tip']
-        symrev = 'tip'
+        ctx = web.repo[b'tip']
+        symrev = b'tip'
     rev = ctx.rev()
 
     bg_height = 39
     revcount = web.maxshortchanges
-    if 'revcount' in web.req.qsparams:
+    if b'revcount' in web.req.qsparams:
         try:
-            revcount = int(web.req.qsparams.get('revcount', revcount))
+            revcount = int(web.req.qsparams.get(b'revcount', revcount))
             revcount = max(revcount, 1)
-            web.tmpl.defaults['sessionvars']['revcount'] = revcount
+            web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount
         except ValueError:
             pass
 
-    lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
-    lessvars['revcount'] = max(revcount // 2, 1)
-    morevars = copy.copy(web.tmpl.defaults['sessionvars'])
-    morevars['revcount'] = revcount * 2
+    lessvars = copy.copy(web.tmpl.defaults[b'sessionvars'])
+    lessvars[b'revcount'] = max(revcount // 2, 1)
+    morevars = copy.copy(web.tmpl.defaults[b'sessionvars'])
+    morevars[b'revcount'] = revcount * 2
 
-    graphtop = web.req.qsparams.get('graphtop', ctx.hex())
-    graphvars = copy.copy(web.tmpl.defaults['sessionvars'])
-    graphvars['graphtop'] = graphtop
+    graphtop = web.req.qsparams.get(b'graphtop', ctx.hex())
+    graphvars = copy.copy(web.tmpl.defaults[b'sessionvars'])
+    graphvars[b'graphtop'] = graphtop
 
     count = len(web.repo)
     pos = rev
@@ -1316,8 +1409,11 @@
         # since hgweb graphing code is not itself lazy yet.
         dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
         # As we said one line above... not lazy.
-        tree = list(item for item in graphmod.colored(dag, web.repo)
-                    if item[1] == graphmod.CHANGESET)
+        tree = list(
+            item
+            for item in graphmod.colored(dag, web.repo)
+            if item[1] == graphmod.CHANGESET
+        )
 
     def fulltree():
         pos = web.repo[graphtop].rev()
@@ -1325,41 +1421,54 @@
         if pos != -1:
             revs = web.repo.changelog.revs(pos, lastrev)
             dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
-            tree = list(item for item in graphmod.colored(dag, web.repo)
-                        if item[1] == graphmod.CHANGESET)
+            tree = list(
+                item
+                for item in graphmod.colored(dag, web.repo)
+                if item[1] == graphmod.CHANGESET
+            )
         return tree
 
     def jsdata(context):
         for (id, type, ctx, vtx, edges) in fulltree():
-            yield {'node': pycompat.bytestr(ctx),
-                   'graphnode': webutil.getgraphnode(web.repo, ctx),
-                   'vertex': vtx,
-                   'edges': edges}
+            yield {
+                b'node': pycompat.bytestr(ctx),
+                b'graphnode': webutil.getgraphnode(web.repo, ctx),
+                b'vertex': vtx,
+                b'edges': edges,
+            }
 
     def nodes(context):
         parity = paritygen(web.stripecount)
         for row, (id, type, ctx, vtx, edges) in enumerate(tree):
             entry = webutil.commonentry(web.repo, ctx)
-            edgedata = [{'col': edge[0],
-                         'nextcol': edge[1],
-                         'color': (edge[2] - 1) % 6 + 1,
-                         'width': edge[3],
-                         'bcolor': edge[4]}
-                        for edge in edges]
+            edgedata = [
+                {
+                    b'col': edge[0],
+                    b'nextcol': edge[1],
+                    b'color': (edge[2] - 1) % 6 + 1,
+                    b'width': edge[3],
+                    b'bcolor': edge[4],
+                }
+                for edge in edges
+            ]
 
-            entry.update({'col': vtx[0],
-                          'color': (vtx[1] - 1) % 6 + 1,
-                          'parity': next(parity),
-                          'edges': templateutil.mappinglist(edgedata),
-                          'row': row,
-                          'nextrow': row + 1})
+            entry.update(
+                {
+                    b'col': vtx[0],
+                    b'color': (vtx[1] - 1) % 6 + 1,
+                    b'parity': next(parity),
+                    b'edges': templateutil.mappinglist(edgedata),
+                    b'row': row,
+                    b'nextrow': row + 1,
+                }
+            )
 
             yield entry
 
     rows = len(tree)
 
     return web.sendtemplate(
-        'graph',
+        b'graph',
         rev=rev,
         symrev=symrev,
         revcount=revcount,
@@ -1375,18 +1484,21 @@
         jsdata=templateutil.mappinggenerator(jsdata),
         nodes=templateutil.mappinggenerator(nodes),
         node=ctx.hex(),
-        archives=web.archivelist('tip'),
-        changenav=changenav)
+        archives=web.archivelist(b'tip'),
+        changenav=changenav,
+    )
+
 
 def _getdoc(e):
     doc = e[0].__doc__
     if doc:
-        doc = _(doc).partition('\n')[0]
+        doc = _(doc).partition(b'\n')[0]
     else:
-        doc = _('(no help text available)')
+        doc = _(b'(no help text available)')
     return doc
 
-@webcommand('help')
+
+@webcommand(b'help')
 def help(web):
     """
     /help[/{topic}]
@@ -1403,18 +1515,19 @@
     """
     from .. import commands, help as helpmod  # avoid cycle
 
-    topicname = web.req.qsparams.get('node')
+    topicname = web.req.qsparams.get(b'node')
     if not topicname:
+
         def topics(context):
             for h in helpmod.helptable:
                 entries, summary, _doc = h[0:3]
-                yield {'topic': entries[0], 'summary': summary}
+                yield {b'topic': entries[0], b'summary': summary}
 
         early, other = [], []
-        primary = lambda s: s.partition('|')[0]
-        for c, e in commands.table.iteritems():
+        primary = lambda s: s.partition(b'|')[0]
+        for c, e in pycompat.iteritems(commands.table):
             doc = _getdoc(e)
-            if 'DEPRECATED' in doc or c.startswith('debug'):
+            if b'DEPRECATED' in doc or c.startswith(b'debug'):
                 continue
             cmd = primary(c)
             if getattr(e[0], 'helpbasic', False):
@@ -1427,43 +1540,47 @@
 
         def earlycommands(context):
             for c, doc in early:
-                yield {'topic': c, 'summary': doc}
+                yield {b'topic': c, b'summary': doc}
 
         def othercommands(context):
             for c, doc in other:
-                yield {'topic': c, 'summary': doc}
+                yield {b'topic': c, b'summary': doc}
 
         return web.sendtemplate(
-            'helptopics',
+            b'helptopics',
             topics=templateutil.mappinggenerator(topics),
             earlycommands=templateutil.mappinggenerator(earlycommands),
             othercommands=templateutil.mappinggenerator(othercommands),
-            title='Index')
+            title=b'Index',
+        )
 
     # Render an index of sub-topics.
     if topicname in helpmod.subtopics:
         topics = []
         for entries, summary, _doc in helpmod.subtopics[topicname]:
-            topics.append({
-                'topic': '%s.%s' % (topicname, entries[0]),
-                'basename': entries[0],
-                'summary': summary,
-            })
+            topics.append(
+                {
+                    b'topic': b'%s.%s' % (topicname, entries[0]),
+                    b'basename': entries[0],
+                    b'summary': summary,
+                }
+            )
 
         return web.sendtemplate(
-            'helptopics',
+            b'helptopics',
             topics=templateutil.mappinglist(topics),
             title=topicname,
-            subindex=True)
+            subindex=True,
+        )
 
     u = webutil.wsgiui.load()
     u.verbose = True
 
     # Render a page from a sub-topic.
-    if '.' in topicname:
+    if b'.' in topicname:
         # TODO implement support for rendering sections, like
         # `hg help` works.
-        topic, subtopic = topicname.split('.', 1)
+        topic, subtopic = topicname.split(b'.', 1)
         if topic not in helpmod.subtopics:
             raise ErrorResponse(HTTP_NOT_FOUND)
     else:
@@ -1475,10 +1592,8 @@
     except error.Abort:
         raise ErrorResponse(HTTP_NOT_FOUND)
 
-    return web.sendtemplate(
-        'help',
-        topic=topicname,
-        doc=doc)
+    return web.sendtemplate(b'help', topic=topicname, doc=doc)
+
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = commands.values()
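
Most of the churn in this file is mechanical: string literals become
bytes, dict iteration goes through ``pycompat.iteritems``, and
byte-keyed mappings pass through ``pycompat.strkwargs`` before being
splatted, since Python 3 requires ``str`` keyword names. Stand-ins
mirroring the intent of those two helpers (not Mercurial's exact
implementation)::

    import sys

    def strkwargs(dic):
        # **kwargs keys must be str on Python 3; hgweb maps use bytes
        if sys.version_info[0] >= 3:
            return {k.decode('latin-1'): v for k, v in dic.items()}
        return dic

    def iteritems(dic):
        # items() is a list on Python 2 and a view on Python 3;
        # both iterate the same way
        return iter(dic.items())
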
--- a/mercurial/hgweb/webutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hgweb/webutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,6 +15,7 @@
 
 from ..i18n import _
 from ..node import hex, nullid, short
+from ..pycompat import setattr
 
 from .common import (
     ErrorResponse,
@@ -41,41 +42,47 @@
     util,
 )
 
-from ..utils import (
-    stringutil,
+from ..utils import stringutil
+
+archivespecs = util.sortdict(
+    (
+        (b'zip', (b'application/zip', b'zip', b'.zip', None)),
+        (b'gz', (b'application/x-gzip', b'tgz', b'.tar.gz', None)),
+        (b'bz2', (b'application/x-bzip2', b'tbz2', b'.tar.bz2', None)),
+    )
 )
 
-archivespecs = util.sortdict((
-    ('zip', ('application/zip', 'zip', '.zip', None)),
-    ('gz', ('application/x-gzip', 'tgz', '.tar.gz', None)),
-    ('bz2', ('application/x-bzip2', 'tbz2', '.tar.bz2', None)),
-))
 
 def archivelist(ui, nodeid, url=None):
-    allowed = ui.configlist('web', 'allow-archive', untrusted=True)
+    allowed = ui.configlist(b'web', b'allow-archive', untrusted=True)
     archives = []
 
-    for typ, spec in archivespecs.iteritems():
-        if typ in allowed or ui.configbool('web', 'allow' + typ,
-                                           untrusted=True):
-            archives.append({
-                'type': typ,
-                'extension': spec[2],
-                'node': nodeid,
-                'url': url,
-            })
+    for typ, spec in pycompat.iteritems(archivespecs):
+        if typ in allowed or ui.configbool(
+            b'web', b'allow' + typ, untrusted=True
+        ):
+            archives.append(
+                {
+                    b'type': typ,
+                    b'extension': spec[2],
+                    b'node': nodeid,
+                    b'url': url,
+                }
+            )
 
     return templateutil.mappinglist(archives)
 
+
 def up(p):
-    if p[0:1] != "/":
-        p = "/" + p
-    if p[-1:] == "/":
+    if p[0:1] != b"/":
+        p = b"/" + p
+    if p[-1:] == b"/":
         p = p[:-1]
     up = os.path.dirname(p)
-    if up == "/":
-        return "/"
-    return up + "/"
+    if up == b"/":
+        return b"/"
+    return up + b"/"
+
 
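The ``up()`` helper just reformatted above computes the
parent-directory link for the browse views; its contract is easiest to
state by example (inputs need not start or end with a slash, results
always end with one, and the root is ``b"/"``)::

    # Doctest-style illustration of the up() helper defined above.
    assert up(b"foo/bar") == b"/foo/"
    assert up(b"/foo/bar/") == b"/foo/"
    assert up(b"foo") == b"/"
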
 def _navseq(step, firststep=None):
     if firststep:
@@ -92,8 +99,8 @@
         yield 3 * step
         step *= 10
 
+
 class revnav(object):
-
     def __init__(self, repo):
         """Navigation generation object
 
@@ -132,10 +139,14 @@
         """
         if not self:
             # empty repo
-            return templateutil.mappinglist([
-                {'before': templateutil.mappinglist([]),
-                 'after': templateutil.mappinglist([])},
-            ])
+            return templateutil.mappinglist(
+                [
+                    {
+                        b'before': templateutil.mappinglist([]),
+                        b'after': templateutil.mappinglist([]),
+                    },
+                ]
+            )
 
         targets = []
         for f in _navseq(1, pagelen):
@@ -146,28 +157,34 @@
         targets.sort()
 
         first = self._first()
-        navbefore = [{'label': '(%i)' % first, 'node': self.hex(first)}]
+        navbefore = [{b'label': b'(%i)' % first, b'node': self.hex(first)}]
         navafter = []
         for rev in targets:
             if rev not in self._revlog:
                 continue
             if pos < rev < limit:
-                navafter.append({'label': '+%d' % abs(rev - pos),
-                                 'node': self.hex(rev)})
+                navafter.append(
+                    {b'label': b'+%d' % abs(rev - pos), b'node': self.hex(rev)}
+                )
             if 0 < rev < pos:
-                navbefore.append({'label': '-%d' % abs(rev - pos),
-                                  'node': self.hex(rev)})
+                navbefore.append(
+                    {b'label': b'-%d' % abs(rev - pos), b'node': self.hex(rev)}
+                )
 
-        navafter.append({'label': 'tip', 'node': 'tip'})
+        navafter.append({b'label': b'tip', b'node': b'tip'})
 
         # TODO: maybe this can be a scalar object supporting tomap()
-        return templateutil.mappinglist([
-            {'before': templateutil.mappinglist(navbefore),
-             'after': templateutil.mappinglist(navafter)},
-        ])
+        return templateutil.mappinglist(
+            [
+                {
+                    b'before': templateutil.mappinglist(navbefore),
+                    b'after': templateutil.mappinglist(navafter),
+                },
+            ]
+        )
+
 
 class filerevnav(revnav):
-
     def __init__(self, repo, path):
         """Navigation generation object
 
@@ -182,22 +199,24 @@
     def hex(self, rev):
         return hex(self._changelog.node(self._revlog.linkrev(rev)))
 
+
 # TODO: maybe this can be a wrapper class for changectx/filectx list, which
 # yields {'ctx': ctx}
 def _ctxsgen(context, ctxs):
     for s in ctxs:
         d = {
-            'node': s.hex(),
-            'rev': s.rev(),
-            'user': s.user(),
-            'date': s.date(),
-            'description': s.description(),
-            'branch': s.branch(),
+            b'node': s.hex(),
+            b'rev': s.rev(),
+            b'user': s.user(),
+            b'date': s.date(),
+            b'description': s.description(),
+            b'branch': s.branch(),
         }
-        if util.safehasattr(s, 'path'):
-            d['file'] = s.path()
+        if util.safehasattr(s, b'path'):
+            d[b'file'] = s.path()
         yield d
 
+
 def _siblings(siblings=None, hiderev=None):
     if siblings is None:
         siblings = []
@@ -206,11 +225,18 @@
         siblings = []
     return templateutil.mappinggenerator(_ctxsgen, args=(siblings,))
 
+
 def difffeatureopts(req, ui, section):
-    diffopts = diffutil.difffeatureopts(ui, untrusted=True,
-                                        section=section, whitespace=True)
+    diffopts = diffutil.difffeatureopts(
+        ui, untrusted=True, section=section, whitespace=True
+    )
 
-    for k in ('ignorews', 'ignorewsamount', 'ignorewseol', 'ignoreblanklines'):
+    for k in (
+        b'ignorews',
+        b'ignorewsamount',
+        b'ignorewseol',
+        b'ignoreblanklines',
+    ):
         v = req.qsparams.get(k)
         if v is not None:
             v = stringutil.parsebool(v)
@@ -218,10 +244,12 @@
 
     return diffopts
 
+
 def annotate(req, fctx, ui):
-    diffopts = difffeatureopts(req, ui, 'annotate')
+    diffopts = difffeatureopts(req, ui, b'annotate')
     return fctx.annotate(follow=True, diffopts=diffopts)
 
+
 def parents(ctx, hide=None):
     if isinstance(ctx, context.basefilectx):
         introrev = ctx.introrev()
@@ -229,20 +257,25 @@
             return _siblings([ctx.repo()[introrev]], hide)
     return _siblings(ctx.parents(), hide)
 
+
 def children(ctx, hide=None):
     return _siblings(ctx.children(), hide)
 
+
 def renamelink(fctx):
     r = fctx.renamed()
     if r:
-        return templateutil.mappinglist([{'file': r[0], 'node': hex(r[1])}])
+        return templateutil.mappinglist([{b'file': r[0], b'node': hex(r[1])}])
     return templateutil.mappinglist([])
 
+
 def nodetagsdict(repo, node):
-    return templateutil.hybridlist(repo.nodetags(node), name='name')
+    return templateutil.hybridlist(repo.nodetags(node), name=b'name')
+
 
 def nodebookmarksdict(repo, node):
-    return templateutil.hybridlist(repo.nodebookmarks(node), name='name')
+    return templateutil.hybridlist(repo.nodebookmarks(node), name=b'name')
+
 
 def nodebranchdict(repo, ctx):
     branches = []
@@ -255,7 +288,8 @@
         branchnode = None
     if branchnode == ctx.node():
         branches.append(branch)
-    return templateutil.hybridlist(branches, name='name')
+    return templateutil.hybridlist(branches, name=b'name')
+
 
 def nodeinbranch(repo, ctx):
     branches = []
@@ -264,29 +298,34 @@
         branchnode = repo.branchtip(branch)
     except error.RepoLookupError:
         branchnode = None
-    if branch != 'default' and branchnode != ctx.node():
+    if branch != b'default' and branchnode != ctx.node():
         branches.append(branch)
-    return templateutil.hybridlist(branches, name='name')
+    return templateutil.hybridlist(branches, name=b'name')
+
 
 def nodebranchnodefault(ctx):
     branches = []
     branch = ctx.branch()
-    if branch != 'default':
+    if branch != b'default':
         branches.append(branch)
-    return templateutil.hybridlist(branches, name='name')
+    return templateutil.hybridlist(branches, name=b'name')
+
 
 def _nodenamesgen(context, f, node, name):
     for t in f(node):
         yield {name: t}
 
+
 def showtag(repo, t1, node=nullid):
-    args = (repo.nodetags, node, 'tag')
+    args = (repo.nodetags, node, b'tag')
     return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
 
+
 def showbookmark(repo, t1, node=nullid):
-    args = (repo.nodebookmarks, node, 'bookmark')
+    args = (repo.nodebookmarks, node, b'bookmark')
     return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
 
+
 def branchentries(repo, stripecount, limit=0):
     tips = []
     heads = repo.heads()
@@ -303,56 +342,60 @@
                 return
             count += 1
             if closed:
-                status = 'closed'
+                status = b'closed'
             elif ctx.node() not in heads:
-                status = 'inactive'
+                status = b'inactive'
             else:
-                status = 'open'
+                status = b'open'
             yield {
-                'parity': next(parity),
-                'branch': ctx.branch(),
-                'status': status,
-                'node': ctx.hex(),
-                'date': ctx.date()
+                b'parity': next(parity),
+                b'branch': ctx.branch(),
+                b'status': status,
+                b'node': ctx.hex(),
+                b'date': ctx.date(),
             }
 
     return templateutil.mappinggenerator(entries)
 
+
 def cleanpath(repo, path):
-    path = path.lstrip('/')
+    path = path.lstrip(b'/')
     auditor = pathutil.pathauditor(repo.root, realfs=False)
-    return pathutil.canonpath(repo.root, '', path, auditor=auditor)
+    return pathutil.canonpath(repo.root, b'', path, auditor=auditor)
+
 
 def changectx(repo, req):
-    changeid = "tip"
-    if 'node' in req.qsparams:
-        changeid = req.qsparams['node']
-        ipos = changeid.find(':')
+    changeid = b"tip"
+    if b'node' in req.qsparams:
+        changeid = req.qsparams[b'node']
+        ipos = changeid.find(b':')
         if ipos != -1:
-            changeid = changeid[(ipos + 1):]
+            changeid = changeid[(ipos + 1) :]
 
     return scmutil.revsymbol(repo, changeid)
 
+
 def basechangectx(repo, req):
-    if 'node' in req.qsparams:
-        changeid = req.qsparams['node']
-        ipos = changeid.find(':')
+    if b'node' in req.qsparams:
+        changeid = req.qsparams[b'node']
+        ipos = changeid.find(b':')
         if ipos != -1:
             changeid = changeid[:ipos]
             return scmutil.revsymbol(repo, changeid)
 
     return None
 
+
 def filectx(repo, req):
-    if 'file' not in req.qsparams:
-        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
-    path = cleanpath(repo, req.qsparams['file'])
-    if 'node' in req.qsparams:
-        changeid = req.qsparams['node']
-    elif 'filenode' in req.qsparams:
-        changeid = req.qsparams['filenode']
+    if b'file' not in req.qsparams:
+        raise ErrorResponse(HTTP_NOT_FOUND, b'file not given')
+    path = cleanpath(repo, req.qsparams[b'file'])
+    if b'node' in req.qsparams:
+        changeid = req.qsparams[b'node']
+    elif b'filenode' in req.qsparams:
+        changeid = req.qsparams[b'filenode']
     else:
-        raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
+        raise ErrorResponse(HTTP_NOT_FOUND, b'node or filenode not given')
     try:
         fctx = scmutil.revsymbol(repo, changeid)[path]
     except error.RepoError:
@@ -360,54 +403,62 @@
 
     return fctx
 
+
 def linerange(req):
-    linerange = req.qsparams.getall('linerange')
+    linerange = req.qsparams.getall(b'linerange')
     if not linerange:
         return None
     if len(linerange) > 1:
-        raise ErrorResponse(HTTP_BAD_REQUEST,
-                            'redundant linerange parameter')
+        raise ErrorResponse(HTTP_BAD_REQUEST, b'redundant linerange parameter')
     try:
-        fromline, toline = map(int, linerange[0].split(':', 1))
+        fromline, toline = map(int, linerange[0].split(b':', 1))
     except ValueError:
-        raise ErrorResponse(HTTP_BAD_REQUEST,
-                            'invalid linerange parameter')
+        raise ErrorResponse(HTTP_BAD_REQUEST, b'invalid linerange parameter')
     try:
         return util.processlinerange(fromline, toline)
     except error.ParseError as exc:
         raise ErrorResponse(HTTP_BAD_REQUEST, pycompat.bytestr(exc))
 
+
 def formatlinerange(fromline, toline):
-    return '%d:%d' % (fromline + 1, toline)
+    return b'%d:%d' % (fromline + 1, toline)
+
 
 def _succsandmarkersgen(context, mapping):
-    repo = context.resource(mapping, 'repo')
+    repo = context.resource(mapping, b'repo')
     itemmappings = templatekw.showsuccsandmarkers(context, mapping)
     for item in itemmappings.tovalue(context, mapping):
-        item['successors'] = _siblings(repo[successor]
-                                       for successor in item['successors'])
+        item[b'successors'] = _siblings(
+            repo[successor] for successor in item[b'successors']
+        )
         yield item
 
+
 def succsandmarkers(context, mapping):
     return templateutil.mappinggenerator(_succsandmarkersgen, args=(mapping,))
 
+
 # teach templater succsandmarkers is switched to (context, mapping) API
-succsandmarkers._requires = {'repo', 'ctx'}
+succsandmarkers._requires = {b'repo', b'ctx'}
+
 
 def _whyunstablegen(context, mapping):
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
 
     entries = obsutil.whyunstable(repo, ctx)
     for entry in entries:
-        if entry.get('divergentnodes'):
-            entry['divergentnodes'] = _siblings(entry['divergentnodes'])
+        if entry.get(b'divergentnodes'):
+            entry[b'divergentnodes'] = _siblings(entry[b'divergentnodes'])
         yield entry
 
+
 def whyunstable(context, mapping):
     return templateutil.mappinggenerator(_whyunstablegen, args=(mapping,))
 
-whyunstable._requires = {'repo', 'ctx'}
+
+whyunstable._requires = {b'repo', b'ctx'}
+
 
 def commonentry(repo, ctx):
     node = scmutil.binnode(ctx)
@@ -415,28 +466,30 @@
         # TODO: perhaps ctx.changectx() should be assigned if ctx is a
         # filectx, but I'm not entirely sure if that would always work because
         # fctx.parents() != fctx.changectx.parents() for example.
-        'ctx': ctx,
-        'rev': ctx.rev(),
-        'node': hex(node),
-        'author': ctx.user(),
-        'desc': ctx.description(),
-        'date': ctx.date(),
-        'extra': ctx.extra(),
-        'phase': ctx.phasestr(),
-        'obsolete': ctx.obsolete(),
-        'succsandmarkers': succsandmarkers,
-        'instabilities': templateutil.hybridlist(ctx.instabilities(),
-                                                 name='instability'),
-        'whyunstable': whyunstable,
-        'branch': nodebranchnodefault(ctx),
-        'inbranch': nodeinbranch(repo, ctx),
-        'branches': nodebranchdict(repo, ctx),
-        'tags': nodetagsdict(repo, node),
-        'bookmarks': nodebookmarksdict(repo, node),
-        'parent': lambda context, mapping: parents(ctx),
-        'child': lambda context, mapping: children(ctx),
+        b'ctx': ctx,
+        b'rev': ctx.rev(),
+        b'node': hex(node),
+        b'author': ctx.user(),
+        b'desc': ctx.description(),
+        b'date': ctx.date(),
+        b'extra': ctx.extra(),
+        b'phase': ctx.phasestr(),
+        b'obsolete': ctx.obsolete(),
+        b'succsandmarkers': succsandmarkers,
+        b'instabilities': templateutil.hybridlist(
+            ctx.instabilities(), name=b'instability'
+        ),
+        b'whyunstable': whyunstable,
+        b'branch': nodebranchnodefault(ctx),
+        b'inbranch': nodeinbranch(repo, ctx),
+        b'branches': nodebranchdict(repo, ctx),
+        b'tags': nodetagsdict(repo, node),
+        b'bookmarks': nodebookmarksdict(repo, node),
+        b'parent': lambda context, mapping: parents(ctx),
+        b'child': lambda context, mapping: children(ctx),
     }
 
+
 def changelistentry(web, ctx):
     '''Obtain a dictionary to be used for entries in a changelist.
 
@@ -446,19 +499,22 @@
     repo = web.repo
     rev = ctx.rev()
     n = scmutil.binnode(ctx)
-    showtags = showtag(repo, 'changelogtag', n)
+    showtags = showtag(repo, b'changelogtag', n)
     files = listfilediffs(ctx.files(), n, web.maxfiles)
 
     entry = commonentry(repo, ctx)
-    entry.update({
-        'allparents': lambda context, mapping: parents(ctx),
-        'parent': lambda context, mapping: parents(ctx, rev - 1),
-        'child': lambda context, mapping: children(ctx, rev + 1),
-        'changelogtag': showtags,
-        'files': files,
-    })
+    entry.update(
+        {
+            b'allparents': lambda context, mapping: parents(ctx),
+            b'parent': lambda context, mapping: parents(ctx, rev - 1),
+            b'child': lambda context, mapping: children(ctx, rev + 1),
+            b'changelogtag': showtags,
+            b'files': files,
+        }
+    )
     return entry
 
+
 def changelistentries(web, revs, maxcount, parityfn):
     """Emit up to N records for an iterable of revisions."""
     repo = web.repo
@@ -471,42 +527,49 @@
         count += 1
 
         entry = changelistentry(web, repo[rev])
-        entry['parity'] = next(parityfn)
+        entry[b'parity'] = next(parityfn)
 
         yield entry
 
+
 def symrevorshortnode(req, ctx):
-    if 'node' in req.qsparams:
-        return templatefilters.revescape(req.qsparams['node'])
+    if b'node' in req.qsparams:
+        return templatefilters.revescape(req.qsparams[b'node'])
     else:
         return short(scmutil.binnode(ctx))
 
+
 def _listfilesgen(context, ctx, stripecount):
     parity = paritygen(stripecount)
     for blockno, f in enumerate(ctx.files()):
-        template = 'filenodelink' if f in ctx else 'filenolink'
-        yield context.process(template, {
-            'node': ctx.hex(),
-            'file': f,
-            'blockno': blockno + 1,
-            'parity': next(parity),
-        })
+        template = b'filenodelink' if f in ctx else b'filenolink'
+        yield context.process(
+            template,
+            {
+                b'node': ctx.hex(),
+                b'file': f,
+                b'blockno': blockno + 1,
+                b'parity': next(parity),
+            },
+        )
+
 
 def changesetentry(web, ctx):
     '''Obtain a dictionary to be used to render the "changeset" template.'''
 
-    showtags = showtag(web.repo, 'changesettag', scmutil.binnode(ctx))
-    showbookmarks = showbookmark(web.repo, 'changesetbookmark',
-                                 scmutil.binnode(ctx))
+    showtags = showtag(web.repo, b'changesettag', scmutil.binnode(ctx))
+    showbookmarks = showbookmark(
+        web.repo, b'changesetbookmark', scmutil.binnode(ctx)
+    )
     showbranch = nodebranchnodefault(ctx)
 
     basectx = basechangectx(web.repo, web.req)
     if basectx is None:
         basectx = ctx.p1()
 
-    style = web.config('web', 'style')
-    if 'style' in web.req.qsparams:
-        style = web.req.qsparams['style']
+    style = web.config(b'web', b'style')
+    if b'style' in web.req.qsparams:
+        style = web.req.qsparams[b'style']
 
     diff = diffs(web, ctx, basectx, None, style)
 
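As in ``filediff`` above, a ``style`` query parameter overrides the
configured ``web.style``. A minimal sketch with plain-dict stand-ins
(the ``b'paper'`` fallback is an assumed default, not taken from this
patch)::

    def pick_style(config, qsparams):
        style = config.get(b'style', b'paper')
        return qsparams.get(b'style', style)

    assert pick_style({b'style': b'gitweb'}, {}) == b'gitweb'
    assert pick_style({b'style': b'gitweb'}, {b'style': b'raw'}) == b'raw'
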
@@ -521,43 +584,62 @@
         changesettag=showtags,
         changesetbookmark=showbookmarks,
         changesetbranch=showbranch,
-        files=templateutil.mappedgenerator(_listfilesgen,
-                                           args=(ctx, web.stripecount)),
+        files=templateutil.mappedgenerator(
+            _listfilesgen, args=(ctx, web.stripecount)
+        ),
         diffsummary=lambda context, mapping: diffsummary(diffstatsgen),
         diffstat=diffstats,
         archives=web.archivelist(ctx.hex()),
-        **pycompat.strkwargs(commonentry(web.repo, ctx)))
+        **pycompat.strkwargs(commonentry(web.repo, ctx))
+    )
+
 
 def _listfilediffsgen(context, files, node, max):
     for f in files[:max]:
-        yield context.process('filedifflink', {'node': hex(node), 'file': f})
+        yield context.process(b'filedifflink', {b'node': hex(node), b'file': f})
     if len(files) > max:
-        yield context.process('fileellipses', {})
+        yield context.process(b'fileellipses', {})
+
 
 def listfilediffs(files, node, max):
-    return templateutil.mappedgenerator(_listfilediffsgen,
-                                        args=(files, node, max))
+    return templateutil.mappedgenerator(
+        _listfilediffsgen, args=(files, node, max)
+    )
+
 
 def _prettyprintdifflines(context, lines, blockno, lineidprefix):
     for lineno, l in enumerate(lines, 1):
-        difflineno = "%d.%d" % (blockno, lineno)
-        if l.startswith('+'):
-            ltype = "difflineplus"
-        elif l.startswith('-'):
-            ltype = "difflineminus"
-        elif l.startswith('@'):
-            ltype = "difflineat"
+        difflineno = b"%d.%d" % (blockno, lineno)
+        if l.startswith(b'+'):
+            ltype = b"difflineplus"
+        elif l.startswith(b'-'):
+            ltype = b"difflineminus"
+        elif l.startswith(b'@'):
+            ltype = b"difflineat"
         else:
-            ltype = "diffline"
-        yield context.process(ltype, {
-            'line': l,
-            'lineno': lineno,
-            'lineid': lineidprefix + "l%s" % difflineno,
-            'linenumber': "% 8s" % difflineno,
-        })
+            ltype = b"diffline"
+        yield context.process(
+            ltype,
+            {
+                b'line': l,
+                b'lineno': lineno,
+                b'lineid': lineidprefix + b"l%s" % difflineno,
+                b'linenumber': b"% 8s" % difflineno,
+            },
+        )
+
 
-def _diffsgen(context, repo, ctx, basectx, files, style, stripecount,
-              linerange, lineidprefix):
+def _diffsgen(
+    context,
+    repo,
+    ctx,
+    basectx,
+    files,
+    style,
+    stripecount,
+    linerange,
+    lineidprefix,
+):
     if files:
         m = match.exact(files)
     else:
@@ -568,9 +650,9 @@
 
     diffhunks = patch.diffhunks(repo, basectx, ctx, m, opts=diffopts)
     for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1):
-        if style != 'raw':
+        if style != b'raw':
             header = header[1:]
-        lines = [h + '\n' for h in header]
+        lines = [h + b'\n' for h in header]
         for hunkrange, hunklines in hunks:
             if linerange is not None and hunkrange is not None:
                 s1, l1, s2, l2 = hunkrange
@@ -578,36 +660,49 @@
                     continue
             lines.extend(hunklines)
         if lines:
-            l = templateutil.mappedgenerator(_prettyprintdifflines,
-                                             args=(lines, blockno,
-                                                   lineidprefix))
+            l = templateutil.mappedgenerator(
+                _prettyprintdifflines, args=(lines, blockno, lineidprefix)
+            )
             yield {
-                'parity': next(parity),
-                'blockno': blockno,
-                'lines': l,
+                b'parity': next(parity),
+                b'blockno': blockno,
+                b'lines': l,
             }
 
-def diffs(web, ctx, basectx, files, style, linerange=None, lineidprefix=''):
-    args = (web.repo, ctx, basectx, files, style, web.stripecount,
-            linerange, lineidprefix)
-    return templateutil.mappinggenerator(_diffsgen, args=args, name='diffblock')
+
+def diffs(web, ctx, basectx, files, style, linerange=None, lineidprefix=b''):
+    args = (
+        web.repo,
+        ctx,
+        basectx,
+        files,
+        style,
+        web.stripecount,
+        linerange,
+        lineidprefix,
+    )
+    return templateutil.mappinggenerator(
+        _diffsgen, args=args, name=b'diffblock'
+    )
+
 
 def _compline(type, leftlineno, leftline, rightlineno, rightline):
-    lineid = leftlineno and ("l%d" % leftlineno) or ''
-    lineid += rightlineno and ("r%d" % rightlineno) or ''
-    llno = '%d' % leftlineno if leftlineno else ''
-    rlno = '%d' % rightlineno if rightlineno else ''
+    lineid = leftlineno and (b"l%d" % leftlineno) or b''
+    lineid += rightlineno and (b"r%d" % rightlineno) or b''
+    llno = b'%d' % leftlineno if leftlineno else b''
+    rlno = b'%d' % rightlineno if rightlineno else b''
     return {
-        'type': type,
-        'lineid': lineid,
-        'leftlineno': leftlineno,
-        'leftlinenumber': "% 6s" % llno,
-        'leftline': leftline or '',
-        'rightlineno': rightlineno,
-        'rightlinenumber': "% 6s" % rlno,
-        'rightline': rightline or '',
+        b'type': type,
+        b'lineid': lineid,
+        b'leftlineno': leftlineno,
+        b'leftlinenumber': b"% 6s" % llno,
+        b'leftline': leftline or b'',
+        b'rightlineno': rightlineno,
+        b'rightlinenumber': b"% 6s" % rlno,
+        b'rightline': rightline or b'',
     }
 
+
 def _getcompblockgen(context, leftlines, rightlines, opcodes):
     for type, llo, lhi, rlo, rhi in opcodes:
         type = pycompat.sysbytes(type)
@@ -615,63 +710,79 @@
         len2 = rhi - rlo
         count = min(len1, len2)
         for i in pycompat.xrange(count):
-            yield _compline(type=type,
-                            leftlineno=llo + i + 1,
-                            leftline=leftlines[llo + i],
-                            rightlineno=rlo + i + 1,
-                            rightline=rightlines[rlo + i])
+            yield _compline(
+                type=type,
+                leftlineno=llo + i + 1,
+                leftline=leftlines[llo + i],
+                rightlineno=rlo + i + 1,
+                rightline=rightlines[rlo + i],
+            )
         if len1 > len2:
             for i in pycompat.xrange(llo + count, lhi):
-                yield _compline(type=type,
-                                leftlineno=i + 1,
-                                leftline=leftlines[i],
-                                rightlineno=None,
-                                rightline=None)
+                yield _compline(
+                    type=type,
+                    leftlineno=i + 1,
+                    leftline=leftlines[i],
+                    rightlineno=None,
+                    rightline=None,
+                )
         elif len2 > len1:
             for i in pycompat.xrange(rlo + count, rhi):
-                yield _compline(type=type,
-                                leftlineno=None,
-                                leftline=None,
-                                rightlineno=i + 1,
-                                rightline=rightlines[i])
+                yield _compline(
+                    type=type,
+                    leftlineno=None,
+                    leftline=None,
+                    rightlineno=i + 1,
+                    rightline=rightlines[i],
+                )
+
 
 def _getcompblock(leftlines, rightlines, opcodes):
     args = (leftlines, rightlines, opcodes)
-    return templateutil.mappinggenerator(_getcompblockgen, args=args,
-                                         name='comparisonline')
+    return templateutil.mappinggenerator(
+        _getcompblockgen, args=args, name=b'comparisonline'
+    )
+
 
 def _comparegen(context, contextnum, leftlines, rightlines):
     '''Generator function that provides side-by-side comparison data.'''
     s = difflib.SequenceMatcher(None, leftlines, rightlines)
     if contextnum < 0:
         l = _getcompblock(leftlines, rightlines, s.get_opcodes())
-        yield {'lines': l}
+        yield {b'lines': l}
     else:
         for oc in s.get_grouped_opcodes(n=contextnum):
             l = _getcompblock(leftlines, rightlines, oc)
-            yield {'lines': l}
+            yield {b'lines': l}
+
 
 def compare(contextnum, leftlines, rightlines):
     args = (contextnum, leftlines, rightlines)
-    return templateutil.mappinggenerator(_comparegen, args=args,
-                                         name='comparisonblock')
+    return templateutil.mappinggenerator(
+        _comparegen, args=args, name=b'comparisonblock'
+    )
+
 
 def diffstatgen(ui, ctx, basectx):
     '''Generator function that provides the diffstat data.'''
 
-    diffopts = patch.diffopts(ui, {'noprefix': False})
-    stats = patch.diffstatdata(
-        util.iterlines(ctx.diff(basectx, opts=diffopts)))
+    diffopts = patch.diffopts(ui, {b'noprefix': False})
+    stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx, opts=diffopts)))
     maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
     while True:
         yield stats, maxname, maxtotal, addtotal, removetotal, binary
 
+
 def diffsummary(statgen):
     '''Return a short summary of the diff.'''
 
     stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
-    return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % (
-             len(stats), addtotal, removetotal)
+    return _(b' %d files changed, %d insertions(+), %d deletions(-)\n') % (
+        len(stats),
+        addtotal,
+        removetotal,
+    )
+
 
 def _diffstattmplgen(context, ctx, statgen, parity):
     stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
@@ -684,26 +795,31 @@
 
     fileno = 0
     for filename, adds, removes, isbinary in stats:
-        template = 'diffstatlink' if filename in files else 'diffstatnolink'
+        template = b'diffstatlink' if filename in files else b'diffstatnolink'
         total = adds + removes
         fileno += 1
-        yield context.process(template, {
-            'node': ctx.hex(),
-            'file': filename,
-            'fileno': fileno,
-            'total': total,
-            'addpct': pct(adds),
-            'removepct': pct(removes),
-            'parity': next(parity),
-        })
+        yield context.process(
+            template,
+            {
+                b'node': ctx.hex(),
+                b'file': filename,
+                b'fileno': fileno,
+                b'total': total,
+                b'addpct': pct(adds),
+                b'removepct': pct(removes),
+                b'parity': next(parity),
+            },
+        )
+
 
 def diffstat(ctx, statgen, parity):
     '''Return a diffstat template for each file in the diff.'''
     args = (ctx, statgen, parity)
     return templateutil.mappedgenerator(_diffstattmplgen, args=args)
 
+
 class sessionvars(templateutil.wrapped):
-    def __init__(self, vars, start='?'):
+    def __init__(self, vars, start=b'?'):
         self._start = start
         self._vars = vars
 
@@ -725,30 +841,31 @@
         return self._vars.get(key)
 
     def getmin(self, context, mapping):
-        raise error.ParseError(_('not comparable'))
+        raise error.ParseError(_(b'not comparable'))
 
     def getmax(self, context, mapping):
-        raise error.ParseError(_('not comparable'))
+        raise error.ParseError(_(b'not comparable'))
 
     def filter(self, context, mapping, select):
         # implement if necessary
-        raise error.ParseError(_('not filterable'))
+        raise error.ParseError(_(b'not filterable'))
 
     def itermaps(self, context):
         separator = self._start
-        for key, value in sorted(self._vars.iteritems()):
-            yield {'name': key,
-                   'value': pycompat.bytestr(value),
-                   'separator': separator,
+        for key, value in sorted(pycompat.iteritems(self._vars)):
+            yield {
+                b'name': key,
+                b'value': pycompat.bytestr(value),
+                b'separator': separator,
             }
-            separator = '&'
+            separator = b'&'
 
     def join(self, context, mapping, sep):
         # could be '{separator}{name}={value|urlescape}'
-        raise error.ParseError(_('not displayable without template'))
+        raise error.ParseError(_(b'not displayable without template'))
 
     def show(self, context, mapping):
-        return self.join(context, '')
+        return self.join(context, b'')
 
     def tobool(self, context, mapping):
         return bool(self._vars)
@@ -756,16 +873,18 @@
     def tovalue(self, context, mapping):
         return self._vars
 
+
 class wsgiui(uimod.ui):
     # default termwidth breaks under mod_wsgi
     def termwidth(self):
         return 80
 
+
 def getwebsubs(repo):
     websubtable = []
-    websubdefs = repo.ui.configitems('websub')
+    websubdefs = repo.ui.configitems(b'websub')
     # we must maintain interhg backwards compatibility
-    websubdefs += repo.ui.configitems('interhg')
+    websubdefs += repo.ui.configitems(b'interhg')
     for key, pattern in websubdefs:
         # grab the delimiter from the character after the "s"
         unesc = pattern[1:2]
@@ -776,10 +895,13 @@
         # delimiters are required.
         match = re.match(
             br'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
-            % (delim, delim, delim), pattern)
+            % (delim, delim, delim),
+            pattern,
+        )
         if not match:
-            repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
-                              % (key, pattern))
+            repo.ui.warn(
+                _(b"websub: invalid pattern for %s: %s\n") % (key, pattern)
+            )
             continue
 
         # we need to unescape the delimiter for regexp and format
@@ -791,17 +913,20 @@
         flagin = match.group(3)
         flags = 0
         if flagin:
-            for flag in flagin.upper():
+            for flag in pycompat.sysstr(flagin.upper()):
                 flags |= re.__dict__[flag]
 
         try:
             regexp = re.compile(regexp, flags)
             websubtable.append((regexp, format))
         except re.error:
-            repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
-                         % (key, regexp))
+            repo.ui.warn(
+                _(b"websub: invalid regexp for %s: %s\n") % (key, regexp)
+            )
     return websubtable
 
+
 def getgraphnode(repo, ctx):
-    return (templatekw.getgraphnodecurrent(repo, ctx) +
-            templatekw.getgraphnodesymbol(ctx))
+    return templatekw.getgraphnodecurrent(
+        repo, ctx
+    ) + templatekw.getgraphnodesymbol(ctx)
--- a/mercurial/hgweb/wsgicgi.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hgweb/wsgicgi.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,29 +12,25 @@
 
 import os
 
-from .. import (
-    pycompat,
-)
+from ..pycompat import getattr
+from .. import pycompat
 
-from ..utils import (
-    procutil,
-)
+from ..utils import procutil
 
-from . import (
-    common,
-)
+from . import common
+
 
 def launch(application):
     procutil.setbinary(procutil.stdin)
     procutil.setbinary(procutil.stdout)
 
-    environ = dict(os.environ.iteritems()) # re-exports
-    environ.setdefault(r'PATH_INFO', '')
+    environ = dict(pycompat.iteritems(os.environ))  # re-exports
+    environ.setdefault(r'PATH_INFO', b'')
     if environ.get(r'SERVER_SOFTWARE', r'').startswith(r'Microsoft-IIS'):
         # IIS includes script_name in PATH_INFO
         scriptname = environ[r'SCRIPT_NAME']
         if environ[r'PATH_INFO'].startswith(scriptname):
-            environ[r'PATH_INFO'] = environ[r'PATH_INFO'][len(scriptname):]
+            environ[r'PATH_INFO'] = environ[r'PATH_INFO'][len(scriptname) :]
 
     stdin = procutil.stdin
     if environ.get(r'HTTP_EXPECT', r'').lower() == r'100-continue':
@@ -58,16 +54,18 @@
 
     def write(data):
         if not headers_set:
-            raise AssertionError("write() before start_response()")
+            raise AssertionError(b"write() before start_response()")
 
         elif not headers_sent:
             # Before the first output, send the stored headers
             status, response_headers = headers_sent[:] = headers_set
-            out.write('Status: %s\r\n' % pycompat.bytesurl(status))
+            out.write(b'Status: %s\r\n' % pycompat.bytesurl(status))
             for hk, hv in response_headers:
-                out.write('%s: %s\r\n' % (pycompat.bytesurl(hk),
-                                          pycompat.bytesurl(hv)))
-            out.write('\r\n')
+                out.write(
+                    b'%s: %s\r\n'
+                    % (pycompat.bytesurl(hk), pycompat.bytesurl(hv))
+                )
+            out.write(b'\r\n')
 
         out.write(data)
         out.flush()
@@ -79,9 +77,9 @@
                     # Re-raise original exception if headers sent
                     raise exc_info[0](exc_info[1], exc_info[2])
             finally:
-                exc_info = None     # avoid dangling circular ref
+                exc_info = None  # avoid dangling circular ref
         elif headers_set:
-            raise AssertionError("Headers already set!")
+            raise AssertionError(b"Headers already set!")
 
         headers_set[:] = [status, response_headers]
         return write
@@ -91,6 +89,6 @@
         for chunk in content:
             write(chunk)
         if not headers_sent:
-            write('')   # send headers now if body was empty
+            write(b'')  # send headers now if body was empty
     finally:
         getattr(content, 'close', lambda: None)()
--- a/mercurial/hgweb/wsgiheaders.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hgweb/wsgiheaders.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,18 +12,20 @@
 from __future__ import absolute_import, print_function
 
 import re
+
 tspecials = re.compile(br'[ \(\)<>@,;:\\"/\[\]\?=]')
 
+
 def _formatparam(param, value=None, quote=1):
     """Convenience function to format and return a key=value pair.
     This will quote the value if needed or if quote is true.
     """
     if value is not None and len(value) > 0:
         if quote or tspecials.search(value):
-            value = value.replace('\\', '\\\\').replace('"', r'\"')
-            return '%s="%s"' % (param, value)
+            value = value.replace(b'\\', b'\\\\').replace(b'"', r'\"')
+            return b'%s="%s"' % (param, value)
         else:
-            return '%s=%s' % (param, value)
+            return b'%s=%s' % (param, value)
     else:
         return param
 
@@ -34,7 +36,7 @@
     def __init__(self, headers=None):
         headers = headers if headers is not None else []
         if type(headers) is not list:
-            raise TypeError("Headers must be a list of name/value tuples")
+            raise TypeError(b"Headers must be a list of name/value tuples")
         self._headers = headers
         if __debug__:
             for k, v in headers:
@@ -45,8 +47,10 @@
         """Convert/check value type."""
         if type(value) is bytes:
             return value
-        raise AssertionError(u"Header names/values must be"
-                             u" of type bytes (got %s)" % repr(value))
+        raise AssertionError(
+            u"Header names/values must be"
+            u" of type bytes (got %s)" % repr(value)
+        )
 
     def __len__(self):
         """Return the total number of headers, including duplicates."""
@@ -56,7 +60,8 @@
         """Set the value of a header."""
         del self[name]
         self._headers.append(
-            (self._convert_string_type(name), self._convert_string_type(val)))
+            (self._convert_string_type(name), self._convert_string_type(val))
+        )
 
     def __delitem__(self, name):
         """Delete all occurrences of a header, if present.
@@ -78,7 +83,6 @@
         """Return true if the message contains the header."""
         return self.get(name) is not None
 
-
     def get_all(self, name):
         """Return a list of all the values for the named field.
         These will be sorted in the order they appeared in the original header
@@ -87,18 +91,16 @@
         If no fields exist with the given name, returns an empty list.
         """
         name = self._convert_string_type(name.lower())
-        return [kv[1] for kv in self._headers if kv[0].lower()==name]
-
+        return [kv[1] for kv in self._headers if kv[0].lower() == name]
 
     def get(self, name, default=None):
         """Get the first header value for 'name', or return 'default'"""
         name = self._convert_string_type(name.lower())
         for k, v in self._headers:
-            if k.lower()==name:
+            if k.lower() == name:
                 return v
         return default
 
-
     def keys(self):
         """Return a list of all the header field names.
         These will be sorted in the order they appeared in the original header
@@ -132,7 +134,9 @@
     def __str__(self):
         """str() returns the formatted headers, complete with end line,
         suitable for direct HTTP transmission."""
-        return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
+        return b'\r\n'.join(
+            [b"%s: %s" % kv for kv in self._headers] + [b'', b'']
+        )
 
     def __bytes__(self):
         return str(self).encode('iso-8859-1')
@@ -143,8 +147,12 @@
         and value 'value'."""
         result = self.get(name)
         if result is None:
-            self._headers.append((self._convert_string_type(name),
-                self._convert_string_type(value)))
+            self._headers.append(
+                (
+                    self._convert_string_type(name),
+                    self._convert_string_type(value),
+                )
+            )
             return value
         else:
             return result
@@ -168,9 +176,10 @@
         for k, v in _params.items():
             k = self._convert_string_type(k)
             if v is None:
-                parts.append(k.replace('_', '-'))
+                parts.append(k.replace(b'_', b'-'))
             else:
                 v = self._convert_string_type(v)
-                parts.append(_formatparam(k.replace('_', '-'), v))
+                parts.append(_formatparam(k.replace(b'_', b'-'), v))
         self._headers.append(
-            (self._convert_string_type(_name), "; ".join(parts)))
+            (self._convert_string_type(_name), b"; ".join(parts))
+        )
--- a/mercurial/hook.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/hook.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,6 +11,7 @@
 import sys
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     demandimport,
     encoding,
@@ -24,6 +25,7 @@
     stringutil,
 )
 
+
 def pythonhook(ui, repo, htype, hname, funcname, args, throw):
     '''call python hook. hook is callable object, looked up as
     name in python module. if callable returns "true", hook
@@ -38,11 +40,12 @@
         obj = funcname
         funcname = pycompat.sysbytes(obj.__module__ + r"." + obj.__name__)
     else:
-        d = funcname.rfind('.')
+        d = funcname.rfind(b'.')
         if d == -1:
             raise error.HookLoadError(
-                _('%s hook is invalid: "%s" not in a module')
-                % (hname, funcname))
+                _(b'%s hook is invalid: "%s" not in a module')
+                % (hname, funcname)
+            )
         modname = funcname[:d]
         oldpaths = sys.path
         if procutil.mainfrozen():
@@ -62,63 +65,83 @@
                 except (ImportError, SyntaxError):
                     e2 = sys.exc_info()
                     if ui.tracebackflag:
-                        ui.warn(_('exception from first failed import '
-                                  'attempt:\n'))
+                        ui.warn(
+                            _(
+                                b'exception from first failed import '
+                                b'attempt:\n'
+                            )
+                        )
                     ui.traceback(e1)
                     if ui.tracebackflag:
-                        ui.warn(_('exception from second failed import '
-                                  'attempt:\n'))
+                        ui.warn(
+                            _(
+                                b'exception from second failed import '
+                                b'attempt:\n'
+                            )
+                        )
                     ui.traceback(e2)
 
                     if not ui.tracebackflag:
                         tracebackhint = _(
-                            'run with --traceback for stack trace')
+                            b'run with --traceback for stack trace'
+                        )
                     else:
                         tracebackhint = None
                     raise error.HookLoadError(
-                        _('%s hook is invalid: import of "%s" failed') %
-                        (hname, modname), hint=tracebackhint)
+                        _(b'%s hook is invalid: import of "%s" failed')
+                        % (hname, modname),
+                        hint=tracebackhint,
+                    )
         sys.path = oldpaths
         try:
-            for p in funcname.split('.')[1:]:
+            for p in funcname.split(b'.')[1:]:
                 obj = getattr(obj, p)
         except AttributeError:
             raise error.HookLoadError(
-                _('%s hook is invalid: "%s" is not defined')
-                % (hname, funcname))
+                _(b'%s hook is invalid: "%s" is not defined')
+                % (hname, funcname)
+            )
         if not callable(obj):
             raise error.HookLoadError(
-                _('%s hook is invalid: "%s" is not callable')
-                % (hname, funcname))
+                _(b'%s hook is invalid: "%s" is not callable')
+                % (hname, funcname)
+            )
 
-    ui.note(_("calling hook %s: %s\n") % (hname, funcname))
+    ui.note(_(b"calling hook %s: %s\n") % (hname, funcname))
     starttime = util.timer()
 
     try:
         r = obj(ui=ui, repo=repo, hooktype=htype, **pycompat.strkwargs(args))
     except Exception as exc:
         if isinstance(exc, error.Abort):
-            ui.warn(_('error: %s hook failed: %s\n') %
-                         (hname, exc.args[0]))
+            ui.warn(_(b'error: %s hook failed: %s\n') % (hname, exc.args[0]))
         else:
-            ui.warn(_('error: %s hook raised an exception: '
-                      '%s\n') % (hname, stringutil.forcebytestr(exc)))
+            ui.warn(
+                _(b'error: %s hook raised an exception: %s\n')
+                % (hname, stringutil.forcebytestr(exc))
+            )
         if throw:
             raise
         if not ui.tracebackflag:
-            ui.warn(_('(run with --traceback for stack trace)\n'))
+            ui.warn(_(b'(run with --traceback for stack trace)\n'))
         ui.traceback()
         return True, True
     finally:
         duration = util.timer() - starttime
-        ui.log('pythonhook', 'pythonhook-%s: %s finished in %0.2f seconds\n',
-               htype, funcname, duration)
+        ui.log(
+            b'pythonhook',
+            b'pythonhook-%s: %s finished in %0.2f seconds\n',
+            htype,
+            funcname,
+            duration,
+        )
     if r:
         if throw:
-            raise error.HookAbort(_('%s hook failed') % hname)
-        ui.warn(_('warning: %s hook failed\n') % hname)
+            raise error.HookAbort(_(b'%s hook failed') % hname)
+        ui.warn(_(b'warning: %s hook failed\n') % hname)
     return r, False
 
+
 def _exthook(ui, repo, htype, name, cmd, args, throw):
     starttime = util.timer()
     env = {}
@@ -128,44 +151,51 @@
         tr = repo.currenttransaction()
         repo.dirstate.write(tr)
         if tr and tr.writepending():
-            env['HG_PENDING'] = repo.root
-    env['HG_HOOKTYPE'] = htype
-    env['HG_HOOKNAME'] = name
+            env[b'HG_PENDING'] = repo.root
+    env[b'HG_HOOKTYPE'] = htype
+    env[b'HG_HOOKNAME'] = name
 
-    for k, v in args.iteritems():
+    for k, v in pycompat.iteritems(args):
         if callable(v):
             v = v()
         if isinstance(v, (dict, list)):
             v = stringutil.pprint(v)
-        env['HG_' + k.upper()] = v
+        env[b'HG_' + k.upper()] = v
 
-    if ui.configbool('hooks', 'tonative.%s' % name, False):
+    if ui.configbool(b'hooks', b'tonative.%s' % name, False):
         oldcmd = cmd
         cmd = procutil.shelltonative(cmd, env)
         if cmd != oldcmd:
-            ui.note(_('converting hook "%s" to native\n') % name)
+            ui.note(_(b'converting hook "%s" to native\n') % name)
 
-    ui.note(_("running hook %s: %s\n") % (name, cmd))
+    ui.note(_(b"running hook %s: %s\n") % (name, cmd))
 
     if repo:
         cwd = repo.root
     else:
         cwd = encoding.getcwd()
-    r = ui.system(cmd, environ=env, cwd=cwd, blockedtag='exthook-%s' % (name,))
+    r = ui.system(cmd, environ=env, cwd=cwd, blockedtag=b'exthook-%s' % (name,))
 
     duration = util.timer() - starttime
-    ui.log('exthook', 'exthook-%s: %s finished in %0.2f seconds\n',
-           name, cmd, duration)
+    ui.log(
+        b'exthook',
+        b'exthook-%s: %s finished in %0.2f seconds\n',
+        name,
+        cmd,
+        duration,
+    )
     if r:
         desc = procutil.explainexit(r)
         if throw:
-            raise error.HookAbort(_('%s hook %s') % (name, desc))
-        ui.warn(_('warning: %s hook %s\n') % (name, desc))
+            raise error.HookAbort(_(b'%s hook %s') % (name, desc))
+        ui.warn(_(b'warning: %s hook %s\n') % (name, desc))
     return r
 
+
 # represent an untrusted hook command
 _fromuntrusted = object()
 
+
 def _allhooks(ui):
     """return a list of (hook-id, cmd) pairs sorted by priority"""
     hooks = _hookitems(ui)
@@ -181,38 +211,44 @@
     # (end of the security sensitive section)
     return [(k, v) for p, o, k, v in sorted(hooks.values())]
 
+
 def _hookitems(ui, _untrusted=False):
     """return all hooks items ready to be sorted"""
     hooks = {}
-    for name, cmd in ui.configitems('hooks', untrusted=_untrusted):
-        if name.startswith('priority.') or name.startswith('tonative.'):
+    for name, cmd in ui.configitems(b'hooks', untrusted=_untrusted):
+        if name.startswith(b'priority.') or name.startswith(b'tonative.'):
             continue
 
-        priority = ui.configint('hooks', 'priority.%s' % name, 0)
+        priority = ui.configint(b'hooks', b'priority.%s' % name, 0)
         hooks[name] = (-priority, len(hooks), name, cmd)
     return hooks
 
+
 _redirect = False
+
+
 def redirect(state):
     global _redirect
     _redirect = state
 
+
 def hashook(ui, htype):
     """return True if a hook is configured for 'htype'"""
     if not ui.callhooks:
         return False
     for hname, cmd in _allhooks(ui):
-        if hname.split('.')[0] == htype and cmd:
+        if hname.split(b'.')[0] == htype and cmd:
             return True
     return False
 
+
 def hook(ui, repo, htype, throw=False, **args):
     if not ui.callhooks:
         return False
 
     hooks = []
     for hname, cmd in _allhooks(ui):
-        if hname.split('.')[0] == htype and cmd:
+        if hname.split(b'.')[0] == htype and cmd:
             hooks.append((hname, cmd))
 
     res = runhooks(ui, repo, htype, hooks, throw=throw, **args)
@@ -221,6 +257,7 @@
         r = res[hname][0] or r
     return r
 
+
 def runhooks(ui, repo, htype, hooks, throw=False, **args):
     args = pycompat.byteskwargs(args)
     res = {}
@@ -244,30 +281,31 @@
             if cmd is _fromuntrusted:
                 if throw:
                     raise error.HookAbort(
-                        _('untrusted hook %s not executed') % hname,
-                        hint = _("see 'hg help config.trusted'"))
-                ui.warn(_('warning: untrusted hook %s not executed\n') % hname)
+                        _(b'untrusted hook %s not executed') % hname,
+                        hint=_(b"see 'hg help config.trusted'"),
+                    )
+                ui.warn(_(b'warning: untrusted hook %s not executed\n') % hname)
                 r = 1
                 raised = False
             elif callable(cmd):
-                r, raised = pythonhook(ui, repo, htype, hname, cmd, args,
-                                        throw)
-            elif cmd.startswith('python:'):
-                if cmd.count(':') >= 2:
-                    path, cmd = cmd[7:].rsplit(':', 1)
+                r, raised = pythonhook(ui, repo, htype, hname, cmd, args, throw)
+            elif cmd.startswith(b'python:'):
+                if cmd.count(b':') >= 2:
+                    path, cmd = cmd[7:].rsplit(b':', 1)
                     path = util.expandpath(path)
                     if repo:
                         path = os.path.join(repo.root, path)
                     try:
-                        mod = extensions.loadpath(path, 'hghook.%s' % hname)
+                        mod = extensions.loadpath(path, b'hghook.%s' % hname)
                     except Exception:
-                        ui.write(_("loading %s hook failed:\n") % hname)
+                        ui.write(_(b"loading %s hook failed:\n") % hname)
                         raise
                     hookfn = getattr(mod, cmd)
                 else:
                     hookfn = cmd[7:].strip()
-                r, raised = pythonhook(ui, repo, htype, hname, hookfn, args,
-                                        throw)
+                r, raised = pythonhook(
+                    ui, repo, htype, hname, hookfn, args, throw
+                )
             else:
                 r = _exthook(ui, repo, htype, hname, cmd, args, throw)
                 raised = False
--- a/mercurial/httpconnection.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/httpconnection.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,6 +13,7 @@
 import os
 
 from .i18n import _
+from .pycompat import open
 from . import (
     pycompat,
     util,
@@ -43,8 +44,9 @@
         # requires authentication. Since we can't know until we try
         # once whether authentication will be required, just lie to
         # the user and maybe the push succeeds suddenly at 50%.
-        self._progress = ui.makeprogress(_('sending'), unit=_('kb'),
-                                         total=(self.length // 1024 * 2))
+        self._progress = ui.makeprogress(
+            _(b'sending'), unit=_(b'kb'), total=(self.length // 1024 * 2)
+        )
 
     def read(self, *args, **kwargs):
         ret = self._data.read(*args, **kwargs)
@@ -61,35 +63,36 @@
     def __exit__(self, exc_type, exc_val, exc_tb):
         self.close()
 
+
 # moved here from url.py to avoid a cycle
 def readauthforuri(ui, uri, user):
     uri = pycompat.bytesurl(uri)
     # Read configuration
     groups = {}
-    for key, val in ui.configitems('auth'):
-        if key in ('cookiefile',):
+    for key, val in ui.configitems(b'auth'):
+        if key in (b'cookiefile',):
             continue
 
-        if '.' not in key:
-            ui.warn(_("ignoring invalid [auth] key '%s'\n") % key)
+        if b'.' not in key:
+            ui.warn(_(b"ignoring invalid [auth] key '%s'\n") % key)
             continue
-        group, setting = key.rsplit('.', 1)
+        group, setting = key.rsplit(b'.', 1)
         gdict = groups.setdefault(group, {})
-        if setting in ('username', 'cert', 'key'):
+        if setting in (b'username', b'cert', b'key'):
             val = util.expandpath(val)
         gdict[setting] = val
 
     # Find the best match
-    scheme, hostpath = uri.split('://', 1)
+    scheme, hostpath = uri.split(b'://', 1)
     bestuser = None
     bestlen = 0
     bestauth = None
-    for group, auth in groups.iteritems():
-        if user and user != auth.get('username', user):
+    for group, auth in pycompat.iteritems(groups):
+        if user and user != auth.get(b'username', user):
             # If a username was set in the URI, the entry username
             # must either match it or be unset
             continue
-        prefix = auth.get('prefix')
+        prefix = auth.get(b'prefix')
         if not prefix:
             continue
 
@@ -104,18 +107,26 @@
         prefixurl.user = None
         prefix = bytes(prefixurl)
 
-        p = prefix.split('://', 1)
+        p = prefix.split(b'://', 1)
         if len(p) > 1:
             schemes, prefix = [p[0]], p[1]
         else:
-            schemes = (auth.get('schemes') or 'https').split()
-        if ((prefix == '*' or hostpath.startswith(prefix)) and
-            (len(prefix) > bestlen or (len(prefix) == bestlen and
-                                       not bestuser and 'username' in auth))
-            and scheme in schemes):
+            schemes = (auth.get(b'schemes') or b'https').split()
+        if (
+            (prefix == b'*' or hostpath.startswith(prefix))
+            and (
+                len(prefix) > bestlen
+                or (
+                    len(prefix) == bestlen
+                    and not bestuser
+                    and b'username' in auth
+                )
+            )
+            and scheme in schemes
+        ):
             bestlen = len(prefix)
             bestauth = group, auth
-            bestuser = auth.get('username')
+            bestuser = auth.get(b'username')
             if user and not bestuser:
-                auth['username'] = user
+                auth[b'username'] = user
     return bestauth
--- a/mercurial/httppeer.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/httppeer.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,12 +16,12 @@
 import weakref
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     bundle2,
     error,
     httpconnection,
     pycompat,
-    repository,
     statichttprepo,
     url as urlmod,
     util,
@@ -31,9 +31,12 @@
     wireprotov2peer,
     wireprotov2server,
 )
+from .interfaces import (
+    repository,
+    util as interfaceutil,
+)
 from .utils import (
     cborutil,
-    interfaceutil,
     stringutil,
 )
 
@@ -41,6 +44,7 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
+
 def encodevalueinheaders(value, header, limit):
     """Encode a string value into multiple HTTP headers.
 
@@ -59,23 +63,27 @@
     # and using an r-string to make it portable between Python 2 and 3
     # doesn't work because then the \r is a literal backslash-r
     # instead of a carriage return.
-    valuelen = limit - len(fmt % r'000') - len(': \r\n')
+    valuelen = limit - len(fmt % r'000') - len(b': \r\n')
     result = []
 
     n = 0
     for i in pycompat.xrange(0, len(value), valuelen):
         n += 1
-        result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
+        result.append((fmt % str(n), pycompat.strurl(value[i : i + valuelen])))
 
     return result
 
+
 class _multifile(object):
     def __init__(self, *fileobjs):
         for f in fileobjs:
-            if not util.safehasattr(f, 'length'):
+            if not util.safehasattr(f, b'length'):
                 raise ValueError(
-                    '_multifile only supports file objects that '
-                    'have a length but this one does not:', type(f), f)
+                    b'_multifile only supports file objects that '
+                    b'have a length but this one does not:',
+                    type(f),
+                    f,
+                )
         self._fileobjs = fileobjs
         self._index = 0
 
@@ -85,7 +93,7 @@
 
     def read(self, amt=None):
         if amt <= 0:
-            return ''.join(f.read() for f in self._fileobjs)
+            return b''.join(f.read() for f in self._fileobjs)
         parts = []
         while amt and self._index < len(self._fileobjs):
             parts.append(self._fileobjs[self._index].read(amt))
@@ -93,23 +101,27 @@
             if got < amt:
                 self._index += 1
             amt -= got
-        return ''.join(parts)
+        return b''.join(parts)
 
     def seek(self, offset, whence=os.SEEK_SET):
         if whence != os.SEEK_SET:
             raise NotImplementedError(
-                '_multifile does not support anything other'
-                ' than os.SEEK_SET for whence on seek()')
+                b'_multifile does not support anything other'
+                b' than os.SEEK_SET for whence on seek()'
+            )
         if offset != 0:
             raise NotImplementedError(
-                '_multifile only supports seeking to start, but that '
-                'could be fixed if you need it')
+                b'_multifile only supports seeking to start, but that '
+                b'could be fixed if you need it'
+            )
         for f in self._fileobjs:
             f.seek(0)
         self._index = 0
 
-def makev1commandrequest(ui, requestbuilder, caps, capablefn,
-                         repobaseurl, cmd, args):
+
+def makev1commandrequest(
+    ui, requestbuilder, caps, capablefn, repobaseurl, cmd, args
+):
     """Make an HTTP request to run a command for a version 1 client.
 
     ``caps`` is a set of known server capabilities. The value may be
@@ -120,18 +132,18 @@
     ``cmd``, ``args``, and ``data`` define the command, its arguments, and
     raw data to pass to it.
     """
-    if cmd == 'pushkey':
-        args['data'] = ''
-    data = args.pop('data', None)
-    headers = args.pop('headers', {})
+    if cmd == b'pushkey':
+        args[b'data'] = b''
+    data = args.pop(b'data', None)
+    headers = args.pop(b'headers', {})
 
-    ui.debug("sending %s command\n" % cmd)
-    q = [('cmd', cmd)]
+    ui.debug(b"sending %s command\n" % cmd)
+    q = [(b'cmd', cmd)]
     headersize = 0
     # Important: don't use self.capable() here or else you end up
     # with infinite recursion when trying to look up capabilities
     # for the first time.
-    postargsok = caps is not None and 'httppostargs' in caps
+    postargsok = caps is not None and b'httppostargs' in caps
 
     # Send arguments via POST.
     if postargsok and args:
@@ -151,26 +163,27 @@
         # Calling self.capable() can infinite loop if we are calling
         # "capabilities". But that command should never accept wire
         # protocol arguments. So this should never happen.
-        assert cmd != 'capabilities'
-        httpheader = capablefn('httpheader')
+        assert cmd != b'capabilities'
+        httpheader = capablefn(b'httpheader')
         if httpheader:
-            headersize = int(httpheader.split(',', 1)[0])
+            headersize = int(httpheader.split(b',', 1)[0])
 
         # Send arguments via HTTP headers.
         if headersize > 0:
             # The headers can typically carry more data than the URL.
             encargs = urlreq.urlencode(sorted(args.items()))
-            for header, value in encodevalueinheaders(encargs, 'X-HgArg',
-                                                      headersize):
+            for header, value in encodevalueinheaders(
+                encargs, b'X-HgArg', headersize
+            ):
                 headers[header] = value
         # Send arguments via query string (Mercurial <1.9).
         else:
             q += sorted(args.items())
 
-    qs = '?%s' % urlreq.urlencode(q)
-    cu = "%s%s" % (repobaseurl, qs)
+    qs = b'?%s' % urlreq.urlencode(q)
+    cu = b"%s%s" % (repobaseurl, qs)
     size = 0
-    if util.safehasattr(data, 'length'):
+    if util.safehasattr(data, b'length'):
         size = data.length
     elif data is not None:
         size = len(data)
@@ -186,28 +199,30 @@
 
     mediatypes = set()
     if caps is not None:
-        mt = capablefn('httpmediatype')
+        mt = capablefn(b'httpmediatype')
         if mt:
-            protoparams.add('0.1')
-            mediatypes = set(mt.split(','))
+            protoparams.add(b'0.1')
+            mediatypes = set(mt.split(b','))
 
-        protoparams.add('partial-pull')
+        protoparams.add(b'partial-pull')
 
-    if '0.2tx' in mediatypes:
-        protoparams.add('0.2')
+    if b'0.2tx' in mediatypes:
+        protoparams.add(b'0.2')
 
-    if '0.2tx' in mediatypes and capablefn('compression'):
+    if b'0.2tx' in mediatypes and capablefn(b'compression'):
         # We /could/ compare supported compression formats and prune
         # non-mutually supported or error if nothing is mutually supported.
         # For now, send the full list to the server and have it error.
-        comps = [e.wireprotosupport().name for e in
-                 util.compengines.supportedwireengines(util.CLIENTROLE)]
-        protoparams.add('comp=%s' % ','.join(comps))
+        comps = [
+            e.wireprotosupport().name
+            for e in util.compengines.supportedwireengines(util.CLIENTROLE)
+        ]
+        protoparams.add(b'comp=%s' % b','.join(comps))
 
     if protoparams:
-        protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),
-                                            'X-HgProto',
-                                            headersize or 1024)
+        protoheaders = encodevalueinheaders(
+            b' '.join(sorted(protoparams)), b'X-HgProto', headersize or 1024
+        )
         for header, value in protoheaders:
             headers[header] = value
 
@@ -222,11 +237,12 @@
     req = requestbuilder(pycompat.strurl(cu), data, headers)
 
     if data is not None:
-        ui.debug("sending %d bytes\n" % size)
+        ui.debug(b"sending %d bytes\n" % size)
         req.add_unredirected_header(r'Content-Length', r'%d' % size)
 
     return req, cu, qs
 
+
 def _reqdata(req):
     """Get request data, if any. If no data, returns None."""
     if pycompat.ispy3:
@@ -235,38 +251,47 @@
         return None
     return req.get_data()
 
+
 def sendrequest(ui, opener, req):
     """Send a prepared HTTP request.
 
     Returns the response object.
     """
     dbg = ui.debug
-    if (ui.debugflag
-        and ui.configbool('devel', 'debug.peer-request')):
-        line = 'devel-peer-request: %s\n'
-        dbg(line % '%s %s' % (pycompat.bytesurl(req.get_method()),
-                              pycompat.bytesurl(req.get_full_url())))
+    if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
+        line = b'devel-peer-request: %s\n'
+        dbg(
+            line
+            % b'%s %s'
+            % (
+                pycompat.bytesurl(req.get_method()),
+                pycompat.bytesurl(req.get_full_url()),
+            )
+        )
         hgargssize = None
 
         for header, value in sorted(req.header_items()):
             header = pycompat.bytesurl(header)
             value = pycompat.bytesurl(value)
-            if header.startswith('X-hgarg-'):
+            if header.startswith(b'X-hgarg-'):
                 if hgargssize is None:
                     hgargssize = 0
                 hgargssize += len(value)
             else:
-                dbg(line % '  %s %s' % (header, value))
+                dbg(line % b'  %s %s' % (header, value))
 
         if hgargssize is not None:
-            dbg(line % '  %d bytes of commands arguments in headers'
-                % hgargssize)
+            dbg(
+                line
+                % b'  %d bytes of commands arguments in headers'
+                % hgargssize
+            )
         data = _reqdata(req)
         if data is not None:
             length = getattr(data, 'length', None)
             if length is None:
                 length = len(data)
-            dbg(line % '  %d bytes of data' % length)
+            dbg(line % b'  %d bytes of data' % length)
 
         start = util.timer()
 
@@ -275,44 +300,52 @@
         res = opener.open(req)
     except urlerr.httperror as inst:
         if inst.code == 401:
-            raise error.Abort(_('authorization failed'))
+            raise error.Abort(_(b'authorization failed'))
         raise
     except httplib.HTTPException as inst:
-        ui.debug('http error requesting %s\n' %
-                 util.hidepassword(req.get_full_url()))
+        ui.debug(
+            b'http error requesting %s\n'
+            % util.hidepassword(req.get_full_url())
+        )
         ui.traceback()
         raise IOError(None, inst)
     finally:
-        if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
+        if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
             code = res.code if res else -1
-            dbg(line % '  finished in %.4f seconds (%d)'
-                % (util.timer() - start, code))
+            dbg(
+                line
+                % b'  finished in %.4f seconds (%d)'
+                % (util.timer() - start, code)
+            )
 
     # Insert error handlers for common I/O failures.
     urlmod.wrapresponse(res)
 
     return res
 
+
 class RedirectedRepoError(error.RepoError):
     def __init__(self, msg, respurl):
         super(RedirectedRepoError, self).__init__(msg)
         self.respurl = respurl
 
-def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,
-                           allowcbor=False):
+
+def parsev1commandresponse(
+    ui, baseurl, requrl, qs, resp, compressible, allowcbor=False
+):
     # record the url we got redirected to
     redirected = False
     respurl = pycompat.bytesurl(resp.geturl())
     if respurl.endswith(qs):
-        respurl = respurl[:-len(qs)]
+        respurl = respurl[: -len(qs)]
         qsdropped = False
     else:
         qsdropped = True
 
-    if baseurl.rstrip('/') != respurl.rstrip('/'):
+    if baseurl.rstrip(b'/') != respurl.rstrip(b'/'):
         redirected = True
         if not ui.quiet:
-            ui.warn(_('real URL is %s\n') % respurl)
+            ui.warn(_(b'real URL is %s\n') % respurl)
 
     try:
         proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
@@ -320,16 +353,17 @@
         proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))
 
     safeurl = util.hidepassword(baseurl)
-    if proto.startswith('application/hg-error'):
+    if proto.startswith(b'application/hg-error'):
         raise error.OutOfBandError(resp.read())
 
     # Pre 1.0 versions of Mercurial used text/plain and
     # application/hg-changegroup. We don't support such old servers.
-    if not proto.startswith('application/mercurial-'):
-        ui.debug("requested URL: '%s'\n" % util.hidepassword(requrl))
-        msg = _("'%s' does not appear to be an hg repository:\n"
-                "---%%<--- (%s)\n%s\n---%%<---\n") % (
-            safeurl, proto or 'no content-type', resp.read(1024))
+    if not proto.startswith(b'application/mercurial-'):
+        ui.debug(b"requested URL: '%s'\n" % util.hidepassword(requrl))
+        msg = _(
+            b"'%s' does not appear to be an hg repository:\n"
+            b"---%%<--- (%s)\n%s\n---%%<---\n"
+        ) % (safeurl, proto or b'no content-type', resp.read(1024))
 
         # Some servers may strip the query string from the redirect. We
         # raise a special error type so callers can react to this specially.
@@ -339,50 +373,54 @@
             raise error.RepoError(msg)
 
     try:
-        subtype = proto.split('-', 1)[1]
+        subtype = proto.split(b'-', 1)[1]
 
         # Unless we end up supporting CBOR in the legacy wire protocol,
         # this should ONLY be encountered for the initial capabilities
         # request during handshake.
-        if subtype == 'cbor':
+        if subtype == b'cbor':
             if allowcbor:
                 return respurl, proto, resp
             else:
-                raise error.RepoError(_('unexpected CBOR response from '
-                                        'server'))
+                raise error.RepoError(
+                    _(b'unexpected CBOR response from server')
+                )
 
-        version_info = tuple([int(n) for n in subtype.split('.')])
+        version_info = tuple([int(n) for n in subtype.split(b'.')])
     except ValueError:
-        raise error.RepoError(_("'%s' sent a broken Content-Type "
-                                "header (%s)") % (safeurl, proto))
+        raise error.RepoError(
+            _(b"'%s' sent a broken Content-Type header (%s)") % (safeurl, proto)
+        )
 
     # TODO consider switching to a decompression reader that uses
     # generators.
     if version_info == (0, 1):
         if compressible:
-            resp = util.compengines['zlib'].decompressorreader(resp)
+            resp = util.compengines[b'zlib'].decompressorreader(resp)
 
     elif version_info == (0, 2):
         # application/mercurial-0.2 always identifies the compression
         # engine in the payload header.
-        elen = struct.unpack('B', util.readexactly(resp, 1))[0]
+        elen = struct.unpack(b'B', util.readexactly(resp, 1))[0]
         ename = util.readexactly(resp, elen)
         engine = util.compengines.forwiretype(ename)
 
         resp = engine.decompressorreader(resp)
     else:
-        raise error.RepoError(_("'%s' uses newer protocol %s") %
-                              (safeurl, subtype))
+        raise error.RepoError(
+            _(b"'%s' uses newer protocol %s") % (safeurl, subtype)
+        )
 
     return respurl, proto, resp
 
+
 class httppeer(wireprotov1peer.wirepeer):
     def __init__(self, ui, path, url, opener, requestbuilder, caps):
         self.ui = ui
         self._path = path
         self._url = url
         self._caps = caps
-        self.limitedarguments = caps is not None and 'httppostargs' not in caps
+        self.limitedarguments = caps is not None and b'httppostargs' not in caps
         self._urlopener = opener
         self._requestbuilder = requestbuilder
 
@@ -407,14 +445,20 @@
 
     def close(self):
         try:
-            reqs, sent, recv = (self._urlopener.requestscount,
-                                self._urlopener.sentbytescount,
-                                self._urlopener.receivedbytescount)
+            reqs, sent, recv = (
+                self._urlopener.requestscount,
+                self._urlopener.sentbytescount,
+                self._urlopener.receivedbytescount,
+            )
         except AttributeError:
             return
-        self.ui.note(_('(sent %d HTTP requests and %d bytes; '
-                       'received %d bytes in responses)\n') %
-                     (reqs, sent, recv))
+        self.ui.note(
+            _(
+                b'(sent %d HTTP requests and %d bytes; '
+                b'received %d bytes in responses)\n'
+            )
+            % (reqs, sent, recv)
+        )
 
     # End of ipeerconnection interface.
 
@@ -428,14 +472,21 @@
     def _callstream(self, cmd, _compressible=False, **args):
         args = pycompat.byteskwargs(args)
 
-        req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,
-                                           self._caps, self.capable,
-                                           self._url, cmd, args)
+        req, cu, qs = makev1commandrequest(
+            self.ui,
+            self._requestbuilder,
+            self._caps,
+            self.capable,
+            self._url,
+            cmd,
+            args,
+        )
 
         resp = sendrequest(self.ui, self._urlopener, req)
 
-        self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,
-                                                     resp, _compressible)
+        self._url, ct, resp = parsev1commandresponse(
+            self.ui, self._url, cu, qs, resp, _compressible
+        )
 
         return resp
 
@@ -451,28 +502,28 @@
         # have to stream bundle to a temp file because we do not have
         # http 1.1 chunked transfer.
 
-        types = self.capable('unbundle')
+        types = self.capable(b'unbundle')
         try:
-            types = types.split(',')
+            types = types.split(b',')
         except AttributeError:
             # servers older than d1b16a746db6 will send 'unbundle' as a
             # boolean capability. They only support headerless/uncompressed
             # bundles.
-            types = [""]
+            types = [b""]
         for x in types:
             if x in bundle2.bundletypes:
                 type = x
                 break
 
         tempname = bundle2.writebundle(self.ui, cg, None, type)
-        fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
+        fp = httpconnection.httpsendfile(self.ui, tempname, b"rb")
         headers = {r'Content-Type': r'application/mercurial-0.1'}
 
         try:
             r = self._call(cmd, data=fp, headers=headers, **args)
-            vals = r.split('\n', 1)
+            vals = r.split(b'\n', 1)
             if len(vals) < 2:
-                raise error.ResponseError(_("unexpected response:"), r)
+                raise error.ResponseError(_(b"unexpected response:"), r)
             return vals
         except urlerr.httperror:
             # Catch and re-raise these so we don't try and treat them
@@ -481,34 +532,28 @@
             raise
         except socket.error as err:
             if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
-                raise error.Abort(_('push failed: %s') % err.args[1])
+                raise error.Abort(_(b'push failed: %s') % err.args[1])
             raise error.Abort(err.args[1])
         finally:
             fp.close()
             os.unlink(tempname)
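
# Illustrative sketch of the 'unbundle' negotiation above: the capability is
# either a comma-separated bytes value or, on servers predating d1b16a746db6,
# a bare boolean. (The supported types below are assumed stand-ins; b''
# means headerless/uncompressed.)
def negotiatebundletype(capvalue, supported=(b'HG20', b'HG10GZ', b'HG10UN', b'')):
    try:
        advertised = capvalue.split(b',')
    except AttributeError:
        advertised = [b'']  # legacy boolean capability
    for candidate in advertised:
        if candidate in supported:
            return candidate

assert negotiatebundletype(b'HG10GZ,HG10UN') == b'HG10GZ'
assert negotiatebundletype(True) == b''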
 
     def _calltwowaystream(self, cmd, fp, **args):
-        fh = None
-        fp_ = None
         filename = None
         try:
             # dump bundle to disk
-            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
-            fh = os.fdopen(fd, r"wb")
-            d = fp.read(4096)
-            while d:
-                fh.write(d)
+            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
+            with os.fdopen(fd, r"wb") as fh:
                 d = fp.read(4096)
-            fh.close()
+                while d:
+                    fh.write(d)
+                    d = fp.read(4096)
             # start http push
-            fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
-            headers = {r'Content-Type': r'application/mercurial-0.1'}
-            return self._callstream(cmd, data=fp_, headers=headers, **args)
+            with httpconnection.httpsendfile(self.ui, filename, b"rb") as fp_:
+                headers = {r'Content-Type': r'application/mercurial-0.1'}
+                return self._callstream(cmd, data=fp_, headers=headers, **args)
         finally:
-            if fp_ is not None:
-                fp_.close()
-            if fh is not None:
-                fh.close()
+            if filename is not None:
                 os.unlink(filename)
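
# Sketch of the spool-then-push idiom in _calltwowaystream() above: the
# bundle is copied to a temporary file in 4096-byte chunks before the HTTP
# request starts (shutil.copyfileobj would also work; the explicit loop
# mirrors the code). Names here are illustrative.
import io
import os
import tempfile

def spooltotempfile(src, chunksize=4096):
    fd, filename = tempfile.mkstemp(prefix='hg-bundle-', suffix='.hg')
    with os.fdopen(fd, 'wb') as fh:
        d = src.read(chunksize)
        while d:
            fh.write(d)
            d = src.read(chunksize)
    return filename  # the caller unlinks this in a finally block, as above

name = spooltotempfile(io.BytesIO(b'bundle-bytes'))
try:
    with open(name, 'rb') as fh:
        assert fh.read() == b'bundle-bytes'
finally:
    os.unlink(name)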
 
     def _callcompressable(self, cmd, **args):
@@ -517,8 +562,10 @@
     def _abort(self, exception):
         raise exception
 
-def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests,
-                  redirect):
+
+def sendv2request(
+    ui, opener, requestbuilder, apiurl, permission, requests, redirect
+):
     wireprotoframing.populatestreamencoders()
 
     uiencoders = ui.configlist(b'experimental', b'httppeer.v2-encoder-order')
@@ -528,36 +575,46 @@
 
         for encoder in uiencoders:
             if encoder not in wireprotoframing.STREAM_ENCODERS:
-                ui.warn(_(b'wire protocol version 2 encoder referenced in '
-                          b'config (%s) is not known; ignoring\n') % encoder)
+                ui.warn(
+                    _(
+                        b'wire protocol version 2 encoder referenced in '
+                        b'config (%s) is not known; ignoring\n'
+                    )
+                    % encoder
+                )
             else:
                 encoders.append(encoder)
 
     else:
         encoders = wireprotoframing.STREAM_ENCODERS_ORDER
 
-    reactor = wireprotoframing.clientreactor(ui,
-                                             hasmultiplesend=False,
-                                             buffersends=True,
-                                             clientcontentencoders=encoders)
+    reactor = wireprotoframing.clientreactor(
+        ui,
+        hasmultiplesend=False,
+        buffersends=True,
+        clientcontentencoders=encoders,
+    )
 
-    handler = wireprotov2peer.clienthandler(ui, reactor,
-                                            opener=opener,
-                                            requestbuilder=requestbuilder)
+    handler = wireprotov2peer.clienthandler(
+        ui, reactor, opener=opener, requestbuilder=requestbuilder
+    )
 
-    url = '%s/%s' % (apiurl, permission)
+    url = b'%s/%s' % (apiurl, permission)
 
     if len(requests) > 1:
-        url += '/multirequest'
+        url += b'/multirequest'
     else:
-        url += '/%s' % requests[0][0]
+        url += b'/%s' % requests[0][0]
 
-    ui.debug('sending %d commands\n' % len(requests))
+    ui.debug(b'sending %d commands\n' % len(requests))
     for command, args, f in requests:
-        ui.debug('sending command %s: %s\n' % (
-            command, stringutil.pprint(args, indent=2)))
-        assert not list(handler.callcommand(command, args, f,
-                                            redirect=redirect))
+        ui.debug(
+            b'sending command %s: %s\n'
+            % (command, stringutil.pprint(args, indent=2))
+        )
+        assert not list(
+            handler.callcommand(command, args, f, redirect=redirect)
+        )
 
     # TODO stream this.
     body = b''.join(map(bytes, handler.flushcommands()))
@@ -575,7 +632,7 @@
         res = opener.open(req)
     except urlerr.httperror as e:
         if e.code == 401:
-            raise error.Abort(_('authorization failed'))
+            raise error.Abort(_(b'authorization failed'))
 
         raise
     except httplib.HTTPException as e:
@@ -584,6 +641,7 @@
 
     return handler, res
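
# Sketch of the request URL layout built in sendv2request() above (the API
# URL is an assumed example value).
apiurl = b'https://example.com/repo/api/exp-http-v2-0003'
permission = b'ro'
requests = [(b'heads', {}, None)]
url = b'%s/%s' % (apiurl, permission)
if len(requests) > 1:
    url += b'/multirequest'
else:
    url += b'/%s' % requests[0][0]
assert url == b'https://example.com/repo/api/exp-http-v2-0003/ro/heads'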
 
+
 class queuedcommandfuture(pycompat.futures.Future):
     """Wraps result() on command futures to trigger submission on call."""
 
@@ -597,10 +655,12 @@
         # will resolve to Future.result.
         return self.result(timeout)
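
# Sketch of the pattern documented above: result() first forces submission,
# then defers to the stock Future machinery. fakeexecutor is an invented
# stand-in for the real peer executor.
import concurrent.futures

class queuedfuture(concurrent.futures.Future):
    def __init__(self, executor):
        super(queuedfuture, self).__init__()
        self._peerexecutor = executor

    def result(self, timeout=None):
        if self._peerexecutor:
            self._peerexecutor.sendcommands()
        return super(queuedfuture, self).result(timeout)

class fakeexecutor(object):
    def __init__(self):
        self.futures = []

    def sendcommands(self):
        for f in self.futures:
            f._peerexecutor = None  # swap to plain Future semantics
            f.set_result(b'response')

executor = fakeexecutor()
fut = queuedfuture(executor)
executor.futures.append(fut)
assert fut.result() == b'response'  # triggers sendcommands() first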
 
+
 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class httpv2executor(object):
-    def __init__(self, ui, opener, requestbuilder, apiurl, descriptor,
-                 redirect):
+    def __init__(
+        self, ui, opener, requestbuilder, apiurl, descriptor, redirect
+    ):
         self._ui = ui
         self._opener = opener
         self._requestbuilder = requestbuilder
@@ -623,29 +683,33 @@
 
     def callcommand(self, command, args):
         if self._sent:
-            raise error.ProgrammingError('callcommand() cannot be used after '
-                                         'commands are sent')
+            raise error.ProgrammingError(
+                b'callcommand() cannot be used after commands are sent'
+            )
 
         if self._closed:
-            raise error.ProgrammingError('callcommand() cannot be used after '
-                                         'close()')
+            raise error.ProgrammingError(
+                b'callcommand() cannot be used after close()'
+            )
 
         # The service advertises which commands are available. So if we attempt
         # to call an unknown command or pass an unknown argument, we can screen
         # for this.
-        if command not in self._descriptor['commands']:
+        if command not in self._descriptor[b'commands']:
             raise error.ProgrammingError(
-                'wire protocol command %s is not available' % command)
+                b'wire protocol command %s is not available' % command
+            )
 
-        cmdinfo = self._descriptor['commands'][command]
-        unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))
+        cmdinfo = self._descriptor[b'commands'][command]
+        unknownargs = set(args.keys()) - set(cmdinfo.get(b'args', {}))
 
         if unknownargs:
             raise error.ProgrammingError(
-                'wire protocol command %s does not accept argument: %s' % (
-                    command, ', '.join(sorted(unknownargs))))
+                b'wire protocol command %s does not accept argument: %s'
+                % (command, b', '.join(sorted(unknownargs)))
+            )
 
-        self._neededpermissions |= set(cmdinfo['permissions'])
+        self._neededpermissions |= set(cmdinfo[b'permissions'])
 
         # TODO we /could/ also validate types here, since the API descriptor
         # includes types...
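
# Sketch of the screening performed above, against an invented API
# descriptor.
descriptor = {
    b'commands': {
        b'heads': {b'args': {}, b'permissions': [b'pull']},
        b'known': {b'args': {b'nodes': {}}, b'permissions': [b'pull']},
    }
}

def screen(command, args):
    if command not in descriptor[b'commands']:
        raise LookupError('unknown wire protocol command')
    cmdinfo = descriptor[b'commands'][command]
    if set(args) - set(cmdinfo.get(b'args', {})):
        raise LookupError('unknown wire protocol command argument')
    return set(cmdinfo[b'permissions'])

assert screen(b'known', {b'nodes': []}) == {b'pull'}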
@@ -679,9 +743,11 @@
                 f._peerexecutor = None
 
         # Mark the future as running and filter out cancelled futures.
-        calls = [(command, args, f)
-                 for command, args, f in self._calls
-                 if f.set_running_or_notify_cancel()]
+        calls = [
+            (command, args, f)
+            for command, args, f in self._calls
+            if f.set_running_or_notify_cancel()
+        ]
 
         # Clear out references, prevent improper object usage.
         self._calls = None
@@ -691,28 +757,33 @@
 
         permissions = set(self._neededpermissions)
 
-        if 'push' in permissions and 'pull' in permissions:
-            permissions.remove('pull')
+        if b'push' in permissions and b'pull' in permissions:
+            permissions.remove(b'pull')
 
         if len(permissions) > 1:
-            raise error.RepoError(_('cannot make request requiring multiple '
-                                    'permissions: %s') %
-                                  _(', ').join(sorted(permissions)))
+            raise error.RepoError(
+                _(b'cannot make request requiring multiple permissions: %s')
+                % _(b', ').join(sorted(permissions))
+            )
 
-        permission = {
-            'push': 'rw',
-            'pull': 'ro',
-        }[permissions.pop()]
+        permission = {b'push': b'rw', b'pull': b'ro',}[permissions.pop()]
 
         handler, resp = sendv2request(
-            self._ui, self._opener, self._requestbuilder, self._apiurl,
-            permission, calls, self._redirect)
+            self._ui,
+            self._opener,
+            self._requestbuilder,
+            self._apiurl,
+            permission,
+            calls,
+            self._redirect,
+        )
 
         # TODO we probably want to validate the HTTP code, media type, etc.
 
         self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
-        self._responsef = self._responseexecutor.submit(self._handleresponse,
-                                                        handler, resp)
+        self._responsef = self._responseexecutor.submit(
+            self._handleresponse, handler, resp
+        )
 
     def close(self):
         if self._closed:
@@ -738,8 +809,9 @@
             # errored, otherwise a result() could wait indefinitely.
             for f in self._futures:
                 if not f.done():
-                    f.set_exception(error.ResponseError(
-                        _('unfulfilled command response')))
+                    f.set_exception(
+                        error.ResponseError(_(b'unfulfilled command response'))
+                    )
 
             self._futures = None
 
@@ -749,22 +821,24 @@
         while handler.readdata(resp):
             pass
 
+
 @interfaceutil.implementer(repository.ipeerv2)
 class httpv2peer(object):
 
     limitedarguments = False
 
-    def __init__(self, ui, repourl, apipath, opener, requestbuilder,
-                 apidescriptor):
+    def __init__(
+        self, ui, repourl, apipath, opener, requestbuilder, apidescriptor
+    ):
         self.ui = ui
         self.apidescriptor = apidescriptor
 
-        if repourl.endswith('/'):
+        if repourl.endswith(b'/'):
             repourl = repourl[:-1]
 
         self._url = repourl
         self._apipath = apipath
-        self._apiurl = '%s/%s' % (repourl, apipath)
+        self._apiurl = b'%s/%s' % (repourl, apipath)
         self._opener = opener
         self._requestbuilder = requestbuilder
 
@@ -786,11 +860,17 @@
         return False
 
     def close(self):
-        self.ui.note(_('(sent %d HTTP requests and %d bytes; '
-                       'received %d bytes in responses)\n') %
-                     (self._opener.requestscount,
-                      self._opener.sentbytescount,
-                      self._opener.receivedbytescount))
+        self.ui.note(
+            _(
+                b'(sent %d HTTP requests and %d bytes; '
+                b'received %d bytes in responses)\n'
+            )
+            % (
+                self._opener.requestscount,
+                self._opener.sentbytescount,
+                self._opener.receivedbytescount,
+            )
+        )
 
     # End of ipeerconnection.
 
@@ -802,16 +882,22 @@
         # version 2 of that command works differently.
 
         # Maps to commands that are available.
-        if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):
+        if name in (
+            b'branchmap',
+            b'getbundle',
+            b'known',
+            b'lookup',
+            b'pushkey',
+        ):
             return True
 
         # Other concepts.
-        if name in ('bundle2'):
+        if name in (b'bundle2',):
             return True
 
         # Alias command-* to presence of command of that name.
-        if name.startswith('command-'):
-            return name[len('command-'):] in self.apidescriptor['commands']
+        if name.startswith(b'command-'):
+            return name[len(b'command-') :] in self.apidescriptor[b'commands']
 
         return False
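
# Sketch of the 'command-*' aliasing above: asking for b'command-heads' is
# answered by checking for b'heads' in the API descriptor (descriptor
# contents invented).
apidescriptor = {b'commands': {b'heads': {}, b'known': {}}}
name = b'command-heads'
assert name.startswith(b'command-')
assert name[len(b'command-'):] in apidescriptor[b'commands']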
 
@@ -820,8 +906,12 @@
             return
 
         raise error.CapabilityError(
-            _('cannot %s; client or remote repository does not support the '
-              '\'%s\' capability') % (purpose, name))
+            _(
+                b'cannot %s; client or remote repository does not support the '
+                b'\'%s\' capability'
+            )
+            % (purpose, name)
+        )
 
     # End of ipeercapabilities.
 
@@ -830,8 +920,15 @@
             return e.callcommand(name, args).result()
 
     def commandexecutor(self):
-        return httpv2executor(self.ui, self._opener, self._requestbuilder,
-                              self._apiurl, self.apidescriptor, self._redirect)
+        return httpv2executor(
+            self.ui,
+            self._opener,
+            self._requestbuilder,
+            self._apiurl,
+            self.apidescriptor,
+            self._redirect,
+        )
+
 
 # Registry of API service names to metadata about peers that handle it.
 #
@@ -845,18 +942,17 @@
 #    Integer priority for the service. If we could choose from multiple
 #    services, we choose the one with the highest priority.
 API_PEERS = {
-    wireprototypes.HTTP_WIREPROTO_V2: {
-        'init': httpv2peer,
-        'priority': 50,
-    },
+    wireprototypes.HTTP_WIREPROTO_V2: {b'init': httpv2peer, b'priority': 50,},
 }
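
# Sketch of how makepeer() (below) would choose among several advertised
# services using this metadata (the second service name is invented).
examplepeers = {
    b'exp-http-v2-0003': {b'init': object, b'priority': 50},
    b'exp-http-v9-9999': {b'init': object, b'priority': 10},
}
preferred = sorted(
    examplepeers, key=lambda x: examplepeers[x][b'priority'], reverse=True
)
assert preferred[0] == b'exp-http-v2-0003'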
 
+
 def performhandshake(ui, url, opener, requestbuilder):
     # The handshake is a request to the capabilities command.
 
     caps = None
+
     def capable(x):
-        raise error.ProgrammingError('should not be called')
+        raise error.ProgrammingError(b'should not be called')
 
     args = {}
 
@@ -865,23 +961,26 @@
     # X-HgProto-* header advertising which serializing formats it supports.
     # We only support the HTTP version 2 transport and CBOR responses for
     # now.
-    advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')
+    advertisev2 = ui.configbool(b'experimental', b'httppeer.advertise-v2')
 
     if advertisev2:
-        args['headers'] = {
+        args[b'headers'] = {
             r'X-HgProto-1': r'cbor',
         }
 
-        args['headers'].update(
-            encodevalueinheaders(' '.join(sorted(API_PEERS)),
-                                 'X-HgUpgrade',
-                                 # We don't know the header limit this early.
-                                 # So make it small.
-                                 1024))
+        args[b'headers'].update(
+            encodevalueinheaders(
+                b' '.join(sorted(API_PEERS)),
+                b'X-HgUpgrade',
+                # We don't know the header limit this early.
+                # So make it small.
+                1024,
+            )
+        )
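
# Simplified sketch of splitting a long value across numbered headers, in
# the spirit of encodevalueinheaders() used above (the real helper also
# budgets for the header name's own length; this version does not).
def splitheadervalue(value, header, limit):
    headers = {}
    for i, start in enumerate(range(0, len(value), limit)):
        headers['%s-%d' % (header, i + 1)] = value[start:start + limit]
    return headers

assert splitheadervalue('exp-http-v2-0003', 'X-HgUpgrade', 8) == {
    'X-HgUpgrade-1': 'exp-http',
    'X-HgUpgrade-2': '-v2-0003',
}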
 
-    req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
-                                           capable, url, 'capabilities',
-                                           args)
+    req, requrl, qs = makev1commandrequest(
+        ui, requestbuilder, caps, capable, url, b'capabilities', args
+    )
     resp = sendrequest(ui, opener, req)
 
     # The server may redirect us to the repo root, stripping the
@@ -897,51 +996,53 @@
     # be a longstanding bug in some server implementations. So we allow a
     # redirect that drops the query string to "just work."
     try:
-        respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
-                                                   compressible=False,
-                                                   allowcbor=advertisev2)
+        respurl, ct, resp = parsev1commandresponse(
+            ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2
+        )
     except RedirectedRepoError as e:
-        req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
-                                               capable, e.respurl,
-                                               'capabilities', args)
+        req, requrl, qs = makev1commandrequest(
+            ui, requestbuilder, caps, capable, e.respurl, b'capabilities', args
+        )
         resp = sendrequest(ui, opener, req)
-        respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
-                                                   compressible=False,
-                                                   allowcbor=advertisev2)
+        respurl, ct, resp = parsev1commandresponse(
+            ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2
+        )
 
     try:
         rawdata = resp.read()
     finally:
         resp.close()
 
-    if not ct.startswith('application/mercurial-'):
-        raise error.ProgrammingError('unexpected content-type: %s' % ct)
+    if not ct.startswith(b'application/mercurial-'):
+        raise error.ProgrammingError(b'unexpected content-type: %s' % ct)
 
     if advertisev2:
-        if ct == 'application/mercurial-cbor':
+        if ct == b'application/mercurial-cbor':
             try:
                 info = cborutil.decodeall(rawdata)[0]
             except cborutil.CBORDecodeError:
-                raise error.Abort(_('error decoding CBOR from remote server'),
-                                  hint=_('try again and consider contacting '
-                                         'the server operator'))
+                raise error.Abort(
+                    _(b'error decoding CBOR from remote server'),
+                    hint=_(
+                        b'try again and consider contacting '
+                        b'the server operator'
+                    ),
+                )
 
         # We got a legacy response. That's fine.
-        elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'):
-            info = {
-                'v1capabilities': set(rawdata.split())
-            }
+        elif ct in (b'application/mercurial-0.1', b'application/mercurial-0.2'):
+            info = {b'v1capabilities': set(rawdata.split())}
 
         else:
             raise error.RepoError(
-                _('unexpected response type from server: %s') % ct)
+                _(b'unexpected response type from server: %s') % ct
+            )
     else:
-        info = {
-            'v1capabilities': set(rawdata.split())
-        }
+        info = {b'v1capabilities': set(rawdata.split())}
 
     return respurl, info
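
# Sketch of the legacy branch above: a v1 capabilities response is a
# space-separated bytes blob (the response value here is invented).
rawdata = b'lookup branchmap known getbundle unbundle=HG10GZ,HG10BZ,HG10UN'
info = {b'v1capabilities': set(rawdata.split())}
assert b'branchmap' in info[b'v1capabilities']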
 
+
 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
     """Construct an appropriate HTTP peer instance.
 
@@ -953,12 +1054,13 @@
     """
     u = util.url(path)
     if u.query or u.fragment:
-        raise error.Abort(_('unsupported URL component: "%s"') %
-                          (u.query or u.fragment))
+        raise error.Abort(
+            _(b'unsupported URL component: "%s"') % (u.query or u.fragment)
+        )
 
     # urllib cannot handle URLs with embedded user or passwd.
     url, authinfo = u.authinfo()
-    ui.debug('using %s\n' % url)
+    ui.debug(b'using %s\n' % url)
 
     opener = opener or urlmod.opener(ui, authinfo)
 
@@ -973,38 +1075,41 @@
     # capabilities, we could filter out services not meeting the
     # requirements. Possibly by consulting the interfaces defined by the
     # peer type.
-    apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())
+    apipeerchoices = set(info.get(b'apis', {}).keys()) & set(API_PEERS.keys())
 
-    preferredchoices = sorted(apipeerchoices,
-                              key=lambda x: API_PEERS[x]['priority'],
-                              reverse=True)
+    preferredchoices = sorted(
+        apipeerchoices, key=lambda x: API_PEERS[x][b'priority'], reverse=True
+    )
 
     for service in preferredchoices:
-        apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)
+        apipath = b'%s/%s' % (info[b'apibase'].rstrip(b'/'), service)
 
-        return API_PEERS[service]['init'](ui, respurl, apipath, opener,
-                                          requestbuilder,
-                                          info['apis'][service])
+        return API_PEERS[service][b'init'](
+            ui, respurl, apipath, opener, requestbuilder, info[b'apis'][service]
+        )
 
     # Failed to construct an API peer. Fall back to legacy.
-    return httppeer(ui, path, respurl, opener, requestbuilder,
-                    info['v1capabilities'])
+    return httppeer(
+        ui, path, respurl, opener, requestbuilder, info[b'v1capabilities']
+    )
+
 
 def instance(ui, path, create, intents=None, createopts=None):
     if create:
-        raise error.Abort(_('cannot create new http repository'))
+        raise error.Abort(_(b'cannot create new http repository'))
     try:
-        if path.startswith('https:') and not urlmod.has_https:
-            raise error.Abort(_('Python support for SSL and HTTPS '
-                                'is not installed'))
+        if path.startswith(b'https:') and not urlmod.has_https:
+            raise error.Abort(
+                _(b'Python support for SSL and HTTPS is not installed')
+            )
 
         inst = makepeer(ui, path)
 
         return inst
     except error.RepoError as httpexception:
         try:
-            r = statichttprepo.instance(ui, "static-" + path, create)
-            ui.note(_('(falling back to static-http)\n'))
+            r = statichttprepo.instance(ui, b"static-" + path, create)
+            ui.note(_(b'(falling back to static-http)\n'))
             return r
         except error.RepoError:
-            raise httpexception # use the original http RepoError instead
+            raise httpexception  # use the original http RepoError instead
--- a/mercurial/i18n.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/i18n.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,6 +12,7 @@
 import os
 import sys
 
+from .pycompat import getattr
 from . import (
     encoding,
     pycompat,
@@ -24,17 +25,20 @@
     module = pycompat.fsencode(__file__)
 
 _languages = None
-if (pycompat.iswindows
-    and 'LANGUAGE' not in encoding.environ
-    and 'LC_ALL' not in encoding.environ
-    and 'LC_MESSAGES' not in encoding.environ
-    and 'LANG' not in encoding.environ):
+if (
+    pycompat.iswindows
+    and b'LANGUAGE' not in encoding.environ
+    and b'LC_ALL' not in encoding.environ
+    and b'LC_MESSAGES' not in encoding.environ
+    and b'LANG' not in encoding.environ
+):
     # Try to detect UI language by "User Interface Language Management" API
     # if no locale variables are set. Note that locale.getdefaultlocale()
     # uses GetLocaleInfo(), which may be different from UI language.
     # (See http://msdn.microsoft.com/en-us/library/dd374098(v=VS.85).aspx )
     try:
         import ctypes
+
         langid = ctypes.windll.kernel32.GetUserDefaultUILanguage()
         _languages = [locale.windows_locale[langid]]
     except (ImportError, AttributeError, KeyError):
@@ -43,6 +47,7 @@
 
 _ugettext = None
 
+
 def setdatapath(datapath):
     datapath = pycompat.fsdecode(datapath)
     localedir = os.path.join(datapath, r'locale')
@@ -53,8 +58,10 @@
     except AttributeError:
         _ugettext = t.gettext
 
+
 _msgcache = {}  # encoding: {message: translation}
 
+
 def gettext(message):
     """Translate message.
 
@@ -77,7 +84,7 @@
         else:
             # should be ascii, but we have unicode docstrings in test, which
             # are converted to utf-8 bytes on Python 3.
-            paragraphs = [p.decode("utf-8") for p in message.split('\n\n')]
+            paragraphs = [p.decode("utf-8") for p in message.split(b'\n\n')]
         # Be careful not to translate the empty string -- it holds the
         # meta data of the .po file.
         u = u'\n\n'.join([p and _ugettext(p) or u'' for p in paragraphs])
@@ -94,12 +101,16 @@
             cache[message] = message
     return cache[message]
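
# Sketch of the paragraph-wise translation above, using a stub catalog; the
# empty string is skipped because it holds the .po metadata.
def _stubgettext(p):
    return {u'hello': u'bonjour'}.get(p, p)

message = b'hello\n\nworld'
paragraphs = [p.decode("utf-8") for p in message.split(b'\n\n')]
translated = u'\n\n'.join([p and _stubgettext(p) or u'' for p in paragraphs])
assert translated == u'bonjour\n\nworld'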
 
+
 def _plain():
-    if ('HGPLAIN' not in encoding.environ
-        and 'HGPLAINEXCEPT' not in encoding.environ):
+    if (
+        b'HGPLAIN' not in encoding.environ
+        and b'HGPLAINEXCEPT' not in encoding.environ
+    ):
         return False
-    exceptions = encoding.environ.get('HGPLAINEXCEPT', '').strip().split(',')
-    return 'i18n' not in exceptions
+    exceptions = encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
+    return b'i18n' not in exceptions
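
# Sketch of the check above under an assumed environment: HGPLAIN is set,
# but listing 'i18n' in HGPLAINEXCEPT keeps translation enabled.
environ = {b'HGPLAIN': b'1', b'HGPLAINEXCEPT': b'i18n,alias'}
exceptions = environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
assert b'i18n' in exceptions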
+
 
 if _plain():
     _ = lambda message: message
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/interfaces/dirstate.py	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,244 @@
+from __future__ import absolute_import, print_function
+
+import contextlib
+
+from .. import node as nodemod
+
+from . import util as interfaceutil
+
+
+class idirstate(interfaceutil.Interface):
+    def __init__(opener, ui, root, validate, sparsematchfn):
+        '''Create a new dirstate object.
+
+        opener is an open()-like callable that can be used to open the
+        dirstate file; root is the root of the directory tracked by
+        the dirstate.
+        '''
+
+    # TODO: all these private methods and attributes should be made
+    # public or removed from the interface.
+    _ignore = interfaceutil.Attribute("""Matcher for ignored files.""")
+
+    def _ignorefiles():
+        """Return a list of files containing patterns to ignore."""
+
+    def _ignorefileandline(f):
+        b"Given a file `f`, return the ignore file and line that ignores it."
+
+    _checklink = interfaceutil.Attribute("""Callable for checking symlinks.""")
+    _checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""")
+
+    @contextlib.contextmanager
+    def parentchange():
+        '''Context manager for handling dirstate parents.
+
+        If an exception occurs in the scope of the context manager,
+        the incoherent dirstate won't be written when wlock is
+        released.
+        '''
+
+    def pendingparentchange():
+        '''Returns true if the dirstate is in the middle of a set of changes
+        that modify the dirstate parent.
+        '''
+
+    def hasdir(d):
+        pass
+
+    def flagfunc(buildfallback):
+        pass
+
+    def getcwd():
+        '''Return the path from which a canonical path is calculated.
+
+        This path should be used to resolve file patterns or to convert
+        canonical paths back to file paths for display. It shouldn't be
+        used to get real file paths. Use vfs functions instead.
+        '''
+
+    def pathto(f, cwd=None):
+        pass
+
+    def __getitem__(key):
+        '''Return the current state of key (a filename) in the dirstate.
+
+        States are:
+          n  normal
+          m  needs merging
+          r  marked for removal
+          a  marked for addition
+          ?  not tracked
+        '''
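
# The state characters documented above, as a lookup table (illustrative):
DIRSTATE_STATES = {
    b'n': 'normal',
    b'm': 'needs merging',
    b'r': 'marked for removal',
    b'a': 'marked for addition',
    b'?': 'not tracked',
}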
+
+    def __contains__(key):
+        """Check if bytestring `key` is known to the dirstate."""
+
+    def __iter__():
+        """Iterate the dirstate's contained filenames as bytestrings."""
+
+    def items():
+        """Iterate the dirstate's entries as (filename, dirstatetuple).
+
+        As usual, filename is a bytestring.
+        """
+
+    iteritems = items
+
+    def parents():
+        pass
+
+    def p1():
+        pass
+
+    def p2():
+        pass
+
+    def branch():
+        pass
+
+    def setparents(p1, p2=nodemod.nullid):
+        """Set dirstate parents to p1 and p2.
+
+        When moving from two parents to one, 'm' merged entries are
+        adjusted to normal, and previous copy records are discarded and
+        returned by the call.
+
+        See localrepo.setparents()
+        """
+
+    def setbranch(branch):
+        pass
+
+    def invalidate():
+        '''Causes the next access to reread the dirstate.
+
+        This is different from localrepo.invalidatedirstate() because it always
+        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
+        check whether the dirstate has changed before rereading it.'''
+
+    def copy(source, dest):
+        """Mark dest as a copy of source. Unmark dest if source is None."""
+
+    def copied(file):
+        pass
+
+    def copies():
+        pass
+
+    def normal(f, parentfiledata=None):
+        '''Mark a file normal and clean.
+
+        parentfiledata: (mode, size, mtime) of the clean file
+
+        parentfiledata should be computed from memory (for mode and
+        size), at or as close as possible to the point where we
+        determined the file was clean, to limit the risk of the
+        file having been changed by an external process between the
+        moment it was determined to be clean and now.'''
+        pass
+
+    def normallookup(f):
+        '''Mark a file normal, but possibly dirty.'''
+
+    def otherparent(f):
+        '''Mark as coming from the other parent, always dirty.'''
+
+    def add(f):
+        '''Mark a file added.'''
+
+    def remove(f):
+        '''Mark a file removed.'''
+
+    def merge(f):
+        '''Mark a file merged.'''
+
+    def drop(f):
+        '''Drop a file from the dirstate'''
+
+    def normalize(path, isknown=False, ignoremissing=False):
+        '''
+        normalize the case of a pathname when on a casefolding filesystem
+
+        isknown specifies whether the filename came from walking the
+        disk, to avoid extra filesystem access.
+
+        If ignoremissing is True, missing paths are returned
+        unchanged. Otherwise, we try harder to normalize possibly
+        existing path components.
+
+        The normalized case is determined based on the following precedence:
+
+        - version of name already stored in the dirstate
+        - version of name stored on disk
+        - version provided via command arguments
+        '''
+
+    def clear():
+        pass
+
+    def rebuild(parent, allfiles, changedfiles=None):
+        pass
+
+    def identity():
+        '''Return identity of the dirstate itself to detect changes in storage.
+
+        If the identity of the previous dirstate is equal to this one's,
+        writing out changes based on the former dirstate preserves
+        consistency.
+        '''
+
+    def write(tr):
+        pass
+
+    def addparentchangecallback(category, callback):
+        """add a callback to be called when the wd parents are changed
+
+        Callback will be called with the following arguments:
+            dirstate, (oldp1, oldp2), (newp1, newp2)
+
+        Category is a unique identifier to allow overwriting an old callback
+        with a newer callback.
+        """
+
+    def walk(match, subrepos, unknown, ignored, full=True):
+        '''
+        Walk recursively through the directory tree, finding all files
+        matched by match.
+
+        If full is False, maybe skip some known-clean files.
+
+        Return a dict mapping filename to stat-like object (either
+        mercurial.osutil.stat instance or return value of os.stat()).
+
+        '''
+
+    def status(match, subrepos, ignored, clean, unknown):
+        '''Determine the status of the working copy relative to the
+        dirstate and return a pair of (unsure, status), where status is of type
+        scmutil.status and:
+
+          unsure:
+            files that might have been modified since the dirstate was
+            written, but need to be read to be sure (size is the same
+            but mtime differs)
+          status.modified:
+            files that have definitely been modified since the dirstate
+            was written (different size or mode)
+          status.clean:
+            files that have definitely not been modified since the
+            dirstate was written
+        '''
+
+    def matches(match):
+        '''
+        return files in the dirstate (in whatever state) filtered by match
+        '''
+
+    def savebackup(tr, backupname):
+        '''Save current dirstate into backup file'''
+
+    def restorebackup(tr, backupname):
+        '''Restore dirstate by backup file'''
+
+    def clearbackup(tr, backupname):
+        '''Clear backup file'''
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/interfaces/repository.py	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,1984 @@
+# repository.py - Interfaces and base classes for repositories and peers.
+#
+# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from ..i18n import _
+from .. import error
+from . import util as interfaceutil
+
+# When narrowing is finalized and no longer subject to format changes,
+# we should move this to just "narrow" or similar.
+NARROW_REQUIREMENT = b'narrowhg-experimental'
+
+# Local repository feature string.
+
+# Revlogs are being used for file storage.
+REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
+# The storage part of the repository is shared from an external source.
+REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
+# LFS supported for backing file storage.
+REPO_FEATURE_LFS = b'lfs'
+# Repository supports being stream cloned.
+REPO_FEATURE_STREAM_CLONE = b'streamclone'
+# Files storage may lack data for all ancestors.
+REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
+
+REVISION_FLAG_CENSORED = 1 << 15
+REVISION_FLAG_ELLIPSIS = 1 << 14
+REVISION_FLAG_EXTSTORED = 1 << 13
+REVISION_FLAG_SIDEDATA = 1 << 12
+
+REVISION_FLAGS_KNOWN = (
+    REVISION_FLAG_CENSORED
+    | REVISION_FLAG_ELLIPSIS
+    | REVISION_FLAG_EXTSTORED
+    | REVISION_FLAG_SIDEDATA
+)
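
# Sketch: masking with ~REVISION_FLAGS_KNOWN exposes any flag bits this
# code does not understand.
_exampleflags = REVISION_FLAG_CENSORED | REVISION_FLAG_SIDEDATA
assert not _exampleflags & ~REVISION_FLAGS_KNOWN, 'unexpected revision flags'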
+
+CG_DELTAMODE_STD = b'default'
+CG_DELTAMODE_PREV = b'previous'
+CG_DELTAMODE_FULL = b'fulltext'
+CG_DELTAMODE_P1 = b'p1'
+
+
+class ipeerconnection(interfaceutil.Interface):
+    """Represents a "connection" to a repository.
+
+    This is the base interface for representing a connection to a repository.
+    It holds basic properties and methods applicable to all peer types.
+
+    This is not a complete interface definition and should not be used
+    outside of this module.
+    """
+
+    ui = interfaceutil.Attribute("""ui.ui instance""")
+
+    def url():
+        """Returns a URL string representing this peer.
+
+        Currently, implementations expose the raw URL used to construct the
+        instance. It may contain credentials as part of the URL. The
+        expectations of the value aren't well-defined and this could lead to
+        data leakage.
+
+        TODO audit/clean consumers and more clearly define the contents of this
+        value.
+        """
+
+    def local():
+        """Returns a local repository instance.
+
+        If the peer represents a local repository, returns an object that
+        can be used to interface with it. Otherwise returns ``None``.
+        """
+
+    def peer():
+        """Returns an object conforming to this interface.
+
+        Most implementations will ``return self``.
+        """
+
+    def canpush():
+        """Returns a boolean indicating if this peer can be pushed to."""
+
+    def close():
+        """Close the connection to this peer.
+
+        This is called when the peer will no longer be used. Resources
+        associated with the peer should be cleaned up.
+        """
+
+
+class ipeercapabilities(interfaceutil.Interface):
+    """Peer sub-interface related to capabilities."""
+
+    def capable(name):
+        """Determine support for a named capability.
+
+        Returns ``False`` if capability not supported.
+
+        Returns ``True`` if boolean capability is supported. Returns a string
+        if capability support is non-boolean.
+
+        Capability strings may or may not map to wire protocol capabilities.
+        """
+
+    def requirecap(name, purpose):
+        """Require a capability to be present.
+
+        Raises a ``CapabilityError`` if the capability isn't present.
+        """
+
+
+class ipeercommands(interfaceutil.Interface):
+    """Client-side interface for communicating over the wire protocol.
+
+    This interface is used as a gateway to the Mercurial wire protocol.
+    Methods commonly call wire protocol commands of the same name.
+    """
+
+    def branchmap():
+        """Obtain heads in named branches.
+
+        Returns a dict mapping branch name to an iterable of nodes that are
+        heads on that branch.
+        """
+
+    def capabilities():
+        """Obtain capabilities of the peer.
+
+        Returns a set of string capabilities.
+        """
+
+    def clonebundles():
+        """Obtains the clone bundles manifest for the repo.
+
+        Returns the manifest as unparsed bytes.
+        """
+
+    def debugwireargs(one, two, three=None, four=None, five=None):
+        """Used to facilitate debugging of arguments passed over the wire."""
+
+    def getbundle(source, **kwargs):
+        """Obtain remote repository data as a bundle.
+
+        This command is how the bulk of repository data is transferred from
+        the peer to the local repository.
+
+        Returns a generator of bundle data.
+        """
+
+    def heads():
+        """Determine all known head revisions in the peer.
+
+        Returns an iterable of binary nodes.
+        """
+
+    def known(nodes):
+        """Determine whether multiple nodes are known.
+
+        Accepts an iterable of nodes whose presence to check for.
+
+        Returns an iterable of booleans indicating whether the
+        corresponding node at that index is known to the peer.
+        """
+
+    def listkeys(namespace):
+        """Obtain all keys in a pushkey namespace.
+
+        Returns an iterable of key names.
+        """
+
+    def lookup(key):
+        """Resolve a value to a known revision.
+
+        Returns a binary node of the resolved revision on success.
+        """
+
+    def pushkey(namespace, key, old, new):
+        """Set a value using the ``pushkey`` protocol.
+
+        Arguments correspond to the pushkey namespace and key to operate on and
+        the old and new values for that key.
+
+        Returns a string with the peer result. The value inside varies by the
+        namespace.
+        """
+
+    def stream_out():
+        """Obtain streaming clone data.
+
+        Successful result should be a generator of data chunks.
+        """
+
+    def unbundle(bundle, heads, url):
+        """Transfer repository data to the peer.
+
+        This is how the bulk of data during a push is transferred.
+
+        Returns the integer number of heads added to the peer.
+        """
+
+
+class ipeerlegacycommands(interfaceutil.Interface):
+    """Interface for implementing support for legacy wire protocol commands.
+
+    Wire protocol commands transition to legacy status when they are no longer
+    used by modern clients. To facilitate identifying which commands are
+    legacy, the interfaces are split.
+    """
+
+    def between(pairs):
+        """Obtain nodes between pairs of nodes.
+
+        ``pairs`` is an iterable of node pairs.
+
+        Returns an iterable of iterables of nodes corresponding to each
+        requested pair.
+        """
+
+    def branches(nodes):
+        """Obtain ancestor changesets of specific nodes back to a branch point.
+
+        For each requested node, the peer finds the first ancestor node that is
+        a DAG root or is a merge.
+
+        Returns an iterable of iterables with the resolved values for each node.
+        """
+
+    def changegroup(nodes, source):
+        """Obtain a changegroup with data for descendants of specified nodes."""
+
+    def changegroupsubset(bases, heads, source):
+        pass
+
+
+class ipeercommandexecutor(interfaceutil.Interface):
+    """Represents a mechanism to execute remote commands.
+
+    This is the primary interface for requesting that wire protocol commands
+    be executed. Instances of this interface are active in a context manager
+    and have a well-defined lifetime. When the context manager exits, all
+    outstanding requests are waited on.
+    """
+
+    def callcommand(name, args):
+        """Request that a named command be executed.
+
+        Receives the command name and a dictionary of command arguments.
+
+        Returns a ``concurrent.futures.Future`` that will resolve to the
+        result of that command request. That exact value is left up to
+        the implementation and possibly varies by command.
+
+        Not all commands can coexist with other commands in an executor
+        instance: it depends on the underlying wire protocol transport being
+        used and the command itself.
+
+        Implementations MAY call ``sendcommands()`` automatically if the
+        requested command can not coexist with other commands in this executor.
+
+        Implementations MAY call ``sendcommands()`` automatically when the
+        future's ``result()`` is called. So, consumers using multiple
+        commands with an executor MUST ensure that ``result()`` is not called
+        until all command requests have been issued.
+        """
+
+    def sendcommands():
+        """Trigger submission of queued command requests.
+
+        Not all transports submit commands as soon as they are requested to
+        run. When called, this method forces queued command requests to be
+        issued. It will no-op if all commands have already been sent.
+
+        When called, no more new commands may be issued with this executor.
+        """
+
+    def close():
+        """Signal that this command request is finished.
+
+        When called, no more new commands may be issued. All outstanding
+        commands that have previously been issued are waited on before
+        returning. This not only includes waiting for the futures to resolve,
+        but also waiting for all response data to arrive. In other words,
+        calling this waits for all on-wire state for issued command requests
+        to finish.
+
+        When used as a context manager, this method is called when exiting the
+        context manager.
+
+        This method may call ``sendcommands()`` if there are buffered commands.
+        """
+
+
+class ipeerrequests(interfaceutil.Interface):
+    """Interface for executing commands on a peer."""
+
+    limitedarguments = interfaceutil.Attribute(
+        """True if the peer cannot receive large argument value for commands."""
+    )
+
+    def commandexecutor():
+        """A context manager that resolves to an ipeercommandexecutor.
+
+        The object this resolves to can be used to issue command requests
+        to the peer.
+
+        Callers should call its ``callcommand`` method to issue command
+        requests.
+
+        A new executor should be obtained for each distinct set of commands
+        (possibly just a single command) that the consumer wants to execute
+        as part of a single operation or round trip. This is because some
+        peers are half-duplex and/or don't support persistent connections.
+        e.g. in the case of HTTP peers, commands sent to an executor represent
+        a single HTTP request. While some peers may support multiple command
+        sends over the wire per executor, consumers need to code to the least
+        capable peer. So it should be assumed that command executors buffer
+        called commands until they are told to send them and that each
+        command executor could result in a new connection or wire-level request
+        being issued.
+        """
+
+
+class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
+    """Unified interface for peer repositories.
+
+    All peer instances must conform to this interface.
+    """
+
+
+class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
+    """Unified peer interface for wire protocol version 2 peers."""
+
+    apidescriptor = interfaceutil.Attribute(
+        """Data structure holding description of server API."""
+    )
+
+
+@interfaceutil.implementer(ipeerbase)
+class peer(object):
+    """Base class for peer repositories."""
+
+    limitedarguments = False
+
+    def capable(self, name):
+        caps = self.capabilities()
+        if name in caps:
+            return True
+
+        name = b'%s=' % name
+        for cap in caps:
+            if cap.startswith(name):
+                return cap[len(name) :]
+
+        return False
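
# Usage sketch for capable() above, over an invented capability set:
# boolean capabilities return True; 'name=value' capabilities return the
# value after the '='.
caps = {b'branchmap', b'unbundle=HG10GZ,HG10UN'}

def _capable(name):
    if name in caps:
        return True
    prefix = b'%s=' % name
    for cap in caps:
        if cap.startswith(prefix):
            return cap[len(prefix):]
    return False

assert _capable(b'branchmap') is True
assert _capable(b'unbundle') == b'HG10GZ,HG10UN'
assert _capable(b'missing') is False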
+
+    def requirecap(self, name, purpose):
+        if self.capable(name):
+            return
+
+        raise error.CapabilityError(
+            _(
+                b'cannot %s; remote repository does not support the '
+                b'\'%s\' capability'
+            )
+            % (purpose, name)
+        )
+
+
+class iverifyproblem(interfaceutil.Interface):
+    """Represents a problem with the integrity of the repository.
+
+    Instances of this interface are emitted to describe an integrity issue
+    with a repository (e.g. corrupt storage, missing data, etc).
+
+    Instances are essentially messages associated with severity.
+    """
+
+    warning = interfaceutil.Attribute(
+        """Message indicating a non-fatal problem."""
+    )
+
+    error = interfaceutil.Attribute("""Message indicating a fatal problem.""")
+
+    node = interfaceutil.Attribute(
+        """Revision encountering the problem.
+
+        ``None`` means the problem doesn't apply to a single revision.
+        """
+    )
+
+
+class irevisiondelta(interfaceutil.Interface):
+    """Represents a delta between one revision and another.
+
+    Instances convey enough information to allow a revision to be exchanged
+    with another repository.
+
+    Instances represent the fulltext revision data or a delta against
+    another revision. Therefore the ``revision`` and ``delta`` attributes
+    are mutually exclusive.
+
+    Typically used for changegroup generation.
+    """
+
+    node = interfaceutil.Attribute("""20 byte node of this revision.""")
+
+    p1node = interfaceutil.Attribute(
+        """20 byte node of 1st parent of this revision."""
+    )
+
+    p2node = interfaceutil.Attribute(
+        """20 byte node of 2nd parent of this revision."""
+    )
+
+    linknode = interfaceutil.Attribute(
+        """20 byte node of the changelog revision this node is linked to."""
+    )
+
+    flags = interfaceutil.Attribute(
+        """2 bytes of integer flags that apply to this revision.
+
+        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
+        """
+    )
+
+    basenode = interfaceutil.Attribute(
+        """20 byte node of the revision this data is a delta against.
+
+        ``nullid`` indicates that the revision is a full revision and not
+        a delta.
+        """
+    )
+
+    baserevisionsize = interfaceutil.Attribute(
+        """Size of base revision this delta is against.
+
+        May be ``None`` if ``basenode`` is ``nullid``.
+        """
+    )
+
+    revision = interfaceutil.Attribute(
+        """Raw fulltext of revision data for this node."""
+    )
+
+    delta = interfaceutil.Attribute(
+        """Delta between ``basenode`` and ``node``.
+
+        Stored in the bdiff delta format.
+        """
+    )
+
+
+class ifilerevisionssequence(interfaceutil.Interface):
+    """Contains index data for all revisions of a file.
+
+    Types implementing this behave like lists of tuples. The index
+    in the list corresponds to the revision number. The values contain
+    index metadata.
+
+    The *null* revision (revision number -1) is always the last item
+    in the index.
+    """
+
+    def __len__():
+        """The total number of revisions."""
+
+    def __getitem__(rev):
+        """Returns the object having a specific revision number.
+
+        Returns an 8-tuple with the following fields:
+
+        offset+flags
+           Contains the offset and flags for the revision. 64-bit unsigned
+           integer where first 6 bytes are the offset and the next 2 bytes
+           are flags. The offset can be 0 if it is not used by the store.
+        compressed size
+            Size of the revision data in the store. It can be 0 if it isn't
+            needed by the store.
+        uncompressed size
+            Fulltext size. It can be 0 if it isn't needed by the store.
+        base revision
+            Revision number of revision the delta for storage is encoded
+            against. -1 indicates not encoded against a base revision.
+        link revision
+            Revision number of changelog revision this entry is related to.
+        p1 revision
+            Revision number of 1st parent. -1 if no 1st parent.
+        p2 revision
+            Revision number of 2nd parent. -1 if no 2nd parent.
+        node
+            Binary node value for this revision number.
+
+        Negative values should index off the end of the sequence. ``-1``
+        should return the null revision. ``-2`` should return the most
+        recent revision.
+        """
+
+    def __contains__(rev):
+        """Whether a revision number exists."""
+
+    def insert(i, entry):
+        """Add an item to the index at specific revision."""
+
+
+class ifileindex(interfaceutil.Interface):
+    """Storage interface for index data of a single file.
+
+    File storage data is divided into index metadata and data storage.
+    This interface defines the index portion of the interface.
+
+    The index logically consists of:
+
+    * A mapping between revision numbers and nodes.
+    * DAG data (storing and querying the relationship between nodes).
+    * Metadata to facilitate storage.
+    """
+
+    def __len__():
+        """Obtain the number of revisions stored for this file."""
+
+    def __iter__():
+        """Iterate over revision numbers for this file."""
+
+    def hasnode(node):
+        """Returns a bool indicating if a node is known to this store.
+
+        Implementations must only return True for full, binary node values:
+        hex nodes, revision numbers, and partial node matches must be
+        rejected.
+
+        The null node is never present.
+        """
+
+    def revs(start=0, stop=None):
+        """Iterate over revision numbers for this file, with control."""
+
+    def parents(node):
+        """Returns a 2-tuple of parent nodes for a revision.
+
+        Values will be ``nullid`` if the parent is empty.
+        """
+
+    def parentrevs(rev):
+        """Like parents() but operates on revision numbers."""
+
+    def rev(node):
+        """Obtain the revision number given a node.
+
+        Raises ``error.LookupError`` if the node is not known.
+        """
+
+    def node(rev):
+        """Obtain the node value given a revision number.
+
+        Raises ``IndexError`` if the node is not known.
+        """
+
+    def lookup(node):
+        """Attempt to resolve a value to a node.
+
+        Value can be a binary node, hex node, revision number, or a string
+        that can be converted to an integer.
+
+        Raises ``error.LookupError`` if a node could not be resolved.
+        """
+
+    def linkrev(rev):
+        """Obtain the changeset revision number a revision is linked to."""
+
+    def iscensored(rev):
+        """Return whether a revision's content has been censored."""
+
+    def commonancestorsheads(node1, node2):
+        """Obtain an iterable of nodes containing heads of common ancestors.
+
+        See ``ancestor.commonancestorsheads()``.
+        """
+
+    def descendants(revs):
+        """Obtain descendant revision numbers for a set of revision numbers.
+
+        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
+        """
+
+    def heads(start=None, stop=None):
+        """Obtain a list of nodes that are DAG heads, with control.
+
+        The set of revisions examined can be limited by specifying
+        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
+        iterable of nodes. DAG traversal starts at earlier revision
+        ``start`` and iterates forward until any node in ``stop`` is
+        encountered.
+        """
+
+    def children(node):
+        """Obtain nodes that are children of a node.
+
+        Returns a list of nodes.
+        """
+
+
+class ifiledata(interfaceutil.Interface):
+    """Storage interface for data storage of a specific file.
+
+    This complements ``ifileindex`` and provides an interface for accessing
+    data for a tracked file.
+    """
+
+    def size(rev):
+        """Obtain the fulltext size of file data.
+
+        Any metadata is excluded from size measurements.
+        """
+
+    def revision(node, raw=False):
+        """"Obtain fulltext data for a node.
+
+        By default, any storage transformations are applied before the data
+        is returned. If ``raw`` is True, non-raw storage transformations
+        are not applied.
+
+        The fulltext data may contain a header containing metadata. Most
+        consumers should use ``read()`` to obtain the actual file data.
+        """
+
+    def rawdata(node):
+        """Obtain raw data for a node.
+        """
+
+    def read(node):
+        """Resolve file fulltext data.
+
+        This is similar to ``revision()`` except any metadata in the data
+        headers is stripped.
+        """
+
+    def renamed(node):
+        """Obtain copy metadata for a node.
+
+        Returns ``False`` if no copy metadata is stored or a 2-tuple of
+        (path, node) from which this revision was copied.
+        """
+
+    def cmp(node, fulltext):
+        """Compare fulltext to another revision.
+
+        Returns True if the fulltext is different from what is stored.
+
+        This takes copy metadata into account.
+
+        TODO better document the copy metadata and censoring logic.
+        """
+
+    def emitrevisions(
+        nodes,
+        nodesorder=None,
+        revisiondata=False,
+        assumehaveparentrevisions=False,
+        deltamode=CG_DELTAMODE_STD,
+    ):
+        """Produce ``irevisiondelta`` for revisions.
+
+        Given an iterable of nodes, emits objects conforming to the
+        ``irevisiondelta`` interface that describe revisions in storage.
+
+        This method is a generator.
+
+        The input nodes may be unordered. Implementations must ensure that a
+        node's parents are emitted before the node itself. Transitively, this
+        means that a node may only be emitted once all its ancestors in
+        ``nodes`` have also been emitted.
+
+        By default, emits "index" data (the ``node``, ``p1node``, and
+        ``p2node`` attributes). If ``revisiondata`` is set, revision data
+        will also be present on the emitted objects.
+
+        With default argument values, implementations can choose to emit
+        either fulltext revision data or a delta. When emitting deltas,
+        implementations must consider whether the delta's base revision
+        fulltext is available to the receiver.
+
+        The base revision fulltext is guaranteed to be available if any of
+        the following are met:
+
+        * Its fulltext revision was emitted by this method call.
+        * A delta for that revision was emitted by this method call.
+        * ``assumehaveparentrevisions`` is True and the base revision is a
+          parent of the node.
+
+        ``nodesorder`` can be used to control the order that revisions are
+        emitted. By default, revisions can be reordered as long as they are
+        in DAG topological order (see above). If the value is ``nodes``,
+        the iteration order from ``nodes`` should be used. If the value is
+        ``storage``, then the native order from the backing storage layer
+        is used. (Not all storage layers have strong ordering, and the
+        behavior of this mode is storage-dependent.) ``nodes`` ordering can
+        force revisions to be emitted before their ancestors, so consumers
+        should use it with care.
+
+        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
+        be set and it is the caller's responsibility to resolve it, if needed.
+
+        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
+        all revision data should be emitted as deltas against the revision
+        emitted just prior. The initial revision should be a delta against its
+        1st parent.
+        """
+
+
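+# An illustrative sketch (not part of the interface): one way a consumer
+# might drive ``ifiledata.emitrevisions()``. ``store`` is assumed to
+# conform to ``ifiledata``; the function name is hypothetical.
+def _example_emitfulltexts(store, nodes):
+    # Ask for revision data and let the backend pick any DAG-topological
+    # order it likes.
+    for delta in store.emitrevisions(nodes, revisiondata=True):
+        # ``revision`` is set when the backend emitted a fulltext;
+        # otherwise ``delta`` holds mpatch data against ``basenode``.
+        if delta.revision is not None:
+            yield delta.node, delta.revision
+
+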
+class ifilemutation(interfaceutil.Interface):
+    """Storage interface for mutation events of a tracked file."""
+
+    def add(filedata, meta, transaction, linkrev, p1, p2):
+        """Add a new revision to the store.
+
+        Takes file data, dictionary of metadata, a transaction, linkrev,
+        and parent nodes.
+
+        Returns the node that was added.
+
+        May no-op if a revision matching the supplied data is already stored.
+        """
+
+    def addrevision(
+        revisiondata,
+        transaction,
+        linkrev,
+        p1,
+        p2,
+        node=None,
+        flags=0,
+        cachedelta=None,
+    ):
+        """Add a new revision to the store.
+
+        This is similar to ``add()`` except it operates at a lower level.
+
+        The data passed in already contains a metadata header, if any.
+
+        ``node`` and ``flags`` can be used to define the expected node and
+        the flags to use with storage. ``flags`` is a bitwise value composed
+        of the various ``REVISION_FLAG_*`` constants.
+
+        ``add()`` is usually called when adding files from e.g. the working
+        directory. ``addrevision()`` is often called by ``add()`` and for
+        scenarios where revision data has already been computed, such as when
+        applying raw data from a peer repo.
+        """
+
+    def addgroup(
+        deltas,
+        linkmapper,
+        transaction,
+        addrevisioncb=None,
+        maybemissingparents=False,
+    ):
+        """Process a series of deltas for storage.
+
+        ``deltas`` is an iterable of 7-tuples of
+        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
+        to add.
+
+        The ``delta`` field contains ``mpatch`` data to apply to a base
+        revision, identified by ``deltabase``. The base node can be
+        ``nullid``, in which case the header from the delta can be ignored
+        and the delta used as the fulltext.
+
+        ``addrevisioncb`` should be called for each node as it is committed.
+
+        ``maybemissingparents`` is a bool indicating whether the incoming
+        data may reference parents/ancestor revisions that aren't present.
+        This flag is set when receiving data into a "shallow" store that
+        doesn't hold all history.
+
+        Returns a list of nodes that were processed. A node will be in the list
+        even if it existed in the store previously.
+        """
+
+    def censorrevision(tr, node, tombstone=b''):
+        """Remove the content of a single revision.
+
+        The specified ``node`` will have its content purged from storage.
+        Future attempts to access the revision data for this node will
+        result in failure.
+
+        A ``tombstone`` message can optionally be stored. This message may be
+        displayed to users when they attempt to access the missing revision
+        data.
+
+        Storage backends may have stored deltas against the previous content
+        in this revision. As part of censoring a revision, these storage
+        backends are expected to rewrite any internally stored deltas such
+        that they no longer reference the deleted content.
+        """
+
+    def getstrippoint(minlink):
+        """Find the minimum revision that must be stripped to strip a linkrev.
+
+        Returns a 2-tuple containing the minimum revision number and a set
+        of all revision numbers that would be broken by this strip.
+
+        TODO this is highly revlog centric and should be abstracted into
+        a higher-level deletion API. ``repair.strip()`` relies on this.
+        """
+
+    def strip(minlink, transaction):
+        """Remove storage of items starting at a linkrev.
+
+        This uses ``getstrippoint()`` to determine the first node to remove.
+        Then it effectively truncates storage for all revisions after that.
+
+        TODO this is highly revlog centric and should be abstracted into a
+        higher-level deletion API.
+        """
+
+
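+# An illustrative sketch (not part of the interface): building the
+# 7-tuples that ``ifilemutation.addgroup()`` consumes. ``revisions`` is a
+# hypothetical iterable of (node, p1, p2, linknode, fulltext); deltas are
+# computed against the previously emitted revision.
+def _example_builddeltas(revisions, basenode, basetext):
+    from .. import mdiff  # mdiff.textdiff() emits mpatch-style deltas
+
+    for node, p1, p2, linknode, data in revisions:
+        delta = mdiff.textdiff(basetext, data)
+        # flags=0: no REVISION_FLAG_* processing needed for this revision.
+        yield (node, p1, p2, linknode, basenode, delta, 0)
+        basenode, basetext = node, data
+
+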
+class ifilestorage(ifileindex, ifiledata, ifilemutation):
+    """Complete storage interface for a single tracked file."""
+
+    def files():
+        """Obtain paths that are backing storage for this file.
+
+        TODO this is used heavily by verify code and there should probably
+        be a better API for that.
+        """
+
+    def storageinfo(
+        exclusivefiles=False,
+        sharedfiles=False,
+        revisionscount=False,
+        trackedsize=False,
+        storedsize=False,
+    ):
+        """Obtain information about storage for this file's data.
+
+        Returns a dict describing storage for this tracked path. The keys
+        in the dict map to arguments of the same name. The arguments are bools
+        indicating whether to calculate and obtain that data.
+
+        exclusivefiles
+           Iterable of (vfs, path) describing files that are exclusively
+           used to back storage for this tracked path.
+
+        sharedfiles
+           Iterable of (vfs, path) describing files that are used to back
+           storage for this tracked path. Those files may also provide storage
+           for other stored entities.
+
+        revisionscount
+           Number of revisions available for retrieval.
+
+        trackedsize
+           Total size in bytes of all tracked revisions. This is a sum of the
+           length of the fulltext of all revisions.
+
+        storedsize
+           Total size in bytes used to store data for all tracked revisions.
+           This is commonly less than ``trackedsize`` due to internal usage
+           of deltas rather than fulltext revisions.
+
+        Not all storage backends may support all queries or have a reasonable
+        value to use. In that case, the value should be set to ``None`` and
+        callers are expected to handle this special value.
+        """
+
+    def verifyintegrity(state):
+        """Verifies the integrity of file storage.
+
+        ``state`` is a dict holding state of the verifier process. It can be
+        used to communicate data between invocations of multiple storage
+        primitives.
+
+        If individual revisions cannot have their revision content resolved,
+        the method is expected to set the ``skipread`` key to a set of nodes
+        that encountered problems.
+
+        The method yields objects conforming to the ``iverifyproblem``
+        interface.
+        """
+
+
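+# An illustrative sketch (not part of the interface): querying
+# ``ifilestorage.storageinfo()`` while tolerating backends that report
+# ``None`` for queries they cannot answer. The dict keys are assumed to
+# be the bytes names of the corresponding arguments.
+def _example_sizes(store):
+    info = store.storageinfo(trackedsize=True, storedsize=True)
+    tracked, stored = info[b'trackedsize'], info[b'storedsize']
+    if tracked is None or stored is None:
+        return None  # the backend could not compute one of the sizes
+    # With delta storage, ``stored`` is commonly smaller than ``tracked``.
+    return tracked, stored
+
+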
+class idirs(interfaceutil.Interface):
+    """Interface representing a collection of directories from paths.
+
+    This interface is essentially a derived data structure representing
+    directories from a collection of paths.
+    """
+
+    def addpath(path):
+        """Add a path to the collection.
+
+        All directories in the path will be added to the collection.
+        """
+
+    def delpath(path):
+        """Remove a path from the collection.
+
+        If the removal was the last path in a particular directory, the
+        directory is removed from the collection.
+        """
+
+    def __iter__():
+        """Iterate over the directories in this collection of paths."""
+
+    def __contains__(path):
+        """Whether a specific directory is in this collection."""
+
+
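+# A minimal sketch (not a shipped implementation) of the ``idirs``
+# semantics above: directories are reference counted so that ``delpath()``
+# drops a directory once its last path goes away. ``_parentdirs`` is a
+# hypothetical helper.
+class _exampledirs(object):
+    def __init__(self):
+        self._dirs = {}  # directory -> number of paths beneath it
+
+    def addpath(self, path):
+        for d in _parentdirs(path):
+            self._dirs[d] = self._dirs.get(d, 0) + 1
+
+    def delpath(self, path):
+        for d in _parentdirs(path):
+            if self._dirs[d] == 1:
+                del self._dirs[d]
+            else:
+                self._dirs[d] -= 1
+
+    def __iter__(self):
+        return iter(self._dirs)
+
+    def __contains__(self, d):
+        return d in self._dirs
+
+
+def _parentdirs(path):
+    # b'a/b/c' yields b'', b'a', b'a/b'
+    yield b''
+    pos = path.find(b'/')
+    while pos != -1:
+        yield path[:pos]
+        pos = path.find(b'/', pos + 1)
+
+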
+class imanifestdict(interfaceutil.Interface):
+    """Interface representing a manifest data structure.
+
+    A manifest is effectively a dict mapping paths to entries. Each entry
+    consists of a binary node and extra flags affecting that entry.
+    """
+
+    def __getitem__(path):
+        """Returns the binary node value for a path in the manifest.
+
+        Raises ``KeyError`` if the path does not exist in the manifest.
+
+        Equivalent to ``self.find(path)[0]``.
+        """
+
+    def find(path):
+        """Returns the entry for a path in the manifest.
+
+        Returns a 2-tuple of (node, flags).
+
+        Raises ``KeyError`` if the path does not exist in the manifest.
+        """
+
+    def __len__():
+        """Return the number of entries in the manifest."""
+
+    def __nonzero__():
+        """Returns True if the manifest has entries, False otherwise."""
+
+    __bool__ = __nonzero__
+
+    def __setitem__(path, node):
+        """Define the node value for a path in the manifest.
+
+        If the path is already in the manifest, its flags will be copied to
+        the new entry.
+        """
+
+    def __contains__(path):
+        """Whether a path exists in the manifest."""
+
+    def __delitem__(path):
+        """Remove a path from the manifest.
+
+        Raises ``KeyError`` if the path is not in the manifest.
+        """
+
+    def __iter__():
+        """Iterate over paths in the manifest."""
+
+    def iterkeys():
+        """Iterate over paths in the manifest."""
+
+    def keys():
+        """Obtain a list of paths in the manifest."""
+
+    def filesnotin(other, match=None):
+        """Obtain the set of paths in this manifest but not in another.
+
+        ``match`` is an optional matcher function to be applied to both
+        manifests.
+
+        Returns a set of paths.
+        """
+
+    def dirs():
+        """Returns an object implementing the ``idirs`` interface."""
+
+    def hasdir(dir):
+        """Returns a bool indicating if a directory is in this manifest."""
+
+    def matches(match):
+        """Generate a new manifest filtered through a matcher.
+
+        Returns an object conforming to the ``imanifestdict`` interface.
+        """
+
+    def walk(match):
+        """Generator of paths in manifest satisfying a matcher.
+
+        This is equivalent to ``self.matches(match).iterkeys()`` except a new
+        manifest object is not created.
+
+        If the matcher has explicit files listed and they don't exist in
+        the manifest, ``match.bad()`` is called for each missing file.
+        """
+
+    def diff(other, match=None, clean=False):
+        """Find differences between this manifest and another.
+
+        This manifest is compared to ``other``.
+
+        If ``match`` is provided, the two manifests are filtered against this
+        matcher and only entries satisfying the matcher are compared.
+
+        If ``clean`` is True, unchanged files are included in the returned
+        object.
+
+        Returns a dict with paths as keys and 2-tuples of 2-tuples of the
+        form ``((node1, flag1), (node2, flag2))`` as values, where
+        ``(node1, flag1)`` is the node and flags for this manifest and
+        ``(node2, flag2)`` is the same for the other manifest.
+        """
+
+    def setflag(path, flag):
+        """Set the flag value for a given path.
+
+        Raises ``KeyError`` if the path is not already in the manifest.
+        """
+
+    def get(path, default=None):
+        """Obtain the node value for a path or a default value if missing."""
+
+    def flags(path, default=b''):
+        """Return the flags value for a path or a default value if missing."""
+
+    def copy():
+        """Return a copy of this manifest."""
+
+    def items():
+        """Returns an iterable of (path, node) for items in this manifest."""
+
+    def iteritems():
+        """Identical to items()."""
+
+    def iterentries():
+        """Returns an iterable of (path, node, flags) for this manifest.
+
+        Similar to ``iteritems()`` except items are a 3-tuple and include
+        flags.
+        """
+
+    def text():
+        """Obtain the raw data representation for this manifest.
+
+        Result is used to create a manifest revision.
+        """
+
+    def fastdelta(base, changes):
+        """Obtain a delta between this manifest and another given changes.
+
+        ``base`` is the raw data representation for another manifest.
+
+        ``changes`` is an iterable of ``(path, to_delete)``.
+
+        Returns a 2-tuple containing ``bytearray(self.text())`` and the
+        delta between ``base`` and this manifest.
+        """
+
+
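+# An illustrative sketch (not part of the interface): consuming the result
+# of ``imanifestdict.diff()``. ``m1`` and ``m2`` are assumed to conform to
+# ``imanifestdict``; the function name is hypothetical.
+def _example_differingpaths(m1, m2):
+    changed = m1.diff(m2)  # default: only differing paths are returned
+    for path, ((n1, f1), (n2, f2)) in sorted(changed.items()):
+        # A side that lacks the path typically reports a ``None`` node.
+        yield path, n1, n2
+
+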
+class imanifestrevisionbase(interfaceutil.Interface):
+    """Base interface representing a single revision of a manifest.
+
+    Should not be used as a primary interface: should always be inherited
+    as part of a larger interface.
+    """
+
+    def new():
+        """Obtain a new manifest instance.
+
+        Returns an object conforming to the ``imanifestrevisionwritable``
+        interface. The instance will be associated with the same
+        ``imanifestlog`` collection as this instance.
+        """
+
+    def copy():
+        """Obtain a copy of this manifest instance.
+
+        Returns an object conforming to the ``imanifestrevisionwritable``
+        interface. The instance will be associated with the same
+        ``imanifestlog`` collection as this instance.
+        """
+
+    def read():
+        """Obtain the parsed manifest data structure.
+
+        The returned object conforms to the ``imanifestdict`` interface.
+        """
+
+
+class imanifestrevisionstored(imanifestrevisionbase):
+    """Interface representing a manifest revision committed to storage."""
+
+    def node():
+        """The binary node for this manifest."""
+
+    parents = interfaceutil.Attribute(
+        """List of binary nodes that are parents for this manifest revision."""
+    )
+
+    def readdelta(shallow=False):
+        """Obtain the manifest data structure representing changes from parent.
+
+        This manifest is compared to its 1st parent. A new manifest representing
+        those differences is constructed.
+
+        The returned object conforms to the ``imanifestdict`` interface.
+        """
+
+    def readfast(shallow=False):
+        """Calls either ``read()`` or ``readdelta()``.
+
+        The faster of the two options is called.
+        """
+
+    def find(key):
+        """Calls self.read().find(key)``.
+
+        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
+        """
+
+
+class imanifestrevisionwritable(imanifestrevisionbase):
+    """Interface representing a manifest revision that can be committed."""
+
+    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
+        """Add this revision to storage.
+
+        Takes a transaction object, the changeset revision number it will
+        be associated with, its parent nodes, and lists of added and
+        removed paths.
+
+        If match is provided, storage can choose not to inspect or write out
+        items that do not match. Storage is still required to be able to provide
+        the full manifest in the future for any directories written (these
+        manifests should not be "narrowed on disk").
+
+        Returns the binary node of the created revision.
+        """
+
+
+class imanifeststorage(interfaceutil.Interface):
+    """Storage interface for manifest data."""
+
+    tree = interfaceutil.Attribute(
+        """The path to the directory this manifest tracks.
+
+        The empty bytestring represents the root manifest.
+        """
+    )
+
+    index = interfaceutil.Attribute(
+        """An ``ifilerevisionssequence`` instance."""
+    )
+
+    indexfile = interfaceutil.Attribute(
+        """Path of revlog index file.
+
+        TODO this is revlog specific and should not be exposed.
+        """
+    )
+
+    opener = interfaceutil.Attribute(
+        """VFS opener to use to access underlying files used for storage.
+
+        TODO this is revlog specific and should not be exposed.
+        """
+    )
+
+    version = interfaceutil.Attribute(
+        """Revlog version number.
+
+        TODO this is revlog specific and should not be exposed.
+        """
+    )
+
+    _generaldelta = interfaceutil.Attribute(
+        """Whether generaldelta storage is being used.
+
+        TODO this is revlog specific and should not be exposed.
+        """
+    )
+
+    fulltextcache = interfaceutil.Attribute(
+        """Dict with cache of fulltexts.
+
+        TODO this doesn't feel appropriate for the storage interface.
+        """
+    )
+
+    def __len__():
+        """Obtain the number of revisions stored for this manifest."""
+
+    def __iter__():
+        """Iterate over revision numbers for this manifest."""
+
+    def rev(node):
+        """Obtain the revision number given a binary node.
+
+        Raises ``error.LookupError`` if the node is not known.
+        """
+
+    def node(rev):
+        """Obtain the node value given a revision number.
+
+        Raises ``error.LookupError`` if the revision is not known.
+        """
+
+    def lookup(value):
+        """Attempt to resolve a value to a node.
+
+        Value can be a binary node, hex node, revision number, or a bytes
+        that can be converted to an integer.
+
+        Raises ``error.LookupError`` if a node could not be resolved.
+        """
+
+    def parents(node):
+        """Returns a 2-tuple of parent nodes for a node.
+
+        Values will be ``nullid`` if the parent is empty.
+        """
+
+    def parentrevs(rev):
+        """Like parents() but operates on revision numbers."""
+
+    def linkrev(rev):
+        """Obtain the changeset revision number a revision is linked to."""
+
+    def revision(node, _df=None, raw=False):
+        """Obtain fulltext data for a node."""
+
+    def rawdata(node, _df=None):
+        """Obtain raw data for a node."""
+
+    def revdiff(rev1, rev2):
+        """Obtain a delta between two revision numbers.
+
+        The returned data is the result of ``bdiff.bdiff()`` on the raw
+        revision data.
+        """
+
+    def cmp(node, fulltext):
+        """Compare fulltext to another revision.
+
+        Returns True if the fulltext is different from what is stored.
+        """
+
+    def emitrevisions(
+        nodes,
+        nodesorder=None,
+        revisiondata=False,
+        assumehaveparentrevisions=False,
+    ):
+        """Produce ``irevisiondelta`` describing revisions.
+
+        See the documentation for ``ifiledata`` for more.
+        """
+
+    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
+        """Process a series of deltas for storage.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def rawsize(rev):
+        """Obtain the size of tracked data.
+
+        This is equivalent to ``len(m.rawdata(node))``.
+
+        TODO this method is only used by upgrade code and may be removed.
+        """
+
+    def getstrippoint(minlink):
+        """Find minimum revision that must be stripped to strip a linkrev.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def strip(minlink, transaction):
+        """Remove storage of items starting at a linkrev.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def checksize():
+        """Obtain the expected sizes of backing files.
+
+        TODO this is used by verify and it should not be part of the interface.
+        """
+
+    def files():
+        """Obtain paths that are backing storage for this manifest.
+
+        TODO this is used by verify and there should probably be a better API
+        for this functionality.
+        """
+
+    def deltaparent(rev):
+        """Obtain the revision that a revision is delta'd against.
+
+        TODO delta encoding is an implementation detail of storage and should
+        not be exposed to the storage interface.
+        """
+
+    def clone(tr, dest, **kwargs):
+        """Clone this instance to another."""
+
+    def clearcaches(clear_persisted_data=False):
+        """Clear any caches associated with this instance."""
+
+    def dirlog(d):
+        """Obtain a manifest storage instance for a tree."""
+
+    def add(
+        m, transaction, link, p1, p2, added, removed, readtree=None, match=None
+    ):
+        """Add a revision to storage.
+
+        ``m`` is an object conforming to ``imanifestdict``.
+
+        ``link`` is the linkrev revision number.
+
+        ``p1`` and ``p2`` are the parent revision numbers.
+
+        ``added`` and ``removed`` are iterables of added and removed paths,
+        respectively.
+
+        ``readtree`` is a function that can be used to read the child tree(s)
+        when recursively writing the full tree structure when using
+        treemanifests.
+
+        ``match`` is a matcher that can be used to hint to storage that not all
+        paths must be inspected; this is an optimization and can be safely
+        ignored. Note that the storage must still be able to reproduce a full
+        manifest including files that did not match.
+        """
+
+    def storageinfo(
+        exclusivefiles=False,
+        sharedfiles=False,
+        revisionscount=False,
+        trackedsize=False,
+        storedsize=False,
+    ):
+        """Obtain information about storage for this manifest's data.
+
+        See ``ifilestorage.storageinfo()`` for a description of this method.
+        This one behaves the same way, except for manifest data.
+        """
+
+
+class imanifestlog(interfaceutil.Interface):
+    """Interface representing a collection of manifest snapshots.
+
+    Represents the root manifest in a repository.
+
+    Also serves as a means to access nested tree manifests and to cache
+    tree manifests.
+    """
+
+    def __getitem__(node):
+        """Obtain a manifest instance for a given binary node.
+
+        Equivalent to calling ``self.get('', node)``.
+
+        The returned object conforms to the ``imanifestrevisionstored``
+        interface.
+        """
+
+    def get(tree, node, verify=True):
+        """Retrieve the manifest instance for a given directory and binary node.
+
+        ``node`` always refers to the node of the root manifest (which will be
+        the only manifest if flat manifests are being used).
+
+        If ``tree`` is the empty string, the root manifest is returned.
+        Otherwise the manifest for the specified directory will be returned
+        (requires tree manifests).
+
+        If ``verify`` is True, ``LookupError`` is raised if the node is not
+        known.
+
+        The returned object conforms to the ``imanifestrevisionstored``
+        interface.
+        """
+
+    def getstorage(tree):
+        """Retrieve an interface to storage for a particular tree.
+
+        If ``tree`` is the empty bytestring, storage for the root manifest will
+        be returned. Otherwise storage for a tree manifest is returned.
+
+        TODO formalize interface for returned object.
+        """
+
+    def clearcaches():
+        """Clear caches associated with this collection."""
+
+    def rev(node):
+        """Obtain the revision number for a binary node.
+
+        Raises ``error.LookupError`` if the node is not known.
+        """
+
+
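+# An illustrative sketch (not part of the interface): resolving a path
+# through ``imanifestlog``. ``mfl`` is assumed to conform to
+# ``imanifestlog`` and ``node`` to name a known root manifest revision.
+def _example_lookuppath(mfl, node, path):
+    mctx = mfl[node]  # imanifestrevisionstored for the root manifest
+    manifest = mctx.read()  # imanifestdict
+    # find() returns (filenode, flags) and raises KeyError when absent.
+    return manifest.find(path)
+
+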
+class ilocalrepositoryfilestorage(interfaceutil.Interface):
+    """Local repository sub-interface providing access to tracked file storage.
+
+    This interface defines how a repository accesses storage for a single
+    tracked file path.
+    """
+
+    def file(f):
+        """Obtain a filelog for a tracked path.
+
+        The returned type conforms to the ``ifilestorage`` interface.
+        """
+
+
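+# An illustrative sketch (not part of the interface): reading one file
+# revision through the storage interfaces above. ``repo`` is assumed to
+# conform to ``completelocalrepository`` defined below.
+def _example_readfile(repo, path, node):
+    fl = repo.file(path)  # conforms to ifilestorage
+    # read() strips any copy-metadata header from the fulltext.
+    return fl.read(node)
+
+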
+class ilocalrepositorymain(interfaceutil.Interface):
+    """Main interface for local repositories.
+
+    This currently captures the reality of things - not how things should be.
+    """
+
+    supportedformats = interfaceutil.Attribute(
+        """Set of requirements that apply to stream clone.
+
+        This is actually a class attribute and is shared among all instances.
+        """
+    )
+
+    supported = interfaceutil.Attribute(
+        """Set of requirements that this repo is capable of opening."""
+    )
+
+    requirements = interfaceutil.Attribute(
+        """Set of requirements this repo uses."""
+    )
+
+    features = interfaceutil.Attribute(
+        """Set of "features" this repository supports.
+
+        A "feature" is a loosely-defined term. It can refer to a feature
+        in the classical sense or can describe an implementation detail
+        of the repository. For example, a ``readonly`` feature may denote
+        the repository as read-only. Or a ``revlogfilestore`` feature may
+        denote that the repository is using revlogs for file storage.
+
+        The intent of features is to provide a machine-queryable mechanism
+        for repo consumers to test for various repository characteristics.
+
+        Features are similar to ``requirements``. The main difference is that
+        requirements are stored on-disk and represent requirements to open the
+        repository. Features are more run-time capabilities of the repository
+        and more granular capabilities (which may be derived from requirements).
+        """
+    )
+
+    filtername = interfaceutil.Attribute(
+        """Name of the repoview that is active on this repo."""
+    )
+
+    wvfs = interfaceutil.Attribute(
+        """VFS used to access the working directory."""
+    )
+
+    vfs = interfaceutil.Attribute(
+        """VFS rooted at the .hg directory.
+
+        Used to access repository data not in the store.
+        """
+    )
+
+    svfs = interfaceutil.Attribute(
+        """VFS rooted at the store.
+
+        Used to access repository data in the store. Typically .hg/store.
+        But can point elsewhere if the store is shared.
+        """
+    )
+
+    root = interfaceutil.Attribute(
+        """Path to the root of the working directory."""
+    )
+
+    path = interfaceutil.Attribute("""Path to the .hg directory.""")
+
+    origroot = interfaceutil.Attribute(
+        """The filesystem path that was used to construct the repo."""
+    )
+
+    auditor = interfaceutil.Attribute(
+        """A pathauditor for the working directory.
+
+        This checks if a path refers to a nested repository.
+
+        Operates on the filesystem.
+        """
+    )
+
+    nofsauditor = interfaceutil.Attribute(
+        """A pathauditor for the working directory.
+
+        This is like ``auditor`` except it doesn't do filesystem checks.
+        """
+    )
+
+    baseui = interfaceutil.Attribute(
+        """Original ui instance passed into constructor."""
+    )
+
+    ui = interfaceutil.Attribute("""Main ui instance for this instance.""")
+
+    sharedpath = interfaceutil.Attribute(
+        """Path to the .hg directory of the repo this repo was shared from."""
+    )
+
+    store = interfaceutil.Attribute("""A store instance.""")
+
+    spath = interfaceutil.Attribute("""Path to the store.""")
+
+    sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")
+
+    cachevfs = interfaceutil.Attribute(
+        """A VFS used to access the cache directory.
+
+        Typically .hg/cache.
+        """
+    )
+
+    wcachevfs = interfaceutil.Attribute(
+        """A VFS used to access the cache directory dedicated to the
+        working copy.
+
+        Typically .hg/wcache.
+        """
+    )
+
+    filteredrevcache = interfaceutil.Attribute(
+        """Holds sets of revisions to be filtered."""
+    )
+
+    names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
+
+    filecopiesmode = interfaceutil.Attribute(
+        """The way files copies should be dealt with in this repo."""
+    )
+
+    def close():
+        """Close the handle on this repository."""
+
+    def peer():
+        """Obtain an object conforming to the ``peer`` interface."""
+
+    def unfiltered():
+        """Obtain an unfiltered/raw view of this repo."""
+
+    def filtered(name, visibilityexceptions=None):
+        """Obtain a named view of this repository."""
+
+    obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
+
+    changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
+
+    manifestlog = interfaceutil.Attribute(
+        """An instance conforming to the ``imanifestlog`` interface.
+
+        Provides access to manifests for the repository.
+        """
+    )
+
+    dirstate = interfaceutil.Attribute("""Working directory state.""")
+
+    narrowpats = interfaceutil.Attribute(
+        """Matcher patterns for this repository's narrowspec."""
+    )
+
+    def narrowmatch(match=None, includeexact=False):
+        """Obtain a matcher for the narrowspec."""
+
+    def setnarrowpats(newincludes, newexcludes):
+        """Define the narrowspec for this repository."""
+
+    def __getitem__(changeid):
+        """Try to resolve a changectx."""
+
+    def __contains__(changeid):
+        """Whether a changeset exists."""
+
+    def __nonzero__():
+        """Always returns True."""
+        return True
+
+    __bool__ = __nonzero__
+
+    def __len__():
+        """Returns the number of changesets in the repo."""
+
+    def __iter__():
+        """Iterate over revisions in the changelog."""
+
+    def revs(expr, *args):
+        """Evaluate a revset.
+
+        Emits revisions.
+        """
+
+    def set(expr, *args):
+        """Evaluate a revset.
+
+        Emits changectx instances.
+        """
+
+    def anyrevs(specs, user=False, localalias=None):
+        """Find revisions matching one of the given revsets."""
+
+    def url():
+        """Returns a string representing the location of this repo."""
+
+    def hook(name, throw=False, **args):
+        """Call a hook."""
+
+    def tags():
+        """Return a mapping of tag to node."""
+
+    def tagtype(tagname):
+        """Return the type of a given tag."""
+
+    def tagslist():
+        """Return a list of tags ordered by revision."""
+
+    def nodetags(node):
+        """Return the tags associated with a node."""
+
+    def nodebookmarks(node):
+        """Return the list of bookmarks pointing to the specified node."""
+
+    def branchmap():
+        """Return a mapping of branch to heads in that branch."""
+
+    def revbranchcache():
+        """Return the cache mapping revisions to branch names."""
+
+    def branchtip(branchtip, ignoremissing=False):
+        """Return the tip node for a given branch."""
+
+    def lookup(key):
+        """Resolve the node for a revision."""
+
+    def lookupbranch(key):
+        """Look up the branch name of the given revision or branch name."""
+
+    def known(nodes):
+        """Determine whether a series of nodes is known.
+
+        Returns a list of bools.
+        """
+
+    def local():
+        """Whether the repository is local."""
+        return True
+
+    def publishing():
+        """Whether the repository is a publishing repository."""
+
+    def cancopy():
+        """Whether this repository can be copied at the filesystem level."""
+
+    def shared():
+        """The type of shared repository or None."""
+
+    def wjoin(f, *insidef):
+        """Calls self.vfs.reljoin(self.root, f, *insidef)"""
+
+    def setparents(p1, p2):
+        """Set the parent nodes of the working directory."""
+
+    def filectx(path, changeid=None, fileid=None):
+        """Obtain a filectx for the given file revision."""
+
+    def getcwd():
+        """Obtain the current working directory from the dirstate."""
+
+    def pathto(f, cwd=None):
+        """Obtain the relative path to a file."""
+
+    def adddatafilter(name, fltr):
+        pass
+
+    def wread(filename):
+        """Read a file from wvfs, using data filters."""
+
+    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
+        """Write data to a file in the wvfs, using data filters."""
+
+    def wwritedata(filename, data):
+        """Resolve data for writing to the wvfs, using data filters."""
+
+    def currenttransaction():
+        """Obtain the current transaction instance or None."""
+
+    def transaction(desc, report=None):
+        """Open a new transaction to write to the repository."""
+
+    def undofiles():
+        """Returns a list of (vfs, path) for files to undo transactions."""
+
+    def recover():
+        """Roll back an interrupted transaction."""
+
+    def rollback(dryrun=False, force=False):
+        """Undo the last transaction.
+
+        DANGEROUS.
+        """
+
+    def updatecaches(tr=None, full=False):
+        """Warm repo caches."""
+
+    def invalidatecaches():
+        """Invalidate cached data due to the repository mutating."""
+
+    def invalidatevolatilesets():
+        pass
+
+    def invalidatedirstate():
+        """Invalidate the dirstate."""
+
+    def invalidate(clearfilecache=False):
+        pass
+
+    def invalidateall():
+        pass
+
+    def lock(wait=True):
+        """Lock the repository store and return a lock instance."""
+
+    def wlock(wait=True):
+        """Lock the non-store parts of the repository."""
+
+    def currentwlock():
+        """Return the wlock if it's held or None."""
+
+    def checkcommitpatterns(wctx, vdirs, match, status, fail):
+        pass
+
+    def commit(
+        text=b'',
+        user=None,
+        date=None,
+        match=None,
+        force=False,
+        editor=False,
+        extra=None,
+    ):
+        """Add a new revision to the repository."""
+
+    def commitctx(ctx, error=False, origctx=None):
+        """Commit a commitctx instance to the repository."""
+
+    def destroying():
+        """Inform the repository that nodes are about to be destroyed."""
+
+    def destroyed():
+        """Inform the repository that nodes have been destroyed."""
+
+    def status(
+        node1=b'.',
+        node2=None,
+        match=None,
+        ignored=False,
+        clean=False,
+        unknown=False,
+        listsubrepos=False,
+    ):
+        """Convenience method to call repo[x].status()."""
+
+    def addpostdsstatus(ps):
+        pass
+
+    def postdsstatus():
+        pass
+
+    def clearpostdsstatus():
+        pass
+
+    def heads(start=None):
+        """Obtain list of nodes that are DAG heads."""
+
+    def branchheads(branch=None, start=None, closed=False):
+        """Return a list of nodes that are heads of the given branch."""
+
+    def branches(nodes):
+        pass
+
+    def between(pairs):
+        pass
+
+    def checkpush(pushop):
+        pass
+
+    prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
+
+    def pushkey(namespace, key, old, new):
+        """Update a value in a pushkey namespace.
+
+        Returns a bool indicating whether the update succeeded.
+        """
+
+    def listkeys(namespace):
+        """Return the keys and values in a pushkey namespace as a dict."""
+
+    def debugwireargs(one, two, three=None, four=None, five=None):
+        pass
+
+    def savecommitmessage(text):
+        """Save a commit message so it can be recovered after an error."""
+
+
+class completelocalrepository(
+    ilocalrepositorymain, ilocalrepositoryfilestorage
+):
+    """Complete interface for a local repository."""
+
+
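+# An illustrative sketch (not part of the interface): feature and
+# requirement checks as described for ``ilocalrepositorymain`` above.
+# ``repo`` is assumed to conform to ``completelocalrepository``; the
+# ``readonly`` feature name comes from the docstring and is illustrative.
+def _example_capabilitychecks(repo):
+    # ``requirements`` gate opening the repo on disk; ``features`` expose
+    # run-time capabilities (possibly derived from requirements).
+    is_store = b'store' in repo.requirements
+    readonly = b'readonly' in repo.features
+    return is_store, readonly
+
+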
+class iwireprotocolcommandcacher(interfaceutil.Interface):
+    """Represents a caching backend for wire protocol commands.
+
+    Wire protocol version 2 supports transparent caching of many commands.
+    To leverage this caching, servers can activate objects that cache
+    command responses. Objects handle both cache writing and reading.
+    This interface defines how that response caching mechanism works.
+
+    Wire protocol version 2 commands emit a series of objects that are
+    serialized and sent to the client. The caching layer exists between
+    the invocation of the command function and the sending of its output
+    objects to an output layer.
+
+    Instances of this interface represent a binding to a cache that
+    can serve a response (in place of calling a command function) and/or
+    write responses to a cache for subsequent use.
+
+    When a command request arrives, the following happens with regards
+    to this interface:
+
+    1. The server determines whether the command request is cacheable.
+    2. If it is, an instance of this interface is spawned.
+    3. The cacher is activated in a context manager (``__enter__`` is called).
+    4. A cache *key* for that request is derived. This will call the
+       instance's ``adjustcachekeystate()`` method so the derivation
+       can be influenced.
+    5. The cacher is informed of the derived cache key via a call to
+       ``setcachekey()``.
+    6. The cacher's ``lookup()`` method is called to test for presence of
+       the derived key in the cache.
+    7. If ``lookup()`` returns a hit, that cached result is used in place
+       of invoking the command function. ``__exit__`` is called and the instance
+       is discarded.
+    8. The command function is invoked.
+    9. ``onobject()`` is called for each object emitted by the command
+       function.
+    10. After the final object is seen, ``onfinished()`` is called.
+    11. ``__exit__`` is called to signal the end of use of the instance.
+
+    Cache *key* derivation can be influenced by the instance.
+
+    Cache keys are initially derived by a deterministic representation of
+    the command request. This includes the command name, arguments, protocol
+    version, etc. This initial key derivation is performed by CBOR-encoding a
+    data structure and feeding that output into a hasher.
+
+    Instances of this interface can influence this initial key derivation
+    via ``adjustcachekeystate()``.
+
+    The instance is informed of the derived cache key via a call to
+    ``setcachekey()``. The instance must store the key locally so it can
+    be consulted on subsequent operations that may require it.
+
+    When constructed, the instance has access to a callable that can be used
+    for encoding response objects. This callable receives as its single
+    argument an object emitted by a command function. It returns an iterable
+    of bytes chunks representing the encoded object. Unless the cacher is
+    caching native Python objects in memory or has a way of reconstructing
+    the original Python objects, implementations typically call this function
+    to produce bytes from the output objects and then store those bytes in
+    the cache. When it comes time to re-emit those bytes, they are wrapped
+    in a ``wireprototypes.encodedresponse`` instance to tell the output
+    layer that they are pre-encoded.
+
+    When receiving the objects emitted by the command function, instances
+    can choose what to do with those objects. The simplest thing to do is
+    re-emit the original objects. They will be forwarded to the output
+    layer and will be processed as if the cacher did not exist.
+
+    Implementations could also choose to not emit objects - instead locally
+    buffering objects or their encoded representation. They could then emit
+    a single "coalesced" object when ``onfinished()`` is called. In
+    this way, the implementation would function as a filtering layer of
+    sorts.
+
+    When caching objects, typically the encoded form of the object will
+    be stored. Keep in mind that if the original object is forwarded to
+    the output layer, it will need to be encoded there as well. For large
+    output, this redundant encoding could add overhead. Implementations
+    could wrap the encoded object data in ``wireprototypes.encodedresponse``
+    instances to avoid this overhead.
+    """
+
+    def __enter__():
+        """Marks the instance as active.
+
+        Should return self.
+        """
+
+    def __exit__(exctype, excvalue, exctb):
+        """Called when cacher is no longer used.
+
+        This can be used by implementations to perform cleanup actions (e.g.
+        disconnecting network sockets, aborting a partially cached
+        response).
+        """
+
+    def adjustcachekeystate(state):
+        """Influences cache key derivation by adjusting state to derive key.
+
+        A dict defining the state used to derive the cache key is passed.
+
+        Implementations can modify this dict to record additional state
+        that should influence key derivation.
+
+        Implementations are *highly* encouraged to not modify or delete
+        existing keys.
+        """
+
+    def setcachekey(key):
+        """Record the derived cache key for this request.
+
+        Instances may mutate the key for internal usage, as desired; e.g.
+        instances may wish to prepend the repo name, introduce path
+        components for filesystem or URL addressing, etc. Behavior is up to
+        the cache.
+
+        Returns a bool indicating if the request is cacheable by this
+        instance.
+        """
+
+    def lookup():
+        """Attempt to resolve an entry in the cache.
+
+        The instance is instructed to look for the cache key that it was
+        informed about via the call to ``setcachekey()``.
+
+        If there's no cache hit or the cacher doesn't wish to use the cached
+        entry, ``None`` should be returned.
+
+        Else, a dict defining the cached result should be returned. The
+        dict may have the following keys:
+
+        objs
+           An iterable of objects that should be sent to the client. That
+           iterable of objects is expected to be what the command function
+           would return if invoked or an equivalent representation thereof.
+        """
+
+    def onobject(obj):
+        """Called when a new object is emitted from the command function.
+
+        Receives as its argument the object that was emitted from the
+        command function.
+
+        This method returns an iterator of objects to forward to the output
+        layer. The easiest implementation is a generator that just
+        ``yield obj``.
+        """
+
+    def onfinished():
+        """Called after all objects have been emitted from the command function.
+
+        Implementations should return an iterator of objects to forward to
+        the output layer.
+
+        This method can be a generator.
+        """
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/interfaces/util.py	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,38 @@
+# util.py - Utilities for declaring interfaces.
+#
+# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# zope.interface imposes a run-time cost due to module import overhead and
+# bookkeeping for declaring interfaces. So, we use stubs for various
+# zope.interface primitives unless instructed otherwise.
+
+from __future__ import absolute_import
+
+from .. import encoding
+
+if encoding.environ.get(b'HGREALINTERFACES'):
+    from ..thirdparty.zope import interface as zi
+
+    Attribute = zi.Attribute
+    Interface = zi.Interface
+    implementer = zi.implementer
+else:
+
+    class Attribute(object):
+        def __init__(self, __name__, __doc__=b''):
+            pass
+
+    class Interface(object):
+        def __init__(
+            self, name, bases=(), attrs=None, __doc__=None, __module__=None
+        ):
+            pass
+
+    def implementer(*ifaces):
+        def wrapper(cls):
+            return cls
+
+        return wrapper
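+
+
+# An illustrative sketch (not shipped code): how these primitives declare
+# an interface and mark a class as implementing it, mirroring the usage in
+# interfaces/repository.py. ``iexample``/``example`` are hypothetical.
+class iexample(Interface):
+    """A tiny example interface."""
+
+    attr = Attribute("""A documented attribute.""")
+
+    def frob(count):
+        """Frob ``count`` times."""
+
+
+@implementer(iexample)
+class example(object):
+    attr = None
+
+    def frob(self, count):
+        return count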
--- a/mercurial/keepalive.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/keepalive.py	Mon Oct 21 11:09:48 2019 -0400
@@ -92,15 +92,14 @@
 import threading
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     node,
     pycompat,
     urllibcompat,
     util,
 )
-from .utils import (
-    procutil,
-)
+from .utils import procutil
 
 httplib = util.httplib
 urlerr = util.urlerr
@@ -108,16 +107,18 @@
 
 DEBUG = None
 
+
 class ConnectionManager(object):
     """
     The connection manager must be able to:
       * keep track of all existing
       """
+
     def __init__(self):
         self._lock = threading.Lock()
-        self._hostmap = collections.defaultdict(list) # host -> [connection]
-        self._connmap = {} # map connections to host
-        self._readymap = {} # map connection to ready state
+        self._hostmap = collections.defaultdict(list)  # host -> [connection]
+        self._connmap = {}  # map connections to host
+        self._readymap = {}  # map connection to ready state
 
     def add(self, host, connection, ready):
         self._lock.acquire()
@@ -169,6 +170,7 @@
         else:
             return dict(self._hostmap)
 
+
 class KeepAliveHandler(object):
     def __init__(self, timeout=None):
         self._cm = ConnectionManager()
@@ -192,7 +194,7 @@
 
     def close_all(self):
         """close all open connections"""
-        for host, conns in self._cm.get_all().iteritems():
+        for host, conns in pycompat.iteritems(self._cm.get_all()):
             for h in conns:
                 self._cm.remove(h)
                 h.close()
@@ -214,7 +216,7 @@
     def do_open(self, http_class, req):
         host = urllibcompat.gethost(req)
         if not host:
-            raise urlerr.urlerror('no host given')
+            raise urlerr.urlerror(b'no host given')
 
         try:
             h = self._cm.get_ready_conn(host)
@@ -235,8 +237,9 @@
                 # no (working) free connections were found.  Create a new one.
                 h = http_class(host, timeout=self._timeout)
                 if DEBUG:
-                    DEBUG.info("creating new connection to %s (%d)",
-                               host, id(h))
+                    DEBUG.info(
+                        b"creating new connection to %s (%d)", host, id(h)
+                    )
                 self._cm.add(host, h, False)
                 self._start_transaction(h, req)
                 r = h.getresponse()
@@ -244,18 +247,19 @@
         # to make the error message slightly more useful.
         except httplib.BadStatusLine as err:
             raise urlerr.urlerror(
-                _('bad HTTP status line: %s') % pycompat.sysbytes(err.line))
+                _(b'bad HTTP status line: %s') % pycompat.sysbytes(err.line)
+            )
         except (socket.error, httplib.HTTPException) as err:
             raise urlerr.urlerror(err)
 
         # If not a persistent connection, don't try to reuse it. Look
         # for this using getattr() since vcr doesn't define this
         # attribute, and in that case always close the connection.
-        if getattr(r, r'will_close', True):
+        if getattr(r, 'will_close', True):
             self._cm.remove(h)
 
         if DEBUG:
-            DEBUG.info("STATUS: %s, %s", r.status, r.reason)
+            DEBUG.info(b"STATUS: %s, %s", r.status, r.reason)
         r._handler = self
         r._host = host
         r._url = req.get_full_url()
@@ -280,7 +284,7 @@
             # worked.  We'll check the version below, too.
         except (socket.error, httplib.HTTPException):
             r = None
-        except: # re-raises
+        except:  # re-raises
             # adding this block just in case we've missed
             # something we will still raise the exception, but
             # lets try and close the connection and remove it
@@ -291,8 +295,11 @@
             # that it's now possible this call will raise
             # a DIFFERENT exception
             if DEBUG:
-                DEBUG.error("unexpected exception - closing "
-                            "connection to %s (%d)", host, id(h))
+                DEBUG.error(
+                    b"unexpected exception - closing connection to %s (%d)",
+                    host,
+                    id(h),
+                )
             self._cm.remove(h)
             h.close()
             raise
@@ -303,12 +310,13 @@
             # the socket has been closed by the server since we
             # last used the connection.
             if DEBUG:
-                DEBUG.info("failed to re-use connection to %s (%d)",
-                           host, id(h))
+                DEBUG.info(
+                    b"failed to re-use connection to %s (%d)", host, id(h)
+                )
             r = None
         else:
             if DEBUG:
-                DEBUG.info("re-using connection to %s (%d)", host, id(h))
+                DEBUG.info(b"re-using connection to %s (%d)", host, id(h))
 
         return r
 
@@ -330,17 +338,22 @@
             if urllibcompat.hasdata(req):
                 data = urllibcompat.getdata(req)
                 h.putrequest(
-                    req.get_method(), urllibcompat.getselector(req),
-                    **skipheaders)
+                    req.get_method(),
+                    urllibcompat.getselector(req),
+                    **skipheaders
+                )
                 if r'content-type' not in headers:
-                    h.putheader(r'Content-type',
-                                r'application/x-www-form-urlencoded')
+                    h.putheader(
+                        r'Content-type', r'application/x-www-form-urlencoded'
+                    )
                 if r'content-length' not in headers:
                     h.putheader(r'Content-length', r'%d' % len(data))
             else:
                 h.putrequest(
-                    req.get_method(), urllibcompat.getselector(req),
-                    **skipheaders)
+                    req.get_method(),
+                    urllibcompat.getselector(req),
+                    **skipheaders
+                )
         except socket.error as err:
             raise urlerr.urlerror(err)
         for k, v in headers.items():
@@ -356,13 +369,16 @@
         try:
             self.parent.requestscount += 1
             self.parent.sentbytescount += (
-                getattr(h, 'sentbytescount', 0) - oldbytescount)
+                getattr(h, 'sentbytescount', 0) - oldbytescount
+            )
         except AttributeError:
             pass
 
+
 class HTTPHandler(KeepAliveHandler, urlreq.httphandler):
     pass
 
+
 class HTTPResponse(httplib.HTTPResponse):
     # we need to subclass HTTPResponse in order to
     # 1) add readline(), readlines(), and readinto() methods
@@ -382,23 +398,23 @@
     # Both readline and readlines have been stolen with almost no
     # modification from socket.py
 
-
     def __init__(self, sock, debuglevel=0, strict=0, method=None):
         extrakw = {}
         if not pycompat.ispy3:
             extrakw[r'strict'] = True
             extrakw[r'buffering'] = True
-        httplib.HTTPResponse.__init__(self, sock, debuglevel=debuglevel,
-                                      method=method, **extrakw)
+        httplib.HTTPResponse.__init__(
+            self, sock, debuglevel=debuglevel, method=method, **extrakw
+        )
         self.fileno = sock.fileno
         self.code = None
         self.receivedbytescount = 0
-        self._rbuf = ''
+        self._rbuf = b''
         self._rbufsize = 8096
-        self._handler = None # inserted by the handler later
-        self._host = None    # (same)
-        self._url = None     # (same)
-        self._connection = None # (same)
+        self._handler = None  # inserted by the handler later
+        self._host = None  # (same)
+        self._url = None  # (same)
+        self._connection = None  # (same)
 
     _raw_read = httplib.HTTPResponse.read
     _raw_readinto = getattr(httplib.HTTPResponse, 'readinto', None)
@@ -413,8 +429,9 @@
             self.fp.close()
             self.fp = None
             if self._handler:
-                self._handler._request_closed(self, self._host,
-                                              self._connection)
+                self._handler._request_closed(
+                    self, self._host, self._connection
+                )
 
     def _close_conn(self):
         self.close()
@@ -444,7 +461,7 @@
         # implemented using readinto(), which can duplicate self._rbuf
         # if it's not empty.
         s = self._rbuf
-        self._rbuf = ''
+        self._rbuf = b''
         data = self._raw_read(amt)
 
         self.receivedbytescount += len(data)
@@ -468,16 +485,16 @@
         while True:
             if chunk_left is None:
                 line = self.fp.readline()
-                i = line.find(';')
+                i = line.find(b';')
                 if i >= 0:
-                    line = line[:i] # strip chunk-extensions
+                    line = line[:i]  # strip chunk-extensions
                 try:
                     chunk_left = int(line, 16)
                 except ValueError:
                     # close the connection as protocol synchronization is
                     # probably lost
                     self.close()
-                    raise httplib.IncompleteRead(''.join(parts))
+                    raise httplib.IncompleteRead(b''.join(parts))
                 if chunk_left == 0:
                     break
             if amt is None:
@@ -485,18 +502,18 @@
             elif amt < chunk_left:
                 parts.append(self._safe_read(amt))
                 self.chunk_left = chunk_left - amt
-                return ''.join(parts)
+                return b''.join(parts)
             elif amt == chunk_left:
                 parts.append(self._safe_read(amt))
                 self._safe_read(2)  # toss the CRLF at the end of the chunk
                 self.chunk_left = None
-                return ''.join(parts)
+                return b''.join(parts)
             else:
                 parts.append(self._safe_read(chunk_left))
                 amt -= chunk_left
 
             # we read the whole chunk, get another
-            self._safe_read(2)      # toss the CRLF at the end of the chunk
+            self._safe_read(2)  # toss the CRLF at the end of the chunk
             chunk_left = None
 
         # read and discard trailer up to the CRLF terminator
@@ -507,17 +524,17 @@
                 # a vanishingly small number of sites EOF without
                 # sending the trailer
                 break
-            if line == '\r\n':
+            if line == b'\r\n':
                 break
 
         # we read everything; close the "file"
         self.close()
 
-        return ''.join(parts)
+        return b''.join(parts)
 
     def readline(self):
         # Fast path for a line is already available in read buffer.
-        i = self._rbuf.find('\n')
+        i = self._rbuf.find(b'\n')
         if i >= 0:
             i += 1
             line = self._rbuf[:i]
@@ -541,7 +558,7 @@
                 pass
 
             chunks.append(new)
-            i = new.find('\n')
+            i = new.find(b'\n')
             if i >= 0:
                 break
 
@@ -549,13 +566,13 @@
 
         # EOF
         if i == -1:
-            self._rbuf = ''
-            return ''.join(chunks)
+            self._rbuf = b''
+            return b''.join(chunks)
 
         i += 1
         self._rbuf = chunks[-1][i:]
         chunks[-1] = chunks[-1][:i]
-        return ''.join(chunks)
+        return b''.join(chunks)
 
     def readlines(self, sizehint=0):
         total = 0
@@ -575,7 +592,7 @@
             res = self.read(len(dest))
             if not res:
                 return 0
-            dest[0:len(res)] = res
+            dest[0 : len(res)] = res
             return len(res)
         total = len(dest)
         have = len(self._rbuf)
@@ -595,9 +612,10 @@
 
         dest[0:have] = self._rbuf
         got += len(self._rbuf)
-        self._rbuf = ''
+        self._rbuf = b''
         return got
 
+
 def safesend(self, str):
     """Send `str' to the server.
 
@@ -625,13 +643,13 @@
     # NOTE: we DO propagate the error, though, because we cannot simply
     #       ignore the error... the caller will know if they can retry.
     if self.debuglevel > 0:
-        print("send:", repr(str))
+        print(b"send:", repr(str))
     try:
         blocksize = 8192
         read = getattr(str, 'read', None)
         if read is not None:
             if self.debuglevel > 0:
-                print("sending a read()able")
+                print(b"sending a read()able")
             data = read(blocksize)
             while data:
                 self.sock.sendall(data)
@@ -642,7 +660,7 @@
             self.sentbytescount += len(str)
     except socket.error as v:
         reraise = True
-        if v.args[0] == errno.EPIPE:      # Broken pipe
+        if v.args[0] == errno.EPIPE:  # Broken pipe
             if self._HTTPConnection__state == httplib._CS_REQ_SENT:
                 self._broken_pipe_resp = None
                 self._broken_pipe_resp = self.getresponse()
@@ -651,9 +669,11 @@
         if reraise:
             raise
 
+
 def wrapgetresponse(cls):
     """Wraps getresponse in cls with a broken-pipe sane version.
     """
+
     def safegetresponse(self):
         # In safesend() we might set the _broken_pipe_resp
         # attribute, in which case the socket has already
@@ -663,9 +683,11 @@
         if r is not None:
             return r
         return cls.getresponse(self)
+
     safegetresponse.__doc__ = cls.getresponse.__doc__
     return safegetresponse
 
+
 class HTTPConnection(httplib.HTTPConnection):
     # url.httpsconnection inherits from this. So when adding/removing
     # attributes, be sure to audit httpsconnection() for unintended
@@ -681,6 +703,7 @@
         self.sentbytescount = 0
         self.receivedbytescount = 0
 
+
 #########################################################################
 #####   TEST FUNCTIONS
 #########################################################################
@@ -688,7 +711,7 @@
 
 def continuity(url):
     md5 = hashlib.md5
-    format = '%25s: %s'
+    format = b'%25s: %s'
 
     # first fetch the file with the normal http handler
     opener = urlreq.buildopener()
@@ -697,7 +720,7 @@
     foo = fo.read()
     fo.close()
     m = md5(foo)
-    print(format % ('normal urllib', node.hex(m.digest())))
+    print(format % (b'normal urllib', node.hex(m.digest())))
 
     # now install the keepalive handler and try again
     opener = urlreq.buildopener(HTTPHandler())
@@ -707,10 +730,10 @@
     foo = fo.read()
     fo.close()
     m = md5(foo)
-    print(format % ('keepalive read', node.hex(m.digest())))
+    print(format % (b'keepalive read', node.hex(m.digest())))
 
     fo = urlreq.urlopen(url)
-    foo = ''
+    foo = b''
     while True:
         f = fo.readline()
         if f:
@@ -719,28 +742,31 @@
             break
     fo.close()
     m = md5(foo)
-    print(format % ('keepalive readline', node.hex(m.digest())))
+    print(format % (b'keepalive readline', node.hex(m.digest())))
+
 
 def comp(N, url):
-    print('  making %i connections to:\n  %s' % (N, url))
+    print(b'  making %i connections to:\n  %s' % (N, url))
 
-    procutil.stdout.write('  first using the normal urllib handlers')
+    procutil.stdout.write(b'  first using the normal urllib handlers')
     # first use normal opener
     opener = urlreq.buildopener()
     urlreq.installopener(opener)
     t1 = fetch(N, url)
-    print('  TIME: %.3f s' % t1)
+    print(b'  TIME: %.3f s' % t1)
 
-    procutil.stdout.write('  now using the keepalive handler       ')
+    procutil.stdout.write(b'  now using the keepalive handler       ')
     # now install the keepalive handler and try again
     opener = urlreq.buildopener(HTTPHandler())
     urlreq.installopener(opener)
     t2 = fetch(N, url)
-    print('  TIME: %.3f s' % t2)
-    print('  improvement factor: %.2f' % (t1 / t2))
+    print(b'  TIME: %.3f s' % t2)
+    print(b'  improvement factor: %.2f' % (t1 / t2))
+
 
 def fetch(N, url, delay=0):
     import time
+
     lens = []
     starttime = time.time()
     for i in range(N):
@@ -756,61 +782,67 @@
     for i in lens[1:]:
         j = j + 1
         if not i == lens[0]:
-            print("WARNING: inconsistent length on read %i: %i" % (j, i))
+            print(b"WARNING: inconsistent length on read %i: %i" % (j, i))
 
     return diff
 
+
 def test_timeout(url):
     global DEBUG
     dbbackup = DEBUG
+
     class FakeLogger(object):
         def debug(self, msg, *args):
             print(msg % args)
+
         info = warning = error = debug
+
     DEBUG = FakeLogger()
-    print("  fetching the file to establish a connection")
+    print(b"  fetching the file to establish a connection")
     fo = urlreq.urlopen(url)
     data1 = fo.read()
     fo.close()
 
     i = 20
-    print("  waiting %i seconds for the server to close the connection" % i)
+    print(b"  waiting %i seconds for the server to close the connection" % i)
     while i > 0:
-        procutil.stdout.write('\r  %2i' % i)
+        procutil.stdout.write(b'\r  %2i' % i)
         procutil.stdout.flush()
         time.sleep(1)
         i -= 1
-    procutil.stderr.write('\r')
+    procutil.stderr.write(b'\r')
 
-    print("  fetching the file a second time")
+    print(b"  fetching the file a second time")
     fo = urlreq.urlopen(url)
     data2 = fo.read()
     fo.close()
 
     if data1 == data2:
-        print('  data are identical')
+        print(b'  data are identical')
     else:
-        print('  ERROR: DATA DIFFER')
+        print(b'  ERROR: DATA DIFFER')
 
     DEBUG = dbbackup
 
 
 def test(url, N=10):
-    print("performing continuity test (making sure stuff isn't corrupted)")
+    print(b"performing continuity test (making sure stuff isn't corrupted)")
     continuity(url)
-    print('')
-    print("performing speed comparison")
+    print(b'')
+    print(b"performing speed comparison")
     comp(N, url)
-    print('')
-    print("performing dropped-connection check")
+    print(b'')
+    print(b"performing dropped-connection check")
     test_timeout(url)
 
+
 if __name__ == '__main__':
     import time
+
     try:
         N = int(sys.argv[1])
         url = sys.argv[2]
     except (IndexError, ValueError):
-        print("%s <integer> <url>" % sys.argv[0])
+        print(b"%s <integer> <url>" % sys.argv[0])
     else:
         test(url, N)
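
One caveat with the mechanical print(b'...') conversion in these test
helpers: percent-formatting of bytes works on Python 3.5+ (PEP 461) for
numeric codes, but %s only accepts bytes-like input (so b"%s" % sys.argv[0]
would raise TypeError on Python 3), and printing a bytes object prints its
repr. A sketch::

    >>> print(b'  TIME: %.3f s' % 1.5)        # Python 3
    b'  TIME: 1.500 s'
    >>> print((b'  TIME: %.3f s' % 1.5).decode('ascii'))
      TIME: 1.500 s
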
--- a/mercurial/linelog.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/linelog.py	Mon Oct 21 11:09:48 2019 -0400
@@ -23,18 +23,16 @@
 import abc
 import struct
 
-from .thirdparty import (
-    attr,
-)
-from . import (
-    pycompat,
-)
+from .thirdparty import attr
+from . import pycompat
 
-_llentry = struct.Struct('>II')
+_llentry = struct.Struct(b'>II')
+
 
 class LineLogError(Exception):
     """Error raised when something bad happens internally in linelog."""
 
+
 @attr.s
 class lineinfo(object):
     # Introducing revision of this line.
@@ -44,6 +42,7 @@
     # Private. Offset in the linelog program of this line. Used internally.
     _offset = attr.ib()
 
+
 @attr.s
 class annotateresult(object):
     rev = attr.ib()
@@ -53,6 +52,7 @@
     def __iter__(self):
         return iter(self.lines)
 
+
 class _llinstruction(object):
 
     __metaclass__ = abc.ABCMeta
@@ -90,6 +90,7 @@
           (that is, we've found the end of the file.)
         """
 
+
 class _jge(_llinstruction):
     """If the current rev is greater than or equal to op1, jump to op2."""
 
@@ -101,9 +102,11 @@
         return r'JGE %d %d' % (self._cmprev, self._target)
 
     def __eq__(self, other):
-        return (type(self) == type(other)
-                and self._cmprev == other._cmprev
-                and self._target == other._target)
+        return (
+            type(self) == type(other)
+            and self._cmprev == other._cmprev
+            and self._target == other._target
+        )
 
     def encode(self):
         return _llentry.pack(self._cmprev << 2, self._target)
@@ -113,20 +116,20 @@
             return self._target
         return pc + 1
 
+
 class _jump(_llinstruction):
     """Unconditional jumps are expressed as a JGE with op1 set to 0."""
 
     def __init__(self, op1, op2):
         if op1 != 0:
-            raise LineLogError("malformed JUMP, op1 must be 0, got %d" % op1)
+            raise LineLogError(b"malformed JUMP, op1 must be 0, got %d" % op1)
         self._target = op2
 
     def __str__(self):
         return r'JUMP %d' % (self._target)
 
     def __eq__(self, other):
-        return (type(self) == type(other)
-                and self._target == other._target)
+        return type(self) == type(other) and self._target == other._target
 
     def encode(self):
         return _llentry.pack(0, self._target)
@@ -134,14 +137,15 @@
     def execute(self, rev, pc, emit):
         return self._target
 
+
 class _eof(_llinstruction):
     """EOF is expressed as a JGE that always jumps to 0."""
 
     def __init__(self, op1, op2):
         if op1 != 0:
-            raise LineLogError("malformed EOF, op1 must be 0, got %d" % op1)
+            raise LineLogError(b"malformed EOF, op1 must be 0, got %d" % op1)
         if op2 != 0:
-            raise LineLogError("malformed EOF, op2 must be 0, got %d" % op2)
+            raise LineLogError(b"malformed EOF, op2 must be 0, got %d" % op2)
 
     def __str__(self):
         return r'EOF'
@@ -155,6 +159,7 @@
     def execute(self, rev, pc, emit):
         return None
 
+
 class _jl(_llinstruction):
     """If the current rev is less than op1, jump to op2."""
 
@@ -166,9 +171,11 @@
         return r'JL %d %d' % (self._cmprev, self._target)
 
     def __eq__(self, other):
-        return (type(self) == type(other)
-                and self._cmprev == other._cmprev
-                and self._target == other._target)
+        return (
+            type(self) == type(other)
+            and self._cmprev == other._cmprev
+            and self._target == other._target
+        )
 
     def encode(self):
         return _llentry.pack(1 | (self._cmprev << 2), self._target)
@@ -178,6 +185,7 @@
             return self._target
         return pc + 1
 
+
 class _line(_llinstruction):
     """Emit a line."""
 
@@ -191,9 +199,11 @@
         return r'LINE %d %d' % (self._rev, self._origlineno)
 
     def __eq__(self, other):
-        return (type(self) == type(other)
-                and self._rev == other._rev
-                and self._origlineno == other._origlineno)
+        return (
+            type(self) == type(other)
+            and self._rev == other._rev
+            and self._origlineno == other._origlineno
+        )
 
     def encode(self):
         return _llentry.pack(2 | (self._rev << 2), self._origlineno)
@@ -202,12 +212,13 @@
         emit(lineinfo(self._rev, self._origlineno, pc))
         return pc + 1
 
+
 def _decodeone(data, offset):
     """Decode a single linelog instruction from an offset in a buffer."""
     try:
         op1, op2 = _llentry.unpack_from(data, offset)
     except struct.error as e:
-        raise LineLogError('reading an instruction failed: %r' % e)
+        raise LineLogError(b'reading an instruction failed: %r' % e)
     opcode = op1 & 0b11
     op1 = op1 >> 2
     if opcode == 0:
@@ -220,7 +231,8 @@
         return _jl(op1, op2)
     elif opcode == 2:
         return _line(op1, op2)
-    raise NotImplementedError('Unimplemented opcode %r' % opcode)
+    raise NotImplementedError(b'Unimplemented opcode %r' % opcode)
+
 
 class linelog(object):
     """Efficient cache for per-line history information."""
@@ -236,25 +248,32 @@
         self._maxrev = maxrev
 
     def __eq__(self, other):
-        return (type(self) == type(other)
-                and self._program == other._program
-                and self._maxrev == other._maxrev)
+        return (
+            type(self) == type(other)
+            and self._program == other._program
+            and self._maxrev == other._maxrev
+        )
 
     def __repr__(self):
-        return '<linelog at %s: maxrev=%d size=%d>' % (
-            hex(id(self)), self._maxrev, len(self._program))
+        return b'<linelog at %s: maxrev=%d size=%d>' % (
+            hex(id(self)),
+            self._maxrev,
+            len(self._program),
+        )
 
     def debugstr(self):
         fmt = r'%%%dd %%s' % len(str(len(self._program)))
-        return pycompat.sysstr('\n').join(
-            fmt % (idx, i) for idx, i in enumerate(self._program[1:], 1))
+        return pycompat.sysstr(b'\n').join(
+            fmt % (idx, i) for idx, i in enumerate(self._program[1:], 1)
+        )
 
     @classmethod
     def fromdata(cls, buf):
         if len(buf) % _llentry.size != 0:
             raise LineLogError(
-                "invalid linelog buffer size %d (must be a multiple of %d)" % (
-                    len(buf), _llentry.size))
+                b"invalid linelog buffer size %d (must be a multiple of %d)"
+                % (len(buf), _llentry.size)
+            )
         expected = len(buf) / _llentry.size
         fakejge = _decodeone(buf, 0)
         if isinstance(fakejge, _jump):
@@ -263,9 +282,11 @@
             maxrev = fakejge._cmprev
         numentries = fakejge._target
         if expected != numentries:
-            raise LineLogError("corrupt linelog data: claimed"
-                               " %d entries but given data for %d entries" % (
-                                   expected, numentries))
+            raise LineLogError(
+                b"corrupt linelog data: claimed"
+                b" %d entries but given data for %d entries"
+                % (numentries, expected)
+            )
         instructions = [_eof(0, 0)]
         for offset in pycompat.xrange(1, numentries):
             instructions.append(_decodeone(buf, offset * _llentry.size))
@@ -273,7 +294,7 @@
 
     def encode(self):
         hdr = _jge(self._maxrev, len(self._program)).encode()
-        return hdr + ''.join(i.encode() for i in self._program[1:])
+        return hdr + b''.join(i.encode() for i in self._program[1:])
 
     def clear(self):
         self._program = []
@@ -281,8 +302,9 @@
         self._lastannotate = None
 
     def replacelines_vec(self, rev, a1, a2, blines):
-        return self.replacelines(rev, a1, a2, 0, len(blines),
-                                 _internal_blines=blines)
+        return self.replacelines(
+            rev, a1, a2, 0, len(blines), _internal_blines=blines
+        )
 
     def replacelines(self, rev, a1, a2, b1, b2, _internal_blines=None):
         """Replace lines [a1, a2) with lines [b1, b2)."""
@@ -298,8 +320,9 @@
             #        ar = self.annotate(self._maxrev)
         if a1 > len(ar.lines):
             raise LineLogError(
-                '%d contains %d lines, tried to access line %d' % (
-                    rev, len(ar.lines), a1))
+                b'%d contains %d lines, tried to access line %d'
+                % (rev, len(ar.lines), a1)
+            )
         elif a1 == len(ar.lines):
             # Simulated EOF instruction since we're at EOF, which
             # doesn't have a "real" line.
@@ -333,8 +356,9 @@
         if a1 < a2:
             if a2 > len(ar.lines):
                 raise LineLogError(
-                    '%d contains %d lines, tried to access line %d' % (
-                        rev, len(ar.lines), a2))
+                    b'%d contains %d lines, tried to access line %d'
+                    % (rev, len(ar.lines), a2)
+                )
             elif a2 == len(ar.lines):
                 endaddr = ar._eof
             else:
@@ -384,8 +408,9 @@
             executed += 1
         if pc is not None:
             raise LineLogError(
-                r'Probably hit an infinite loop in linelog. Program:\n' +
-                self.debugstr())
+                r'Probably hit an infinite loop in linelog. Program:\n'
+                + self.debugstr()
+            )
         ar = annotateresult(rev, lines, lastpc)
         self._lastannotate = ar
         return ar
@@ -429,8 +454,8 @@
             elif isinstance(inst, _line):
                 lines.append((inst._rev, inst._origlineno))
             else:
-                raise LineLogError("Illegal instruction %r" % inst)
+                raise LineLogError(b"Illegal instruction %r" % inst)
             if nextpc == end:
                 return lines
             pc = nextpc
-        raise LineLogError("Failed to perform getalllines")
+        raise LineLogError(b"Failed to perform getalllines")
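
A hedged usage sketch of the linelog API touched above (names as they
appear in this module; the exact shape of the result is an assumption)::

    from mercurial import linelog

    ll = linelog.linelog()
    # rev 1 replaces the empty range [0, 0) with three new lines [0, 3)
    ll.replacelines(1, 0, 0, 0, 3)
    ar = ll.annotate(1)
    # each entry pairs an introducing revision with its original line number
    assert [(info.rev, info.linenum) for info in ar.lines] == [
        (1, 0),
        (1, 1),
        (1, 2),
    ]
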
--- a/mercurial/localrepo.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/localrepo.py	Mon Oct 21 11:09:48 2019 -0400
@@ -23,12 +23,15 @@
     nullrev,
     short,
 )
+from .pycompat import (
+    delattr,
+    getattr,
+)
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
-    changelog,
     color,
     context,
     dirstate,
@@ -41,7 +44,6 @@
     filelog,
     hook,
     lock as lockmod,
-    manifest,
     match as matchmod,
     merge as mergemod,
     mergeutil,
@@ -52,7 +54,6 @@
     phases,
     pushkey,
     pycompat,
-    repository,
     repoview,
     revset,
     revsetlang,
@@ -66,15 +67,18 @@
     util,
     vfs as vfsmod,
 )
+
+from .interfaces import (
+    repository,
+    util as interfaceutil,
+)
+
 from .utils import (
-    interfaceutil,
     procutil,
     stringutil,
 )
 
-from .revlogutils import (
-    constants as revlogconst,
-)
+from .revlogutils import constants as revlogconst
 
 release = lockmod.release
 urlerr = util.urlerr
@@ -85,9 +89,11 @@
 # - '' for svfs relative paths
 _cachedfiles = set()
 
+
 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """
+
     def __get__(self, repo, type=None):
         if repo is None:
             return self
@@ -102,28 +108,34 @@
     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)
 
+
 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
+
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
-            _cachedfiles.add((path, 'plain'))
+            _cachedfiles.add((path, b'plain'))
 
     def join(self, obj, fname):
         return obj.vfs.join(fname)
 
+
 class storecache(_basefilecache):
     """filecache for files in the store"""
+
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
-            _cachedfiles.add((path, ''))
+            _cachedfiles.add((path, b''))
 
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
+
 class mixedrepostorecache(_basefilecache):
     """filecache for a mix files in .hg/store and outside"""
+
     def __init__(self, *pathsandlocations):
         # scmutil.filecache only uses the path for passing back into our
         # join(), so we can safely pass a list of paths and locations
@@ -132,14 +144,16 @@
 
     def join(self, obj, fnameandlocation):
         fname, location = fnameandlocation
-        if location == 'plain':
+        if location == b'plain':
             return obj.vfs.join(fname)
         else:
-            if location != '':
-                raise error.ProgrammingError('unexpected location: %s' %
-                                             location)
+            if location != b'':
+                raise error.ProgrammingError(
+                    b'unexpected location: %s' % location
+                )
             return obj.sjoin(fname)
 
+
 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property
 
@@ -150,6 +164,7 @@
         return None, False
     return cacheentry.obj, True
 
+
 class unfilteredpropertycache(util.propertycache):
     """propertycache that apply to unfiltered repo only"""
 
@@ -159,6 +174,7 @@
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)
 
+
 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering in account"""
 
@@ -170,15 +186,26 @@
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())
 
+
 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
+
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
+
     return wrapper
 
-moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
-              'unbundle'}
-legacycaps = moderncaps.union({'changegroupsubset'})
+
+moderncaps = {
+    b'lookup',
+    b'branchmap',
+    b'pushkey',
+    b'known',
+    b'getbundle',
+    b'unbundle',
+}
+legacycaps = moderncaps.union({b'changegroupsubset'})
+
 
 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
@@ -195,12 +222,14 @@
 
     def callcommand(self, command, args):
         if self._sent:
-            raise error.ProgrammingError('callcommand() cannot be used after '
-                                         'sendcommands()')
+            raise error.ProgrammingError(
+                b'callcommand() cannot be used after sendcommands()'
+            )
 
         if self._closed:
-            raise error.ProgrammingError('callcommand() cannot be used after '
-                                         'close()')
+            raise error.ProgrammingError(
+                b'callcommand() cannot be used after close()'
+            )
 
         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
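
A hedged sketch of the executor protocol this class implements (per the
ipeercommandexecutor interface; whether a future resolves before
sendcommands() depends on the peer type)::

    with peer.commandexecutor() as executor:
        f = executor.callcommand(b'lookup', {b'key': b'tip'})
        executor.sendcommands()   # a no-op for this local executor
    tipnode = f.result()
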
@@ -223,6 +252,7 @@
     def close(self):
         self._closed = True
 
+
 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''
@@ -232,7 +262,7 @@
 
         if caps is None:
             caps = moderncaps.copy()
-        self._repo = repo.filtered('served')
+        self._repo = repo.filtered(b'served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
 
@@ -264,19 +294,29 @@
         return self._caps
 
     def clonebundles(self):
-        return self._repo.tryread('clonebundles.manifest')
+        return self._repo.tryread(b'clonebundles.manifest')
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
-        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
-                                   pycompat.bytestr(four),
-                                   pycompat.bytestr(five))
-
-    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
-                  **kwargs):
-        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
-                                          common=common, bundlecaps=bundlecaps,
-                                          **kwargs)[1]
+        return b"%s %s %s %s %s" % (
+            one,
+            two,
+            pycompat.bytestr(three),
+            pycompat.bytestr(four),
+            pycompat.bytestr(five),
+        )
+
+    def getbundle(
+        self, source, heads=None, common=None, bundlecaps=None, **kwargs
+    ):
+        chunks = exchange.getbundlechunks(
+            self._repo,
+            source,
+            heads=heads,
+            common=common,
+            bundlecaps=bundlecaps,
+            **kwargs
+        )[1]
         cb = util.chunkbuffer(chunks)
 
         if exchange.bundle2requested(bundlecaps):
@@ -285,7 +325,7 @@
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
-            return changegroup.getunbundler('01', cb, None)
+            return changegroup.getunbundler(b'01', cb, None)
 
     def heads(self):
         return self._repo.heads()
@@ -303,8 +343,7 @@
         return self._repo.pushkey(namespace, key, old, new)
 
     def stream_out(self):
-        raise error.Abort(_('cannot perform stream clone against local '
-                            'peer'))
+        raise error.Abort(_(b'cannot perform stream clone against local peer'))
 
     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo
@@ -313,8 +352,8 @@
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
-                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
-                if util.safehasattr(ret, 'getchunks'):
+                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
+                if util.safehasattr(ret, b'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
@@ -339,8 +378,9 @@
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
-            raise error.ResponseError(_('push failed:'),
-                                      stringutil.forcebytestr(exc))
+            raise error.ResponseError(
+                _(b'push failed:'), stringutil.forcebytestr(exc)
+            )
 
     # End of _basewirecommands interface.
 
@@ -351,6 +391,7 @@
 
     # End of peer interface.
 
+
 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
@@ -368,20 +409,23 @@
         return self._repo.branches(nodes)
 
     def changegroup(self, nodes, source):
-        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
-                                      missingheads=self._repo.heads())
-        return changegroup.makechangegroup(self._repo, outgoing, '01', source)
+        outgoing = discovery.outgoing(
+            self._repo, missingroots=nodes, missingheads=self._repo.heads()
+        )
+        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
 
     def changegroupsubset(self, bases, heads, source):
-        outgoing = discovery.outgoing(self._repo, missingroots=bases,
-                                      missingheads=heads)
-        return changegroup.makechangegroup(self._repo, outgoing, '01', source)
+        outgoing = discovery.outgoing(
+            self._repo, missingroots=bases, missingheads=heads
+        )
+        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
 
     # End of baselegacywirecommands interface.
 
+
 # Increment the sub-version when the revlog v2 format changes to lock out old
 # clients.
-REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'
+REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
 
 # A repository with the sparserevlog feature will have delta chains that
 # can spread over a larger span. Sparse reading cuts these large spans into
@@ -390,7 +434,15 @@
 # huge amounts of memory, because the whole span would be read at once,
 # including all the intermediate revisions that aren't pertinent for the chain.
 # This is why once a repository has enabled sparse-read, it becomes required.
-SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
+SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
+
+# A repository with the sidedataflag requirement allows storing extra
+# information for revisions without altering their original hashes.
+SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
+
+# A repository with the copies-sidedata-changeset requirement will store
+# copies related information in changeset's sidedata.
+COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
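
These requirement strings live in the repository's .hg/requires file, one
per line; a client that does not recognize an entry must refuse to open
the repo. A minimal sketch of reading such a file (illustrative, not this
module's loader)::

    with open('.hg/requires', 'rb') as fp:
        requirements = set(fp.read().splitlines())
    if b'exp-copies-sidedata-changeset' in requirements:
        print('copy metadata is stored in changeset sidedata')
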
 
 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
@@ -401,6 +453,7 @@
 # set to reflect that the extension knows how to handle that requirements.
 featuresetupfuncs = set()
 
+
 def makelocalrepository(baseui, path, intents=None):
     """Create a local repository object.
 
@@ -516,8 +569,10 @@
         sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
 
         if not sharedvfs.exists():
-            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
-                                    b'directory %s') % sharedvfs.base)
+            raise error.RepoError(
+                _(b'.hg/sharedpath points to nonexistent directory %s')
+                % sharedvfs.base
+            )
 
         features.add(repository.REPO_FEATURE_SHARED_STORAGE)
 
@@ -528,12 +583,14 @@
         cachepath = hgvfs.join(b'cache')
     wcachepath = hgvfs.join(b'wcache')
 
-
     # The store has changed over time and the exact layout is dictated by
     # requirements. The store interface abstracts differences across all
     # of them.
-    store = makestore(requirements, storebasepath,
-                      lambda base: vfsmod.vfs(base, cacheaudited=True))
+    store = makestore(
+        requirements,
+        storebasepath,
+        lambda base: vfsmod.vfs(base, cacheaudited=True),
+    )
     hgvfs.createmode = store.createmode
 
     storevfs = store.vfs
@@ -557,33 +614,36 @@
     for iface, fn in REPO_INTERFACES:
         # We pass all potentially useful state to give extensions tons of
         # flexibility.
-        typ = fn()(ui=ui,
-                 intents=intents,
-                 requirements=requirements,
-                 features=features,
-                 wdirvfs=wdirvfs,
-                 hgvfs=hgvfs,
-                 store=store,
-                 storevfs=storevfs,
-                 storeoptions=storevfs.options,
-                 cachevfs=cachevfs,
-                 wcachevfs=wcachevfs,
-                 extensionmodulenames=extensionmodulenames,
-                 extrastate=extrastate,
-                 baseclasses=bases)
+        typ = fn()(
+            ui=ui,
+            intents=intents,
+            requirements=requirements,
+            features=features,
+            wdirvfs=wdirvfs,
+            hgvfs=hgvfs,
+            store=store,
+            storevfs=storevfs,
+            storeoptions=storevfs.options,
+            cachevfs=cachevfs,
+            wcachevfs=wcachevfs,
+            extensionmodulenames=extensionmodulenames,
+            extrastate=extrastate,
+            baseclasses=bases,
+        )
 
         if not isinstance(typ, type):
-            raise error.ProgrammingError('unable to construct type for %s' %
-                                         iface)
+            raise error.ProgrammingError(
+                b'unable to construct type for %s' % iface
+            )
 
         bases.append(typ)
 
     # type() allows you to use characters in type names that wouldn't be
     # recognized as Python symbols in source code. We abuse that to add
     # rich information about our constructed repo.
-    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
-        wdirvfs.base,
-        b','.join(sorted(requirements))))
+    name = pycompat.sysstr(
+        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
+    )
 
     cls = type(name, tuple(bases), {})
 
@@ -600,7 +660,9 @@
         cachevfs=cachevfs,
         wcachevfs=wcachevfs,
         features=features,
-        intents=intents)
+        intents=intents,
+    )
+
 
 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
     """Load hgrc files/content into a ui instance.
@@ -620,6 +682,7 @@
     except IOError:
         return False
 
+
 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
     """Perform additional actions after .hg/hgrc is loaded.
 
@@ -643,7 +706,8 @@
 
         for name in names:
             if not ui.hasconfig(b'extensions', name):
-                ui.setconfig(b'extensions', name, b'', source='autoload')
+                ui.setconfig(b'extensions', name, b'', source=b'autoload')
+
 
 def gathersupportedrequirements(ui):
     """Determine the complete set of recognized requirements."""
@@ -663,11 +727,12 @@
         engine = util.compengines[name]
         if engine.available() and engine.revlogheader():
             supported.add(b'exp-compression-%s' % name)
-            if engine.name() == 'zstd':
+            if engine.name() == b'zstd':
                 supported.add(b'revlog-compression-zstd')
 
     return supported
 
+
 def ensurerequirementsrecognized(requirements, supported):
     """Validate that a set of local requirements is recognized.
 
@@ -690,10 +755,14 @@
 
     if missing:
         raise error.RequirementError(
-            _(b'repository requires features unknown to this Mercurial: %s') %
-            b' '.join(sorted(missing)),
-            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
-                   b'for more information'))
+            _(b'repository requires features unknown to this Mercurial: %s')
+            % b' '.join(sorted(missing)),
+            hint=_(
+                b'see https://mercurial-scm.org/wiki/MissingRequirement '
+                b'for more information'
+            ),
+        )
+
 
 def ensurerequirementscompatible(ui, requirements):
     """Validates that a set of recognized requirements is mutually compatible.
@@ -709,21 +778,28 @@
     ``error.RepoError`` should be raised on failure.
     """
     if b'exp-sparse' in requirements and not sparse.enabled:
-        raise error.RepoError(_(b'repository is using sparse feature but '
-                                b'sparse is not enabled; enable the '
-                                b'"sparse" extensions to access'))
+        raise error.RepoError(
+            _(
+                b'repository is using sparse feature but '
+                b'sparse is not enabled; enable the '
+                b'"sparse" extensions to access'
+            )
+        )
+
 
 def makestore(requirements, path, vfstype):
     """Construct a storage object for a repository."""
     if b'store' in requirements:
         if b'fncache' in requirements:
-            return storemod.fncachestore(path, vfstype,
-                                         b'dotencode' in requirements)
+            return storemod.fncachestore(
+                path, vfstype, b'dotencode' in requirements
+            )
 
         return storemod.encodedstore(path, vfstype)
 
     return storemod.basicstore(path, vfstype)
 
+
 def resolvestorevfsoptions(ui, requirements, features):
     """Resolve the options to pass to the store vfs opener.
 
@@ -746,9 +822,20 @@
     # meaningful on such old repos.
     if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
         options.update(resolverevlogstorevfsoptions(ui, requirements, features))
+    else:  # explicitly mark repo as using revlogv0
+        options[b'revlogv0'] = True
+
+    if COPIESSDC_REQUIREMENT in requirements:
+        options[b'copies-storage'] = b'changeset-sidedata'
+    else:
+        writecopiesto = ui.config(b'experimental', b'copies.write-to')
+        copiesextramode = (b'changeset-only', b'compatibility')
+        if writecopiesto in copiesextramode:
+            options[b'copies-storage'] = b'extra'
 
     return options
 
+
 def resolverevlogstorevfsoptions(ui, requirements, features):
     """Resolve opener options specific to revlogs."""
 
@@ -768,15 +855,17 @@
     if chunkcachesize is not None:
         options[b'chunkcachesize'] = chunkcachesize
 
-    deltabothparents = ui.configbool(b'storage',
-                                     b'revlog.optimize-delta-parent-choice')
+    deltabothparents = ui.configbool(
+        b'storage', b'revlog.optimize-delta-parent-choice'
+    )
     options[b'deltabothparents'] = deltabothparents
 
     lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
     lazydeltabase = False
     if lazydelta:
-        lazydeltabase = ui.configbool(b'storage',
-                                      b'revlog.reuse-external-delta-parent')
+        lazydeltabase = ui.configbool(
+            b'storage', b'revlog.reuse-external-delta-parent'
+        )
     if lazydeltabase is None:
         lazydeltabase = not scmutil.gddeltaconfig(ui)
     options[b'lazydelta'] = lazydelta
@@ -786,16 +875,15 @@
     if 0 <= chainspan:
         options[b'maxdeltachainspan'] = chainspan
 
-    mmapindexthreshold = ui.configbytes(b'experimental',
-                                        b'mmapindexthreshold')
+    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
     if mmapindexthreshold is not None:
         options[b'mmapindexthreshold'] = mmapindexthreshold
 
     withsparseread = ui.configbool(b'experimental', b'sparse-read')
-    srdensitythres = float(ui.config(b'experimental',
-                                     b'sparse-read.density-threshold'))
-    srmingapsize = ui.configbytes(b'experimental',
-                                  b'sparse-read.min-gap-size')
+    srdensitythres = float(
+        ui.config(b'experimental', b'sparse-read.density-threshold')
+    )
+    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
     options[b'with-sparse-read'] = withsparseread
     options[b'sparse-read-density-threshold'] = srdensitythres
     options[b'sparse-read-min-gap-size'] = srmingapsize
@@ -805,6 +893,9 @@
     if sparserevlog:
         options[b'generaldelta'] = True
 
+    sidedata = SIDEDATA_REQUIREMENT in requirements
+    options[b'side-data'] = sidedata
+
     maxchainlen = None
     if sparserevlog:
         maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
@@ -819,18 +910,18 @@
         #
         # The compression used for new entries will be "the last one"
         prefix = r.startswith
-        if prefix('revlog-compression-') or prefix('exp-compression-'):
-            options[b'compengine'] = r.split('-', 2)[2]
+        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
+            options[b'compengine'] = r.split(b'-', 2)[2]
 
     options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
     if options[b'zlib.level'] is not None:
         if not (0 <= options[b'zlib.level'] <= 9):
-            msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
+            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
             raise error.Abort(msg % options[b'zlib.level'])
     options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
     if options[b'zstd.level'] is not None:
         if not (0 <= options[b'zstd.level'] <= 22):
-            msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
+            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
             raise error.Abort(msg % options[b'zstd.level'])
 
     if repository.NARROW_REQUIREMENT in requirements:
@@ -838,10 +929,12 @@
 
     return options
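
The engine name is recovered from the requirement string by splitting on
at most two dashes, so engine names that themselves contain dashes
survive; for instance (values illustrative)::

    assert b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
    assert b'exp-compression-zstd-dict'.split(b'-', 2)[2] == b'zstd-dict'
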
 
+
 def makemain(**kwargs):
     """Produce a type conforming to ``ilocalrepositorymain``."""
     return localrepository
 
+
 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
 class revlogfilestorage(object):
     """File storage when using revlogs."""
@@ -852,6 +945,7 @@
 
         return filelog.filelog(self.svfs, path)
 
+
 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
 class revlognarrowfilestorage(object):
     """File storage when using revlogs and narrow files."""
@@ -862,6 +956,7 @@
 
         return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
 
+
 def makefilestorage(requirements, features, **kwargs):
     """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
     features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
@@ -872,6 +967,7 @@
     else:
         return revlogfilestorage
 
+
 # List of repository interfaces and factory functions for them. Each
 # will be called in order during ``makelocalrepository()`` to iteratively
 # derive the final type for a local repository instance. We capture the
@@ -882,6 +978,7 @@
     (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
 ]
 
+
 @interfaceutil.implementer(repository.ilocalrepositorymain)
 class localrepository(object):
     """Main class for representing local repositories.
@@ -904,21 +1001,23 @@
     #    being successful (repository sizes went up due to worse delta
     #    chains), and the code was deleted in 4.6.
     supportedformats = {
-        'revlogv1',
-        'generaldelta',
-        'treemanifest',
+        b'revlogv1',
+        b'generaldelta',
+        b'treemanifest',
+        COPIESSDC_REQUIREMENT,
         REVLOGV2_REQUIREMENT,
+        SIDEDATA_REQUIREMENT,
         SPARSEREVLOG_REQUIREMENT,
         bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
     }
     _basesupported = supportedformats | {
-        'store',
-        'fncache',
-        'shared',
-        'relshared',
-        'dotencode',
-        'exp-sparse',
-        'internal-phase'
+        b'store',
+        b'fncache',
+        b'shared',
+        b'relshared',
+        b'dotencode',
+        b'exp-sparse',
+        b'internal-phase',
     }
 
     # list of prefix for file which can be written without 'wlock'
@@ -928,22 +1027,35 @@
         # two, but pretty much all the existing code assumes
         # wlock is not needed so we keep them excluded for
         # now.
-        'hgrc',
-        'requires',
+        b'hgrc',
+        b'requires',
         # XXX cache is a complicated business; someone
         # should investigate this in depth at some point
-        'cache/',
+        b'cache/',
         # XXX shouldn't be dirstate covered by the wlock?
-        'dirstate',
+        b'dirstate',
         # XXX bisect was still a bit too messy at the time
         # this changeset was introduced. Someone should fix
         # the remainig bit and drop this line
-        'bisect.state',
+        b'bisect.state',
     }
 
-    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
-                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
-                 features, intents=None):
+    def __init__(
+        self,
+        baseui,
+        ui,
+        origroot,
+        wdirvfs,
+        hgvfs,
+        requirements,
+        supportedrequirements,
+        sharedpath,
+        store,
+        cachevfs,
+        wcachevfs,
+        features,
+        intents=None,
+    ):
         """Create a new local repository instance.
 
         Most callers should use ``hg.repository()``, ``localrepo.instance()``,
@@ -1015,8 +1127,9 @@
 
         self.filtername = None
 
-        if (self.ui.configbool('devel', 'all-warnings') or
-            self.ui.configbool('devel', 'check-locks')):
+        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
+            b'devel', b'check-locks'
+        ):
             self.vfs.audit = self._getvfsward(self.vfs.audit)
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
@@ -1028,11 +1141,12 @@
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
-        if (self.ui.configbool('devel', 'all-warnings') or
-            self.ui.configbool('devel', 'check-locks')):
-            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
+        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
+            b'devel', b'check-locks'
+        ):
+            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
-            else: # standard vfs
+            else:  # standard vfs
                 self.svfs.audit = self._getsvfsward(self.svfs.audit)
 
         self._dirstatevalidatewarned = False
@@ -1071,29 +1185,39 @@
 
         self._extrafilterid = repoview.extrafilter(ui)
 
+        self.filecopiesmode = None
+        if COPIESSDC_REQUIREMENT in self.requirements:
+            self.filecopiesmode = b'changeset-sidedata'
+
     def _getvfsward(self, origfunc):
         """build a ward for self.vfs"""
         rref = weakref.ref(self)
+
         def checkvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
-            if (repo is None
-                or not util.safehasattr(repo, '_wlockref')
-                or not util.safehasattr(repo, '_lockref')):
+            if (
+                repo is None
+                or not util.safehasattr(repo, b'_wlockref')
+                or not util.safehasattr(repo, b'_lockref')
+            ):
                 return
-            if mode in (None, 'r', 'rb'):
+            if mode in (None, b'r', b'rb'):
                 return
             if path.startswith(repo.path):
                 # truncate name relative to the repository (.hg)
-                path = path[len(repo.path) + 1:]
-            if path.startswith('cache/'):
-                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
-                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
-            if path.startswith('journal.') or path.startswith('undo.'):
+                path = path[len(repo.path) + 1 :]
+            if path.startswith(b'cache/'):
+                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
+                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
+            if path.startswith(b'journal.') or path.startswith(b'undo.'):
                 # journal is covered by 'lock'
                 if repo._currentlock(repo._lockref) is None:
-                    repo.ui.develwarn('write with no lock: "%s"' % path,
-                                      stacklevel=3, config='check-locks')
+                    repo.ui.develwarn(
+                        b'write with no lock: "%s"' % path,
+                        stacklevel=3,
+                        config=b'check-locks',
+                    )
             elif repo._currentlock(repo._wlockref) is None:
                 # rest of vfs files are covered by 'wlock'
                 #
@@ -1101,28 +1225,35 @@
                 for prefix in self._wlockfreeprefix:
                     if path.startswith(prefix):
                         return
-                repo.ui.develwarn('write with no wlock: "%s"' % path,
-                                  stacklevel=3, config='check-locks')
+                repo.ui.develwarn(
+                    b'write with no wlock: "%s"' % path,
+                    stacklevel=3,
+                    config=b'check-locks',
+                )
             return ret
+
         return checkvfs
 
     def _getsvfsward(self, origfunc):
         """build a ward for self.svfs"""
         rref = weakref.ref(self)
+
         def checksvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
-            if repo is None or not util.safehasattr(repo, '_lockref'):
+            if repo is None or not util.safehasattr(repo, b'_lockref'):
                 return
-            if mode in (None, 'r', 'rb'):
+            if mode in (None, b'r', b'rb'):
                 return
             if path.startswith(repo.sharedpath):
                 # truncate name relative to the repository (.hg)
-                path = path[len(repo.sharedpath) + 1:]
+                path = path[len(repo.sharedpath) + 1 :]
             if repo._currentlock(repo._lockref) is None:
-                repo.ui.develwarn('write with no lock: "%s"' % path,
-                                  stacklevel=4)
+                repo.ui.develwarn(
+                    b'write with no lock: "%s"' % path, stacklevel=4
+                )
             return ret
+
         return checksvfs
 
     def close(self):
@@ -1133,11 +1264,12 @@
             self._revbranchcache.write()
 
     def _restrictcapabilities(self, caps):
-        if self.ui.configbool('experimental', 'bundle2-advertise'):
+        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
             caps = set(caps)
-            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
-                                                              role='client'))
-            caps.add('bundle2=' + urlreq.quote(capsblob))
+            capsblob = bundle2.encodecaps(
+                bundle2.getrepocaps(self, role=b'client')
+            )
+            caps.add(b'bundle2=' + urlreq.quote(capsblob))
         return caps
 
     def _writerequirements(self):
@@ -1156,14 +1288,15 @@
     def nofsauditor(self):
         # This is only used by context.basectx.match in order to detect
         # files in subrepos.
-        return pathutil.pathauditor(self.root, callback=self._checknested,
-                                    realfs=False, cached=True)
+        return pathutil.pathauditor(
+            self.root, callback=self._checknested, realfs=False, cached=True
+        )
 
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
-        subpath = path[len(self.root) + 1:]
+        subpath = path[len(self.root) + 1 :]
         normsubpath = util.pconvert(subpath)
 
         # XXX: Checking against the current working copy is wrong in
@@ -1186,19 +1319,19 @@
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
-            prefix = '/'.join(parts)
+            prefix = b'/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
-                    return sub.checknested(subpath[len(prefix) + 1:])
+                    return sub.checknested(subpath[len(prefix) + 1 :])
             else:
                 parts.pop()
         return False
 
     def peer(self):
-        return localpeer(self) # not cached to avoid reference cycle
+        return localpeer(self)  # not cached to avoid reference cycle
 
     def unfiltered(self):
         """Return unfiltered version of the repository
@@ -1218,14 +1351,18 @@
 
         In other words, there is always only one level of `repoview` "filtering".
         """
-        if self._extrafilterid is not None and '%' not in name:
-            name = name + '%'  + self._extrafilterid
+        if self._extrafilterid is not None and b'%' not in name:
+            name = name + b'%' + self._extrafilterid
 
         cls = repoview.newtype(self.unfiltered().__class__)
         return cls(self, name, visibilityexceptions)
 
-    @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
-                         ('bookmarks', ''), ('00changelog.i', ''))
+    @mixedrepostorecache(
+        (b'bookmarks', b'plain'),
+        (b'bookmarks.current', b'plain'),
+        (b'bookmarks', b''),
+        (b'00changelog.i', b''),
+    )
     def _bookmarks(self):
         # Since the multiple files involved in the transaction cannot be
         # written atomically (with current repository format), there is a race
@@ -1280,7 +1417,7 @@
 
     def _refreshchangelog(self):
         """make sure the in memory changelog match the on-disk one"""
-        if ('changelog' in vars(self) and self.currenttransaction() is None):
+        if b'changelog' in vars(self) and self.currenttransaction() is None:
             del self.changelog
 
     @property
@@ -1290,26 +1427,23 @@
     # _phasesets depend on changelog. what we need is to call
     # _phasecache.invalidate() if '00changelog.i' was changed, but it
     # can't be easily expressed in filecache mechanism.
-    @storecache('phaseroots', '00changelog.i')
+    @storecache(b'phaseroots', b'00changelog.i')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)
 
-    @storecache('obsstore')
+    @storecache(b'obsstore')
     def obsstore(self):
         return obsolete.makestore(self.ui, self)
 
-    @storecache('00changelog.i')
+    @storecache(b'00changelog.i')
     def changelog(self):
-        return changelog.changelog(self.svfs,
-                                   trypending=txnutil.mayhavepending(self.root))
-
-    @storecache('00manifest.i')
+        return self.store.changelog(txnutil.mayhavepending(self.root))
+
+    @storecache(b'00manifest.i')
     def manifestlog(self):
-        rootstore = manifest.manifestrevlog(self.svfs)
-        return manifest.manifestlog(self.svfs, self, rootstore,
-                                    self._storenarrowmatch)
-
-    @repofilecache('dirstate')
+        return self.store.manifestlog(self, self._storenarrowmatch)
+
+    @repofilecache(b'dirstate')
     def dirstate(self):
         return self._makedirstate()
 
@@ -1317,8 +1451,9 @@
         """Extension point for wrapping the dirstate per-repo."""
         sparsematchfn = lambda: sparse.matcher(self)
 
-        return dirstate.dirstate(self.vfs, self.ui, self.root,
-                                 self._dirstatevalidate, sparsematchfn)
+        return dirstate.dirstate(
+            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
+        )
 
     def _dirstatevalidate(self, node):
         try:
@@ -1327,8 +1462,10 @@
         except error.LookupError:
             if not self._dirstatevalidatewarned:
                 self._dirstatevalidatewarned = True
-                self.ui.warn(_("warning: ignoring unknown"
-                               " working parent %s!\n") % short(node))
+                self.ui.warn(
+                    _(b"warning: ignoring unknown working parent %s!\n")
+                    % short(node)
+                )
             return nullid
 
     @storecache(narrowspec.FILENAME)
@@ -1384,20 +1521,22 @@
             return changeid
         if isinstance(changeid, slice):
             # wdirrev isn't contiguous so the slice shouldn't include it
-            return [self[i]
-                    for i in pycompat.xrange(*changeid.indices(len(self)))
-                    if i not in self.changelog.filteredrevs]
+            return [
+                self[i]
+                for i in pycompat.xrange(*changeid.indices(len(self)))
+                if i not in self.changelog.filteredrevs
+            ]
         try:
             if isinstance(changeid, int):
                 node = self.changelog.node(changeid)
                 rev = changeid
-            elif changeid == 'null':
+            elif changeid == b'null':
                 node = nullid
                 rev = nullrev
-            elif changeid == 'tip':
+            elif changeid == b'tip':
                 node = self.changelog.tip()
                 rev = self.changelog.rev(node)
-            elif changeid == '.':
+            elif changeid == b'.':
                 # this is a hack to delay/avoid loading obsmarkers
                 # when we know that '.' won't be hidden
                 node = self.dirstate.p1()
@@ -1407,18 +1546,20 @@
                     node = changeid
                     rev = self.changelog.rev(changeid)
                 except error.FilteredLookupError:
-                    changeid = hex(changeid) # for the error message
+                    changeid = hex(changeid)  # for the error message
                     raise
                 except LookupError:
                     # check if it might have come from damaged dirstate
                     #
                     # XXX we could avoid the unfiltered if we had a recognizable
                     # exception for filtered changeset access
-                    if (self.local()
-                        and changeid in self.unfiltered().dirstate.parents()):
-                        msg = _("working directory has unknown parent '%s'!")
+                    if (
+                        self.local()
+                        and changeid in self.unfiltered().dirstate.parents()
+                    ):
+                        msg = _(b"working directory has unknown parent '%s'!")
                         raise error.Abort(msg % short(changeid))
-                    changeid = hex(changeid) # for the error message
+                    changeid = hex(changeid)  # for the error message
                     raise
 
             elif len(changeid) == 40:
@@ -1426,17 +1567,20 @@
                 rev = self.changelog.rev(node)
             else:
                 raise error.ProgrammingError(
-                        "unsupported changeid '%s' of type %s" %
-                        (changeid, type(changeid)))
+                    b"unsupported changeid '%s' of type %s"
+                    % (changeid, type(changeid))
+                )
 
             return context.changectx(self, rev, node)
 
         except (error.FilteredIndexError, error.FilteredLookupError):
-            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
-                                                % pycompat.bytestr(changeid))
+            raise error.FilteredRepoLookupError(
+                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
+            )
         except (IndexError, LookupError):
             raise error.RepoLookupError(
-                _("unknown revision '%s'") % pycompat.bytestr(changeid))
+                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
+            )
         except error.WdirUnsupported:
             return context.workingctx(self)
 
@@ -1502,15 +1646,18 @@
         ``{name: definitionstring}``.
         '''
         if user:
-            m = revset.matchany(self.ui, specs,
-                                lookup=revset.lookupfn(self),
-                                localalias=localalias)
+            m = revset.matchany(
+                self.ui,
+                specs,
+                lookup=revset.lookupfn(self),
+                localalias=localalias,
+            )
         else:
             m = revset.matchany(None, specs, localalias=localalias)
         return m(self)
 
     def url(self):
-        return 'file:' + self.root
+        return b'file:' + self.root
 
     def hook(self, name, throw=False, **args):
         """Call a hook, passing this repo instance.
@@ -1552,7 +1699,7 @@
         else:
             tags = self._tagscache.tags
         rev = self.changelog.rev
-        for k, v in tags.iteritems():
+        for k, v in pycompat.iteritems(tags):
             try:
                 # ignore tags to unknown nodes
                 rev(v)
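
A minimal sketch of a py2/py3 iteration shim like the pycompat.iteritems()
and pycompat.itervalues() calls this hunk switches to (the split shown is
illustrative, not necessarily mercurial's exact implementation):

    import sys

    if sys.version_info[0] >= 3:
        def iteritems(d):
            return iter(d.items())    # py3: items() is already a lazy view
        def itervalues(d):
            return iter(d.values())
    else:
        def iteritems(d):
            return d.iteritems()      # py2: avoid materializing a list
        def itervalues(d):
            return d.itervalues()
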
@@ -1575,11 +1722,10 @@
         # be one tagtype for all such "virtual" tags?  Or is the status
         # quo fine?
 
-
         # map tag name to (node, hist)
         alltags = tagsmod.findglobaltags(self.ui, self)
         # map tag name to tag type
-        tagtypes = dict((tag, 'global') for tag in alltags)
+        tagtypes = dict((tag, b'global') for tag in alltags)
 
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
 
@@ -1588,12 +1734,16 @@
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
-        for (name, (node, hist)) in alltags.iteritems():
+        for (name, (node, hist)) in pycompat.iteritems(alltags):
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
-        tags['tip'] = self.changelog.tip()
-        tagtypes = dict([(encoding.tolocal(name), value)
-                         for (name, value) in tagtypes.iteritems()])
+        tags[b'tip'] = self.changelog.tip()
+        tagtypes = dict(
+            [
+                (encoding.tolocal(name), value)
+                for (name, value) in pycompat.iteritems(tagtypes)
+            ]
+        )
         return (tags, tagtypes)
 
     def tagtype(self, tagname):
@@ -1611,7 +1761,7 @@
         '''return a list of tags ordered by revision'''
         if not self._tagscache.tagslist:
             l = []
-            for t, n in self.tags().iteritems():
+            for t, n in pycompat.iteritems(self.tags()):
                 l.append((self.changelog.rev(n), t, n))
             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
 
@@ -1621,9 +1771,9 @@
         '''return the tags associated with a node'''
         if not self._tagscache.nodetagscache:
             nodetagscache = {}
-            for t, n in self._tagscache.tags.iteritems():
+            for t, n in pycompat.iteritems(self._tagscache.tags):
                 nodetagscache.setdefault(n, []).append(t)
-            for tags in nodetagscache.itervalues():
+            for tags in pycompat.itervalues(nodetagscache):
                 tags.sort()
             self._tagscache.nodetagscache = nodetagscache
         return self._tagscache.nodetagscache.get(node, [])
@@ -1655,14 +1805,14 @@
             return self.branchmap().branchtip(branch)
         except KeyError:
             if not ignoremissing:
-                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
+                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
             else:
                 pass
 
     def lookup(self, key):
         node = scmutil.revsymbol(self, key).node()
         if node is None:
-            raise error.RepoLookupError(_("unknown revision '%s'") % key)
+            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
         return node
 
     def lookupbranch(self, key):
@@ -1688,7 +1838,7 @@
     def publishing(self):
         # it's safe (and desirable) to trust the publish flag unconditionally
         # so that we don't finalize changes shared between users via ssh or nfs
-        return self.ui.configbool('phases', 'publish', untrusted=True)
+        return self.ui.configbool(b'phases', b'publish', untrusted=True)
 
     def cancopy(self):
         # so statichttprepo's override of local() works
@@ -1697,12 +1847,12 @@
         if not self.publishing():
             return True
         # if publishing we can't copy if there is filtered content
-        return not self.filtered('visible').changelog.filteredrevs
+        return not self.filtered(b'visible').changelog.filteredrevs
 
     def shared(self):
         '''the type of shared repository (None if not shared)'''
         if self.sharedpath != self.path:
-            return 'store'
+            return b'store'
         return None
 
     def wjoin(self, f, *insidef):
@@ -1727,8 +1877,9 @@
     def filectx(self, path, changeid=None, fileid=None, changectx=None):
         """changeid must be a changeset revision, if specified.
            fileid can be a file revision or node."""
-        return context.filectx(self, path, changeid, fileid,
-                               changectx=changectx)
+        return context.filectx(
+            self, path, changeid, fileid, changectx=changectx
+        )
 
     def getcwd(self):
         return self.dirstate.getcwd()
@@ -1740,22 +1891,24 @@
         if filter not in self._filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
-                if cmd == '!':
+                if cmd == b'!':
                     continue
-                mf = matchmod.match(self.root, '', [pat])
+                mf = matchmod.match(self.root, b'', [pat])
                 fn = None
                 params = cmd
-                for name, filterfn in self._datafilters.iteritems():
+                for name, filterfn in pycompat.iteritems(self._datafilters):
                     if cmd.startswith(name):
                         fn = filterfn
-                        params = cmd[len(name):].lstrip()
+                        params = cmd[len(name) :].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: procutil.filter(s, c)
+                    fn.__name__ = 'commandfilter'
                 # Wrap old filters not supporting keyword arguments
                 if not pycompat.getargspec(fn)[2]:
                     oldfn = fn
-                    fn = lambda s, c, **kwargs: oldfn(s, c)
+                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
+                    fn.__name__ = 'compat-' + oldfn.__name__
                 l.append((mf, fn, params))
             self._filterpats[filter] = l
         return self._filterpats[filter]
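
The oldfn=oldfn default argument added above guards against Python's
late-binding closures: without it, every compat wrapper built in the loop
would call whatever oldfn referred to on the loop's final iteration. A
self-contained illustration of the pitfall and the idiom:

    fns = []
    for name in ('a', 'b'):
        fns.append(lambda: name)            # closure reads 'name' at call time
    print([f() for f in fns])               # ['b', 'b'] -- both see the last value

    fns = []
    for name in ('a', 'b'):
        fns.append(lambda name=name: name)  # default binds the value immediately
    print([f() for f in fns])               # ['a', 'b']
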
@@ -1763,7 +1916,10 @@
     def _filter(self, filterpats, filename, data):
         for mf, fn, cmd in filterpats:
             if mf(filename):
-                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
+                self.ui.debug(
+                    b"filtering %s through %s\n"
+                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
+                )
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break
 
@@ -1771,11 +1927,11 @@
 
     @unfilteredpropertycache
     def _encodefilterpats(self):
-        return self._loadfilter('encode')
+        return self._loadfilter(b'encode')
 
     @unfilteredpropertycache
     def _decodefilterpats(self):
-        return self._loadfilter('decode')
+        return self._loadfilter(b'decode')
 
     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter
@@ -1793,12 +1949,13 @@
         This returns length of written (maybe decoded) data.
         """
         data = self._filter(self._decodefilterpats, filename, data)
-        if 'l' in flags:
+        if b'l' in flags:
             self.wvfs.symlink(data, filename)
         else:
-            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
-                            **kwargs)
-            if 'x' in flags:
+            self.wvfs.write(
+                filename, data, backgroundclose=backgroundclose, **kwargs
+            )
+            if b'x' in flags:
                 self.wvfs.setflags(filename, False, True)
             else:
                 self.wvfs.setflags(filename, False, False)
@@ -1819,24 +1976,26 @@
         return None
 
     def transaction(self, desc, report=None):
-        if (self.ui.configbool('devel', 'all-warnings')
-                or self.ui.configbool('devel', 'check-locks')):
+        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
+            b'devel', b'check-locks'
+        ):
             if self._currentlock(self._lockref) is None:
-                raise error.ProgrammingError('transaction requires locking')
+                raise error.ProgrammingError(b'transaction requires locking')
         tr = self.currenttransaction()
         if tr is not None:
             return tr.nest(name=desc)
 
         # abort here if the journal already exists
-        if self.svfs.exists("journal"):
+        if self.svfs.exists(b"journal"):
             raise error.RepoError(
-                _("abandoned transaction found"),
-                hint=_("run 'hg recover' to clean up transaction"))
-
-        idbase = "%.40f#%f" % (random.random(), time.time())
+                _(b"abandoned transaction found"),
+                hint=_(b"run 'hg recover' to clean up transaction"),
+            )
+
+        idbase = b"%.40f#%f" % (random.random(), time.time())
         ha = hex(hashlib.sha1(idbase).digest())
-        txnid = 'TXN:' + ha
-        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
+        txnid = b'TXN:' + ha
+        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
 
         self._writejournal(desc)
         renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
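
The transaction id assembled above hashes randomness plus the current time;
a stdlib-only sketch of the same recipe (hexdigest() stands in for
mercurial's hex() over .digest()):

    import hashlib
    import random
    import time

    idbase = b"%.40f#%f" % (random.random(), time.time())
    txnid = b'TXN:' + hashlib.sha1(idbase).hexdigest().encode('ascii')
    print(txnid)  # e.g. b'TXN:9f2c...'
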
@@ -1844,7 +2003,7 @@
             rp = report
         else:
             rp = self.ui.warn
-        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
+        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
         # we must avoid cyclic reference between repo and transaction.
         reporef = weakref.ref(self)
         # Code to track tag movement
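
The weakref.ref(self) above is the standard way to hand callbacks to the
transaction without creating a repo <-> transaction reference cycle:
dereferencing the ref yields None once the repo is gone, which is exactly
the case the new releasefn guard below handles. A minimal sketch:

    import weakref

    class Repo(object):
        def hook(self, name):
            print('running hook %s' % name)

    repo = Repo()
    reporef = weakref.ref(repo)   # does not keep repo alive

    def txnclosehook():
        r = reporef()
        if r is None:             # repo already collected; nothing to do
            return
        r.hook('txnclose')

    txnclosehook()                # running hook txnclose
    del repo
    txnclosehook()                # silently does nothing
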
@@ -1882,9 +2041,12 @@
         #   "+M": tag is moved (new value),
         tracktags = lambda x: None
         # experimental config: experimental.hook-track-tags
-        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
-        if desc != 'strip' and shouldtracktags:
+        shouldtracktags = self.ui.configbool(
+            b'experimental', b'hook-track-tags'
+        )
+        if desc != b'strip' and shouldtracktags:
             oldheads = self.changelog.headrevs()
+
             def tracktags(tr2):
                 repo = reporef()
                 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
@@ -1894,13 +2056,15 @@
                 # As we do it only once, building a set would not be cheaper
                 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                 if changes:
-                    tr2.hookargs['tag_moved'] = '1'
-                    with repo.vfs('changes/tags.changes', 'w',
-                                  atomictemp=True) as changesfile:
+                    tr2.hookargs[b'tag_moved'] = b'1'
+                    with repo.vfs(
+                        b'changes/tags.changes', b'w', atomictemp=True
+                    ) as changesfile:
                         # note: we do not register the file with the transaction
                         # because we need it to still exist when the transaction
                         # is closed (for txnclose hooks)
                         tagsmod.writediff(changesfile, changes)
+
         def validate(tr2):
             """will run pre-closing hooks"""
             # XXX the transaction API is a bit lacking here so we take a hacky
@@ -1921,27 +2085,51 @@
             # gating.
             tracktags(tr2)
             repo = reporef()
-            if repo.ui.configbool('experimental', 'single-head-per-branch'):
-                scmutil.enforcesinglehead(repo, tr2, desc)
-            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
-                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
+
+            r = repo.ui.configsuboptions(
+                b'experimental', b'single-head-per-branch'
+            )
+            singlehead, singleheadsub = r
+            if singlehead:
+                accountclosed = singleheadsub.get(
+                    b"account-closed-heads", False
+                )
+                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
+            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
+                for name, (old, new) in sorted(
+                    tr.changes[b'bookmarks'].items()
+                ):
                     args = tr.hookargs.copy()
                     args.update(bookmarks.preparehookargs(name, old, new))
-                    repo.hook('pretxnclose-bookmark', throw=True,
-                              **pycompat.strkwargs(args))
-            if hook.hashook(repo.ui, 'pretxnclose-phase'):
+                    repo.hook(
+                        b'pretxnclose-bookmark',
+                        throw=True,
+                        **pycompat.strkwargs(args)
+                    )
+            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                 cl = repo.unfiltered().changelog
-                for rev, (old, new) in tr.changes['phases'].items():
+                for rev, (old, new) in tr.changes[b'phases'].items():
                     args = tr.hookargs.copy()
                     node = hex(cl.node(rev))
                     args.update(phases.preparehookargs(node, old, new))
-                    repo.hook('pretxnclose-phase', throw=True,
-                              **pycompat.strkwargs(args))
-
-            repo.hook('pretxnclose', throw=True,
-                      **pycompat.strkwargs(tr.hookargs))
+                    repo.hook(
+                        b'pretxnclose-phase',
+                        throw=True,
+                        **pycompat.strkwargs(args)
+                    )
+
+            repo.hook(
+                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
+            )
+
         def releasefn(tr, success):
             repo = reporef()
+            if repo is None:
+                # If the repo has been GC'd (and this release function is being
+                # called from transaction.__del__), there's not much we can do,
+                # so just leave the unfinished transaction there and let the
+                # user run `hg recover`.
+                return
             if success:
                 # this should be explicitly invoked here, because
                 # in-memory changes aren't written out at closing
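
The configsuboptions() lookup above pairs the main flag with per-flag
sub-settings; in hgrc syntax, sub-options hang off the key with a colon, so
the configuration this code reads would look roughly like (values
illustrative):

    [experimental]
    single-head-per-branch = yes
    single-head-per-branch:account-closed-heads = yes
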
@@ -1952,32 +2140,37 @@
             else:
                 # discard all changes (including ones already written
                 # out) in this transaction
-                narrowspec.restorebackup(self, 'journal.narrowspec')
-                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
-                repo.dirstate.restorebackup(None, 'journal.dirstate')
+                narrowspec.restorebackup(self, b'journal.narrowspec')
+                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
+                repo.dirstate.restorebackup(None, b'journal.dirstate')
 
                 repo.invalidate(clearfilecache=True)
 
-        tr = transaction.transaction(rp, self.svfs, vfsmap,
-                                     "journal",
-                                     "undo",
-                                     aftertrans(renames),
-                                     self.store.createmode,
-                                     validator=validate,
-                                     releasefn=releasefn,
-                                     checkambigfiles=_cachedfiles,
-                                     name=desc)
-        tr.changes['origrepolen'] = len(self)
-        tr.changes['obsmarkers'] = set()
-        tr.changes['phases'] = {}
-        tr.changes['bookmarks'] = {}
-
-        tr.hookargs['txnid'] = txnid
-        tr.hookargs['txnname'] = desc
+        tr = transaction.transaction(
+            rp,
+            self.svfs,
+            vfsmap,
+            b"journal",
+            b"undo",
+            aftertrans(renames),
+            self.store.createmode,
+            validator=validate,
+            releasefn=releasefn,
+            checkambigfiles=_cachedfiles,
+            name=desc,
+        )
+        tr.changes[b'origrepolen'] = len(self)
+        tr.changes[b'obsmarkers'] = set()
+        tr.changes[b'phases'] = {}
+        tr.changes[b'bookmarks'] = {}
+
+        tr.hookargs[b'txnid'] = txnid
+        tr.hookargs[b'txnname'] = desc
         # note: writing the fncache only during finalize means that the file is
         # outdated when running hooks. As fncache is used for streaming clone,
         # this is not expected to break anything that happens during the hooks.
-        tr.addfinalize('flush-fncache', self.store.write)
+        tr.addfinalize(b'flush-fncache', self.store.write)
+
         def txnclosehook(tr2):
             """To be run if transaction is successful, will schedule a hook run
             """
@@ -1989,87 +2182,107 @@
 
             def hookfunc():
                 repo = reporef()
-                if hook.hashook(repo.ui, 'txnclose-bookmark'):
-                    bmchanges = sorted(tr.changes['bookmarks'].items())
+                if hook.hashook(repo.ui, b'txnclose-bookmark'):
+                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                     for name, (old, new) in bmchanges:
                         args = tr.hookargs.copy()
                         args.update(bookmarks.preparehookargs(name, old, new))
-                        repo.hook('txnclose-bookmark', throw=False,
-                                  **pycompat.strkwargs(args))
-
-                if hook.hashook(repo.ui, 'txnclose-phase'):
+                        repo.hook(
+                            b'txnclose-bookmark',
+                            throw=False,
+                            **pycompat.strkwargs(args)
+                        )
+
+                if hook.hashook(repo.ui, b'txnclose-phase'):
                     cl = repo.unfiltered().changelog
-                    phasemv = sorted(tr.changes['phases'].items())
+                    phasemv = sorted(tr.changes[b'phases'].items())
                     for rev, (old, new) in phasemv:
                         args = tr.hookargs.copy()
                         node = hex(cl.node(rev))
                         args.update(phases.preparehookargs(node, old, new))
-                        repo.hook('txnclose-phase', throw=False,
-                                  **pycompat.strkwargs(args))
-
-                repo.hook('txnclose', throw=False,
-                          **pycompat.strkwargs(hookargs))
+                        repo.hook(
+                            b'txnclose-phase',
+                            throw=False,
+                            **pycompat.strkwargs(args)
+                        )
+
+                repo.hook(
+                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
+                )
+
             reporef()._afterlock(hookfunc)
-        tr.addfinalize('txnclose-hook', txnclosehook)
+
+        tr.addfinalize(b'txnclose-hook', txnclosehook)
         # Include a leading "-" to make it happen before the transaction summary
         # reports registered via scmutil.registersummarycallback() whose names
         # are 00-txnreport etc. That way, the caches will be warm when the
         # callbacks run.
-        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
+        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
+
         def txnaborthook(tr2):
             """To be run if transaction is aborted
             """
-            reporef().hook('txnabort', throw=False,
-                           **pycompat.strkwargs(tr2.hookargs))
-        tr.addabort('txnabort-hook', txnaborthook)
+            reporef().hook(
+                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
+            )
+
+        tr.addabort(b'txnabort-hook', txnaborthook)
         # avoid eager cache invalidation. in-memory data should be identical
         # to stored data if transaction has no error.
-        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
+        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
         self._transref = weakref.ref(tr)
         scmutil.registersummarycallback(self, tr, desc)
         return tr
 
     def _journalfiles(self):
-        return ((self.svfs, 'journal'),
-                (self.svfs, 'journal.narrowspec'),
-                (self.vfs, 'journal.narrowspec.dirstate'),
-                (self.vfs, 'journal.dirstate'),
-                (self.vfs, 'journal.branch'),
-                (self.vfs, 'journal.desc'),
-                (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
-                (self.svfs, 'journal.phaseroots'))
+        return (
+            (self.svfs, b'journal'),
+            (self.svfs, b'journal.narrowspec'),
+            (self.vfs, b'journal.narrowspec.dirstate'),
+            (self.vfs, b'journal.dirstate'),
+            (self.vfs, b'journal.branch'),
+            (self.vfs, b'journal.desc'),
+            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
+            (self.svfs, b'journal.phaseroots'),
+        )
 
     def undofiles(self):
         return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
 
     @unfilteredmethod
     def _writejournal(self, desc):
-        self.dirstate.savebackup(None, 'journal.dirstate')
-        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
-        narrowspec.savebackup(self, 'journal.narrowspec')
-        self.vfs.write("journal.branch",
-                          encoding.fromlocal(self.dirstate.branch()))
-        self.vfs.write("journal.desc",
-                          "%d\n%s\n" % (len(self), desc))
+        self.dirstate.savebackup(None, b'journal.dirstate')
+        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
+        narrowspec.savebackup(self, b'journal.narrowspec')
+        self.vfs.write(
+            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
+        )
+        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
         bookmarksvfs = bookmarks.bookmarksvfs(self)
-        bookmarksvfs.write("journal.bookmarks",
-                           bookmarksvfs.tryread("bookmarks"))
-        self.svfs.write("journal.phaseroots",
-                           self.svfs.tryread("phaseroots"))
+        bookmarksvfs.write(
+            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
+        )
+        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
 
     def recover(self):
         with self.lock():
-            if self.svfs.exists("journal"):
-                self.ui.status(_("rolling back interrupted transaction\n"))
-                vfsmap = {'': self.svfs,
-                          'plain': self.vfs,}
-                transaction.rollback(self.svfs, vfsmap, "journal",
-                                     self.ui.warn,
-                                     checkambigfiles=_cachedfiles)
+            if self.svfs.exists(b"journal"):
+                self.ui.status(_(b"rolling back interrupted transaction\n"))
+                vfsmap = {
+                    b'': self.svfs,
+                    b'plain': self.vfs,
+                }
+                transaction.rollback(
+                    self.svfs,
+                    vfsmap,
+                    b"journal",
+                    self.ui.warn,
+                    checkambigfiles=_cachedfiles,
+                )
                 self.invalidate()
                 return True
             else:
-                self.ui.warn(_("no interrupted transaction available\n"))
+                self.ui.warn(_(b"no interrupted transaction available\n"))
                 return False
 
     def rollback(self, dryrun=False, force=False):
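
The recover() path above is what backs the command suggested by the
abandoned-transaction hint earlier in this file; a typical session, with
output matching the status string in this hunk:

    $ hg recover
    rolling back interrupted transaction
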
@@ -2077,42 +2290,47 @@
         try:
             wlock = self.wlock()
             lock = self.lock()
-            if self.svfs.exists("undo"):
-                dsguard = dirstateguard.dirstateguard(self, 'rollback')
+            if self.svfs.exists(b"undo"):
+                dsguard = dirstateguard.dirstateguard(self, b'rollback')
 
                 return self._rollback(dryrun, force, dsguard)
             else:
-                self.ui.warn(_("no rollback information available\n"))
+                self.ui.warn(_(b"no rollback information available\n"))
                 return 1
         finally:
             release(dsguard, lock, wlock)
 
-    @unfilteredmethod # Until we get smarter cache management
+    @unfilteredmethod  # Until we get smarter cache management
     def _rollback(self, dryrun, force, dsguard):
         ui = self.ui
         try:
-            args = self.vfs.read('undo.desc').splitlines()
+            args = self.vfs.read(b'undo.desc').splitlines()
             (oldlen, desc, detail) = (int(args[0]), args[1], None)
             if len(args) >= 3:
                 detail = args[2]
             oldtip = oldlen - 1
 
             if detail and ui.verbose:
-                msg = (_('repository tip rolled back to revision %d'
-                         ' (undo %s: %s)\n')
-                       % (oldtip, desc, detail))
+                msg = _(
+                    b'repository tip rolled back to revision %d'
+                    b' (undo %s: %s)\n'
+                ) % (oldtip, desc, detail)
             else:
-                msg = (_('repository tip rolled back to revision %d'
-                         ' (undo %s)\n')
-                       % (oldtip, desc))
+                msg = _(
+                    b'repository tip rolled back to revision %d (undo %s)\n'
+                ) % (oldtip, desc)
         except IOError:
-            msg = _('rolling back unknown transaction\n')
+            msg = _(b'rolling back unknown transaction\n')
             desc = None
 
-        if not force and self['.'] != self['tip'] and desc == 'commit':
+        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
             raise error.Abort(
-                _('rollback of last commit while not checked out '
-                  'may lose data'), hint=_('use -f to force'))
+                _(
+                    b'rollback of last commit while not checked out '
+                    b'may lose data'
+                ),
+                hint=_(b'use -f to force'),
+            )
 
         ui.status(msg)
         if dryrun:
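
For reference, the messages assembled in this hunk are what a successful
rollback prints; the revision number and undo source shown below are
illustrative:

    $ hg rollback
    repository tip rolled back to revision 41 (undo commit)
    working directory now based on revision 41
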
@@ -2120,14 +2338,17 @@
 
         parents = self.dirstate.parents()
         self.destroying()
-        vfsmap = {'plain': self.vfs, '': self.svfs}
-        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
-                             checkambigfiles=_cachedfiles)
+        vfsmap = {b'plain': self.vfs, b'': self.svfs}
+        transaction.rollback(
+            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
+        )
         bookmarksvfs = bookmarks.bookmarksvfs(self)
-        if bookmarksvfs.exists('undo.bookmarks'):
-            bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
-        if self.svfs.exists('undo.phaseroots'):
-            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
+        if bookmarksvfs.exists(b'undo.bookmarks'):
+            bookmarksvfs.rename(
+                b'undo.bookmarks', b'bookmarks', checkambig=True
+            )
+        if self.svfs.exists(b'undo.phaseroots'):
+            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
         self.invalidate()
 
         parentgone = any(p not in self.changelog.nodemap for p in parents)
@@ -2135,25 +2356,35 @@
             # prevent dirstateguard from overwriting already restored one
             dsguard.close()
 
-            narrowspec.restorebackup(self, 'undo.narrowspec')
-            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
-            self.dirstate.restorebackup(None, 'undo.dirstate')
+            narrowspec.restorebackup(self, b'undo.narrowspec')
+            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
+            self.dirstate.restorebackup(None, b'undo.dirstate')
             try:
-                branch = self.vfs.read('undo.branch')
+                branch = self.vfs.read(b'undo.branch')
                 self.dirstate.setbranch(encoding.tolocal(branch))
             except IOError:
-                ui.warn(_('named branch could not be reset: '
-                          'current branch is still \'%s\'\n')
-                        % self.dirstate.branch())
+                ui.warn(
+                    _(
+                        b'named branch could not be reset: '
+                        b'current branch is still \'%s\'\n'
+                    )
+                    % self.dirstate.branch()
+                )
 
             parents = tuple([p.rev() for p in self[None].parents()])
             if len(parents) > 1:
-                ui.status(_('working directory now based on '
-                            'revisions %d and %d\n') % parents)
+                ui.status(
+                    _(
+                        b'working directory now based on '
+                        b'revisions %d and %d\n'
+                    )
+                    % parents
+                )
             else:
-                ui.status(_('working directory now based on '
-                            'revision %d\n') % parents)
-            mergemod.mergestate.clean(self, self['.'].node())
+                ui.status(
+                    _(b'working directory now based on revision %d\n') % parents
+                )
+            mergemod.mergestate.clean(self, self[b'.'].node())
 
         # TODO: if we know which new heads may result from this rollback, pass
         # them to destroy(), which will prevent the branchhead cache from being
@@ -2170,9 +2401,11 @@
         """
         # we must avoid cyclic reference between repo and transaction.
         reporef = weakref.ref(self)
+
         def updater(tr):
             repo = reporef()
             repo.updatecaches(tr)
+
         return updater
 
     @unfilteredmethod
@@ -2186,16 +2419,16 @@
         If 'full' is set, make sure all caches the function knows about have
         up-to-date data. Even the ones usually loaded more lazily.
         """
-        if tr is not None and tr.hookargs.get('source') == 'strip':
+        if tr is not None and tr.hookargs.get(b'source') == b'strip':
             # During strip, many caches are invalid but
             # later call to `destroyed` will refresh them.
             return
 
-        if tr is None or tr.changes['origrepolen'] < len(self):
+        if tr is None or tr.changes[b'origrepolen'] < len(self):
             # accessing the 'served' branchmap should refresh all the others,
-            self.ui.debug('updating the branch cache\n')
-            self.filtered('served').branchmap()
-            self.filtered('served.hidden').branchmap()
+            self.ui.debug(b'updating the branch cache\n')
+            self.filtered(b'served').branchmap()
+            self.filtered(b'served.hidden').branchmap()
 
         if full:
             unfi = self.unfiltered()
@@ -2205,14 +2438,24 @@
             rbc.write()
 
             # ensure the working copy parents are in the manifestfulltextcache
-            for ctx in self['.'].parents():
+            for ctx in self[b'.'].parents():
                 ctx.manifest()  # accessing the manifest is enough
 
             # accessing fnode cache warms the cache
             tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
             # accessing tags warm the cache
             self.tags()
-            self.filtered('served').tags()
+            self.filtered(b'served').tags()
+
+            # The `full` arg is documented as updating even the lazily-loaded
+            # caches immediately, so we're forcing a write to cause these caches
+            # to be warmed up even if they haven't explicitly been requested
+            # yet (if they've never been used by hg, they won't ever have been
+            # written, even if they're a subset of another kind of cache that
+            # *has* been used).
+            for filt in repoview.filtertable.keys():
+                filtered = self.filtered(filt)
+                filtered.branchmap().write(filtered)
 
     def invalidatecaches(self):
 
@@ -2253,14 +2496,16 @@
         (e.g. an incomplete fncache causes unintentional failure, but
         a redundant one doesn't).
         '''
-        unfiltered = self.unfiltered() # all file caches are stored unfiltered
+        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
         for k in list(self._filecache.keys()):
             # dirstate is invalidated separately in invalidatedirstate()
-            if k == 'dirstate':
+            if k == b'dirstate':
                 continue
-            if (k == 'changelog' and
-                self.currenttransaction() and
-                self.changelog._delayed):
+            if (
+                k == b'changelog'
+                and self.currenttransaction()
+                and self.changelog._delayed
+            ):
                 # The changelog object may store unwritten revisions. We don't
                 # want to lose them.
                 # TODO: Solve the problem instead of working around it.
@@ -2295,8 +2540,17 @@
                 continue
             ce.refresh()
 
-    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
-              inheritchecker=None, parentenvvar=None):
+    def _lock(
+        self,
+        vfs,
+        lockname,
+        wait,
+        releasefn,
+        acquirefn,
+        desc,
+        inheritchecker=None,
+        parentenvvar=None,
+    ):
         parentlock = None
         # the contents of parentenvvar are used by the underlying lock to
         # determine whether it can be inherited
@@ -2306,17 +2560,24 @@
         timeout = 0
         warntimeout = 0
         if wait:
-            timeout = self.ui.configint("ui", "timeout")
-            warntimeout = self.ui.configint("ui", "timeout.warn")
+            timeout = self.ui.configint(b"ui", b"timeout")
+            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
         # internal config: ui.signal-safe-lock
-        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
-
-        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
-                            releasefn=releasefn,
-                            acquirefn=acquirefn, desc=desc,
-                            inheritchecker=inheritchecker,
-                            parentlock=parentlock,
-                            signalsafe=signalsafe)
+        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
+
+        l = lockmod.trylock(
+            self.ui,
+            vfs,
+            lockname,
+            timeout,
+            warntimeout,
+            releasefn=releasefn,
+            acquirefn=acquirefn,
+            desc=desc,
+            inheritchecker=inheritchecker,
+            parentlock=parentlock,
+            signalsafe=signalsafe,
+        )
         return l
 
     def _afterlock(self, callback):
@@ -2329,7 +2590,7 @@
             if l and l.held:
                 l.postrelease.append(callback)
                 break
-        else: # no lock has been found.
+        else:  # no lock has been found.
             callback()
 
     def lock(self, wait=True):
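
The else branch annotated above is Python's for/else: it runs only when the
loop finishes without hitting break, i.e. when no held lock was found to
defer the callback to. A compact illustration of that control flow:

    def callback():
        print('no lock held, running callback immediately')

    locks = [None, None]          # stand-ins for the repo's lock references
    for l in locks:
        if l is not None and l.held:
            l.postrelease.append(callback)
            break
    else:                         # reached only when the loop never breaks
        callback()
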
@@ -2344,19 +2605,22 @@
             l.lock()
             return l
 
-        l = self._lock(vfs=self.svfs,
-                       lockname="lock",
-                       wait=wait,
-                       releasefn=None,
-                       acquirefn=self.invalidate,
-                       desc=_('repository %s') % self.origroot)
+        l = self._lock(
+            vfs=self.svfs,
+            lockname=b"lock",
+            wait=wait,
+            releasefn=None,
+            acquirefn=self.invalidate,
+            desc=_(b'repository %s') % self.origroot,
+        )
         self._lockref = weakref.ref(l)
         return l
 
     def _wlockchecktransaction(self):
         if self.currenttransaction() is not None:
             raise error.LockInheritanceContractViolation(
-                'wlock cannot be inherited in the middle of a transaction')
+                b'wlock cannot be inherited in the middle of a transaction'
+            )
 
     def wlock(self, wait=True):
         '''Lock the non-store parts of the repository (everything under
@@ -2373,10 +2637,12 @@
 
         # We do not need to check for non-waiting lock acquisition.  Such
         # acquisition would not cause dead-lock as they would just fail.
-        if wait and (self.ui.configbool('devel', 'all-warnings')
-                     or self.ui.configbool('devel', 'check-locks')):
+        if wait and (
+            self.ui.configbool(b'devel', b'all-warnings')
+            or self.ui.configbool(b'devel', b'check-locks')
+        ):
             if self._currentlock(self._lockref) is not None:
-                self.ui.develwarn('"wlock" acquired after "lock"')
+                self.ui.develwarn(b'"wlock" acquired after "lock"')
 
         def unlock():
             if self.dirstate.pendingparentchange():
@@ -2384,13 +2650,18 @@
             else:
                 self.dirstate.write(None)
 
-            self._filecache['dirstate'].refresh()
-
-        l = self._lock(self.vfs, "wlock", wait, unlock,
-                       self.invalidatedirstate, _('working directory of %s') %
-                       self.origroot,
-                       inheritchecker=self._wlockchecktransaction,
-                       parentenvvar='HG_WLOCK_LOCKER')
+            self._filecache[b'dirstate'].refresh()
+
+        l = self._lock(
+            self.vfs,
+            b"wlock",
+            wait,
+            unlock,
+            self.invalidatedirstate,
+            _(b'working directory of %s') % self.origroot,
+            inheritchecker=self._wlockchecktransaction,
+            parentenvvar=b'HG_WLOCK_LOCKER',
+        )
         self._wlockref = weakref.ref(l)
         return l
 
@@ -2407,8 +2678,16 @@
         """Returns the wlock if it's held, or None if it's not."""
         return self._currentlock(self._wlockref)
 
-    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
-                    includecopymeta):
+    def _filecommit(
+        self,
+        fctx,
+        manifest1,
+        manifest2,
+        linkrev,
+        tr,
+        changelist,
+        includecopymeta,
+    ):
         """
         commit an individual file as part of a larger transaction
         """
@@ -2419,11 +2698,14 @@
         if isinstance(fctx, context.filectx):
             node = fctx.filenode()
             if node in [fparent1, fparent2]:
-                self.ui.debug('reusing %s filelog entry\n' % fname)
-                if ((fparent1 != nullid and
-                     manifest1.flags(fname) != fctx.flags()) or
-                    (fparent2 != nullid and
-                     manifest2.flags(fname) != fctx.flags())):
+                self.ui.debug(b'reusing %s filelog entry\n' % fname)
+                if (
+                    fparent1 != nullid
+                    and manifest1.flags(fname) != fctx.flags()
+                ) or (
+                    fparent2 != nullid
+                    and manifest2.flags(fname) != fctx.flags()
+                ):
                     changelist.append(fname)
                 return node
 
@@ -2453,8 +2735,8 @@
             cnode = manifest1.get(cfname)
             newfparent = fparent2
 
-            if manifest2: # branch merge
-                if fparent2 == nullid or cnode is None: # copied on remote side
+            if manifest2:  # branch merge
+                if fparent2 == nullid or cnode is None:  # copied on remote side
                     if cfname in manifest2:
                         cnode = manifest2[cfname]
                         newfparent = fparent1
@@ -2469,14 +2751,21 @@
             # behavior in this circumstance.
 
             if cnode:
-                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
+                self.ui.debug(
+                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
+                )
                 if includecopymeta:
-                    meta["copy"] = cfname
-                    meta["copyrev"] = hex(cnode)
+                    meta[b"copy"] = cfname
+                    meta[b"copyrev"] = hex(cnode)
                 fparent1, fparent2 = nullid, newfparent
             else:
-                self.ui.warn(_("warning: can't find ancestor for '%s' "
-                               "copied from '%s'!\n") % (fname, cfname))
+                self.ui.warn(
+                    _(
+                        b"warning: can't find ancestor for '%s' "
+                        b"copied from '%s'!\n"
+                    )
+                    % (fname, cfname)
+                )
 
         elif fparent1 == nullid:
             fparent1, fparent2 = fparent2, nullid
@@ -2506,23 +2795,31 @@
 
             for f in match.files():
                 f = self.dirstate.normalize(f)
-                if f == '.' or f in matched or f in wctx.substate:
+                if f == b'.' or f in matched or f in wctx.substate:
                     continue
                 if f in status.deleted:
-                    fail(f, _('file not found!'))
-                if f in vdirs: # visited directory
-                    d = f + '/'
+                    fail(f, _(b'file not found!'))
+                if f in vdirs:  # visited directory
+                    d = f + b'/'
                     for mf in matched:
                         if mf.startswith(d):
                             break
                     else:
-                        fail(f, _("no match under directory!"))
+                        fail(f, _(b"no match under directory!"))
                 elif f not in self.dirstate:
-                    fail(f, _("file not tracked!"))
+                    fail(f, _(b"file not tracked!"))
 
     @unfilteredmethod
-    def commit(self, text="", user=None, date=None, match=None, force=False,
-               editor=False, extra=None):
+    def commit(
+        self,
+        text=b"",
+        user=None,
+        date=None,
+        match=None,
+        force=False,
+        editor=False,
+        extra=None,
+    ):
         """Add a new revision to current repository.
 
         Revision information is gathered from the working directory,
@@ -2533,7 +2830,7 @@
             extra = {}
 
         def fail(f, msg):
-            raise error.Abort('%s: %s' % (f, msg))
+            raise error.Abort(b'%s: %s' % (f, msg))
 
         if not match:
             match = matchmod.always()
@@ -2549,40 +2846,52 @@
             merge = len(wctx.parents()) > 1
 
             if not force and merge and not match.always():
-                raise error.Abort(_('cannot partially commit a merge '
-                                   '(do not specify files or patterns)'))
+                raise error.Abort(
+                    _(
+                        b'cannot partially commit a merge '
+                        b'(do not specify files or patterns)'
+                    )
+                )
 
             status = self.status(match=match, clean=force)
             if force:
-                status.modified.extend(status.clean) # mq may commit clean files
+                status.modified.extend(
+                    status.clean
+                )  # mq may commit clean files
 
             # check subrepos
             subs, commitsubs, newstate = subrepoutil.precommit(
-                self.ui, wctx, status, match, force=force)
+                self.ui, wctx, status, match, force=force
+            )
 
             # make sure all explicit patterns are matched
             if not force:
                 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
 
-            cctx = context.workingcommitctx(self, status,
-                                            text, user, date, extra)
+            cctx = context.workingcommitctx(
+                self, status, text, user, date, extra
+            )
 
             # internal config: ui.allowemptycommit
-            allowemptycommit = (wctx.branch() != wctx.p1().branch()
-                                or extra.get('close') or merge or cctx.files()
-                                or self.ui.configbool('ui', 'allowemptycommit'))
+            allowemptycommit = (
+                wctx.branch() != wctx.p1().branch()
+                or extra.get(b'close')
+                or merge
+                or cctx.files()
+                or self.ui.configbool(b'ui', b'allowemptycommit')
+            )
             if not allowemptycommit:
                 return None
 
             if merge and cctx.deleted():
-                raise error.Abort(_("cannot commit merge with missing files"))
+                raise error.Abort(_(b"cannot commit merge with missing files"))
 
             ms = mergemod.mergestate.read(self)
             mergeutil.checkunresolved(ms)
 
             if editor:
                 cctx._text = editor(self, cctx, subs)
-            edited = (text != cctx._text)
+            edited = text != cctx._text
 
             # Save commit message in case this transaction gets rolled back
             # (e.g. by a pretxncommit hook).  Leave the content alone on
@@ -2594,35 +2903,41 @@
                 uipathfn = scmutil.getuipathfn(self)
                 for s in sorted(commitsubs):
                     sub = wctx.sub(s)
-                    self.ui.status(_('committing subrepository %s\n') %
-                                   uipathfn(subrepoutil.subrelpath(sub)))
+                    self.ui.status(
+                        _(b'committing subrepository %s\n')
+                        % uipathfn(subrepoutil.subrelpath(sub))
+                    )
                     sr = sub.commit(cctx._text, user, date)
                     newstate[s] = (newstate[s][0], sr)
                 subrepoutil.writestate(self, newstate)
 
             p1, p2 = self.dirstate.parents()
-            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
+            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
             try:
-                self.hook("precommit", throw=True, parent1=hookp1,
-                          parent2=hookp2)
-                with self.transaction('commit'):
+                self.hook(
+                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
+                )
+                with self.transaction(b'commit'):
                     ret = self.commitctx(cctx, True)
                     # update bookmarks, dirstate and mergestate
                     bookmarks.update(self, [p1, p2], ret)
                     cctx.markcommitted(ret)
                     ms.reset()
-            except: # re-raises
+            except:  # re-raises
                 if edited:
                     self.ui.write(
-                        _('note: commit message saved in %s\n') % msgfn)
+                        _(b'note: commit message saved in %s\n') % msgfn
+                    )
                 raise
 
         def commithook():
             # hack for commands that use a temporary commit (e.g. histedit):
             # the temporary commit may have been stripped before the hook runs
             if self.changelog.hasnode(ret):
-                self.hook("commit", node=hex(ret), parent1=hookp1,
-                          parent2=hookp2)
+                self.hook(
+                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
+                )
+
         self._afterlock(commithook)
         return ret
 
@@ -2646,21 +2961,28 @@
         p1, p2 = ctx.p1(), ctx.p2()
         user = ctx.user()
 
-        writecopiesto = self.ui.config('experimental', 'copies.write-to')
-        writefilecopymeta = writecopiesto != 'changeset-only'
-        writechangesetcopy = (writecopiesto in
-                              ('changeset-only', 'compatibility'))
+        if self.filecopiesmode == b'changeset-sidedata':
+            writechangesetcopy = True
+            writefilecopymeta = True
+            writecopiesto = None
+        else:
+            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
+            writefilecopymeta = writecopiesto != b'changeset-only'
+            writechangesetcopy = writecopiesto in (
+                b'changeset-only',
+                b'compatibility',
+            )
         p1copies, p2copies = None, None
         if writechangesetcopy:
             p1copies = ctx.p1copies()
             p2copies = ctx.p2copies()
         filesadded, filesremoved = None, None
-        with self.lock(), self.transaction("commit") as tr:
+        with self.lock(), self.transaction(b"commit") as tr:
             trp = weakref.proxy(tr)
 
             if ctx.manifestnode():
                 # reuse an existing manifest revision
-                self.ui.debug('reusing known manifest\n')
+                self.ui.debug(b'reusing known manifest\n')
                 mn = ctx.manifestnode()
                 files = ctx.files()
                 if writechangesetcopy:
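
The experimental.copies.write-to knob read in the else branch above selects
where copy records are stored; per the comparisons in this hunk it
recognizes at least changeset-only and compatibility (other values leave
copy metadata in the filelog only). In hgrc form:

    [experimental]
    copies.write-to = compatibility
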
@@ -2680,29 +3002,37 @@
                 changed = []
                 removed = list(ctx.removed())
                 linkrev = len(self)
-                self.ui.note(_("committing files:\n"))
+                self.ui.note(_(b"committing files:\n"))
                 uipathfn = scmutil.getuipathfn(self)
                 for f in sorted(ctx.modified() + ctx.added()):
-                    self.ui.note(uipathfn(f) + "\n")
+                    self.ui.note(uipathfn(f) + b"\n")
                     try:
                         fctx = ctx[f]
                         if fctx is None:
                             removed.append(f)
                         else:
                             added.append(f)
-                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
-                                                    trp, changed,
-                                                    writefilecopymeta)
+                            m[f] = self._filecommit(
+                                fctx,
+                                m1,
+                                m2,
+                                linkrev,
+                                trp,
+                                changed,
+                                writefilecopymeta,
+                            )
                             m.setflag(f, fctx.flags())
                     except OSError:
-                        self.ui.warn(_("trouble committing %s!\n") %
-                                     uipathfn(f))
+                        self.ui.warn(
+                            _(b"trouble committing %s!\n") % uipathfn(f)
+                        )
                         raise
                     except IOError as inst:
                         errcode = getattr(inst, 'errno', errno.ENOENT)
                         if error or errcode and errcode != errno.ENOENT:
-                            self.ui.warn(_("trouble committing %s!\n") %
-                                         uipathfn(f))
+                            self.ui.warn(
+                                _(b"trouble committing %s!\n") % uipathfn(f)
+                            )
                         raise
 
                 # update manifest
@@ -2711,6 +3041,7 @@
                 for f in drop:
                     del m[f]
                 if p2.rev() != nullrev:
+
                     @util.cachefunc
                     def mas():
                         p1n = p1.node()
@@ -2719,6 +3050,7 @@
                         if not cahs:
                             cahs = [nullrev]
                         return [self[r].manifest() for r in cahs]
+
                     def deletionfromparent(f):
                         # When a file is removed relative to p1 in a merge, this
                         # function determines whether the absence is due to a
@@ -2741,14 +3073,18 @@
                         # it does something very similar by comparing filelog
                         # nodes.
                         if f in m1:
-                            return (f not in m2
-                                    and all(f in ma and ma.find(f) == m1.find(f)
-                                            for ma in mas()))
+                            return f not in m2 and all(
+                                f in ma and ma.find(f) == m1.find(f)
+                                for ma in mas()
+                            )
                         elif f in m2:
-                            return all(f in ma and ma.find(f) == m2.find(f)
-                                       for ma in mas())
+                            return all(
+                                f in ma and ma.find(f) == m2.find(f)
+                                for ma in mas()
+                            )
                         else:
                             return True
+
                     removed = [f for f in removed if not deletionfromparent(f)]
 
                 files = changed + removed
@@ -2759,10 +3095,12 @@
                     # exact same commit can be reproduced later on convert.
                     md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                 if not files and md:
-                    self.ui.debug('not reusing manifest (no file change in '
-                                  'changelog, but manifest differs)\n')
+                    self.ui.debug(
+                        b'not reusing manifest (no file change in '
+                        b'changelog, but manifest differs)\n'
+                    )
                 if files or md:
-                    self.ui.note(_("committing manifest\n"))
+                    self.ui.note(_(b"committing manifest\n"))
                     # we're using narrowmatch here since it's already applied at
                     # other stages (such as dirstate.walk), so we're already
                     # ignoring things outside of narrowspec in most cases. The
@@ -2770,24 +3108,33 @@
                     # at this point is merges, and we already error out in the
                     # case where the merge has files outside of the narrowspec,
                     # so this is safe.
-                    mn = mctx.write(trp, linkrev,
-                                    p1.manifestnode(), p2.manifestnode(),
-                                    added, drop, match=self.narrowmatch())
+                    mn = mctx.write(
+                        trp,
+                        linkrev,
+                        p1.manifestnode(),
+                        p2.manifestnode(),
+                        added,
+                        drop,
+                        match=self.narrowmatch(),
+                    )
 
                     if writechangesetcopy:
-                        filesadded = [f for f in changed
-                                      if not (f in m1 or f in m2)]
+                        filesadded = [
+                            f for f in changed if not (f in m1 or f in m2)
+                        ]
                         filesremoved = removed
                 else:
-                    self.ui.debug('reusing manifest from p1 (listed files '
-                                  'actually unchanged)\n')
+                    self.ui.debug(
+                        b'reusing manifest from p1 (listed files '
+                        b'actually unchanged)\n'
+                    )
                     mn = p1.manifestnode()
             else:
-                self.ui.debug('reusing manifest from p1 (no file change)\n')
+                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
                 mn = p1.manifestnode()
                 files = []
 
-            if writecopiesto == 'changeset-only':
+            if writecopiesto == b'changeset-only':
                 # If writing only to changeset extras, use None to indicate that
                 # no entry should be written. If writing to both, write an empty
                 # entry to prevent the reader from falling back to reading
@@ -2801,15 +3148,31 @@
                 files = origctx.files()
 
             # update changelog
-            self.ui.note(_("committing changelog\n"))
+            self.ui.note(_(b"committing changelog\n"))
             self.changelog.delayupdate(tr)
-            n = self.changelog.add(mn, files, ctx.description(),
-                                   trp, p1.node(), p2.node(),
-                                   user, ctx.date(), ctx.extra().copy(),
-                                   p1copies, p2copies, filesadded, filesremoved)
-            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
-            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
-                      parent2=xp2)
+            n = self.changelog.add(
+                mn,
+                files,
+                ctx.description(),
+                trp,
+                p1.node(),
+                p2.node(),
+                user,
+                ctx.date(),
+                ctx.extra().copy(),
+                p1copies,
+                p2copies,
+                filesadded,
+                filesremoved,
+            )
+            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
+            self.hook(
+                b'pretxncommit',
+                throw=True,
+                node=hex(n),
+                parent1=xp1,
+                parent2=xp2,
+            )
             # set the new commit in its proper phase
             targetphase = subrepoutil.newcommitphase(self.ui, ctx)
             if targetphase:
@@ -2836,7 +3199,7 @@
         # When using the same lock to commit and strip, the phasecache is left
         # dirty after committing. Then when we strip, the repo is invalidated,
         # causing those changes to disappear.
-        if '_phasecache' in vars(self):
+        if b'_phasecache' in vars(self):
             self._phasecache.write()
 
     @unfilteredmethod
@@ -2871,12 +3234,20 @@
         # tag cache retrieval" case to work.
         self.invalidate()
 
-    def status(self, node1='.', node2=None, match=None,
-               ignored=False, clean=False, unknown=False,
-               listsubrepos=False):
+    def status(
+        self,
+        node1=b'.',
+        node2=None,
+        match=None,
+        ignored=False,
+        clean=False,
+        unknown=False,
+        listsubrepos=False,
+    ):
         '''a convenience method that calls node1.status(node2)'''
-        return self[node1].status(node2, match, ignored, clean, unknown,
-                                  listsubrepos)
+        return self[node1].status(
+            node2, match, ignored, clean, unknown, listsubrepos
+        )
 
     def addpostdsstatus(self, ps):
         """Add a callback to run within the wlock, at the point at which status
@@ -2996,44 +3367,58 @@
             hookargs[r'key'] = key
             hookargs[r'old'] = old
             hookargs[r'new'] = new
-            self.hook('prepushkey', throw=True, **hookargs)
+            self.hook(b'prepushkey', throw=True, **hookargs)
         except error.HookAbort as exc:
-            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
+            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
             if exc.hint:
-                self.ui.write_err(_("(%s)\n") % exc.hint)
+                self.ui.write_err(_(b"(%s)\n") % exc.hint)
             return False
-        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
+        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
         ret = pushkey.push(self, namespace, key, old, new)
+
         def runhook():
-            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
-                      ret=ret)
+            self.hook(
+                b'pushkey',
+                namespace=namespace,
+                key=key,
+                old=old,
+                new=new,
+                ret=ret,
+            )
+
         self._afterlock(runhook)
         return ret
 
     def listkeys(self, namespace):
-        self.hook('prelistkeys', throw=True, namespace=namespace)
-        self.ui.debug('listing keys for "%s"\n' % namespace)
+        self.hook(b'prelistkeys', throw=True, namespace=namespace)
+        self.ui.debug(b'listing keys for "%s"\n' % namespace)
         values = pushkey.list(self, namespace)
-        self.hook('listkeys', namespace=namespace, values=values)
+        self.hook(b'listkeys', namespace=namespace, values=values)
         return values
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
-        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
-                                   pycompat.bytestr(four),
-                                   pycompat.bytestr(five))
+        return b"%s %s %s %s %s" % (
+            one,
+            two,
+            pycompat.bytestr(three),
+            pycompat.bytestr(four),
+            pycompat.bytestr(five),
+        )
 
     def savecommitmessage(self, text):
-        fp = self.vfs('last-message.txt', 'wb')
+        fp = self.vfs(b'last-message.txt', b'wb')
         try:
             fp.write(text)
         finally:
             fp.close()
-        return self.pathto(fp.name[len(self.root) + 1:])
+        return self.pathto(fp.name[len(self.root) + 1 :])
+
 
 # used to avoid circular references so destructors work
 def aftertrans(files):
     renamefiles = [tuple(t) for t in files]
+
     def a():
         for vfs, src, dest in renamefiles:
             # if src and dest refer to a same file, vfs.rename is a no-op,
@@ -3042,14 +3427,17 @@
             vfs.tryunlink(dest)
             try:
                 vfs.rename(src, dest)
-            except OSError: # journal file does not yet exist
+            except OSError:  # journal file does not yet exist
                 pass
+
     return a
 
+
 def undoname(fn):
     base, name = os.path.split(fn)
-    assert name.startswith('journal')
-    return os.path.join(base, name.replace('journal', 'undo', 1))
+    assert name.startswith(b'journal')
+    return os.path.join(base, name.replace(b'journal', b'undo', 1))
+
 
 def instance(ui, path, create, intents=None, createopts=None):
     localpath = util.urllocalpath(path)
@@ -3058,9 +3446,11 @@
 
     return makelocalrepository(ui, localpath, intents=intents)
 
+
 def islocal(path):
     return True
 
+
 def defaultcreateopts(ui, createopts=None):
     """Populate the default creation options for a repository.
 
@@ -3069,12 +3459,13 @@
     """
     createopts = dict(createopts or {})
 
-    if 'backend' not in createopts:
+    if b'backend' not in createopts:
         # experimental config: storage.new-repo-backend
-        createopts['backend'] = ui.config('storage', 'new-repo-backend')
+        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
 
     return createopts
 
+
 def newreporequirements(ui, createopts):
     """Determine the set of requirements for a new local repository.
 
@@ -3083,73 +3474,95 @@
     """
     # If the repo is being created from a shared repository, we copy
     # its requirements.
-    if 'sharedrepo' in createopts:
-        requirements = set(createopts['sharedrepo'].requirements)
-        if createopts.get('sharedrelative'):
-            requirements.add('relshared')
+    if b'sharedrepo' in createopts:
+        requirements = set(createopts[b'sharedrepo'].requirements)
+        if createopts.get(b'sharedrelative'):
+            requirements.add(b'relshared')
         else:
-            requirements.add('shared')
+            requirements.add(b'shared')
 
         return requirements
 
-    if 'backend' not in createopts:
-        raise error.ProgrammingError('backend key not present in createopts; '
-                                     'was defaultcreateopts() called?')
-
-    if createopts['backend'] != 'revlogv1':
-        raise error.Abort(_('unable to determine repository requirements for '
-                            'storage backend: %s') % createopts['backend'])
-
-    requirements = {'revlogv1'}
-    if ui.configbool('format', 'usestore'):
-        requirements.add('store')
-        if ui.configbool('format', 'usefncache'):
-            requirements.add('fncache')
-            if ui.configbool('format', 'dotencode'):
-                requirements.add('dotencode')
-
-    compengine = ui.config('format', 'revlog-compression')
+    if b'backend' not in createopts:
+        raise error.ProgrammingError(
+            b'backend key not present in createopts; '
+            b'was defaultcreateopts() called?'
+        )
+
+    if createopts[b'backend'] != b'revlogv1':
+        raise error.Abort(
+            _(
+                b'unable to determine repository requirements for '
+                b'storage backend: %s'
+            )
+            % createopts[b'backend']
+        )
+
+    requirements = {b'revlogv1'}
+    if ui.configbool(b'format', b'usestore'):
+        requirements.add(b'store')
+        if ui.configbool(b'format', b'usefncache'):
+            requirements.add(b'fncache')
+            if ui.configbool(b'format', b'dotencode'):
+                requirements.add(b'dotencode')
+
+    compengine = ui.config(b'format', b'revlog-compression')
     if compengine not in util.compengines:
-        raise error.Abort(_('compression engine %s defined by '
-                            'format.revlog-compression not available') %
-                          compengine,
-                          hint=_('run "hg debuginstall" to list available '
-                                 'compression engines'))
+        raise error.Abort(
+            _(
+                b'compression engine %s defined by '
+                b'format.revlog-compression not available'
+            )
+            % compengine,
+            hint=_(
+                b'run "hg debuginstall" to list available '
+                b'compression engines'
+            ),
+        )
 
     # zlib is the historical default and doesn't need an explicit requirement.
-    elif compengine == 'zstd':
-        requirements.add('revlog-compression-zstd')
-    elif compengine != 'zlib':
-        requirements.add('exp-compression-%s' % compengine)
+    elif compengine == b'zstd':
+        requirements.add(b'revlog-compression-zstd')
+    elif compengine != b'zlib':
+        requirements.add(b'exp-compression-%s' % compengine)
 
     if scmutil.gdinitconfig(ui):
-        requirements.add('generaldelta')
-        if ui.configbool('format', 'sparse-revlog'):
+        requirements.add(b'generaldelta')
+        if ui.configbool(b'format', b'sparse-revlog'):
             requirements.add(SPARSEREVLOG_REQUIREMENT)
-    if ui.configbool('experimental', 'treemanifest'):
-        requirements.add('treemanifest')
-
-    revlogv2 = ui.config('experimental', 'revlogv2')
-    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
-        requirements.remove('revlogv1')
+
+    # experimental config: format.exp-use-side-data
+    if ui.configbool(b'format', b'exp-use-side-data'):
+        requirements.add(SIDEDATA_REQUIREMENT)
+    # experimental config: format.exp-use-copies-side-data-changeset
+    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
+        requirements.add(SIDEDATA_REQUIREMENT)
+        requirements.add(COPIESSDC_REQUIREMENT)
+    if ui.configbool(b'experimental', b'treemanifest'):
+        requirements.add(b'treemanifest')
+
+    revlogv2 = ui.config(b'experimental', b'revlogv2')
+    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
+        requirements.remove(b'revlogv1')
         # generaldelta is implied by revlogv2.
-        requirements.discard('generaldelta')
+        requirements.discard(b'generaldelta')
         requirements.add(REVLOGV2_REQUIREMENT)
     # experimental config: format.internal-phase
-    if ui.configbool('format', 'internal-phase'):
-        requirements.add('internal-phase')
-
-    if createopts.get('narrowfiles'):
+    if ui.configbool(b'format', b'internal-phase'):
+        requirements.add(b'internal-phase')
+
+    if createopts.get(b'narrowfiles'):
         requirements.add(repository.NARROW_REQUIREMENT)
 
-    if createopts.get('lfs'):
-        requirements.add('lfs')
-
-    if ui.configbool('format', 'bookmarks-in-store'):
+    if createopts.get(b'lfs'):
+        requirements.add(b'lfs')
+
+    if ui.configbool(b'format', b'bookmarks-in-store'):
         requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
 
     return requirements
 
+
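
For orientation, with the defaults visible above (usestore, usefncache,
dotencode, generaldelta and sparse-revlog all enabled, zlib compression)
newreporequirements() comes out to roughly this set (a sketch, not an
exhaustive enumeration)::

    requirements = {
        b'revlogv1',
        b'store',
        b'fncache',
        b'dotencode',
        b'generaldelta',
        b'sparserevlog',  # assuming SPARSEREVLOG_REQUIREMENT == b'sparserevlog'
    }
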
 def filterknowncreateopts(ui, createopts):
     """Filters a dict of repo creation options against options that are known.
 
@@ -3165,17 +3578,18 @@
     they know how to handle.
     """
     known = {
-        'backend',
-        'lfs',
-        'narrowfiles',
-        'sharedrepo',
-        'sharedrelative',
-        'shareditems',
-        'shallowfilestore',
+        b'backend',
+        b'lfs',
+        b'narrowfiles',
+        b'sharedrepo',
+        b'sharedrelative',
+        b'shareditems',
+        b'shallowfilestore',
     }
 
     return {k: v for k, v in createopts.items() if k not in known}
 
+
 def createrepository(ui, path, createopts=None):
     """Create a new repository in a vfs.
 
@@ -3208,14 +3622,19 @@
     unknownopts = filterknowncreateopts(ui, createopts)
 
     if not isinstance(unknownopts, dict):
-        raise error.ProgrammingError('filterknowncreateopts() did not return '
-                                     'a dict')
+        raise error.ProgrammingError(
+            b'filterknowncreateopts() did not return a dict'
+        )
 
     if unknownopts:
-        raise error.Abort(_('unable to create repository because of unknown '
-                            'creation option: %s') %
-                          ', '.join(sorted(unknownopts)),
-                          hint=_('is a required extension not loaded?'))
+        raise error.Abort(
+            _(
+                b'unable to create repository because of unknown '
+                b'creation option: %s'
+            )
+            % b', '.join(sorted(unknownopts)),
+            hint=_(b'is a required extension not loaded?'),
+        )
 
     requirements = newreporequirements(ui, createopts=createopts)
 
@@ -3223,29 +3642,31 @@
 
     hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
     if hgvfs.exists():
-        raise error.RepoError(_('repository %s already exists') % path)
-
-    if 'sharedrepo' in createopts:
-        sharedpath = createopts['sharedrepo'].sharedpath
-
-        if createopts.get('sharedrelative'):
+        raise error.RepoError(_(b'repository %s already exists') % path)
+
+    if b'sharedrepo' in createopts:
+        sharedpath = createopts[b'sharedrepo'].sharedpath
+
+        if createopts.get(b'sharedrelative'):
             try:
                 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
             except (IOError, ValueError) as e:
                 # ValueError is raised on Windows if the drive letters differ
                 # on each path.
-                raise error.Abort(_('cannot calculate relative path'),
-                                  hint=stringutil.forcebytestr(e))
+                raise error.Abort(
+                    _(b'cannot calculate relative path'),
+                    hint=stringutil.forcebytestr(e),
+                )
 
     if not wdirvfs.exists():
         wdirvfs.makedirs()
 
     hgvfs.makedir(notindexed=True)
-    if 'sharedrepo' not in createopts:
+    if b'sharedrepo' not in createopts:
         hgvfs.mkdir(b'cache')
     hgvfs.mkdir(b'wcache')
 
-    if b'store' in requirements and 'sharedrepo' not in createopts:
+    if b'store' in requirements and b'sharedrepo' not in createopts:
         hgvfs.mkdir(b'store')
 
         # We create an invalid changelog outside the store so very old
@@ -3256,20 +3677,23 @@
         #
         # The revlog header has version 2, which won't be recognized by
         # such old clients.
-        hgvfs.append(b'00changelog.i',
-                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
-                     b'layout')
+        hgvfs.append(
+            b'00changelog.i',
+            b'\0\0\0\2 dummy changelog to prevent using the old repo '
+            b'layout',
+        )
 
     scmutil.writerequires(hgvfs, requirements)
 
     # Write out file telling readers where to find the shared store.
-    if 'sharedrepo' in createopts:
+    if b'sharedrepo' in createopts:
         hgvfs.write(b'sharedpath', sharedpath)
 
-    if createopts.get('shareditems'):
-        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
+    if createopts.get(b'shareditems'):
+        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
         hgvfs.write(b'shared', shared)
 
+
 def poisonrepository(repo):
     """Poison a repository instance so it can no longer be used."""
     # Perform any cleanup on the instance.
@@ -3285,8 +3709,9 @@
             if item == r'close':
                 return object.__getattribute__(self, item)
 
-            raise error.ProgrammingError('repo instances should not be used '
-                                         'after unshare')
+            raise error.ProgrammingError(
+                b'repo instances should not be used after unshare'
+            )
 
         def close(self):
             pass
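
The poisoning above swaps the instance's class in place so that every
attribute access except close() fails loudly. The same pattern, reduced
to a self-contained toy (simplified names, not the Mercurial classes)::

    class PoisonError(Exception):
        pass

    def poison(obj):
        class poisoned(object):
            def __getattribute__(self, item):
                # close() stays callable so teardown paths keep working
                if item == 'close':
                    return object.__getattribute__(self, item)
                raise PoisonError('instance used after unshare')

            def close(self):
                pass

        obj.__class__ = poisoned

    class Repo(object):
        pass

    r = Repo()
    poison(r)
    r.close()  # still allowed; any other attribute raises PoisonError
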
--- a/mercurial/lock.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/lock.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,6 +16,7 @@
 import warnings
 
 from .i18n import _
+from .pycompat import getattr
 
 from . import (
     encoding,
@@ -24,9 +25,8 @@
     util,
 )
 
-from .utils import (
-    procutil,
-)
+from .utils import procutil
+
 
 def _getlockprefix():
     """Return a string which is used to differentiate pid namespaces
@@ -36,14 +36,15 @@
     extra Linux-specific pid namespace identifier.
     """
     result = encoding.strtolocal(socket.gethostname())
-    if pycompat.sysplatform.startswith('linux'):
+    if pycompat.sysplatform.startswith(b'linux'):
         try:
-            result += '/%x' % os.stat('/proc/self/ns/pid').st_ino
+            result += b'/%x' % os.stat(b'/proc/self/ns/pid').st_ino
         except OSError as ex:
             if ex.errno not in (errno.ENOENT, errno.EACCES, errno.ENOTDIR):
                 raise
     return result
 
+
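
Trimmed to its essentials, the prefix built above is the hostname plus,
on Linux, the pid-namespace inode in hex (a sketch with the error
handling reduced to a bare pass)::

    import os
    import socket

    def lockprefix():
        result = socket.gethostname().encode('utf-8', 'replace')
        try:
            # distinguishes identical pids in different pid namespaces
            result += b'/%x' % os.stat('/proc/self/ns/pid').st_ino
        except OSError:
            pass  # not Linux, or /proc unavailable
        return result

    print(lockprefix())  # e.g. b'myhost/effc57d1'
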
 @contextlib.contextmanager
 def _delayedinterrupt():
     """Block signal interrupt while doing something critical
@@ -60,11 +61,13 @@
     orighandlers = {}
 
     def raiseinterrupt(num):
-        if (num == getattr(signal, 'SIGINT', None) or
-            num == getattr(signal, 'CTRL_C_EVENT', None)):
+        if num == getattr(signal, 'SIGINT', None) or num == getattr(
+            signal, 'CTRL_C_EVENT', None
+        ):
             raise KeyboardInterrupt
         else:
             raise error.SignalInterrupt
+
     def catchterm(num, frame):
         if blocked:
             assertedsigs.append(num)
@@ -74,7 +77,13 @@
     try:
         # save handlers first so they can be restored even if a setup is
         # interrupted between signal.signal() and orighandlers[] =.
-        for name in ['CTRL_C_EVENT', 'SIGINT', 'SIGBREAK', 'SIGHUP', 'SIGTERM']:
+        for name in [
+            b'CTRL_C_EVENT',
+            b'SIGINT',
+            b'SIGBREAK',
+            b'SIGHUP',
+            b'SIGTERM',
+        ]:
             num = getattr(signal, name, None)
             if num and num not in orighandlers:
                 orighandlers[num] = signal.getsignal(num)
@@ -82,7 +91,7 @@
             for num in orighandlers:
                 signal.signal(num, catchterm)
         except ValueError:
-            pass # in a thread? no luck
+            pass  # in a thread? no luck
 
         blocked = True
         yield
@@ -95,13 +104,14 @@
             for num, handler in orighandlers.items():
                 signal.signal(num, handler)
         except ValueError:
-            pass # in a thread?
+            pass  # in a thread?
 
     # re-raise interrupt exception if any, which may be shadowed by a new
     # interrupt occurred while re-raising the first one
     if assertedsigs:
         raiseinterrupt(assertedsigs[0])
 
+
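
A minimal single-signal sketch of the deferral idea implemented above
(the real version also covers SIGTERM and friends, tolerates running in
a thread, and re-raises through raiseinterrupt())::

    import contextlib
    import signal

    @contextlib.contextmanager
    def delayedinterrupt():
        pending = []
        orig = signal.getsignal(signal.SIGINT)

        def handler(num, frame):
            pending.append(num)  # defer until the critical section ends

        try:
            signal.signal(signal.SIGINT, handler)
            yield
        finally:
            signal.signal(signal.SIGINT, orig)
            if pending:
                raise KeyboardInterrupt
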
 def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
     """return an acquired lock or raise an a LockHeld exception
 
@@ -111,14 +121,20 @@
     def printwarning(printer, locker):
         """issue the usual "waiting on lock" message through any channel"""
         # show more details for new-style locks
-        if ':' in locker:
-            host, pid = locker.split(":", 1)
-            msg = (_("waiting for lock on %s held by process %r on host %r\n")
-                   % (pycompat.bytestr(l.desc), pycompat.bytestr(pid),
-                      pycompat.bytestr(host)))
+        if b':' in locker:
+            host, pid = locker.split(b":", 1)
+            msg = _(
+                b"waiting for lock on %s held by process %r on host %r\n"
+            ) % (
+                pycompat.bytestr(l.desc),
+                pycompat.bytestr(pid),
+                pycompat.bytestr(host),
+            )
         else:
-            msg = (_("waiting for lock on %s held by %r\n")
-                   % (l.desc, pycompat.bytestr(locker)))
+            msg = _(b"waiting for lock on %s held by %r\n") % (
+                l.desc,
+                pycompat.bytestr(locker),
+            )
         printer(msg)
 
     l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs)
@@ -141,21 +157,23 @@
             if delay == warningidx:
                 printwarning(ui.warn, inst.locker)
             if timeout <= delay:
-                raise error.LockHeld(errno.ETIMEDOUT, inst.filename,
-                                     l.desc, inst.locker)
+                raise error.LockHeld(
+                    errno.ETIMEDOUT, inst.filename, l.desc, inst.locker
+                )
             time.sleep(1)
             delay += 1
 
     l.delay = delay
     if l.delay:
         if 0 <= warningidx <= l.delay:
-            ui.warn(_("got lock after %d seconds\n") % l.delay)
+            ui.warn(_(b"got lock after %d seconds\n") % l.delay)
         else:
-            ui.debug("got lock after %d seconds\n" % l.delay)
+            ui.debug(b"got lock after %d seconds\n" % l.delay)
     if l.acquirefn:
         l.acquirefn()
     return l
 
+
 class lock(object):
     '''An advisory lock held by one process to control access to a set
     of files.  Non-cooperating processes or incorrectly written scripts
@@ -176,9 +194,19 @@
 
     _host = None
 
-    def __init__(self, vfs, fname, timeout=-1, releasefn=None, acquirefn=None,
-                 desc=None, inheritchecker=None, parentlock=None,
-                 signalsafe=True, dolock=True):
+    def __init__(
+        self,
+        vfs,
+        fname,
+        timeout=-1,
+        releasefn=None,
+        acquirefn=None,
+        desc=None,
+        inheritchecker=None,
+        parentlock=None,
+        signalsafe=True,
+        dolock=True,
+    ):
         self.vfs = vfs
         self.f = fname
         self.held = 0
@@ -194,7 +222,7 @@
             self._maybedelayedinterrupt = _delayedinterrupt
         else:
             self._maybedelayedinterrupt = util.nullcontextmanager
-        self.postrelease  = []
+        self.postrelease = []
         self.pid = self._getpid()
         if dolock:
             self.delay = self.lock()
@@ -209,9 +237,11 @@
 
     def __del__(self):
         if self.held:
-            warnings.warn(r"use lock.release instead of del lock",
-                    category=DeprecationWarning,
-                    stacklevel=2)
+            warnings.warn(
+                r"use lock.release instead of del lock",
+                category=DeprecationWarning,
+                stacklevel=2,
+            )
 
             # ensure the lock will be removed
             # even if recursive locking did occur
@@ -235,8 +265,9 @@
                     if timeout > 0:
                         timeout -= 1
                     continue
-                raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
-                                     inst.locker)
+                raise error.LockHeld(
+                    errno.ETIMEDOUT, inst.filename, self.desc, inst.locker
+                )
 
     def _trylock(self):
         if self.held:
@@ -244,7 +275,7 @@
             return
         if lock._host is None:
             lock._host = _getlockprefix()
-        lockname = '%s:%d' % (lock._host, self.pid)
+        lockname = b'%s:%d' % (lock._host, self.pid)
         retry = 5
         while not self.held and retry:
             retry -= 1
@@ -268,18 +299,23 @@
                         return
                     locker = self._testlock(locker)
                     if locker is not None:
-                        raise error.LockHeld(errno.EAGAIN,
-                                             self.vfs.join(self.f), self.desc,
-                                             locker)
+                        raise error.LockHeld(
+                            errno.EAGAIN,
+                            self.vfs.join(self.f),
+                            self.desc,
+                            locker,
+                        )
                 else:
-                    raise error.LockUnavailable(why.errno, why.strerror,
-                                                why.filename, self.desc)
+                    raise error.LockUnavailable(
+                        why.errno, why.strerror, why.filename, self.desc
+                    )
 
         if not self.held:
             # use empty locker to mean "busy for frequent lock/unlock
             # by many processes"
-            raise error.LockHeld(errno.EAGAIN,
-                                 self.vfs.join(self.f), self.desc, "")
+            raise error.LockHeld(
+                errno.EAGAIN, self.vfs.join(self.f), self.desc, b""
+            )
 
     def _readlock(self):
         """read lock and return its value
@@ -298,7 +334,7 @@
         if locker is None:
             return None
         try:
-            host, pid = locker.split(":", 1)
+            host, pid = locker.split(b":", 1)
         except ValueError:
             return locker
         if host != lock._host:
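
New-style lock files hold b'host:pid', where the host part is the
prefix from _getlockprefix(). The split above recovers both pieces;
a worked example with an assumed value::

    locker = b'myhost/1a2b:4242'
    host, pid = locker.split(b':', 1)
    print(host, pid)  # b'myhost/1a2b' b'4242'
    # old-style content without b':' raises ValueError and is
    # returned unparsed by _testlock()
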
@@ -312,7 +348,7 @@
         # if locker dead, break lock.  must do this with another lock
         # held, or can race and break valid lock.
         try:
-            l = lock(self.vfs, self.f + '.break', timeout=0)
+            l = lock(self.vfs, self.f + b'.break', timeout=0)
             self.vfs.unlink(self.f)
             l.release()
         except error.LockError:
@@ -342,10 +378,12 @@
         """
         if not self.held:
             raise error.LockInheritanceContractViolation(
-                'inherit can only be called while lock is held')
+                b'inherit can only be called while lock is held'
+            )
         if self._inherited:
             raise error.LockInheritanceContractViolation(
-                'inherit cannot be called while lock is already inherited')
+                b'inherit cannot be called while lock is already inherited'
+            )
         if self._inheritchecker is not None:
             self._inheritchecker()
         if self.releasefn:
@@ -391,6 +429,7 @@
                 # Prevent double usage and help clear cycles.
                 self.postrelease = None
 
+
 def release(*locks):
     for lock in locks:
         if lock is not None:
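
The release() helper whose opening lines are shown above tolerates None
entries, which suits the usual acquire-several-optional-locks pattern
(hypothetical caller with a stub lock)::

    class stublock(object):
        def release(self):
            print('released')

    def release(*locks):  # same shape as the helper above
        for lk in locks:
            if lk is not None:
                lk.release()

    wlock, lk = stublock(), None  # second lock never acquired
    release(lk, wlock)  # prints 'released' once; None is skipped
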
--- a/mercurial/logcmdutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/logcmdutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -41,57 +41,81 @@
     stringutil,
 )
 
+
 def getlimit(opts):
     """get the log limit according to option -l/--limit"""
-    limit = opts.get('limit')
+    limit = opts.get(b'limit')
     if limit:
         try:
             limit = int(limit)
         except ValueError:
-            raise error.Abort(_('limit must be a positive integer'))
+            raise error.Abort(_(b'limit must be a positive integer'))
         if limit <= 0:
-            raise error.Abort(_('limit must be positive'))
+            raise error.Abort(_(b'limit must be positive'))
     else:
         limit = None
     return limit
 
-def diffordiffstat(ui, repo, diffopts, node1, node2, match,
-                   changes=None, stat=False, fp=None, graphwidth=0,
-                   prefix='', root='', listsubrepos=False, hunksfilterfn=None):
+
+def diffordiffstat(
+    ui,
+    repo,
+    diffopts,
+    node1,
+    node2,
+    match,
+    changes=None,
+    stat=False,
+    fp=None,
+    graphwidth=0,
+    prefix=b'',
+    root=b'',
+    listsubrepos=False,
+    hunksfilterfn=None,
+):
     '''show diff or diffstat.'''
     ctx1 = repo[node1]
     ctx2 = repo[node2]
     if root:
         relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
     else:
-        relroot = ''
+        relroot = b''
     copysourcematch = None
+
     def compose(f, g):
         return lambda x: f(g(x))
+
     def pathfn(f):
         return posixpath.join(prefix, f)
-    if relroot != '':
+
+    if relroot != b'':
         # XXX relative roots currently don't work if the root is within a
         # subrepo
         uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
         uirelroot = uipathfn(pathfn(relroot))
-        relroot += '/'
+        relroot += b'/'
         for matchroot in match.files():
             if not matchroot.startswith(relroot):
-                ui.warn(_('warning: %s not inside relative root %s\n') %
-                        (uipathfn(pathfn(matchroot)), uirelroot))
+                ui.warn(
+                    _(b'warning: %s not inside relative root %s\n')
+                    % (uipathfn(pathfn(matchroot)), uirelroot)
+                )
 
-        relrootmatch = scmutil.match(ctx2, pats=[relroot], default='path')
+        relrootmatch = scmutil.match(ctx2, pats=[relroot], default=b'path')
         match = matchmod.intersectmatchers(match, relrootmatch)
         copysourcematch = relrootmatch
 
-        checkroot = (repo.ui.configbool('devel', 'all-warnings') or
-                     repo.ui.configbool('devel', 'check-relroot'))
+        checkroot = repo.ui.configbool(
+            b'devel', b'all-warnings'
+        ) or repo.ui.configbool(b'devel', b'check-relroot')
+
         def relrootpathfn(f):
             if checkroot and not f.startswith(relroot):
                 raise AssertionError(
-                    "file %s doesn't start with relroot %s" % (f, relroot))
-            return f[len(relroot):]
+                    b"file %s doesn't start with relroot %s" % (f, relroot)
+                )
+            return f[len(relroot) :]
+
         pathfn = compose(relrootpathfn, pathfn)
 
     if stat:
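
The compose()/relrootpathfn() pair above builds display paths in two
stages: apply the prefix, then strip the relative root. A standalone
trace with assumed values::

    import posixpath

    def compose(f, g):
        return lambda x: f(g(x))

    prefix, relroot = b'', b'sub/'

    def pathfn(f):
        return posixpath.join(prefix, f)

    def relrootpathfn(f):
        assert f.startswith(relroot)
        return f[len(relroot):]

    pathfn = compose(relrootpathfn, pathfn)
    print(pathfn(b'sub/dir/file'))  # b'dir/file'
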
@@ -103,9 +127,15 @@
         if not relroot:
             pathfn = compose(scmutil.getuipathfn(repo), pathfn)
 
-    chunks = ctx2.diff(ctx1, match, changes, opts=diffopts, pathfn=pathfn,
-                       copysourcematch=copysourcematch,
-                       hunksfilterfn=hunksfilterfn)
+    chunks = ctx2.diff(
+        ctx1,
+        match,
+        changes,
+        opts=diffopts,
+        pathfn=pathfn,
+        copysourcematch=copysourcematch,
+        hunksfilterfn=hunksfilterfn,
+    )
 
     if fp is not None or ui.canwritewithoutlabels():
         out = fp or ui
@@ -117,12 +147,15 @@
         if stat:
             chunks = patch.diffstatui(util.iterlines(chunks), width=width)
         else:
-            chunks = patch.difflabel(lambda chunks, **kwargs: chunks, chunks,
-                                     opts=diffopts)
+            chunks = patch.difflabel(
+                lambda chunks, **kwargs: chunks, chunks, opts=diffopts
+            )
         if ui.canbatchlabeledwrites():
+
             def gen():
                 for chunk, label in chunks:
                     yield ui.label(chunk, label=label)
+
             for chunk in util.filechunkiter(util.chunkbuffer(gen())):
                 ui.write(chunk)
         else:
@@ -142,8 +175,17 @@
         submatch = matchmod.subdirmatcher(subpath, match)
         subprefix = repo.wvfs.reljoin(prefix, subpath)
         if listsubrepos or match.exact(subpath) or any(submatch.files()):
-            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
-                     stat=stat, fp=fp, prefix=subprefix)
+            sub.diff(
+                ui,
+                diffopts,
+                tempnode2,
+                submatch,
+                changes=changes,
+                stat=stat,
+                fp=fp,
+                prefix=subprefix,
+            )
+
 
 class changesetdiffer(object):
     """Generate diff of changeset with pre-configured filtering functions"""
@@ -158,20 +200,29 @@
         repo = ctx.repo()
         node = ctx.node()
         prev = ctx.p1().node()
-        diffordiffstat(ui, repo, diffopts, prev, node,
-                       match=self._makefilematcher(ctx), stat=stat,
-                       graphwidth=graphwidth,
-                       hunksfilterfn=self._makehunksfilter(ctx))
+        diffordiffstat(
+            ui,
+            repo,
+            diffopts,
+            prev,
+            node,
+            match=self._makefilematcher(ctx),
+            stat=stat,
+            graphwidth=graphwidth,
+            hunksfilterfn=self._makehunksfilter(ctx),
+        )
+
 
 def changesetlabels(ctx):
-    labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
+    labels = [b'log.changeset', b'changeset.%s' % ctx.phasestr()]
     if ctx.obsolete():
-        labels.append('changeset.obsolete')
+        labels.append(b'changeset.obsolete')
     if ctx.isunstable():
-        labels.append('changeset.unstable')
+        labels.append(b'changeset.unstable')
         for instability in ctx.instabilities():
-            labels.append('instability.%s' % instability)
-    return ' '.join(labels)
+            labels.append(b'instability.%s' % instability)
+    return b' '.join(labels)
+
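
For a draft changeset that is an orphan, the label string assembled
above would read as follows (phase and instability assumed)::

    labels = [b'log.changeset', b'changeset.draft']
    labels.append(b'changeset.unstable')
    labels.append(b'instability.orphan')
    print(b' '.join(labels))
    # b'log.changeset changeset.draft changeset.unstable instability.orphan'
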
 
 class changesetprinter(object):
     '''show changeset information when templating not requested.'''
@@ -182,8 +233,8 @@
         self.buffered = buffered
         self._differ = differ or changesetdiffer()
         self._diffopts = patch.diffallopts(ui, diffopts)
-        self._includestat = diffopts and diffopts.get('stat')
-        self._includediff = diffopts and diffopts.get('patch')
+        self._includestat = diffopts and diffopts.get(b'stat')
+        self._includediff = diffopts and diffopts.get(b'patch')
         self.header = {}
         self.hunk = {}
         self.lastheader = None
@@ -218,40 +269,45 @@
     def _show(self, ctx, copies, props):
         '''show a single changeset or file revision'''
         changenode = ctx.node()
-        graphwidth = props.get('graphwidth', 0)
+        graphwidth = props.get(b'graphwidth', 0)
 
         if self.ui.quiet:
-            self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
-                          label='log.node')
+            self.ui.write(
+                b"%s\n" % scmutil.formatchangeid(ctx), label=b'log.node'
+            )
             return
 
         columns = self._columns
-        self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
-                      label=changesetlabels(ctx))
+        self.ui.write(
+            columns[b'changeset'] % scmutil.formatchangeid(ctx),
+            label=changesetlabels(ctx),
+        )
 
         # branches are shown first before any other names due to backwards
         # compatibility
         branch = ctx.branch()
         # don't show the default branch name
-        if branch != 'default':
-            self.ui.write(columns['branch'] % branch, label='log.branch')
+        if branch != b'default':
+            self.ui.write(columns[b'branch'] % branch, label=b'log.branch')
 
-        for nsname, ns in self.repo.names.iteritems():
+        for nsname, ns in pycompat.iteritems(self.repo.names):
             # branches has special logic already handled above, so here we just
             # skip it
-            if nsname == 'branches':
+            if nsname == b'branches':
                 continue
             # we will use the templatename as the color name since those two
             # should be the same
             for name in ns.names(self.repo, changenode):
-                self.ui.write(ns.logfmt % name,
-                              label='log.%s' % ns.colorname)
+                self.ui.write(ns.logfmt % name, label=b'log.%s' % ns.colorname)
         if self.ui.debugflag:
-            self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
+            self.ui.write(
+                columns[b'phase'] % ctx.phasestr(), label=b'log.phase'
+            )
         for pctx in scmutil.meaningfulparents(self.repo, ctx):
-            label = 'log.parent changeset.%s' % pctx.phasestr()
-            self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
-                          label=label)
+            label = b'log.parent changeset.%s' % pctx.phasestr()
+            self.ui.write(
+                columns[b'parent'] % scmutil.formatchangeid(pctx), label=label
+            )
 
         if self.ui.debugflag:
             mnode = ctx.manifestnode()
@@ -260,17 +316,22 @@
                 mrev = wdirrev
             else:
                 mrev = self.repo.manifestlog.rev(mnode)
-            self.ui.write(columns['manifest']
-                          % scmutil.formatrevnode(self.ui, mrev, mnode),
-                          label='ui.debug log.manifest')
-        self.ui.write(columns['user'] % ctx.user(), label='log.user')
-        self.ui.write(columns['date'] % dateutil.datestr(ctx.date()),
-                      label='log.date')
+            self.ui.write(
+                columns[b'manifest']
+                % scmutil.formatrevnode(self.ui, mrev, mnode),
+                label=b'ui.debug log.manifest',
+            )
+        self.ui.write(columns[b'user'] % ctx.user(), label=b'log.user')
+        self.ui.write(
+            columns[b'date'] % dateutil.datestr(ctx.date()), label=b'log.date'
+        )
 
         if ctx.isunstable():
             instabilities = ctx.instabilities()
-            self.ui.write(columns['instability'] % ', '.join(instabilities),
-                          label='log.instability')
+            self.ui.write(
+                columns[b'instability'] % b', '.join(instabilities),
+                label=b'log.instability',
+            )
 
         elif ctx.obsolete():
             self._showobsfate(ctx)
@@ -279,52 +340,66 @@
 
         if self.ui.debugflag:
             files = ctx.p1().status(ctx)[:3]
-            for key, value in zip(['files', 'files+', 'files-'], files):
+            for key, value in zip([b'files', b'files+', b'files-'], files):
                 if value:
-                    self.ui.write(columns[key] % " ".join(value),
-                                  label='ui.debug log.files')
+                    self.ui.write(
+                        columns[key] % b" ".join(value),
+                        label=b'ui.debug log.files',
+                    )
         elif ctx.files() and self.ui.verbose:
-            self.ui.write(columns['files'] % " ".join(ctx.files()),
-                          label='ui.note log.files')
+            self.ui.write(
+                columns[b'files'] % b" ".join(ctx.files()),
+                label=b'ui.note log.files',
+            )
         if copies and self.ui.verbose:
-            copies = ['%s (%s)' % c for c in copies]
-            self.ui.write(columns['copies'] % ' '.join(copies),
-                          label='ui.note log.copies')
+            copies = [b'%s (%s)' % c for c in copies]
+            self.ui.write(
+                columns[b'copies'] % b' '.join(copies),
+                label=b'ui.note log.copies',
+            )
 
         extra = ctx.extra()
         if extra and self.ui.debugflag:
             for key, value in sorted(extra.items()):
-                self.ui.write(columns['extra']
-                              % (key, stringutil.escapestr(value)),
-                              label='ui.debug log.extra')
+                self.ui.write(
+                    columns[b'extra'] % (key, stringutil.escapestr(value)),
+                    label=b'ui.debug log.extra',
+                )
 
         description = ctx.description().strip()
         if description:
             if self.ui.verbose:
-                self.ui.write(_("description:\n"),
-                              label='ui.note log.description')
-                self.ui.write(description,
-                              label='ui.note log.description')
-                self.ui.write("\n\n")
+                self.ui.write(
+                    _(b"description:\n"), label=b'ui.note log.description'
+                )
+                self.ui.write(description, label=b'ui.note log.description')
+                self.ui.write(b"\n\n")
             else:
-                self.ui.write(columns['summary'] % description.splitlines()[0],
-                              label='log.summary')
-        self.ui.write("\n")
+                self.ui.write(
+                    columns[b'summary'] % description.splitlines()[0],
+                    label=b'log.summary',
+                )
+        self.ui.write(b"\n")
 
         self._showpatch(ctx, graphwidth)
 
     def _showobsfate(self, ctx):
         # TODO: do not depend on templater
         tres = formatter.templateresources(self.repo.ui, self.repo)
-        t = formatter.maketemplater(self.repo.ui, '{join(obsfate, "\n")}',
-                                    defaults=templatekw.keywords,
-                                    resources=tres)
-        obsfate = t.renderdefault({'ctx': ctx}).splitlines()
+        t = formatter.maketemplater(
+            self.repo.ui,
+            b'{join(obsfate, "\n")}',
+            defaults=templatekw.keywords,
+            resources=tres,
+        )
+        obsfate = t.renderdefault({b'ctx': ctx}).splitlines()
 
         if obsfate:
             for obsfateline in obsfate:
-                self.ui.write(self._columns['obsolete'] % obsfateline,
-                              label='log.obsfate')
+                self.ui.write(
+                    self._columns[b'obsolete'] % obsfateline,
+                    label=b'log.obsfate',
+                )
 
     def _exthook(self, ctx):
         '''empty method used by extensions as a hook point
@@ -332,21 +407,25 @@
 
     def _showpatch(self, ctx, graphwidth=0):
         if self._includestat:
-            self._differ.showdiff(self.ui, ctx, self._diffopts,
-                                  graphwidth, stat=True)
+            self._differ.showdiff(
+                self.ui, ctx, self._diffopts, graphwidth, stat=True
+            )
         if self._includestat and self._includediff:
-            self.ui.write("\n")
+            self.ui.write(b"\n")
         if self._includediff:
-            self._differ.showdiff(self.ui, ctx, self._diffopts,
-                                  graphwidth, stat=False)
+            self._differ.showdiff(
+                self.ui, ctx, self._diffopts, graphwidth, stat=False
+            )
         if self._includestat or self._includediff:
-            self.ui.write("\n")
+            self.ui.write(b"\n")
+
 
 class changesetformatter(changesetprinter):
     """Format changeset information by generic formatter"""
 
-    def __init__(self, ui, repo, fm, differ=None, diffopts=None,
-                 buffered=False):
+    def __init__(
+        self, ui, repo, fm, differ=None, diffopts=None, buffered=False
+    ):
         changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
         self._diffopts = patch.difffeatureopts(ui, diffopts, git=True)
         self._fm = fm
@@ -359,46 +438,61 @@
         fm = self._fm
         fm.startitem()
         fm.context(ctx=ctx)
-        fm.data(rev=scmutil.intrev(ctx),
-                node=fm.hexfunc(scmutil.binnode(ctx)))
+        fm.data(rev=scmutil.intrev(ctx), node=fm.hexfunc(scmutil.binnode(ctx)))
 
-        if self.ui.quiet:
+        datahint = fm.datahint()
+        if self.ui.quiet and not datahint:
             return
 
-        fm.data(branch=ctx.branch(),
-                phase=ctx.phasestr(),
-                user=ctx.user(),
-                date=fm.formatdate(ctx.date()),
-                desc=ctx.description(),
-                bookmarks=fm.formatlist(ctx.bookmarks(), name='bookmark'),
-                tags=fm.formatlist(ctx.tags(), name='tag'),
-                parents=fm.formatlist([fm.hexfunc(c.node())
-                                       for c in ctx.parents()], name='node'))
+        fm.data(
+            branch=ctx.branch(),
+            phase=ctx.phasestr(),
+            user=ctx.user(),
+            date=fm.formatdate(ctx.date()),
+            desc=ctx.description(),
+            bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'),
+            tags=fm.formatlist(ctx.tags(), name=b'tag'),
+            parents=fm.formatlist(
+                [fm.hexfunc(c.node()) for c in ctx.parents()], name=b'node'
+            ),
+        )
+
+        if self.ui.debugflag or b'manifest' in datahint:
+            fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid))
+        if self.ui.debugflag or b'extra' in datahint:
+            fm.data(extra=fm.formatdict(ctx.extra()))
 
-        if self.ui.debugflag:
-            fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid),
-                    extra=fm.formatdict(ctx.extra()))
-
+        if (
+            self.ui.debugflag
+            or b'modified' in datahint
+            or b'added' in datahint
+            or b'removed' in datahint
+        ):
             files = ctx.p1().status(ctx)
-            fm.data(modified=fm.formatlist(files[0], name='file'),
-                    added=fm.formatlist(files[1], name='file'),
-                    removed=fm.formatlist(files[2], name='file'))
+            fm.data(
+                modified=fm.formatlist(files[0], name=b'file'),
+                added=fm.formatlist(files[1], name=b'file'),
+                removed=fm.formatlist(files[2], name=b'file'),
+            )
 
-        elif self.ui.verbose:
-            fm.data(files=fm.formatlist(ctx.files(), name='file'))
-            if copies:
-                fm.data(copies=fm.formatdict(copies,
-                                             key='name', value='source'))
+        verbose = not self.ui.debugflag and self.ui.verbose
+        if verbose or b'files' in datahint:
+            fm.data(files=fm.formatlist(ctx.files(), name=b'file'))
+        if verbose and copies or b'copies' in datahint:
+            fm.data(
+                copies=fm.formatdict(copies or {}, key=b'name', value=b'source')
+            )
 
-        if self._includestat:
+        if self._includestat or b'diffstat' in datahint:
             self.ui.pushbuffer()
             self._differ.showdiff(self.ui, ctx, self._diffopts, stat=True)
             fm.data(diffstat=self.ui.popbuffer())
-        if self._includediff:
+        if self._includediff or b'diff' in datahint:
             self.ui.pushbuffer()
             self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False)
             fm.data(diff=self.ui.popbuffer())
 
+
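
The new datahint() gating above means expensive fields are produced
only when the requested template references them. A toy emulation of
that gating (not Mercurial's formatter API)::

    def emit(datahint, includestat=False):
        data = {}
        if includestat or b'diffstat' in datahint:
            # stands in for the pushbuffer()/showdiff()/popbuffer() dance
            data[b'diffstat'] = b' 1 files changed, 2 insertions(+)'
        return data

    print(emit(set()))          # {} -- nothing requested, nothing computed
    print(emit({b'diffstat'}))  # computed on demand
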
 class changesettemplater(changesetprinter):
     '''format changeset information.
 
@@ -410,30 +504,38 @@
 
     # Arguments before "buffered" used to be positional. Consider not
     # adding/removing arguments before "buffered" to not break callers.
-    def __init__(self, ui, repo, tmplspec, differ=None, diffopts=None,
-                 buffered=False):
+    def __init__(
+        self, ui, repo, tmplspec, differ=None, diffopts=None, buffered=False
+    ):
         changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
         # tres is shared with _graphnodeformatter()
         self._tresources = tres = formatter.templateresources(ui, repo)
-        self.t = formatter.loadtemplater(ui, tmplspec,
-                                         defaults=templatekw.keywords,
-                                         resources=tres,
-                                         cache=templatekw.defaulttempl)
+        self.t = formatter.loadtemplater(
+            ui,
+            tmplspec,
+            defaults=templatekw.keywords,
+            resources=tres,
+            cache=templatekw.defaulttempl,
+        )
         self._counter = itertools.count()
 
         self._tref = tmplspec.ref
-        self._parts = {'header': '', 'footer': '',
-                       tmplspec.ref: tmplspec.ref,
-                       'docheader': '', 'docfooter': '',
-                       'separator': ''}
+        self._parts = {
+            b'header': b'',
+            b'footer': b'',
+            tmplspec.ref: tmplspec.ref,
+            b'docheader': b'',
+            b'docfooter': b'',
+            b'separator': b'',
+        }
         if tmplspec.mapfile:
             # find correct templates for current mode, for backward
             # compatibility with 'log -v/-q/--debug' using a mapfile
             tmplmodes = [
-                (True, ''),
-                (self.ui.verbose, '_verbose'),
-                (self.ui.quiet, '_quiet'),
-                (self.ui.debugflag, '_debug'),
+                (True, b''),
+                (self.ui.verbose, b'_verbose'),
+                (self.ui.quiet, b'_quiet'),
+                (self.ui.debugflag, b'_debug'),
             ]
             for mode, postfix in tmplmodes:
                 for t in self._parts:
@@ -445,33 +547,33 @@
             m = formatter.templatepartsmap(tmplspec, self.t, partnames)
             self._parts.update(m)
 
-        if self._parts['docheader']:
-            self.ui.write(self.t.render(self._parts['docheader'], {}))
+        if self._parts[b'docheader']:
+            self.ui.write(self.t.render(self._parts[b'docheader'], {}))
 
     def close(self):
-        if self._parts['docfooter']:
+        if self._parts[b'docfooter']:
             if not self.footer:
-                self.footer = ""
-            self.footer += self.t.render(self._parts['docfooter'], {})
+                self.footer = b""
+            self.footer += self.t.render(self._parts[b'docfooter'], {})
         return super(changesettemplater, self).close()
 
     def _show(self, ctx, copies, props):
         '''show a single changeset or file revision'''
         props = props.copy()
-        props['ctx'] = ctx
-        props['index'] = index = next(self._counter)
-        props['revcache'] = {'copies': copies}
-        graphwidth = props.get('graphwidth', 0)
+        props[b'ctx'] = ctx
+        props[b'index'] = index = next(self._counter)
+        props[b'revcache'] = {b'copies': copies}
+        graphwidth = props.get(b'graphwidth', 0)
 
         # write separator, which wouldn't work well with the header part below
         # since there's inherently a conflict between header (across items) and
         # separator (per item)
-        if self._parts['separator'] and index > 0:
-            self.ui.write(self.t.render(self._parts['separator'], {}))
+        if self._parts[b'separator'] and index > 0:
+            self.ui.write(self.t.render(self._parts[b'separator'], {}))
 
         # write header
-        if self._parts['header']:
-            h = self.t.render(self._parts['header'], props)
+        if self._parts[b'header']:
+            h = self.t.render(self._parts[b'header'], props)
             if self.buffered:
                 self.header[ctx.rev()] = h
             else:
@@ -484,17 +586,19 @@
         self.ui.write(self.t.render(key, props))
         self._showpatch(ctx, graphwidth)
 
-        if self._parts['footer']:
+        if self._parts[b'footer']:
             if not self.footer:
-                self.footer = self.t.render(self._parts['footer'], props)
+                self.footer = self.t.render(self._parts[b'footer'], props)
+
 
 def templatespec(tmpl, mapfile):
     if pycompat.ispy3:
-        assert not isinstance(tmpl, str), 'tmpl must not be a str'
+        assert not isinstance(tmpl, str), b'tmpl must not be a str'
     if mapfile:
-        return formatter.templatespec('changeset', tmpl, mapfile)
+        return formatter.templatespec(b'changeset', tmpl, mapfile)
     else:
-        return formatter.templatespec('', tmpl, None)
+        return formatter.templatespec(b'', tmpl, None)
+
 
 def _lookuptemplate(ui, tmpl, style):
     """Find the template matching the given template spec or style
@@ -503,26 +607,25 @@
     """
 
     # ui settings
-    if not tmpl and not style: # templates are stronger than styles
-        tmpl = ui.config('ui', 'logtemplate')
+    if not tmpl and not style:  # templates are stronger than styles
+        tmpl = ui.config(b'ui', b'logtemplate')
         if tmpl:
             return templatespec(templater.unquotestring(tmpl), None)
         else:
-            style = util.expandpath(ui.config('ui', 'style'))
+            style = util.expandpath(ui.config(b'ui', b'style'))
 
     if not tmpl and style:
         mapfile = style
         if not os.path.split(mapfile)[0]:
-            mapname = (templater.templatepath('map-cmdline.' + mapfile)
-                       or templater.templatepath(mapfile))
+            mapname = templater.templatepath(
+                b'map-cmdline.' + mapfile
+            ) or templater.templatepath(mapfile)
             if mapname:
                 mapfile = mapname
         return templatespec(None, mapfile)
 
-    if not tmpl:
-        return templatespec(None, None)
+    return formatter.lookuptemplate(ui, b'changeset', tmpl)
 
-    return formatter.lookuptemplate(ui, 'changeset', tmpl)
 
 def maketemplater(ui, repo, tmpl, buffered=False):
     """Create a changesettemplater from a literal template 'tmpl'
@@ -530,6 +633,7 @@
     spec = templatespec(tmpl, None)
     return changesettemplater(ui, repo, spec, buffered=buffered)
 
+
 def changesetdisplayer(ui, repo, opts, differ=None, buffered=False):
     """show one changeset using template or regular display.
 
@@ -542,17 +646,21 @@
     regular display via changesetprinter() is done.
     """
     postargs = (differ, opts, buffered)
-    if opts.get('template') in {'cbor', 'json'}:
-        fm = ui.formatter('log', opts)
+    spec = _lookuptemplate(ui, opts.get(b'template'), opts.get(b'style'))
+
+    # machine-readable formats have a slightly different keyword set than
+    # plain templates; they are handled by changesetformatter.
+    # note that {b'pickle', b'debug'} can also be added to the list if needed.
+    if spec.ref in {b'cbor', b'json'}:
+        fm = ui.formatter(b'log', opts)
         return changesetformatter(ui, repo, fm, *postargs)
 
-    spec = _lookuptemplate(ui, opts.get('template'), opts.get('style'))
-
     if not spec.ref and not spec.tmpl and not spec.mapfile:
         return changesetprinter(ui, repo, *postargs)
 
     return changesettemplater(ui, repo, spec, *postargs)
 
+
 def _makematcher(repo, revs, pats, opts):
     """Build matcher and expanded patterns from log options
 
@@ -569,11 +677,11 @@
     # platforms without shell expansion (windows).
     wctx = repo[None]
     match, pats = scmutil.matchandpats(wctx, pats, opts)
-    slowpath = match.anypats() or (not match.always() and opts.get('removed'))
+    slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
     if not slowpath:
-        follow = opts.get('follow') or opts.get('follow_first')
+        follow = opts.get(b'follow') or opts.get(b'follow_first')
         startctxs = []
-        if follow and opts.get('rev'):
+        if follow and opts.get(b'rev'):
             startctxs = [repo[r] for r in revs]
         for f in match.files():
             if follow and startctxs:
@@ -589,15 +697,21 @@
                     slowpath = True
                     continue
                 else:
-                    raise error.Abort(_('cannot follow file not in parent '
-                                        'revision: "%s"') % f)
+                    raise error.Abort(
+                        _(
+                            b'cannot follow file not in parent '
+                            b'revision: "%s"'
+                        )
+                        % f
+                    )
             filelog = repo.file(f)
             if not filelog:
                 # A zero count may be a directory or deleted file, so
                 # try to find matching entries on the slow path.
                 if follow:
                     raise error.Abort(
-                        _('cannot follow nonexistent file: "%s"') % f)
+                        _(b'cannot follow nonexistent file: "%s"') % f
+                    )
                 slowpath = True
 
         # We decided to fall back to the slowpath because at least one
@@ -606,13 +720,14 @@
         # slowpath; otherwise, we can turn off the slowpath
         if slowpath:
             for path in match.files():
-                if path == '.' or path in repo.store:
+                if path == b'.' or path in repo.store:
                     break
             else:
                 slowpath = False
 
     return match, pats, slowpath
 
+
 def _fileancestors(repo, revs, match, followfirst):
     fctxs = []
     for r in revs:
@@ -625,6 +740,7 @@
     # revision, stored in "fcache". "fcache" is populated as a side effect
     # of the graph traversal.
     fcache = {}
+
     def filematcher(ctx):
         return scmutil.matchfiles(repo, fcache.get(ctx.rev(), []))
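
_fileancestors() relies on a side effect: the generator fills fcache
while revisions stream out, and filematcher() consults it afterwards.
Reduced to its essence::

    fcache = {}

    def revgen(pairs):
        for rev, files in pairs:
            fcache[rev] = files  # populated as the traversal progresses
            yield rev

    revs = list(revgen([(3, [b'a']), (2, [b'a', b'b'])]))
    print(revs, fcache[2])  # [3, 2] [b'a', b'b']
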
 
@@ -632,34 +748,38 @@
         for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
             fcache[rev] = [c.path() for c in cs]
             yield rev
+
     return smartset.generatorset(revgen(), iterasc=False), filematcher
 
+
 def _makenofollowfilematcher(repo, pats, opts):
     '''hook for extensions to override the filematcher for non-follow cases'''
     return None
 
+
 _opt2logrevset = {
-    'no_merges':        ('not merge()', None),
-    'only_merges':      ('merge()', None),
-    '_matchfiles':      (None, '_matchfiles(%ps)'),
-    'date':             ('date(%s)', None),
-    'branch':           ('branch(%s)', '%lr'),
-    '_patslog':         ('filelog(%s)', '%lr'),
-    'keyword':          ('keyword(%s)', '%lr'),
-    'prune':            ('ancestors(%s)', 'not %lr'),
-    'user':             ('user(%s)', '%lr'),
+    b'no_merges': (b'not merge()', None),
+    b'only_merges': (b'merge()', None),
+    b'_matchfiles': (None, b'_matchfiles(%ps)'),
+    b'date': (b'date(%s)', None),
+    b'branch': (b'branch(%s)', b'%lr'),
+    b'_patslog': (b'filelog(%s)', b'%lr'),
+    b'keyword': (b'keyword(%s)', b'%lr'),
+    b'prune': (b'ancestors(%s)', b'not %lr'),
+    b'user': (b'user(%s)', b'%lr'),
 }
 
+
 def _makerevset(repo, match, pats, slowpath, opts):
     """Return a revset string built from log options and file patterns"""
     opts = dict(opts)
     # follow or not follow?
-    follow = opts.get('follow') or opts.get('follow_first')
+    follow = opts.get(b'follow') or opts.get(b'follow_first')
 
     # branch and only_branch are really aliases and must be handled at
     # the same time
-    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
-    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
+    opts[b'branch'] = opts.get(b'branch', []) + opts.get(b'only_branch', [])
+    opts[b'branch'] = [repo.lookupbranch(b) for b in opts[b'branch']]
 
     if slowpath:
         # See walkchangerevs() slow path.
@@ -670,25 +790,25 @@
         # "a" and "b" while "file(a) and not file(b)" does
         # not. Besides, filesets are evaluated against the working
         # directory.
-        matchargs = ['r:', 'd:relpath']
+        matchargs = [b'r:', b'd:relpath']
         for p in pats:
-            matchargs.append('p:' + p)
-        for p in opts.get('include', []):
-            matchargs.append('i:' + p)
-        for p in opts.get('exclude', []):
-            matchargs.append('x:' + p)
-        opts['_matchfiles'] = matchargs
+            matchargs.append(b'p:' + p)
+        for p in opts.get(b'include', []):
+            matchargs.append(b'i:' + p)
+        for p in opts.get(b'exclude', []):
+            matchargs.append(b'x:' + p)
+        opts[b'_matchfiles'] = matchargs
     elif not follow:
-        opts['_patslog'] = list(pats)
+        opts[b'_patslog'] = list(pats)
 
     expr = []
-    for op, val in sorted(opts.iteritems()):
+    for op, val in sorted(pycompat.iteritems(opts)):
         if not val:
             continue
         if op not in _opt2logrevset:
             continue
         revop, listop = _opt2logrevset[op]
-        if revop and '%' not in revop:
+        if revop and b'%' not in revop:
             expr.append(revop)
         elif not listop:
             expr.append(revsetlang.formatspec(revop, val))
@@ -698,32 +818,34 @@
             expr.append(revsetlang.formatspec(listop, val))
 
     if expr:
-        expr = '(' + ' and '.join(expr) + ')'
+        expr = b'(' + b' and '.join(expr) + b')'
     else:
         expr = None
     return expr
 
+
 def _initialrevs(repo, opts):
     """Return the initial set of revisions to be filtered or followed"""
-    follow = opts.get('follow') or opts.get('follow_first')
-    if opts.get('rev'):
-        revs = scmutil.revrange(repo, opts['rev'])
+    follow = opts.get(b'follow') or opts.get(b'follow_first')
+    if opts.get(b'rev'):
+        revs = scmutil.revrange(repo, opts[b'rev'])
     elif follow and repo.dirstate.p1() == nullid:
         revs = smartset.baseset()
     elif follow:
-        revs = repo.revs('.')
+        revs = repo.revs(b'.')
     else:
         revs = smartset.spanset(repo)
         revs.reverse()
     return revs
 
+
 def getrevs(repo, pats, opts):
     """Return (revs, differ) where revs is a smartset
 
     differ is a changesetdiffer with pre-configured file matcher.
     """
-    follow = opts.get('follow') or opts.get('follow_first')
-    followfirst = opts.get('follow_first')
+    follow = opts.get(b'follow') or opts.get(b'follow_first')
+    followfirst = opts.get(b'follow_first')
     limit = getlimit(opts)
     revs = _initialrevs(repo, opts)
     if not revs:
@@ -739,14 +861,15 @@
     if filematcher is None:
         filematcher = _makenofollowfilematcher(repo, pats, opts)
     if filematcher is None:
+
         def filematcher(ctx):
             return match
 
     expr = _makerevset(repo, match, pats, slowpath, opts)
-    if opts.get('graph'):
+    if opts.get(b'graph'):
         # User-specified revs might be unsorted, but don't sort before
         # _makerevset because it might depend on the order of revs
-        if repo.ui.configbool('experimental', 'log.topo'):
+        if repo.ui.configbool(b'experimental', b'log.topo'):
             if not revs.istopo():
                 revs = dagop.toposort(revs, repo.changelog.parentrevs)
                 # TODO: try to iterate the set lazily
@@ -763,26 +886,29 @@
     differ._makefilematcher = filematcher
     return revs, differ
 
+
 def _parselinerangeopt(repo, opts):
     """Parse --line-range log option and return a list of tuples (filename,
     (fromline, toline)).
     """
     linerangebyfname = []
-    for pat in opts.get('line_range', []):
+    for pat in opts.get(b'line_range', []):
         try:
-            pat, linerange = pat.rsplit(',', 1)
+            pat, linerange = pat.rsplit(b',', 1)
         except ValueError:
-            raise error.Abort(_('malformatted line-range pattern %s') % pat)
+            raise error.Abort(_(b'malformatted line-range pattern %s') % pat)
         try:
-            fromline, toline = map(int, linerange.split(':'))
+            fromline, toline = map(int, linerange.split(b':'))
         except ValueError:
-            raise error.Abort(_("invalid line range for %s") % pat)
-        msg = _("line range pattern '%s' must match exactly one file") % pat
+            raise error.Abort(_(b"invalid line range for %s") % pat)
+        msg = _(b"line range pattern '%s' must match exactly one file") % pat
         fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
         linerangebyfname.append(
-            (fname, util.processlinerange(fromline, toline)))
+            (fname, util.processlinerange(fromline, toline))
+        )
     return linerangebyfname
 
+
 def getlinerangerevs(repo, userrevs, opts):
     """Return (revs, differ).
 
@@ -798,16 +924,17 @@
     linerangesbyrev = {}
     for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
         if fname not in wctx:
-            raise error.Abort(_('cannot follow file not in parent '
-                                'revision: "%s"') % fname)
+            raise error.Abort(
+                _(b'cannot follow file not in parent revision: "%s"') % fname
+            )
         fctx = wctx.filectx(fname)
         for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
             rev = fctx.introrev()
             if rev not in userrevs:
                 continue
-            linerangesbyrev.setdefault(
-                rev, {}).setdefault(
-                    fctx.path(), []).append(linerange)
+            linerangesbyrev.setdefault(rev, {}).setdefault(
+                fctx.path(), []
+            ).append(linerange)
 
     def nofilterhunksfn(fctx, hunks):
         return hunks
@@ -821,11 +948,10 @@
             lineranges = fctxlineranges.get(fctx.path())
             if lineranges is not None:
                 for hr, lines in hunks:
-                    if hr is None: # binary
+                    if hr is None:  # binary
                         yield hr, lines
                         continue
-                    if any(mdiff.hunkinrange(hr[2:], lr)
-                           for lr in lineranges):
+                    if any(mdiff.hunkinrange(hr[2:], lr) for lr in lineranges):
                         yield hr, lines
             else:
                 for hunk in hunks:
@@ -844,8 +970,9 @@
     differ._makehunksfilter = hunksfilter
     return revs, differ
 
+
 def _graphnodeformatter(ui, displayer):
-    spec = ui.config('ui', 'graphnodetemplate')
+    spec = ui.config(b'ui', b'graphnodetemplate')
     if not spec:
         return templatekw.getgraphnode  # fast path for "{graphnode}"
 
@@ -855,38 +982,43 @@
         tres = displayer._tresources
     else:
         tres = formatter.templateresources(ui)
-    templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords,
-                                    resources=tres)
+    templ = formatter.maketemplater(
+        ui, spec, defaults=templatekw.keywords, resources=tres
+    )
+
     def formatnode(repo, ctx):
-        props = {'ctx': ctx, 'repo': repo}
+        props = {b'ctx': ctx, b'repo': repo}
         return templ.renderdefault(props)
+
     return formatnode
 
+
 def displaygraph(ui, repo, dag, displayer, edgefn, getcopies=None, props=None):
     props = props or {}
     formatnode = _graphnodeformatter(ui, displayer)
     state = graphmod.asciistate()
-    styles = state['styles']
+    styles = state[b'styles']
 
     # only set graph styling if HGPLAIN is not set.
-    if ui.plain('graph'):
+    if ui.plain(b'graph'):
         # set all edge styles to |, the default pre-3.8 behaviour
-        styles.update(dict.fromkeys(styles, '|'))
+        styles.update(dict.fromkeys(styles, b'|'))
     else:
         edgetypes = {
-            'parent': graphmod.PARENT,
-            'grandparent': graphmod.GRANDPARENT,
-            'missing': graphmod.MISSINGPARENT
+            b'parent': graphmod.PARENT,
+            b'grandparent': graphmod.GRANDPARENT,
+            b'missing': graphmod.MISSINGPARENT,
         }
         for name, key in edgetypes.items():
             # experimental config: experimental.graphstyle.*
-            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
-                                    styles[key])
+            styles[key] = ui.config(
+                b'experimental', b'graphstyle.%s' % name, styles[key]
+            )
             if not styles[key]:
                 styles[key] = None
 
         # experimental config: experimental.graphshorten
-        state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
+        state[b'graphshorten'] = ui.configbool(b'experimental', b'graphshorten')
 
     for rev, type, ctx, parents in dag:
         char = formatnode(repo, ctx)
@@ -894,9 +1026,10 @@
         edges = edgefn(type, char, state, rev, parents)
         firstedge = next(edges)
         width = firstedge[2]
-        displayer.show(ctx, copies=copies,
-                       graphwidth=width, **pycompat.strkwargs(props))
-        lines = displayer.hunk.pop(rev).split('\n')
+        displayer.show(
+            ctx, copies=copies, graphwidth=width, **pycompat.strkwargs(props)
+        )
+        lines = displayer.hunk.pop(rev).split(b'\n')
         if not lines[-1]:
             del lines[-1]
         displayer.flush(ctx)
@@ -905,10 +1038,12 @@
             lines = []
     displayer.close()
 
+
 def displaygraphrevs(ui, repo, revs, displayer, getrenamed):
     revdag = graphmod.dagwalker(repo, revs)
     displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
 
+
 def displayrevs(ui, repo, revs, displayer, getcopies):
     for rev in revs:
         ctx = repo[rev]
@@ -917,11 +1052,15 @@
         displayer.flush(ctx)
     displayer.close()
 
+
 def checkunsupportedgraphflags(pats, opts):
-    for op in ["newest_first"]:
+    for op in [b"newest_first"]:
         if op in opts and opts[op]:
-            raise error.Abort(_("-G/--graph option is incompatible with --%s")
-                             % op.replace("_", "-"))
+            raise error.Abort(
+                _(b"-G/--graph option is incompatible with --%s")
+                % op.replace(b"_", b"-")
+            )
+
 
 def graphrevs(repo, nodes, opts):
     limit = getlimit(opts)
--- a/mercurial/logexchange.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/logexchange.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,12 +11,14 @@
 from .node import hex
 
 from . import (
+    pycompat,
     util,
     vfs as vfsmod,
 )
 
 # directory name in .hg/ in which remotenames files will be present
-remotenamedir = 'logexchange'
+remotenamedir = b'logexchange'
+
 
 def readremotenamefile(repo, filename):
     """
@@ -38,13 +40,14 @@
         if lineno == 0:
             lineno += 1
         try:
-            node, remote, rname = line.split('\0')
+            node, remote, rname = line.split(b'\0')
             yield node, remote, rname
         except ValueError:
             pass
 
     f.close()
 
+
 def readremotenames(repo):
     """
     read the details about the remotenames stored in .hg/logexchange/ and
@@ -53,34 +56,36 @@
     information, call the respective functions.
     """
 
-    for bmentry in readremotenamefile(repo, 'bookmarks'):
+    for bmentry in readremotenamefile(repo, b'bookmarks'):
         yield bmentry
-    for branchentry in readremotenamefile(repo, 'branches'):
+    for branchentry in readremotenamefile(repo, b'branches'):
         yield branchentry
 
+
 def writeremotenamefile(repo, remotepath, names, nametype):
     vfs = vfsmod.vfs(repo.vfs.join(remotenamedir))
-    f = vfs(nametype, 'w', atomictemp=True)
+    f = vfs(nametype, b'w', atomictemp=True)
     # write the storage version info on top of file
     # version '0' represents the very initial version of the storage format
-    f.write('0\n\n')
+    f.write(b'0\n\n')
 
     olddata = set(readremotenamefile(repo, nametype))
     # re-save the data from a different remote than this one.
     for node, oldpath, rname in sorted(olddata):
         if oldpath != remotepath:
-            f.write('%s\0%s\0%s\n' % (node, oldpath, rname))
+            f.write(b'%s\0%s\0%s\n' % (node, oldpath, rname))
 
-    for name, node in sorted(names.iteritems()):
-        if nametype == "branches":
+    for name, node in sorted(pycompat.iteritems(names)):
+        if nametype == b"branches":
             for n in node:
-                f.write('%s\0%s\0%s\n' % (n, remotepath, name))
-        elif nametype == "bookmarks":
+                f.write(b'%s\0%s\0%s\n' % (n, remotepath, name))
+        elif nametype == b"bookmarks":
             if node:
-                f.write('%s\0%s\0%s\n' % (node, remotepath, name))
+                f.write(b'%s\0%s\0%s\n' % (node, remotepath, name))
 
     f.close()
 
+
 def saveremotenames(repo, remotepath, branches=None, bookmarks=None):
     """
     save remotenames i.e. remotebookmarks and remotebranches in their
@@ -89,12 +94,13 @@
     wlock = repo.wlock()
     try:
         if bookmarks:
-            writeremotenamefile(repo, remotepath, bookmarks, 'bookmarks')
+            writeremotenamefile(repo, remotepath, bookmarks, b'bookmarks')
         if branches:
-            writeremotenamefile(repo, remotepath, branches, 'branches')
+            writeremotenamefile(repo, remotepath, branches, b'branches')
     finally:
         wlock.release()
 
+
 def activepath(repo, remote):
     """returns remote path"""
     # is the remote a local peer
@@ -109,7 +115,7 @@
         rpath = remote._url
 
     # represent the remotepath with user defined path name if exists
-    for path, url in repo.ui.configitems('paths'):
+    for path, url in repo.ui.configitems(b'paths'):
         # remove auth info from user defined url
         noauthurl = util.removeauth(url)
 
@@ -123,6 +129,7 @@
 
     return rpath
 
+
 def pullremotenames(localrepo, remoterepo):
     """
     pulls bookmarks and branches information of the remote repo during a
@@ -133,9 +140,9 @@
     remotepath = activepath(localrepo, remoterepo)
 
     with remoterepo.commandexecutor() as e:
-        bookmarks = e.callcommand('listkeys', {
-            'namespace': 'bookmarks',
-        }).result()
+        bookmarks = e.callcommand(
+            b'listkeys', {b'namespace': b'bookmarks',}
+        ).result()
 
     # on a push, we don't want to keep obsolete heads since
     # they won't show up as heads on the next pull, so we
@@ -145,9 +152,9 @@
     repo = localrepo.unfiltered()
 
     with remoterepo.commandexecutor() as e:
-        branchmap = e.callcommand('branchmap', {}).result()
+        branchmap = e.callcommand(b'branchmap', {}).result()
 
-    for branch, nodes in branchmap.iteritems():
+    for branch, nodes in pycompat.iteritems(branchmap):
         bmap[branch] = []
         for node in nodes:
             if node in repo and not repo[node].obsolete():
--- a/mercurial/loggingutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/loggingutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,9 +10,7 @@
 
 import errno
 
-from . import (
-    pycompat,
-)
+from . import pycompat
 
 from .utils import (
     dateutil,
@@ -20,25 +18,31 @@
     stringutil,
 )
 
+
 def openlogfile(ui, vfs, name, maxfiles=0, maxsize=0):
     """Open log file in append mode, with optional rotation
 
     If maxsize > 0, the log file will be rotated up to maxfiles.
     """
+
     def rotate(oldpath, newpath):
         try:
             vfs.unlink(newpath)
         except OSError as err:
             if err.errno != errno.ENOENT:
-                ui.debug("warning: cannot remove '%s': %s\n" %
-                         (newpath, err.strerror))
+                ui.debug(
+                    b"warning: cannot remove '%s': %s\n"
+                    % (newpath, err.strerror)
+                )
         try:
             if newpath:
                 vfs.rename(oldpath, newpath)
         except OSError as err:
             if err.errno != errno.ENOENT:
-                ui.debug("warning: cannot rename '%s' to '%s': %s\n" %
-                         (newpath, oldpath, err.strerror))
+                ui.debug(
+                    b"warning: cannot rename '%s' to '%s': %s\n"
+                    % (newpath, oldpath, err.strerror)
+                )
 
     if maxsize > 0:
         try:
@@ -49,20 +53,24 @@
             if st.st_size >= maxsize:
                 path = vfs.join(name)
                 for i in pycompat.xrange(maxfiles - 1, 1, -1):
-                    rotate(oldpath='%s.%d' % (path, i - 1),
-                           newpath='%s.%d' % (path, i))
-                rotate(oldpath=path,
-                       newpath=maxfiles > 0 and path + '.1')
-    return vfs(name, 'a', makeparentdirs=False)
+                    rotate(
+                        oldpath=b'%s.%d' % (path, i - 1),
+                        newpath=b'%s.%d' % (path, i),
+                    )
+                rotate(oldpath=path, newpath=maxfiles > 0 and path + b'.1')
+    return vfs(name, b'a', makeparentdirs=False)
+
 
 def _formatlogline(msg):
     date = dateutil.datestr(format=b'%Y/%m/%d %H:%M:%S')
     pid = procutil.getpid()
     return b'%s (%d)> %s' % (date, pid, msg)
 
+
 def _matchevent(event, tracked):
     return b'*' in tracked or event in tracked
 
+
 class filelogger(object):
     """Basic logger backed by physical file with optional rotation"""
 
@@ -79,13 +87,20 @@
     def log(self, ui, event, msg, opts):
         line = _formatlogline(msg)
         try:
-            with openlogfile(ui, self._vfs, self._name,
-                             maxfiles=self._maxfiles,
-                             maxsize=self._maxsize) as fp:
+            with openlogfile(
+                ui,
+                self._vfs,
+                self._name,
+                maxfiles=self._maxfiles,
+                maxsize=self._maxsize,
+            ) as fp:
                 fp.write(line)
         except IOError as err:
-            ui.debug(b'cannot write to %s: %s\n'
-                     % (self._name, stringutil.forcebytestr(err)))
+            ui.debug(
+                b'cannot write to %s: %s\n'
+                % (self._name, stringutil.forcebytestr(err))
+            )
+
 
 class fileobjectlogger(object):
     """Basic logger backed by file-like object"""
@@ -103,9 +118,14 @@
             self._fp.write(line)
             self._fp.flush()
         except IOError as err:
-            ui.debug(b'cannot write to %s: %s\n'
-                     % (stringutil.forcebytestr(self._fp.name),
-                        stringutil.forcebytestr(err)))
+            ui.debug(
+                b'cannot write to %s: %s\n'
+                % (
+                    stringutil.forcebytestr(self._fp.name),
+                    stringutil.forcebytestr(err),
+                )
+            )
+
 
 class proxylogger(object):
     """Forward log events to another logger to be set later"""
--- a/mercurial/lsprof.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/lsprof.py	Mon Oct 21 11:09:48 2019 -0400
@@ -3,12 +3,16 @@
 import _lsprof
 import sys
 
+from .pycompat import getattr
+from . import pycompat
+
 Profiler = _lsprof.Profiler
 
 # PyPy doesn't expose profiler_entry from the module.
 profiler_entry = getattr(_lsprof, 'profiler_entry', None)
 
-__all__ = ['profile', 'Stats']
+__all__ = [b'profile', b'Stats']
+
 
 def profile(f, *args, **kwds):
     """XXX docstring"""
@@ -32,9 +36,9 @@
         # profiler_entries isn't defined when running under PyPy.
         if profiler_entry:
             if crit not in profiler_entry.__dict__:
-                raise ValueError("Can't sort by %s" % crit)
+                raise ValueError(b"Can't sort by %s" % crit)
         elif self.data and not getattr(self.data[0], crit, None):
-            raise ValueError("Can't sort by %s" % crit)
+            raise ValueError(b"Can't sort by %s" % crit)
 
         self.data.sort(key=lambda x: getattr(x, crit), reverse=True)
         for e in self.data:
@@ -48,23 +52,46 @@
         d = self.data
         if top is not None:
             d = d[:top]
-        cols = "% 12d %12d %11.4f %11.4f   %s\n"
-        hcols = "% 12s %12s %12s %12s %s\n"
-        file.write(hcols % ("CallCount", "Recursive", "Total(s)",
-                            "Inline(s)", "module:lineno(function)"))
+        cols = b"% 12d %12d %11.4f %11.4f   %s\n"
+        hcols = b"% 12s %12s %12s %12s %s\n"
+        file.write(
+            hcols
+            % (
+                b"CallCount",
+                b"Recursive",
+                b"Total(s)",
+                b"Inline(s)",
+                b"module:lineno(function)",
+            )
+        )
         count = 0
         for e in d:
-            file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
-                               e.inlinetime, label(e.code)))
+            file.write(
+                cols
+                % (
+                    e.callcount,
+                    e.reccallcount,
+                    e.totaltime,
+                    e.inlinetime,
+                    label(e.code),
+                )
+            )
             count += 1
             if limit is not None and count == limit:
                 return
             ccount = 0
             if climit and e.calls:
                 for se in e.calls:
-                    file.write(cols % (se.callcount, se.reccallcount,
-                                       se.totaltime, se.inlinetime,
-                                       "    %s" % label(se.code)))
+                    file.write(
+                        cols
+                        % (
+                            se.callcount,
+                            se.reccallcount,
+                            se.totaltime,
+                            se.inlinetime,
+                            b"    %s" % label(se.code),
+                        )
+                    )
                     count += 1
                     ccount += 1
                     if limit is not None and count == limit:
@@ -87,8 +114,10 @@
                     if not isinstance(se.code, str):
                         e.calls[j] = type(se)((label(se.code),) + se[1:])
 
+
 _fn2mod = {}
 
+
 def label(code):
     if isinstance(code, str):
         if sys.version_info.major >= 3:
@@ -97,7 +126,7 @@
     try:
         mname = _fn2mod[code.co_filename]
     except KeyError:
-        for k, v in list(sys.modules.iteritems()):
+        for k, v in list(pycompat.iteritems(sys.modules)):
             if v is None:
                 continue
             if not isinstance(getattr(v, '__file__', None), str):
@@ -114,14 +143,3 @@
         res = res.encode('latin-1')
 
     return res
-
-if __name__ == '__main__':
-    import os
-    sys.argv = sys.argv[1:]
-    if not sys.argv:
-        print("usage: lsprof.py <script> <arguments...>", file=sys.stderr)
-        sys.exit(2)
-    sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
-    stats = profile(execfile, sys.argv[0], globals(), locals())
-    stats.sort()
-    stats.pprint()
--- a/mercurial/lsprofcalltree.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/lsprofcalltree.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,18 +12,20 @@
 
 from __future__ import absolute_import
 
-from . import (
-    pycompat,
-)
+from . import pycompat
+
 
 def label(code):
     if isinstance(code, str):
         # built-in functions ('~' sorts at the end)
-        return '~' + pycompat.sysbytes(code)
+        return b'~' + pycompat.sysbytes(code)
     else:
-        return '%s %s:%d' % (pycompat.sysbytes(code.co_name),
-                             pycompat.sysbytes(code.co_filename),
-                             code.co_firstlineno)
+        return b'%s %s:%d' % (
+            pycompat.sysbytes(code.co_name),
+            pycompat.sysbytes(code.co_filename),
+            code.co_firstlineno,
+        )
+
 
 class KCacheGrind(object):
     def __init__(self, profiler):
@@ -86,8 +88,9 @@
             out_file.write(b'calls=%d 0\n' % subentry.callcount)
         else:
             out_file.write(b'cfi=%s\n' % pycompat.sysbytes(code.co_filename))
-            out_file.write(b'calls=%d %d\n' % (
-                subentry.callcount, code.co_firstlineno))
+            out_file.write(
+                b'calls=%d %d\n' % (subentry.callcount, code.co_firstlineno)
+            )
 
         totaltime = int(subentry.totaltime * 1000)
         out_file.write(b'%d %d\n' % (lineno, totaltime))
--- a/mercurial/mail.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/mail.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,6 +9,7 @@
 
 import email
 import email.charset
+import email.generator
 import email.header
 import email.message
 import email.parser
@@ -19,6 +20,10 @@
 import time
 
 from .i18n import _
+from .pycompat import (
+    getattr,
+    open,
+)
 from . import (
     encoding,
     error,
@@ -31,11 +36,13 @@
     stringutil,
 )
 
+
 class STARTTLS(smtplib.SMTP):
     '''Derived class to verify the peer certificate for STARTTLS.
 
     This class allows to pass any keyword arguments to SSL socket creation.
     '''
+
     def __init__(self, ui, host=None, **kwargs):
         smtplib.SMTP.__init__(self, **kwargs)
         self._ui = ui
@@ -43,27 +50,32 @@
 
     def starttls(self, keyfile=None, certfile=None):
         if not self.has_extn("starttls"):
-            msg = "STARTTLS extension not supported by server"
+            msg = b"STARTTLS extension not supported by server"
             raise smtplib.SMTPException(msg)
         (resp, reply) = self.docmd("STARTTLS")
         if resp == 220:
-            self.sock = sslutil.wrapsocket(self.sock, keyfile, certfile,
-                                           ui=self._ui,
-                                           serverhostname=self._host)
-            self.file = smtplib.SSLFakeFile(self.sock)
+            self.sock = sslutil.wrapsocket(
+                self.sock,
+                keyfile,
+                certfile,
+                ui=self._ui,
+                serverhostname=self._host,
+            )
+            self.file = self.sock.makefile("rb")
             self.helo_resp = None
             self.ehlo_resp = None
             self.esmtp_features = {}
             self.does_esmtp = 0
         return (resp, reply)
 
+
 class SMTPS(smtplib.SMTP):
     '''Derived class to verify the peer certificate for SMTPS.
 
     This class allows to pass any keyword arguments to SSL socket creation.
     '''
-    def __init__(self, ui, keyfile=None, certfile=None, host=None,
-                 **kwargs):
+
+    def __init__(self, ui, keyfile=None, certfile=None, host=None, **kwargs):
         self.keyfile = keyfile
         self.certfile = certfile
         smtplib.SMTP.__init__(self, **kwargs)
@@ -73,38 +85,44 @@
 
     def _get_socket(self, host, port, timeout):
         if self.debuglevel > 0:
-            self._ui.debug('connect: %r\n' % ((host, port),))
+            self._ui.debug(b'connect: %r\n' % ((host, port),))
         new_socket = socket.create_connection((host, port), timeout)
-        new_socket = sslutil.wrapsocket(new_socket,
-                                        self.keyfile, self.certfile,
-                                        ui=self._ui,
-                                        serverhostname=self._host)
+        new_socket = sslutil.wrapsocket(
+            new_socket,
+            self.keyfile,
+            self.certfile,
+            ui=self._ui,
+            serverhostname=self._host,
+        )
         self.file = new_socket.makefile(r'rb')
         return new_socket
 
+
 def _pyhastls():
     """Returns true iff Python has TLS support, false otherwise."""
     try:
         import ssl
+
         getattr(ssl, 'HAS_TLS', False)
         return True
     except ImportError:
         return False
 
+
 def _smtp(ui):
     '''build an smtp connection and return a function to send mail'''
-    local_hostname = ui.config('smtp', 'local_hostname')
-    tls = ui.config('smtp', 'tls')
+    local_hostname = ui.config(b'smtp', b'local_hostname')
+    tls = ui.config(b'smtp', b'tls')
     # backward compatible: when tls = true, we use starttls.
-    starttls = tls == 'starttls' or stringutil.parsebool(tls)
-    smtps = tls == 'smtps'
+    starttls = tls == b'starttls' or stringutil.parsebool(tls)
+    smtps = tls == b'smtps'
     if (starttls or smtps) and not _pyhastls():
-        raise error.Abort(_("can't use TLS: Python SSL support not installed"))
-    mailhost = ui.config('smtp', 'host')
+        raise error.Abort(_(b"can't use TLS: Python SSL support not installed"))
+    mailhost = ui.config(b'smtp', b'host')
     if not mailhost:
-        raise error.Abort(_('smtp.host not configured - cannot send mail'))
+        raise error.Abort(_(b'smtp.host not configured - cannot send mail'))
     if smtps:
-        ui.note(_('(using smtps)\n'))
+        ui.note(_(b'(using smtps)\n'))
         s = SMTPS(ui, local_hostname=local_hostname, host=mailhost)
     elif starttls:
         s = STARTTLS(ui, local_hostname=local_hostname, host=mailhost)
@@ -114,25 +132,27 @@
         defaultport = 465
     else:
         defaultport = 25
-    mailport = util.getport(ui.config('smtp', 'port', defaultport))
-    ui.note(_('sending mail: smtp host %s, port %d\n') %
-            (mailhost, mailport))
+    mailport = util.getport(ui.config(b'smtp', b'port', defaultport))
+    ui.note(_(b'sending mail: smtp host %s, port %d\n') % (mailhost, mailport))
     s.connect(host=mailhost, port=mailport)
     if starttls:
-        ui.note(_('(using starttls)\n'))
+        ui.note(_(b'(using starttls)\n'))
         s.ehlo()
         s.starttls()
         s.ehlo()
     if starttls or smtps:
-        ui.note(_('(verifying remote certificate)\n'))
+        ui.note(_(b'(verifying remote certificate)\n'))
         sslutil.validatesocket(s.sock)
-    username = ui.config('smtp', 'username')
-    password = ui.config('smtp', 'password')
-    if username and not password:
-        password = ui.getpass()
+    username = ui.config(b'smtp', b'username')
+    password = ui.config(b'smtp', b'password')
+    if username:
+        if password:
+            password = encoding.strfromlocal(password)
+        else:
+            password = ui.getpass()
     if username and password:
-        ui.note(_('(authenticating to mail server as %s)\n') %
-                  (username))
+        ui.note(_(b'(authenticating to mail server as %s)\n') % username)
+        username = encoding.strfromlocal(username)
         try:
             s.login(username, password)
         except smtplib.SMTPException as inst:
@@ -143,84 +163,108 @@
             return s.sendmail(sender, recipients, msg)
         except smtplib.SMTPRecipientsRefused as inst:
             recipients = [r[1] for r in inst.recipients.values()]
-            raise error.Abort('\n' + '\n'.join(recipients))
+            raise error.Abort(b'\n' + b'\n'.join(recipients))
         except smtplib.SMTPException as inst:
             raise error.Abort(inst)
 
     return send
 
+
 def _sendmail(ui, sender, recipients, msg):
     '''send mail using sendmail.'''
-    program = ui.config('email', 'method')
-    stremail = lambda x: stringutil.email(encoding.strtolocal(x))
-    cmdline = '%s -f %s %s' % (program, stremail(sender),
-                               ' '.join(map(stremail, recipients)))
-    ui.note(_('sending mail: %s\n') % cmdline)
-    fp = procutil.popen(cmdline, 'wb')
+    program = ui.config(b'email', b'method')
+
+    def stremail(x):
+        return procutil.shellquote(stringutil.email(encoding.strtolocal(x)))
+
+    cmdline = b'%s -f %s %s' % (
+        program,
+        stremail(sender),
+        b' '.join(map(stremail, recipients)),
+    )
+    ui.note(_(b'sending mail: %s\n') % cmdline)
+    fp = procutil.popen(cmdline, b'wb')
     fp.write(util.tonativeeol(msg))
     ret = fp.close()
     if ret:
-        raise error.Abort('%s %s' % (
-            os.path.basename(program.split(None, 1)[0]),
-            procutil.explainexit(ret)))
+        raise error.Abort(
+            b'%s %s'
+            % (
+                os.path.basename(program.split(None, 1)[0]),
+                procutil.explainexit(ret),
+            )
+        )
+
 
 def _mbox(mbox, sender, recipients, msg):
     '''write mails to mbox'''
-    fp = open(mbox, 'ab+')
+    fp = open(mbox, b'ab+')
     # Should be time.asctime(), but Windows prints 2-characters day
     # of month instead of one. Make them print the same thing.
     date = time.strftime(r'%a %b %d %H:%M:%S %Y', time.localtime())
-    fp.write('From %s %s\n' % (encoding.strtolocal(sender),
-                               encoding.strtolocal(date)))
+    fp.write(
+        b'From %s %s\n'
+        % (encoding.strtolocal(sender), encoding.strtolocal(date))
+    )
     fp.write(msg)
-    fp.write('\n\n')
+    fp.write(b'\n\n')
     fp.close()
 
+
 def connect(ui, mbox=None):
     '''make a mail connection. return a function to send mail.
     call as sendmail(sender, list-of-recipients, msg).'''
     if mbox:
-        open(mbox, 'wb').close()
+        open(mbox, b'wb').close()
         return lambda s, r, m: _mbox(mbox, s, r, m)
-    if ui.config('email', 'method') == 'smtp':
+    if ui.config(b'email', b'method') == b'smtp':
         return _smtp(ui)
     return lambda s, r, m: _sendmail(ui, s, r, m)
 
+
 def sendmail(ui, sender, recipients, msg, mbox=None):
     send = connect(ui, mbox=mbox)
     return send(sender, recipients, msg)
 
+
 def validateconfig(ui):
     '''determine if we have enough config data to try sending email.'''
-    method = ui.config('email', 'method')
-    if method == 'smtp':
-        if not ui.config('smtp', 'host'):
-            raise error.Abort(_('smtp specified as email transport, '
-                               'but no smtp host configured'))
+    method = ui.config(b'email', b'method')
+    if method == b'smtp':
+        if not ui.config(b'smtp', b'host'):
+            raise error.Abort(
+                _(
+                    b'smtp specified as email transport, '
+                    b'but no smtp host configured'
+                )
+            )
     else:
         if not procutil.findexe(method):
-            raise error.Abort(_('%r specified as email transport, '
-                               'but not in PATH') % method)
+            raise error.Abort(
+                _(b'%r specified as email transport, but not in PATH') % method
+            )
+
 
 def codec2iana(cs):
     ''''''
     cs = pycompat.sysbytes(email.charset.Charset(cs).input_charset.lower())
 
     # "latin1" normalizes to "iso8859-1", standard calls for "iso-8859-1"
-    if cs.startswith("iso") and not cs.startswith("iso-"):
-        return "iso-" + cs[3:]
+    if cs.startswith(b"iso") and not cs.startswith(b"iso-"):
+        return b"iso-" + cs[3:]
     return cs
 
-def mimetextpatch(s, subtype='plain', display=False):
+
+def mimetextpatch(s, subtype=b'plain', display=False):
     '''Return MIME message suitable for a patch.
     Charset will be detected by first trying to decode as us-ascii, then utf-8,
     and finally the global encodings. If all those fail, fall back to
     ISO-8859-1, an encoding with that allows all byte sequences.
     Transfer encodings will be used if necessary.'''
 
-    cs = ['us-ascii', 'utf-8', encoding.encoding, encoding.fallbackencoding]
+    cs = [b'us-ascii', b'utf-8', encoding.encoding, encoding.fallbackencoding]
     if display:
-        cs = ['us-ascii']
+        cs = [b'us-ascii']
     for charset in cs:
         try:
             s.decode(pycompat.sysstr(charset))
@@ -228,7 +272,8 @@
         except UnicodeDecodeError:
             pass
 
-    return mimetextqp(s, subtype, "iso-8859-1")
+    return mimetextqp(s, subtype, b"iso-8859-1")
+
 
 def mimetextqp(body, subtype, charset):
     '''Return MIME message.
@@ -236,7 +281,7 @@
     '''
     cs = email.charset.Charset(charset)
     msg = email.message.Message()
-    msg.set_type(pycompat.sysstr('text/' + subtype))
+    msg.set_type(pycompat.sysstr(b'text/' + subtype))
 
     for line in body.splitlines():
         if len(line) > 950:
@@ -254,15 +299,20 @@
 
     return msg
 
+
 def _charsets(ui):
     '''Obtains charsets to send mail parts not containing patches.'''
-    charsets = [cs.lower() for cs in ui.configlist('email', 'charsets')]
-    fallbacks = [encoding.fallbackencoding.lower(),
-                 encoding.encoding.lower(), 'utf-8']
-    for cs in fallbacks: # find unique charsets while keeping order
+    charsets = [cs.lower() for cs in ui.configlist(b'email', b'charsets')]
+    fallbacks = [
+        encoding.fallbackencoding.lower(),
+        encoding.encoding.lower(),
+        b'utf-8',
+    ]
+    for cs in fallbacks:  # find unique charsets while keeping order
         if cs not in charsets:
             charsets.append(cs)
-    return [cs for cs in charsets if not cs.endswith('ascii')]
+    return [cs for cs in charsets if not cs.endswith(b'ascii')]
+
 
 def _encode(ui, s, charsets):
     '''Returns (converted) string, charset tuple.
@@ -281,7 +331,7 @@
             except UnicodeEncodeError:
                 pass
             except LookupError:
-                ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
+                ui.warn(_(b'ignoring invalid sendcharset: %s\n') % ocs)
         else:
             # Everything failed, ascii-armor what we've got and send it.
             return s.encode('ascii', 'backslashreplace')
@@ -302,9 +352,10 @@
                 except UnicodeEncodeError:
                     pass
                 except LookupError:
-                    ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
+                    ui.warn(_(b'ignoring invalid sendcharset: %s\n') % ocs)
     # if ascii, or all conversion attempts fail, send (broken) ascii
-    return s, 'us-ascii'
+    return s, b'us-ascii'
+
 
 def headencode(ui, s, charsets=None, display=False):
     '''Returns RFC-2047 compliant header from given string.'''
@@ -314,74 +365,91 @@
         return str(email.header.Header(s, cs))
     return s
 
+
 def _addressencode(ui, name, addr, charsets=None):
     assert isinstance(addr, bytes)
     name = headencode(ui, name, charsets)
     try:
-        acc, dom = addr.split('@')
+        acc, dom = addr.split(b'@')
         acc.decode('ascii')
         dom = dom.decode(pycompat.sysstr(encoding.encoding)).encode('idna')
-        addr = '%s@%s' % (acc, dom)
+        addr = b'%s@%s' % (acc, dom)
     except UnicodeDecodeError:
-        raise error.Abort(_('invalid email address: %s') % addr)
+        raise error.Abort(_(b'invalid email address: %s') % addr)
     except ValueError:
         try:
             # too strict?
             addr.decode('ascii')
         except UnicodeDecodeError:
-            raise error.Abort(_('invalid local address: %s') % addr)
+            raise error.Abort(_(b'invalid local address: %s') % addr)
     return pycompat.bytesurl(
-        email.utils.formataddr((name, encoding.strfromlocal(addr))))
+        email.utils.formataddr((name, encoding.strfromlocal(addr)))
+    )
+
 
 def addressencode(ui, address, charsets=None, display=False):
     '''Turns address into RFC-2047 compliant header.'''
     if display or not address:
-        return address or ''
+        return address or b''
     name, addr = email.utils.parseaddr(encoding.strfromlocal(address))
     return _addressencode(ui, name, encoding.strtolocal(addr), charsets)
 
+
 def addrlistencode(ui, addrs, charsets=None, display=False):
     '''Turns a list of addresses into a list of RFC-2047 compliant headers.
     A single element of input list may contain multiple addresses, but output
     always has one address per item'''
     for a in addrs:
-        assert isinstance(a, bytes), (r'%r unexpectedly not a bytestr' % a)
+        assert isinstance(a, bytes), r'%r unexpectedly not a bytestr' % a
     if display:
         return [a.strip() for a in addrs if a.strip()]
 
     result = []
     for name, addr in email.utils.getaddresses(
-            [encoding.strfromlocal(a) for a in addrs]):
+        [encoding.strfromlocal(a) for a in addrs]
+    ):
         if name or addr:
             r = _addressencode(ui, name, encoding.strtolocal(addr), charsets)
             result.append(r)
     return result
 
+
 def mimeencode(ui, s, charsets=None, display=False):
     '''creates mime text object, encodes it if needed, and sets
     charset and transfer-encoding accordingly.'''
-    cs = 'us-ascii'
+    cs = b'us-ascii'
     if not display:
         s, cs = _encode(ui, s, charsets)
-    return mimetextqp(s, 'plain', cs)
+    return mimetextqp(s, b'plain', cs)
+
 
 if pycompat.ispy3:
+
+    Generator = email.generator.BytesGenerator
+
     def parse(fp):
         ep = email.parser.Parser()
         # disable the "universal newlines" mode, which isn't binary safe.
         # I have no idea if ascii/surrogateescape is correct, but that's
         # what the standard Python email parser does.
-        fp = io.TextIOWrapper(fp, encoding=r'ascii',
-                              errors=r'surrogateescape', newline=chr(10))
+        fp = io.TextIOWrapper(
+            fp, encoding=r'ascii', errors=r'surrogateescape', newline=chr(10)
+        )
         try:
             return ep.parse(fp)
         finally:
             fp.detach()
+
+
 else:
+
+    Generator = email.generator.Generator
+
     def parse(fp):
         ep = email.parser.Parser()
         return ep.parse(fp)
 
+
 def headdecode(s):
     '''Decodes RFC-2047 header'''
     uparts = []
--- a/mercurial/manifest.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/manifest.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,17 +19,18 @@
     nullid,
     nullrev,
 )
+from .pycompat import getattr
 from . import (
     error,
     mdiff,
     policy,
     pycompat,
-    repository,
     revlog,
     util,
 )
-from .utils import (
-    interfaceutil,
+from .interfaces import (
+    repository,
+    util as interfaceutil,
 )
 
 parsers = policy.importmod(r'parsers')
@@ -38,24 +39,26 @@
 # Allow tests to more easily test the alternate path in manifestdict.fastdelta()
 FASTDELTA_TEXTDIFF_THRESHOLD = 1000
 
+
 def _parse(data):
     # This method does a little bit of excessive-looking
     # precondition checking. This is so that the behavior of this
     # class exactly matches its C counterpart to try and help
     # prevent surprise breakage for anyone that develops against
     # the pure version.
-    if data and data[-1:] != '\n':
-        raise ValueError('Manifest did not end in a newline.')
+    if data and data[-1:] != b'\n':
+        raise ValueError(b'Manifest did not end in a newline.')
     prev = None
     for l in data.splitlines():
         if prev is not None and prev > l:
-            raise ValueError('Manifest lines not in sorted order.')
+            raise ValueError(b'Manifest lines not in sorted order.')
         prev = l
-        f, n = l.split('\0')
+        f, n = l.split(b'\0')
         if len(n) > 40:
             yield f, bin(n[:40]), n[40:]
         else:
-            yield f, bin(n), ''
+            yield f, bin(n), b''
+
 
 def _text(it):
     files = []
@@ -64,10 +67,11 @@
         files.append(f)
         # if this is changed to support newlines in filenames,
         # be sure to check the templates/ dir again (especially *-raw.tmpl)
-        lines.append("%s\0%s%s\n" % (f, hex(n), fl))
+        lines.append(b"%s\0%s%s\n" % (f, hex(n), fl))
 
     _checkforbidden(files)
-    return ''.join(lines)
+    return b''.join(lines)
+
 
 class lazymanifestiter(object):
     def __init__(self, lm):
@@ -86,11 +90,12 @@
             self.pos += 1
             return data[0]
         self.pos += 1
-        zeropos = data.find('\x00', pos)
+        zeropos = data.find(b'\x00', pos)
         return data[pos:zeropos]
 
     __next__ = next
 
+
 class lazymanifestiterentries(object):
     def __init__(self, lm):
         self.lm = lm
@@ -107,24 +112,26 @@
         if pos == -1:
             self.pos += 1
             return data
-        zeropos = data.find('\x00', pos)
-        hashval = unhexlify(data, self.lm.extrainfo[self.pos],
-                            zeropos + 1, 40)
+        zeropos = data.find(b'\x00', pos)
+        hashval = unhexlify(data, self.lm.extrainfo[self.pos], zeropos + 1, 40)
         flags = self.lm._getflags(data, self.pos, zeropos)
         self.pos += 1
         return (data[pos:zeropos], hashval, flags)
 
     __next__ = next
 
+
 def unhexlify(data, extra, pos, length):
-    s = bin(data[pos:pos + length])
+    s = bin(data[pos : pos + length])
     if extra:
-        s += chr(extra & 0xff)
+        s += chr(extra & 0xFF)
     return s
 
+
 def _cmp(a, b):
     return (a > b) - (a < b)
 
+
 class _lazymanifest(object):
     """A pure python manifest backed by a byte string.  It is supplimented with
     internal lists as it is modified, until it is compacted back to a pure byte
@@ -142,8 +149,15 @@
     ``extradata`` is a list of (key, hash, flags) for entries that were added or
     modified since the manifest was created or compacted.
     """
-    def __init__(self, data, positions=None, extrainfo=None, extradata=None,
-                 hasremovals=False):
+
+    def __init__(
+        self,
+        data,
+        positions=None,
+        extrainfo=None,
+        extradata=None,
+        hasremovals=False,
+    ):
         if positions is None:
             self.positions = self.findlines(data)
             self.extrainfo = [0] * len(self.positions)
@@ -160,18 +174,18 @@
     def findlines(self, data):
         if not data:
             return []
-        pos = data.find("\n")
-        if pos == -1 or data[-1:] != '\n':
-            raise ValueError("Manifest did not end in a newline.")
+        pos = data.find(b"\n")
+        if pos == -1 or data[-1:] != b'\n':
+            raise ValueError(b"Manifest did not end in a newline.")
         positions = [0]
-        prev = data[:data.find('\x00')]
+        prev = data[: data.find(b'\x00')]
         while pos < len(data) - 1 and pos != -1:
             positions.append(pos + 1)
-            nexts = data[pos + 1:data.find('\x00', pos + 1)]
+            nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
             if nexts < prev:
-                raise ValueError("Manifest lines not in sorted order.")
+                raise ValueError(b"Manifest lines not in sorted order.")
             prev = nexts
-            pos = data.find("\n", pos + 1)
+            pos = data.find(b"\n", pos + 1)
         return positions
 
     def _get(self, index):
@@ -185,7 +199,7 @@
 
     def _getkey(self, pos):
         if pos >= 0:
-            return self.data[pos:self.data.find('\x00', pos + 1)]
+            return self.data[pos : self.data.find(b'\x00', pos + 1)]
         return self.extradata[-pos - 1][0]
 
     def bsearch(self, key):
@@ -193,7 +207,7 @@
         last = len(self.positions) - 1
 
         while first <= last:
-            midpoint = (first + last)//2
+            midpoint = (first + last) // 2
             nextpos = self.positions[midpoint]
             candidate = self._getkey(nextpos)
             r = _cmp(key, candidate)
@@ -213,7 +227,7 @@
         last = len(self.positions) - 1
 
         while first <= last:
-            midpoint = (first + last)//2
+            midpoint = (first + last) // 2
             nextpos = self.positions[midpoint]
             candidate = self._getkey(nextpos)
             r = _cmp(key, candidate)
@@ -231,23 +245,23 @@
 
     def _getflags(self, data, needle, pos):
         start = pos + 41
-        end = data.find("\n", start)
+        end = data.find(b"\n", start)
         if end == -1:
             end = len(data) - 1
         if start == end:
-            return ''
+            return b''
         return self.data[start:end]
 
     def __getitem__(self, key):
         if not isinstance(key, bytes):
-            raise TypeError("getitem: manifest keys must be a bytes.")
+            raise TypeError(b"getitem: manifest keys must be a bytes.")
         needle = self.bsearch(key)
         if needle == -1:
             raise KeyError
         data, pos = self._get(needle)
         if pos == -1:
             return (data[1], data[2])
-        zeropos = data.find('\x00', pos)
+        zeropos = data.find(b'\x00', pos)
         assert 0 <= needle <= len(self.positions)
         assert len(self.extrainfo) == len(self.positions)
         hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
@@ -259,27 +273,29 @@
         if not found:
             raise KeyError
         cur = self.positions[needle]
-        self.positions = self.positions[:needle] + self.positions[needle + 1:]
-        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
+        self.positions = self.positions[:needle] + self.positions[needle + 1 :]
+        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
         if cur >= 0:
             # This does NOT unsort the list as far as the search functions are
             # concerned, as they only examine lines mapped by self.positions.
-            self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
+            self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
             self.hasremovals = True
 
     def __setitem__(self, key, value):
         if not isinstance(key, bytes):
-            raise TypeError("setitem: manifest keys must be a byte string.")
+            raise TypeError(b"setitem: manifest keys must be a byte string.")
         if not isinstance(value, tuple) or len(value) != 2:
-            raise TypeError("Manifest values must be a tuple of (node, flags).")
+            raise TypeError(
+                b"Manifest values must be a tuple of (node, flags)."
+            )
         hashval = value[0]
         if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
-            raise TypeError("node must be a 20-byte byte string")
+            raise TypeError(b"node must be a 20-byte byte string")
         flags = value[1]
         if len(hashval) == 22:
             hashval = hashval[:-1]
         if not isinstance(flags, bytes) or len(flags) > 1:
-            raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
+            raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
         needle, found = self.bsearch2(key)
         if found:
             # put the item
@@ -293,15 +309,24 @@
         else:
             # not found, put it in with extra positions
             self.extradata.append((key, hashval, value[1]))
-            self.positions = (self.positions[:needle] + [-len(self.extradata)]
-                              + self.positions[needle:])
-            self.extrainfo = (self.extrainfo[:needle] + [0] +
-                              self.extrainfo[needle:])
+            self.positions = (
+                self.positions[:needle]
+                + [-len(self.extradata)]
+                + self.positions[needle:]
+            )
+            self.extrainfo = (
+                self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
+            )
 
     def copy(self):
         # XXX call _compact like in C?
-        return _lazymanifest(self.data, self.positions, self.extrainfo,
-            self.extradata, self.hasremovals)
+        return _lazymanifest(
+            self.data,
+            self.positions,
+            self.extrainfo,
+            self.extradata,
+            self.hasremovals,
+        )
 
     def _compact(self):
         # hopefully not called TOO often
@@ -329,14 +354,16 @@
                     # overwritten first byte.  Break out and find the end of the
                     # current good entry/entries if there is a removed file
                     # before the next position.
-                    if (self.hasremovals
-                        and self.data.find('\n\x00', cur,
-                                           self.positions[i]) != -1):
+                    if (
+                        self.hasremovals
+                        and self.data.find(b'\n\x00', cur, self.positions[i])
+                        != -1
+                    ):
                         break
 
                     offset += self.positions[i] - cur
                     cur = self.positions[i]
-                end_cut = self.data.find('\n', cur)
+                end_cut = self.data.find(b'\n', cur)
                 if end_cut != -1:
                     end_cut += 1
                 offset += end_cut - cur
@@ -351,12 +378,12 @@
                         self.extrainfo[i] = ord(t[1][21])
                     offset += len(l[-1])
                     i += 1
-        self.data = ''.join(l)
+        self.data = b''.join(l)
         self.hasremovals = False
         self.extradata = []
 
     def _pack(self, d):
-        return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
+        return d[0] + b'\x00' + hex(d[1][:20]) + d[2] + b'\n'
 
     def text(self):
         self._compact()
@@ -369,7 +396,7 @@
 
         for fn, e1, flags in self.iterentries():
             if fn not in m2:
-                diff[fn] = (e1, flags), (None, '')
+                diff[fn] = (e1, flags), (None, b'')
             else:
                 e2 = m2[fn]
                 if (e1, flags) != e2:
@@ -379,7 +406,7 @@
 
         for fn, e2, flags in m2.iterentries():
             if fn not in self:
-                diff[fn] = (None, ''), (e2, flags)
+                diff[fn] = (None, b''), (e2, flags)
 
         return diff
 
@@ -397,20 +424,22 @@
 
     def filtercopy(self, filterfn):
         # XXX should be optimized
-        c = _lazymanifest('')
+        c = _lazymanifest(b'')
         for f, n, fl in self.iterentries():
             if filterfn(f):
                 c[f] = n, fl
         return c
 
+
 try:
     _lazymanifest = parsers.lazymanifest
 except AttributeError:
     pass
 
+
 @interfaceutil.implementer(repository.imanifestdict)
 class manifestdict(object):
-    def __init__(self, data=''):
+    def __init__(self, data=b''):
         self._lm = _lazymanifest(data)
 
     def __getitem__(self, key):
@@ -430,7 +459,7 @@
     __bool__ = __nonzero__
 
     def __setitem__(self, key, node):
-        self._lm[key] = node, self.flags(key, '')
+        self._lm[key] = node, self.flags(key, b'')
 
     def __contains__(self, key):
         if key is None:
@@ -456,9 +485,11 @@
             m2 = m2.matches(match)
             return m1.filesnotin(m2)
         diff = self.diff(m2)
-        files = set(filepath
-                    for filepath, hashflags in diff.iteritems()
-                    if hashflags[1][0] is None)
+        files = set(
+            filepath
+            for filepath, hashflags in pycompat.iteritems(diff)
+            if hashflags[1][0] is None
+        )
         return files
 
     @propertycache
@@ -475,8 +506,10 @@
         '''Checks whether we can correctly and quickly iterate over matcher
         files instead of over manifest files.'''
         files = match.files()
-        return (len(files) < 100 and (match.isexact() or
-            (match.prefix() and all(fn in self for fn in files))))
+        return len(files) < 100 and (
+            match.isexact()
+            or (match.prefix() and all(fn in self for fn in files))
+        )
 
     def walk(self, match):
         '''Generates matching file names.
@@ -508,7 +541,7 @@
 
         # for dirstate.walk, files=[''] means "walk the whole tree".
         # follow that here, too
-        fset.discard('')
+        fset.discard(b'')
 
         for fn in sorted(fset):
             if not self.hasdir(fn):
@@ -561,7 +594,7 @@
         except KeyError:
             return default
 
-    def flags(self, key, default=''):
+    def flags(self, key, default=b''):
         try:
             return self._lm[key][1]
         except KeyError:
@@ -592,7 +625,7 @@
         delta = []
         dstart = None
         dend = None
-        dline = [""]
+        dline = [b""]
         start = 0
         # zero copy representation of base as a buffer
         addbuf = util.buffer(base)
@@ -606,13 +639,14 @@
                 start, end = _msearch(addbuf, f, start)
                 if not todelete:
                     h, fl = self._lm[f]
-                    l = "%s\0%s%s\n" % (f, hex(h), fl)
+                    l = b"%s\0%s%s\n" % (f, hex(h), fl)
                 else:
                     if start == end:
                         # item we want to delete was not found, error out
                         raise AssertionError(
-                                _("failed to remove %s from manifest") % f)
-                    l = ""
+                            _(b"failed to remove %s from manifest") % f
+                        )
+                    l = b""
                 if dstart is not None and dstart <= start and dend >= start:
                     if dend < end:
                         dend = end
@@ -620,13 +654,13 @@
                         dline.append(l)
                 else:
                     if dstart is not None:
-                        delta.append([dstart, dend, "".join(dline)])
+                        delta.append([dstart, dend, b"".join(dline)])
                     dstart = start
                     dend = end
                     dline = [l]
 
             if dstart is not None:
-                delta.append([dstart, dend, "".join(dline)])
+                delta.append([dstart, dend, b"".join(dline)])
             # apply the delta to the base, and get a delta for addrevision
             deltatext, arraytext = _addlistdelta(base, delta)
         else:
@@ -634,10 +668,12 @@
             # diff it.
             arraytext = bytearray(self.text())
             deltatext = mdiff.textdiff(
-                util.buffer(base), util.buffer(arraytext))
+                util.buffer(base), util.buffer(arraytext)
+            )
 
         return arraytext, deltatext
 
+
 def _msearch(m, s, lo=0, hi=None):
     '''return a tuple (start, end) that says where to find s within m.
 
@@ -647,10 +683,12 @@
 
     m should be a buffer, a memoryview or a byte string.
     s is a byte string'''
+
     def advance(i, c):
-        while i < lenm and m[i:i + 1] != c:
+        while i < lenm and m[i : i + 1] != c:
             i += 1
         return i
+
     if not s:
         return (lo, lo)
     lenm = len(m)
@@ -659,32 +697,34 @@
     while lo < hi:
         mid = (lo + hi) // 2
         start = mid
-        while start > 0 and m[start - 1:start] != '\n':
+        while start > 0 and m[start - 1 : start] != b'\n':
             start -= 1
-        end = advance(start, '\0')
+        end = advance(start, b'\0')
         if bytes(m[start:end]) < s:
             # we know that after the null there are 40 bytes of sha1
             # this translates to the bisect lo = mid + 1
-            lo = advance(end + 40, '\n') + 1
+            lo = advance(end + 40, b'\n') + 1
         else:
             # this translates to the bisect hi = mid
             hi = start
-    end = advance(lo, '\0')
+    end = advance(lo, b'\0')
     found = m[lo:end]
     if s == found:
         # we know that after the null there are 40 bytes of sha1
-        end = advance(end + 40, '\n')
+        end = advance(end + 40, b'\n')
         return (lo, end + 1)
     else:
         return (lo, lo)
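
`_msearch` above bisects the raw manifest text without splitting it, relying on the fixed record layout: lines are sorted by path and each reads `<path>\0<40 hex digits><flags>\n`, so the code can skip 40 bytes after the NUL unconditionally. A standalone illustration of that layout (a linear scanner, not the bisect itself):

    def records(blob):
        pos = 0
        while pos < len(blob):
            nul = blob.index(b'\x00', pos)
            end = blob.index(b'\n', nul + 41)  # 40 hex digits, then flags
            yield blob[pos:nul], blob[nul + 1:nul + 41], blob[nul + 41:end]
            pos = end + 1

    blob = (b'bar\x00' + b'0' * 40 + b'\n' +
            b'foo\x00' + b'1' * 40 + b'x\n')
    assert [path for path, hexnode, flags in records(blob)] == [b'bar', b'foo']
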
 
+
 def _checkforbidden(l):
     """Check filenames for illegal characters."""
     for f in l:
-        if '\n' in f or '\r' in f:
+        if b'\n' in f or b'\r' in f:
             raise error.StorageError(
-                _("'\\n' and '\\r' disallowed in filenames: %r")
-                % pycompat.bytestr(f))
+                _(b"'\\n' and '\\r' disallowed in filenames: %r")
+                % pycompat.bytestr(f)
+            )
 
 
 # apply the changes collected during the bisect loop to our addlist
@@ -704,21 +744,26 @@
 
     newaddlist += addlist[currentposition:]
 
-    deltatext = "".join(struct.pack(">lll", start, end, len(content))
-                   + content for start, end, content in x)
+    deltatext = b"".join(
+        struct.pack(b">lll", start, end, len(content)) + content
+        for start, end, content in x
+    )
     return deltatext, newaddlist
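
The `deltatext` assembled above is a sequence of chunks, each a 12-byte big-endian `(start, end, length)` header followed by `length` bytes of replacement data to splice into the base text. A hedged sketch of applying such a delta (`applydelta` is illustrative; Mercurial does this in `mdiff`):

    import struct

    def applydelta(base, delta):
        out, last, pos = [], 0, 0
        while pos < len(delta):
            start, end, length = struct.unpack('>lll', delta[pos:pos + 12])
            out.append(base[last:start])                   # unchanged prefix
            out.append(delta[pos + 12:pos + 12 + length])  # replacement
            last, pos = end, pos + 12 + length
        out.append(base[last:])
        return b''.join(out)

    delta = struct.pack('>lll', 0, 2, 3) + b'xyz'
    assert applydelta(b'abcdef', delta) == b'xyzcdef'
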
 
+
 def _splittopdir(f):
-    if '/' in f:
-        dir, subpath = f.split('/', 1)
-        return dir + '/', subpath
+    if b'/' in f:
+        dir, subpath = f.split(b'/', 1)
+        return dir + b'/', subpath
     else:
-        return '', f
+        return b'', f
+
 
 _noop = lambda s: None
 
+
 class treemanifest(object):
-    def __init__(self, dir='', text=''):
+    def __init__(self, dir=b'', text=b''):
         self._dir = dir
         self._node = nullid
         self._loadfunc = _noop
@@ -730,18 +775,23 @@
         self._files = {}
         self._flags = {}
         if text:
+
             def readsubtree(subdir, subm):
-                raise AssertionError('treemanifest constructor only accepts '
-                                     'flat manifests')
+                raise AssertionError(
+                    b'treemanifest constructor only accepts flat manifests'
+                )
+
             self.parse(text, readsubtree)
-            self._dirty = True # Mark flat manifest dirty after parsing
+            self._dirty = True  # Mark flat manifest dirty after parsing
 
     def _subpath(self, path):
         return self._dir + path
 
     def _loadalllazy(self):
         selfdirs = self._dirs
-        for d, (path, node, readsubtree, docopy) in self._lazydirs.iteritems():
+        for d, (path, node, readsubtree, docopy) in pycompat.iteritems(
+            self._lazydirs
+        ):
             if docopy:
                 selfdirs[d] = readsubtree(path, node).copy()
             else:
@@ -761,13 +811,13 @@
     def _loadchildrensetlazy(self, visit):
         if not visit:
             return None
-        if visit == 'all' or visit == 'this':
+        if visit == b'all' or visit == b'this':
             self._loadalllazy()
             return None
 
         loadlazy = self._loadlazy
         for k in visit:
-            loadlazy(k + '/')
+            loadlazy(k + b'/')
         return visit
 
     def _loaddifflazy(self, t1, t2):
@@ -780,11 +830,11 @@
           differs, load it in both
         """
         toloadlazy = []
-        for d, v1 in t1._lazydirs.iteritems():
+        for d, v1 in pycompat.iteritems(t1._lazydirs):
             v2 = t2._lazydirs.get(d)
             if not v2 or v2[1] != v1[1]:
                 toloadlazy.append(d)
-        for d, v1 in t2._lazydirs.iteritems():
+        for d, v1 in pycompat.iteritems(t2._lazydirs):
             if d not in t1._lazydirs:
                 toloadlazy.append(d)
 
@@ -807,20 +857,23 @@
     __bool__ = __nonzero__
 
     def _isempty(self):
-        self._load() # for consistency; already loaded by all callers
+        self._load()  # for consistency; already loaded by all callers
         # See if we can skip loading everything.
-        if self._files or (self._dirs and
-                           any(not m._isempty() for m in self._dirs.values())):
+        if self._files or (
+            self._dirs and any(not m._isempty() for m in self._dirs.values())
+        ):
             return False
         self._loadalllazy()
-        return (not self._dirs or
-                all(m._isempty() for m in self._dirs.values()))
+        return not self._dirs or all(m._isempty() for m in self._dirs.values())
 
     def __repr__(self):
-        return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
-                (self._dir, hex(self._node),
-                 bool(self._loadfunc is _noop),
-                 self._dirty, id(self)))
+        return b'<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' % (
+            self._dir,
+            hex(self._node),
+            bool(self._loadfunc is _noop),
+            self._dirty,
+            id(self),
+        )
 
     def dir(self):
         '''The directory that this tree manifest represents, including a
@@ -841,10 +894,11 @@
     def iterentries(self):
         self._load()
         self._loadalllazy()
-        for p, n in sorted(itertools.chain(self._dirs.items(),
-                                           self._files.items())):
+        for p, n in sorted(
+            itertools.chain(self._dirs.items(), self._files.items())
+        ):
             if p in self._files:
-                yield self._subpath(p), n, self._flags.get(p, '')
+                yield self._subpath(p), n, self._flags.get(p, b'')
             else:
                 for x in n.iterentries():
                     yield x
@@ -852,12 +906,13 @@
     def items(self):
         self._load()
         self._loadalllazy()
-        for p, n in sorted(itertools.chain(self._dirs.items(),
-                                           self._files.items())):
+        for p, n in sorted(
+            itertools.chain(self._dirs.items(), self._files.items())
+        ):
             if p in self._files:
                 yield self._subpath(p), n
             else:
-                for f, sn in n.iteritems():
+                for f, sn in pycompat.iteritems(n):
                     yield f, sn
 
     iteritems = items
@@ -922,12 +977,12 @@
             self._loadlazy(dir)
 
             if dir not in self._dirs:
-                return ''
+                return b''
             return self._dirs[dir].flags(subpath)
         else:
             if f in self._lazydirs or f in self._dirs:
-                return ''
-            return self._flags.get(f, '')
+                return b''
+            return self._flags.get(f, b'')
 
     def find(self, f):
         self._load()
@@ -937,7 +992,7 @@
 
             return self._dirs[dir].find(subpath)
         else:
-            return self._files[f], self._flags.get(f, '')
+            return self._files[f], self._flags.get(f, b'')
 
     def __delitem__(self, f):
         self._load()
@@ -965,7 +1020,7 @@
                 self._dirs[dir] = treemanifest(self._subpath(dir))
             self._dirs[dir].__setitem__(subpath, n)
         else:
-            self._files[f] = n[:21] # to match manifestdict's behavior
+            self._files[f] = n[:21]  # to match manifestdict's behavior
         self._dirty = True
 
     def _load(self):
@@ -994,15 +1049,19 @@
         copy._node = self._node
         copy._dirty = self._dirty
         if self._copyfunc is _noop:
+
             def _copyfunc(s):
                 self._load()
-                s._lazydirs = {d: (p, n, r, True) for
-                               d, (p, n, r, c) in self._lazydirs.iteritems()}
+                s._lazydirs = {
+                    d: (p, n, r, True)
+                    for d, (p, n, r, c) in pycompat.iteritems(self._lazydirs)
+                }
                 sdirs = s._dirs
-                for d, v in self._dirs.iteritems():
+                for d, v in pycompat.iteritems(self._dirs):
                     sdirs[d] = v.copy()
                 s._files = dict.copy(self._files)
                 s._flags = dict.copy(self._flags)
+
             if self._loadfunc is _noop:
                 _copyfunc(copy)
             else:
@@ -1019,13 +1078,14 @@
             return m1.filesnotin(m2)
 
         files = set()
+
         def _filesnotin(t1, t2):
             if t1._node == t2._node and not t1._dirty and not t2._dirty:
                 return
             t1._load()
             t2._load()
             self._loaddifflazy(t1, t2)
-            for d, m1 in t1._dirs.iteritems():
+            for d, m1 in pycompat.iteritems(t1._dirs):
                 if d in t2._dirs:
                     m2 = t2._dirs[d]
                     _filesnotin(m1, m2)
@@ -1054,7 +1114,7 @@
             if topdir in self._dirs:
                 return self._dirs[topdir].hasdir(subdir)
             return False
-        dirslash = dir + '/'
+        dirslash = dir + b'/'
         return dirslash in self._dirs or dirslash in self._lazydirs
 
     def walk(self, match):
@@ -1080,7 +1140,7 @@
 
         # for dirstate.walk, files=[''] means "walk the whole tree".
         # follow that here, too
-        fset.discard('')
+        fset.discard(b'')
 
         for fn in sorted(fset):
             if not self.hasdir(fn):
@@ -1117,7 +1177,7 @@
         '''
 
         visit = match.visitchildrenset(self._dir[:-1])
-        if visit == 'all':
+        if visit == b'all':
             return self.copy()
         ret = treemanifest(self._dir)
         if not visit:
@@ -1130,7 +1190,7 @@
             # If visit == 'this', we should obviously look at the files in this
             # directory; if visit is a set, and fn is in it, we should inspect
             # fn (but no need to inspect things not in the set).
-            if visit != 'this' and fn not in visit:
+            if visit != b'this' and fn not in visit:
                 continue
             fullp = self._subpath(fn)
             # visitchildrenset isn't perfect, we still need to call the regular
@@ -1142,7 +1202,7 @@
                 ret._flags[fn] = self._flags[fn]
 
         visit = self._loadchildrensetlazy(visit)
-        for dir, subm in self._dirs.iteritems():
+        for dir, subm in pycompat.iteritems(self._dirs):
             if visit and dir[:-1] not in visit:
                 continue
             m = subm._matches(match)
@@ -1184,27 +1244,27 @@
             t2._load()
             self._loaddifflazy(t1, t2)
 
-            for d, m1 in t1._dirs.iteritems():
+            for d, m1 in pycompat.iteritems(t1._dirs):
                 m2 = t2._dirs.get(d, emptytree)
                 stack.append((m1, m2))
 
-            for d, m2 in t2._dirs.iteritems():
+            for d, m2 in pycompat.iteritems(t2._dirs):
                 if d not in t1._dirs:
                     stack.append((emptytree, m2))
 
-            for fn, n1 in t1._files.iteritems():
-                fl1 = t1._flags.get(fn, '')
+            for fn, n1 in pycompat.iteritems(t1._files):
+                fl1 = t1._flags.get(fn, b'')
                 n2 = t2._files.get(fn, None)
-                fl2 = t2._flags.get(fn, '')
+                fl2 = t2._flags.get(fn, b'')
                 if n1 != n2 or fl1 != fl2:
                     result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                 elif clean:
                     result[t1._subpath(fn)] = None
 
-            for fn, n2 in t2._files.iteritems():
+            for fn, n2 in pycompat.iteritems(t2._files):
                 if fn not in t1._files:
-                    fl2 = t2._flags.get(fn, '')
-                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
+                    fl2 = t2._flags.get(fn, b'')
+                    result[t2._subpath(fn)] = ((None, b''), (n2, fl2))
 
         stackls = []
         _iterativediff(self, m2, stackls)
@@ -1221,12 +1281,12 @@
         selflazy = self._lazydirs
         subpath = self._subpath
         for f, n, fl in _parse(text):
-            if fl == 't':
-                f = f + '/'
+            if fl == b't':
+                f = f + b'/'
                 # False below means "doesn't need to be copied" and can use the
                 # cached value from readsubtree directly.
                 selflazy[f] = (subpath(f), n, readsubtree, False)
-            elif '/' in f:
+            elif b'/' in f:
                 # This is a flat manifest, so use __setitem__ and setflag rather
                 # than assigning directly to _files and _flags, so we can
                 # assign a path in a subdirectory, and to mark dirty (compared
@@ -1252,8 +1312,10 @@
         """
         self._load()
         flags = self.flags
-        lazydirs = [(d[:-1], v[1], 't') for d, v in self._lazydirs.iteritems()]
-        dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
+        lazydirs = [
+            (d[:-1], v[1], b't') for d, v in pycompat.iteritems(self._lazydirs)
+        ]
+        dirs = [(d[:-1], self._dirs[d]._node, b't') for d in self._dirs]
         files = [(f, self._files[f], flags(f)) for f in self._files]
         return _text(sorted(dirs + files + lazydirs))
 
@@ -1261,13 +1323,15 @@
         def _load_for_read(s):
             s.parse(gettext(), readsubtree)
             s._dirty = False
+
         self._loadfunc = _load_for_read
 
     def writesubtrees(self, m1, m2, writesubtree, match):
-        self._load() # for consistency; should never have any effect here
+        self._load()  # for consistency; should never have any effect here
         m1._load()
         m2._load()
         emptytree = treemanifest()
+
         def getnode(m, d):
             ld = m._lazydirs.get(d)
             if ld:
@@ -1277,9 +1341,9 @@
         # let's skip investigating things that `match` says we do not need.
         visit = match.visitchildrenset(self._dir[:-1])
         visit = self._loadchildrensetlazy(visit)
-        if visit == 'this' or visit == 'all':
+        if visit == b'this' or visit == b'all':
             visit = None
-        for d, subm in self._dirs.iteritems():
+        for d, subm in pycompat.iteritems(self._dirs):
             if visit and d[:-1] not in visit:
                 continue
             subp1 = getnode(m1, d)
@@ -1302,10 +1366,11 @@
         self._load()
         # OPT: use visitchildrenset to avoid loading everything.
         self._loadalllazy()
-        for d, subm in self._dirs.iteritems():
+        for d, subm in pycompat.iteritems(self._dirs):
             for subtree in subm.walksubtrees(matcher=matcher):
                 yield subtree
 
+
 class manifestfulltextcache(util.lrucachedict):
     """File-backed LRU cache for the manifest cache
 
@@ -1317,7 +1382,7 @@
 
     """
 
-    _file = 'manifestfulltextcache'
+    _file = b'manifestfulltextcache'
 
     def __init__(self, max):
         super(manifestfulltextcache, self).__init__(max)
@@ -1338,7 +1403,7 @@
                     if len(node) < 20:
                         break
                     try:
-                        size = struct.unpack('>L', fp.read(4))[0]
+                        size = struct.unpack(b'>L', fp.read(4))[0]
                     except struct.error:
                         break
                     value = bytearray(fp.read(size))
@@ -1356,13 +1421,14 @@
         if not self._dirty or self._opener is None:
             return
         # rotate backwards to the first used node
-        with self._opener(self._file, 'w', atomictemp=True, checkambig=True
-            ) as fp:
+        with self._opener(
+            self._file, b'w', atomictemp=True, checkambig=True
+        ) as fp:
             node = self._head.prev
             while True:
                 if node.key in self._cache:
                     fp.write(node.key)
-                    fp.write(struct.pack('>L', len(node.value)))
+                    fp.write(struct.pack(b'>L', len(node.value)))
                     fp.write(node.value)
                 if node is self._head:
                     break
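
The read and write loops above fix the on-disk layout of the cache file: each record is a 20-byte node, a 4-byte big-endian length, then the manifest text. A standalone round-trip sketch of that layout (illustrative helpers, not the cache class itself):

    import io
    import struct

    def writerecords(fp, records):
        for node, value in records:
            fp.write(node)
            fp.write(struct.pack('>L', len(value)))
            fp.write(value)

    def readrecords(fp):
        while True:
            node = fp.read(20)
            if len(node) < 20:
                break
            size = struct.unpack('>L', fp.read(4))[0]
            yield node, fp.read(size)

    buf = io.BytesIO()
    writerecords(buf, [(b'\x00' * 20, b'manifest text')])
    buf.seek(0)
    assert list(readrecords(buf)) == [(b'\x00' * 20, b'manifest text')]
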
@@ -1417,17 +1483,26 @@
             self.write()
         self._read = False
 
+
 # an upper bound of what we expect from compression
 # (real-life value seems to be "3")
 MAXCOMPRESSION = 3
 
+
 @interfaceutil.implementer(repository.imanifeststorage)
 class manifestrevlog(object):
     '''A revlog that stores manifest texts. This is responsible for caching the
     full-text manifest contents.
     '''
-    def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
-                 treemanifest=False):
+
+    def __init__(
+        self,
+        opener,
+        tree=b'',
+        dirlogcache=None,
+        indexfile=None,
+        treemanifest=False,
+    ):
         """Constructs a new manifest revlog
 
         `indexfile` - used by extensions to have two manifests at once, like
@@ -1445,20 +1520,20 @@
         optiontreemanifest = False
         opts = getattr(opener, 'options', None)
         if opts is not None:
-            cachesize = opts.get('manifestcachesize', cachesize)
-            optiontreemanifest = opts.get('treemanifest', False)
+            cachesize = opts.get(b'manifestcachesize', cachesize)
+            optiontreemanifest = opts.get(b'treemanifest', False)
 
         self._treeondisk = optiontreemanifest or treemanifest
 
         self._fulltextcache = manifestfulltextcache(cachesize)
 
         if tree:
-            assert self._treeondisk, 'opts is %r' % opts
+            assert self._treeondisk, b'opts is %r' % opts
 
         if indexfile is None:
-            indexfile = '00manifest.i'
+            indexfile = b'00manifest.i'
             if tree:
-                indexfile = "meta/" + tree + indexfile
+                indexfile = b"meta/" + tree + indexfile
 
         self.tree = tree
 
@@ -1466,13 +1541,16 @@
         if tree:
             self._dirlogcache = dirlogcache
         else:
-            self._dirlogcache = {'': self}
+            self._dirlogcache = {b'': self}
 
-        self._revlog = revlog.revlog(opener, indexfile,
-                                     # only root indexfile is cached
-                                     checkambig=not bool(tree),
-                                     mmaplargeindex=True,
-                                     upperboundcomp=MAXCOMPRESSION)
+        self._revlog = revlog.revlog(
+            opener,
+            indexfile,
+            # only root indexfile is cached
+            checkambig=not bool(tree),
+            mmaplargeindex=True,
+            upperboundcomp=MAXCOMPRESSION,
+        )
 
         self.index = self._revlog.index
         self.version = self._revlog.version
@@ -1480,7 +1558,7 @@
 
     def _setupmanifestcachehooks(self, repo):
         """Persist the manifestfulltextcache on lock release"""
-        if not util.safehasattr(repo, '_wlockref'):
+        if not util.safehasattr(repo, b'_wlockref'):
             return
 
         self._fulltextcache._opener = repo.wcachevfs
@@ -1515,15 +1593,25 @@
         if d:
             assert self._treeondisk
         if d not in self._dirlogcache:
-            mfrevlog = manifestrevlog(self.opener, d,
-                                      self._dirlogcache,
-                                      treemanifest=self._treeondisk)
+            mfrevlog = manifestrevlog(
+                self.opener, d, self._dirlogcache, treemanifest=self._treeondisk
+            )
             self._dirlogcache[d] = mfrevlog
         return self._dirlogcache[d]
 
-    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None,
-            match=None):
-        if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
+    def add(
+        self,
+        m,
+        transaction,
+        link,
+        p1,
+        p2,
+        added,
+        removed,
+        readtree=None,
+        match=None,
+    ):
+        if p1 in self.fulltextcache and util.safehasattr(m, b'fastdelta'):
             # If our first parent is in the manifest cache, we can
             # compute a delta here using properties we know about the
             # manifest up-front, which may save time later for the
@@ -1531,26 +1619,30 @@
 
             _checkforbidden(added)
             # combine the changed lists into one sorted iterator
-            work = heapq.merge([(x, False) for x in sorted(added)],
-                               [(x, True) for x in sorted(removed)])
+            work = heapq.merge(
+                [(x, False) for x in sorted(added)],
+                [(x, True) for x in sorted(removed)],
+            )
 
             arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
             cachedelta = self._revlog.rev(p1), deltatext
             text = util.buffer(arraytext)
-            n = self._revlog.addrevision(text, transaction, link, p1, p2,
-                                         cachedelta)
+            n = self._revlog.addrevision(
+                text, transaction, link, p1, p2, cachedelta
+            )
         else:
             # The first parent manifest isn't already loaded, so we'll
             # just encode a fulltext of the manifest and pass that
             # through to the revlog layer, and let it handle the delta
             # process.
             if self._treeondisk:
-                assert readtree, "readtree must be set for treemanifest writes"
-                assert match, "match must be specified for treemanifest writes"
+                assert readtree, b"readtree must be set for treemanifest writes"
+                assert match, b"match must be specified for treemanifest writes"
                 m1 = readtree(self.tree, p1)
                 m2 = readtree(self.tree, p2)
-                n = self._addtree(m, transaction, link, m1, m2, readtree,
-                                  match=match)
+                n = self._addtree(
+                    m, transaction, link, m1, m2, readtree, match=match
+                )
                 arraytext = None
             else:
                 text = m.text()
@@ -1565,17 +1657,29 @@
     def _addtree(self, m, transaction, link, m1, m2, readtree, match):
         # If the manifest is unchanged compared to one parent,
         # don't write a new revision
-        if self.tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(
-            m2)):
+        if self.tree != b'' and (
+            m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
+        ):
             return m.node()
+
         def writesubtree(subm, subp1, subp2, match):
             sublog = self.dirlog(subm.dir())
-            sublog.add(subm, transaction, link, subp1, subp2, None, None,
-                       readtree=readtree, match=match)
+            sublog.add(
+                subm,
+                transaction,
+                link,
+                subp1,
+                subp2,
+                None,
+                None,
+                readtree=readtree,
+                match=match,
+            )
+
         m.writesubtrees(m1, m2, writesubtree, match)
         text = m.dirtext()
         n = None
-        if self.tree != '':
+        if self.tree != b'':
             # Double-check whether contents are unchanged to one parent
             if text == m1.dirtext():
                 n = m1.node()
@@ -1583,8 +1687,9 @@
                 n = m2.node()
 
         if not n:
-            n = self._revlog.addrevision(text, transaction, link, m1.node(),
-                                         m2.node())
+            n = self._revlog.addrevision(
+                text, transaction, link, m1.node(), m2.node()
+            )
 
         # Save nodeid so parent manifest can calculate its nodeid
         m.setnode(n)
@@ -1620,6 +1725,9 @@
     def revision(self, node, _df=None, raw=False):
         return self._revlog.revision(node, _df=_df, raw=raw)
 
+    def rawdata(self, node, _df=None):
+        return self._revlog.rawdata(node, _df=_df)
+
     def revdiff(self, rev1, rev2):
         return self._revlog.revdiff(rev1, rev2)
 
@@ -1629,17 +1737,26 @@
     def deltaparent(self, rev):
         return self._revlog.deltaparent(rev)
 
-    def emitrevisions(self, nodes, nodesorder=None,
-                      revisiondata=False, assumehaveparentrevisions=False,
-                      deltamode=repository.CG_DELTAMODE_STD):
+    def emitrevisions(
+        self,
+        nodes,
+        nodesorder=None,
+        revisiondata=False,
+        assumehaveparentrevisions=False,
+        deltamode=repository.CG_DELTAMODE_STD,
+    ):
         return self._revlog.emitrevisions(
-            nodes, nodesorder=nodesorder, revisiondata=revisiondata,
+            nodes,
+            nodesorder=nodesorder,
+            revisiondata=revisiondata,
             assumehaveparentrevisions=assumehaveparentrevisions,
-            deltamode=deltamode)
+            deltamode=deltamode,
+        )
 
     def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
-        return self._revlog.addgroup(deltas, linkmapper, transaction,
-                                     addrevisioncb=addrevisioncb)
+        return self._revlog.addgroup(
+            deltas, linkmapper, transaction, addrevisioncb=addrevisioncb
+        )
 
     def rawsize(self, rev):
         return self._revlog.rawsize(rev)
@@ -1655,17 +1772,25 @@
 
     def clone(self, tr, destrevlog, **kwargs):
         if not isinstance(destrevlog, manifestrevlog):
-            raise error.ProgrammingError('expected manifestrevlog to clone()')
+            raise error.ProgrammingError(b'expected manifestrevlog to clone()')
 
         return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
 
-    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
-                    revisionscount=False, trackedsize=False,
-                    storedsize=False):
+    def storageinfo(
+        self,
+        exclusivefiles=False,
+        sharedfiles=False,
+        revisionscount=False,
+        trackedsize=False,
+        storedsize=False,
+    ):
         return self._revlog.storageinfo(
-            exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
-            revisionscount=revisionscount, trackedsize=trackedsize,
-            storedsize=storedsize)
+            exclusivefiles=exclusivefiles,
+            sharedfiles=sharedfiles,
+            revisionscount=revisionscount,
+            trackedsize=trackedsize,
+            storedsize=storedsize,
+        )
 
     @property
     def indexfile(self):
@@ -1683,6 +1808,7 @@
     def opener(self, value):
         self._revlog.opener = value
 
+
 @interfaceutil.implementer(repository.imanifestlog)
 class manifestlog(object):
     """A collection class representing the collection of manifest snapshots
@@ -1692,14 +1818,15 @@
     of the list of files in the given commit. Consumers of the output of this
     class do not care about the implementation details of the actual manifests
     they receive (i.e. tree or flat or lazily loaded, etc)."""
+
     def __init__(self, opener, repo, rootstore, narrowmatch):
         usetreemanifest = False
         cachesize = 4
 
         opts = getattr(opener, 'options', None)
         if opts is not None:
-            usetreemanifest = opts.get('treemanifest', usetreemanifest)
-            cachesize = opts.get('manifestcachesize', cachesize)
+            usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
+            cachesize = opts.get(b'manifestcachesize', cachesize)
 
         self._treemanifests = usetreemanifest
 
@@ -1709,7 +1836,7 @@
 
         # A cache of the manifestctx or treemanifestctx for each directory
         self._dirmancache = {}
-        self._dirmancache[''] = util.lrucachedict(cachesize)
+        self._dirmancache[b''] = util.lrucachedict(cachesize)
 
         self._cachesize = cachesize
 
@@ -1717,7 +1844,7 @@
         """Retrieves the manifest instance for the given node. Throws a
         LookupError if not found.
         """
-        return self.get('', node)
+        return self.get(b'', node)
 
     def get(self, tree, node, verify=True):
         """Retrieves the manifest instance for the given node. Throws a
@@ -1742,15 +1869,19 @@
                 m = treemanifestctx(self, tree, node)
             else:
                 raise error.Abort(
-                        _("cannot ask for manifest directory '%s' in a flat "
-                          "manifest") % tree)
+                    _(
+                        b"cannot ask for manifest directory '%s' in a flat "
+                        b"manifest"
+                    )
+                    % tree
+                )
         else:
             if verify:
                 # Side-effect is LookupError is raised if node doesn't exist.
                 self._rootstore.rev(node)
 
             if self._treemanifests:
-                m = treemanifestctx(self, '', node)
+                m = treemanifestctx(self, b'', node)
             else:
                 m = manifestctx(self, node)
 
@@ -1772,6 +1903,7 @@
     def rev(self, node):
         return self._rootstore.rev(node)
 
+
 @interfaceutil.implementer(repository.imanifestrevisionwritable)
 class memmanifestctx(object):
     def __init__(self, manifestlog):
@@ -1793,14 +1925,24 @@
         return self._manifestdict
 
     def write(self, transaction, link, p1, p2, added, removed, match=None):
-        return self._storage().add(self._manifestdict, transaction, link,
-                                   p1, p2, added, removed, match=match)
+        return self._storage().add(
+            self._manifestdict,
+            transaction,
+            link,
+            p1,
+            p2,
+            added,
+            removed,
+            match=match,
+        )
+
 
 @interfaceutil.implementer(repository.imanifestrevisionstored)
 class manifestctx(object):
     """A class representing a single revision of a manifest, including its
     contents, its parent revs, and its linkrev.
     """
+
     def __init__(self, manifestlog, node):
         self._manifestlog = manifestlog
         self._data = None
@@ -1810,9 +1952,9 @@
         # TODO: We eventually want p1, p2, and linkrev exposed on this class,
         # but let's add it later when something needs it and we can load it
         # lazily.
-        #self.p1, self.p2 = store.parents(node)
-        #rev = store.rev(node)
-        #self.linkrev = store.linkrev(rev)
+        # self.p1, self.p2 = store.parents(node)
+        # rev = store.rev(node)
+        # self.linkrev = store.linkrev(rev)
 
     def _storage(self):
         return self._manifestlog.getstorage(b'')
@@ -1876,9 +2018,10 @@
     def find(self, key):
         return self.read().find(key)
 
+
 @interfaceutil.implementer(repository.imanifestrevisionwritable)
 class memtreemanifestctx(object):
-    def __init__(self, manifestlog, dir=''):
+    def __init__(self, manifestlog, dir=b''):
         self._manifestlog = manifestlog
         self._dir = dir
         self._treemanifest = treemanifest()
@@ -1886,7 +2029,7 @@
     def _storage(self):
         return self._manifestlog.getstorage(b'')
 
-    def new(self, dir=''):
+    def new(self, dir=b''):
         return memtreemanifestctx(self._manifestlog, dir=dir)
 
     def copy(self):
@@ -1900,9 +2043,19 @@
     def write(self, transaction, link, p1, p2, added, removed, match=None):
         def readtree(dir, node):
             return self._manifestlog.get(dir, node).read()
-        return self._storage().add(self._treemanifest, transaction, link,
-                                   p1, p2, added, removed, readtree=readtree,
-                                   match=match)
+
+        return self._storage().add(
+            self._treemanifest,
+            transaction,
+            link,
+            p1,
+            p2,
+            added,
+            removed,
+            readtree=readtree,
+            match=match,
+        )
+
 
 @interfaceutil.implementer(repository.imanifestrevisionstored)
 class treemanifestctx(object):
@@ -1916,9 +2069,9 @@
         # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
         # we can instantiate treemanifestctx objects for directories we don't
         # have on disk.
-        #self.p1, self.p2 = store.parents(node)
-        #rev = store.rev(node)
-        #self.linkrev = store.linkrev(rev)
+        # self.p1, self.p2 = store.parents(node)
+        # rev = store.rev(node)
+        # self.linkrev = store.linkrev(rev)
 
     def _storage(self):
         narrowmatch = self._manifestlog._narrowmatch
@@ -1935,12 +2088,15 @@
             # TODO accessing non-public API
             elif store._treeondisk:
                 m = treemanifest(dir=self._dir)
+
                 def gettext():
                     return store.revision(self._node)
+
                 def readsubtree(dir, subm):
                     # Set verify to False since we need to be able to create
                     # subtrees for trees that don't exist on disk.
                     return self._manifestlog.get(dir, subm, verify=False).read()
+
                 m.read(gettext, readsubtree)
                 m.setnode(self._node)
                 self._data = m
@@ -1958,7 +2114,7 @@
     def node(self):
         return self._node
 
-    def new(self, dir=''):
+    def new(self, dir=b''):
         return memtreemanifestctx(self._manifestlog, dir=dir)
 
     def copy(self):
@@ -1992,7 +2148,7 @@
             m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
             m1 = self.read()
             md = treemanifest(dir=self._dir)
-            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
+            for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
                 if n1:
                     md[f] = n1
                     if fl1:
@@ -2010,8 +2166,7 @@
         store = self._storage()
         r = store.rev(self._node)
         deltaparent = store.deltaparent(r)
-        if (deltaparent != nullrev and
-            deltaparent in store.parentrevs(r)):
+        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
             return self.readdelta(shallow=shallow)
 
         if shallow:
@@ -2022,6 +2177,7 @@
     def find(self, key):
         return self.read().find(key)
 
+
 class excludeddir(treemanifest):
     """Stand-in for a directory that is excluded from the repository.
 
@@ -2033,13 +2189,14 @@
     class is: it stands in for a directory whose node is known, but
     whose contents are unknown.
     """
+
     def __init__(self, dir, node):
         super(excludeddir, self).__init__(dir)
         self._node = node
         # Add an empty file, which will be included by iterators and such,
         # appearing as the directory itself (i.e. something like "dir/")
-        self._files[''] = node
-        self._flags[''] = 't'
+        self._files[b''] = node
+        self._flags[b''] = b't'
 
     # Manifests outside the narrowspec should never be modified, so avoid
     # copying. This makes a noticeable difference when there are very many
@@ -2049,8 +2206,10 @@
     def copy(self):
         return self
 
+
 class excludeddirmanifestctx(treemanifestctx):
     """context wrapper for excludeddir - see that docstring for rationale"""
+
     def __init__(self, dir, node):
         self._dir = dir
         self._node = node
@@ -2060,7 +2219,9 @@
 
     def write(self, *args):
         raise error.ProgrammingError(
-            'attempt to write manifest from excluded dir %s' % self._dir)
+            b'attempt to write manifest from excluded dir %s' % self._dir
+        )
+
 
 class excludedmanifestrevlog(manifestrevlog):
     """Stand-in for excluded treemanifest revlogs.
@@ -2077,19 +2238,23 @@
 
     def __len__(self):
         raise error.ProgrammingError(
-            'attempt to get length of excluded dir %s' % self._dir)
+            b'attempt to get length of excluded dir %s' % self._dir
+        )
 
     def rev(self, node):
         raise error.ProgrammingError(
-            'attempt to get rev from excluded dir %s' % self._dir)
+            b'attempt to get rev from excluded dir %s' % self._dir
+        )
 
     def linkrev(self, node):
         raise error.ProgrammingError(
-            'attempt to get linkrev from excluded dir %s' % self._dir)
+            b'attempt to get linkrev from excluded dir %s' % self._dir
+        )
 
     def node(self, rev):
         raise error.ProgrammingError(
-            'attempt to get node from excluded dir %s' % self._dir)
+            b'attempt to get node from excluded dir %s' % self._dir
+        )
 
     def add(self, *args, **kwargs):
         # We should never write entries in dirlogs outside the narrow clone.
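
A recurring change throughout the manifest.py hunks above, and the match.py hunks below, is byteifying string literals (`''` becomes `b''`) and replacing `dict.iteritems()` calls with `pycompat.iteritems(...)`, so the same source runs on Python 2 and Python 3. A minimal sketch of the shim idea (the general pattern, not Mercurial's exact implementation):

    import sys

    if sys.version_info[0] >= 3:
        def iteritems(d):
            return iter(d.items())  # Python 3: items() is already a view
    else:
        def iteritems(d):
            return d.iteritems()  # Python 2: avoid building a list

    for path, node in iteritems({b'foo': b'\x01' * 20}):
        pass  # identical call sites on both major versions
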
--- a/mercurial/match.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/match.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,6 +13,7 @@
 import re
 
 from .i18n import _
+from .pycompat import open
 from . import (
     encoding,
     error,
@@ -21,20 +22,30 @@
     pycompat,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
-rustmod = policy.importrust('filepatterns')
+rustmod = policy.importrust(r'filepatterns')
 
-allpatternkinds = ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
-                   'rootglob',
-                   'listfile', 'listfile0', 'set', 'include', 'subinclude',
-                   'rootfilesin')
-cwdrelativepatternkinds = ('relpath', 'glob')
+allpatternkinds = (
+    b're',
+    b'glob',
+    b'path',
+    b'relglob',
+    b'relpath',
+    b'relre',
+    b'rootglob',
+    b'listfile',
+    b'listfile0',
+    b'set',
+    b'include',
+    b'subinclude',
+    b'rootfilesin',
+)
+cwdrelativepatternkinds = (b'relpath', b'glob')
 
 propertycache = util.propertycache
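
Patterns arrive as `kind:pat` strings: a prefix naming one of the kinds above selects that kind, and anything else falls back to the caller's default. `_patsplit`, used by `_donormalize` below, does this for real; a hedged standalone sketch with a subset of the kinds:

    KINDS = (b're', b'glob', b'path', b'relpath')  # subset of allpatternkinds

    def patsplit(pattern, default):
        if b':' in pattern:
            kind, pat = pattern.split(b':', 1)
            if kind in KINDS:
                return kind, pat
        return default, pattern

    assert patsplit(b'glob:*.py', b'relpath') == (b'glob', b'*.py')
    assert patsplit(b'README', b'relpath') == (b'relpath', b'README')
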
 
+
 def _rematcher(regex):
     '''compile the regexp with the best available regexp engine and return a
     matcher function'''
@@ -45,16 +56,18 @@
     except AttributeError:
         return m.match
 
+
 def _expandsets(kindpats, ctx=None, listsubrepos=False, badfn=None):
     '''Returns the kindpats list with the 'set' patterns expanded to matchers'''
     matchers = []
     other = []
 
     for kind, pat, source in kindpats:
-        if kind == 'set':
+        if kind == b'set':
             if ctx is None:
-                raise error.ProgrammingError("fileset expression with no "
-                                             "context")
+                raise error.ProgrammingError(
+                    b"fileset expression with no context"
+                )
             matchers.append(ctx.matchfileset(pat, badfn=badfn))
 
             if listsubrepos:
@@ -67,6 +80,7 @@
         other.append((kind, pat, source))
     return matchers, other
 
+
 def _expandsubinclude(kindpats, root):
     '''Returns the list of subinclude matcher args and the kindpats without the
     subincludes in it.'''
@@ -74,37 +88,41 @@
     other = []
 
     for kind, pat, source in kindpats:
-        if kind == 'subinclude':
+        if kind == b'subinclude':
             sourceroot = pathutil.dirname(util.normpath(source))
             pat = util.pconvert(pat)
             path = pathutil.join(sourceroot, pat)
 
             newroot = pathutil.dirname(path)
-            matcherargs = (newroot, '', [], ['include:%s' % path])
+            matcherargs = (newroot, b'', [], [b'include:%s' % path])
 
             prefix = pathutil.canonpath(root, root, newroot)
             if prefix:
-                prefix += '/'
+                prefix += b'/'
             relmatchers.append((prefix, matcherargs))
         else:
             other.append((kind, pat, source))
 
     return relmatchers, other
 
+
 def _kindpatsalwaysmatch(kindpats):
     """"Checks whether the kindspats match everything, as e.g.
     'relpath:.' does.
     """
     for kind, pat, source in kindpats:
-        if pat != '' or kind not in ['relpath', 'glob']:
+        if pat != b'' or kind not in [b'relpath', b'glob']:
             return False
     return True
 
-def _buildkindpatsmatcher(matchercls, root, kindpats, ctx=None,
-                          listsubrepos=False, badfn=None):
+
+def _buildkindpatsmatcher(
+    matchercls, root, kindpats, ctx=None, listsubrepos=False, badfn=None
+):
     matchers = []
-    fms, kindpats = _expandsets(kindpats, ctx=ctx,
-                                listsubrepos=listsubrepos, badfn=badfn)
+    fms, kindpats = _expandsets(
+        kindpats, ctx=ctx, listsubrepos=listsubrepos, badfn=badfn
+    )
     if kindpats:
         m = matchercls(root, kindpats, badfn=badfn)
         matchers.append(m)
@@ -116,9 +134,21 @@
         return matchers[0]
     return unionmatcher(matchers)
 
-def match(root, cwd, patterns=None, include=None, exclude=None, default='glob',
-          auditor=None, ctx=None, listsubrepos=False, warn=None,
-          badfn=None, icasefs=False):
+
+def match(
+    root,
+    cwd,
+    patterns=None,
+    include=None,
+    exclude=None,
+    default=b'glob',
+    auditor=None,
+    ctx=None,
+    listsubrepos=False,
+    warn=None,
+    badfn=None,
+    icasefs=False,
+):
     r"""build an object to match a set of file patterns
 
     arguments:
@@ -207,7 +237,7 @@
             kp = _donormalize(patterns, default, root, cwd, auditor, warn)
             kindpats = []
             for kind, pats, source in kp:
-                if kind not in ('re', 'relre'):  # regex can't be normalized
+                if kind not in (b're', b'relre'):  # regex can't be normalized
                     p = pats
                     pats = dsnormalize(pats)
 
@@ -223,34 +253,56 @@
         if _kindpatsalwaysmatch(kindpats):
             m = alwaysmatcher(badfn)
         else:
-            m = _buildkindpatsmatcher(patternmatcher, root, kindpats, ctx=ctx,
-                                      listsubrepos=listsubrepos, badfn=badfn)
+            m = _buildkindpatsmatcher(
+                patternmatcher,
+                root,
+                kindpats,
+                ctx=ctx,
+                listsubrepos=listsubrepos,
+                badfn=badfn,
+            )
     else:
         # It's a little strange that no patterns means to match everything.
         # Consider changing this to match nothing (probably using nevermatcher).
         m = alwaysmatcher(badfn)
 
     if include:
-        kindpats = normalize(include, 'glob', root, cwd, auditor, warn)
-        im = _buildkindpatsmatcher(includematcher, root, kindpats, ctx=ctx,
-                                   listsubrepos=listsubrepos, badfn=None)
+        kindpats = normalize(include, b'glob', root, cwd, auditor, warn)
+        im = _buildkindpatsmatcher(
+            includematcher,
+            root,
+            kindpats,
+            ctx=ctx,
+            listsubrepos=listsubrepos,
+            badfn=None,
+        )
         m = intersectmatchers(m, im)
     if exclude:
-        kindpats = normalize(exclude, 'glob', root, cwd, auditor, warn)
-        em = _buildkindpatsmatcher(includematcher, root, kindpats, ctx=ctx,
-                                   listsubrepos=listsubrepos, badfn=None)
+        kindpats = normalize(exclude, b'glob', root, cwd, auditor, warn)
+        em = _buildkindpatsmatcher(
+            includematcher,
+            root,
+            kindpats,
+            ctx=ctx,
+            listsubrepos=listsubrepos,
+            badfn=None,
+        )
         m = differencematcher(m, em)
     return m
 
+
 def exact(files, badfn=None):
     return exactmatcher(files, badfn=badfn)
 
+
 def always(badfn=None):
     return alwaysmatcher(badfn)
 
+
 def never(badfn=None):
     return nevermatcher(badfn)
 
+
 def badmatch(match, badfn):
     """Make a copy of the given matcher, replacing its bad method with the given
     one.
@@ -259,6 +311,7 @@
     m.bad = badfn
     return m
 
+
 def _donormalize(patterns, default, root, cwd, auditor=None, warn=None):
     '''Convert 'kind:pat' from the patterns list to tuples with kind and
     normalized and rooted patterns and with listfiles expanded.'''
@@ -266,48 +319,53 @@
     for kind, pat in [_patsplit(p, default) for p in patterns]:
         if kind in cwdrelativepatternkinds:
             pat = pathutil.canonpath(root, cwd, pat, auditor=auditor)
-        elif kind in ('relglob', 'path', 'rootfilesin', 'rootglob'):
+        elif kind in (b'relglob', b'path', b'rootfilesin', b'rootglob'):
             pat = util.normpath(pat)
-        elif kind in ('listfile', 'listfile0'):
+        elif kind in (b'listfile', b'listfile0'):
             try:
                 files = util.readfile(pat)
-                if kind == 'listfile0':
-                    files = files.split('\0')
+                if kind == b'listfile0':
+                    files = files.split(b'\0')
                 else:
                     files = files.splitlines()
                 files = [f for f in files if f]
             except EnvironmentError:
-                raise error.Abort(_("unable to read file list (%s)") % pat)
-            for k, p, source in _donormalize(files, default, root, cwd,
-                                             auditor, warn):
+                raise error.Abort(_(b"unable to read file list (%s)") % pat)
+            for k, p, source in _donormalize(
+                files, default, root, cwd, auditor, warn
+            ):
                 kindpats.append((k, p, pat))
             continue
-        elif kind == 'include':
+        elif kind == b'include':
             try:
                 fullpath = os.path.join(root, util.localpath(pat))
                 includepats = readpatternfile(fullpath, warn)
-                for k, p, source in _donormalize(includepats, default,
-                                                 root, cwd, auditor, warn):
+                for k, p, source in _donormalize(
+                    includepats, default, root, cwd, auditor, warn
+                ):
                     kindpats.append((k, p, source or pat))
             except error.Abort as inst:
-                raise error.Abort('%s: %s' % (pat, inst[0]))
+                raise error.Abort(b'%s: %s' % (pat, inst[0]))
             except IOError as inst:
                 if warn:
-                    warn(_("skipping unreadable pattern file '%s': %s\n") %
-                         (pat, stringutil.forcebytestr(inst.strerror)))
+                    warn(
+                        _(b"skipping unreadable pattern file '%s': %s\n")
+                        % (pat, stringutil.forcebytestr(inst.strerror))
+                    )
             continue
         # else: re or relre - which cannot be normalized
-        kindpats.append((kind, pat, ''))
+        kindpats.append((kind, pat, b''))
     return kindpats
 
+
 class basematcher(object):
-
     def __init__(self, badfn=None):
         if badfn is not None:
             self.bad = badfn
 
     def __call__(self, fn):
         return self.matchfn(fn)
+
     # Callbacks related to how the matcher is used by dirstate.walk.
     # Subscribers to these events must monkeypatch the matcher object.
     def bad(self, f, msg):
@@ -397,7 +455,7 @@
           equivalently that if there are files to investigate in 'dir' that it
           will always return 'this').
         '''
-        return 'this'
+        return b'this'
 
     def always(self):
         '''Matcher will match everything and .files() will be empty --
@@ -419,6 +477,7 @@
         optimizations will be difficult.'''
         return not self.always() and not self.isexact() and not self.prefix()
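
The `visitchildrenset` protocol documented above lets walkers prune directories: `b'all'` means everything below matches, `b'this'` means inspect this directory's entries, a concrete set names the only children worth visiting, and an empty set prunes the subtree. A hedged sketch of a walker honoring that protocol over a nested-dict tree (illustrative, not `dirstate.walk`):

    def walk(tree, matcher, dirname=b''):
        # tree maps names to a bytes node (file) or a nested dict (directory)
        visit = matcher.visitchildrenset(dirname)
        if not visit:
            return  # empty set: nothing under this directory can match
        for name, entry in sorted(tree.items()):
            if visit not in (b'all', b'this') and name not in visit:
                continue  # a concrete set names the children to consider
            path = dirname + b'/' + name if dirname else name
            if isinstance(entry, dict):
                for found in walk(entry, matcher, path):
                    yield found
            elif visit == b'all' or matcher(path):
                yield path

    class allmatcher(object):
        def __call__(self, fn):
            return True
        def visitchildrenset(self, dirname):
            return b'all'

    tree = {b'foo': {b'bar.txt': b'\x01' * 20}, b'baz.txt': b'\x02' * 20}
    assert sorted(walk(tree, allmatcher())) == [b'baz.txt', b'foo/bar.txt']
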
 
+
 class alwaysmatcher(basematcher):
     '''Matches everything.'''
 
@@ -432,14 +491,15 @@
         return True
 
     def visitdir(self, dir):
-        return 'all'
+        return b'all'
 
     def visitchildrenset(self, dir):
-        return 'all'
+        return b'all'
 
     def __repr__(self):
         return r'<alwaysmatcher>'
 
+
 class nevermatcher(basematcher):
     '''Matches nothing.'''
 
@@ -466,6 +526,7 @@
     def __repr__(self):
         return r'<nevermatcher>'
 
+
 class predicatematcher(basematcher):
     """A matcher adapter for a simple boolean function"""
 
@@ -476,15 +537,19 @@
 
     @encoding.strmethod
     def __repr__(self):
-        s = (stringutil.buildrepr(self._predrepr)
-             or pycompat.byterepr(self.matchfn))
-        return '<predicatenmatcher pred=%s>' % s
+        s = stringutil.buildrepr(self._predrepr) or pycompat.byterepr(
+            self.matchfn
+        )
+        return b'<predicatenmatcher pred=%s>' % s
+
 
 def normalizerootdir(dir, funcname):
-    if dir == '.':
-        util.nouideprecwarn("match.%s() no longer accepts "
-                            "'.', use '' instead." % funcname, '5.1')
-        return ''
+    if dir == b'.':
+        util.nouideprecwarn(
+            b"match.%s() no longer accepts '.', use '' instead." % funcname,
+            b'5.1',
+        )
+        return b''
     return dir
 
 
@@ -526,36 +591,40 @@
 
         self._files = _explicitfiles(kindpats)
         self._prefix = _prefix(kindpats)
-        self._pats, self.matchfn = _buildmatch(kindpats, '$', root)
+        self._pats, self.matchfn = _buildmatch(kindpats, b'$', root)
 
     @propertycache
     def _dirs(self):
         return set(util.dirs(self._fileset))
 
     def visitdir(self, dir):
-        dir = normalizerootdir(dir, 'visitdir')
+        dir = normalizerootdir(dir, b'visitdir')
         if self._prefix and dir in self._fileset:
-            return 'all'
-        return (dir in self._fileset or
-                dir in self._dirs or
-                any(parentdir in self._fileset
-                    for parentdir in util.finddirs(dir)))
+            return b'all'
+        return (
+            dir in self._fileset
+            or dir in self._dirs
+            or any(
+                parentdir in self._fileset for parentdir in util.finddirs(dir)
+            )
+        )
 
     def visitchildrenset(self, dir):
         ret = self.visitdir(dir)
         if ret is True:
-            return 'this'
+            return b'this'
         elif not ret:
             return set()
-        assert ret == 'all'
-        return 'all'
+        assert ret == b'all'
+        return b'all'
 
     def prefix(self):
         return self._prefix
 
     @encoding.strmethod
     def __repr__(self):
-        return ('<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats))
+        return b'<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats)
+
 
 # This is basically a reimplementation of util.dirs that stores the children
 # instead of just a count of them, plus a small optional optimization to avoid
@@ -569,7 +638,7 @@
             addpath(f)
 
     def addpath(self, path):
-        if path == '':
+        if path == b'':
             return
         dirs = self._dirs
         findsplitdirs = _dirchildren._findsplitdirs
@@ -586,22 +655,22 @@
         # Unlike manifest._splittopdir, this does not suffix `dirname` with a
         # slash.
         oldpos = len(path)
-        pos = path.rfind('/')
+        pos = path.rfind(b'/')
         while pos != -1:
-            yield path[:pos], path[pos + 1:oldpos]
+            yield path[:pos], path[pos + 1 : oldpos]
             oldpos = pos
-            pos = path.rfind('/', 0, pos)
-        yield '', path[:oldpos]
+            pos = path.rfind(b'/', 0, pos)
+        yield b'', path[:oldpos]
 
     def get(self, path):
         return self._dirs.get(path, set())
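
`_findsplitdirs` above walks a path from the right, yielding every `(dirname, basename)` split without suffixing slashes. A standalone copy to show the exact output order:

    def findsplitdirs(path):
        oldpos = len(path)
        pos = path.rfind(b'/')
        while pos != -1:
            yield path[:pos], path[pos + 1:oldpos]
            oldpos = pos
            pos = path.rfind(b'/', 0, pos)
        yield b'', path[:oldpos]

    assert list(findsplitdirs(b'a/b/c')) == [
        (b'a/b', b'c'), (b'a', b'b'), (b'', b'a'),
    ]
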
 
+
 class includematcher(basematcher):
-
     def __init__(self, root, kindpats, badfn=None):
         super(includematcher, self).__init__(badfn)
 
-        self._pats, self.matchfn = _buildmatch(kindpats, '(?:/|$)', root)
+        self._pats, self.matchfn = _buildmatch(kindpats, b'(?:/|$)', root)
         self._prefix = _prefix(kindpats)
         roots, dirs, parents = _rootsdirsandparents(kindpats)
         # roots are directories which are recursively included.
@@ -613,14 +682,15 @@
         self._parents = parents
 
     def visitdir(self, dir):
-        dir = normalizerootdir(dir, 'visitdir')
+        dir = normalizerootdir(dir, b'visitdir')
         if self._prefix and dir in self._roots:
-            return 'all'
-        return (dir in self._roots or
-                dir in self._dirs or
-                dir in self._parents or
-                any(parentdir in self._roots
-                    for parentdir in util.finddirs(dir)))
+            return b'all'
+        return (
+            dir in self._roots
+            or dir in self._dirs
+            or dir in self._parents
+            or any(parentdir in self._roots for parentdir in util.finddirs(dir))
+        )
 
     @propertycache
     def _allparentschildren(self):
@@ -631,20 +701,22 @@
         # if we asked for the children of 'foo', but had only added
         # self._parents, we wouldn't be able to respond ['bar'].
         return _dirchildren(
-                itertools.chain(self._dirs, self._roots, self._parents),
-                onlyinclude=self._parents)
+            itertools.chain(self._dirs, self._roots, self._parents),
+            onlyinclude=self._parents,
+        )
 
     def visitchildrenset(self, dir):
         if self._prefix and dir in self._roots:
-            return 'all'
+            return b'all'
         # Note: this does *not* include the 'dir in self._parents' case from
         # visitdir, that's handled below.
-        if ('' in self._roots or
-            dir in self._roots or
-            dir in self._dirs or
-            any(parentdir in self._roots
-                for parentdir in util.finddirs(dir))):
-            return 'this'
+        if (
+            b'' in self._roots
+            or dir in self._roots
+            or dir in self._dirs
+            or any(parentdir in self._roots for parentdir in util.finddirs(dir))
+        ):
+            return b'this'
 
         if dir in self._parents:
             return self._allparentschildren.get(dir) or set()
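
The ``b'all'``/``b'this'``/set protocol returned by ``visitdir`` and ``visitchildrenset`` throughout these hunks is easiest to read as a pruning hint for tree walkers. A hypothetical caller-side dispatch (the matcher is any of the classes in this file; the dispatch itself is illustrative only)::

    def plan_visit(matcher, dir):
        # sketch: interpret visitchildrenset()'s return value
        visit = matcher.visitchildrenset(dir)
        if visit == b'all':
            return 'descend freely; no need to consult the matcher again'
        if visit == b'this':
            return 'visit every child, but re-query each subdirectory'
        if visit:  # a set of child names
            return 'recurse only into %r' % sorted(visit)
        return 'prune this subtree entirely'  # empty set
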
@@ -652,7 +724,8 @@
 
     @encoding.strmethod
     def __repr__(self):
-        return ('<includematcher includes=%r>' % pycompat.bytestr(self._pats))
+        return b'<includematcher includes=%r>' % pycompat.bytestr(self._pats)
+
 
 class exactmatcher(basematcher):
     r'''Matches the input files exactly. They are interpreted as paths, not
@@ -690,26 +763,25 @@
         return set(util.dirs(self._fileset))
 
     def visitdir(self, dir):
-        dir = normalizerootdir(dir, 'visitdir')
+        dir = normalizerootdir(dir, b'visitdir')
         return dir in self._dirs
 
     def visitchildrenset(self, dir):
-        dir = normalizerootdir(dir, 'visitchildrenset')
+        dir = normalizerootdir(dir, b'visitchildrenset')
 
         if not self._fileset or dir not in self._dirs:
             return set()
 
-        candidates = self._fileset | self._dirs - {''}
-        if dir != '':
-            d = dir + '/'
-            candidates = set(c[len(d):] for c in candidates if
-                             c.startswith(d))
+        candidates = self._fileset | self._dirs - {b''}
+        if dir != b'':
+            d = dir + b'/'
+            candidates = set(c[len(d) :] for c in candidates if c.startswith(d))
         # self._dirs includes all of the directories, recursively, so if
         # we're attempting to match foo/bar/baz.txt, it'll have '', 'foo',
         # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
         # '/' in it, indicating it's for a subdir-of-a-subdir; the
         # immediate subdir will be in there without a slash.
-        ret = {c for c in candidates if '/' not in c}
+        ret = {c for c in candidates if b'/' not in c}
         # We really do not expect ret to be empty, since that would imply that
         # there's something in _dirs that didn't have a file in _fileset.
         assert ret
@@ -720,7 +792,8 @@
 
     @encoding.strmethod
     def __repr__(self):
-        return ('<exactmatcher files=%r>' % self._files)
+        return b'<exactmatcher files=%r>' % self._files
+
 
 class differencematcher(basematcher):
     '''Composes two matchers by matching if the first matches and the second
@@ -729,6 +802,7 @@
     The second matcher's non-matching-attributes (bad, explicitdir,
     traversedir) are ignored.
     '''
+
     def __init__(self, m1, m2):
         super(differencematcher, self).__init__()
         self._m1 = m1
@@ -752,7 +826,7 @@
         return self._m1.files()
 
     def visitdir(self, dir):
-        if self._m2.visitdir(dir) == 'all':
+        if self._m2.visitdir(dir) == b'all':
             return False
         elif not self._m2.visitdir(dir):
             # m2 does not match dir, we can return 'all' here if possible
@@ -761,7 +835,7 @@
 
     def visitchildrenset(self, dir):
         m2_set = self._m2.visitchildrenset(dir)
-        if m2_set == 'all':
+        if m2_set == b'all':
             return set()
         m1_set = self._m1.visitchildrenset(dir)
         # Possible values for m1: 'all', 'this', set(...), set()
@@ -771,11 +845,11 @@
         # return True, not 'all', for some reason.
         if not m2_set:
             return m1_set
-        if m1_set in ['all', 'this']:
+        if m1_set in [b'all', b'this']:
             # Never return 'all' here if m2_set is any kind of non-empty (either
             # 'this' or set(foo)), since m2 might return set() for a
             # subdirectory.
-            return 'this'
+            return b'this'
         # Possible values for m1:         set(...), set()
         # Possible values for m2: 'this', set(...)
         # We ignore m2's set results. They're possibly incorrect:
@@ -789,7 +863,8 @@
 
     @encoding.strmethod
     def __repr__(self):
-        return ('<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2))
+        return b'<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2)
+
 
 def intersectmatchers(m1, m2):
     '''Composes two matchers by matching if both of them match.
@@ -812,6 +887,7 @@
         return m
     return intersectionmatcher(m1, m2)
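
For example, a caller wanting "everything m1 matches, minus what m2 matches, restricted to a narrowing matcher" composes these classes directly. A hypothetical convenience wrapper (only the two constructors above are real; the helper name is made up)::

    def narrowed_difference(m1, m2, narrow):
        # (m1 minus m2), intersected with narrow
        return intersectmatchers(differencematcher(m1, m2), narrow)
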
 
+
 class intersectionmatcher(basematcher):
     def __init__(self, m1, m2):
         super(intersectionmatcher, self).__init__()
@@ -839,7 +915,7 @@
 
     def visitdir(self, dir):
         visit1 = self._m1.visitdir(dir)
-        if visit1 == 'all':
+        if visit1 == b'all':
             return self._m2.visitdir(dir)
         # bool() because visit1=True + visit2='all' should not be 'all'
         return bool(visit1 and self._m2.visitdir(dir))
@@ -852,13 +928,13 @@
         if not m2_set:
             return set()
 
-        if m1_set == 'all':
+        if m1_set == b'all':
             return m2_set
-        elif m2_set == 'all':
+        elif m2_set == b'all':
             return m1_set
 
-        if m1_set == 'this' or m2_set == 'this':
-            return 'this'
+        if m1_set == b'this' or m2_set == b'this':
+            return b'this'
 
         assert isinstance(m1_set, set) and isinstance(m2_set, set)
         return m1_set.intersection(m2_set)
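
Spelled out, the meet rules implemented above are::

    #   m1 answer    m2 answer    visitchildrenset result
    #   empty set    anything     empty set   (prune)
    #   anything     empty set    empty set   (prune)
    #   b'all'       X            X
    #   X            b'all'       X
    #   b'this'      nonempty     b'this'
    #   nonempty     b'this'      b'this'
    #   set(A)       set(B)       A & B
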
@@ -871,7 +947,8 @@
 
     @encoding.strmethod
     def __repr__(self):
-        return ('<intersectionmatcher m1=%r, m2=%r>' % (self._m1, self._m2))
+        return b'<intersectionmatcher m1=%r, m2=%r>' % (self._m1, self._m2)
+
 
 class subdirmatcher(basematcher):
     """Adapt a matcher to work on a subdirectory only.
@@ -906,8 +983,11 @@
         self._matcher = matcher
         self._always = matcher.always()
 
-        self._files = [f[len(path) + 1:] for f in matcher._files
-                       if f.startswith(path + "/")]
+        self._files = [
+            f[len(path) + 1 :]
+            for f in matcher._files
+            if f.startswith(path + b"/")
+        ]
 
         # If the parent repo had a path to this subrepo and the matcher is
         # a prefix matcher, this submatcher always matches.
@@ -915,29 +995,29 @@
             self._always = any(f == path for f in matcher._files)
 
     def bad(self, f, msg):
-        self._matcher.bad(self._path + "/" + f, msg)
+        self._matcher.bad(self._path + b"/" + f, msg)
 
     def matchfn(self, f):
         # Some information is lost in the superclass's constructor, so we
         # can not accurately create the matching function for the subdirectory
         # from the inputs. Instead, we override matchfn() and visitdir() to
         # call the original matcher with the subdirectory path prepended.
-        return self._matcher.matchfn(self._path + "/" + f)
+        return self._matcher.matchfn(self._path + b"/" + f)
 
     def visitdir(self, dir):
-        dir = normalizerootdir(dir, 'visitdir')
-        if dir == '':
+        dir = normalizerootdir(dir, b'visitdir')
+        if dir == b'':
             dir = self._path
         else:
-            dir = self._path + "/" + dir
+            dir = self._path + b"/" + dir
         return self._matcher.visitdir(dir)
 
     def visitchildrenset(self, dir):
-        dir = normalizerootdir(dir, 'visitchildrenset')
-        if dir == '':
+        dir = normalizerootdir(dir, b'visitchildrenset')
+        if dir == b'':
             dir = self._path
         else:
-            dir = self._path + "/" + dir
+            dir = self._path + b"/" + dir
         return self._matcher.visitchildrenset(dir)
 
     def always(self):
@@ -948,8 +1028,11 @@
 
     @encoding.strmethod
     def __repr__(self):
-        return ('<subdirmatcher path=%r, matcher=%r>' %
-                (self._path, self._matcher))
+        return b'<subdirmatcher path=%r, matcher=%r>' % (
+            self._path,
+            self._matcher,
+        )
+
 
 class prefixdirmatcher(basematcher):
     """Adapt a matcher to work on a parent directory.
@@ -987,9 +1070,9 @@
     def __init__(self, path, matcher, badfn=None):
         super(prefixdirmatcher, self).__init__(badfn)
         if not path:
-            raise error.ProgrammingError('prefix path must not be empty')
+            raise error.ProgrammingError(b'prefix path must not be empty')
         self._path = path
-        self._pathprefix = path + '/'
+        self._pathprefix = path + b'/'
         self._matcher = matcher
 
     @propertycache
@@ -999,7 +1082,7 @@
     def matchfn(self, f):
         if not f.startswith(self._pathprefix):
             return False
-        return self._matcher.matchfn(f[len(self._pathprefix):])
+        return self._matcher.matchfn(f[len(self._pathprefix) :])
 
     @propertycache
     def _pathdirs(self):
@@ -1007,18 +1090,18 @@
 
     def visitdir(self, dir):
         if dir == self._path:
-            return self._matcher.visitdir('')
+            return self._matcher.visitdir(b'')
         if dir.startswith(self._pathprefix):
-            return self._matcher.visitdir(dir[len(self._pathprefix):])
+            return self._matcher.visitdir(dir[len(self._pathprefix) :])
         return dir in self._pathdirs
 
     def visitchildrenset(self, dir):
         if dir == self._path:
-            return self._matcher.visitchildrenset('')
+            return self._matcher.visitchildrenset(b'')
         if dir.startswith(self._pathprefix):
-            return self._matcher.visitchildrenset(dir[len(self._pathprefix):])
+            return self._matcher.visitchildrenset(dir[len(self._pathprefix) :])
         if dir in self._pathdirs:
-            return 'this'
+            return b'this'
         return set()
 
     def isexact(self):
@@ -1029,8 +1112,11 @@
 
     @encoding.strmethod
     def __repr__(self):
-        return ('<prefixdirmatcher path=%r, matcher=%r>'
-                % (pycompat.bytestr(self._path), self._matcher))
+        return b'<prefixdirmatcher path=%r, matcher=%r>' % (
+            pycompat.bytestr(self._path),
+            self._matcher,
+        )
+
 
 class unionmatcher(basematcher):
     """A matcher that is the union of several matchers.
@@ -1056,7 +1142,7 @@
         r = False
         for m in self._matchers:
             v = m.visitdir(dir)
-            if v == 'all':
+            if v == b'all':
                 return v
             r |= v
         return r
@@ -1068,21 +1154,22 @@
             v = m.visitchildrenset(dir)
             if not v:
                 continue
-            if v == 'all':
+            if v == b'all':
                 return v
-            if this or v == 'this':
+            if this or v == b'this':
                 this = True
                 # don't break, we might have an 'all' in here.
                 continue
             assert isinstance(v, set)
             r = r.union(v)
         if this:
-            return 'this'
+            return b'this'
         return r
 
     @encoding.strmethod
     def __repr__(self):
-        return ('<unionmatcher matchers=%r>' % self._matchers)
+        return b'<unionmatcher matchers=%r>' % self._matchers
+
 
 def patkind(pattern, default=None):
     '''If pattern is 'kind:pat' with a known kind, return kind.
@@ -1099,15 +1186,17 @@
     '''
     return _patsplit(pattern, default)[0]
 
+
 def _patsplit(pattern, default):
     """Split a string into the optional pattern kind prefix and the actual
     pattern."""
-    if ':' in pattern:
-        kind, pat = pattern.split(':', 1)
+    if b':' in pattern:
+        kind, pat = pattern.split(b':', 1)
         if kind in allpatternkinds:
             return kind, pat
     return default, pattern
 
+
 def _globre(pat):
     r'''Convert an extended glob string to a regexp string.
 
@@ -1132,53 +1221,55 @@
     \.\*\?
     '''
     i, n = 0, len(pat)
-    res = ''
+    res = b''
     group = 0
     escape = util.stringutil.regexbytesescapemap.get
+
     def peek():
-        return i < n and pat[i:i + 1]
+        return i < n and pat[i : i + 1]
+
     while i < n:
-        c = pat[i:i + 1]
+        c = pat[i : i + 1]
         i += 1
-        if c not in '*?[{},\\':
+        if c not in b'*?[{},\\':
             res += escape(c, c)
-        elif c == '*':
-            if peek() == '*':
+        elif c == b'*':
+            if peek() == b'*':
                 i += 1
-                if peek() == '/':
+                if peek() == b'/':
                     i += 1
-                    res += '(?:.*/)?'
+                    res += b'(?:.*/)?'
                 else:
-                    res += '.*'
+                    res += b'.*'
             else:
-                res += '[^/]*'
-        elif c == '?':
-            res += '.'
-        elif c == '[':
+                res += b'[^/]*'
+        elif c == b'?':
+            res += b'.'
+        elif c == b'[':
             j = i
-            if j < n and pat[j:j + 1] in '!]':
+            if j < n and pat[j : j + 1] in b'!]':
                 j += 1
-            while j < n and pat[j:j + 1] != ']':
+            while j < n and pat[j : j + 1] != b']':
                 j += 1
             if j >= n:
-                res += '\\['
+                res += b'\\['
             else:
-                stuff = pat[i:j].replace('\\','\\\\')
+                stuff = pat[i:j].replace(b'\\', b'\\\\')
                 i = j + 1
-                if stuff[0:1] == '!':
-                    stuff = '^' + stuff[1:]
-                elif stuff[0:1] == '^':
-                    stuff = '\\' + stuff
-                res = '%s[%s]' % (res, stuff)
-        elif c == '{':
+                if stuff[0:1] == b'!':
+                    stuff = b'^' + stuff[1:]
+                elif stuff[0:1] == b'^':
+                    stuff = b'\\' + stuff
+                res = b'%s[%s]' % (res, stuff)
+        elif c == b'{':
             group += 1
-            res += '(?:'
-        elif c == '}' and group:
-            res += ')'
+            res += b'(?:'
+        elif c == b'}' and group:
+            res += b')'
             group -= 1
-        elif c == ',' and group:
-            res += '|'
-        elif c == '\\':
+        elif c == b',' and group:
+            res += b'|'
+        elif c == b'\\':
             p = peek()
             if p:
                 i += 1
@@ -1189,6 +1280,7 @@
             res += escape(c, c)
     return res
 
+
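
A few sample conversions, hand-traced from the rules in ``_globre`` above (shown as plain regex text rather than byte reprs)::

    # _globre(b'*.py')        ->  [^/]*\.py
    # _globre(b'foo/**/bar')  ->  foo/(?:.*/)?bar
    # _globre(b'{a,b}')       ->  (?:a|b)
    # _globre(b'a?c')         ->  a.c
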
 def _regex(kind, pat, globsuffix):
     '''Convert a (normalized) pattern of any kind into a
     regular expression.
@@ -1196,41 +1288,43 @@
 
     if rustmod is not None:
         try:
-            return rustmod.build_single_regex(
-                kind,
-                pat,
-                globsuffix
-            )
+            return rustmod.build_single_regex(kind, pat, globsuffix)
         except rustmod.PatternError:
             raise error.ProgrammingError(
-                'not a regex pattern: %s:%s' % (kind, pat)
+                b'not a regex pattern: %s:%s' % (kind, pat)
             )
 
-    if not pat and kind in ('glob', 'relpath'):
-        return ''
-    if kind == 're':
+    if not pat and kind in (b'glob', b'relpath'):
+        return b''
+    if kind == b're':
         return pat
-    if kind in ('path', 'relpath'):
-        if pat == '.':
-            return ''
-        return util.stringutil.reescape(pat) + '(?:/|$)'
-    if kind == 'rootfilesin':
-        if pat == '.':
-            escaped = ''
+    if kind in (b'path', b'relpath'):
+        if pat == b'.':
+            return b''
+        return util.stringutil.reescape(pat) + b'(?:/|$)'
+    if kind == b'rootfilesin':
+        if pat == b'.':
+            escaped = b''
         else:
             # Pattern is a directory name.
-            escaped = util.stringutil.reescape(pat) + '/'
+            escaped = util.stringutil.reescape(pat) + b'/'
         # Anything after the pattern must be a non-directory.
-        return escaped + '[^/]+$'
-    if kind == 'relglob':
-        return '(?:|.*/)' + _globre(pat) + globsuffix
-    if kind == 'relre':
-        if pat.startswith('^'):
+        return escaped + b'[^/]+$'
+    if kind == b'relglob':
+        globre = _globre(pat)
+        if globre.startswith(b'[^/]*'):
+            # When pat has the form *XYZ (common), make the returned regex more
+            # legible by returning the regex for **XYZ instead of **/*XYZ.
+            return b'.*' + globre[len(b'[^/]*') :] + globsuffix
+        return b'(?:|.*/)' + globre + globsuffix
+    if kind == b'relre':
+        if pat.startswith(b'^'):
             return pat
-        return '.*' + pat
-    if kind in ('glob', 'rootglob'):
+        return b'.*' + pat
+    if kind in (b'glob', b'rootglob'):
         return _globre(pat) + globsuffix
-    raise error.ProgrammingError('not a regex pattern: %s:%s' % (kind, pat))
+    raise error.ProgrammingError(b'not a regex pattern: %s:%s' % (kind, pat))
+
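
Hand-traced examples of ``_regex`` for a few kinds, using the ``b'$'`` globsuffix that ``patternmatcher`` passes above (``includematcher`` passes ``b'(?:/|$)'`` instead); the ``relglob`` line shows the effect of the new legibility tweak::

    # _regex(b'path',        b'foo',  b'$')  ->  foo(?:/|$)
    # _regex(b'rootfilesin', b'foo',  b'$')  ->  foo/[^/]+$
    # _regex(b'relglob',     b'*.c',  b'$')  ->  .*\.c$
    #                        (previously: (?:|.*/)[^/]*\.c$)
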
 
 def _buildmatch(kindpats, globsuffix, root):
     '''Return regexp string and a matcher function for kindpats.
@@ -1240,6 +1334,7 @@
     subincludes, kindpats = _expandsubinclude(kindpats, root)
     if subincludes:
         submatchers = {}
+
         def matchsubinclude(f):
             for prefix, matcherargs in subincludes:
                 if f.startswith(prefix):
@@ -1248,22 +1343,25 @@
                         mf = match(*matcherargs)
                         submatchers[prefix] = mf
 
-                    if mf(f[len(prefix):]):
+                    if mf(f[len(prefix) :]):
                         return True
             return False
+
         matchfuncs.append(matchsubinclude)
 
-    regex = ''
+    regex = b''
     if kindpats:
-        if all(k == 'rootfilesin' for k, p, s in kindpats):
+        if all(k == b'rootfilesin' for k, p, s in kindpats):
             dirs = {p for k, p, s in kindpats}
+
             def mf(f):
-                i = f.rfind('/')
+                i = f.rfind(b'/')
                 if i >= 0:
                     dir = f[:i]
                 else:
-                    dir = '.'
+                    dir = b'.'
                 return dir in dirs
+
             regex = b'rootfilesin: %s' % stringutil.pprint(list(sorted(dirs)))
             matchfuncs.append(mf)
         else:
@@ -1275,11 +1373,14 @@
     else:
         return regex, lambda f: any(mf(f) for mf in matchfuncs)
 
+
 MAX_RE_SIZE = 20000
 
+
 def _joinregexes(regexps):
     """gather multiple regular expressions into a single one"""
-    return '|'.join(regexps)
+    return b'|'.join(regexps)
+
 
 def _buildregexmatch(kindpats, globsuffix):
     """Build a match function from a list of kinds and kindpats,
@@ -1303,7 +1404,7 @@
         for idx, r in enumerate(regexps):
             piecesize = len(r)
             if piecesize > MAX_RE_SIZE:
-                msg = _("matcher pattern is too long (%d bytes)") % piecesize
+                msg = _(b"matcher pattern is too long (%d bytes)") % piecesize
                 raise error.Abort(msg)
             elif (groupsize + piecesize) > MAX_RE_SIZE:
                 group = regexps[startidx:idx]
@@ -1327,11 +1428,13 @@
                 _rematcher(_regex(k, p, globsuffix))
             except re.error:
                 if s:
-                    raise error.Abort(_("%s: invalid pattern (%s): %s") %
-                                      (s, k, p))
+                    raise error.Abort(
+                        _(b"%s: invalid pattern (%s): %s") % (s, k, p)
+                    )
                 else:
-                    raise error.Abort(_("invalid pattern (%s): %s") % (k, p))
-        raise error.Abort(_("invalid pattern"))
+                    raise error.Abort(_(b"invalid pattern (%s): %s") % (k, p))
+        raise error.Abort(_(b"invalid pattern"))
+
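
The grouping above exists because a single compiled regex has practical size limits, so pattern pieces are packed into ``b'|'``-joined groups of at most ``MAX_RE_SIZE`` bytes. A minimal sketch of the packing idea (simplified: the real code above also aborts when a single piece exceeds the limit, and keeps source info for error reporting)::

    def group_regexps(regexps, maxsize=20000):
        groups, current, size = [], [], 0
        for r in regexps:
            if current and size + len(r) > maxsize:
                groups.append(b'|'.join(current))
                current, size = [], 0
            current.append(r)
            size += len(r) + 1  # +1 accounts for the joining b'|'
        if current:
            groups.append(b'|'.join(current))
        return groups
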
 
 def _patternrootsanddirs(kindpats):
     '''Returns roots and directories corresponding to each pattern.
@@ -1344,30 +1447,32 @@
     r = []
     d = []
     for kind, pat, source in kindpats:
-        if kind in ('glob', 'rootglob'): # find the non-glob prefix
+        if kind in (b'glob', b'rootglob'):  # find the non-glob prefix
             root = []
-            for p in pat.split('/'):
-                if '[' in p or '{' in p or '*' in p or '?' in p:
+            for p in pat.split(b'/'):
+                if b'[' in p or b'{' in p or b'*' in p or b'?' in p:
                     break
                 root.append(p)
-            r.append('/'.join(root))
-        elif kind in ('relpath', 'path'):
-            if pat == '.':
-                pat = ''
+            r.append(b'/'.join(root))
+        elif kind in (b'relpath', b'path'):
+            if pat == b'.':
+                pat = b''
             r.append(pat)
-        elif kind in ('rootfilesin',):
-            if pat == '.':
-                pat = ''
+        elif kind in (b'rootfilesin',):
+            if pat == b'.':
+                pat = b''
             d.append(pat)
-        else: # relglob, re, relre
-            r.append('')
+        else:  # relglob, re, relre
+            r.append(b'')
     return r, d
 
+
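
A worked example of ``_patternrootsanddirs`` (sources left empty for brevity)::

    kindpats = [
        (b'glob', b'foo/bar/*.c', b''),  # non-glob prefix -> root b'foo/bar'
        (b'path', b'baz', b''),          # recursive root  -> root b'baz'
        (b'rootfilesin', b'qux', b''),   # exact directory -> dir  b'qux'
    ]
    roots, dirs = _patternrootsanddirs(kindpats)
    # roots == [b'foo/bar', b'baz'], dirs == [b'qux']
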
 def _roots(kindpats):
     '''Returns root directories to match recursively from the given patterns.'''
     roots, dirs = _patternrootsanddirs(kindpats)
     return roots
 
+
 def _rootsdirsandparents(kindpats):
     '''Returns roots and exact directories from patterns.
 
@@ -1411,6 +1516,7 @@
     # 'dirs' to also be in 'parents', consider removing them before returning.
     return r, d, p
 
+
 def _explicitfiles(kindpats):
     '''Returns the potential explicit filenames from the patterns.
 
@@ -1421,18 +1527,21 @@
     '''
     # Keep only the pattern kinds where one can specify filenames (vs only
     # directory names).
-    filable = [kp for kp in kindpats if kp[0] not in ('rootfilesin',)]
+    filable = [kp for kp in kindpats if kp[0] not in (b'rootfilesin',)]
     return _roots(filable)
 
+
 def _prefix(kindpats):
     '''Whether all the patterns match a prefix (i.e. recursively)'''
     for kind, pat, source in kindpats:
-        if kind not in ('path', 'relpath'):
+        if kind not in (b'path', b'relpath'):
             return False
     return True
 
+
 _commentre = None
 
+
 def readpatternfile(filepath, warn, sourceinfo=False):
     '''parse a pattern file, returning a list of
     patterns. These patterns should be given to compile()
@@ -1459,64 +1568,63 @@
 
     if rustmod is not None:
         result, warnings = rustmod.read_pattern_file(
-            filepath,
-            bool(warn),
-            sourceinfo,
+            filepath, bool(warn), sourceinfo,
         )
 
         for warning_params in warnings:
             # Can't be easily emitted from Rust, because it would require
             # a mechanism for both gettext and calling the `warn` function.
-            warn(_("%s: ignoring invalid syntax '%s'\n") % warning_params)
+            warn(_(b"%s: ignoring invalid syntax '%s'\n") % warning_params)
 
         return result
 
     syntaxes = {
-        're': 'relre:',
-        'regexp': 'relre:',
-        'glob': 'relglob:',
-        'rootglob': 'rootglob:',
-        'include': 'include',
-        'subinclude': 'subinclude',
+        b're': b'relre:',
+        b'regexp': b'relre:',
+        b'glob': b'relglob:',
+        b'rootglob': b'rootglob:',
+        b'include': b'include',
+        b'subinclude': b'subinclude',
     }
-    syntax = 'relre:'
+    syntax = b'relre:'
     patterns = []
 
-    fp = open(filepath, 'rb')
+    fp = open(filepath, b'rb')
+    fp = open(filepath, b'rb')
     for lineno, line in enumerate(util.iterfile(fp), start=1):
-        if "#" in line:
+        if b"#" in line:
             global _commentre
             if not _commentre:
                 _commentre = util.re.compile(br'((?:^|[^\\])(?:\\\\)*)#.*')
             # remove comments prefixed by an even number of escapes
             m = _commentre.search(line)
             if m:
-                line = line[:m.end(1)]
+                line = line[: m.end(1)]
             # fixup properly escaped comments that survived the above
-            line = line.replace("\\#", "#")
+            line = line.replace(b"\\#", b"#")
         line = line.rstrip()
         if not line:
             continue
 
-        if line.startswith('syntax:'):
+        if line.startswith(b'syntax:'):
             s = line[7:].strip()
             try:
                 syntax = syntaxes[s]
             except KeyError:
                 if warn:
-                    warn(_("%s: ignoring invalid syntax '%s'\n") %
-                         (filepath, s))
+                    warn(
+                        _(b"%s: ignoring invalid syntax '%s'\n") % (filepath, s)
+                    )
             continue
 
         linesyntax = syntax
-        for s, rels in syntaxes.iteritems():
+        for s, rels in pycompat.iteritems(syntaxes):
             if line.startswith(rels):
                 linesyntax = rels
-                line = line[len(rels):]
+                line = line[len(rels) :]
                 break
-            elif line.startswith(s+':'):
+            elif line.startswith(s + b':'):
                 linesyntax = rels
-                line = line[len(s) + 1:]
+                line = line[len(s) + 1 :]
                 break
         if sourceinfo:
             patterns.append((linesyntax + line, lineno, line))
--- a/mercurial/mdiff.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/mdiff.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,6 +12,10 @@
 import zlib
 
 from .i18n import _
+from .pycompat import (
+    getattr,
+    setattr,
+)
 from . import (
     encoding,
     error,
@@ -21,7 +25,7 @@
 )
 from .utils import dateutil
 
-_missing_newline_marker = "\\ No newline at end of file\n"
+_missing_newline_marker = b"\\ No newline at end of file\n"
 
 bdiff = policy.importmod(r'bdiff')
 mpatch = policy.importmod(r'mpatch')
@@ -33,6 +37,7 @@
 textdiff = bdiff.bdiff
 splitnewlines = bdiff.splitnewlines
 
+
 class diffopts(object):
     '''context is the number of context lines
     text treats all files as text
@@ -48,23 +53,23 @@
     '''
 
     defaults = {
-        'context': 3,
-        'text': False,
-        'showfunc': False,
-        'git': False,
-        'nodates': False,
-        'nobinary': False,
-        'noprefix': False,
-        'index': 0,
-        'ignorews': False,
-        'ignorewsamount': False,
-        'ignorewseol': False,
-        'ignoreblanklines': False,
-        'upgrade': False,
-        'showsimilarity': False,
-        'worddiff': False,
-        'xdiff': False,
-        }
+        b'context': 3,
+        b'text': False,
+        b'showfunc': False,
+        b'git': False,
+        b'nodates': False,
+        b'nobinary': False,
+        b'noprefix': False,
+        b'index': 0,
+        b'ignorews': False,
+        b'ignorewsamount': False,
+        b'ignorewseol': False,
+        b'ignoreblanklines': False,
+        b'upgrade': False,
+        b'showsimilarity': False,
+        b'worddiff': False,
+        b'xdiff': False,
+    }
 
     def __init__(self, **opts):
         opts = pycompat.byteskwargs(opts)
@@ -77,9 +82,10 @@
         try:
             self.context = int(self.context)
         except ValueError:
-            raise error.Abort(_('diff context lines count must be '
-                                'an integer, not %r') %
-                              pycompat.bytestr(self.context))
+            raise error.Abort(
+                _(b'diff context lines count must be an integer, not %r')
+                % pycompat.bytestr(self.context)
+            )
 
     def copy(self, **kwargs):
         opts = dict((k, getattr(self, k)) for k in self.defaults)
@@ -87,19 +93,22 @@
         opts.update(kwargs)
         return diffopts(**opts)
 
+
 defaultopts = diffopts()
 
+
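
Typical construction, for reference: callers pass native-str keyword names matching the ``defaults`` table above, and ``byteskwargs`` converts the keys internally; ``copy`` derives a variant::

    opts = diffopts(context=5, showfunc=True, ignorews=True)
    narrower = opts.copy(context=1)  # same options, one line of context
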
 def wsclean(opts, text, blank=True):
     if opts.ignorews:
         text = bdiff.fixws(text, 1)
     elif opts.ignorewsamount:
         text = bdiff.fixws(text, 0)
     if blank and opts.ignoreblanklines:
-        text = re.sub('\n+', '\n', text).strip('\n')
+        text = re.sub(b'\n+', b'\n', text).strip(b'\n')
     if opts.ignorewseol:
         text = re.sub(br'[ \t\r\f]+\n', br'\n', text)
     return text
 
+
 def splitblock(base1, lines1, base2, lines2, opts):
     # The input lines matches except for interwoven blank lines. We
     # transform it into a sequence of matching blocks and blank blocks.
@@ -108,11 +117,10 @@
     s1, e1 = 0, len(lines1)
     s2, e2 = 0, len(lines2)
     while s1 < e1 or s2 < e2:
-        i1, i2, btype = s1, s2, '='
-        if (i1 >= e1 or lines1[i1] == 0
-            or i2 >= e2 or lines2[i2] == 0):
+        i1, i2, btype = s1, s2, b'='
+        if i1 >= e1 or lines1[i1] == 0 or i2 >= e2 or lines2[i2] == 0:
             # Consume the block of blank lines
-            btype = '~'
+            btype = b'~'
             while i1 < e1 and lines1[i1] == 0:
                 i1 += 1
             while i2 < e2 and lines2[i2] == 0:
@@ -126,6 +134,7 @@
         s1 = i1
         s2 = i2
 
+
 def hunkinrange(hunk, linerange):
     """Return True if `hunk` defined as (start, length) is in `linerange`
     defined as (lowerbound, upperbound).
@@ -151,6 +160,7 @@
     lowerbound, upperbound = linerange
     return lowerbound < start + length and start < upperbound
 
+
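
Quick sanity checks of ``hunkinrange``, traced from the comparison above (a hunk starting at line 5 with length 10 covers lines 5..15)::

    assert hunkinrange((5, 10), (0, 8))        # 0 < 15 and 5 < 8
    assert not hunkinrange((5, 10), (0, 4))    # 5 < 4 fails
    assert not hunkinrange((5, 10), (15, 20))  # 15 < 15 fails
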
 def blocksinrange(blocks, rangeb):
     """filter `blocks` like (a1, a2, b1, b2) from items outside line range
     `rangeb` from ``(b1, b2)`` point of view.
@@ -168,35 +178,40 @@
     filteredblocks = []
     for block in blocks:
         (a1, a2, b1, b2), stype = block
-        if lbb >= b1 and ubb <= b2 and stype == '=':
+        if lbb >= b1 and ubb <= b2 and stype == b'=':
             # rangeb is within a single "=" hunk, restrict back linerange1
             # by offsetting rangeb
             lba = lbb - b1 + a1
             uba = ubb - b1 + a1
         else:
             if b1 <= lbb < b2:
-                if stype == '=':
+                if stype == b'=':
                     lba = a2 - (b2 - lbb)
                 else:
                     lba = a1
             if b1 < ubb <= b2:
-                if stype == '=':
+                if stype == b'=':
                     uba = a1 + (ubb - b1)
                 else:
                     uba = a2
         if hunkinrange((b1, (b2 - b1)), rangeb):
             filteredblocks.append(block)
     if lba is None or uba is None or uba < lba:
-        raise error.Abort(_('line range exceeds file size'))
+        raise error.Abort(_(b'line range exceeds file size'))
     return filteredblocks, (lba, uba)
 
+
 def chooseblocksfunc(opts=None):
-    if (opts is None or not opts.xdiff
-        or not util.safehasattr(bdiff, 'xdiffblocks')):
+    if (
+        opts is None
+        or not opts.xdiff
+        or not util.safehasattr(bdiff, b'xdiffblocks')
+    ):
         return bdiff.blocks
     else:
         return bdiff.xdiffblocks
 
+
 def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
     """Return (block, type) tuples, where block is an mdiff.blocks
     line entry. type is '=' for blocks matching exactly one another
@@ -225,18 +240,19 @@
         # bdiff sometimes gives huge matches past eof, this check eats them,
         # and deals with the special first match case described above
         if s[0] != s[1] or s[2] != s[3]:
-            type = '!'
+            type = b'!'
             if opts.ignoreblanklines:
                 if lines1 is None:
                     lines1 = splitnewlines(text1)
                 if lines2 is None:
                     lines2 = splitnewlines(text2)
-                old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
-                new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
+                old = wsclean(opts, b"".join(lines1[s[0] : s[1]]))
+                new = wsclean(opts, b"".join(lines2[s[2] : s[3]]))
                 if old == new:
-                    type = '~'
+                    type = b'~'
             yield s, type
-        yield s1, '='
+        yield s1, b'='
+
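
A hypothetical consumer of ``allblocks``, collecting just the changed ranges (the types are ``b'='`` equal, ``b'!'`` changed, and ``b'~'`` equal modulo ignored blank lines)::

    def changed_ranges(t1, t2, opts=None):
        # illustrative only; t1/t2 are the full texts being compared
        return [s for s, btype in allblocks(t1, t2, opts) if btype == b'!']
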
 
 def unidiff(a, ad, b, bd, fn1, fn2, binary, opts=defaultopts):
     """Return a unified diff as a (headers, hunks) tuple.
@@ -248,22 +264,23 @@
 
     Set binary=True if either a or b should be taken as a binary file.
     """
+
     def datetag(date, fn=None):
         if not opts.git and not opts.nodates:
-            return '\t%s' % date
-        if fn and ' ' in fn:
-            return '\t'
-        return ''
+            return b'\t%s' % date
+        if fn and b' ' in fn:
+            return b'\t'
+        return b''
 
     sentinel = [], ()
     if not a and not b:
         return sentinel
 
     if opts.noprefix:
-        aprefix = bprefix = ''
+        aprefix = bprefix = b''
     else:
-        aprefix = 'a/'
-        bprefix = 'b/'
+        aprefix = b'a/'
+        bprefix = b'b/'
 
     epoch = dateutil.datestr((0, 0))
 
@@ -274,51 +291,52 @@
         if a and b and len(a) == len(b) and a == b:
             return sentinel
         headerlines = []
-        hunks = (None, ['Binary file %s has changed\n' % fn1]),
+        hunks = ((None, [b'Binary file %s has changed\n' % fn1]),)
     elif not a:
-        without_newline = not b.endswith('\n')
+        without_newline = not b.endswith(b'\n')
         b = splitnewlines(b)
         if a is None:
-            l1 = '--- /dev/null%s' % datetag(epoch)
+            l1 = b'--- /dev/null%s' % datetag(epoch)
         else:
-            l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
-        l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
+            l1 = b"--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
+        l2 = b"+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
         headerlines = [l1, l2]
         size = len(b)
         hunkrange = (0, 0, 1, size)
-        hunklines = ["@@ -0,0 +1,%d @@\n" % size] + ["+" + e for e in b]
+        hunklines = [b"@@ -0,0 +1,%d @@\n" % size] + [b"+" + e for e in b]
         if without_newline:
-            hunklines[-1] += '\n'
+            hunklines[-1] += b'\n'
             hunklines.append(_missing_newline_marker)
-        hunks = (hunkrange, hunklines),
+        hunks = ((hunkrange, hunklines),)
     elif not b:
-        without_newline = not a.endswith('\n')
+        without_newline = not a.endswith(b'\n')
         a = splitnewlines(a)
-        l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
+        l1 = b"--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
         if b is None:
-            l2 = '+++ /dev/null%s' % datetag(epoch)
+            l2 = b'+++ /dev/null%s' % datetag(epoch)
         else:
-            l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
+            l2 = b"+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
         headerlines = [l1, l2]
         size = len(a)
         hunkrange = (1, size, 0, 0)
-        hunklines = ["@@ -1,%d +0,0 @@\n" % size] + ["-" + e for e in a]
+        hunklines = [b"@@ -1,%d +0,0 @@\n" % size] + [b"-" + e for e in a]
         if without_newline:
-            hunklines[-1] += '\n'
+            hunklines[-1] += b'\n'
             hunklines.append(_missing_newline_marker)
-        hunks = (hunkrange, hunklines),
+        hunks = ((hunkrange, hunklines),)
     else:
         hunks = _unidiff(a, b, opts=opts)
         if not next(hunks):
             return sentinel
 
         headerlines = [
-            "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)),
-            "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
+            b"--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)),
+            b"+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
         ]
 
     return headerlines, hunks
 
+
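
Rendering the ``(headers, hunks)`` pair that ``unidiff`` returns into a single bytestring might look like this hypothetical helper (``old``/``new`` are file contents and ``ad``/``bd`` their date strings; header lines carry no trailing newline, hunk lines do)::

    def render_unidiff(old, ad, new, bd, fn):
        headers, hunks = unidiff(old, ad, new, bd, fn, fn, binary=False)
        out = [l + b'\n' for l in headers]
        for _hunkrange, hunklines in hunks:
            out.extend(hunklines)
        return b''.join(out)
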
 def _unidiff(t1, t2, opts=defaultopts):
     """Yield hunks of a headerless unified diff from t1 and t2 texts.
 
@@ -332,6 +350,7 @@
     """
     l1 = splitnewlines(t1)
     l2 = splitnewlines(t2)
+
     def contextend(l, len):
         ret = l + opts.context
         if ret > len:
@@ -344,14 +363,15 @@
             return 0
         return ret
 
-    lastfunc = [0, '']
+    lastfunc = [0, b'']
+
     def yieldhunk(hunk):
         (astart, a2, bstart, b2, delta) = hunk
         aend = contextend(a2, len(l1))
         alen = aend - astart
         blen = b2 - bstart + aend - a2
 
-        func = ""
+        func = b""
         if opts.showfunc:
             lastpos, func = lastfunc
             # walk backwards from the start of the context up to the start of
@@ -379,9 +399,9 @@
 
         hunkrange = astart, alen, bstart, blen
         hunklines = (
-            ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
+            [b"@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
             + delta
-            + [' ' + l1[x] for x in pycompat.xrange(a2, aend)]
+            + [b' ' + l1[x] for x in pycompat.xrange(a2, aend)]
         )
         # If either file ends without a newline and the last line of
         # that file is part of a hunk, a marker is printed. If the
@@ -389,18 +409,18 @@
         # a newline, print only one marker. That's the only case in
         # which the hunk can end in a shared line without a newline.
         skip = False
-        if not t1.endswith('\n') and astart + alen == len(l1) + 1:
+        if not t1.endswith(b'\n') and astart + alen == len(l1) + 1:
             for i in pycompat.xrange(len(hunklines) - 1, -1, -1):
-                if hunklines[i].startswith(('-', ' ')):
-                    if hunklines[i].startswith(' '):
+                if hunklines[i].startswith((b'-', b' ')):
+                    if hunklines[i].startswith(b' '):
                         skip = True
-                    hunklines[i] += '\n'
+                    hunklines[i] += b'\n'
                     hunklines.insert(i + 1, _missing_newline_marker)
                     break
-        if not skip and not t2.endswith('\n') and bstart + blen == len(l2) + 1:
+        if not skip and not t2.endswith(b'\n') and bstart + blen == len(l2) + 1:
             for i in pycompat.xrange(len(hunklines) - 1, -1, -1):
-                if hunklines[i].startswith('+'):
-                    hunklines[i] += '\n'
+                if hunklines[i].startswith(b'+'):
+                    hunklines[i] += b'\n'
                     hunklines.insert(i + 1, _missing_newline_marker)
                     break
         yield hunkrange, hunklines
@@ -414,8 +434,8 @@
     has_hunks = False
     for s, stype in allblocks(t1, t2, opts, l1, l2):
         a1, a2, b1, b2 = s
-        if stype != '!':
-            if stype == '~':
+        if stype != b'!':
+            if stype == b'~':
                 # The diff context lines are based on t1 content. When
                 # blank lines are ignored, the new lines offsets must
                 # be adjusted as if equivalent blocks ('~') had the
@@ -452,9 +472,9 @@
             # create a new hunk
             hunk = [astart, a2, bstart, b2, delta]
 
-        delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
-        delta[len(delta):] = ['-' + x for x in old]
-        delta[len(delta):] = ['+' + x for x in new]
+        delta[len(delta) :] = [b' ' + x for x in l1[astart:a1]]
+        delta[len(delta) :] = [b'-' + x for x in old]
+        delta[len(delta) :] = [b'+' + x for x in new]
 
     if hunk:
         if not has_hunks:
@@ -465,50 +485,54 @@
     elif not has_hunks:
         yield False
 
+
 def b85diff(to, tn):
     '''print base85-encoded binary diff'''
+
     def fmtline(line):
         l = len(line)
         if l <= 26:
-            l = pycompat.bytechr(ord('A') + l - 1)
+            l = pycompat.bytechr(ord(b'A') + l - 1)
         else:
-            l = pycompat.bytechr(l - 26 + ord('a') - 1)
-        return '%c%s\n' % (l, util.b85encode(line, True))
+            l = pycompat.bytechr(l - 26 + ord(b'a') - 1)
+        return b'%c%s\n' % (l, util.b85encode(line, True))
 
     def chunk(text, csize=52):
         l = len(text)
         i = 0
         while i < l:
-            yield text[i:i + csize]
+            yield text[i : i + csize]
             i += csize
 
     if to is None:
-        to = ''
+        to = b''
     if tn is None:
-        tn = ''
+        tn = b''
 
     if to == tn:
-        return ''
+        return b''
 
     # TODO: deltas
     ret = []
-    ret.append('GIT binary patch\n')
-    ret.append('literal %d\n' % len(tn))
+    ret.append(b'GIT binary patch\n')
+    ret.append(b'literal %d\n' % len(tn))
     for l in chunk(zlib.compress(tn)):
         ret.append(fmtline(l))
-    ret.append('\n')
+    ret.append(b'\n')
 
-    return ''.join(ret)
+    return b''.join(ret)
+
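
For reference, the length-prefix letter computed by ``fmtline`` above follows git's base85 binary-patch convention, with ``chunk`` capping lines at 52 bytes::

    # line length  1..26  ->  b'A'..b'Z'
    # line length 27..52  ->  b'a'..b'z'
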
 
 def patchtext(bin):
     pos = 0
     t = []
     while pos < len(bin):
-        p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
+        p1, p2, l = struct.unpack(b">lll", bin[pos : pos + 12])
         pos += 12
-        t.append(bin[pos:pos + l])
+        t.append(bin[pos : pos + l])
         pos += l
-    return "".join(t)
+    return b"".join(t)
+
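
A minimal record, to make the framing concrete: each record is 12 bytes of big-endian ``(start, end, length)`` followed by ``length`` bytes of replacement text, and ``patchtext`` concatenates just the replacement texts::

    rec = struct.pack(b'>lll', 0, 0, 5) + b'hello'
    assert patchtext(rec) == b'hello'
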
 
 def patch(a, bin):
     if len(a) == 0:
@@ -516,12 +540,15 @@
         return util.buffer(bin, 12)
     return mpatch.patches(a, [bin])
 
+
 # similar to difflib.SequenceMatcher.get_matching_blocks
 def get_matching_blocks(a, b):
     return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
 
+
 def trivialdiffheader(length):
-    return struct.pack(">lll", 0, 0, length) if length else ''
+    return struct.pack(b">lll", 0, 0, length) if length else b''
+
 
 def replacediffheader(oldlen, newlen):
-    return struct.pack(">lll", 0, oldlen, newlen)
+    return struct.pack(b">lll", 0, oldlen, newlen)
--- a/mercurial/merge.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/merge.py	Mon Oct 21 11:09:48 2019 -0400
@@ -23,9 +23,8 @@
     nullid,
     nullrev,
 )
-from .thirdparty import (
-    attr,
-)
+from .pycompat import delattr
+from .thirdparty import attr
 from . import (
     copies,
     encoding,
@@ -43,11 +42,13 @@
 _pack = struct.pack
 _unpack = struct.unpack
 
+
 def _droponode(data):
     # used for compatibility for v1
-    bits = data.split('\0')
+    bits = data.split(b'\0')
     bits = bits[:-2] + bits[-1:]
-    return '\0'.join(bits)
+    return b'\0'.join(bits)
+
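
A worked example of ``_droponode`` (the field names are illustrative; the point is that the second-to-last NUL-separated field is dropped when downgrading a record to v1)::

    rec = b'\0'.join([b'file', b'state', b'hash', b'node', b'flags'])
    assert _droponode(rec) == b'\0'.join([b'file', b'state', b'hash', b'flags'])
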
 
 # Merge state record types. See ``mergestate`` docs for more.
 RECORD_LOCAL = b'L'
@@ -90,6 +91,7 @@
 ACTION_EXEC = b'e'
 ACTION_CREATED_MERGE = b'cm'
 
+
 class mergestate(object):
     '''track 3-way merge state of individual files
 
@@ -136,8 +138,9 @@
     The resolve command transitions between 'u' and 'r' for conflicts and
     'pu' and 'pr' for path conflicts.
     '''
-    statepathv1 = 'merge/state'
-    statepathv2 = 'merge/state2'
+
+    statepathv1 = b'merge/state'
+    statepathv2 = b'merge/state2'
 
     @staticmethod
     def clean(repo, node=None, other=None, labels=None):
@@ -168,7 +171,7 @@
         self._local = None
         self._other = None
         self._labels = labels
-        for var in ('localctx', 'otherctx'):
+        for var in (b'localctx', b'otherctx'):
             if var in vars(self):
                 delattr(self, var)
         if node:
@@ -179,7 +182,7 @@
             self._mdstate = MERGE_DRIVER_STATE_SUCCESS
         else:
             self._mdstate = MERGE_DRIVER_STATE_UNMARKED
-        shutil.rmtree(self._repo.vfs.join('merge'), True)
+        shutil.rmtree(self._repo.vfs.join(b'merge'), True)
         self._results = {}
         self._dirty = False
 
@@ -193,7 +196,7 @@
         self._stateextras = {}
         self._local = None
         self._other = None
-        for var in ('localctx', 'otherctx'):
+        for var in (b'localctx', b'otherctx'):
             if var in vars(self):
                 delattr(self, var)
         self._readmergedriver = None
@@ -206,23 +209,29 @@
             elif rtype == RECORD_OTHER:
                 self._other = bin(record)
             elif rtype == RECORD_MERGE_DRIVER_STATE:
-                bits = record.split('\0', 1)
+                bits = record.split(b'\0', 1)
                 mdstate = bits[1]
                 if len(mdstate) != 1 or mdstate not in (
-                    MERGE_DRIVER_STATE_UNMARKED, MERGE_DRIVER_STATE_MARKED,
-                    MERGE_DRIVER_STATE_SUCCESS):
+                    MERGE_DRIVER_STATE_UNMARKED,
+                    MERGE_DRIVER_STATE_MARKED,
+                    MERGE_DRIVER_STATE_SUCCESS,
+                ):
                     # the merge driver should be idempotent, so just rerun it
                     mdstate = MERGE_DRIVER_STATE_UNMARKED
 
                 self._readmergedriver = bits[0]
                 self._mdstate = mdstate
-            elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT,
-                           RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE):
-                bits = record.split('\0')
+            elif rtype in (
+                RECORD_MERGED,
+                RECORD_CHANGEDELETE_CONFLICT,
+                RECORD_PATH_CONFLICT,
+                RECORD_MERGE_DRIVER_MERGE,
+            ):
+                bits = record.split(b'\0')
                 self._state[bits[0]] = bits[1:]
             elif rtype == RECORD_FILE_VALUES:
-                filename, rawextras = record.split('\0', 1)
-                extraparts = rawextras.split('\0')
+                filename, rawextras = record.split(b'\0', 1)
+                extraparts = rawextras.split(b'\0')
                 extras = {}
                 i = 0
                 while i < len(extraparts):
@@ -231,7 +240,7 @@
 
                 self._stateextras[filename] = extras
             elif rtype == RECORD_LABELS:
-                labels = record.split('\0', 2)
+                labels = record.split(b'\0', 2)
                 self._labels = [l for l in labels if len(l) > 0]
             elif not rtype.islower():
                 unsupported.add(rtype)
@@ -270,13 +279,13 @@
             # if mctx was wrong `mctx[bits[-2]]` may fail.
             for idx, r in enumerate(v1records):
                 if r[0] == RECORD_MERGED:
-                    bits = r[1].split('\0')
-                    bits.insert(-2, '')
-                    v1records[idx] = (r[0], '\0'.join(bits))
+                    bits = r[1].split(b'\0')
+                    bits.insert(-2, b'')
+                    v1records[idx] = (r[0], b'\0'.join(bits))
             return v1records
 
     def _v1v2match(self, v1records, v2records):
-        oldv2 = set() # old format version of v2 record
+        oldv2 = set()  # old format version of v2 record
         for rec in v2records:
             if rec[0] == RECORD_LOCAL:
                 oldv2.add(rec)
@@ -336,11 +345,11 @@
             off = 0
             end = len(data)
             while off < end:
-                rtype = data[off:off + 1]
+                rtype = data[off : off + 1]
                 off += 1
-                length = _unpack('>I', data[off:(off + 4)])[0]
+                length = _unpack(b'>I', data[off : (off + 4)])[0]
                 off += 4
-                record = data[off:(off + length)]
+                record = data[off : (off + length)]
                 off += length
                 if rtype == RECORD_OVERRIDE:
                     rtype, record = record[0:1], record[1:]
@@ -361,26 +370,31 @@
         # - B inspects .hgrc and finds it to be clean
         # - B then continues the merge and the malicious merge driver
         #  gets invoked
-        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
-        if (self._readmergedriver is not None
-            and self._readmergedriver != configmergedriver):
+        configmergedriver = self._repo.ui.config(
+            b'experimental', b'mergedriver'
+        )
+        if (
+            self._readmergedriver is not None
+            and self._readmergedriver != configmergedriver
+        ):
             raise error.ConfigError(
-                _("merge driver changed since merge started"),
-                hint=_("revert merge driver change or abort merge"))
+                _(b"merge driver changed since merge started"),
+                hint=_(b"revert merge driver change or abort merge"),
+            )
 
         return configmergedriver
 
     @util.propertycache
     def localctx(self):
         if self._local is None:
-            msg = "localctx accessed but self._local isn't set"
+            msg = b"localctx accessed but self._local isn't set"
             raise error.ProgrammingError(msg)
         return self._repo[self._local]
 
     @util.propertycache
     def otherctx(self):
         if self._other is None:
-            msg = "otherctx accessed but self._other isn't set"
+            msg = b"otherctx accessed but self._other isn't set"
             raise error.ProgrammingError(msg)
         return self._repo[self._other]
 
@@ -392,9 +406,12 @@
         """
         # Check local variables before looking at filesystem for performance
         # reasons.
-        return (bool(self._local) or bool(self._state) or
-                self._repo.vfs.exists(self.statepathv1) or
-                self._repo.vfs.exists(self.statepathv2))
+        return (
+            bool(self._local)
+            or bool(self._state)
+            or self._repo.vfs.exists(self.statepathv1)
+            or self._repo.vfs.exists(self.statepathv2)
+        )
 
     def commit(self):
         """Write current state on disk (if necessary)"""
@@ -408,42 +425,52 @@
         records.append((RECORD_LOCAL, hex(self._local)))
         records.append((RECORD_OTHER, hex(self._other)))
         if self.mergedriver:
-            records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([
-                self.mergedriver, self._mdstate])))
+            records.append(
+                (
+                    RECORD_MERGE_DRIVER_STATE,
+                    b'\0'.join([self.mergedriver, self._mdstate]),
+                )
+            )
         # Write out state items. In all cases, the value of the state map entry
         # is written as the contents of the record. The record type depends on
         # the type of state that is stored, and capital-letter records are used
         # to prevent older versions of Mercurial that do not support the feature
         # from loading them.
-        for filename, v in self._state.iteritems():
+        for filename, v in pycompat.iteritems(self._state):
             if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
                 # Driver-resolved merge. These are stored in 'D' records.
-                records.append((RECORD_MERGE_DRIVER_MERGE,
-                                '\0'.join([filename] + v)))
-            elif v[0] in (MERGE_RECORD_UNRESOLVED_PATH,
-                          MERGE_RECORD_RESOLVED_PATH):
+                records.append(
+                    (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
+                )
+            elif v[0] in (
+                MERGE_RECORD_UNRESOLVED_PATH,
+                MERGE_RECORD_RESOLVED_PATH,
+            ):
                 # Path conflicts. These are stored in 'P' records.  The current
                 # resolution state ('pu' or 'pr') is stored within the record.
-                records.append((RECORD_PATH_CONFLICT,
-                                '\0'.join([filename] + v)))
+                records.append(
+                    (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
+                )
             elif v[1] == nullhex or v[6] == nullhex:
                 # Change/Delete or Delete/Change conflicts. These are stored in
                 # 'C' records. v[1] is the local file, and is nullhex when the
                 # file is deleted locally ('dc'). v[6] is the remote file, and
                 # is nullhex when the file is deleted remotely ('cd').
-                records.append((RECORD_CHANGEDELETE_CONFLICT,
-                                '\0'.join([filename] + v)))
+                records.append(
+                    (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
+                )
             else:
                 # Normal files.  These are stored in 'F' records.
-                records.append((RECORD_MERGED,
-                                '\0'.join([filename] + v)))
-        for filename, extras in sorted(self._stateextras.iteritems()):
-            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
-                                  extras.iteritems())
-            records.append((RECORD_FILE_VALUES,
-                            '%s\0%s' % (filename, rawextras)))
+                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
+        for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
+            rawextras = b'\0'.join(
+                b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
+            )
+            records.append(
+                (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
+            )
         if self._labels is not None:
-            labels = '\0'.join(self._labels)
+            labels = b'\0'.join(self._labels)
             records.append((RECORD_LABELS, labels))
         return records
 
@@ -454,14 +481,14 @@
 
     def _writerecordsv1(self, records):
         """Write current state on disk in a version 1 file"""
-        f = self._repo.vfs(self.statepathv1, 'wb')
+        f = self._repo.vfs(self.statepathv1, b'wb')
         irecords = iter(records)
         lrecords = next(irecords)
         assert lrecords[0] == RECORD_LOCAL
-        f.write(hex(self._local) + '\n')
+        f.write(hex(self._local) + b'\n')
         for rtype, data in irecords:
             if rtype == RECORD_MERGED:
-                f.write('%s\n' % _droponode(data))
+                f.write(b'%s\n' % _droponode(data))
         f.close()
 
     def _writerecordsv2(self, records):
@@ -470,12 +497,12 @@
         See the docstring for _readrecordsv2 for why we use 't'."""
         # these are the records that all version 2 clients can read
         allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
-        f = self._repo.vfs(self.statepathv2, 'wb')
+        f = self._repo.vfs(self.statepathv2, b'wb')
         for key, data in records:
             assert len(key) == 1
             if key not in allowlist:
-                key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
-            format = '>sI%is' % len(data)
+                key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
+            format = b'>sI%is' % len(data)
             f.write(_pack(format, key, len(data), data))
         f.close()
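
The ``>sI%is`` framing used above, in miniature: a one-byte record type, a big-endian uint32 payload length, then the payload; ``_readrecordsv2`` peels these off in the same order (the payload contents here are illustrative)::

    data = b'file\0u\0rest-of-record'
    frame = _pack(b'>sI%is' % len(data), b'F', len(data), data)
    # rtype == frame[0:1]; length decoded from frame[1:5]; record follows
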
 
@@ -499,12 +526,18 @@
             localkey = nullhex
         else:
             localkey = mergestate.getlocalkey(fcl.path())
-            self._repo.vfs.write('merge/' + localkey, fcl.data())
-        self._state[fd] = [MERGE_RECORD_UNRESOLVED, localkey, fcl.path(),
-                           fca.path(), hex(fca.filenode()),
-                           fco.path(), hex(fco.filenode()),
-                           fcl.flags()]
-        self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
+            self._repo.vfs.write(b'merge/' + localkey, fcl.data())
+        self._state[fd] = [
+            MERGE_RECORD_UNRESOLVED,
+            localkey,
+            fcl.path(),
+            fca.path(),
+            hex(fca.filenode()),
+            fco.path(),
+            hex(fco.filenode()),
+            fcl.flags(),
+        ]
+        self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
         self._dirty = True
 
     def addpath(self, path, frename, forigin):
@@ -538,9 +571,11 @@
     def unresolved(self):
         """Obtain the paths of unresolved files."""
 
-        for f, entry in self._state.iteritems():
-            if entry[0] in (MERGE_RECORD_UNRESOLVED,
-                            MERGE_RECORD_UNRESOLVED_PATH):
+        for f, entry in pycompat.iteritems(self._state):
+            if entry[0] in (
+                MERGE_RECORD_UNRESOLVED,
+                MERGE_RECORD_UNRESOLVED_PATH,
+            ):
                 yield f
 
     def driverresolved(self):
@@ -555,14 +590,13 @@
 
     def _resolve(self, preresolve, dfile, wctx):
         """rerun merge process for file path `dfile`"""
-        if self[dfile] in (MERGE_RECORD_RESOLVED,
-                           MERGE_RECORD_DRIVER_RESOLVED):
+        if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
             return True, 0
         stateentry = self._state[dfile]
         state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
         octx = self._repo[self._other]
         extras = self.extras(dfile)
-        anccommitnode = extras.get('ancestorlinknode')
+        anccommitnode = extras.get(b'ancestorlinknode')
         if anccommitnode:
             actx = self._repo[anccommitnode]
         else:
@@ -574,32 +608,47 @@
         # "premerge" x flags
         flo = fco.flags()
         fla = fca.flags()
-        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
+        if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
             if fca.node() == nullid and flags != flo:
                 if preresolve:
                     self._repo.ui.warn(
-                        _('warning: cannot merge flags for %s '
-                          'without common ancestor - keeping local flags\n')
-                        % afile)
+                        _(
+                            b'warning: cannot merge flags for %s '
+                            b'without common ancestor - keeping local flags\n'
+                        )
+                        % afile
+                    )
             elif flags == fla:
                 flags = flo
         if preresolve:
             # restore local
             if localkey != nullhex:
-                f = self._repo.vfs('merge/' + localkey)
+                f = self._repo.vfs(b'merge/' + localkey)
                 wctx[dfile].write(f.read(), flags)
                 f.close()
             else:
                 wctx[dfile].remove(ignoremissing=True)
-            complete, r, deleted = filemerge.premerge(self._repo, wctx,
-                                                      self._local, lfile, fcd,
-                                                      fco, fca,
-                                                      labels=self._labels)
+            complete, r, deleted = filemerge.premerge(
+                self._repo,
+                wctx,
+                self._local,
+                lfile,
+                fcd,
+                fco,
+                fca,
+                labels=self._labels,
+            )
         else:
-            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
-                                                       self._local, lfile, fcd,
-                                                       fco, fca,
-                                                       labels=self._labels)
+            complete, r, deleted = filemerge.filemerge(
+                self._repo,
+                wctx,
+                self._local,
+                lfile,
+                fcd,
+                fco,
+                fca,
+                labels=self._labels,
+            )
         if r is None:
             # no real conflict
             del self._state[dfile]
@@ -619,9 +668,9 @@
                     # cd: remote picked (or otherwise deleted)
                     action = ACTION_REMOVE
             else:
-                if fcd.isabsent(): # dc: remote picked
+                if fcd.isabsent():  # dc: remote picked
                     action = ACTION_GET
-                elif fco.isabsent(): # cd: local picked
+                elif fco.isabsent():  # cd: local picked
                     if dfile in self.localctx:
                         action = ACTION_ADD_MODIFIED
                     else:
@@ -653,7 +702,7 @@
         """return counts for updated, merged and removed files in this
         session"""
         updated, merged, removed = 0, 0, 0
-        for r, action in self._results.itervalues():
+        for r, action in pycompat.itervalues(self._results):
             if r is None:
                 updated += 1
             elif r == 0:
@@ -676,9 +725,9 @@
             ACTION_ADD_MODIFIED: [],
             ACTION_GET: [],
         }
-        for f, (r, action) in self._results.iteritems():
+        for f, (r, action) in pycompat.iteritems(self._results):
             if action is not None:
-                actions[action].append((f, None, "merge result"))
+                actions[action].append((f, None, b"merge result"))
         return actions
 
     def recordactions(self):
@@ -704,16 +753,19 @@
         Meant for use by custom merge drivers."""
         self._results[f] = 0, ACTION_GET
 
+
 def _getcheckunknownconfig(repo, section, name):
     config = repo.ui.config(section, name)
-    valid = ['abort', 'ignore', 'warn']
+    valid = [b'abort', b'ignore', b'warn']
     if config not in valid:
-        validstr = ', '.join(["'" + v + "'" for v in valid])
-        raise error.ConfigError(_("%s.%s not valid "
-                                  "('%s' is none of %s)")
-                                % (section, name, config, validstr))
+        validstr = b', '.join([b"'" + v + b"'" for v in valid])
+        raise error.ConfigError(
+            _(b"%s.%s not valid ('%s' is none of %s)")
+            % (section, name, config, validstr)
+        )
     return config
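Editorial note: both call sites below feed this helper from the merge section of the user configuration, so the user-facing knobs look like this (illustrative hgrc snippet; both options default to abort)::

    [merge]
    # an untracked file would be overwritten by the update:
    # one of abort, ignore, or warn
    checkunknown = warn
    # the same decision for untracked-but-ignored files
    checkignored = ignore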
 
+
 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
     if wctx.isinmemory():
         # Nothing to do in IMM because nothing in the "working copy" can be an
@@ -725,10 +777,13 @@
 
     if f2 is None:
         f2 = f
-    return (repo.wvfs.audit.check(f)
+    return (
+        repo.wvfs.audit.check(f)
         and repo.wvfs.isfileorlink(f)
         and repo.dirstate.normalize(f) not in repo.dirstate
-        and mctx[f2].cmp(wctx[f]))
+        and mctx[f2].cmp(wctx[f])
+    )
+
 
 class _unknowndirschecker(object):
     """
@@ -740,6 +795,7 @@
     Returns the shortest path at which a conflict occurs, or None if there is
     no conflict.
     """
+
     def __init__(self):
         # A set of paths known to be good.  This prevents repeated checking of
         # dirs.  It will be updated with any new dirs that are checked and found
@@ -763,8 +819,10 @@
             if p in self._unknowndircache:
                 continue
             if repo.wvfs.audit.check(p):
-                if (repo.wvfs.isfileorlink(p)
-                        and repo.dirstate.normalize(p) not in repo.dirstate):
+                if (
+                    repo.wvfs.isfileorlink(p)
+                    and repo.dirstate.normalize(p) not in repo.dirstate
+                ):
                     return p
                 if not repo.wvfs.lexists(p):
                     self._missingdircache.add(p)
@@ -782,6 +840,7 @@
                         return f
         return None
 
+
 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
     """
     Considers any actions that care about the presence of conflicting unknown
@@ -792,18 +851,21 @@
     pathconflicts = set()
     warnconflicts = set()
     abortconflicts = set()
-    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
-    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
-    pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
+    unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
+    ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
+    pathconfig = repo.ui.configbool(
+        b'experimental', b'merge.checkpathconflicts'
+    )
     if not force:
+
         def collectconflicts(conflicts, config):
-            if config == 'abort':
+            if config == b'abort':
                 abortconflicts.update(conflicts)
-            elif config == 'warn':
+            elif config == b'warn':
                 warnconflicts.update(conflicts)
 
         checkunknowndirs = _unknowndirschecker()
-        for f, (m, args, msg) in actions.iteritems():
+        for f, (m, args, msg) in pycompat.iteritems(actions):
             if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
                 if _checkunknownfile(repo, wctx, mctx, f):
                     fileconflicts.add(f)
@@ -816,13 +878,12 @@
                     fileconflicts.add(f)
 
         allconflicts = fileconflicts | pathconflicts
-        ignoredconflicts = {c for c in allconflicts
-                            if repo.dirstate._ignore(c)}
+        ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
         unknownconflicts = allconflicts - ignoredconflicts
         collectconflicts(ignoredconflicts, ignoredconfig)
         collectconflicts(unknownconflicts, unknownconfig)
     else:
-        for f, (m, args, msg) in actions.iteritems():
+        for f, (m, args, msg) in pycompat.iteritems(actions):
             if m == ACTION_CREATED_MERGE:
                 fl2, anc = args
                 different = _checkunknownfile(repo, wctx, mctx, f)
@@ -844,43 +905,54 @@
                 #     don't like an abort happening in the middle of
                 #     merge.update.
                 if not different:
-                    actions[f] = (ACTION_GET, (fl2, False), 'remote created')
-                elif mergeforce or config == 'abort':
-                    actions[f] = (ACTION_MERGE, (f, f, None, False, anc),
-                                  'remote differs from untracked local')
-                elif config == 'abort':
+                    actions[f] = (ACTION_GET, (fl2, False), b'remote created')
+                elif mergeforce or config == b'abort':
+                    actions[f] = (
+                        ACTION_MERGE,
+                        (f, f, None, False, anc),
+                        b'remote differs from untracked local',
+                    )
+                elif config == b'abort':
                     abortconflicts.add(f)
                 else:
-                    if config == 'warn':
+                    if config == b'warn':
                         warnconflicts.add(f)
-                    actions[f] = (ACTION_GET, (fl2, True), 'remote created')
+                    actions[f] = (ACTION_GET, (fl2, True), b'remote created')
 
     for f in sorted(abortconflicts):
         warn = repo.ui.warn
         if f in pathconflicts:
             if repo.wvfs.isfileorlink(f):
-                warn(_("%s: untracked file conflicts with directory\n") % f)
+                warn(_(b"%s: untracked file conflicts with directory\n") % f)
             else:
-                warn(_("%s: untracked directory conflicts with file\n") % f)
+                warn(_(b"%s: untracked directory conflicts with file\n") % f)
         else:
-            warn(_("%s: untracked file differs\n") % f)
+            warn(_(b"%s: untracked file differs\n") % f)
     if abortconflicts:
-        raise error.Abort(_("untracked files in working directory "
-                            "differ from files in requested revision"))
+        raise error.Abort(
+            _(
+                b"untracked files in working directory "
+                b"differ from files in requested revision"
+            )
+        )
 
     for f in sorted(warnconflicts):
         if repo.wvfs.isfileorlink(f):
-            repo.ui.warn(_("%s: replacing untracked file\n") % f)
+            repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
         else:
-            repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)
+            repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
 
-    for f, (m, args, msg) in actions.iteritems():
+    for f, (m, args, msg) in pycompat.iteritems(actions):
         if m == ACTION_CREATED:
-            backup = (f in fileconflicts or f in pathconflicts or
-                      any(p in pathconflicts for p in util.finddirs(f)))
-            flags, = args
+            backup = (
+                f in fileconflicts
+                or f in pathconflicts
+                or any(p in pathconflicts for p in util.finddirs(f))
+            )
+            (flags,) = args
             actions[f] = (ACTION_GET, (flags, backup), msg)
 
+
 def _forgetremoved(wctx, mctx, branchmerge):
     """
     Forget removed files
@@ -902,15 +974,16 @@
         m = ACTION_REMOVE
     for f in wctx.deleted():
         if f not in mctx:
-            actions[f] = m, None, "forget deleted"
+            actions[f] = m, None, b"forget deleted"
 
     if not branchmerge:
         for f in wctx.removed():
             if f not in mctx:
-                actions[f] = ACTION_FORGET, None, "forget removed"
+                actions[f] = ACTION_FORGET, None, b"forget removed"
 
     return actions
 
+
 def _checkcollision(repo, wmf, actions):
     """
     Check for case-folding collisions.
@@ -922,7 +995,7 @@
         wmf = wmf.matches(narrowmatch)
         if actions:
             narrowactions = {}
-            for m, actionsfortype in actions.iteritems():
+            for m, actionsfortype in pycompat.iteritems(actions):
                 narrowactions[m] = []
                 for (f, args, msg) in actionsfortype:
                     if narrowmatch(f):
@@ -934,8 +1007,14 @@
 
     if actions:
         # KEEP and EXEC are no-op
-        for m in (ACTION_ADD, ACTION_ADD_MODIFIED, ACTION_FORGET, ACTION_GET,
-                  ACTION_CHANGED_DELETED, ACTION_DELETED_CHANGED):
+        for m in (
+            ACTION_ADD,
+            ACTION_ADD_MODIFIED,
+            ACTION_FORGET,
+            ACTION_GET,
+            ACTION_CHANGED_DELETED,
+            ACTION_DELETED_CHANGED,
+        ):
             for f, args, msg in actions[m]:
                 pmmf.add(f)
         for f, args, msg in actions[ACTION_REMOVE]:
@@ -957,33 +1036,40 @@
     for f in pmmf:
         fold = util.normcase(f)
         if fold in foldmap:
-            raise error.Abort(_("case-folding collision between %s and %s")
-                             % (f, foldmap[fold]))
+            raise error.Abort(
+                _(b"case-folding collision between %s and %s")
+                % (f, foldmap[fold])
+            )
         foldmap[fold] = f
 
     # check case-folding of directories
-    foldprefix = unfoldprefix = lastfull = ''
+    foldprefix = unfoldprefix = lastfull = b''
     for fold, f in sorted(foldmap.items()):
         if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
             # the folded prefix matches but actual casing is different
-            raise error.Abort(_("case-folding collision between "
-                                "%s and directory of %s") % (lastfull, f))
-        foldprefix = fold + '/'
-        unfoldprefix = f + '/'
+            raise error.Abort(
+                _(b"case-folding collision between %s and directory of %s")
+                % (lastfull, f)
+            )
+        foldprefix = fold + b'/'
+        unfoldprefix = f + b'/'
         lastfull = f
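Editorial note: both loops reduce to indexing every path by its case-folded form and refusing two distinct entries with the same key. A minimal standalone version of the file-level check (using str.lower() as a stand-in for the platform-aware util.normcase)::

    def check_casefold_collisions(paths):
        foldmap = {}
        for f in sorted(paths):
            fold = f.lower()  # stand-in for util.normcase
            if fold in foldmap:
                raise ValueError('case-folding collision between %s and %s'
                                 % (f, foldmap[fold]))
            foldmap[fold] = f
        return foldmap

The directory pass above then walks the sorted fold keys once, carrying the previous full path as a prefix, so a file and a differently-cased directory of another file are caught even when no two full paths collide.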
 
+
 def driverpreprocess(repo, ms, wctx, labels=None):
     """run the preprocess step of the merge driver, if any
 
     This is currently not implemented -- it's an extension point."""
     return True
 
+
 def driverconclude(repo, ms, wctx, labels=None):
     """run the conclude step of the merge driver, if any
 
     This is currently not implemented -- it's an extension point."""
     return True
 
+
 def _filesindirs(repo, manifest, dirs):
     """
     Generator that yields pairs of all the files in the manifest that are found
@@ -996,6 +1082,7 @@
                 yield f, p
                 break
 
+
 def checkpathconflicts(repo, wctx, mctx, actions):
     """
     Check if any actions introduce path conflicts in the repository, updating
@@ -1022,8 +1109,12 @@
     deletedfiles = set()
 
     for f, (m, args, msg) in actions.items():
-        if m in (ACTION_CREATED, ACTION_DELETED_CHANGED, ACTION_MERGE,
-                 ACTION_CREATED_MERGE):
+        if m in (
+            ACTION_CREATED,
+            ACTION_DELETED_CHANGED,
+            ACTION_MERGE,
+            ACTION_CREATED_MERGE,
+        ):
             # This action may create a new local file.
             createdfiledirs.update(util.finddirs(f))
             if mf.hasdir(f):
@@ -1054,10 +1145,12 @@
                 # A file is in a directory which aliases a local file.
                 # We will need to rename the local file.
                 localconflicts.add(p)
-        if p in actions and actions[p][0] in (ACTION_CREATED,
-                                              ACTION_DELETED_CHANGED,
-                                              ACTION_MERGE,
-                                              ACTION_CREATED_MERGE):
+        if p in actions and actions[p][0] in (
+            ACTION_CREATED,
+            ACTION_DELETED_CHANGED,
+            ACTION_MERGE,
+            ACTION_CREATED_MERGE,
+        ):
             # The file is in a directory which aliases a remote file.
             # This is an internal inconsistency within the remote
             # manifest.
@@ -1066,16 +1159,18 @@
     # Rename all local conflicting files that have not been deleted.
     for p in localconflicts:
         if p not in deletedfiles:
-            ctxname = bytes(wctx).rstrip('+')
+            ctxname = bytes(wctx).rstrip(b'+')
             pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
-            actions[pnew] = (ACTION_PATH_CONFLICT_RESOLVE, (p,),
-                             'local path conflict')
-            actions[p] = (ACTION_PATH_CONFLICT, (pnew, 'l'),
-                          'path conflict')
+            actions[pnew] = (
+                ACTION_PATH_CONFLICT_RESOLVE,
+                (p,),
+                b'local path conflict',
+            )
+            actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
 
     if remoteconflicts:
         # Check if all files in the conflicting directories have been removed.
-        ctxname = bytes(mctx).rstrip('+')
+        ctxname = bytes(mctx).rstrip(b'+')
         for f, p in _filesindirs(repo, mf, remoteconflicts):
             if f not in deletedfiles:
                 m, args, msg = actions[p]
@@ -1086,17 +1181,24 @@
                 else:
                     # Action was create, change to renamed get action.
                     fl = args[0]
-                    actions[pnew] = (ACTION_LOCAL_DIR_RENAME_GET, (p, fl),
-                                     'remote path conflict')
-                actions[p] = (ACTION_PATH_CONFLICT, (pnew, ACTION_REMOVE),
-                              'path conflict')
+                    actions[pnew] = (
+                        ACTION_LOCAL_DIR_RENAME_GET,
+                        (p, fl),
+                        b'remote path conflict',
+                    )
+                actions[p] = (
+                    ACTION_PATH_CONFLICT,
+                    (pnew, ACTION_REMOVE),
+                    b'path conflict',
+                )
                 remoteconflicts.remove(p)
                 break
 
     if invalidconflicts:
         for p in invalidconflicts:
-            repo.ui.warn(_("%s: is both a file and a directory\n") % p)
-        raise error.Abort(_("destination manifest contains path conflicts"))
+            repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
+        raise error.Abort(_(b"destination manifest contains path conflicts"))
+
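Editorial note: the invariant enforced here is that no path may be a file on one side of the merge and a directory prefix of tracked files on the other. A toy detection over plain path sets (hypothetical helper, ignoring the rename and action rewriting above)::

    def path_conflicts(local_files, incoming_files):
        local = set(local_files)
        localdirs = set()  # every ancestor directory implied by local files
        for f in local:
            parts = f.split('/')
            localdirs.update('/'.join(parts[:i]) for i in range(1, len(parts)))
        conflicts = set()
        for f in incoming_files:
            if f in localdirs:
                conflicts.add(f)  # incoming file where a local directory lives
            parts = f.split('/')
            if any('/'.join(parts[:i]) in local for i in range(1, len(parts))):
                conflicts.add(f)  # incoming file nested under a local file
        return conflicts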
 
 def _filternarrowactions(narrowmatch, branchmerge, actions):
     """
@@ -1105,28 +1207,44 @@
     Raise an exception if the merge cannot be completed because the repo is
     narrowed.
     """
-    nooptypes = {'k'} # TODO: handle with nonconflicttypes
-    nonconflicttypes = set('a am c cm f g r e'.split())
+    nooptypes = {b'k'}  # TODO: handle with nonconflicttypes
+    nonconflicttypes = set(b'a am c cm f g r e'.split())
     # We mutate the items in the dict during iteration, so iterate
     # over a copy.
     for f, action in list(actions.items()):
         if narrowmatch(f):
             pass
         elif not branchmerge:
-            del actions[f] # just updating, ignore changes outside clone
+            del actions[f]  # just updating, ignore changes outside clone
         elif action[0] in nooptypes:
-            del actions[f] # merge does not affect file
+            del actions[f]  # merge does not affect file
         elif action[0] in nonconflicttypes:
-            raise error.Abort(_('merge affects file \'%s\' outside narrow, '
-                                'which is not yet supported') % f,
-                              hint=_('merging in the other direction '
-                                     'may work'))
+            raise error.Abort(
+                _(
+                    b'merge affects file \'%s\' outside narrow, '
+                    b'which is not yet supported'
+                )
+                % f,
+                hint=_(b'merging in the other direction may work'),
+            )
         else:
-            raise error.Abort(_('conflict in file \'%s\' is outside '
-                                'narrow clone') % f)
+            raise error.Abort(
+                _(b'conflict in file \'%s\' is outside narrow clone') % f
+            )
+
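Editorial note: for readers decoding the one- and two-letter types in nooptypes and nonconflicttypes above, the correspondence to the ACTION_* constants defined at the top of this module is::

    b'k'  -> ACTION_KEEP            (no-op)
    b'a'  -> ACTION_ADD
    b'am' -> ACTION_ADD_MODIFIED
    b'c'  -> ACTION_CREATED
    b'cm' -> ACTION_CREATED_MERGE
    b'f'  -> ACTION_FORGET
    b'g'  -> ACTION_GET
    b'r'  -> ACTION_REMOVE
    b'e'  -> ACTION_EXEC

Anything else (merges, change/delete prompts, path conflicts) is a potential conflict and falls through to the abort branches.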
 
-def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
-                  acceptremote, followcopies, forcefulldiff=False):
+def manifestmerge(
+    repo,
+    wctx,
+    p2,
+    pa,
+    branchmerge,
+    force,
+    matcher,
+    acceptremote,
+    followcopies,
+    forcefulldiff=False,
+):
     """
     Merge wctx and p2 with ancestor pa and generate merge action list
 
@@ -1140,8 +1258,10 @@
     copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
 
     # manifests fetched in order are going to be faster, so prime the caches
-    [x.manifest() for x in
-     sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
+    [
+        x.manifest()
+        for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
+    ]
 
     if followcopies:
         ret = copies.mergecopies(repo, wctx, p2, pa)
@@ -1150,34 +1270,35 @@
     boolbm = pycompat.bytestr(bool(branchmerge))
     boolf = pycompat.bytestr(bool(force))
     boolm = pycompat.bytestr(bool(matcher))
-    repo.ui.note(_("resolving manifests\n"))
-    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
-                  % (boolbm, boolf, boolm))
-    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
+    repo.ui.note(_(b"resolving manifests\n"))
+    repo.ui.debug(
+        b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
+    )
+    repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
 
     m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
     copied = set(copy.values())
     copied.update(movewithdir.values())
 
-    if '.hgsubstate' in m1 and wctx.rev() is None:
+    if b'.hgsubstate' in m1 and wctx.rev() is None:
         # Check whether sub state is modified, and overwrite the manifest
         # to flag the change. If wctx is a committed revision, we shouldn't
         # care for the dirty state of the working directory.
         if any(wctx.sub(s).dirty() for s in wctx.substate):
-            m1['.hgsubstate'] = modifiednodeid
+            m1[b'.hgsubstate'] = modifiednodeid
 
     # Don't use m2-vs-ma optimization if:
     # - ma is the same as m1 or m2, which we're just going to diff again later
     # - The caller specifically asks for a full diff, which is useful during bid
     #   merge.
-    if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
+    if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
         # Identify which files are relevant to the merge, so we can limit the
         # total m1-vs-m2 diff to just those files. This has significant
         # performance benefits in large repositories.
         relevantfiles = set(ma.diff(m2).keys())
 
         # For copied and moved files, we need to add the source file too.
-        for copykey, copyvalue in copy.iteritems():
+        for copykey, copyvalue in pycompat.iteritems(copy):
             if copyvalue in relevantfiles:
                 relevantfiles.add(copykey)
         for movedirkey in movewithdir:
@@ -1188,85 +1309,123 @@
     diff = m1.diff(m2, match=matcher)
 
     actions = {}
-    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
-        if n1 and n2: # file exists on both local and remote side
+    for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
+        if n1 and n2:  # file exists on both local and remote side
             if f not in ma:
                 fa = copy.get(f, None)
                 if fa is not None:
-                    actions[f] = (ACTION_MERGE, (f, f, fa, False, pa.node()),
-                                  'both renamed from %s' % fa)
+                    actions[f] = (
+                        ACTION_MERGE,
+                        (f, f, fa, False, pa.node()),
+                        b'both renamed from %s' % fa,
+                    )
                 else:
-                    actions[f] = (ACTION_MERGE, (f, f, None, False, pa.node()),
-                                  'both created')
+                    actions[f] = (
+                        ACTION_MERGE,
+                        (f, f, None, False, pa.node()),
+                        b'both created',
+                    )
             else:
                 a = ma[f]
                 fla = ma.flags(f)
-                nol = 'l' not in fl1 + fl2 + fla
+                nol = b'l' not in fl1 + fl2 + fla
                 if n2 == a and fl2 == fla:
-                    actions[f] = (ACTION_KEEP, (), 'remote unchanged')
-                elif n1 == a and fl1 == fla: # local unchanged - use remote
-                    if n1 == n2: # optimization: keep local content
-                        actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
+                    actions[f] = (ACTION_KEEP, (), b'remote unchanged')
+                elif n1 == a and fl1 == fla:  # local unchanged - use remote
+                    if n1 == n2:  # optimization: keep local content
+                        actions[f] = (
+                            ACTION_EXEC,
+                            (fl2,),
+                            b'update permissions',
+                        )
                     else:
-                        actions[f] = (ACTION_GET, (fl2, False),
-                                      'remote is newer')
-                elif nol and n2 == a: # remote only changed 'x'
-                    actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
-                elif nol and n1 == a: # local only changed 'x'
-                    actions[f] = (ACTION_GET, (fl1, False), 'remote is newer')
-                else: # both changed something
-                    actions[f] = (ACTION_MERGE, (f, f, f, False, pa.node()),
-                                  'versions differ')
-        elif n1: # file exists only on local side
+                        actions[f] = (
+                            ACTION_GET,
+                            (fl2, False),
+                            b'remote is newer',
+                        )
+                elif nol and n2 == a:  # remote only changed 'x'
+                    actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
+                elif nol and n1 == a:  # local only changed 'x'
+                    actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
+                else:  # both changed something
+                    actions[f] = (
+                        ACTION_MERGE,
+                        (f, f, f, False, pa.node()),
+                        b'versions differ',
+                    )
+        elif n1:  # file exists only on local side
             if f in copied:
-                pass # we'll deal with it on m2 side
-            elif f in movewithdir: # directory rename, move local
+                pass  # we'll deal with it on m2 side
+            elif f in movewithdir:  # directory rename, move local
                 f2 = movewithdir[f]
                 if f2 in m2:
-                    actions[f2] = (ACTION_MERGE, (f, f2, None, True, pa.node()),
-                                   'remote directory rename, both created')
+                    actions[f2] = (
+                        ACTION_MERGE,
+                        (f, f2, None, True, pa.node()),
+                        b'remote directory rename, both created',
+                    )
                 else:
-                    actions[f2] = (ACTION_DIR_RENAME_MOVE_LOCAL, (f, fl1),
-                                   'remote directory rename - move from %s' % f)
+                    actions[f2] = (
+                        ACTION_DIR_RENAME_MOVE_LOCAL,
+                        (f, fl1),
+                        b'remote directory rename - move from %s' % f,
+                    )
             elif f in copy:
                 f2 = copy[f]
-                actions[f] = (ACTION_MERGE, (f, f2, f2, False, pa.node()),
-                              'local copied/moved from %s' % f2)
-            elif f in ma: # clean, a different, no remote
+                actions[f] = (
+                    ACTION_MERGE,
+                    (f, f2, f2, False, pa.node()),
+                    b'local copied/moved from %s' % f2,
+                )
+            elif f in ma:  # clean, a different, no remote
                 if n1 != ma[f]:
                     if acceptremote:
-                        actions[f] = (ACTION_REMOVE, None, 'remote delete')
+                        actions[f] = (ACTION_REMOVE, None, b'remote delete')
                     else:
-                        actions[f] = (ACTION_CHANGED_DELETED,
-                                      (f, None, f, False, pa.node()),
-                                      'prompt changed/deleted')
+                        actions[f] = (
+                            ACTION_CHANGED_DELETED,
+                            (f, None, f, False, pa.node()),
+                            b'prompt changed/deleted',
+                        )
                 elif n1 == addednodeid:
                     # This extra 'a' is added by working copy manifest to mark
                     # the file as locally added. We should forget it instead of
                     # deleting it.
-                    actions[f] = (ACTION_FORGET, None, 'remote deleted')
+                    actions[f] = (ACTION_FORGET, None, b'remote deleted')
                 else:
-                    actions[f] = (ACTION_REMOVE, None, 'other deleted')
-        elif n2: # file exists only on remote side
+                    actions[f] = (ACTION_REMOVE, None, b'other deleted')
+        elif n2:  # file exists only on remote side
             if f in copied:
-                pass # we'll deal with it on m1 side
+                pass  # we'll deal with it on m1 side
             elif f in movewithdir:
                 f2 = movewithdir[f]
                 if f2 in m1:
-                    actions[f2] = (ACTION_MERGE,
-                                   (f2, f, None, False, pa.node()),
-                                   'local directory rename, both created')
+                    actions[f2] = (
+                        ACTION_MERGE,
+                        (f2, f, None, False, pa.node()),
+                        b'local directory rename, both created',
+                    )
                 else:
-                    actions[f2] = (ACTION_LOCAL_DIR_RENAME_GET, (f, fl2),
-                                   'local directory rename - get from %s' % f)
+                    actions[f2] = (
+                        ACTION_LOCAL_DIR_RENAME_GET,
+                        (f, fl2),
+                        b'local directory rename - get from %s' % f,
+                    )
             elif f in copy:
                 f2 = copy[f]
                 if f2 in m2:
-                    actions[f] = (ACTION_MERGE, (f2, f, f2, False, pa.node()),
-                                  'remote copied from %s' % f2)
+                    actions[f] = (
+                        ACTION_MERGE,
+                        (f2, f, f2, False, pa.node()),
+                        b'remote copied from %s' % f2,
+                    )
                 else:
-                    actions[f] = (ACTION_MERGE, (f2, f, f2, True, pa.node()),
-                                  'remote moved from %s' % f2)
+                    actions[f] = (
+                        ACTION_MERGE,
+                        (f2, f, f2, True, pa.node()),
+                        b'remote moved from %s' % f2,
+                    )
             elif f not in ma:
                 # local unknown, remote created: the logic is described by the
                 # following table:
@@ -1280,31 +1439,39 @@
                 # Checking whether the files are different is expensive, so we
                 # don't do that when we can avoid it.
                 if not force:
-                    actions[f] = (ACTION_CREATED, (fl2,), 'remote created')
+                    actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
                 elif not branchmerge:
-                    actions[f] = (ACTION_CREATED, (fl2,), 'remote created')
+                    actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
                 else:
-                    actions[f] = (ACTION_CREATED_MERGE, (fl2, pa.node()),
-                                  'remote created, get or merge')
+                    actions[f] = (
+                        ACTION_CREATED_MERGE,
+                        (fl2, pa.node()),
+                        b'remote created, get or merge',
+                    )
             elif n2 != ma[f]:
                 df = None
                 for d in dirmove:
                     if f.startswith(d):
                         # new file added in a directory that was moved
-                        df = dirmove[d] + f[len(d):]
+                        df = dirmove[d] + f[len(d) :]
                         break
                 if df is not None and df in m1:
-                    actions[df] = (ACTION_MERGE, (df, f, f, False, pa.node()),
-                                   'local directory rename - respect move '
-                                   'from %s' % f)
+                    actions[df] = (
+                        ACTION_MERGE,
+                        (df, f, f, False, pa.node()),
+                        b'local directory rename - respect move '
+                        b'from %s' % f,
+                    )
                 elif acceptremote:
-                    actions[f] = (ACTION_CREATED, (fl2,), 'remote recreating')
+                    actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
                 else:
-                    actions[f] = (ACTION_DELETED_CHANGED,
-                                  (None, f, f, False, pa.node()),
-                                  'prompt deleted/changed')
+                    actions[f] = (
+                        ACTION_DELETED_CHANGED,
+                        (None, f, f, False, pa.node()),
+                        b'prompt deleted/changed',
+                    )
 
-    if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
+    if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
         # If we are merging, look for path conflicts.
         checkpathconflicts(repo, wctx, p2, actions)
 
@@ -1315,48 +1482,88 @@
 
     return actions, diverge, renamedelete
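Editorial note: stripped of copy tracing, flag handling, and the prompt/acceptremote variants, the per-file decision above is a three-way comparison of the local (n1), remote (n2), and ancestor nodeids. A toy restatement (hypothetical helper; None means the file is absent on that side)::

    def decide(n1, n2, a):
        if n1 and n2:                       # present on both sides
            if n2 == a:
                return 'keep local'         # remote did not change it
            if n1 == a:
                return 'get remote'         # local did not change it
            return 'merge'                  # both changed: real 3-way merge
        if n1:                              # present locally only
            if a is None:
                return 'keep'               # locally added
            return 'remove' if n1 == a else 'prompt changed/deleted'
        if n2:                              # present remotely only
            if a is None:
                return 'get'                # remotely added
            return None if n2 == a else 'prompt deleted/changed'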
 
+
 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
     """Resolves false conflicts where the nodeid changed but the content
        remained the same."""
     # We force a copy of actions.items() because we're going to mutate
     # actions as we resolve trivial conflicts.
     for f, (m, args, msg) in list(actions.items()):
-        if (m == ACTION_CHANGED_DELETED and f in ancestor
-            and not wctx[f].cmp(ancestor[f])):
+        if (
+            m == ACTION_CHANGED_DELETED
+            and f in ancestor
+            and not wctx[f].cmp(ancestor[f])
+        ):
             # local did change but ended up with same content
-            actions[f] = ACTION_REMOVE, None, 'prompt same'
-        elif (m == ACTION_DELETED_CHANGED and f in ancestor
-              and not mctx[f].cmp(ancestor[f])):
+            actions[f] = ACTION_REMOVE, None, b'prompt same'
+        elif (
+            m == ACTION_DELETED_CHANGED
+            and f in ancestor
+            and not mctx[f].cmp(ancestor[f])
+        ):
             # remote did change but ended up with same content
-            del actions[f] # don't get = keep local deleted
+            del actions[f]  # don't get = keep local deleted
+
 
-def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
-                     acceptremote, followcopies, matcher=None,
-                     mergeforce=False):
+def calculateupdates(
+    repo,
+    wctx,
+    mctx,
+    ancestors,
+    branchmerge,
+    force,
+    acceptremote,
+    followcopies,
+    matcher=None,
+    mergeforce=False,
+):
     """Calculate the actions needed to merge mctx into wctx using ancestors"""
     # Avoid cycle.
     from . import sparse
 
-    if len(ancestors) == 1: # default
+    if len(ancestors) == 1:  # default
         actions, diverge, renamedelete = manifestmerge(
-            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
-            acceptremote, followcopies)
+            repo,
+            wctx,
+            mctx,
+            ancestors[0],
+            branchmerge,
+            force,
+            matcher,
+            acceptremote,
+            followcopies,
+        )
         _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
 
-    else: # only when merge.preferancestor=* - the default
+    else:  # only when merge.preferancestor=* - the default
         repo.ui.note(
-            _("note: merging %s and %s using bids from ancestors %s\n") %
-            (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
-                                            for anc in ancestors)))
+            _(b"note: merging %s and %s using bids from ancestors %s\n")
+            % (
+                wctx,
+                mctx,
+                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
+            )
+        )
 
         # Call for bids
-        fbids = {} # mapping filename to bids (action method to list af actions)
+        fbids = (
+            {}
+        )  # mapping filename to bids (action method to list of actions)
         diverge, renamedelete = None, None
         for ancestor in ancestors:
-            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
+            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
             actions, diverge1, renamedelete1 = manifestmerge(
-                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
-                acceptremote, followcopies, forcefulldiff=True)
+                repo,
+                wctx,
+                mctx,
+                ancestor,
+                branchmerge,
+                force,
+                matcher,
+                acceptremote,
+                followcopies,
+                forcefulldiff=True,
+            )
             _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
 
             # Track the shortest set of warning on the theory that bid
@@ -1366,9 +1573,9 @@
             if renamedelete is None or len(renamedelete) < len(renamedelete1):
                 renamedelete = renamedelete1
 
-            for f, a in sorted(actions.iteritems()):
+            for f, a in sorted(pycompat.iteritems(actions)):
                 m, args, msg = a
-                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
+                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                 if f in fbids:
                     d = fbids[f]
                     if m in d:
@@ -1379,53 +1586,56 @@
                     fbids[f] = {m: [a]}
 
         # Pick the best bid for each file
-        repo.ui.note(_('\nauction for merging merge bids\n'))
+        repo.ui.note(_(b'\nauction for merging merge bids\n'))
         actions = {}
         for f, bids in sorted(fbids.items()):
             # bids is a mapping from action method to list of actions
             # Consensus?
-            if len(bids) == 1: # all bids are the same kind of method
+            if len(bids) == 1:  # all bids are the same kind of method
                 m, l = list(bids.items())[0]
-                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
-                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
+                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
+                    repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
                     actions[f] = l[0]
                     continue
             # If keep is an option, just do it.
             if ACTION_KEEP in bids:
-                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
+                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                 actions[f] = bids[ACTION_KEEP][0]
                 continue
             # If there are gets and they all agree [how could they not?], do it.
             if ACTION_GET in bids:
                 ga0 = bids[ACTION_GET][0]
                 if all(a == ga0 for a in bids[ACTION_GET][1:]):
-                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
+                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                     actions[f] = ga0
                     continue
             # TODO: Consider other simple actions such as mode changes
             # Handle inefficient democrazy.
-            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
+            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
             for m, l in sorted(bids.items()):
                 for _f, args, msg in l:
-                    repo.ui.note('  %s -> %s\n' % (msg, m))
+                    repo.ui.note(b'  %s -> %s\n' % (msg, m))
             # Pick random action. TODO: Instead, prompt user when resolving
             m, l = list(bids.items())[0]
-            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
-                         (f, m))
+            repo.ui.warn(
+                _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
+            )
             actions[f] = l[0]
             continue
-        repo.ui.note(_('end of auction\n\n'))
+        repo.ui.note(_(b'end of auction\n\n'))
 
     if wctx.rev() is None:
         fractions = _forgetremoved(wctx, mctx, branchmerge)
         actions.update(fractions)
 
-    prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
-                                                actions)
+    prunedactions = sparse.filterupdatesactions(
+        repo, wctx, mctx, branchmerge, actions
+    )
     _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
 
     return prunedactions, diverge, renamedelete
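Editorial note: condensed, the auction resolves each file in this order: a unanimous bid wins; otherwise an ACTION_KEEP bid wins; otherwise identical ACTION_GET bids win; otherwise an arbitrary bid is taken with a warning. A standalone sketch of that priority (hypothetical pick_bid over the fbids values built above)::

    def pick_bid(bids):
        # bids: {action_type: [action, ...]}, one entry per proposing ancestor
        if len(bids) == 1:
            m, l = list(bids.items())[0]
            if all(a == l[0] for a in l[1:]):
                return l[0]                 # consensus across all ancestors
        if b'k' in bids:
            return bids[b'k'][0]            # keeping the working copy is safe
        if b'g' in bids and all(a == bids[b'g'][0] for a in bids[b'g'][1:]):
            return bids[b'g'][0]            # every bid fetches the same content
        return list(bids.items())[0][1][0]  # ambiguous: arbitrary pick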
 
+
 def _getcwd():
     try:
         return encoding.getcwd()
@@ -1434,6 +1644,7 @@
             return None
         raise
 
+
 def batchremove(repo, wctx, actions):
     """apply removes to the working directory
 
@@ -1443,15 +1654,16 @@
     cwd = _getcwd()
     i = 0
     for f, args, msg in actions:
-        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
+        repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
         if verbose:
-            repo.ui.note(_("removing %s\n") % f)
+            repo.ui.note(_(b"removing %s\n") % f)
         wctx[f].audit()
         try:
             wctx[f].remove(ignoremissing=True)
         except OSError as inst:
-            repo.ui.warn(_("update failed to remove %s: %s!\n") %
-                         (f, inst.strerror))
+            repo.ui.warn(
+                _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
+            )
         if i == 100:
             yield i, f
             i = 0
@@ -1462,8 +1674,14 @@
     if cwd and not _getcwd():
         # cwd was removed in the course of removing files; print a helpful
         # warning.
-        repo.ui.warn(_("current directory was removed\n"
-                       "(consider changing to repo root: %s)\n") % repo.root)
+        repo.ui.warn(
+            _(
+                b"current directory was removed\n"
+                b"(consider changing to repo root: %s)\n"
+            )
+            % repo.root
+        )
+
 
 def batchget(repo, mctx, wctx, wantfiledata, actions):
     """apply gets to the working directory
@@ -1482,9 +1700,9 @@
     i = 0
     with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
         for f, (flags, backup), msg in actions:
-            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
+            repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
             if verbose:
-                repo.ui.note(_("getting %s\n") % f)
+                repo.ui.note(_(b"getting %s\n") % f)
 
             if backup:
                 # If a file or directory exists with the same name, back that
@@ -1501,15 +1719,18 @@
                     util.rename(repo.wjoin(conflicting), orig)
             wfctx = wctx[f]
             wfctx.clearunknown()
-            atomictemp = ui.configbool("experimental", "update.atomic-file")
-            size = wfctx.write(fctx(f).data(), flags,
-                               backgroundclose=True,
-                               atomictemp=atomictemp)
+            atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
+            size = wfctx.write(
+                fctx(f).data(),
+                flags,
+                backgroundclose=True,
+                atomictemp=atomictemp,
+            )
             if wantfiledata:
                 s = wfctx.lstat()
                 mode = s.st_mode
                 mtime = s[stat.ST_MTIME]
-                filedata[f] = ((mode, size, mtime)) # for dirstate.normal
+                filedata[f] = (mode, size, mtime)  # for dirstate.normal
             if i == 100:
                 yield False, (i, f)
                 i = 0
@@ -1518,6 +1739,7 @@
         yield False, (i, f)
     yield True, filedata
 
+
 def _prefetchfiles(repo, ctx, actions):
     """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
     of merge actions.  ``ctx`` is the context being merged in."""
@@ -1525,13 +1747,23 @@
     # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
     # don't touch the context to be merged in.  'cd' is skipped, because
     # changed/deleted never resolves to something from the remote side.
-    oplist = [actions[a] for a in (ACTION_GET, ACTION_DELETED_CHANGED,
-                                   ACTION_LOCAL_DIR_RENAME_GET, ACTION_MERGE)]
+    oplist = [
+        actions[a]
+        for a in (
+            ACTION_GET,
+            ACTION_DELETED_CHANGED,
+            ACTION_LOCAL_DIR_RENAME_GET,
+            ACTION_MERGE,
+        )
+    ]
     prefetch = scmutil.prefetchfiles
     matchfiles = scmutil.matchfiles
-    prefetch(repo, [ctx.rev()],
-             matchfiles(repo,
-                        [f for sublist in oplist for f, args, msg in sublist]))
+    prefetch(
+        repo,
+        [ctx.rev()],
+        matchfiles(repo, [f for sublist in oplist for f, args, msg in sublist]),
+    )
+
 
 @attr.s(frozen=True)
 class updateresult(object):
@@ -1541,30 +1773,40 @@
     unresolvedcount = attr.ib()
 
     def isempty(self):
-        return not (self.updatedcount or self.mergedcount
-                    or self.removedcount or self.unresolvedcount)
+        return not (
+            self.updatedcount
+            or self.mergedcount
+            or self.removedcount
+            or self.unresolvedcount
+        )
+
 
 def emptyactions():
     """create an actions dict, to be populated and passed to applyupdates()"""
-    return dict((m, [])
-                for m in (
-                    ACTION_ADD,
-                    ACTION_ADD_MODIFIED,
-                    ACTION_FORGET,
-                    ACTION_GET,
-                    ACTION_CHANGED_DELETED,
-                    ACTION_DELETED_CHANGED,
-                    ACTION_REMOVE,
-                    ACTION_DIR_RENAME_MOVE_LOCAL,
-                    ACTION_LOCAL_DIR_RENAME_GET,
-                    ACTION_MERGE,
-                    ACTION_EXEC,
-                    ACTION_KEEP,
-                    ACTION_PATH_CONFLICT,
-                    ACTION_PATH_CONFLICT_RESOLVE))
+    return dict(
+        (m, [])
+        for m in (
+            ACTION_ADD,
+            ACTION_ADD_MODIFIED,
+            ACTION_FORGET,
+            ACTION_GET,
+            ACTION_CHANGED_DELETED,
+            ACTION_DELETED_CHANGED,
+            ACTION_REMOVE,
+            ACTION_DIR_RENAME_MOVE_LOCAL,
+            ACTION_LOCAL_DIR_RENAME_GET,
+            ACTION_MERGE,
+            ACTION_EXEC,
+            ACTION_KEEP,
+            ACTION_PATH_CONFLICT,
+            ACTION_PATH_CONFLICT_RESOLVE,
+        )
+    )
 
-def applyupdates(repo, actions, wctx, mctx, overwrite, wantfiledata,
-                 labels=None):
+
+def applyupdates(
+    repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
+):
     """apply the merge action list to the working directory
 
     wctx is the working copy context
@@ -1590,12 +1832,12 @@
     mergeactions.extend(actions[ACTION_MERGE])
     for f, args, msg in mergeactions:
         f1, f2, fa, move, anc = args
-        if f == '.hgsubstate': # merged internally
+        if f == b'.hgsubstate':  # merged internally
             continue
         if f1 is None:
             fcl = filemerge.absentfilectx(wctx, fa)
         else:
-            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
+            repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
             fcl = wctx[f1]
         if f2 is None:
             fco = filemerge.absentfilectx(mctx, fa)
@@ -1614,29 +1856,34 @@
     # remove renamed files after safely stored
     for f in moves:
         if wctx[f].lexists():
-            repo.ui.debug("removing %s\n" % f)
+            repo.ui.debug(b"removing %s\n" % f)
             wctx[f].audit()
             wctx[f].remove()
 
-    numupdates = sum(len(l) for m, l in actions.items()
-                     if m != ACTION_KEEP)
-    progress = repo.ui.makeprogress(_('updating'), unit=_('files'),
-                                    total=numupdates)
+    numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
+    progress = repo.ui.makeprogress(
+        _(b'updating'), unit=_(b'files'), total=numupdates
+    )
 
-    if [a for a in actions[ACTION_REMOVE] if a[0] == '.hgsubstate']:
+    if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
         subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
 
     # record path conflicts
     for f, args, msg in actions[ACTION_PATH_CONFLICT]:
         f1, fo = args
         s = repo.ui.status
-        s(_("%s: path conflict - a file or link has the same name as a "
-            "directory\n") % f)
-        if fo == 'l':
-            s(_("the local file has been renamed to %s\n") % f1)
+        s(
+            _(
+                b"%s: path conflict - a file or link has the same name as a "
+                b"directory\n"
+            )
+            % f
+        )
+        if fo == b'l':
+            s(_(b"the local file has been renamed to %s\n") % f1)
         else:
-            s(_("the remote file has been renamed to %s\n") % f1)
-        s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
+            s(_(b"the remote file has been renamed to %s\n") % f1)
+        s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
         ms.addpath(f, f1, fo)
         progress.increment(item=f)
 
@@ -1645,31 +1892,37 @@
     cost = 0 if wctx.isinmemory() else 0.001
 
     # remove in parallel (must come before resolving path conflicts and getting)
-    prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
-                         actions[ACTION_REMOVE])
+    prog = worker.worker(
+        repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
+    )
     for i, item in prog:
         progress.increment(step=i, item=item)
     removed = len(actions[ACTION_REMOVE])
 
     # resolve path conflicts (must come before getting)
     for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
-        repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
-        f0, = args
+        repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
+        (f0,) = args
         if wctx[f0].lexists():
-            repo.ui.note(_("moving %s to %s\n") % (f0, f))
+            repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
             wctx[f].audit()
             wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
             wctx[f0].remove()
         progress.increment(item=f)
 
     # get in parallel.
-    threadsafe = repo.ui.configbool('experimental',
-                                    'worker.wdir-get-thread-safe')
-    prog = worker.worker(repo.ui, cost, batchget,
-                         (repo, mctx, wctx, wantfiledata),
-                         actions[ACTION_GET],
-                         threadsafe=threadsafe,
-                         hasretval=True)
+    threadsafe = repo.ui.configbool(
+        b'experimental', b'worker.wdir-get-thread-safe'
+    )
+    prog = worker.worker(
+        repo.ui,
+        cost,
+        batchget,
+        (repo, mctx, wctx, wantfiledata),
+        actions[ACTION_GET],
+        threadsafe=threadsafe,
+        hasretval=True,
+    )
     getfiledata = {}
     for final, res in prog:
         if final:
@@ -1679,35 +1932,35 @@
             progress.increment(step=i, item=item)
     updated = len(actions[ACTION_GET])
 
-    if [a for a in actions[ACTION_GET] if a[0] == '.hgsubstate']:
+    if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
         subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
 
     # forget (manifest only, just log it) (must come first)
     for f, args, msg in actions[ACTION_FORGET]:
-        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
+        repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
         progress.increment(item=f)
 
     # re-add (manifest only, just log it)
     for f, args, msg in actions[ACTION_ADD]:
-        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
+        repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
         progress.increment(item=f)
 
     # re-add/mark as modified (manifest only, just log it)
     for f, args, msg in actions[ACTION_ADD_MODIFIED]:
-        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
+        repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
         progress.increment(item=f)
 
     # keep (noop, just log it)
     for f, args, msg in actions[ACTION_KEEP]:
-        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
+        repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
         # no progress
 
     # directory rename, move local
     for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
-        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
+        repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
         progress.increment(item=f)
         f0, flags = args
-        repo.ui.note(_("moving %s to %s\n") % (f0, f))
+        repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
         wctx[f].audit()
         wctx[f].write(wctx.filectx(f0).data(), flags)
         wctx[f0].remove()
@@ -1715,20 +1968,20 @@
 
     # local directory rename, get
     for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
-        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
+        repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
         progress.increment(item=f)
         f0, flags = args
-        repo.ui.note(_("getting %s to %s\n") % (f0, f))
+        repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
         wctx[f].write(mctx.filectx(f0).data(), flags)
         updated += 1
 
     # exec
     for f, args, msg in actions[ACTION_EXEC]:
-        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
+        repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
         progress.increment(item=f)
-        flags, = args
+        (flags,) = args
         wctx[f].audit()
-        wctx[f].setflags('l' in flags, 'x' in flags)
+        wctx[f].setflags(b'l' in flags, b'x' in flags)
         updated += 1
 
     # the ordering is important here -- ms.mergedriver will raise if the merge
@@ -1738,8 +1991,9 @@
 
     if usemergedriver:
         if wctx.isinmemory():
-            raise error.InMemoryMergeConflictsError("in-memory merge does not "
-                                                    "support mergedriver")
+            raise error.InMemoryMergeConflictsError(
+                b"in-memory merge does not support mergedriver"
+            )
         ms.commit()
         proceed = driverpreprocess(repo, ms, wctx, labels=labels)
         # the driver might leave some files unresolved
@@ -1747,8 +2001,9 @@
         if not proceed:
             # XXX setting unresolved to at least 1 is a hack to make sure we
             # error out
-            return updateresult(updated, merged, removed,
-                                max(len(unresolvedf), 1))
+            return updateresult(
+                updated, merged, removed, max(len(unresolvedf), 1)
+            )
         newactions = []
         for f, args, msg in mergeactions:
             if f in unresolvedf:
@@ -1759,11 +2014,12 @@
         # premerge
         tocomplete = []
         for f, args, msg in mergeactions:
-            repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
+            repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
             progress.increment(item=f)
-            if f == '.hgsubstate': # subrepo states need updating
-                subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
-                                     overwrite, labels)
+            if f == b'.hgsubstate':  # subrepo states need updating
+                subrepoutil.submerge(
+                    repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
+                )
                 continue
             wctx[f].audit()
             complete, r = ms.preresolve(f, wctx)
@@ -1773,7 +2029,7 @@
 
         # merge
         for f, args, msg in tocomplete:
-            repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
+            repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
             progress.increment(item=f, total=numupdates)
             ms.resolve(f, wctx)
 
@@ -1782,8 +2038,11 @@
 
     unresolved = ms.unresolvedcount()
 
-    if (usemergedriver and not unresolved
-        and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS):
+    if (
+        usemergedriver
+        and not unresolved
+        and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
+    ):
         if not driverconclude(repo, ms, wctx, labels=labels):
             # XXX setting unresolved to at least 1 is a hack to make sure we
             # error out
@@ -1799,7 +2058,7 @@
     extraactions = ms.actions()
     if extraactions:
         mfiles = set(a[0] for a in actions[ACTION_MERGE])
-        for k, acts in extraactions.iteritems():
+        for k, acts in pycompat.iteritems(extraactions):
             actions[k].extend(acts)
             if k == ACTION_GET and wantfiledata:
                 # no filedata until mergestate is updated to provide it
@@ -1823,15 +2082,17 @@
             # those lists aren't consulted again.
             mfiles.difference_update(a[0] for a in acts)
 
-        actions[ACTION_MERGE] = [a for a in actions[ACTION_MERGE]
-                                 if a[0] in mfiles]
+        actions[ACTION_MERGE] = [
+            a for a in actions[ACTION_MERGE] if a[0] in mfiles
+        ]
 
     progress.complete()
     assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
     return updateresult(updated, merged, removed, unresolved), getfiledata
 
+
 def recordupdates(repo, actions, branchmerge, getfiledata):
-    "record merge actions to the dirstate"
+    b"record merge actions to the dirstate"
     # remove (must come first)
     for f, args, msg in actions.get(ACTION_REMOVE, []):
         if branchmerge:
@@ -1845,7 +2106,7 @@
 
     # resolve path conflicts
     for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
-        f0, = args
+        (f0,) = args
         origf0 = repo.dirstate.copied(f0) or f0
         repo.dirstate.add(f)
         repo.dirstate.copy(origf0, f)
@@ -1888,7 +2149,7 @@
             # We've done a branch merge, mark this file as merged
             # so that we properly record the merger later
             repo.dirstate.merge(f)
-            if f1 != f2: # copy/rename
+            if f1 != f2:  # copy/rename
                 if move:
                     repo.dirstate.remove(f1)
                 if f1 != f:
@@ -1901,7 +2162,7 @@
             # of that file some time in the past. Thus our
             # merge will appear as a normal local file
             # modification.
-            if f2 == f: # file not locally copied/moved
+            if f2 == f:  # file not locally copied/moved
                 repo.dirstate.normallookup(f)
             if move:
                 repo.dirstate.drop(f1)
@@ -1926,9 +2187,26 @@
         else:
             repo.dirstate.normal(f)
 
-def update(repo, node, branchmerge, force, ancestor=None,
-           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
-           updatecheck=None, wc=None):
+
+UPDATECHECK_ABORT = b'abort'  # handled at higher layers
+UPDATECHECK_NONE = b'none'
+UPDATECHECK_LINEAR = b'linear'
+UPDATECHECK_NO_CONFLICT = b'noconflict'
+
+
+def update(
+    repo,
+    node,
+    branchmerge,
+    force,
+    ancestor=None,
+    mergeancestor=False,
+    labels=None,
+    matcher=None,
+    mergeforce=False,
+    updatecheck=None,
+    wc=None,
+):
     """
     Perform a merge between the working directory and the given node
 
@@ -1992,8 +2270,23 @@
         # and force=False pass a value for updatecheck. We may want to allow
         # updatecheck='abort' to better support some of these callers.
         if updatecheck is None:
-            updatecheck = 'linear'
-        assert updatecheck in ('none', 'linear', 'noconflict')
+            updatecheck = UPDATECHECK_LINEAR
+        if updatecheck not in (
+            UPDATECHECK_NONE,
+            UPDATECHECK_LINEAR,
+            UPDATECHECK_NO_CONFLICT,
+        ):
+            raise ValueError(
+                r'Invalid updatecheck %r (can accept %r)'
+                % (
+                    updatecheck,
+                    (
+                        UPDATECHECK_NONE,
+                        UPDATECHECK_LINEAR,
+                        UPDATECHECK_NO_CONFLICT,
+                    ),
+                )
+            )
     # If we're doing a partial update, we need to skip updating
     # the dirstate, so make a note of any partial-ness to the
     # update here.
@@ -2010,7 +2303,7 @@
         if ancestor is not None:
             pas = [repo[ancestor]]
         else:
-            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
+            if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
                 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
             else:
@@ -2022,35 +2315,47 @@
         ### check phase
         if not overwrite:
             if len(pl) > 1:
-                raise error.Abort(_("outstanding uncommitted merge"))
+                raise error.Abort(_(b"outstanding uncommitted merge"))
             ms = mergestate.read(repo)
             if list(ms.unresolved()):
-                raise error.Abort(_("outstanding merge conflicts"))
+                raise error.Abort(
+                    _(b"outstanding merge conflicts"),
+                    hint=_(b"use 'hg resolve' to resolve"),
+                )
         if branchmerge:
             if pas == [p2]:
-                raise error.Abort(_("merging with a working directory ancestor"
-                                   " has no effect"))
+                raise error.Abort(
+                    _(
+                        b"merging with a working directory ancestor"
+                        b" has no effect"
+                    )
+                )
             elif pas == [p1]:
                 if not mergeancestor and wc.branch() == p2.branch():
-                    raise error.Abort(_("nothing to merge"),
-                                     hint=_("use 'hg update' "
-                                            "or check 'hg heads'"))
+                    raise error.Abort(
+                        _(b"nothing to merge"),
+                        hint=_(b"use 'hg update' or check 'hg heads'"),
+                    )
             if not force and (wc.files() or wc.deleted()):
-                raise error.Abort(_("uncommitted changes"),
-                                 hint=_("use 'hg status' to list changes"))
+                raise error.Abort(
+                    _(b"uncommitted changes"),
+                    hint=_(b"use 'hg status' to list changes"),
+                )
             if not wc.isinmemory():
                 for s in sorted(wc.substate):
                     wc.sub(s).bailifchanged()
 
         elif not overwrite:
-            if p1 == p2: # no-op update
+            if p1 == p2:  # no-op update
                 # call the hooks and exit early
-                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
-                repo.hook('update', parent1=xp2, parent2='', error=0)
+                repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
+                repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
                 return updateresult(0, 0, 0, 0)
 
-            if (updatecheck == 'linear' and
-                    pas not in ([p1], [p2])):  # nonlinear
+            if updatecheck == UPDATECHECK_LINEAR and pas not in (
+                [p1],
+                [p2],
+            ):  # nonlinear
                 dirty = wc.dirty(missing=True)
                 if dirty:
                     # Branching is a bit strange to ensure we do the minimal
@@ -2058,10 +2363,10 @@
                     foreground = obsutil.foreground(repo, [p1.node()])
                     # note: the <node> variable contains a random identifier
                     if repo[node].node() in foreground:
-                        pass # allow updating to successors
+                        pass  # allow updating to successors
                     else:
-                        msg = _("uncommitted changes")
-                        hint = _("commit or update --clean to discard changes")
+                        msg = _(b"uncommitted changes")
+                        hint = _(b"commit or update --clean to discard changes")
                         raise error.UpdateAbort(msg, hint=hint)
                 else:
                     # Allow jumping branches if clean and specific rev given
@@ -2073,7 +2378,7 @@
             pas = [p1]
 
         # deprecated config: merge.followcopies
-        followcopies = repo.ui.configbool('merge', 'followcopies')
+        followcopies = repo.ui.configbool(b'merge', b'followcopies')
         if overwrite:
             followcopies = False
         elif not pas[0]:
@@ -2083,83 +2388,128 @@
 
         ### calculate phase
         actionbyfile, diverge, renamedelete = calculateupdates(
-            repo, wc, p2, pas, branchmerge, force, mergeancestor,
-            followcopies, matcher=matcher, mergeforce=mergeforce)
+            repo,
+            wc,
+            p2,
+            pas,
+            branchmerge,
+            force,
+            mergeancestor,
+            followcopies,
+            matcher=matcher,
+            mergeforce=mergeforce,
+        )
 
-        if updatecheck == 'noconflict':
-            for f, (m, args, msg) in actionbyfile.iteritems():
-                if m not in (ACTION_GET, ACTION_KEEP, ACTION_EXEC,
-                             ACTION_REMOVE, ACTION_PATH_CONFLICT_RESOLVE):
-                    msg = _("conflicting changes")
-                    hint = _("commit or update --clean to discard changes")
+        if updatecheck == UPDATECHECK_NO_CONFLICT:
+            for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
+                if m not in (
+                    ACTION_GET,
+                    ACTION_KEEP,
+                    ACTION_EXEC,
+                    ACTION_REMOVE,
+                    ACTION_PATH_CONFLICT_RESOLVE,
+                ):
+                    msg = _(b"conflicting changes")
+                    hint = _(b"commit or update --clean to discard changes")
                     raise error.Abort(msg, hint=hint)
 
         # Prompt and create actions. Most of this is in the resolve phase
         # already, but we can't handle .hgsubstate in filemerge or
         # subrepoutil.submerge yet so we have to keep prompting for it.
-        if '.hgsubstate' in actionbyfile:
-            f = '.hgsubstate'
+        if b'.hgsubstate' in actionbyfile:
+            f = b'.hgsubstate'
             m, args, msg = actionbyfile[f]
             prompts = filemerge.partextras(labels)
-            prompts['f'] = f
+            prompts[b'f'] = f
             if m == ACTION_CHANGED_DELETED:
                 if repo.ui.promptchoice(
-                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
-                      "use (c)hanged version or (d)elete?"
-                      "$$ &Changed $$ &Delete") % prompts, 0):
-                    actionbyfile[f] = (ACTION_REMOVE, None, 'prompt delete')
+                    _(
+                        b"local%(l)s changed %(f)s which other%(o)s deleted\n"
+                        b"use (c)hanged version or (d)elete?"
+                        b"$$ &Changed $$ &Delete"
+                    )
+                    % prompts,
+                    0,
+                ):
+                    actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
                 elif f in p1:
-                    actionbyfile[f] = (ACTION_ADD_MODIFIED, None, 'prompt keep')
+                    actionbyfile[f] = (
+                        ACTION_ADD_MODIFIED,
+                        None,
+                        b'prompt keep',
+                    )
                 else:
-                    actionbyfile[f] = (ACTION_ADD, None, 'prompt keep')
+                    actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
             elif m == ACTION_DELETED_CHANGED:
                 f1, f2, fa, move, anc = args
                 flags = p2[f2].flags()
-                if repo.ui.promptchoice(
-                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
-                      "use (c)hanged version or leave (d)eleted?"
-                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
-                    actionbyfile[f] = (ACTION_GET, (flags, False),
-                                       'prompt recreating')
+                if (
+                    repo.ui.promptchoice(
+                        _(
+                            b"other%(o)s changed %(f)s which local%(l)s deleted\n"
+                            b"use (c)hanged version or leave (d)eleted?"
+                            b"$$ &Changed $$ &Deleted"
+                        )
+                        % prompts,
+                        0,
+                    )
+                    == 0
+                ):
+                    actionbyfile[f] = (
+                        ACTION_GET,
+                        (flags, False),
+                        b'prompt recreating',
+                    )
                 else:
                     del actionbyfile[f]
 
         # Convert to dictionary-of-lists format
         actions = emptyactions()
-        for f, (m, args, msg) in actionbyfile.iteritems():
+        for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
             if m not in actions:
                 actions[m] = []
             actions[m].append((f, args, msg))
 
         if not util.fscasesensitive(repo.path):
             # check collision between files only in p2 for clean update
-            if (not branchmerge and
-                (force or not wc.dirty(missing=True, branch=False))):
+            if not branchmerge and (
+                force or not wc.dirty(missing=True, branch=False)
+            ):
                 _checkcollision(repo, p2.manifest(), None)
             else:
                 _checkcollision(repo, wc.manifest(), actions)
 
         # divergent renames
-        for f, fl in sorted(diverge.iteritems()):
-            repo.ui.warn(_("note: possible conflict - %s was renamed "
-                           "multiple times to:\n") % f)
+        for f, fl in sorted(pycompat.iteritems(diverge)):
+            repo.ui.warn(
+                _(
+                    b"note: possible conflict - %s was renamed "
+                    b"multiple times to:\n"
+                )
+                % f
+            )
             for nf in sorted(fl):
-                repo.ui.warn(" %s\n" % nf)
+                repo.ui.warn(b" %s\n" % nf)
 
         # rename and delete
-        for f, fl in sorted(renamedelete.iteritems()):
-            repo.ui.warn(_("note: possible conflict - %s was deleted "
-                           "and renamed to:\n") % f)
+        for f, fl in sorted(pycompat.iteritems(renamedelete)):
+            repo.ui.warn(
+                _(
+                    b"note: possible conflict - %s was deleted "
+                    b"and renamed to:\n"
+                )
+                % f
+            )
             for nf in sorted(fl):
-                repo.ui.warn(" %s\n" % nf)
+                repo.ui.warn(b" %s\n" % nf)
 
         ### apply phase
-        if not branchmerge: # just jump to the new rev
-            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
+        if not branchmerge:  # just jump to the new rev
+            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
         if not partial and not wc.isinmemory():
-            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
+            repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
             # note that we're in the middle of an update
-            repo.vfs.write('updatestate', p2.hex())
+            repo.vfs.write(b'updatestate', p2.hex())
 
         # Advertise fsmonitor when its presence could be useful.
         #
@@ -2171,41 +2521,49 @@
         #
         # We only allow on Linux and MacOS because that's where fsmonitor is
         # considered stable.
-        fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
-        fsmonitorthreshold = repo.ui.configint('fsmonitor',
-                                               'warn_update_file_count')
+        fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
+        fsmonitorthreshold = repo.ui.configint(
+            b'fsmonitor', b'warn_update_file_count'
+        )
         try:
             # avoid cycle: extensions -> cmdutil -> merge
             from . import extensions
-            extensions.find('fsmonitor')
-            fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
+
+            extensions.find(b'fsmonitor')
+            fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
             # We intentionally don't look at whether fsmonitor has disabled
             # itself because a) fsmonitor may have already printed a warning
             # b) we only care about the config state here.
         except KeyError:
             fsmonitorenabled = False
 
-        if (fsmonitorwarning
-                and not fsmonitorenabled
-                and p1.node() == nullid
-                and len(actions[ACTION_GET]) >= fsmonitorthreshold
-                and pycompat.sysplatform.startswith(('linux', 'darwin'))):
+        if (
+            fsmonitorwarning
+            and not fsmonitorenabled
+            and p1.node() == nullid
+            and len(actions[ACTION_GET]) >= fsmonitorthreshold
+            and pycompat.sysplatform.startswith((b'linux', b'darwin'))
+        ):
             repo.ui.warn(
-                _('(warning: large working directory being used without '
-                  'fsmonitor enabled; enable fsmonitor to improve performance; '
-                  'see "hg help -e fsmonitor")\n'))
+                _(
+                    b'(warning: large working directory being used without '
+                    b'fsmonitor enabled; enable fsmonitor to improve performance; '
+                    b'see "hg help -e fsmonitor")\n'
+                )
+            )
 
         updatedirstate = not partial and not wc.isinmemory()
         wantfiledata = updatedirstate and not branchmerge
-        stats, getfiledata = applyupdates(repo, actions, wc, p2, overwrite,
-                                          wantfiledata, labels=labels)
+        stats, getfiledata = applyupdates(
+            repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
+        )
 
         if updatedirstate:
             with repo.dirstate.parentchange():
                 repo.setparents(fp1, fp2)
                 recordupdates(repo, actions, branchmerge, getfiledata)
                 # update completed, clear state
-                util.unlink(repo.vfs.join('updatestate'))
+                util.unlink(repo.vfs.join(b'updatestate'))
 
                 if not branchmerge:
                     repo.dirstate.setbranch(p2.branch())
@@ -2216,12 +2574,15 @@
         sparse.prunetemporaryincludes(repo)
 
     if not partial:
-        repo.hook('update', parent1=xp1, parent2=xp2,
-                  error=stats.unresolvedcount)
+        repo.hook(
+            b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
+        )
     return stats
 
-def graft(repo, ctx, pctx, labels=None, keepparent=False,
-          keepconflictparent=False):
+
+def graft(
+    repo, ctx, pctx, labels=None, keepparent=False, keepconflictparent=False
+):
     """Do a graft-like merge.
 
     This is a merge where the merge ancestor is chosen such that one
@@ -2243,11 +2604,17 @@
     # to copy commits), and 2) informs update that the incoming changes are
     # newer than the destination so it doesn't prompt about "remote changed foo
     # which local deleted".
-    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
+    mergeancestor = repo.changelog.isancestor(repo[b'.'].node(), ctx.node())
 
-    stats = update(repo, ctx.node(), True, True, pctx.node(),
-                   mergeancestor=mergeancestor, labels=labels)
-
+    stats = update(
+        repo,
+        ctx.node(),
+        True,
+        True,
+        pctx.node(),
+        mergeancestor=mergeancestor,
+        labels=labels,
+    )
 
     if keepconflictparent and stats.unresolvedcount:
         pother = ctx.node()
@@ -2259,14 +2626,22 @@
             pother = parents[0].node()
 
     with repo.dirstate.parentchange():
-        repo.setparents(repo['.'].node(), pother)
+        repo.setparents(repo[b'.'].node(), pother)
         repo.dirstate.write(repo.currenttransaction())
         # fix up dirstate for copies and renames
         copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
     return stats
 
-def purge(repo, matcher, ignored=False, removeemptydirs=True,
-          removefiles=True, abortonerror=False, noop=False):
+
+def purge(
+    repo,
+    matcher,
+    ignored=False,
+    removeemptydirs=True,
+    removefiles=True,
+    abortonerror=False,
+    noop=False,
+):
     """Purge the working directory of untracked files.
 
     ``matcher`` is a matcher configured to scan the working directory -
@@ -2292,11 +2667,11 @@
         try:
             removefn(path)
         except OSError:
-            m = _('%s cannot be removed') % path
+            m = _(b'%s cannot be removed') % path
             if abortonerror:
                 raise error.Abort(m)
             else:
-                repo.ui.warn(_('warning: %s\n') % m)
+                repo.ui.warn(_(b'warning: %s\n') % m)
 
     # There's no API to copy a matcher. So mutate the passed matcher and
     # restore it when we're done.
@@ -2315,7 +2690,7 @@
         if removefiles:
             for f in sorted(status.unknown + status.ignored):
                 if not noop:
-                    repo.ui.note(_('removing file %s\n') % f)
+                    repo.ui.note(_(b'removing file %s\n') % f)
                     remove(repo.wvfs.unlink, f)
                 res.append(f)
 
@@ -2323,7 +2698,7 @@
             for f in sorted(directories, reverse=True):
                 if matcher(f) and not repo.wvfs.listdir(f):
                     if not noop:
-                        repo.ui.note(_('removing directory %s\n') % f)
+                        repo.ui.note(_(b'removing directory %s\n') % f)
                         remove(repo.wvfs.rmdir, f)
                     res.append(f)
 
--- a/mercurial/mergeutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/mergeutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,14 +9,16 @@
 
 from .i18n import _
 
-from . import (
-    error,
-)
+from . import error
+
 
 def checkunresolved(ms):
     if list(ms.unresolved()):
-        raise error.Abort(_("unresolved merge conflicts "
-                            "(see 'hg help resolve')"))
-    if ms.mdstate() != 's' or list(ms.driverresolved()):
-        raise error.Abort(_('driver-resolved merge conflicts'),
-                          hint=_('run "hg resolve --all" to resolve'))
+        raise error.Abort(
+            _(b"unresolved merge conflicts (see 'hg help resolve')")
+        )
+    if ms.mdstate() != b's' or list(ms.driverresolved()):
+        raise error.Abort(
+            _(b'driver-resolved merge conflicts'),
+            hint=_(b'run "hg resolve --all" to resolve'),
+        )
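
Many hunks in this series swap ``dict.iteritems()`` for
``pycompat.iteritems()``, since ``iteritems`` does not exist on Python 3
dicts. A rough sketch of what such a shim can look like (illustrative only;
the real helper lives in ``mercurial/pycompat.py`` and differs in detail)::

   import sys

   if sys.version_info[0] >= 3:
       def iteritems(d):
           # Python 3: items() is already a lazy view object.
           return iter(d.items())
   else:
       def iteritems(d):
           # Python 2: avoid materializing an intermediate list.
           return d.iteritems()
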
--- a/mercurial/minifileset.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/minifileset.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,47 +15,54 @@
     pycompat,
 )
 
+
 def _sizep(x):
     # i18n: "size" is a keyword
-    expr = filesetlang.getstring(x, _("size requires an expression"))
+    expr = filesetlang.getstring(x, _(b"size requires an expression"))
     return fileset.sizematcher(expr)
 
+
 def _compile(tree):
     if not tree:
-        raise error.ParseError(_("missing argument"))
+        raise error.ParseError(_(b"missing argument"))
     op = tree[0]
-    if op == 'withstatus':
+    if op == b'withstatus':
         return _compile(tree[1])
-    elif op in {'symbol', 'string', 'kindpat'}:
-        name = filesetlang.getpattern(tree, {'path'}, _('invalid file pattern'))
-        if name.startswith('**'): # file extension test, ex. "**.tar.gz"
+    elif op in {b'symbol', b'string', b'kindpat'}:
+        name = filesetlang.getpattern(
+            tree, {b'path'}, _(b'invalid file pattern')
+        )
+        if name.startswith(b'**'):  # file extension test, ex. "**.tar.gz"
             ext = name[2:]
             for c in pycompat.bytestr(ext):
-                if c in '*{}[]?/\\':
-                    raise error.ParseError(_('reserved character: %s') % c)
+                if c in b'*{}[]?/\\':
+                    raise error.ParseError(_(b'reserved character: %s') % c)
             return lambda n, s: n.endswith(ext)
-        elif name.startswith('path:'): # directory or full path test
-            p = name[5:] # prefix
+        elif name.startswith(b'path:'):  # directory or full path test
+            p = name[5:]  # prefix
             pl = len(p)
-            f = lambda n, s: n.startswith(p) and (len(n) == pl
-                                                  or n[pl:pl + 1] == '/')
+            f = lambda n, s: n.startswith(p) and (
+                len(n) == pl or n[pl : pl + 1] == b'/'
+            )
             return f
-        raise error.ParseError(_("unsupported file pattern: %s") % name,
-                               hint=_('paths must be prefixed with "path:"'))
-    elif op in {'or', 'patterns'}:
+        raise error.ParseError(
+            _(b"unsupported file pattern: %s") % name,
+            hint=_(b'paths must be prefixed with "path:"'),
+        )
+    elif op in {b'or', b'patterns'}:
         funcs = [_compile(x) for x in tree[1:]]
         return lambda n, s: any(f(n, s) for f in funcs)
-    elif op == 'and':
+    elif op == b'and':
         func1 = _compile(tree[1])
         func2 = _compile(tree[2])
         return lambda n, s: func1(n, s) and func2(n, s)
-    elif op == 'not':
+    elif op == b'not':
         return lambda n, s: not _compile(tree[1])(n, s)
-    elif op == 'func':
+    elif op == b'func':
         symbols = {
-            'all': lambda n, s: True,
-            'none': lambda n, s: False,
-            'size': lambda n, s: _sizep(tree[2])(s),
+            b'all': lambda n, s: True,
+            b'none': lambda n, s: False,
+            b'size': lambda n, s: _sizep(tree[2])(s),
         }
 
         name = filesetlang.getsymbol(tree[1])
@@ -63,14 +70,17 @@
             return symbols[name]
 
         raise error.UnknownIdentifier(name, symbols.keys())
-    elif op == 'minus':     # equivalent to 'x and not y'
+    elif op == b'minus':  # equivalent to 'x and not y'
         func1 = _compile(tree[1])
         func2 = _compile(tree[2])
         return lambda n, s: func1(n, s) and not func2(n, s)
-    elif op == 'list':
-        raise error.ParseError(_("can't use a list in this context"),
-                               hint=_('see \'hg help "filesets.x or y"\''))
-    raise error.ProgrammingError('illegal tree: %r' % (tree,))
+    elif op == b'list':
+        raise error.ParseError(
+            _(b"can't use a list in this context"),
+            hint=_(b'see \'hg help "filesets.x or y"\''),
+        )
+    raise error.ProgrammingError(b'illegal tree: %r' % (tree,))
+
 
 def compile(text):
     """generate a function (path, size) -> bool from filter specification.
--- a/mercurial/minirst.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/minirst.py	Mon Oct 21 11:09:48 2019 -0400
@@ -28,24 +28,28 @@
     pycompat,
     url,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
+
 
 def section(s):
-    return "%s\n%s\n\n" % (s, "\"" * encoding.colwidth(s))
+    return b"%s\n%s\n\n" % (s, b"\"" * encoding.colwidth(s))
+
 
 def subsection(s):
-    return "%s\n%s\n\n" % (s, '=' * encoding.colwidth(s))
+    return b"%s\n%s\n\n" % (s, b'=' * encoding.colwidth(s))
+
 
 def subsubsection(s):
-    return "%s\n%s\n\n" % (s, "-" * encoding.colwidth(s))
+    return b"%s\n%s\n\n" % (s, b"-" * encoding.colwidth(s))
+
 
 def subsubsubsection(s):
-    return "%s\n%s\n\n" % (s, "." * encoding.colwidth(s))
+    return b"%s\n%s\n\n" % (s, b"." * encoding.colwidth(s))
+
 
 def subsubsubsubsection(s):
-    return "%s\n%s\n\n" % (s, "'" * encoding.colwidth(s))
+    return b"%s\n%s\n\n" % (s, b"'" * encoding.colwidth(s))
+
 
 def replace(text, substs):
     '''
@@ -70,8 +74,10 @@
         utext = utext.replace(f.decode("ascii"), t.decode("ascii"))
     return utext.encode(pycompat.sysstr(encoding.encoding))
 
+
 _blockre = re.compile(br"\n(?:\s*\n)+")
 
+
 def findblocks(text):
     """Find continuous blocks of lines in text.
 
@@ -79,14 +85,15 @@
     has an 'indent' field and a 'lines' field.
     """
     blocks = []
-    for b in _blockre.split(text.lstrip('\n').rstrip()):
+    for b in _blockre.split(text.lstrip(b'\n').rstrip()):
         lines = b.splitlines()
         if lines:
             indent = min((len(l) - len(l.lstrip())) for l in lines)
             lines = [l[indent:] for l in lines]
-            blocks.append({'indent': indent, 'lines': lines})
+            blocks.append({b'indent': indent, b'lines': lines})
     return blocks
 
+
 def findliteralblocks(blocks):
     """Finds literal blocks and adds a 'type' field to the blocks.
 
@@ -104,62 +111,69 @@
         #    +---------------------------+
         #    | indented literal block    |
         #    +---------------------------+
-        blocks[i]['type'] = 'paragraph'
-        if blocks[i]['lines'][-1].endswith('::') and i + 1 < len(blocks):
-            indent = blocks[i]['indent']
-            adjustment = blocks[i + 1]['indent'] - indent
+        blocks[i][b'type'] = b'paragraph'
+        if blocks[i][b'lines'][-1].endswith(b'::') and i + 1 < len(blocks):
+            indent = blocks[i][b'indent']
+            adjustment = blocks[i + 1][b'indent'] - indent
 
-            if blocks[i]['lines'] == ['::']:
+            if blocks[i][b'lines'] == [b'::']:
                 # Expanded form: remove block
                 del blocks[i]
                 i -= 1
-            elif blocks[i]['lines'][-1].endswith(' ::'):
+            elif blocks[i][b'lines'][-1].endswith(b' ::'):
                 # Partially minimized form: remove space and both
                 # colons.
-                blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-3]
-            elif (len(blocks[i]['lines']) == 1 and
-                  blocks[i]['lines'][0].lstrip(' ').startswith('.. ') and
-                  blocks[i]['lines'][0].find(' ', 3) == -1):
+                blocks[i][b'lines'][-1] = blocks[i][b'lines'][-1][:-3]
+            elif (
+                len(blocks[i][b'lines']) == 1
+                and blocks[i][b'lines'][0].lstrip(b' ').startswith(b'.. ')
+                and blocks[i][b'lines'][0].find(b' ', 3) == -1
+            ):
                 # directive on its own line, not a literal block
                 i += 1
                 continue
             else:
                 # Fully minimized form: remove just one colon.
-                blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-1]
+                blocks[i][b'lines'][-1] = blocks[i][b'lines'][-1][:-1]
 
             # List items are formatted with a hanging indent. We must
             # correct for this here while we still have the original
             # information on the indentation of the subsequent literal
             # blocks available.
-            m = _bulletre.match(blocks[i]['lines'][0])
+            m = _bulletre.match(blocks[i][b'lines'][0])
             if m:
                 indent += m.end()
                 adjustment -= m.end()
 
             # Mark the following indented blocks.
-            while i + 1 < len(blocks) and blocks[i + 1]['indent'] > indent:
-                blocks[i + 1]['type'] = 'literal'
-                blocks[i + 1]['indent'] -= adjustment
+            while i + 1 < len(blocks) and blocks[i + 1][b'indent'] > indent:
+                blocks[i + 1][b'type'] = b'literal'
+                blocks[i + 1][b'indent'] -= adjustment
                 i += 1
         i += 1
     return blocks
 
+
 _bulletre = re.compile(br'(\*|-|[0-9A-Za-z]+\.|\(?[0-9A-Za-z]+\)|\|) ')
-_optionre = re.compile(br'^(-([a-zA-Z0-9]), )?(--[a-z0-9-]+)'
-                       br'((.*)  +)(.*)$')
+_optionre = re.compile(
+    br'^(-([a-zA-Z0-9]), )?(--[a-z0-9-]+)' br'((.*)  +)(.*)$'
+)
 _fieldre = re.compile(br':(?![: ])([^:]*)(?<! ):[ ]+(.*)')
 _definitionre = re.compile(br'[^ ]')
 _tablere = re.compile(br'(=+\s+)*=+')
 
+
 def splitparagraphs(blocks):
     """Split paragraphs into lists."""
     # Tuples with (list type, item regexp, single line items?). Order
     # matters: definition lists have the least specific regexp and must
     # come last.
-    listtypes = [('bullet', _bulletre, True),
-                 ('option', _optionre, True),
-                 ('field', _fieldre, True),
-                 ('definition', _definitionre, False)]
+    listtypes = [
+        (b'bullet', _bulletre, True),
+        (b'option', _optionre, True),
+        (b'field', _fieldre, True),
+        (b'definition', _definitionre, False),
+    ]
 
     def match(lines, i, itemre, singleline):
         """Does itemre match an item at line i?
@@ -168,89 +182,99 @@
         item (but only if singleline is True).
         """
         line1 = lines[i]
-        line2 = i + 1 < len(lines) and lines[i + 1] or ''
+        line2 = i + 1 < len(lines) and lines[i + 1] or b''
         if not itemre.match(line1):
             return False
         if singleline:
-            return line2 == '' or line2[0:1] == ' ' or itemre.match(line2)
+            return line2 == b'' or line2[0:1] == b' ' or itemre.match(line2)
         else:
-            return line2.startswith(' ')
+            return line2.startswith(b' ')
 
     i = 0
     while i < len(blocks):
-        if blocks[i]['type'] == 'paragraph':
-            lines = blocks[i]['lines']
+        if blocks[i][b'type'] == b'paragraph':
+            lines = blocks[i][b'lines']
             for type, itemre, singleline in listtypes:
                 if match(lines, 0, itemre, singleline):
                     items = []
                     for j, line in enumerate(lines):
                         if match(lines, j, itemre, singleline):
-                            items.append({'type': type, 'lines': [],
-                                          'indent': blocks[i]['indent']})
-                        items[-1]['lines'].append(line)
-                    blocks[i:i + 1] = items
+                            items.append(
+                                {
+                                    b'type': type,
+                                    b'lines': [],
+                                    b'indent': blocks[i][b'indent'],
+                                }
+                            )
+                        items[-1][b'lines'].append(line)
+                    blocks[i : i + 1] = items
                     break
         i += 1
     return blocks
 
+
 _fieldwidth = 14
 
+
 def updatefieldlists(blocks):
     """Find key for field lists."""
     i = 0
     while i < len(blocks):
-        if blocks[i]['type'] != 'field':
+        if blocks[i][b'type'] != b'field':
             i += 1
             continue
 
         j = i
-        while j < len(blocks) and blocks[j]['type'] == 'field':
-            m = _fieldre.match(blocks[j]['lines'][0])
+        while j < len(blocks) and blocks[j][b'type'] == b'field':
+            m = _fieldre.match(blocks[j][b'lines'][0])
             key, rest = m.groups()
-            blocks[j]['lines'][0] = rest
-            blocks[j]['key'] = key
+            blocks[j][b'lines'][0] = rest
+            blocks[j][b'key'] = key
             j += 1
 
         i = j + 1
 
     return blocks
 
+
 def updateoptionlists(blocks):
     i = 0
     while i < len(blocks):
-        if blocks[i]['type'] != 'option':
+        if blocks[i][b'type'] != b'option':
             i += 1
             continue
 
         optstrwidth = 0
         j = i
-        while j < len(blocks) and blocks[j]['type'] == 'option':
-            m = _optionre.match(blocks[j]['lines'][0])
+        while j < len(blocks) and blocks[j][b'type'] == b'option':
+            m = _optionre.match(blocks[j][b'lines'][0])
 
             shortoption = m.group(2)
             group3 = m.group(3)
             longoption = group3[2:].strip()
             desc = m.group(6).strip()
             longoptionarg = m.group(5).strip()
-            blocks[j]['lines'][0] = desc
+            blocks[j][b'lines'][0] = desc
 
-            noshortop = ''
+            noshortop = b''
             if not shortoption:
-                noshortop = '   '
+                noshortop = b'   '
 
-            opt = "%s%s" %   (shortoption and "-%s " % shortoption or '',
-                            ("%s--%s %s") % (noshortop, longoption,
-                                             longoptionarg))
+            opt = b"%s%s" % (
+                shortoption and b"-%s " % shortoption or b'',
+                b"%s--%s %s" % (noshortop, longoption, longoptionarg),
+            )
             opt = opt.rstrip()
-            blocks[j]['optstr'] = opt
+            blocks[j][b'optstr'] = opt
             optstrwidth = max(optstrwidth, encoding.colwidth(opt))
             j += 1
 
         for block in blocks[i:j]:
-            block['optstrwidth'] = optstrwidth
+            block[b'optstrwidth'] = optstrwidth
         i = j + 1
     return blocks
 
+
 def prunecontainers(blocks, keep):
     """Prune unwanted containers.
 
@@ -267,14 +291,15 @@
         # +---+                               |
         #     | blocks                        |
         #     +-------------------------------+
-        if (blocks[i]['type'] == 'paragraph' and
-            blocks[i]['lines'][0].startswith('.. container::')):
-            indent = blocks[i]['indent']
-            adjustment = blocks[i + 1]['indent'] - indent
-            containertype = blocks[i]['lines'][0][15:]
+        if blocks[i][b'type'] == b'paragraph' and blocks[i][b'lines'][
+            0
+        ].startswith(b'.. container::'):
+            indent = blocks[i][b'indent']
+            adjustment = blocks[i + 1][b'indent'] - indent
+            containertype = blocks[i][b'lines'][0][15:]
             prune = True
             for c in keep:
-                if c in containertype.split('.'):
+                if c in containertype.split(b'.'):
                     prune = False
             if prune:
                 pruned.append(containertype)
@@ -283,17 +308,19 @@
             del blocks[i]
             j = i
             i -= 1
-            while j < len(blocks) and blocks[j]['indent'] > indent:
+            while j < len(blocks) and blocks[j][b'indent'] > indent:
                 if prune:
                     del blocks[j]
                 else:
-                    blocks[j]['indent'] -= adjustment
+                    blocks[j][b'indent'] -= adjustment
                     j += 1
         i += 1
     return blocks, pruned
 
+
 _sectionre = re.compile(br"""^([-=`:.'"~^_*+#])\1+$""")
 
+
 def findtables(blocks):
     '''Find simple tables
 
@@ -309,42 +336,47 @@
         #  1    2   3
         #  x    y   z
         # === ==== ===
-        if (block['type'] == 'paragraph' and
-            len(block['lines']) > 2 and
-            _tablere.match(block['lines'][0]) and
-            block['lines'][0] == block['lines'][-1]):
-            block['type'] = 'table'
-            block['header'] = False
-            div = block['lines'][0]
+        if (
+            block[b'type'] == b'paragraph'
+            and len(block[b'lines']) > 2
+            and _tablere.match(block[b'lines'][0])
+            and block[b'lines'][0] == block[b'lines'][-1]
+        ):
+            block[b'type'] = b'table'
+            block[b'header'] = False
+            div = block[b'lines'][0]
 
             # column markers are ASCII so we can calculate column
             # position in bytes
-            columns = [x for x in pycompat.xrange(len(div))
-                       if div[x:x + 1] == '=' and (x == 0 or
-                                                   div[x - 1:x] == ' ')]
+            columns = [
+                x
+                for x in pycompat.xrange(len(div))
+                if div[x : x + 1] == b'=' and (x == 0 or div[x - 1 : x] == b' ')
+            ]
             rows = []
-            for l in block['lines'][1:-1]:
+            for l in block[b'lines'][1:-1]:
                 if l == div:
-                    block['header'] = True
+                    block[b'header'] = True
                     continue
                 row = []
                 # we measure columns not in bytes or characters but in
                 # colwidth which makes things tricky
-                pos = columns[0] # leading whitespace is bytes
+                pos = columns[0]  # leading whitespace is bytes
                 for n, start in enumerate(columns):
                     if n + 1 < len(columns):
                         width = columns[n + 1] - start
-                        v = encoding.getcols(l, pos, width) # gather columns
-                        pos += len(v) # calculate byte position of end
+                        v = encoding.getcols(l, pos, width)  # gather columns
+                        pos += len(v)  # calculate byte position of end
                         row.append(v.strip())
                     else:
                         row.append(l[pos:].strip())
                 rows.append(row)
 
-            block['table'] = rows
+            block[b'table'] = rows
 
     return blocks
 
+
 def findsections(blocks):
     """Finds sections.
 
@@ -358,33 +390,38 @@
         # | Section title                |
         # | -------------                |
         # +------------------------------+
-        if (block['type'] == 'paragraph' and
-            len(block['lines']) == 2 and
-            encoding.colwidth(block['lines'][0]) == len(block['lines'][1]) and
-            _sectionre.match(block['lines'][1])):
-            block['underline'] = block['lines'][1][0:1]
-            block['type'] = 'section'
-            del block['lines'][1]
+        if (
+            block[b'type'] == b'paragraph'
+            and len(block[b'lines']) == 2
+            and encoding.colwidth(block[b'lines'][0]) == len(block[b'lines'][1])
+            and _sectionre.match(block[b'lines'][1])
+        ):
+            block[b'underline'] = block[b'lines'][1][0:1]
+            block[b'type'] = b'section'
+            del block[b'lines'][1]
     return blocks
 
+
 def inlineliterals(blocks):
-    substs = [('``', '"')]
+    substs = [(b'``', b'"')]
     for b in blocks:
-        if b['type'] in ('paragraph', 'section'):
-            b['lines'] = [replace(l, substs) for l in b['lines']]
+        if b[b'type'] in (b'paragraph', b'section'):
+            b[b'lines'] = [replace(l, substs) for l in b[b'lines']]
     return blocks
 
+
 def hgrole(blocks):
-    substs = [(':hg:`', "'hg "), ('`', "'")]
+    substs = [(b':hg:`', b"'hg "), (b'`', b"'")]
     for b in blocks:
-        if b['type'] in ('paragraph', 'section'):
+        if b[b'type'] in (b'paragraph', b'section'):
             # Turn :hg:`command` into "hg command". This also works
             # when there is a line break in the command and relies on
             # the fact that we have no stray back-quotes in the input
             # (run the blocks through inlineliterals first).
-            b['lines'] = [replace(l, substs) for l in b['lines']]
+            b[b'lines'] = [replace(l, substs) for l in b[b'lines']]
     return blocks
 
+
 def addmargins(blocks):
     """Adds empty blocks for vertical spacing.
 
@@ -393,26 +430,31 @@
     """
     i = 1
     while i < len(blocks):
-        if (blocks[i]['type'] == blocks[i - 1]['type'] and
-            blocks[i]['type'] in ('bullet', 'option', 'field')):
+        if blocks[i][b'type'] == blocks[i - 1][b'type'] and blocks[i][
+            b'type'
+        ] in (b'bullet', b'option', b'field',):
             i += 1
-        elif not blocks[i - 1]['lines']:
+        elif not blocks[i - 1][b'lines']:
             # no lines in previous block, do not separate
             i += 1
         else:
-            blocks.insert(i, {'lines': [''], 'indent': 0, 'type': 'margin'})
+            blocks.insert(
+                i, {b'lines': [b''], b'indent': 0, b'type': b'margin'}
+            )
             i += 2
     return blocks
 
+
 def prunecomments(blocks):
     """Remove comments."""
     i = 0
     while i < len(blocks):
         b = blocks[i]
-        if b['type'] == 'paragraph' and (b['lines'][0].startswith('.. ') or
-                                         b['lines'] == ['..']):
+        if b[b'type'] == b'paragraph' and (
+            b[b'lines'][0].startswith(b'.. ') or b[b'lines'] == [b'..']
+        ):
             del blocks[i]
-            if i < len(blocks) and blocks[i]['type'] == 'margin':
+            if i < len(blocks) and blocks[i][b'type'] == b'margin':
                 del blocks[i]
         else:
             i += 1
@@ -426,136 +468,151 @@
     """
     admonitions = admonitions or _admonitiontitles.keys()
 
-    admonitionre = re.compile(br'\.\. (%s)::' % '|'.join(sorted(admonitions)),
-                              flags=re.IGNORECASE)
+    admonitionre = re.compile(
+        br'\.\. (%s)::' % b'|'.join(sorted(admonitions)), flags=re.IGNORECASE
+    )
 
     i = 0
     while i < len(blocks):
-        m = admonitionre.match(blocks[i]['lines'][0])
+        m = admonitionre.match(blocks[i][b'lines'][0])
         if m:
-            blocks[i]['type'] = 'admonition'
-            admonitiontitle = blocks[i]['lines'][0][3:m.end() - 2].lower()
+            blocks[i][b'type'] = b'admonition'
+            admonitiontitle = blocks[i][b'lines'][0][3 : m.end() - 2].lower()
 
-            firstline = blocks[i]['lines'][0][m.end() + 1:]
+            firstline = blocks[i][b'lines'][0][m.end() + 1 :]
             if firstline:
-                blocks[i]['lines'].insert(1, '   ' + firstline)
+                blocks[i][b'lines'].insert(1, b'   ' + firstline)
 
-            blocks[i]['admonitiontitle'] = admonitiontitle
-            del blocks[i]['lines'][0]
+            blocks[i][b'admonitiontitle'] = admonitiontitle
+            del blocks[i][b'lines'][0]
         i = i + 1
     return blocks
 
+
 _admonitiontitles = {
-    'attention': _('Attention:'),
-    'caution': _('Caution:'),
-    'danger': _('!Danger!'),
-    'error': _('Error:'),
-    'hint': _('Hint:'),
-    'important': _('Important:'),
-    'note': _('Note:'),
-    'tip': _('Tip:'),
-    'warning': _('Warning!'),
+    b'attention': _(b'Attention:'),
+    b'caution': _(b'Caution:'),
+    b'danger': _(b'!Danger!'),
+    b'error': _(b'Error:'),
+    b'hint': _(b'Hint:'),
+    b'important': _(b'Important:'),
+    b'note': _(b'Note:'),
+    b'tip': _(b'Tip:'),
+    b'warning': _(b'Warning!'),
 }
 
+
 def formatoption(block, width):
-    desc = ' '.join(map(bytes.strip, block['lines']))
-    colwidth = encoding.colwidth(block['optstr'])
+    desc = b' '.join(map(bytes.strip, block[b'lines']))
+    colwidth = encoding.colwidth(block[b'optstr'])
     usablewidth = width - 1
-    hanging = block['optstrwidth']
-    initindent = '%s%s  ' % (block['optstr'], ' ' * ((hanging - colwidth)))
-    hangindent = ' ' * (encoding.colwidth(initindent) + 1)
-    return ' %s\n' % (stringutil.wrap(desc, usablewidth,
-                                      initindent=initindent,
-                                      hangindent=hangindent))
+    hanging = block[b'optstrwidth']
+    initindent = b'%s%s  ' % (block[b'optstr'], b' ' * ((hanging - colwidth)))
+    hangindent = b' ' * (encoding.colwidth(initindent) + 1)
+    return b' %s\n' % (
+        stringutil.wrap(
+            desc, usablewidth, initindent=initindent, hangindent=hangindent
+        )
+    )
+
 
 def formatblock(block, width):
     """Format a block according to width."""
     if width <= 0:
         width = 78
-    indent = ' ' * block['indent']
-    if block['type'] == 'admonition':
-        admonition = _admonitiontitles[block['admonitiontitle']]
-        if not block['lines']:
-            return indent + admonition + '\n'
-        hang = len(block['lines'][-1]) - len(block['lines'][-1].lstrip())
+    indent = b' ' * block[b'indent']
+    if block[b'type'] == b'admonition':
+        admonition = _admonitiontitles[block[b'admonitiontitle']]
+        if not block[b'lines']:
+            return indent + admonition + b'\n'
+        hang = len(block[b'lines'][-1]) - len(block[b'lines'][-1].lstrip())
 
-        defindent = indent + hang * ' '
-        text = ' '.join(map(bytes.strip, block['lines']))
-        return '%s\n%s\n' % (indent + admonition,
-                             stringutil.wrap(text, width=width,
-                                             initindent=defindent,
-                                             hangindent=defindent))
-    if block['type'] == 'margin':
-        return '\n'
-    if block['type'] == 'literal':
-        indent += '  '
-        return indent + ('\n' + indent).join(block['lines']) + '\n'
-    if block['type'] == 'section':
-        underline = encoding.colwidth(block['lines'][0]) * block['underline']
-        return "%s%s\n%s%s\n" % (indent, block['lines'][0],indent, underline)
-    if block['type'] == 'table':
-        table = block['table']
+        defindent = indent + hang * b' '
+        text = b' '.join(map(bytes.strip, block[b'lines']))
+        return b'%s\n%s\n' % (
+            indent + admonition,
+            stringutil.wrap(
+                text, width=width, initindent=defindent, hangindent=defindent
+            ),
+        )
+    if block[b'type'] == b'margin':
+        return b'\n'
+    if block[b'type'] == b'literal':
+        indent += b'  '
+        return indent + (b'\n' + indent).join(block[b'lines']) + b'\n'
+    if block[b'type'] == b'section':
+        underline = encoding.colwidth(block[b'lines'][0]) * block[b'underline']
+        return b"%s%s\n%s%s\n" % (indent, block[b'lines'][0], indent, underline)
+    if block[b'type'] == b'table':
+        table = block[b'table']
         # compute column widths
         widths = [max([encoding.colwidth(e) for e in c]) for c in zip(*table)]
-        text = ''
+        text = b''
         span = sum(widths) + len(widths) - 1
-        indent = ' ' * block['indent']
-        hang = ' ' * (len(indent) + span - widths[-1])
+        indent = b' ' * block[b'indent']
+        hang = b' ' * (len(indent) + span - widths[-1])
 
         for row in table:
             l = []
             for w, v in zip(widths, row):
-                pad = ' ' * (w - encoding.colwidth(v))
+                pad = b' ' * (w - encoding.colwidth(v))
                 l.append(v + pad)
-            l = ' '.join(l)
-            l = stringutil.wrap(l, width=width,
-                                initindent=indent,
-                                hangindent=hang)
-            if not text and block['header']:
-                text = l + '\n' + indent + '-' * (min(width, span)) + '\n'
+            l = b' '.join(l)
+            l = stringutil.wrap(
+                l, width=width, initindent=indent, hangindent=hang
+            )
+            if not text and block[b'header']:
+                text = l + b'\n' + indent + b'-' * (min(width, span)) + b'\n'
             else:
-                text += l + "\n"
+                text += l + b"\n"
         return text
-    if block['type'] == 'definition':
-        term = indent + block['lines'][0]
-        hang = len(block['lines'][-1]) - len(block['lines'][-1].lstrip())
-        defindent = indent + hang * ' '
-        text = ' '.join(map(bytes.strip, block['lines'][1:]))
-        return '%s\n%s\n' % (term, stringutil.wrap(text, width=width,
-                                                   initindent=defindent,
-                                                   hangindent=defindent))
+    if block[b'type'] == b'definition':
+        term = indent + block[b'lines'][0]
+        hang = len(block[b'lines'][-1]) - len(block[b'lines'][-1].lstrip())
+        defindent = indent + hang * b' '
+        text = b' '.join(map(bytes.strip, block[b'lines'][1:]))
+        return b'%s\n%s\n' % (
+            term,
+            stringutil.wrap(
+                text, width=width, initindent=defindent, hangindent=defindent
+            ),
+        )
     subindent = indent
-    if block['type'] == 'bullet':
-        if block['lines'][0].startswith('| '):
+    if block[b'type'] == b'bullet':
+        if block[b'lines'][0].startswith(b'| '):
             # Remove bullet for line blocks and add no extra
             # indentation.
-            block['lines'][0] = block['lines'][0][2:]
+            block[b'lines'][0] = block[b'lines'][0][2:]
         else:
-            m = _bulletre.match(block['lines'][0])
-            subindent = indent + m.end() * ' '
-    elif block['type'] == 'field':
-        key = block['key']
-        subindent = indent + _fieldwidth * ' '
+            m = _bulletre.match(block[b'lines'][0])
+            subindent = indent + m.end() * b' '
+    elif block[b'type'] == b'field':
+        key = block[b'key']
+        subindent = indent + _fieldwidth * b' '
         if len(key) + 2 > _fieldwidth:
             # key too large, use full line width
             key = key.ljust(width)
         else:
             # key fits within field width
             key = key.ljust(_fieldwidth)
-        block['lines'][0] = key + block['lines'][0]
-    elif block['type'] == 'option':
+        block[b'lines'][0] = key + block[b'lines'][0]
+    elif block[b'type'] == b'option':
         return formatoption(block, width)
 
-    text = ' '.join(map(bytes.strip, block['lines']))
-    return stringutil.wrap(text, width=width,
-                           initindent=indent,
-                           hangindent=subindent) + '\n'
+    text = b' '.join(map(bytes.strip, block[b'lines']))
+    return (
+        stringutil.wrap(
+            text, width=width, initindent=indent, hangindent=subindent
+        )
+        + b'\n'
+    )
+
 
 def formathtml(blocks):
     """Format RST blocks as HTML"""
 
     out = []
-    headernest = ''
+    headernest = b''
     listnest = []
 
     def escape(s):
@@ -564,89 +621,91 @@
     def openlist(start, level):
         if not listnest or listnest[-1][0] != start:
             listnest.append((start, level))
-            out.append('<%s>\n' % start)
+            out.append(b'<%s>\n' % start)
 
-    blocks = [b for b in blocks if b['type'] != 'margin']
+    blocks = [b for b in blocks if b[b'type'] != b'margin']
 
     for pos, b in enumerate(blocks):
-        btype = b['type']
-        level = b['indent']
-        lines = b['lines']
+        btype = b[b'type']
+        level = b[b'indent']
+        lines = b[b'lines']
 
-        if btype == 'admonition':
-            admonition = escape(_admonitiontitles[b['admonitiontitle']])
-            text = escape(' '.join(map(bytes.strip, lines)))
-            out.append('<p>\n<b>%s</b> %s\n</p>\n' % (admonition, text))
-        elif btype == 'paragraph':
-            out.append('<p>\n%s\n</p>\n' % escape('\n'.join(lines)))
-        elif btype == 'margin':
+        if btype == b'admonition':
+            admonition = escape(_admonitiontitles[b[b'admonitiontitle']])
+            text = escape(b' '.join(map(bytes.strip, lines)))
+            out.append(b'<p>\n<b>%s</b> %s\n</p>\n' % (admonition, text))
+        elif btype == b'paragraph':
+            out.append(b'<p>\n%s\n</p>\n' % escape(b'\n'.join(lines)))
+        elif btype == b'margin':
             pass
-        elif btype == 'literal':
-            out.append('<pre>\n%s\n</pre>\n' % escape('\n'.join(lines)))
-        elif btype == 'section':
-            i = b['underline']
+        elif btype == b'literal':
+            out.append(b'<pre>\n%s\n</pre>\n' % escape(b'\n'.join(lines)))
+        elif btype == b'section':
+            i = b[b'underline']
             if i not in headernest:
                 headernest += i
             level = headernest.index(i) + 1
-            out.append('<h%d>%s</h%d>\n' % (level, escape(lines[0]), level))
-        elif btype == 'table':
-            table = b['table']
-            out.append('<table>\n')
+            out.append(b'<h%d>%s</h%d>\n' % (level, escape(lines[0]), level))
+        elif btype == b'table':
+            table = b[b'table']
+            out.append(b'<table>\n')
             for row in table:
-                out.append('<tr>')
+                out.append(b'<tr>')
                 for v in row:
-                    out.append('<td>')
+                    out.append(b'<td>')
                     out.append(escape(v))
-                    out.append('</td>')
-                    out.append('\n')
+                    out.append(b'</td>')
+                    out.append(b'\n')
                 out.pop()
-                out.append('</tr>\n')
-            out.append('</table>\n')
-        elif btype == 'definition':
-            openlist('dl', level)
+                out.append(b'</tr>\n')
+            out.append(b'</table>\n')
+        elif btype == b'definition':
+            openlist(b'dl', level)
             term = escape(lines[0])
-            text = escape(' '.join(map(bytes.strip, lines[1:])))
-            out.append(' <dt>%s\n <dd>%s\n' % (term, text))
-        elif btype == 'bullet':
-            bullet, head = lines[0].split(' ', 1)
-            if bullet in ('*', '-'):
-                openlist('ul', level)
+            text = escape(b' '.join(map(bytes.strip, lines[1:])))
+            out.append(b' <dt>%s\n <dd>%s\n' % (term, text))
+        elif btype == b'bullet':
+            bullet, head = lines[0].split(b' ', 1)
+            if bullet in (b'*', b'-'):
+                openlist(b'ul', level)
             else:
-                openlist('ol', level)
-            out.append(' <li> %s\n' % escape(' '.join([head] + lines[1:])))
-        elif btype == 'field':
-            openlist('dl', level)
-            key = escape(b['key'])
-            text = escape(' '.join(map(bytes.strip, lines)))
-            out.append(' <dt>%s\n <dd>%s\n' % (key, text))
-        elif btype == 'option':
-            openlist('dl', level)
-            opt = escape(b['optstr'])
-            desc = escape(' '.join(map(bytes.strip, lines)))
-            out.append(' <dt>%s\n <dd>%s\n' % (opt, desc))
+                openlist(b'ol', level)
+            out.append(b' <li> %s\n' % escape(b' '.join([head] + lines[1:])))
+        elif btype == b'field':
+            openlist(b'dl', level)
+            key = escape(b[b'key'])
+            text = escape(b' '.join(map(bytes.strip, lines)))
+            out.append(b' <dt>%s\n <dd>%s\n' % (key, text))
+        elif btype == b'option':
+            openlist(b'dl', level)
+            opt = escape(b[b'optstr'])
+            desc = escape(b' '.join(map(bytes.strip, lines)))
+            out.append(b' <dt>%s\n <dd>%s\n' % (opt, desc))
 
         # close lists if indent level of next block is lower
         if listnest:
             start, level = listnest[-1]
             if pos == len(blocks) - 1:
-                out.append('</%s>\n' % start)
+                out.append(b'</%s>\n' % start)
                 listnest.pop()
             else:
                 nb = blocks[pos + 1]
-                ni = nb['indent']
-                if (ni < level or
-                    (ni == level and
-                     nb['type'] not in 'definition bullet field option')):
-                    out.append('</%s>\n' % start)
+                ni = nb[b'indent']
+                if ni < level or (
+                    ni == level
+                    and nb[b'type'] not in b'definition bullet field option'
+                ):
+                    out.append(b'</%s>\n' % start)
                     listnest.pop()
 
-    return ''.join(out)
+    return b''.join(out)
+
 
 def parse(text, indent=0, keep=None, admonitions=None):
     """Parse text into a list of blocks"""
     blocks = findblocks(text)
     for b in blocks:
-        b['indent'] += indent
+        b[b'indent'] += indent
     blocks = findliteralblocks(blocks)
     blocks = findtables(blocks)
     blocks, pruned = prunecontainers(blocks, keep or [])
@@ -661,24 +720,28 @@
     blocks = prunecomments(blocks)
     return blocks, pruned
 
+
 def formatblocks(blocks, width):
-    text = ''.join(formatblock(b, width) for b in blocks)
+    text = b''.join(formatblock(b, width) for b in blocks)
     return text
 
+
 def formatplain(blocks, width):
     """Format parsed blocks as plain text"""
-    return ''.join(formatblock(b, width) for b in blocks)
+    return b''.join(formatblock(b, width) for b in blocks)
 
-def format(text, width=80, indent=0, keep=None, style='plain', section=None):
+
+def format(text, width=80, indent=0, keep=None, style=b'plain', section=None):
     """Parse and format the text according to width."""
     blocks, pruned = parse(text, indent, keep or [])
     if section:
         blocks = filtersections(blocks, section)
-    if style == 'html':
+    if style == b'html':
         return formathtml(blocks)
     else:
         return formatplain(blocks, width=width)
 
+
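# [editor's sketch, not part of the changeset] Typical use of the
# parse/format pipeline above, assuming a mercurial checkout on sys.path;
# as everywhere in this module, all inputs and outputs are bytes.
from mercurial import minirst

text = b"Section\n=======\n\nA *plain* paragraph.\n"
print(minirst.format(text, width=30).decode('ascii'))       # formatplain()
print(minirst.format(text, style=b'html').decode('ascii'))  # formathtml()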
 def filtersections(blocks, section):
     """Select parsed blocks under the specified section
 
@@ -696,7 +759,7 @@
         path, nest, b = sections[i]
         del parents[nest:]
         parents.append(i)
-        if path == section or path.endswith('.' + section):
+        if path == section or path.endswith(b'.' + section):
             if lastparents != parents:
                 llen = len(lastparents)
                 plen = len(parents)
@@ -705,8 +768,7 @@
                 s = []
                 for j in pycompat.xrange(3, plen - 1):
                     parent = parents[j]
-                    if (j >= llen or
-                        lastparents[j] != parent):
+                    if j >= llen or lastparents[j] != parent:
                         s.append(len(blocks))
                         sec = sections[parent][2]
                         blocks.append(sec[0])
@@ -725,42 +787,44 @@
     if collapse:
         synthetic.reverse()
         for s in synthetic:
-            path = [blocks[syn]['lines'][0] for syn in s]
+            path = [blocks[syn][b'lines'][0] for syn in s]
             real = s[-1] + 2
-            realline = blocks[real]['lines']
-            realline[0] = ('"%s"' %
-                           '.'.join(path + [realline[0]]).replace('"', ''))
-            del blocks[s[0]:real]
+            realline = blocks[real][b'lines']
+            realline[0] = b'"%s"' % b'.'.join(path + [realline[0]]).replace(
+                b'"', b''
+            )
+            del blocks[s[0] : real]
 
     return blocks
 
+
 def _getsections(blocks):
     '''return a list of (section path, nesting level, blocks) tuples'''
-    nest = ""
+    nest = b""
     names = ()
     secs = []
 
     def getname(b):
-        if b['type'] == 'field':
-            x = b['key']
+        if b[b'type'] == b'field':
+            x = b[b'key']
         else:
-            x = b['lines'][0]
-        x = encoding.lower(x).strip('"')
-        if '(' in x:
-            x = x.split('(')[0]
+            x = b[b'lines'][0]
+        x = encoding.lower(x).strip(b'"')
+        if b'(' in x:
+            x = x.split(b'(')[0]
         return x
 
     for b in blocks:
-        if b['type'] == 'section':
-            i = b['underline']
+        if b[b'type'] == b'section':
+            i = b[b'underline']
             if i not in nest:
                 nest += i
             level = nest.index(i) + 1
             nest = nest[:level]
             names = names[:level] + (getname(b),)
-            secs.append(('.'.join(names), level, [b]))
-        elif b['type'] in ('definition', 'field'):
-            i = ' '
+            secs.append((b'.'.join(names), level, [b]))
+        elif b[b'type'] in (b'definition', b'field'):
+            i = b' '
             if i not in nest:
                 nest += i
             level = nest.index(i) + 1
@@ -769,10 +833,10 @@
                 sec = secs[-i]
                 if sec[1] < level:
                     break
-                siblings = [a for a in sec[2] if a['type'] == 'definition']
+                siblings = [a for a in sec[2] if a[b'type'] == b'definition']
                 if siblings:
-                    siblingindent = siblings[-1]['indent']
-                    indent = b['indent']
+                    siblingindent = siblings[-1][b'indent']
+                    indent = b[b'indent']
                     if siblingindent < indent:
                         level += 1
                         break
@@ -780,50 +844,52 @@
                         level = sec[1]
                         break
             names = names[:level] + (getname(b),)
-            secs.append(('.'.join(names), level, [b]))
+            secs.append((b'.'.join(names), level, [b]))
         else:
             if not secs:
                 # add an initial empty section
-                secs = [('', 0, [])]
-            if b['type'] != 'margin':
+                secs = [(b'', 0, [])]
+            if b[b'type'] != b'margin':
                 pointer = 1
-                bindent = b['indent']
+                bindent = b[b'indent']
                 while pointer < len(secs):
                     section = secs[-pointer][2][0]
-                    if section['type'] != 'margin':
-                        sindent = section['indent']
-                        if len(section['lines']) > 1:
-                            sindent += (len(section['lines'][1]) -
-                                        len(section['lines'][1].lstrip(' ')))
+                    if section[b'type'] != b'margin':
+                        sindent = section[b'indent']
+                        if len(section[b'lines']) > 1:
+                            sindent += len(section[b'lines'][1]) - len(
+                                section[b'lines'][1].lstrip(b' ')
+                            )
                         if bindent >= sindent:
                             break
                     pointer += 1
                 if pointer > 1:
                     blevel = secs[-pointer][1]
-                    if section['type'] != b['type']:
+                    if section[b'type'] != b[b'type']:
                         blevel += 1
-                    secs.append(('', blevel, []))
+                    secs.append((b'', blevel, []))
             secs[-1][2].append(b)
     return secs
 
+
 def maketable(data, indent=0, header=False):
     '''Generate an RST table for the given table data as a list of lines'''
 
     widths = [max(encoding.colwidth(e) for e in c) for c in zip(*data)]
-    indent = ' ' * indent
-    div = indent + ' '.join('=' * w for w in widths) + '\n'
+    indent = b' ' * indent
+    div = indent + b' '.join(b'=' * w for w in widths) + b'\n'
 
     out = [div]
     for row in data:
         l = []
         for w, v in zip(widths, row):
-            if '\n' in v:
+            if b'\n' in v:
                 # only remove line breaks and indentation; long lines are
                 # handled by the next tool
-                v = ' '.join(e.lstrip() for e in v.split('\n'))
-            pad = ' ' * (w - encoding.colwidth(v))
+                v = b' '.join(e.lstrip() for e in v.split(b'\n'))
+            pad = b' ' * (w - encoding.colwidth(v))
             l.append(v + pad)
-        out.append(indent + ' '.join(l) + "\n")
+        out.append(indent + b' '.join(l) + b"\n")
     if header and len(data) > 1:
         out.insert(2, div)
     out.append(div)
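# [editor's sketch, not part of the changeset] What maketable() emits for a
# small input; with header=True the divider is repeated after the first row
# (cells are padded with spaces to the column width).
from mercurial import minirst

rows = [[b'name', b'role'], [b'alice', b'reviewer']]
print(b''.join(minirst.maketable(rows, indent=0, header=True)).decode('ascii'))
# ===== ========
# name  role
# ===== ========
# alice reviewer
# ===== ========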
--- a/mercurial/namespaces.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/namespaces.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,11 +2,13 @@
 
 from .i18n import _
 from . import (
+    pycompat,
     registrar,
     templatekw,
     util,
 )
 
+
 def tolist(val):
     """
     a convenience method to return an empty list instead of None
@@ -16,6 +18,7 @@
     else:
         return [val]
 
+
 class namespaces(object):
     """provides an interface to register and operate on multiple namespaces. See
     the namespace class below for details on the namespace object.
@@ -33,32 +36,44 @@
         bmknames = lambda repo: repo._bookmarks.keys()
         bmknamemap = lambda repo, name: tolist(repo._bookmarks.get(name))
         bmknodemap = lambda repo, node: repo.nodebookmarks(node)
-        n = namespace("bookmarks", templatename="bookmark",
-                      logfmt=columns['bookmark'],
-                      listnames=bmknames,
-                      namemap=bmknamemap, nodemap=bmknodemap,
-                      builtin=True)
+        n = namespace(
+            b"bookmarks",
+            templatename=b"bookmark",
+            logfmt=columns[b'bookmark'],
+            listnames=bmknames,
+            namemap=bmknamemap,
+            nodemap=bmknodemap,
+            builtin=True,
+        )
         self.addnamespace(n)
 
         tagnames = lambda repo: [t for t, n in repo.tagslist()]
         tagnamemap = lambda repo, name: tolist(repo._tagscache.tags.get(name))
         tagnodemap = lambda repo, node: repo.nodetags(node)
-        n = namespace("tags", templatename="tag",
-                      logfmt=columns['tag'],
-                      listnames=tagnames,
-                      namemap=tagnamemap, nodemap=tagnodemap,
-                      deprecated={'tip'},
-                      builtin=True)
+        n = namespace(
+            b"tags",
+            templatename=b"tag",
+            logfmt=columns[b'tag'],
+            listnames=tagnames,
+            namemap=tagnamemap,
+            nodemap=tagnodemap,
+            deprecated={b'tip'},
+            builtin=True,
+        )
         self.addnamespace(n)
 
         bnames = lambda repo: repo.branchmap().keys()
         bnamemap = lambda repo, name: tolist(repo.branchtip(name, True))
         bnodemap = lambda repo, node: [repo[node].branch()]
-        n = namespace("branches", templatename="branch",
-                      logfmt=columns['branch'],
-                      listnames=bnames,
-                      namemap=bnamemap, nodemap=bnodemap,
-                      builtin=True)
+        n = namespace(
+            b"branches",
+            templatename=b"branch",
+            logfmt=columns[b'branch'],
+            listnames=bnames,
+            namemap=bnamemap,
+            nodemap=bnodemap,
+            builtin=True,
+        )
         self.addnamespace(n)
 
     def __getitem__(self, namespace):
@@ -69,7 +84,7 @@
         return self._names.__iter__()
 
     def items(self):
-        return self._names.iteritems()
+        return pycompat.iteritems(self._names)
 
     iteritems = items
 
@@ -89,7 +104,8 @@
         # we only generate a template keyword if one does not already exist
         if namespace.name not in templatekw.keywords:
             templatekeyword = registrar.templatekeyword(templatekw.keywords)
-            @templatekeyword(namespace.name, requires={'repo', 'ctx'})
+
+            @templatekeyword(namespace.name, requires={b'repo', b'ctx'})
             def generatekw(context, mapping):
                 return templatekw.shownames(context, mapping, namespace.name)
 
@@ -101,11 +117,12 @@
 
         Raises a KeyError if there is no such node.
         """
-        for ns, v in self._names.iteritems():
+        for ns, v in pycompat.iteritems(self._names):
             n = v.singlenode(repo, name)
             if n:
                 return n
-        raise KeyError(_('no such name: %s') % name)
+        raise KeyError(_(b'no such name: %s') % name)
+
 
 class namespace(object):
     """provides an interface to a namespace
@@ -135,9 +152,20 @@
                  Mercurial.
     """
 
-    def __init__(self, name, templatename=None, logname=None, colorname=None,
-                 logfmt=None, listnames=None, namemap=None, nodemap=None,
-                 deprecated=None, builtin=False, singlenode=None):
+    def __init__(
+        self,
+        name,
+        templatename=None,
+        logname=None,
+        colorname=None,
+        logfmt=None,
+        listnames=None,
+        namemap=None,
+        nodemap=None,
+        deprecated=None,
+        builtin=False,
+        singlenode=None,
+    ):
         """create a namespace
 
         name: the namespace to be registered (in plural form)
@@ -177,7 +205,7 @@
         # if logfmt is not specified, compose it from logname as backup
         if self.logfmt is None:
             # i18n: column positioning for "hg log"
-            self.logfmt = ("%s:" % self.logname).ljust(13) + "%s\n"
+            self.logfmt = (b"%s:" % self.logname).ljust(13) + b"%s\n"
 
         if deprecated is None:
             self.deprecated = set()
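# [editor's sketch, not part of the changeset] How an extension could add a
# namespace next to bookmarks/tags/branches; the name here is hypothetical
# and the three callbacks are stubs.
def reposetup(ui, repo):
    from mercurial import namespaces

    n = namespaces.namespace(
        b"aliases",                      # plural name of the namespace
        templatename=b"alias",
        listnames=lambda repo: [],       # all names known to the namespace
        namemap=lambda repo, name: [],   # name -> list of nodes
        nodemap=lambda repo, node: [],   # node -> list of names
    )
    repo.names.addnamespace(n)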
--- a/mercurial/narrowspec.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/narrowspec.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,20 +8,21 @@
 from __future__ import absolute_import
 
 from .i18n import _
+from .pycompat import getattr
+from .interfaces import repository
 from . import (
     error,
     match as matchmod,
     merge,
-    repository,
     scmutil,
     sparse,
     util,
 )
 
 # The file in .hg/store/ that indicates which paths exist in the store
-FILENAME = 'narrowspec'
+FILENAME = b'narrowspec'
 # The file in .hg/ that indicates which paths exist in the dirstate
-DIRSTATE_FILENAME = 'narrowspec.dirstate'
+DIRSTATE_FILENAME = b'narrowspec.dirstate'
 
 # Pattern prefixes that are allowed in narrow patterns. This list MUST
 # only contain patterns that are fast and safe to evaluate. Keep in mind
@@ -34,21 +35,24 @@
     b'rootfilesin:',
 )
 
+
 def normalizesplitpattern(kind, pat):
     """Returns the normalized version of a pattern and kind.
 
     Returns a tuple with the normalized kind and normalized pattern.
     """
-    pat = pat.rstrip('/')
+    pat = pat.rstrip(b'/')
     _validatepattern(pat)
     return kind, pat
 
+
 def _numlines(s):
     """Returns the number of lines in s, including ending empty lines."""
     # We use splitlines because it is Unicode-friendly and thus Python 3
     # compatible. However, it does not count empty lines at the end, so trick
     # it by adding a character at the end.
-    return len((s + 'x').splitlines())
+    return len((s + b'x').splitlines())
+
 
 def _validatepattern(pat):
     """Validates the pattern and aborts if it is invalid.
@@ -60,19 +64,23 @@
     # We use newlines as separators in the narrowspec file, so don't allow them
     # in patterns.
     if _numlines(pat) > 1:
-        raise error.Abort(_('newlines are not allowed in narrowspec paths'))
+        raise error.Abort(_(b'newlines are not allowed in narrowspec paths'))
 
-    components = pat.split('/')
-    if '.' in components or '..' in components:
-        raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
+    components = pat.split(b'/')
+    if b'.' in components or b'..' in components:
+        raise error.Abort(
+            _(b'"." and ".." are not allowed in narrowspec paths')
+        )
 
-def normalizepattern(pattern, defaultkind='path'):
+
+def normalizepattern(pattern, defaultkind=b'path'):
     """Returns the normalized version of a text-format pattern.
 
     If the pattern has no kind, the default will be added.
     """
     kind, pat = matchmod._patsplit(pattern, defaultkind)
-    return '%s:%s' % normalizesplitpattern(kind, pat)
+    return b'%s:%s' % normalizesplitpattern(kind, pat)
+
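# [editor's sketch, not part of the changeset] Effect of normalization on
# user-supplied narrow patterns, assuming mercurial is importable.
from mercurial import narrowspec

assert narrowspec.normalizepattern(b'foo/bar/') == b'path:foo/bar'
assert narrowspec.normalizepattern(b'rootfilesin:foo/') == b'rootfilesin:foo'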
 
 def parsepatterns(pats):
     """Parses an iterable of patterns into a typed pattern set.
@@ -89,6 +97,7 @@
     validatepatterns(res)
     return res
 
+
 def validatepatterns(pats):
     """Validate that patterns are in the expected data structure and format.
 
@@ -100,64 +109,81 @@
     prefixed pattern representation (but can't necessarily be fully trusted).
     """
     if not isinstance(pats, set):
-        raise error.ProgrammingError('narrow patterns should be a set; '
-                                     'got %r' % pats)
+        raise error.ProgrammingError(
+            b'narrow patterns should be a set; got %r' % pats
+        )
 
     for pat in pats:
         if not pat.startswith(VALID_PREFIXES):
             # Use a Mercurial exception because this can happen due to user
             # error (e.g. manually updating the spec file).
-            raise error.Abort(_('invalid prefix on narrow pattern: %s') % pat,
-                              hint=_('narrow patterns must begin with one of '
-                                     'the following: %s') %
-                                   ', '.join(VALID_PREFIXES))
+            raise error.Abort(
+                _(b'invalid prefix on narrow pattern: %s') % pat,
+                hint=_(
+                    b'narrow patterns must begin with one of '
+                    b'the following: %s'
+                )
+                % b', '.join(VALID_PREFIXES),
+            )
+
 
 def format(includes, excludes):
-    output = '[include]\n'
+    output = b'[include]\n'
     for i in sorted(includes - excludes):
-        output += i + '\n'
-    output += '[exclude]\n'
+        output += i + b'\n'
+    output += b'[exclude]\n'
     for e in sorted(excludes):
-        output += e + '\n'
+        output += e + b'\n'
     return output
 
+
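# [editor's sketch, not part of the changeset] The on-disk narrowspec text
# produced by format(); an include that is also excluded is dropped.
from mercurial import narrowspec

spec = narrowspec.format({b'path:foo', b'path:bar'}, {b'path:bar'})
assert spec == b'[include]\npath:foo\n[exclude]\npath:bar\n'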
 def match(root, include=None, exclude=None):
     if not include:
         # Passing empty include and empty exclude to matchmod.match()
         # gives a matcher that matches everything, so explicitly use
         # the nevermatcher.
         return matchmod.never()
-    return matchmod.match(root, '', [], include=include or [],
-                          exclude=exclude or [])
+    return matchmod.match(
+        root, b'', [], include=include or [], exclude=exclude or []
+    )
+
 
 def parseconfig(ui, spec):
     # maybe we should care about the profiles returned too
-    includepats, excludepats, profiles = sparse.parseconfig(ui, spec, 'narrow')
+    includepats, excludepats, profiles = sparse.parseconfig(ui, spec, b'narrow')
     if profiles:
-        raise error.Abort(_("including other spec files using '%include' is not"
-                            " supported in narrowspec"))
+        raise error.Abort(
+            _(
+                b"including other spec files using '%include' is not"
+                b" supported in narrowspec"
+            )
+        )
 
     validatepatterns(includepats)
     validatepatterns(excludepats)
 
     return includepats, excludepats
 
+
 def load(repo):
     # Treat "narrowspec does not exist" the same as "narrowspec file exists
     # and is empty".
     spec = repo.svfs.tryread(FILENAME)
     return parseconfig(repo.ui, spec)
 
+
 def save(repo, includepats, excludepats):
     validatepatterns(includepats)
     validatepatterns(excludepats)
     spec = format(includepats, excludepats)
     repo.svfs.write(FILENAME, spec)
 
+
 def copytoworkingcopy(repo):
     spec = repo.svfs.read(FILENAME)
     repo.vfs.write(DIRSTATE_FILENAME, spec)
 
+
 def savebackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
@@ -165,11 +191,13 @@
     svfs.tryunlink(backupname)
     util.copyfile(svfs.join(FILENAME), svfs.join(backupname), hardlink=True)
 
+
 def restorebackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME))
 
+
 def savewcbackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
@@ -177,8 +205,10 @@
     vfs.tryunlink(backupname)
     # It may not exist in old repos
     if vfs.exists(DIRSTATE_FILENAME):
-        util.copyfile(vfs.join(DIRSTATE_FILENAME), vfs.join(backupname),
-                      hardlink=True)
+        util.copyfile(
+            vfs.join(DIRSTATE_FILENAME), vfs.join(backupname), hardlink=True
+        )
+
 
 def restorewcbackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
@@ -187,11 +217,13 @@
     if repo.vfs.exists(backupname):
         util.rename(repo.vfs.join(backupname), repo.vfs.join(DIRSTATE_FILENAME))
 
+
 def clearwcbackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     repo.vfs.tryunlink(backupname)
 
+
 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
     r""" Restricts the patterns according to repo settings,
     results in a logical AND operation
@@ -222,7 +254,7 @@
     invalid_includes = []
     if not req_includes:
         res_includes = set(repo_includes)
-    elif 'path:.' not in repo_includes:
+    elif b'path:.' not in repo_includes:
         res_includes = []
         for req_include in req_includes:
             req_include = util.expandpath(util.normpath(req_include))
@@ -231,35 +263,44 @@
                 continue
             valid = False
             for repo_include in repo_includes:
-                if req_include.startswith(repo_include + '/'):
+                if req_include.startswith(repo_include + b'/'):
                     valid = True
                     res_includes.append(req_include)
                     break
             if not valid:
                 invalid_includes.append(req_include)
         if len(res_includes) == 0:
-            res_excludes = {'path:.'}
+            res_excludes = {b'path:.'}
         else:
             res_includes = set(res_includes)
     else:
         res_includes = set(req_includes)
     return res_includes, res_excludes, invalid_includes
 
+
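# [editor's sketch, not part of the changeset] The logical-AND semantics:
# when the repo already includes everything ('path:.'), the requested
# narrowing wins, while repo-level excludes always stay in force.
from mercurial import narrowspec

inc, exc, invalid = narrowspec.restrictpatterns(
    {b'path:src/core'},    # req_includes
    set(),                 # req_excludes
    {b'path:.'},           # repo_includes
    {b'path:src/tests'},   # repo_excludes
)
assert (inc, exc, invalid) == ({b'path:src/core'}, {b'path:src/tests'}, [])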
 # These two are extracted for extensions (specifically for Google's CitC file
 # system)
 def _deletecleanfiles(repo, files):
     for f in files:
         repo.wvfs.unlinkpath(f)
 
+
 def _writeaddedfiles(repo, pctx, files):
     actions = merge.emptyactions()
     addgaction = actions[merge.ACTION_GET].append
-    mf = repo['.'].manifest()
+    mf = repo[b'.'].manifest()
     for f in files:
         if not repo.wvfs.exists(f):
-            addgaction((f, (mf.flags(f), False), "narrowspec updated"))
-    merge.applyupdates(repo, actions, wctx=repo[None],
-                       mctx=repo['.'], overwrite=False, wantfiledata=False)
+            addgaction((f, (mf.flags(f), False), b"narrowspec updated"))
+    merge.applyupdates(
+        repo,
+        actions,
+        wctx=repo[None],
+        mctx=repo[b'.'],
+        overwrite=False,
+        wantfiledata=False,
+    )
+
 
 def checkworkingcopynarrowspec(repo):
     # Avoid infinite recursion when updating the working copy
@@ -268,8 +309,11 @@
     storespec = repo.svfs.tryread(FILENAME)
     wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
     if wcspec != storespec:
-        raise error.Abort(_("working copy's narrowspec is stale"),
-                          hint=_("run 'hg tracked --update-working-copy'"))
+        raise error.Abort(
+            _(b"working copy's narrowspec is stale"),
+            hint=_(b"run 'hg tracked --update-working-copy'"),
+        )
+
 
 def updateworkingcopy(repo, assumeclean=False):
     """updates the working copy and dirstate from the store narrowspec
@@ -289,8 +333,9 @@
     removedmatch = matchmod.differencematcher(oldmatch, newmatch)
 
     ds = repo.dirstate
-    lookup, status = ds.status(removedmatch, subrepos=[], ignored=True,
-                               clean=True, unknown=True)
+    lookup, status = ds.status(
+        removedmatch, subrepos=[], ignored=True, clean=True, unknown=True
+    )
     trackeddirty = status.modified + status.added
     clean = status.clean
     if assumeclean:
@@ -301,15 +346,17 @@
     _deletecleanfiles(repo, clean)
     uipathfn = scmutil.getuipathfn(repo)
     for f in sorted(trackeddirty):
-        repo.ui.status(_('not deleting possibly dirty file %s\n') % uipathfn(f))
+        repo.ui.status(
+            _(b'not deleting possibly dirty file %s\n') % uipathfn(f)
+        )
     for f in sorted(status.unknown):
-        repo.ui.status(_('not deleting unknown file %s\n') % uipathfn(f))
+        repo.ui.status(_(b'not deleting unknown file %s\n') % uipathfn(f))
     for f in sorted(status.ignored):
-        repo.ui.status(_('not deleting ignored file %s\n') % uipathfn(f))
+        repo.ui.status(_(b'not deleting ignored file %s\n') % uipathfn(f))
     for f in clean + trackeddirty:
         ds.drop(f)
 
-    pctx = repo['.']
+    pctx = repo[b'.']
     newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
     for f in newfiles:
         ds.normallookup(f)
--- a/mercurial/node.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/node.py	Mon Oct 21 11:09:48 2019 -0400
@@ -20,6 +20,7 @@
     except binascii.Error as e:
         raise TypeError(e)
 
+
 nullrev = -1
 # In hex, this is '0000000000000000000000000000000000000000'
 nullid = b"\0" * 20
@@ -28,20 +29,21 @@
 # Phony node value to stand-in for new files in some uses of
 # manifests.
 # In hex, this is '2121212121212121212121212121212121212121'
-newnodeid = '!!!!!!!!!!!!!!!!!!!!'
+newnodeid = b'!!!!!!!!!!!!!!!!!!!!'
 # In hex, this is '3030303030303030303030303030306164646564'
-addednodeid = '000000000000000added'
+addednodeid = b'000000000000000added'
 # In hex, this is '3030303030303030303030306d6f646966696564'
-modifiednodeid = '000000000000modified'
+modifiednodeid = b'000000000000modified'
 
 wdirfilenodeids = {newnodeid, addednodeid, modifiednodeid}
 
 # pseudo identifiers for working directory
 # (they are experimental, so don't add too many dependencies on them)
-wdirrev = 0x7fffffff
+wdirrev = 0x7FFFFFFF
 # In hex, this is 'ffffffffffffffffffffffffffffffffffffffff'
 wdirid = b"\xff" * 20
 wdirhex = hex(wdirid)
 
+
 def short(node):
     return hex(node[:6])
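# [editor's sketch, not part of the changeset] short() keeps the first six
# bytes, i.e. the familiar 12-hex-digit abbreviated node.
from mercurial.node import nullid, short

assert short(nullid) == b'000000000000'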
--- a/mercurial/obsolete.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/obsolete.py	Mon Oct 21 11:09:48 2019 -0400
@@ -74,6 +74,7 @@
 import struct
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     encoding,
     error,
@@ -94,16 +95,17 @@
 propertycache = util.propertycache
 
 # Options for obsolescence
-createmarkersopt = 'createmarkers'
-allowunstableopt = 'allowunstable'
-exchangeopt = 'exchange'
+createmarkersopt = b'createmarkers'
+allowunstableopt = b'allowunstable'
+exchangeopt = b'exchange'
+
 
 def _getoptionvalue(repo, option):
     """Returns True if the given repository has the given obsolete option
     enabled.
     """
-    configkey = 'evolution.%s' % option
-    newconfig = repo.ui.configbool('experimental', configkey)
+    configkey = b'evolution.%s' % option
+    newconfig = repo.ui.configbool(b'experimental', configkey)
 
     # Return the value only if defined
     if newconfig is not None:
@@ -111,22 +113,23 @@
 
     # Fallback on generic option
     try:
-        return repo.ui.configbool('experimental', 'evolution')
+        return repo.ui.configbool(b'experimental', b'evolution')
     except (error.ConfigError, AttributeError):
         # Fallback on old-fashion config
         # inconsistent config: experimental.evolution
-        result = set(repo.ui.configlist('experimental', 'evolution'))
+        result = set(repo.ui.configlist(b'experimental', b'evolution'))
 
-        if 'all' in result:
+        if b'all' in result:
             return True
 
         # Temporary hack for next check
-        newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
+        newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
         if newconfig:
-            result.add('createmarkers')
+            result.add(b'createmarkers')
 
         return option in result
 
+
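# [editor's sketch, not part of the changeset] The hgrc keys consulted by
# the fallback chain above, in resolution order; values are illustrative.
[experimental]
# 1) option-specific key; when set, it wins:
evolution.createmarkers = yes
# 2) legacy fallback, read as a bool first, then as a list ('all' enables
#    everything):
evolution = all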
 def getoptions(repo):
     """Returns dicts showing state of obsolescence features."""
 
@@ -135,9 +138,13 @@
     exchangevalue = _getoptionvalue(repo, exchangeopt)
 
     # createmarkers must be enabled if other options are enabled
-    if ((unstablevalue or exchangevalue) and not createmarkersvalue):
-        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
-                            "if other obsolete options are enabled"))
+    if (unstablevalue or exchangevalue) and not createmarkersvalue:
+        raise error.Abort(
+            _(
+                b"'createmarkers' obsolete option must be enabled "
+                b"if other obsolete options are enabled"
+            )
+        )
 
     return {
         createmarkersopt: createmarkersvalue,
@@ -145,12 +152,14 @@
         exchangeopt: exchangevalue,
     }
 
+
 def isenabled(repo, option):
     """Returns True if the given repository has the given obsolete option
     enabled.
     """
     return getoptions(repo)[option]
 
+
 # Creating aliases for marker flags because evolve extension looks for
 # bumpedfix in obsolete.py
 bumpedfix = obsutil.bumpedfix
@@ -177,45 +186,50 @@
 #   additional encoding. Keys cannot contain '\0' or ':' and values
 #   cannot contain '\0'.
 _fm0version = 0
-_fm0fixed   = '>BIB20s'
-_fm0node = '20s'
+_fm0fixed = b'>BIB20s'
+_fm0node = b'20s'
 _fm0fsize = _calcsize(_fm0fixed)
 _fm0fnodesize = _calcsize(_fm0node)
 
+
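# [editor's sketch, not part of the changeset] Sizes implied by the fm0
# format strings above: a 26-byte fixed header plus 20 bytes per successor.
import struct

assert struct.calcsize('>BIB20s') == 26  # numsuc, mdsize, flags, prec
assert struct.calcsize('20s') == 20      # one sha1 successor id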
 def _fm0readmarkers(data, off, stop):
     # Loop on markers
     while off < stop:
         # read fixed part
-        cur = data[off:off + _fm0fsize]
+        cur = data[off : off + _fm0fsize]
         off += _fm0fsize
         numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
         # read replacement
         sucs = ()
         if numsuc:
-            s = (_fm0fnodesize * numsuc)
-            cur = data[off:off + s]
+            s = _fm0fnodesize * numsuc
+            cur = data[off : off + s]
             sucs = _unpack(_fm0node * numsuc, cur)
             off += s
         # read metadata
         # (metadata will be decoded on demand)
-        metadata = data[off:off + mdsize]
+        metadata = data[off : off + mdsize]
         if len(metadata) != mdsize:
-            raise error.Abort(_('parsing obsolete marker: metadata is too '
-                               'short, %d bytes expected, got %d')
-                             % (mdsize, len(metadata)))
+            raise error.Abort(
+                _(
+                    b'parsing obsolete marker: metadata is too '
+                    b'short, %d bytes expected, got %d'
+                )
+                % (mdsize, len(metadata))
+            )
         off += mdsize
         metadata = _fm0decodemeta(metadata)
         try:
-            when, offset = metadata.pop('date', '0 0').split(' ')
+            when, offset = metadata.pop(b'date', b'0 0').split(b' ')
             date = float(when), int(offset)
         except ValueError:
-            date = (0., 0)
+            date = (0.0, 0)
         parents = None
-        if 'p2' in metadata:
-            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
-        elif 'p1' in metadata:
-            parents = (metadata.pop('p1', None),)
-        elif 'p0' in metadata:
+        if b'p2' in metadata:
+            parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
+        elif b'p1' in metadata:
+            parents = (metadata.pop(b'p1', None),)
+        elif b'p0' in metadata:
             parents = ()
         if parents is not None:
             try:
@@ -229,23 +243,24 @@
                 # if content cannot be translated to nodeid drop the data.
                 parents = None
 
-        metadata = tuple(sorted(metadata.iteritems()))
+        metadata = tuple(sorted(pycompat.iteritems(metadata)))
 
         yield (pre, sucs, flags, metadata, date, parents)
 
+
 def _fm0encodeonemarker(marker):
     pre, sucs, flags, metadata, date, parents = marker
     if flags & usingsha256:
-        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
+        raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
     metadata = dict(metadata)
     time, tz = date
-    metadata['date'] = '%r %i' % (time, tz)
+    metadata[b'date'] = b'%r %i' % (time, tz)
     if parents is not None:
         if not parents:
             # mark that we explicitly recorded no parents
-            metadata['p0'] = ''
+            metadata[b'p0'] = b''
         for i, p in enumerate(parents, 1):
-            metadata['p%i' % i] = node.hex(p)
+            metadata[b'p%i' % i] = node.hex(p)
     metadata = _fm0encodemeta(metadata)
     numsuc = len(sucs)
     format = _fm0fixed + (_fm0node * numsuc)
@@ -253,26 +268,29 @@
     data.extend(sucs)
     return _pack(format, *data) + metadata
 
+
 def _fm0encodemeta(meta):
     """Return encoded metadata string to string mapping.
 
     Assume no ':' in keys and no '\0' in either keys or values."""
-    for key, value in meta.iteritems():
-        if ':' in key or '\0' in key:
-            raise ValueError("':' and '\0' are forbidden in metadata key'")
-        if '\0' in value:
-            raise ValueError("':' is forbidden in metadata value'")
-    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
+    for key, value in pycompat.iteritems(meta):
+        if b':' in key or b'\0' in key:
+            raise ValueError(b"':' and '\0' are forbidden in metadata key'")
+        if b'\0' in value:
+            raise ValueError(b"':' is forbidden in metadata value'")
+    return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])
+
 
 def _fm0decodemeta(data):
     """Return string to string dictionary from encoded version."""
     d = {}
-    for l in data.split('\0'):
+    for l in data.split(b'\0'):
         if l:
-            key, value = l.split(':', 1)
+            key, value = l.split(b':', 1)
             d[key] = value
     return d
 
+
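# [editor's sketch, not part of the changeset] The fm0 metadata wire form
# is '\0'-separated 'key:value' pairs, so the two internal helpers above
# round-trip (given the constraints in the docstring).
from mercurial import obsolete

meta = {b'user': b'alice <alice@example.com>', b'operation': b'amend'}
blob = obsolete._fm0encodemeta(meta)  # b'operation:amend\x00user:alice ...'
assert obsolete._fm0decodemeta(blob) == meta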
 ## Parsing and writing of version "1"
 #
 # The header is followed by the markers. Each marker is made of:
@@ -308,18 +326,19 @@
 #
 # - remaining bytes: the metadata, each (key, value) pair after the other.
 _fm1version = 1
-_fm1fixed = '>IdhHBBB20s'
-_fm1nodesha1 = '20s'
-_fm1nodesha256 = '32s'
+_fm1fixed = b'>IdhHBBB20s'
+_fm1nodesha1 = b'20s'
+_fm1nodesha256 = b'32s'
 _fm1nodesha1size = _calcsize(_fm1nodesha1)
 _fm1nodesha256size = _calcsize(_fm1nodesha256)
 _fm1fsize = _calcsize(_fm1fixed)
 _fm1parentnone = 3
 _fm1parentshift = 14
-_fm1parentmask = (_fm1parentnone << _fm1parentshift)
-_fm1metapair = 'BB'
+_fm1parentmask = _fm1parentnone << _fm1parentshift
+_fm1metapair = b'BB'
 _fm1metapairsize = _calcsize(_fm1metapair)
 
+
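# [editor's sketch, not part of the changeset] Sizes implied by the fm1
# format strings above; the fixed part covers total size, date, timezone,
# flags, three counters and a sha1 predecessor.
import struct

assert struct.calcsize('>IdhHBBB20s') == 39
assert struct.calcsize('BB') == 2  # one (key length, value length) pair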
 def _fm1purereadmarkers(data, off, stop):
     # make some global constants local for performance
     noneflag = _fm1parentnone
@@ -384,7 +403,7 @@
 
         # read metadata
         off = o3 + metasize * nummeta
-        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
+        metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
         metadata = []
         for idx in pycompat.xrange(0, len(metapairsize), 2):
             o1 = off + metapairsize[idx]
@@ -394,6 +413,7 @@
 
         yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
 
+
 def _fm1encodeonemarker(marker):
     pre, sucs, flags, metadata, date, parents = marker
     # determine node size
@@ -411,7 +431,7 @@
     formatmeta = _fm1metapair * len(metadata)
     format = _fm1fixed + formatnodes + formatmeta
     # tz is stored in minutes so we divide by 60
-    tz = date[1]//60
+    tz = date[1] // 60
     data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
     data.extend(sucs)
     if parents is not None:
@@ -421,12 +441,16 @@
         lk = len(key)
         lv = len(value)
         if lk > 255:
-            msg = ('obsstore metadata key cannot be longer than 255 bytes'
-                   ' (key "%s" is %u bytes)') % (key, lk)
+            msg = (
+                b'obsstore metadata key cannot be longer than 255 bytes'
+                b' (key "%s" is %u bytes)'
+            ) % (key, lk)
             raise error.ProgrammingError(msg)
         if lv > 255:
-            msg = ('obsstore metadata value cannot be longer than 255 bytes'
-                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
+            msg = (
+                b'obsstore metadata value cannot be longer than 255 bytes'
+                b' (value "%s" for key "%s" is %u bytes)'
+            ) % (value, key, lv)
             raise error.ProgrammingError(msg)
         data.append(lk)
         data.append(lv)
@@ -436,7 +460,8 @@
     for key, value in metadata:
         data.append(key)
         data.append(value)
-    return ''.join(data)
+    return b''.join(data)
+
 
 def _fm1readmarkers(data, off, stop):
     native = getattr(parsers, 'fm1readmarkers', None)
@@ -444,13 +469,18 @@
         return _fm1purereadmarkers(data, off, stop)
     return native(data, off, stop)
 
+
 # mapping to read/write various marker formats
 # <version> -> (decoder, encoder)
-formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
-           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
+formats = {
+    _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
+    _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
+}
+
 
 def _readmarkerversion(data):
-    return _unpack('>B', data[0:1])[0]
+    return _unpack(b'>B', data[0:1])[0]
+
 
 @util.nogc
 def _readmarkers(data, off=None, stop=None):
@@ -461,12 +491,14 @@
     if stop is None:
         stop = len(data)
     if diskversion not in formats:
-        msg = _('parsing obsolete marker: unknown version %r') % diskversion
+        msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
         raise error.UnknownVersion(msg, version=diskversion)
     return diskversion, formats[diskversion][0](data, off, stop)
 
+
 def encodeheader(version=_fm0version):
-    return _pack('>B', version)
+    return _pack(b'>B', version)
+
 
 def encodemarkers(markers, addheader=False, version=_fm0version):
     # Kept separate from flushmarkers(), it will be reused for
@@ -477,17 +509,20 @@
     for marker in markers:
         yield encodeone(marker)
 
+
 @util.nogc
 def _addsuccessors(successors, markers):
     for mark in markers:
         successors.setdefault(mark[0], set()).add(mark)
 
+
 @util.nogc
 def _addpredecessors(predecessors, markers):
     for mark in markers:
         for suc in mark[1]:
             predecessors.setdefault(suc, set()).add(mark)
 
+
 @util.nogc
 def _addchildren(children, markers):
     for mark in markers:
@@ -496,6 +531,7 @@
             for p in parents:
                 children.setdefault(p, set()).add(mark)
 
+
 def _checkinvalidmarkers(markers):
     """search for marker with invalid data and raise error if needed
 
@@ -504,8 +540,13 @@
     """
     for mark in markers:
         if node.nullid in mark[1]:
-            raise error.Abort(_('bad obsolescence marker detected: '
-                               'invalid successors nullid'))
+            raise error.Abort(
+                _(
+                    b'bad obsolescence marker detected: '
+                    b'invalid successors nullid'
+                )
+            )
+
 
 class obsstore(object):
     """Store obsolete markers
@@ -516,7 +557,7 @@
     - children[x]   -> set(markers on predecessor edges of children(x))
     """
 
-    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
+    fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
     # prec:    nodeid, predecessors changesets
     # succs:   tuple of nodeid, successor changesets (0-N length)
     # flag:    integer, flag field carrying modifier for the markers (see doc)
@@ -541,7 +582,7 @@
     def __nonzero__(self):
         if not self._cached(r'_all'):
             try:
-                return self.svfs.stat('obsstore').st_size > 1
+                return self.svfs.stat(b'obsstore').st_size > 1
             except OSError as inst:
                 if inst.errno != errno.ENOENT:
                     raise
@@ -558,8 +599,17 @@
         Remove me in the future when obsolete markers are always on."""
         return self._readonly
 
-    def create(self, transaction, prec, succs=(), flag=0, parents=None,
-               date=None, metadata=None, ui=None):
+    def create(
+        self,
+        transaction,
+        prec,
+        succs=(),
+        flag=0,
+        parents=None,
+        date=None,
+        metadata=None,
+        ui=None,
+    ):
         """obsolete: add a new obsolete marker
 
         * ensuring it is hashable
@@ -575,11 +625,11 @@
         if metadata is None:
             metadata = {}
         if date is None:
-            if 'date' in metadata:
+            if b'date' in metadata:
                 # as a courtesy for out-of-tree extensions
-                date = dateutil.parsedate(metadata.pop('date'))
+                date = dateutil.parsedate(metadata.pop(b'date'))
             elif ui is not None:
-                date = ui.configdate('devel', 'default-date')
+                date = ui.configdate(b'devel', b'default-date')
                 if date is None:
                     date = dateutil.makedate()
             else:
@@ -591,9 +641,10 @@
                 raise ValueError(succ)
         if prec in succs:
             raise ValueError(
-                r'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec)))
+                r'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
+            )
 
-        metadata = tuple(sorted(metadata.iteritems()))
+        metadata = tuple(sorted(pycompat.iteritems(metadata)))
         for k, v in metadata:
             try:
                 # might be better to reject non-ASCII keys
@@ -601,9 +652,10 @@
                 v.decode('utf-8')
             except UnicodeDecodeError:
                 raise error.ProgrammingError(
-                    'obsstore metadata must be valid UTF-8 sequence '
-                    '(key = %r, value = %r)'
-                    % (pycompat.bytestr(k), pycompat.bytestr(v)))
+                    b'obsstore metadata must be valid UTF-8 sequence '
+                    b'(key = %r, value = %r)'
+                    % (pycompat.bytestr(k), pycompat.bytestr(v))
+                )
 
         marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
         return bool(self.add(transaction, [marker]))
@@ -614,8 +666,9 @@
         Take care of filtering duplicates.
         Return the number of new markers."""
         if self._readonly:
-            raise error.Abort(_('creating obsolete markers is not enabled on '
-                              'this repo'))
+            raise error.Abort(
+                _(b'creating obsolete markers is not enabled on this repo')
+            )
         known = set()
         getsuccessors = self.successors.get
         new = []
@@ -624,10 +677,10 @@
                 known.add(m)
                 new.append(m)
         if new:
-            f = self.svfs('obsstore', 'ab')
+            f = self.svfs(b'obsstore', b'ab')
             try:
                 offset = f.tell()
-                transaction.add('obsstore', offset)
+                transaction.add(b'obsstore', offset)
                 # offset == 0: new file - add the version header
                 data = b''.join(encodemarkers(new, offset == 0, self._version))
                 f.write(data)
@@ -635,15 +688,15 @@
                 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                 # call 'filecacheentry.refresh()'  here
                 f.close()
-            addedmarkers = transaction.changes.get('obsmarkers')
+            addedmarkers = transaction.changes.get(b'obsmarkers')
             if addedmarkers is not None:
                 addedmarkers.update(new)
             self._addmarkers(new, data)
             # new marker *may* have changed several set. invalidate the cache.
             self.caches.clear()
         # records the number of new markers for the transaction hooks
-        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
-        transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
+        previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
+        transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
         return len(new)
 
     def mergemarkers(self, transaction, data):
@@ -655,7 +708,7 @@
 
     @propertycache
     def _data(self):
-        return self.svfs.tryread('obsstore')
+        return self.svfs.tryread(b'obsstore')
 
     @propertycache
     def _version(self):
@@ -696,7 +749,7 @@
         return attr in self.__dict__
 
     def _addmarkers(self, markers, rawdata):
-        markers = list(markers) # to allow repeated iteration
+        markers = list(markers)  # to allow repeated iteration
         self._data = self._data + rawdata
         self._all.extend(markers)
         if self._cached(r'successors'):
@@ -740,11 +793,12 @@
             seennodes |= pendingnodes
         return seenmarkers
 
+
 def makestore(ui, repo):
     """Create an obsstore instance from a repo."""
     # read default format for new obsstore.
     # developer config: format.obsstore-version
-    defaultformat = ui.configint('format', 'obsstore-version')
+    defaultformat = ui.configint(b'format', b'obsstore-version')
     # rely on obsstore class default when possible.
     kwargs = {}
     if defaultformat is not None:
@@ -752,10 +806,13 @@
     readonly = not isenabled(repo, createmarkersopt)
     store = obsstore(repo.svfs, readonly=readonly, **kwargs)
     if store and readonly:
-        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
-                % len(list(store)))
+        ui.warn(
+            _(b'obsolete feature not enabled but %i markers found!\n')
+            % len(list(store))
+        )
     return store
 
+
 def commonversion(versions):
     """Return the newest version listed in both versions and our local formats.
 
@@ -768,12 +825,14 @@
             return v
     return None
 
+
 # arbitrarily picked to fit into the 8K limit from the HTTP server
 # you have to take into account:
 # - the version header
 # - the base85 encoding
 _maxpayload = 5300
 
+
 def _pushkeyescape(markers):
     """encode markers into a dict suitable for pushkey exchange
 
@@ -784,59 +843,69 @@
     currentlen = _maxpayload * 2  # ensure we create a new part
     for marker in markers:
         nextdata = _fm0encodeonemarker(marker)
-        if (len(nextdata) + currentlen > _maxpayload):
+        if len(nextdata) + currentlen > _maxpayload:
             currentpart = []
             currentlen = 0
             parts.append(currentpart)
         currentpart.append(nextdata)
         currentlen += len(nextdata)
     for idx, part in enumerate(reversed(parts)):
-        data = ''.join([_pack('>B', _fm0version)] + part)
-        keys['dump%i' % idx] = util.b85encode(data)
+        data = b''.join([_pack(b'>B', _fm0version)] + part)
+        keys[b'dump%i' % idx] = util.b85encode(data)
     return keys
 
+
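# [editor's sketch, not part of the changeset] _pushkeyescape() buckets
# markers into base85-encoded 'dump0', 'dump1', ... entries, each kept
# under the ~5300-byte payload cap noted above. A one-marker smoke test,
# using the internal (prec, succs, flags, meta, date, parents) tuple layout:
from mercurial import obsolete

marker = (b'\x11' * 20, (b'\x22' * 20,), 0, (), (0.0, 0), None)
assert list(obsolete._pushkeyescape([marker])) == [b'dump0']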
 def listmarkers(repo):
     """List markers over pushkey"""
     if not repo.obsstore:
         return {}
     return _pushkeyescape(sorted(repo.obsstore))
 
+
 def pushmarker(repo, key, old, new):
     """Push markers over pushkey"""
-    if not key.startswith('dump'):
-        repo.ui.warn(_('unknown key: %r') % key)
+    if not key.startswith(b'dump'):
+        repo.ui.warn(_(b'unknown key: %r') % key)
         return False
     if old:
-        repo.ui.warn(_('unexpected old value for %r') % key)
+        repo.ui.warn(_(b'unexpected old value for %r') % key)
         return False
     data = util.b85decode(new)
-    with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
+    with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
         repo.obsstore.mergemarkers(tr, data)
         repo.invalidatevolatilesets()
         return True
 
+
 # mapping of 'set-name' -> <function to compute this set>
 cachefuncs = {}
+
+
 def cachefor(name):
     """Decorator to register a function as computing the cache for a set"""
+
     def decorator(func):
         if name in cachefuncs:
-            msg = "duplicated registration for volatileset '%s' (existing: %r)"
+            msg = b"duplicated registration for volatileset '%s' (existing: %r)"
             raise error.ProgrammingError(msg % (name, cachefuncs[name]))
         cachefuncs[name] = func
         return func
+
     return decorator
 
+
 def getrevs(repo, name):
     """Return the set of revision that belong to the <name> set
 
     Such access may compute the set and cache it for future use"""
     repo = repo.unfiltered()
-    if not repo.obsstore:
-        return frozenset()
-    if name not in repo.obsstore.caches:
-        repo.obsstore.caches[name] = cachefuncs[name](repo)
-    return repo.obsstore.caches[name]
+    with util.timedcm('getrevs %s', name):
+        if not repo.obsstore:
+            return frozenset()
+        if name not in repo.obsstore.caches:
+            repo.obsstore.caches[name] = cachefuncs[name](repo)
+        return repo.obsstore.caches[name]
+
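# [editor's sketch, not part of the changeset] Consuming the volatile-set
# cache from an extension; 'repo' is assumed to be a localrepository.
from mercurial import obsolete

def orphancount(repo):
    # the first call computes and caches; later calls hit obsstore.caches
    return len(obsolete.getrevs(repo, b'orphan'))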
 
 # To be simple we need to invalidate obsolescence cache when:
 #
@@ -853,14 +922,16 @@
     (We could be smarter here given the exact event that triggers the cache
     clearing)"""
     # only clear the cache if there is obsstore data in this repo
-    if 'obsstore' in repo._filecache:
+    if b'obsstore' in repo._filecache:
         repo.obsstore.caches.clear()
 
+
 def _mutablerevs(repo):
     """the set of mutable revision in the repository"""
     return repo._phasecache.getrevset(repo, phases.mutablephases)
 
-@cachefor('obsolete')
+
+@cachefor(b'obsolete')
 def _computeobsoleteset(repo):
     """the set of obsolete revisions"""
     getnode = repo.changelog.node
@@ -869,12 +940,13 @@
     obs = set(r for r in notpublic if isobs(getnode(r)))
     return obs
 
-@cachefor('orphan')
+
+@cachefor(b'orphan')
 def _computeorphanset(repo):
     """the set of non obsolete revisions with obsolete parents"""
     pfunc = repo.changelog.parentrevs
     mutable = _mutablerevs(repo)
-    obsolete = getrevs(repo, 'obsolete')
+    obsolete = getrevs(repo, b'obsolete')
     others = mutable - obsolete
     unstable = set()
     for r in sorted(others):
@@ -886,42 +958,47 @@
                 break
     return unstable
 
-@cachefor('suspended')
+
+@cachefor(b'suspended')
 def _computesuspendedset(repo):
     """the set of obsolete parents with non obsolete descendants"""
-    suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
-    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
+    suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
+    return set(r for r in getrevs(repo, b'obsolete') if r in suspended)
 
-@cachefor('extinct')
+
+@cachefor(b'extinct')
 def _computeextinctset(repo):
     """the set of obsolete parents without non obsolete descendants"""
-    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
+    return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')
 
-@cachefor('phasedivergent')
+
+@cachefor(b'phasedivergent')
 def _computephasedivergentset(repo):
     """the set of revs trying to obsolete public revisions"""
     bumped = set()
     # util function (avoid attribute lookup in the loop)
-    phase = repo._phasecache.phase # would be faster to grab the full list
+    phase = repo._phasecache.phase  # would be faster to grab the full list
     public = phases.public
     cl = repo.changelog
     torev = cl.nodemap.get
     tonode = cl.node
     obsstore = repo.obsstore
-    for rev in repo.revs('(not public()) and (not obsolete())'):
+    for rev in repo.revs(b'(not public()) and (not obsolete())'):
         # We only evaluate mutable, non-obsolete revisions
         node = tonode(rev)
         # (future) A cache of predecessors may be worth it if split is very common
-        for pnode in obsutil.allpredecessors(obsstore, [node],
-                                   ignoreflags=bumpedfix):
-            prev = torev(pnode) # unfiltered! but so is phasecache
+        for pnode in obsutil.allpredecessors(
+            obsstore, [node], ignoreflags=bumpedfix
+        ):
+            prev = torev(pnode)  # unfiltered! but so is phasecache
             if (prev is not None) and (phase(repo, prev) <= public):
                 # we have a public predecessor
                 bumped.add(rev)
-                break # Next draft!
+                break  # Next draft!
     return bumped
 
-@cachefor('contentdivergent')
+
+@cachefor(b'contentdivergent')
 def _computecontentdivergentset(repo):
     """the set of rev that compete to be the final successors of some revision.
     """
@@ -929,7 +1006,7 @@
     obsstore = repo.obsstore
     newermap = {}
     tonode = repo.changelog.node
-    for rev in repo.revs('(not public()) - obsolete()'):
+    for rev in repo.revs(b'(not public()) - obsolete()'):
         node = tonode(rev)
         mark = obsstore.predecessors.get(node, ())
         toprocess = set(mark)
@@ -937,7 +1014,7 @@
         while toprocess:
             prec = toprocess.pop()[0]
             if prec in seen:
-                continue # emergency cycle hanging prevention
+                continue  # emergency cycle hanging prevention
             seen.add(prec)
             if prec not in newermap:
                 obsutil.successorssets(repo, prec, cache=newermap)
@@ -948,18 +1025,21 @@
             toprocess.update(obsstore.predecessors.get(prec, ()))
     return divergent
 
+
 def makefoldid(relation, user):
 
     folddigest = hashlib.sha1(user)
     for p in relation[0] + relation[1]:
-        folddigest.update('%d' % p.rev())
+        folddigest.update(b'%d' % p.rev())
         folddigest.update(p.node())
     # Since fold only has to compete against fold for the same successors, it
     # seems fine to use a small ID. Smaller IDs save space.
     return node.hex(folddigest.digest())[:8]
 
-def createmarkers(repo, relations, flag=0, date=None, metadata=None,
-                  operation=None):
+
+def createmarkers(
+    repo, relations, flag=0, date=None, metadata=None, operation=None
+):
     """Add obsolete markers between changesets in a repo
 
     <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
@@ -979,21 +1059,25 @@
     # prepare metadata
     if metadata is None:
         metadata = {}
-    if 'user' not in metadata:
-        luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username()
-        metadata['user'] = encoding.fromlocal(luser)
+    if b'user' not in metadata:
+        luser = (
+            repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
+        )
+        metadata[b'user'] = encoding.fromlocal(luser)
 
     # Operation metadata handling
-    useoperation = repo.ui.configbool('experimental',
-        'evolution.track-operation')
+    useoperation = repo.ui.configbool(
+        b'experimental', b'evolution.track-operation'
+    )
     if useoperation and operation:
-        metadata['operation'] = operation
+        metadata[b'operation'] = operation
 
     # Effect flag metadata handling
-    saveeffectflag = repo.ui.configbool('experimental',
-                                        'evolution.effect-flags')
+    saveeffectflag = repo.ui.configbool(
+        b'experimental', b'evolution.effect-flags'
+    )
 
-    with repo.transaction('add-obsolescence-marker') as tr:
+    with repo.transaction(b'add-obsolescence-marker') as tr:
         markerargs = []
         for rel in relations:
             predecessors = rel[0]
@@ -1001,41 +1085,43 @@
                 # preserve compat with old API until all callers are migrated
                 predecessors = (predecessors,)
             if len(predecessors) > 1 and len(rel[1]) != 1:
-                msg = 'Fold markers can only have 1 successors, not %d'
+                msg = b'Fold markers can only have 1 successor, not %d'
                 raise error.ProgrammingError(msg % len(rel[1]))
             foldid = None
             foldsize = len(predecessors)
             if 1 < foldsize:
-                foldid = makefoldid(rel, metadata['user'])
+                foldid = makefoldid(rel, metadata[b'user'])
             for foldidx, prec in enumerate(predecessors, 1):
                 sucs = rel[1]
                 localmetadata = metadata.copy()
                 if len(rel) > 2:
                     localmetadata.update(rel[2])
                 if foldid is not None:
-                    localmetadata['fold-id'] = foldid
-                    localmetadata['fold-idx'] = '%d' % foldidx
-                    localmetadata['fold-size'] = '%d' % foldsize
+                    localmetadata[b'fold-id'] = foldid
+                    localmetadata[b'fold-idx'] = b'%d' % foldidx
+                    localmetadata[b'fold-size'] = b'%d' % foldsize
 
                 if not prec.mutable():
-                    raise error.Abort(_("cannot obsolete public changeset: %s")
-                                     % prec,
-                                     hint="see 'hg help phases' for details")
+                    raise error.Abort(
+                        _(b"cannot obsolete public changeset: %s") % prec,
+                        hint=b"see 'hg help phases' for details",
+                    )
                 nprec = prec.node()
                 nsucs = tuple(s.node() for s in sucs)
                 npare = None
                 if not nsucs:
                     npare = tuple(p.node() for p in prec.parents())
                 if nprec in nsucs:
-                    raise error.Abort(_("changeset %s cannot obsolete itself")
-                                      % prec)
+                    raise error.Abort(
+                        _(b"changeset %s cannot obsolete itself") % prec
+                    )
 
                 # Effect flag can be different by relation
                 if saveeffectflag:
                     # The effect flag is saved in a versioned field name for
                     # future evolution
                     effectflag = obsutil.geteffectflag(prec, sucs)
-                    localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag
+                    localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag
 
                 # Creating the marker causes the hidden cache to become
                 # invalid, which causes recomputation when we ask for
@@ -1045,7 +1131,14 @@
 
         for args in markerargs:
             nprec, nsucs, npare, localmetadata = args
-            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
-                                 date=date, metadata=localmetadata,
-                                 ui=repo.ui)
+            repo.obsstore.create(
+                tr,
+                nprec,
+                nsucs,
+                flag,
+                parents=npare,
+                date=date,
+                metadata=localmetadata,
+                ui=repo.ui,
+            )
             repo.filteredrevcache.clear()
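
The hunks above are pure formatting churn, but the machinery they touch is
compact enough to sketch on its own: ``cachefor(name)`` registers a computing
function in a module-level table, and ``getrevs`` lazily fills a per-obsstore
cache from that table on first access. A minimal standalone model of the same
register-then-lazily-cache pattern (toy code, not the mercurial API; the
repo/obsstore plumbing is replaced by a plain dict)::

    cachefuncs = {}

    def cachefor(name):
        """Register a function computing the volatile set <name>."""
        def decorator(func):
            if name in cachefuncs:
                raise ValueError('duplicated registration for %r' % name)
            cachefuncs[name] = func
            return func
        return decorator

    def getrevs(caches, name):
        """Return the set for <name>, computing and caching it once."""
        if name not in caches:
            caches[name] = cachefuncs[name]()
        return caches[name]

    @cachefor('odd')
    def _computeoddset():
        return frozenset(r for r in range(10) if r % 2)

    caches = {}  # stands in for repo.obsstore.caches
    assert getrevs(caches, 'odd') == frozenset([1, 3, 5, 7, 9])

Clearing the cache dict (as ``clearobscaches`` does above) is the entire
invalidation story: the next ``getrevs`` call simply recomputes.
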
--- a/mercurial/obsutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/obsutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,11 +15,10 @@
     encoding,
     node as nodemod,
     phases,
+    pycompat,
     util,
 )
-from .utils import (
-    dateutil,
-)
+from .utils import dateutil
 
 ### obsolescence marker flag
 
@@ -54,6 +53,7 @@
 bumpedfix = 1
 usingsha256 = 2
 
+
 class marker(object):
     """Wrap obsolete marker raw data"""
 
@@ -95,6 +95,7 @@
         """The flags field of the marker"""
         return self._data[2]
 
+
 def getmarkers(repo, nodes=None, exclusive=False):
     """returns markers known in a repository
 
@@ -110,6 +111,7 @@
     for markerdata in rawmarkers:
         yield marker(repo, markerdata)
 
+
 def closestpredecessors(repo, nodeid):
     """yield the list of next predecessors pointing on visible changectx nodes
 
@@ -138,6 +140,7 @@
             else:
                 stack.append(precnodeid)
 
+
 def allpredecessors(obsstore, nodes, ignoreflags=0):
     """Yield node for every precursors of <nodes>.
 
@@ -161,6 +164,7 @@
                 seen.add(suc)
                 remaining.add(suc)
 
+
 def allsuccessors(obsstore, nodes, ignoreflags=0):
     """Yield node for every successor of <nodes>.
 
@@ -182,10 +186,12 @@
                     seen.add(suc)
                     remaining.add(suc)
 
+
 def _filterprunes(markers):
     """return a set with no prune markers"""
     return set(m for m in markers if m[1])
 
+
 def exclusivemarkers(repo, nodes):
     """set of markers relevant to "nodes" but no other locally-known nodes
 
@@ -307,6 +313,7 @@
 
     return exclmarkers
 
+
 def foreground(repo, nodes):
     """return all nodes in the "foreground" of other node
 
@@ -317,7 +324,7 @@
     Beware that a possible obsolescence cycle may result in complex situations.
     """
     repo = repo.unfiltered()
-    foreground = set(repo.set('%ln::', nodes))
+    foreground = set(repo.set(b'%ln::', nodes))
     if repo.obsstore:
         # We only need this complicated logic if there is obsolescence
         # XXX will probably deserve an optimised revset.
@@ -330,9 +337,10 @@
             mutable = [c.node() for c in foreground if c.mutable()]
             succs.update(allsuccessors(repo.obsstore, mutable))
             known = (n for n in succs if n in nm)
-            foreground = set(repo.set('%ln::', known))
+            foreground = set(repo.set(b'%ln::', known))
     return set(c.node() for c in foreground)
 
+
 # effectflag field
 #
 # Effect-flag is a 1-byte bit field used to store what changed between a
@@ -348,23 +356,24 @@
 # `effect-flags` set to off by default.
 #
 
-EFFECTFLAGFIELD = "ef1"
+EFFECTFLAGFIELD = b"ef1"
 
-DESCCHANGED = 1 << 0 # action changed the description
-METACHANGED = 1 << 1 # action change the meta
-DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
-PARENTCHANGED = 1 << 2 # action change the parent
-USERCHANGED = 1 << 4 # the user changed
-DATECHANGED = 1 << 5 # the date changed
-BRANCHCHANGED = 1 << 6 # the branch changed
+DESCCHANGED = 1 << 0  # action changed the description
+METACHANGED = 1 << 1  # action change the meta
+DIFFCHANGED = 1 << 3  # action change diff introduced by the changeset
+PARENTCHANGED = 1 << 2  # action change the parent
+USERCHANGED = 1 << 4  # the user changed
+DATECHANGED = 1 << 5  # the date changed
+BRANCHCHANGED = 1 << 6  # the branch changed
 
 METABLACKLIST = [
-    re.compile('^branch$'),
-    re.compile('^.*-source$'),
-    re.compile('^.*_source$'),
-    re.compile('^source$'),
+    re.compile(b'^branch$'),
+    re.compile(b'^.*-source$'),
+    re.compile(b'^.*_source$'),
+    re.compile(b'^source$'),
 ]
 
+
 def metanotblacklisted(metaitem):
     """ Check that the key of a meta item (extrakey, extravalue) does not
     match any of the blacklist patterns
@@ -373,6 +382,7 @@
 
     return not any(pattern.match(metakey) for pattern in METABLACKLIST)
 
+
 def _prepare_hunk(hunk):
     """Drop all information but the username and patch"""
     cleanhunk = []
@@ -383,6 +393,7 @@
             cleanhunk.append(line)
     return cleanhunk
 
+
 def _getdifflines(iterdiff):
     """return a cleaned up lines"""
     lines = next(iterdiff, None)
@@ -392,12 +403,13 @@
 
     return _prepare_hunk(lines)
 
+
 def _cmpdiff(leftctx, rightctx):
     """return True if both ctx introduce the "same diff"
 
     This is a first and basic implementation, with many shortcomings.
     """
-    diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})
+    diffopts = diffutil.diffallopts(leftctx.repo().ui, {b'git': True})
 
     # leftctx or rightctx might be filtered, so we need to use the contexts
     # with an unfiltered repository to safely compute the diff
@@ -419,6 +431,7 @@
             return False
     return True
 
+
 def geteffectflag(source, successors):
     """ From an obs-marker relation, compute what changed between the
     predecessor and the successor.
@@ -462,14 +475,15 @@
 
     return effects
 
+
 def getobsoleted(repo, tr):
     """return the set of pre-existing revisions obsoleted by a transaction"""
     torev = repo.unfiltered().changelog.nodemap.get
     phase = repo._phasecache.phase
     succsmarkers = repo.obsstore.successors.get
     public = phases.public
-    addedmarkers = tr.changes['obsmarkers']
-    origrepolen = tr.changes['origrepolen']
+    addedmarkers = tr.changes[b'obsmarkers']
+    origrepolen = tr.changes[b'origrepolen']
     seenrevs = set()
     obsoleted = set()
     for mark in addedmarkers:
@@ -484,6 +498,7 @@
             obsoleted.add(rev)
     return obsoleted
 
+
 class _succs(list):
     """small class to represent a successors with some metadata about it"""
 
@@ -504,6 +519,7 @@
     def canmerge(self, other):
         return self._set.issubset(other._set)
 
+
 def successorssets(repo, initialnode, closest=False, cache=None):
     """Return set of all latest successors of initial nodes
 
@@ -611,9 +627,9 @@
 
         # case 2 condition is a bit hairy because of closest,
         # we compute it on its own
-        case2condition =  ((current not in succmarkers)
-                           or (closest and current != initialnode
-                               and current in repo))
+        case2condition = (current not in succmarkers) or (
+            closest and current != initialnode and current in repo
+        )
 
         if current in cache:
             # case (1): We already know the successors sets
@@ -720,8 +736,9 @@
                 # remove duplicated and subset
                 seen = []
                 final = []
-                candidates = sorted((s for s in succssets if s),
-                                    key=len, reverse=True)
+                candidates = sorted(
+                    (s for s in succssets if s), key=len, reverse=True
+                )
                 for cand in candidates:
                     for seensuccs in seen:
                         if cand.canmerge(seensuccs):
@@ -730,10 +747,11 @@
                     else:
                         final.append(cand)
                         seen.append(cand)
-                final.reverse() # put small successors set first
+                final.reverse()  # put small successors set first
                 cache[current] = final
     return cache[initialnode]
 
+
 def successorsandmarkers(repo, ctx):
     """compute the raw data needed for computing obsfate
     Returns a list of dicts, one dict per successors set
@@ -750,7 +768,7 @@
 
     # Try to recover pruned markers
     succsmap = repo.obsstore.successors
-    fullsuccessorsets = [] # successor set + markers
+    fullsuccessorsets = []  # successor set + markers
     for sset in ssets:
         if sset:
             fullsuccessorsets.append(sset)
@@ -777,10 +795,11 @@
 
     values = []
     for sset in fullsuccessorsets:
-        values.append({'successors': sset, 'markers': sset.markers})
+        values.append({b'successors': sset, b'markers': sset.markers})
 
     return values
 
+
 def _getobsfate(successorssets):
     """ Compute a changeset obsolescence fate based on its successorssets.
     Successors can be the tipmost ones or the immediate ones. This function
@@ -795,53 +814,62 @@
 
     if len(successorssets) == 0:
         # The commit has been pruned
-        return 'pruned'
+        return b'pruned'
     elif len(successorssets) > 1:
-        return 'diverged'
+        return b'diverged'
     else:
         # No divergence, only one set of successors
         successors = successorssets[0]
 
         if len(successors) == 1:
-            return 'superseded'
+            return b'superseded'
         else:
-            return 'superseded_split'
+            return b'superseded_split'
+
 
 def obsfateverb(successorset, markers):
     """ Return the verb summarizing the successorset and potentially using
     information from the markers
     """
     if not successorset:
-        verb = 'pruned'
+        verb = b'pruned'
     elif len(successorset) == 1:
-        verb = 'rewritten'
+        verb = b'rewritten'
     else:
-        verb = 'split'
+        verb = b'split'
     return verb
 
+
 def markersdates(markers):
     """returns the list of dates for a list of markers
     """
     return [m[4] for m in markers]
 
+
 def markersusers(markers):
     """ Returns a sorted list of markers users without duplicates
     """
     markersmeta = [dict(m[3]) for m in markers]
-    users = set(encoding.tolocal(meta['user']) for meta in markersmeta
-                if meta.get('user'))
+    users = set(
+        encoding.tolocal(meta[b'user'])
+        for meta in markersmeta
+        if meta.get(b'user')
+    )
 
     return sorted(users)
 
+
 def markersoperations(markers):
     """ Returns a sorted list of markers operations without duplicates
     """
     markersmeta = [dict(m[3]) for m in markers]
-    operations = set(meta.get('operation') for meta in markersmeta
-                     if meta.get('operation'))
+    operations = set(
+        meta.get(b'operation') for meta in markersmeta if meta.get(b'operation')
+    )
 
     return sorted(operations)
 
+
 def obsfateprinter(ui, repo, successors, markers, formatctx):
     """ Build a obsfate string for a single successorset using all obsfate
     related function defined in obsutil
@@ -858,12 +886,12 @@
     # Operations
     operations = markersoperations(markers)
     if operations:
-        line.append(" using %s" % ", ".join(operations))
+        line.append(b" using %s" % b", ".join(operations))
 
     # Successors
     if successors:
         fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
-        line.append(" as %s" % ", ".join(fmtsuccessors))
+        line.append(b" as %s" % b", ".join(fmtsuccessors))
 
     # Users
     users = markersusers(markers)
@@ -875,7 +903,7 @@
             users = None
 
     if (verbose or normal) and users:
-        line.append(" by %s" % ", ".join(users))
+        line.append(b" by %s" % b", ".join(users))
 
     # Date
     dates = markersdates(markers)
@@ -885,25 +913,27 @@
         max_date = max(dates)
 
         if min_date == max_date:
-            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
-            line.append(" (at %s)" % fmtmin_date)
+            fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
+            line.append(b" (at %s)" % fmtmin_date)
         else:
-            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
-            fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
-            line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
+            fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
+            fmtmax_date = dateutil.datestr(max_date, b'%Y-%m-%d %H:%M %1%2')
+            line.append(b" (between %s and %s)" % (fmtmin_date, fmtmax_date))
 
-    return "".join(line)
+    return b"".join(line)
 
 
 filteredmsgtable = {
-    "pruned": _("hidden revision '%s' is pruned"),
-    "diverged": _("hidden revision '%s' has diverged"),
-    "superseded": _("hidden revision '%s' was rewritten as: %s"),
-    "superseded_split": _("hidden revision '%s' was split as: %s"),
-    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
-                                  "%d more"),
+    b"pruned": _(b"hidden revision '%s' is pruned"),
+    b"diverged": _(b"hidden revision '%s' has diverged"),
+    b"superseded": _(b"hidden revision '%s' was rewritten as: %s"),
+    b"superseded_split": _(b"hidden revision '%s' was split as: %s"),
+    b"superseded_split_several": _(
+        b"hidden revision '%s' was split as: %s and %d more"
+    ),
 }
 
+
 def _getfilteredreason(repo, changeid, ctx):
     """return a human-friendly string on why a obsolete changeset is hidden
     """
@@ -911,28 +941,29 @@
     fate = _getobsfate(successors)
 
     # Be more precise in case the revision is superseded
-    if fate == 'pruned':
-        return filteredmsgtable['pruned'] % changeid
-    elif fate == 'diverged':
-        return filteredmsgtable['diverged'] % changeid
-    elif fate == 'superseded':
+    if fate == b'pruned':
+        return filteredmsgtable[b'pruned'] % changeid
+    elif fate == b'diverged':
+        return filteredmsgtable[b'diverged'] % changeid
+    elif fate == b'superseded':
         single_successor = nodemod.short(successors[0][0])
-        return filteredmsgtable['superseded'] % (changeid, single_successor)
-    elif fate == 'superseded_split':
+        return filteredmsgtable[b'superseded'] % (changeid, single_successor)
+    elif fate == b'superseded_split':
 
         succs = []
         for node_id in successors[0]:
             succs.append(nodemod.short(node_id))
 
         if len(succs) <= 2:
-            fmtsuccs = ', '.join(succs)
-            return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
+            fmtsuccs = b', '.join(succs)
+            return filteredmsgtable[b'superseded_split'] % (changeid, fmtsuccs)
         else:
-            firstsuccessors = ', '.join(succs[:2])
+            firstsuccessors = b', '.join(succs[:2])
             remainingnumber = len(succs) - 2
 
             args = (changeid, firstsuccessors, remainingnumber)
-            return filteredmsgtable['superseded_split_several'] % args
+            return filteredmsgtable[b'superseded_split_several'] % args
+
 
 def divergentsets(repo, ctx):
     """Compute sets of commits divergent with a given one"""
@@ -951,8 +982,11 @@
                 # we already know the latest base for this divergence
                 continue
             base[tuple(nsuccset)] = n
-    return [{'divergentnodes': divset, 'commonpredecessor': b}
-            for divset, b in base.iteritems()]
+    return [
+        {b'divergentnodes': divset, b'commonpredecessor': b}
+        for divset, b in pycompat.iteritems(base)
+    ]
+
 
 def whyunstable(repo, ctx):
     result = []
@@ -960,28 +994,42 @@
         for parent in ctx.parents():
             kind = None
             if parent.orphan():
-                kind = 'orphan'
+                kind = b'orphan'
             elif parent.obsolete():
-                kind = 'obsolete'
+                kind = b'obsolete'
             if kind is not None:
-                result.append({'instability': 'orphan',
-                               'reason': '%s parent' % kind,
-                               'node': parent.hex()})
+                result.append(
+                    {
+                        b'instability': b'orphan',
+                        b'reason': b'%s parent' % kind,
+                        b'node': parent.hex(),
+                    }
+                )
     if ctx.phasedivergent():
-        predecessors = allpredecessors(repo.obsstore, [ctx.node()],
-                                       ignoreflags=bumpedfix)
-        immutable = [repo[p] for p in predecessors
-                     if p in repo and not repo[p].mutable()]
+        predecessors = allpredecessors(
+            repo.obsstore, [ctx.node()], ignoreflags=bumpedfix
+        )
+        immutable = [
+            repo[p] for p in predecessors if p in repo and not repo[p].mutable()
+        ]
         for predecessor in immutable:
-            result.append({'instability': 'phase-divergent',
-                           'reason': 'immutable predecessor',
-                           'node': predecessor.hex()})
+            result.append(
+                {
+                    b'instability': b'phase-divergent',
+                    b'reason': b'immutable predecessor',
+                    b'node': predecessor.hex(),
+                }
+            )
     if ctx.contentdivergent():
         dsets = divergentsets(repo, ctx)
         for dset in dsets:
-            divnodes = [repo[n] for n in dset['divergentnodes']]
-            result.append({'instability': 'content-divergent',
-                           'divergentnodes': divnodes,
-                           'reason': 'predecessor',
-                           'node': nodemod.hex(dset['commonpredecessor'])})
+            divnodes = [repo[n] for n in dset[b'divergentnodes']]
+            result.append(
+                {
+                    b'instability': b'content-divergent',
+                    b'divergentnodes': divnodes,
+                    b'reason': b'predecessor',
+                    b'node': nodemod.hex(dset[b'commonpredecessor']),
+                }
+            )
     return result
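
The ``ef1`` effect-flag handled above is a one-byte bit field recording which
aspects of a changeset a rewrite touched (description, user, date, parents,
diff, ...). Decoding one is plain bit masking; here is a standalone sketch
reusing the flag values from the hunk above (the ``describeeffects`` helper
is illustrative, not part of obsutil)::

    DESCCHANGED = 1 << 0    # action changed the description
    METACHANGED = 1 << 1    # action changed the meta
    PARENTCHANGED = 1 << 2  # action changed the parent
    DIFFCHANGED = 1 << 3    # action changed the diff
    USERCHANGED = 1 << 4    # the user changed
    DATECHANGED = 1 << 5    # the date changed
    BRANCHCHANGED = 1 << 6  # the branch changed

    _EFFECTNAMES = [
        (DESCCHANGED, 'description'),
        (METACHANGED, 'meta'),
        (PARENTCHANGED, 'parent'),
        (DIFFCHANGED, 'content'),
        (USERCHANGED, 'user'),
        (DATECHANGED, 'date'),
        (BRANCHCHANGED, 'branch'),
    ]

    def describeeffects(effectflag):
        """List the aspects recorded as changed in an ef1 value."""
        return [name for bit, name in _EFFECTNAMES if effectflag & bit]

    # a marker carrying ef1 metadata "17" records USERCHANGED | DESCCHANGED
    assert describeeffects(17) == ['description', 'user']
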
--- a/mercurial/parser.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/parser.py	Mon Oct 21 11:09:48 2019 -0400
@@ -24,31 +24,35 @@
     pycompat,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
+
 
 class parser(object):
     def __init__(self, elements, methods=None):
         self._elements = elements
         self._methods = methods
         self.current = None
+
     def _advance(self):
-        'advance the tokenizer'
+        b'advance the tokenizer'
         t = self.current
         self.current = next(self._iter, None)
         return t
+
     def _hasnewterm(self):
-        'True if next token may start new term'
+        b'True if next token may start new term'
         return any(self._elements[self.current[0]][1:3])
+
     def _match(self, m):
-        'make sure the tokenizer matches an end condition'
+        b'make sure the tokenizer matches an end condition'
         if self.current[0] != m:
-            raise error.ParseError(_("unexpected token: %s") % self.current[0],
-                                   self.current[2])
+            raise error.ParseError(
+                _(b"unexpected token: %s") % self.current[0], self.current[2]
+            )
         self._advance()
+
     def _parseoperand(self, bind, m=None):
-        'gather right-hand-side operand until an end condition or binding met'
+        b'gather right-hand-side operand until an end condition or binding met'
         if m and self.current[0] == m:
             expr = None
         else:
@@ -56,6 +60,7 @@
         if m:
             self._match(m)
         return expr
+
     def _parse(self, bind=0):
         token, value, pos = self._advance()
         # handle prefix rules on current token, take as primary if unambiguous
@@ -65,7 +70,7 @@
         elif prefix:
             expr = (prefix[0], self._parseoperand(*prefix[1:]))
         else:
-            raise error.ParseError(_("not a prefix: %s") % token, pos)
+            raise error.ParseError(_(b"not a prefix: %s") % token, pos)
         # gather tokens until we meet a lower binding strength
         while bind < self._elements[self.current[0]][0]:
             token, value, pos = self._advance()
@@ -76,27 +81,31 @@
             elif infix:
                 expr = (infix[0], expr, self._parseoperand(*infix[1:]))
             else:
-                raise error.ParseError(_("not an infix: %s") % token, pos)
+                raise error.ParseError(_(b"not an infix: %s") % token, pos)
         return expr
+
     def parse(self, tokeniter):
-        'generate a parse tree from tokens'
+        b'generate a parse tree from tokens'
         self._iter = tokeniter
         self._advance()
         res = self._parse()
         token, value, pos = self.current
         return res, pos
+
     def eval(self, tree):
-        'recursively evaluate a parse tree using node methods'
+        b'recursively evaluate a parse tree using node methods'
         if not isinstance(tree, tuple):
             return tree
         return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]])
+
     def __call__(self, tokeniter):
-        'parse tokens into a parse tree and evaluate if methods given'
+        b'parse tokens into a parse tree and evaluate if methods given'
         t = self.parse(tokeniter)
         if self._methods:
             return self.eval(t)
         return t
 
+
 def splitargspec(spec):
     """Parse spec of function arguments into (poskeys, varkey, keys, optkey)
 
@@ -112,24 +121,25 @@
     ([], None, [], 'foo')
     """
     optkey = None
-    pre, sep, post = spec.partition('**')
+    pre, sep, post = spec.partition(b'**')
     if sep:
         posts = post.split()
         if not posts:
-            raise error.ProgrammingError('no **optkey name provided')
+            raise error.ProgrammingError(b'no **optkey name provided')
         if len(posts) > 1:
-            raise error.ProgrammingError('excessive **optkey names provided')
+            raise error.ProgrammingError(b'excessive **optkey names provided')
         optkey = posts[0]
 
-    pre, sep, post = pre.partition('*')
+    pre, sep, post = pre.partition(b'*')
     pres = pre.split()
     posts = post.split()
     if sep:
         if not posts:
-            raise error.ProgrammingError('no *varkey name provided')
+            raise error.ProgrammingError(b'no *varkey name provided')
         return pres, posts[0], posts[1:], optkey
     return [], None, pres, optkey
 
+
 def buildargsdict(trees, funcname, argspec, keyvaluenode, keynode):
     """Build dict from list containing positional and keyword arguments
 
@@ -147,50 +157,59 @@
     arguments are rejected, but missing keyword arguments are just omitted.
     """
     poskeys, varkey, keys, optkey = argspec
-    kwstart = next((i for i, x in enumerate(trees)
-                    if x and x[0] == keyvaluenode),
-                   len(trees))
+    kwstart = next(
+        (i for i, x in enumerate(trees) if x and x[0] == keyvaluenode),
+        len(trees),
+    )
     if kwstart < len(poskeys):
-        raise error.ParseError(_("%(func)s takes at least %(nargs)d positional "
-                                 "arguments")
-                               % {'func': funcname, 'nargs': len(poskeys)})
+        raise error.ParseError(
+            _(b"%(func)s takes at least %(nargs)d positional arguments")
+            % {b'func': funcname, b'nargs': len(poskeys)}
+        )
     if not varkey and kwstart > len(poskeys) + len(keys):
-        raise error.ParseError(_("%(func)s takes at most %(nargs)d positional "
-                                 "arguments")
-                               % {'func': funcname,
-                                  'nargs': len(poskeys) + len(keys)})
+        raise error.ParseError(
+            _(b"%(func)s takes at most %(nargs)d positional arguments")
+            % {b'func': funcname, b'nargs': len(poskeys) + len(keys)}
+        )
     args = util.sortdict()
     # consume positional arguments
     for k, x in zip(poskeys, trees[:kwstart]):
         args[k] = x
     if varkey:
-        args[varkey] = trees[len(args):kwstart]
+        args[varkey] = trees[len(args) : kwstart]
     else:
-        for k, x in zip(keys, trees[len(args):kwstart]):
+        for k, x in zip(keys, trees[len(args) : kwstart]):
             args[k] = x
     # remainder should be keyword arguments
     if optkey:
         args[optkey] = util.sortdict()
     for x in trees[kwstart:]:
         if not x or x[0] != keyvaluenode or x[1][0] != keynode:
-            raise error.ParseError(_("%(func)s got an invalid argument")
-                                   % {'func': funcname})
+            raise error.ParseError(
+                _(b"%(func)s got an invalid argument") % {b'func': funcname}
+            )
         k = x[1][1]
         if k in keys:
             d = args
         elif not optkey:
-            raise error.ParseError(_("%(func)s got an unexpected keyword "
-                                     "argument '%(key)s'")
-                                   % {'func': funcname, 'key': k})
+            raise error.ParseError(
+                _(b"%(func)s got an unexpected keyword argument '%(key)s'")
+                % {b'func': funcname, b'key': k}
+            )
         else:
             d = args[optkey]
         if k in d:
-            raise error.ParseError(_("%(func)s got multiple values for keyword "
-                                     "argument '%(key)s'")
-                                   % {'func': funcname, 'key': k})
+            raise error.ParseError(
+                _(
+                    b"%(func)s got multiple values for keyword "
+                    b"argument '%(key)s'"
+                )
+                % {b'func': funcname, b'key': k}
+            )
         d[k] = x[2]
     return args
 
+
 def unescapestr(s):
     try:
         return stringutil.unescapestr(s)
@@ -198,24 +217,27 @@
         # mangle Python's exception into our format
         raise error.ParseError(pycompat.bytestr(e).lower())
 
+
 def _prettyformat(tree, leafnodes, level, lines):
     if not isinstance(tree, tuple):
         lines.append((level, stringutil.pprint(tree)))
     elif tree[0] in leafnodes:
         rs = map(stringutil.pprint, tree[1:])
-        lines.append((level, '(%s %s)' % (tree[0], ' '.join(rs))))
+        lines.append((level, b'(%s %s)' % (tree[0], b' '.join(rs))))
     else:
-        lines.append((level, '(%s' % tree[0]))
+        lines.append((level, b'(%s' % tree[0]))
         for s in tree[1:]:
             _prettyformat(s, leafnodes, level + 1, lines)
-        lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
+        lines[-1:] = [(lines[-1][0], lines[-1][1] + b')')]
+
 
 def prettyformat(tree, leafnodes):
     lines = []
     _prettyformat(tree, leafnodes, 0, lines)
-    output = '\n'.join(('  ' * l + s) for l, s in lines)
+    output = b'\n'.join((b'  ' * l + s) for l, s in lines)
     return output
 
+
 def simplifyinfixops(tree, targetnodes):
     """Flatten chained infix operations to reduce usage of Python stack
 
@@ -295,6 +317,7 @@
     simplified.append(op)
     return tuple(reversed(simplified))
 
+
 def _buildtree(template, placeholder, replstack):
     if template == placeholder:
         return replstack.pop()
@@ -302,6 +325,7 @@
         return template
     return tuple(_buildtree(x, placeholder, replstack) for x in template)
 
+
 def buildtree(template, placeholder, *repls):
     """Create new tree by substituting placeholders by replacements
 
@@ -315,13 +339,14 @@
     ('and', ('symbol', '1'), ('not', ('symbol', '2')))
     """
     if not isinstance(placeholder, tuple):
-        raise error.ProgrammingError('placeholder must be a node tuple')
+        raise error.ProgrammingError(b'placeholder must be a node tuple')
     replstack = list(reversed(repls))
     r = _buildtree(template, placeholder, replstack)
     if replstack:
-        raise error.ProgrammingError('too many replacements')
+        raise error.ProgrammingError(b'too many replacements')
     return r
 
+
 def _matchtree(pattern, tree, placeholder, incompletenodes, matches):
     if pattern == tree:
         return True
@@ -332,8 +357,11 @@
         return True
     if len(pattern) != len(tree):
         return False
-    return all(_matchtree(p, x, placeholder, incompletenodes, matches)
-               for p, x in zip(pattern, tree))
+    return all(
+        _matchtree(p, x, placeholder, incompletenodes, matches)
+        for p, x in zip(pattern, tree)
+    )
+
 
 def matchtree(pattern, tree, placeholder=None, incompletenodes=()):
     """If a tree matches the pattern, return a list of the tree and nodes
@@ -370,19 +398,21 @@
     ...   (b'func', (b'symbol', b'ancestors'), (b'symbol', b'0')))
     """
     if placeholder is not None and not isinstance(placeholder, tuple):
-        raise error.ProgrammingError('placeholder must be a node tuple')
+        raise error.ProgrammingError(b'placeholder must be a node tuple')
     matches = [tree]
     if _matchtree(pattern, tree, placeholder, incompletenodes, matches):
         return matches
 
+
 def parseerrordetail(inst):
     """Compose error message from specified ParseError object
     """
     if len(inst.args) > 1:
-        return _('at %d: %s') % (inst.args[1], inst.args[0])
+        return _(b'at %d: %s') % (inst.args[1], inst.args[0])
     else:
         return inst.args[0]
 
+
 class alias(object):
     """Parsed result of alias"""
 
@@ -396,6 +426,7 @@
         # `expandaliases`.
         self.warned = False
 
+
 class basealiasrules(object):
     """Parsing and expansion rule set of aliases
 
@@ -408,13 +439,14 @@
         h = heads(default)
         b($1) = ancestors($1) - ancestors(default)
     """
+
     # typically a config section, which will be included in error messages
     _section = None
     # tag of symbol node
-    _symbolnode = 'symbol'
+    _symbolnode = b'symbol'
 
     def __new__(cls):
-        raise TypeError("'%s' is not instantiatable" % cls.__name__)
+        raise TypeError(b"'%s' is not instantiatable" % cls.__name__)
 
     @staticmethod
     def _parse(spec):
@@ -511,23 +543,27 @@
         if tree[0] == cls._symbolnode:
             # "name = ...." style
             name = tree[1]
-            if name.startswith('$'):
-                return (decl, None, _("invalid symbol '%s'") % name)
+            if name.startswith(b'$'):
+                return (decl, None, _(b"invalid symbol '%s'") % name)
             return (name, None, None)
 
         func = cls._trygetfunc(tree)
         if func:
             # "name(arg, ....) = ...." style
             name, args = func
-            if name.startswith('$'):
-                return (decl, None, _("invalid function '%s'") % name)
+            if name.startswith(b'$'):
+                return (decl, None, _(b"invalid function '%s'") % name)
             if any(t[0] != cls._symbolnode for t in args):
-                return (decl, None, _("invalid argument list"))
+                return (decl, None, _(b"invalid argument list"))
             if len(args) != len(set(args)):
-                return (name, None, _("argument names collide with each other"))
+                return (
+                    name,
+                    None,
+                    _(b"argument names collide with each other"),
+                )
             return (name, [t[1] for t in args], None)
 
-        return (decl, None, _("invalid format"))
+        return (decl, None, _(b"invalid format"))
 
     @classmethod
     def _relabelargs(cls, tree, args):
@@ -541,9 +577,9 @@
         assert len(tree) == 2
         sym = tree[1]
         if sym in args:
-            op = '_aliasarg'
-        elif sym.startswith('$'):
-            raise error.ParseError(_("invalid symbol '%s'") % sym)
+            op = b'_aliasarg'
+        elif sym.startswith(b'$'):
+            raise error.ParseError(_(b"invalid symbol '%s'") % sym)
         return (op, sym)
 
     @classmethod
@@ -606,15 +642,19 @@
         repl = efmt = None
         name, args, err = cls._builddecl(decl)
         if err:
-            efmt = _('bad declaration of %(section)s "%(name)s": %(error)s')
+            efmt = _(b'bad declaration of %(section)s "%(name)s": %(error)s')
         else:
             try:
                 repl = cls._builddefn(defn, args)
             except error.ParseError as inst:
                 err = parseerrordetail(inst)
-                efmt = _('bad definition of %(section)s "%(name)s": %(error)s')
+                efmt = _(b'bad definition of %(section)s "%(name)s": %(error)s')
         if err:
-            err = efmt % {'section': cls._section, 'name': name, 'error': err}
+            err = efmt % {
+                b'section': cls._section,
+                b'name': name,
+                b'error': err,
+            }
         return alias(name, args, err, repl)
 
     @classmethod
@@ -654,7 +694,7 @@
         """
         if not isinstance(tree, tuple):
             return tree
-        if tree[0] == '_aliasarg':
+        if tree[0] == b'_aliasarg':
             sym = tree[1]
             return args[sym]
         return tuple(cls._expandargs(t, args) for t in tree)
@@ -665,28 +705,32 @@
             return tree
         r = cls._getalias(aliases, tree)
         if r is None:
-            return tuple(cls._expand(aliases, t, expanding, cache)
-                         for t in tree)
+            return tuple(
+                cls._expand(aliases, t, expanding, cache) for t in tree
+            )
         a, l = r
         if a.error:
             raise error.Abort(a.error)
         if a in expanding:
-            raise error.ParseError(_('infinite expansion of %(section)s '
-                                     '"%(name)s" detected')
-                                   % {'section': cls._section, 'name': a.name})
+            raise error.ParseError(
+                _(b'infinite expansion of %(section)s "%(name)s" detected')
+                % {b'section': cls._section, b'name': a.name}
+            )
         # get cacheable replacement tree by expanding aliases recursively
         expanding.append(a)
         if a.name not in cache:
-            cache[a.name] = cls._expand(aliases, a.replacement, expanding,
-                                        cache)
+            cache[a.name] = cls._expand(
+                aliases, a.replacement, expanding, cache
+            )
         result = cache[a.name]
         expanding.pop()
         if a.args is None:
             return result
         # substitute function arguments in replacement tree
         if len(l) != len(a.args):
-            raise error.ParseError(_('invalid number of arguments: %d')
-                                   % len(l))
+            raise error.ParseError(
+                _(b'invalid number of arguments: %d') % len(l)
+            )
         l = [cls._expand(aliases, t, [], cache) for t in l]
         return cls._expandargs(result, dict(zip(a.args, l)))
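
``parser._parse`` above is a precedence-climbing (Pratt) parser: every token
type carries a binding strength plus optional prefix/infix actions in the
``_elements`` table, and recursion depth follows operator precedence rather
than grammar productions. A stripped-down standalone model of the same
control flow, handling only symbols, ``+`` and ``*`` (toy code, not the
mercurial class; the elements table is reduced to a binding map)::

    class toyparser(object):
        """Minimal precedence-climbing parse loop, as in parser._parse."""

        _binding = {'end': 0, 'symbol': 0, '+': 1, '*': 2}

        def __init__(self, tokens):
            self._tokens = iter(tokens)
            self.current = next(self._tokens)

        def _advance(self):
            t = self.current
            self.current = next(self._tokens, ('end', None))
            return t

        def parse(self, bind=0):
            token, value = self._advance()
            expr = (token, value)  # every term starts with a symbol here
            # keep absorbing operators that bind tighter than our caller
            while bind < self._binding[self.current[0]]:
                op, _ = self._advance()
                expr = (op, expr, self.parse(self._binding[op]))
            return expr

    tokens = [('symbol', 1), ('+', '+'), ('symbol', 2),
              ('*', '*'), ('symbol', 3)]
    tree = toyparser(tokens).parse()
    assert tree == ('+', ('symbol', 1), ('*', ('symbol', 2), ('symbol', 3)))

Because the recursive call passes the operator's own binding strength,
operators of equal strength associate to the left, matching the real
parser's behaviour for plain infix rules.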
 
--- a/mercurial/patch.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/patch.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,7 +11,6 @@
 import collections
 import contextlib
 import copy
-import email
 import errno
 import hashlib
 import os
@@ -24,6 +23,7 @@
     hex,
     short,
 )
+from .pycompat import open
 from . import (
     copies,
     diffhelper,
@@ -49,27 +49,30 @@
 
 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
 tabsplitter = re.compile(br'(\t+|[^\t]+)')
-wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
-                          b'[^ \ta-zA-Z0-9_\x80-\xff])')
+wordsplitter = re.compile(
+    br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|[^ \ta-zA-Z0-9_\x80-\xff])'
+)
 
 PatchError = error.PatchError
 
 # public functions
 
+
 def split(stream):
     '''return an iterator of individual patches from a stream'''
+
     def isheader(line, inheader):
-        if inheader and line.startswith((' ', '\t')):
+        if inheader and line.startswith((b' ', b'\t')):
             # continuation
             return True
-        if line.startswith((' ', '-', '+')):
+        if line.startswith((b' ', b'-', b'+')):
             # diff line - don't check for header pattern in there
             return False
-        l = line.split(': ', 1)
-        return len(l) == 2 and ' ' not in l[0]
+        l = line.split(b': ', 1)
+        return len(l) == 2 and b' ' not in l[0]
 
     def chunk(lines):
-        return stringio(''.join(lines))
+        return stringio(b''.join(lines))
 
     def hgsplit(stream, cur):
         inheader = True
@@ -77,7 +80,7 @@
         for line in stream:
             if not line.strip():
                 inheader = False
-            if not inheader and line.startswith('# HG changeset patch'):
+            if not inheader and line.startswith(b'# HG changeset patch'):
                 yield chunk(cur)
                 cur = []
                 inheader = True
@@ -89,7 +92,7 @@
 
     def mboxsplit(stream, cur):
         for line in stream:
-            if line.startswith('From '):
+            if line.startswith(b'From '):
                 for c in split(chunk(cur[1:])):
                     yield c
                 cur = []
@@ -103,7 +106,7 @@
     def mimesplit(stream, cur):
         def msgfp(m):
             fp = stringio()
-            g = email.Generator.Generator(fp, mangle_from_=False)
+            g = mail.Generator(fp, mangle_from_=False)
             g.flatten(m)
             fp.seek(0)
             return fp
@@ -116,7 +119,7 @@
         if not m.is_multipart():
             yield msgfp(m)
         else:
-            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
+            ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
             for part in m.walk():
                 ct = part.get_content_type()
                 if ct not in ok_types:
@@ -160,24 +163,24 @@
     inheader = False
     cur = []
 
-    mimeheaders = ['content-type']
-
-    if not util.safehasattr(stream, 'next'):
+    mimeheaders = [b'content-type']
+
+    if not util.safehasattr(stream, b'next'):
         # http responses, for example, have readline but not next
         stream = fiter(stream)
 
     for line in stream:
         cur.append(line)
-        if line.startswith('# HG changeset patch'):
+        if line.startswith(b'# HG changeset patch'):
             return hgsplit(stream, cur)
-        elif line.startswith('From '):
+        elif line.startswith(b'From '):
             return mboxsplit(stream, cur)
         elif isheader(line, inheader):
             inheader = True
-            if line.split(':', 1)[0].lower() in mimeheaders:
+            if line.split(b':', 1)[0].lower() in mimeheaders:
                 # let email parser handle this
                 return mimesplit(stream, cur)
-        elif line.startswith('--- ') and inheader:
+        elif line.startswith(b'--- ') and inheader:
             # No evil headers seen before diff start, split by hand
             return headersplit(stream, cur)
         # Not enough info, keep reading
@@ -185,12 +188,15 @@
     # if we are here, we have a very plain patch
     return remainder(cur)
 
+
 ## Some facilities for extensible patch parsing:
 # list of pairs ("header to match", "data key")
-patchheadermap = [('Date', 'date'),
-                  ('Branch', 'branch'),
-                  ('Node ID', 'nodeid'),
-                 ]
+patchheadermap = [
+    (b'Date', b'date'),
+    (b'Branch', b'branch'),
+    (b'Node ID', b'nodeid'),
+]
+
 
 @contextlib.contextmanager
 def extract(ui, fileobj):
@@ -210,7 +216,7 @@
     Any item can be missing from the dictionary. If filename is missing,
     fileobj did not contain a patch. Caller must unlink filename when done.'''
 
-    fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
+    fd, tmpname = pycompat.mkstemp(prefix=b'hg-patch-')
     tmpfp = os.fdopen(fd, r'wb')
     try:
         yield _extract(ui, fileobj, tmpname, tmpfp)
@@ -218,45 +224,57 @@
         tmpfp.close()
         os.unlink(tmpname)
 
+
 def _extract(ui, fileobj, tmpname, tmpfp):
 
     # attempt to detect the start of a patch
     # (this heuristic is borrowed from quilt)
-    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
-                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
-                        br'---[ \t].*?^\+\+\+[ \t]|'
-                        br'\*\*\*[ \t].*?^---[ \t])',
-                        re.MULTILINE | re.DOTALL)
+    diffre = re.compile(
+        br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
+        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
+        br'---[ \t].*?^\+\+\+[ \t]|'
+        br'\*\*\*[ \t].*?^---[ \t])',
+        re.MULTILINE | re.DOTALL,
+    )
 
     data = {}
 
     msg = mail.parse(fileobj)
 
     subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
-    data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
-    if not subject and not data['user']:
+    data[b'user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
+    if not subject and not data[b'user']:
         # Not an email, restore parsed headers if any
-        subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
-                            for h in msg.items()) + '\n'
+        subject = (
+            b'\n'.join(
+                b': '.join(map(encoding.strtolocal, h)) for h in msg.items()
+            )
+            + b'\n'
+        )
 
     # should try to parse msg['Date']
     parents = []
 
+    nodeid = msg[r'X-Mercurial-Node']
+    if nodeid:
+        data[b'nodeid'] = nodeid = mail.headdecode(nodeid)
+        ui.debug(b'Node ID: %s\n' % nodeid)
+
     if subject:
-        if subject.startswith('[PATCH'):
-            pend = subject.find(']')
+        if subject.startswith(b'[PATCH'):
+            pend = subject.find(b']')
             if pend >= 0:
-                subject = subject[pend + 1:].lstrip()
-        subject = re.sub(br'\n[ \t]+', ' ', subject)
-        ui.debug('Subject: %s\n' % subject)
-    if data['user']:
-        ui.debug('From: %s\n' % data['user'])
+                subject = subject[pend + 1 :].lstrip()
+        subject = re.sub(br'\n[ \t]+', b' ', subject)
+        ui.debug(b'Subject: %s\n' % subject)
+    if data[b'user']:
+        ui.debug(b'From: %s\n' % data[b'user'])
     diffs_seen = 0
-    ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
-    message = ''
+    ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
+    message = b''
     for part in msg.walk():
         content_type = pycompat.bytestr(part.get_content_type())
-        ui.debug('Content-Type: %s\n' % content_type)
+        ui.debug(b'Content-Type: %s\n' % content_type)
         if content_type not in ok_types:
             continue
         payload = part.get_payload(decode=True)
@@ -266,12 +284,12 @@
             hgpatchheader = False
             ignoretext = False
 
-            ui.debug('found patch at byte %d\n' % m.start(0))
+            ui.debug(b'found patch at byte %d\n' % m.start(0))
             diffs_seen += 1
             cfp = stringio()
-            for line in payload[:m.start(0)].splitlines():
-                if line.startswith('# HG changeset patch') and not hgpatch:
-                    ui.debug('patch generated by hg export\n')
+            for line in payload[: m.start(0)].splitlines():
+                if line.startswith(b'# HG changeset patch') and not hgpatch:
+                    ui.debug(b'patch generated by hg export\n')
                     hgpatch = True
                     hgpatchheader = True
                     # drop earlier commit message content
@@ -279,45 +297,47 @@
                     cfp.truncate()
                     subject = None
                 elif hgpatchheader:
-                    if line.startswith('# User '):
-                        data['user'] = line[7:]
-                        ui.debug('From: %s\n' % data['user'])
-                    elif line.startswith("# Parent "):
+                    if line.startswith(b'# User '):
+                        data[b'user'] = line[7:]
+                        ui.debug(b'From: %s\n' % data[b'user'])
+                    elif line.startswith(b"# Parent "):
                         parents.append(line[9:].lstrip())
-                    elif line.startswith("# "):
+                    elif line.startswith(b"# "):
                         for header, key in patchheadermap:
-                            prefix = '# %s ' % header
+                            prefix = b'# %s ' % header
                             if line.startswith(prefix):
-                                data[key] = line[len(prefix):]
+                                data[key] = line[len(prefix) :]
+                                ui.debug(b'%s: %s\n' % (header, data[key]))
                     else:
                         hgpatchheader = False
-                elif line == '---':
+                elif line == b'---':
                     ignoretext = True
                 if not hgpatchheader and not ignoretext:
                     cfp.write(line)
-                    cfp.write('\n')
+                    cfp.write(b'\n')
             message = cfp.getvalue()
             if tmpfp:
                 tmpfp.write(payload)
-                if not payload.endswith('\n'):
-                    tmpfp.write('\n')
-        elif not diffs_seen and message and content_type == 'text/plain':
-            message += '\n' + payload
+                if not payload.endswith(b'\n'):
+                    tmpfp.write(b'\n')
+        elif not diffs_seen and message and content_type == b'text/plain':
+            message += b'\n' + payload
 
     if subject and not message.startswith(subject):
-        message = '%s\n%s' % (subject, message)
-    data['message'] = message
+        message = b'%s\n%s' % (subject, message)
+    data[b'message'] = message
     tmpfp.close()
     if parents:
-        data['p1'] = parents.pop(0)
+        data[b'p1'] = parents.pop(0)
         if parents:
-            data['p2'] = parents.pop(0)
+            data[b'p2'] = parents.pop(0)
 
     if diffs_seen:
-        data['filename'] = tmpname
+        data[b'filename'] = tmpname
 
     return data
 
+
 class patchmeta(object):
     """Patched file metadata
 
@@ -328,11 +348,12 @@
     'islink' is True if the file is a symlink and 'isexec' is True if
     the file is executable. Otherwise, 'mode' is None.
     """
+
     def __init__(self, path):
         self.path = path
         self.oldpath = None
         self.mode = None
-        self.op = 'MODIFY'
+        self.op = b'MODIFY'
         self.binary = False
 
     def setmode(self, mode):
@@ -349,14 +370,14 @@
         return other
 
     def _ispatchinga(self, afile):
-        if afile == '/dev/null':
-            return self.op == 'ADD'
-        return afile == 'a/' + (self.oldpath or self.path)
+        if afile == b'/dev/null':
+            return self.op == b'ADD'
+        return afile == b'a/' + (self.oldpath or self.path)
 
     def _ispatchingb(self, bfile):
-        if bfile == '/dev/null':
-            return self.op == 'DELETE'
-        return bfile == 'b/' + self.path
+        if bfile == b'/dev/null':
+            return self.op == b'DELETE'
+        return bfile == b'b/' + self.path
 
     def ispatching(self, afile, bfile):
         return self._ispatchinga(afile) and self._ispatchingb(bfile)
@@ -364,6 +385,7 @@
     def __repr__(self):
         return r"<patchmeta %s %r>" % (self.op, self.path)
 
+
 def readgitpatch(lr):
     """extract git-style metadata about patches from <patchname>"""
 
@@ -371,8 +393,8 @@
     gp = None
     gitpatches = []
     for line in lr:
-        line = line.rstrip(' \r\n')
-        if line.startswith('diff --git a/'):
+        line = line.rstrip(b' \r\n')
+        if line.startswith(b'diff --git a/'):
             m = gitre.match(line)
             if m:
                 if gp:
@@ -380,34 +402,35 @@
                 dst = m.group(2)
                 gp = patchmeta(dst)
         elif gp:
-            if line.startswith('--- '):
+            if line.startswith(b'--- '):
                 gitpatches.append(gp)
                 gp = None
                 continue
-            if line.startswith('rename from '):
-                gp.op = 'RENAME'
+            if line.startswith(b'rename from '):
+                gp.op = b'RENAME'
                 gp.oldpath = line[12:]
-            elif line.startswith('rename to '):
+            elif line.startswith(b'rename to '):
                 gp.path = line[10:]
-            elif line.startswith('copy from '):
-                gp.op = 'COPY'
+            elif line.startswith(b'copy from '):
+                gp.op = b'COPY'
                 gp.oldpath = line[10:]
-            elif line.startswith('copy to '):
+            elif line.startswith(b'copy to '):
                 gp.path = line[8:]
-            elif line.startswith('deleted file'):
-                gp.op = 'DELETE'
-            elif line.startswith('new file mode '):
-                gp.op = 'ADD'
+            elif line.startswith(b'deleted file'):
+                gp.op = b'DELETE'
+            elif line.startswith(b'new file mode '):
+                gp.op = b'ADD'
                 gp.setmode(int(line[-6:], 8))
-            elif line.startswith('new mode '):
+            elif line.startswith(b'new mode '):
                 gp.setmode(int(line[-6:], 8))
-            elif line.startswith('GIT binary patch'):
+            elif line.startswith(b'GIT binary patch'):
                 gp.binary = True
     if gp:
         gitpatches.append(gp)
 
     return gitpatches
 
+
 class linereader(object):
     # simple class to allow pushing lines back into the input stream
     def __init__(self, fp):
@@ -426,7 +449,8 @@
         return self.fp.readline()
 
     def __iter__(self):
-        return iter(self.readline, '')
+        return iter(self.readline, b'')
+
 
 class abstractbackend(object):
     def __init__(self, ui):
@@ -462,6 +486,7 @@
     def close(self):
         raise NotImplementedError
 
+
 class fsbackend(abstractbackend):
     def __init__(self, ui, basedir):
         super(fsbackend, self).__init__(ui)
@@ -497,21 +522,23 @@
                 self.opener.setflags(fname, False, True)
 
     def unlink(self, fname):
-        rmdir = self.ui.configbool('experimental', 'removeemptydirs')
+        rmdir = self.ui.configbool(b'experimental', b'removeemptydirs')
         self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)
 
     def writerej(self, fname, failed, total, lines):
-        fname = fname + ".rej"
+        fname = fname + b".rej"
         self.ui.warn(
-            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
-            (failed, total, fname))
-        fp = self.opener(fname, 'w')
+            _(b"%d out of %d hunks FAILED -- saving rejects to file %s\n")
+            % (failed, total, fname)
+        )
+        fp = self.opener(fname, b'w')
         fp.writelines(lines)
         fp.close()
 
     def exists(self, fname):
         return self.opener.lexists(fname)
 
+
 class workingbackend(fsbackend):
     def __init__(self, ui, repo, similarity):
         super(workingbackend, self).__init__(ui, repo.root)
@@ -522,8 +549,8 @@
         self.copied = []
 
     def _checkknown(self, fname):
-        if self.repo.dirstate[fname] == '?' and self.exists(fname):
-            raise PatchError(_('cannot patch %s: file is not tracked') % fname)
+        if self.repo.dirstate[fname] == b'?' and self.exists(fname):
+            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)
 
     def setfile(self, fname, data, mode, copysource):
         self._checkknown(fname)
@@ -556,6 +583,7 @@
             scmutil.marktouched(self.repo, changed, self.similarity)
         return sorted(self.changed)
 
+
 class filestore(object):
     def __init__(self, maxsize=None):
         self.opener = None
@@ -563,7 +591,7 @@
         self.created = 0
         self.maxsize = maxsize
         if self.maxsize is None:
-            self.maxsize = 4*(2**20)
+            self.maxsize = 4 * (2 ** 20)
         self.size = 0
         self.data = {}
 
@@ -573,10 +601,10 @@
             self.size += len(data)
         else:
             if self.opener is None:
-                root = pycompat.mkdtemp(prefix='hg-patch-')
+                root = pycompat.mkdtemp(prefix=b'hg-patch-')
                 self.opener = vfsmod.vfs(root)
             # Avoid filename issues with these simple names
-            fn = '%d' % self.created
+            fn = b'%d' % self.created
             self.opener.write(fn, data)
             self.created += 1
             self.files[fname] = (fn, mode, copied)
@@ -593,6 +621,7 @@
         if self.opener:
             shutil.rmtree(self.opener.base)
 
+
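The store keeps payloads in memory until the 4 MiB budget is exceeded,
then spills them to numbered files in a temporary directory. A
hypothetical, vfs-free sketch of that split (``tinystore`` is not a
real class)::

    import os
    import tempfile

    class tinystore(object):
        def __init__(self, maxsize=4 * (2 ** 20)):
            self.maxsize, self.size = maxsize, 0
            self.data, self.files = {}, {}
            self.tmpdir, self.created = None, 0

        def setfile(self, fname, data):
            if self.size + len(data) <= self.maxsize:
                self.data[fname] = data          # small: keep in memory
                self.size += len(data)
            else:
                if self.tmpdir is None:
                    self.tmpdir = tempfile.mkdtemp(prefix='hg-patch-')
                # numbered names sidestep any filename issues
                fn = os.path.join(self.tmpdir, '%d' % self.created)
                with open(fn, 'wb') as f:
                    f.write(data)
                self.created += 1
                self.files[fname] = fn

    store = tinystore(maxsize=8)
    store.setfile('small', b'abc')        # stays in memory
    store.setfile('large', b'x' * 100)    # spills to the temp dir
    assert 'small' in store.data and 'large' in store.files
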
 class repobackend(abstractbackend):
     def __init__(self, ui, repo, ctx, store):
         super(repobackend, self).__init__(ui)
@@ -605,7 +634,7 @@
 
     def _checkknown(self, fname):
         if fname not in self.ctx:
-            raise PatchError(_('cannot patch %s: file is not tracked') % fname)
+            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)
 
     def getfile(self, fname):
         try:
@@ -613,7 +642,7 @@
         except error.LookupError:
             return None, None
         flags = fctx.flags()
-        return fctx.data(), ('l' in flags, 'x' in flags)
+        return fctx.data(), (b'l' in flags, b'x' in flags)
 
     def setfile(self, fname, data, mode, copysource):
         if copysource:
@@ -635,13 +664,15 @@
     def close(self):
         return self.changed | self.removed
 
+
 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
 unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
 contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
-eolmodes = ['strict', 'crlf', 'lf', 'auto']
+eolmodes = [b'strict', b'crlf', b'lf', b'auto']
+
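A quick check of how ``unidesc`` reads hunk headers (the length fields
are optional and default to 1, per the comment above)::

    import re

    unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
    assert unidesc.match(b'@@ -10,4 +12,6 @@').groups() == (
        b'10', b'4', b'12', b'6')
    assert unidesc.match(b'@@ -1 +1 @@').groups() == (
        b'1', None, b'1', None)
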
 
 class patchfile(object):
-    def __init__(self, ui, gp, backend, store, eolmode='strict'):
+    def __init__(self, ui, gp, backend, store, eolmode=b'strict'):
         self.fname = gp.path
         self.eolmode = eolmode
         self.eol = None
@@ -652,8 +683,8 @@
         self.missing = True
         self.mode = gp.mode
         self.copysource = gp.oldpath
-        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
-        self.remove = gp.op == 'DELETE'
+        self.create = gp.op in (b'ADD', b'COPY', b'RENAME')
+        self.remove = gp.op == b'DELETE'
         if self.copysource is None:
             data, mode = backend.getfile(self.fname)
         else:
@@ -667,15 +698,15 @@
                 self.mode = mode
             if self.lines:
                 # Normalize line endings
-                if self.lines[0].endswith('\r\n'):
-                    self.eol = '\r\n'
-                elif self.lines[0].endswith('\n'):
-                    self.eol = '\n'
-                if eolmode != 'strict':
+                if self.lines[0].endswith(b'\r\n'):
+                    self.eol = b'\r\n'
+                elif self.lines[0].endswith(b'\n'):
+                    self.eol = b'\n'
+                if eolmode != b'strict':
                     nlines = []
                     for l in self.lines:
-                        if l.endswith('\r\n'):
-                            l = l[:-2] + '\n'
+                        if l.endswith(b'\r\n'):
+                            l = l[:-2] + b'\n'
                         nlines.append(l)
                     self.lines = nlines
         else:
@@ -684,9 +715,13 @@
             if self.mode is None:
                 self.mode = (False, False)
         if self.missing:
-            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
-            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
-                           "current directory)\n"))
+            self.ui.warn(_(b"unable to find '%s' for patching\n") % self.fname)
+            self.ui.warn(
+                _(
+                    b"(use '--prefix' to apply patch relative to the "
+                    b"current directory)\n"
+                )
+            )
 
         self.hash = {}
         self.dirty = 0
@@ -698,35 +733,34 @@
         self.hunks = 0
 
     def writelines(self, fname, lines, mode):
-        if self.eolmode == 'auto':
+        if self.eolmode == b'auto':
             eol = self.eol
-        elif self.eolmode == 'crlf':
-            eol = '\r\n'
+        elif self.eolmode == b'crlf':
+            eol = b'\r\n'
         else:
-            eol = '\n'
-
-        if self.eolmode != 'strict' and eol and eol != '\n':
+            eol = b'\n'
+
+        if self.eolmode != b'strict' and eol and eol != b'\n':
             rawlines = []
             for l in lines:
-                if l and l.endswith('\n'):
+                if l and l.endswith(b'\n'):
                     l = l[:-1] + eol
                 rawlines.append(l)
             lines = rawlines
 
-        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
+        self.backend.setfile(fname, b''.join(lines), mode, self.copysource)
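
In other words, for ``eolmode='crlf'`` every line ending in b'\n' is
rewritten to end in b'\r\n' before being handed to the backend; a
small worked example of the normalization loop above::

    eol = b'\r\n'
    lines = [b'one\n', b'two\n', b'tail-without-newline']
    rawlines = [l[:-1] + eol if l and l.endswith(b'\n') else l
                for l in lines]
    assert rawlines == [b'one\r\n', b'two\r\n', b'tail-without-newline']
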
 
     def printfile(self, warn):
         if self.fileprinted:
             return
         if warn or self.ui.verbose:
             self.fileprinted = True
-        s = _("patching file %s\n") % self.fname
+        s = _(b"patching file %s\n") % self.fname
         if warn:
             self.ui.warn(s)
         else:
             self.ui.note(s)
 
-
     def findlines(self, l, linenum):
         # looks through the hash and finds candidate lines.  The
         # result is a list of line numbers sorted based on distance
@@ -746,19 +780,20 @@
         if not self.rej:
             return
         base = os.path.basename(self.fname)
-        lines = ["--- %s\n+++ %s\n" % (base, base)]
+        lines = [b"--- %s\n+++ %s\n" % (base, base)]
         for x in self.rej:
             for l in x.hunk:
                 lines.append(l)
-                if l[-1:] != '\n':
-                    lines.append("\n\\ No newline at end of file\n")
+                if l[-1:] != b'\n':
+                    lines.append(b"\n\\ No newline at end of file\n")
         self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
 
     def apply(self, h):
         if not h.complete():
-            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
-                            (h.number, h.desc, len(h.a), h.lena, len(h.b),
-                            h.lenb))
+            raise PatchError(
+                _(b"bad hunk #%d %s (%d %d %d %d)")
+                % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb)
+            )
 
         self.hunks += 1
 
@@ -768,10 +803,12 @@
 
         if self.exists and self.create:
             if self.copysource:
-                self.ui.warn(_("cannot create %s: destination already "
-                               "exists\n") % self.fname)
+                self.ui.warn(
+                    _(b"cannot create %s: destination already exists\n")
+                    % self.fname
+                )
             else:
-                self.ui.warn(_("file %s already exists\n") % self.fname)
+                self.ui.warn(_(b"file %s already exists\n") % self.fname)
             self.rej.append(h)
             return -1
 
@@ -786,8 +823,11 @@
             return 0
 
         horig = h
-        if (self.eolmode in ('crlf', 'lf')
-            or self.eolmode == 'auto' and self.eol):
+        if (
+            self.eolmode in (b'crlf', b'lf')
+            or self.eolmode == b'auto'
+            and self.eol
+        ):
             # If new eols are going to be normalized, then normalize
             # hunk data before patching. Otherwise, preserve input
             # line-endings.
@@ -804,7 +844,7 @@
             if self.remove:
                 self.backend.unlink(self.fname)
             else:
-                self.lines[oldstart:oldstart + len(old)] = new
+                self.lines[oldstart : oldstart + len(old)] = new
                 self.offset += len(new) - len(old)
                 self.dirty = True
             return 0
@@ -814,7 +854,9 @@
         for x, s in enumerate(self.lines):
             self.hash.setdefault(s, []).append(x)
 
-        for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
+        for fuzzlen in pycompat.xrange(
+            self.ui.configint(b"patch", b"fuzz") + 1
+        ):
             for toponly in [True, False]:
                 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                 oldstart = oldstart + self.offset + self.skew
@@ -834,19 +876,24 @@
                         self.dirty = True
                         offset = l - orig_start - fuzzlen
                         if fuzzlen:
-                            msg = _("Hunk #%d succeeded at %d "
-                                    "with fuzz %d "
-                                    "(offset %d lines).\n")
+                            msg = _(
+                                b"Hunk #%d succeeded at %d "
+                                b"with fuzz %d "
+                                b"(offset %d lines).\n"
+                            )
                             self.printfile(True)
-                            self.ui.warn(msg %
-                                (h.number, l + 1, fuzzlen, offset))
+                            self.ui.warn(
+                                msg % (h.number, l + 1, fuzzlen, offset)
+                            )
                         else:
-                            msg = _("Hunk #%d succeeded at %d "
-                                    "(offset %d lines).\n")
+                            msg = _(
+                                b"Hunk #%d succeeded at %d "
+                                b"(offset %d lines).\n"
+                            )
                             self.ui.note(msg % (h.number, l + 1, offset))
                         return fuzzlen
         self.printfile(True)
-        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
+        self.ui.warn(_(b"Hunk #%d FAILED at %d\n") % (h.number, orig_start))
         self.rej.append(horig)
         return -1
 
@@ -856,42 +903,48 @@
         self.write_rej()
         return len(self.rej)
 
+
 class header(object):
     """patch header
     """
-    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
-    diff_re = re.compile('diff -r .* (.*)$')
-    allhunks_re = re.compile('(?:index|deleted file) ')
-    pretty_re = re.compile('(?:new file|deleted file) ')
-    special_re = re.compile('(?:index|deleted|copy|rename|new mode) ')
-    newfile_re = re.compile('(?:new file)')
+
+    diffgit_re = re.compile(b'diff --git a/(.*) b/(.*)$')
+    diff_re = re.compile(b'diff -r .* (.*)$')
+    allhunks_re = re.compile(b'(?:index|deleted file) ')
+    pretty_re = re.compile(b'(?:new file|deleted file) ')
+    special_re = re.compile(b'(?:index|deleted|copy|rename|new mode) ')
+    newfile_re = re.compile(b'(?:new file|copy to|rename to)')
 
     def __init__(self, header):
         self.header = header
         self.hunks = []
 
     def binary(self):
-        return any(h.startswith('index ') for h in self.header)
+        return any(h.startswith(b'index ') for h in self.header)
 
     def pretty(self, fp):
         for h in self.header:
-            if h.startswith('index '):
-                fp.write(_('this modifies a binary file (all or nothing)\n'))
+            if h.startswith(b'index '):
+                fp.write(_(b'this modifies a binary file (all or nothing)\n'))
                 break
             if self.pretty_re.match(h):
                 fp.write(h)
                 if self.binary():
-                    fp.write(_('this is a binary file\n'))
+                    fp.write(_(b'this is a binary file\n'))
                 break
-            if h.startswith('---'):
-                fp.write(_('%d hunks, %d lines changed\n') %
-                         (len(self.hunks),
-                          sum([max(h.added, h.removed) for h in self.hunks])))
+            if h.startswith(b'---'):
+                fp.write(
+                    _(b'%d hunks, %d lines changed\n')
+                    % (
+                        len(self.hunks),
+                        sum([max(h.added, h.removed) for h in self.hunks]),
+                    )
+                )
                 break
             fp.write(h)
 
     def write(self, fp):
-        fp.write(''.join(self.header))
+        fp.write(b''.join(self.header))
 
     def allhunks(self):
         return any(self.allhunks_re.match(h) for h in self.header)
@@ -910,7 +963,7 @@
         return self.files()[-1]
 
     def __repr__(self):
-        return '<header %s>' % (' '.join(map(repr, self.files())))
+        return r'<header %s>' % (r' '.join(map(repr, self.files())))
 
     def isnewfile(self):
         return any(self.newfile_re.match(h) for h in self.header)
@@ -925,8 +978,10 @@
         # if they have some content as we want to be able to change it
         nocontent = len(self.header) == 2
         emptynewfile = self.isnewfile() and nocontent
-        return (emptynewfile
-                or any(self.special_re.match(h) for h in self.header))
+        return emptynewfile or any(
+            self.special_re.match(h) for h in self.header
+        )
+
 
 class recordhunk(object):
     """patch hunk
@@ -934,8 +989,17 @@
     XXX shouldn't we merge this with the other hunk class?
     """
 
-    def __init__(self, header, fromline, toline, proc, before, hunk, after,
-                 maxcontext=None):
+    def __init__(
+        self,
+        header,
+        fromline,
+        toline,
+        proc,
+        before,
+        hunk,
+        after,
+        maxcontext=None,
+    ):
         def trimcontext(lines, reverse=False):
             if maxcontext is not None:
                 delta = len(lines) - maxcontext
@@ -959,21 +1023,27 @@
         if not isinstance(v, recordhunk):
             return False
 
-        return ((v.hunk == self.hunk) and
-                (v.proc == self.proc) and
-                (self.fromline == v.fromline) and
-                (self.header.files() == v.header.files()))
+        return (
+            (v.hunk == self.hunk)
+            and (v.proc == self.proc)
+            and (self.fromline == v.fromline)
+            and (self.header.files() == v.header.files())
+        )
 
     def __hash__(self):
-        return hash((tuple(self.hunk),
-            tuple(self.header.files()),
-            self.fromline,
-            self.proc))
+        return hash(
+            (
+                tuple(self.hunk),
+                tuple(self.header.files()),
+                self.fromline,
+                self.proc,
+            )
+        )
 
     def countchanges(self, hunk):
         """hunk -> (n+,n-)"""
-        add = len([h for h in hunk if h.startswith('+')])
-        rem = len([h for h in hunk if h.startswith('-')])
+        add = len([h for h in hunk if h.startswith(b'+')])
+        rem = len([h for h in hunk if h.startswith(b'-')])
         return add, rem
 
     def reversehunk(self):
@@ -983,21 +1053,35 @@
         that, swap fromline/toline and +/- signs while keep other things
         unchanged.
         """
-        m = {'+': '-', '-': '+', '\\': '\\'}
-        hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
-        return recordhunk(self.header, self.toline, self.fromline, self.proc,
-                          self.before, hunk, self.after)
+        m = {b'+': b'-', b'-': b'+', b'\\': b'\\'}
+        hunk = [b'%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
+        return recordhunk(
+            self.header,
+            self.toline,
+            self.fromline,
+            self.proc,
+            self.before,
+            hunk,
+            self.after,
+        )
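
The sign map swaps additions and deletions while preserving the
``\`` "no newline at end of file" marker; applied to a toy hunk::

    m = {b'+': b'-', b'-': b'+', b'\\': b'\\'}
    hunk = [b'-old\n', b'+new\n', b'\\ No newline at end of file\n']
    rev = [b'%s%s' % (m[l[0:1]], l[1:]) for l in hunk]
    assert rev[0] == b'+old\n'
    assert rev[1] == b'-new\n'
    assert rev[2].startswith(b'\\')
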
 
     def write(self, fp):
         delta = len(self.before) + len(self.after)
-        if self.after and self.after[-1] == '\\ No newline at end of file\n':
+        if self.after and self.after[-1] == b'\\ No newline at end of file\n':
             delta -= 1
         fromlen = delta + self.removed
         tolen = delta + self.added
-        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
-                 (self.fromline, fromlen, self.toline, tolen,
-                  self.proc and (' ' + self.proc)))
-        fp.write(''.join(self.before + self.hunk + self.after))
+        fp.write(
+            b'@@ -%d,%d +%d,%d @@%s\n'
+            % (
+                self.fromline,
+                fromlen,
+                self.toline,
+                tolen,
+                self.proc and (b' ' + self.proc),
+            )
+        )
+        fp.write(b''.join(self.before + self.hunk + self.after))
 
     pretty = write
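
The header arithmetic in write() is worth spelling out: both lengths
share the surrounding context count, then add the removed and added
counts respectively (assuming no trailing "no newline" marker)::

    before, after = [b' ctx\n'], [b' ctx\n']
    removed, added = 2, 3
    delta = len(before) + len(after)
    fromlen, tolen = delta + removed, delta + added
    assert (fromlen, tolen) == (4, 5)   # yields "@@ -N,4 +N,5 @@"
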
 
@@ -1005,72 +1089,82 @@
         return self.header.filename()
 
     def __repr__(self):
-        return '<hunk %r@%d>' % (self.filename(), self.fromline)
+        return r'<hunk %r@%d>' % (self.filename(), self.fromline)
+
 
 def getmessages():
     return {
-        'multiple': {
-            'apply': _("apply change %d/%d to '%s'?"),
-            'discard': _("discard change %d/%d to '%s'?"),
-            'keep': _("keep change %d/%d to '%s'?"),
-            'record': _("record change %d/%d to '%s'?"),
+        b'multiple': {
+            b'apply': _(b"apply change %d/%d to '%s'?"),
+            b'discard': _(b"discard change %d/%d to '%s'?"),
+            b'keep': _(b"keep change %d/%d to '%s'?"),
+            b'record': _(b"record change %d/%d to '%s'?"),
         },
-        'single': {
-            'apply': _("apply this change to '%s'?"),
-            'discard': _("discard this change to '%s'?"),
-            'keep': _("keep this change to '%s'?"),
-            'record': _("record this change to '%s'?"),
+        b'single': {
+            b'apply': _(b"apply this change to '%s'?"),
+            b'discard': _(b"discard this change to '%s'?"),
+            b'keep': _(b"keep this change to '%s'?"),
+            b'record': _(b"record this change to '%s'?"),
         },
-        'help': {
-            'apply': _('[Ynesfdaq?]'
-                         '$$ &Yes, apply this change'
-                         '$$ &No, skip this change'
-                         '$$ &Edit this change manually'
-                         '$$ &Skip remaining changes to this file'
-                         '$$ Apply remaining changes to this &file'
-                         '$$ &Done, skip remaining changes and files'
-                         '$$ Apply &all changes to all remaining files'
-                         '$$ &Quit, applying no changes'
-                         '$$ &? (display help)'),
-            'discard': _('[Ynesfdaq?]'
-                         '$$ &Yes, discard this change'
-                         '$$ &No, skip this change'
-                         '$$ &Edit this change manually'
-                         '$$ &Skip remaining changes to this file'
-                         '$$ Discard remaining changes to this &file'
-                         '$$ &Done, skip remaining changes and files'
-                         '$$ Discard &all changes to all remaining files'
-                         '$$ &Quit, discarding no changes'
-                         '$$ &? (display help)'),
-            'keep': _('[Ynesfdaq?]'
-                         '$$ &Yes, keep this change'
-                         '$$ &No, skip this change'
-                         '$$ &Edit this change manually'
-                         '$$ &Skip remaining changes to this file'
-                         '$$ Keep remaining changes to this &file'
-                         '$$ &Done, skip remaining changes and files'
-                         '$$ Keep &all changes to all remaining files'
-                         '$$ &Quit, keeping all changes'
-                         '$$ &? (display help)'),
-            'record': _('[Ynesfdaq?]'
-                        '$$ &Yes, record this change'
-                        '$$ &No, skip this change'
-                        '$$ &Edit this change manually'
-                        '$$ &Skip remaining changes to this file'
-                        '$$ Record remaining changes to this &file'
-                        '$$ &Done, skip remaining changes and files'
-                        '$$ Record &all changes to all remaining files'
-                        '$$ &Quit, recording no changes'
-                        '$$ &? (display help)'),
-        }
+        b'help': {
+            b'apply': _(
+                b'[Ynesfdaq?]'
+                b'$$ &Yes, apply this change'
+                b'$$ &No, skip this change'
+                b'$$ &Edit this change manually'
+                b'$$ &Skip remaining changes to this file'
+                b'$$ Apply remaining changes to this &file'
+                b'$$ &Done, skip remaining changes and files'
+                b'$$ Apply &all changes to all remaining files'
+                b'$$ &Quit, applying no changes'
+                b'$$ &? (display help)'
+            ),
+            b'discard': _(
+                b'[Ynesfdaq?]'
+                b'$$ &Yes, discard this change'
+                b'$$ &No, skip this change'
+                b'$$ &Edit this change manually'
+                b'$$ &Skip remaining changes to this file'
+                b'$$ Discard remaining changes to this &file'
+                b'$$ &Done, skip remaining changes and files'
+                b'$$ Discard &all changes to all remaining files'
+                b'$$ &Quit, discarding no changes'
+                b'$$ &? (display help)'
+            ),
+            b'keep': _(
+                b'[Ynesfdaq?]'
+                b'$$ &Yes, keep this change'
+                b'$$ &No, skip this change'
+                b'$$ &Edit this change manually'
+                b'$$ &Skip remaining changes to this file'
+                b'$$ Keep remaining changes to this &file'
+                b'$$ &Done, skip remaining changes and files'
+                b'$$ Keep &all changes to all remaining files'
+                b'$$ &Quit, keeping all changes'
+                b'$$ &? (display help)'
+            ),
+            b'record': _(
+                b'[Ynesfdaq?]'
+                b'$$ &Yes, record this change'
+                b'$$ &No, skip this change'
+                b'$$ &Edit this change manually'
+                b'$$ &Skip remaining changes to this file'
+                b'$$ Record remaining changes to this &file'
+                b'$$ &Done, skip remaining changes and files'
+                b'$$ Record &all changes to all remaining files'
+                b'$$ &Quit, recording no changes'
+                b'$$ &? (display help)'
+            ),
+        },
     }
 
+
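The '$$'-separated strings above bundle a short prompt with its
choices; '&' marks each choice's keyboard shortcut. A simplified split
(mirroring, not reusing, what ui.extractchoices does)::

    resps = (b'[Ynq?]'
             b'$$ &Yes, apply this change'
             b'$$ &No, skip this change'
             b'$$ &Quit, applying no changes')
    parts = [s.strip() for s in resps.split(b'$$')]
    prompt, choices = parts[0], parts[1:]
    keys = [c[c.index(b'&') + 1 : c.index(b'&') + 2].lower()
            for c in choices]
    assert prompt == b'[Ynq?]'
    assert keys == [b'y', b'n', b'q']
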
 def filterpatch(ui, headers, match, operation=None):
     """Interactively filter patch chunks into applied-only chunks"""
     messages = getmessages()
 
     if operation is None:
-        operation = 'record'
+        operation = b'record'
 
     def prompt(skipfile, skipall, query, chunk):
         """prompt query, and process base inputs
@@ -1088,31 +1182,32 @@
         if skipfile is not None:
             return skipfile, skipfile, skipall, newpatches
         while True:
-            resps = messages['help'][operation]
+            resps = messages[b'help'][operation]
             # IMPORTANT: keep the last line of this prompt short (<40 english
             # chars is a good target) because of issue6158.
-            r = ui.promptchoice("%s\n(enter ? for help) %s" % (query, resps))
-            ui.write("\n")
-            if r == 8: # ?
+            r = ui.promptchoice(b"%s\n(enter ? for help) %s" % (query, resps))
+            ui.write(b"\n")
+            if r == 8:  # ?
                 for c, t in ui.extractchoices(resps)[1]:
-                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
+                    ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                 continue
-            elif r == 0: # yes
+            elif r == 0:  # yes
                 ret = True
-            elif r == 1: # no
+            elif r == 1:  # no
                 ret = False
-            elif r == 2: # Edit patch
+            elif r == 2:  # Edit patch
                 if chunk is None:
-                    ui.write(_('cannot edit patch for whole file'))
-                    ui.write("\n")
+                    ui.write(_(b'cannot edit patch for whole file'))
+                    ui.write(b"\n")
                     continue
                 if chunk.header.binary():
-                    ui.write(_('cannot edit patch for binary file'))
-                    ui.write("\n")
+                    ui.write(_(b'cannot edit patch for binary file'))
+                    ui.write(b"\n")
                     continue
                 # Patch comment based on the Git one (based on comment at end of
                 # https://mercurial-scm.org/wiki/RecordExtension)
-                phelp = '---' + _("""
+                phelp = b'---' + _(
+                    """
 To remove '-' lines, make them ' ' lines (context).
 To remove '+' lines, delete them.
 Lines starting with # will be removed from the patch.
@@ -1122,32 +1217,39 @@
 file will be generated: you can use that when you try again. If
 all lines of the hunk are removed, then the edit is aborted and
 the hunk is left unchanged.
-""")
-                (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
-                                                      suffix=".diff")
+"""
+                )
+                (patchfd, patchfn) = pycompat.mkstemp(
+                    prefix=b"hg-editor-", suffix=b".diff"
+                )
                 ncpatchfp = None
                 try:
                     # Write the initial patch
                     f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
                     chunk.header.write(f)
                     chunk.write(f)
-                    f.write(''.join(['# ' + i + '\n'
-                                     for i in phelp.splitlines()]))
+                    f.write(
+                        b''.join(
+                            [b'# ' + i + b'\n' for i in phelp.splitlines()]
+                        )
+                    )
                     f.close()
                     # Start the editor and wait for it to complete
                     editor = ui.geteditor()
-                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
-                                    environ={'HGUSER': ui.username()},
-                                    blockedtag='filterpatch')
+                    ret = ui.system(
+                        b"%s \"%s\"" % (editor, patchfn),
+                        environ={b'HGUSER': ui.username()},
+                        blockedtag=b'filterpatch',
+                    )
                     if ret != 0:
-                        ui.warn(_("editor exited with exit code %d\n") % ret)
+                        ui.warn(_(b"editor exited with exit code %d\n") % ret)
                         continue
                     # Remove comment lines
                     patchfp = open(patchfn, r'rb')
                     ncpatchfp = stringio()
                     for line in util.iterfile(patchfp):
                         line = util.fromnativeeol(line)
-                        if not line.startswith('#'):
+                        if not line.startswith(b'#'):
                             ncpatchfp.write(line)
                     patchfp.close()
                     ncpatchfp.seek(0)
@@ -1158,35 +1260,36 @@
                 # Signal that the chunk shouldn't be applied as-is, but
                 # provide the new patch to be used instead.
                 ret = False
-            elif r == 3: # Skip
+            elif r == 3:  # Skip
                 ret = skipfile = False
-            elif r == 4: # file (Record remaining)
+            elif r == 4:  # file (Record remaining)
                 ret = skipfile = True
-            elif r == 5: # done, skip remaining
+            elif r == 5:  # done, skip remaining
                 ret = skipall = False
-            elif r == 6: # all
+            elif r == 6:  # all
                 ret = skipall = True
-            elif r == 7: # quit
-                raise error.Abort(_('user quit'))
+            elif r == 7:  # quit
+                raise error.Abort(_(b'user quit'))
             return ret, skipfile, skipall, newpatches
 
     seen = set()
-    applied = {}        # 'filename' -> [] of chunks
+    applied = {}  # 'filename' -> [] of chunks
     skipfile, skipall = None, None
     pos, total = 1, sum(len(h.hunks) for h in headers)
     for h in headers:
         pos += len(h.hunks)
         skipfile = None
         fixoffset = 0
-        hdr = ''.join(h.header)
+        hdr = b''.join(h.header)
         if hdr in seen:
             continue
         seen.add(hdr)
         if skipall is None:
             h.pretty(ui)
         files = h.files()
-        msg = (_('examine changes to %s?') %
-               _(' and ').join("'%s'" % f for f in files))
+        msg = _(b'examine changes to %s?') % _(b' and ').join(
+            b"'%s'" % f for f in files
+        )
         if all(match.exact(f) for f in files):
             r, skipall, np = True, None, None
         else:
@@ -1201,13 +1304,17 @@
             if skipfile is None and skipall is None:
                 chunk.pretty(ui)
             if total == 1:
-                msg = messages['single'][operation] % chunk.filename()
+                msg = messages[b'single'][operation] % chunk.filename()
             else:
                 idx = pos - len(h.hunks) + i
-                msg = messages['multiple'][operation] % (idx, total,
-                                                         chunk.filename())
-            r, skipfile, skipall, newpatches = prompt(skipfile,
-                    skipall, msg, chunk)
+                msg = messages[b'multiple'][operation] % (
+                    idx,
+                    total,
+                    chunk.filename(),
+                )
+            r, skipfile, skipall, newpatches = prompt(
+                skipfile, skipall, msg, chunk
+            )
             if r:
                 if fixoffset:
                     chunk = copy.copy(chunk)
@@ -1221,8 +1328,19 @@
                         applied[newhunk.filename()].append(newhunk)
             else:
                 fixoffset += chunk.removed - chunk.added
-    return (sum([h for h in applied.itervalues()
-               if h[0].special() or len(h) > 1], []), {})
+    return (
+        sum(
+            [
+                h
+                for h in pycompat.itervalues(applied)
+                if h[0].special() or len(h) > 1
+            ],
+            [],
+        ),
+        {},
+    )
+
+
 class hunk(object):
     def __init__(self, desc, num, lr, context):
         self.number = num
@@ -1244,8 +1362,8 @@
         def normalize(lines):
             nlines = []
             for line in lines:
-                if line.endswith('\r\n'):
-                    line = line[:-2] + '\n'
+                if line.endswith(b'\r\n'):
+                    line = line[:-2] + b'\n'
                 nlines.append(line)
             return nlines
 
@@ -1265,7 +1383,7 @@
     def read_unified_hunk(self, lr):
         m = unidesc.match(self.desc)
         if not m:
-            raise PatchError(_("bad hunk #%d") % self.number)
+            raise PatchError(_(b"bad hunk #%d") % self.number)
         self.starta, self.lena, self.startb, self.lenb = m.groups()
         if self.lena is None:
             self.lena = 1
@@ -1278,10 +1396,11 @@
         self.starta = int(self.starta)
         self.startb = int(self.startb)
         try:
-            diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
-                                self.a, self.b)
+            diffhelper.addlines(
+                lr, self.hunk, self.lena, self.lenb, self.a, self.b
+            )
         except error.ParseError as e:
-            raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
+            raise PatchError(_(b"bad hunk #%d: %s") % (self.number, e))
         # if we hit eof before finishing out the hunk, the last line will
         # be zero length.  Lets try to fix it up.
         while len(self.hunk[-1]) == 0:
@@ -1296,7 +1415,7 @@
         self.desc = lr.readline()
         m = contextdesc.match(self.desc)
         if not m:
-            raise PatchError(_("bad hunk #%d") % self.number)
+            raise PatchError(_(b"bad hunk #%d") % self.number)
         self.starta, aend = m.groups()
         self.starta = int(self.starta)
         if aend is None:
@@ -1306,18 +1425,19 @@
             self.lena += 1
         for x in pycompat.xrange(self.lena):
             l = lr.readline()
-            if l.startswith('---'):
+            if l.startswith(b'---'):
                 # lines addition, old block is empty
                 lr.push(l)
                 break
             s = l[2:]
-            if l.startswith('- ') or l.startswith('! '):
-                u = '-' + s
-            elif l.startswith('  '):
-                u = ' ' + s
+            if l.startswith(b'- ') or l.startswith(b'! '):
+                u = b'-' + s
+            elif l.startswith(b'  '):
+                u = b' ' + s
             else:
-                raise PatchError(_("bad hunk #%d old text line %d") %
-                                 (self.number, x))
+                raise PatchError(
+                    _(b"bad hunk #%d old text line %d") % (self.number, x)
+                )
             self.a.append(u)
             self.hunk.append(u)
 
@@ -1329,7 +1449,7 @@
             l = lr.readline()
         m = contextdesc.match(l)
         if not m:
-            raise PatchError(_("bad hunk #%d") % self.number)
+            raise PatchError(_(b"bad hunk #%d") % self.number)
         self.startb, bend = m.groups()
         self.startb = int(self.startb)
         if bend is None:
@@ -1353,27 +1473,28 @@
                 lr.push(l)
                 break
             s = l[2:]
-            if l.startswith('+ ') or l.startswith('! '):
-                u = '+' + s
-            elif l.startswith('  '):
-                u = ' ' + s
+            if l.startswith(b'+ ') or l.startswith(b'! '):
+                u = b'+' + s
+            elif l.startswith(b'  '):
+                u = b' ' + s
             elif len(self.b) == 0:
                 # line deletions, new block is empty
                 lr.push(l)
                 break
             else:
-                raise PatchError(_("bad hunk #%d old text line %d") %
-                                 (self.number, x))
+                raise PatchError(
+                    _(b"bad hunk #%d old text line %d") % (self.number, x)
+                )
             self.b.append(s)
             while True:
                 if hunki >= len(self.hunk):
-                    h = ""
+                    h = b""
                 else:
                     h = self.hunk[hunki]
                 hunki += 1
                 if h == u:
                     break
-                elif h.startswith('-'):
+                elif h.startswith(b'-'):
                     continue
                 else:
                     self.hunk.insert(hunki - 1, u)
@@ -1382,16 +1503,20 @@
         if not self.a:
             # this happens when lines were only added to the hunk
             for x in self.hunk:
-                if x.startswith('-') or x.startswith(' '):
+                if x.startswith(b'-') or x.startswith(b' '):
                     self.a.append(x)
         if not self.b:
             # this happens when lines were only deleted from the hunk
             for x in self.hunk:
-                if x.startswith('+') or x.startswith(' '):
+                if x.startswith(b'+') or x.startswith(b' '):
                     self.b.append(x[1:])
         # @@ -start,len +start,len @@
-        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
-                                             self.startb, self.lenb)
+        self.desc = b"@@ -%d,%d +%d,%d @@\n" % (
+            self.starta,
+            self.lena,
+            self.startb,
+            self.lenb,
+        )
         self.hunk[0] = self.desc
         self._fixnewline(lr)
 
@@ -1416,20 +1541,20 @@
             hlen = len(self.hunk)
             for x in pycompat.xrange(hlen - 1):
                 # the hunk starts with the @@ line, so use x+1
-                if self.hunk[x + 1].startswith(' '):
+                if self.hunk[x + 1].startswith(b' '):
                     top += 1
                 else:
                     break
             if not toponly:
                 for x in pycompat.xrange(hlen - 1):
-                    if self.hunk[hlen - bot - 1].startswith(' '):
+                    if self.hunk[hlen - bot - 1].startswith(b' '):
                         bot += 1
                     else:
                         break
 
             bot = min(fuzz, bot)
             top = min(fuzz, top)
-            return old[top:len(old) - bot], new[top:len(new) - bot], top
+            return old[top : len(old) - bot], new[top : len(new) - bot], top
         return old, new, 0
 
     def fuzzit(self, fuzz, toponly):
@@ -1443,12 +1568,14 @@
             newstart -= 1
         return old, oldstart, new, newstart
 
+
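The slicing above implements the fuzz: with ``fuzz=2``, up to two
leading and two trailing context lines are shaved off before matching.
A worked instance of the arithmetic (values chosen for illustration)::

    old = [b' a\n', b' b\n', b'-x\n', b' c\n']
    fuzz, top, bot = 2, 2, 1      # two context lines on top, one below
    top, bot = min(fuzz, top), min(fuzz, bot)
    assert old[top : len(old) - bot] == [b'-x\n']
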
 class binhunk(object):
     'A binary patch file.'
+
     def __init__(self, lr, fname):
         self.text = None
         self.delta = False
-        self.hunk = ['GIT binary patch\n']
+        self.hunk = [b'GIT binary patch\n']
         self._fname = fname
         self._read(lr)
 
@@ -1457,24 +1584,25 @@
 
     def new(self, lines):
         if self.delta:
-            return [applybindelta(self.text, ''.join(lines))]
+            return [applybindelta(self.text, b''.join(lines))]
         return [self.text]
 
     def _read(self, lr):
         def getline(lr, hunk):
             l = lr.readline()
             hunk.append(l)
-            return l.rstrip('\r\n')
+            return l.rstrip(b'\r\n')
 
         while True:
             line = getline(lr, self.hunk)
             if not line:
-                raise PatchError(_('could not extract "%s" binary data')
-                                 % self._fname)
-            if line.startswith('literal '):
+                raise PatchError(
+                    _(b'could not extract "%s" binary data') % self._fname
+                )
+            if line.startswith(b'literal '):
                 size = int(line[8:].rstrip())
                 break
-            if line.startswith('delta '):
+            if line.startswith(b'delta '):
                 size = int(line[6:].rstrip())
                 self.delta = True
                 break
@@ -1482,32 +1610,38 @@
         line = getline(lr, self.hunk)
         while len(line) > 1:
             l = line[0:1]
-            if l <= 'Z' and l >= 'A':
-                l = ord(l) - ord('A') + 1
+            if l <= b'Z' and l >= b'A':
+                l = ord(l) - ord(b'A') + 1
             else:
-                l = ord(l) - ord('a') + 27
+                l = ord(l) - ord(b'a') + 27
             try:
                 dec.append(util.b85decode(line[1:])[:l])
             except ValueError as e:
-                raise PatchError(_('could not decode "%s" binary patch: %s')
-                                 % (self._fname, stringutil.forcebytestr(e)))
+                raise PatchError(
+                    _(b'could not decode "%s" binary patch: %s')
+                    % (self._fname, stringutil.forcebytestr(e))
+                )
             line = getline(lr, self.hunk)
-        text = zlib.decompress(''.join(dec))
+        text = zlib.decompress(b''.join(dec))
         if len(text) != size:
-            raise PatchError(_('"%s" length is %d bytes, should be %d')
-                             % (self._fname, len(text), size))
+            raise PatchError(
+                _(b'"%s" length is %d bytes, should be %d')
+                % (self._fname, len(text), size)
+            )
         self.text = text
 
+
 def parsefilename(str):
     # --- filename \t|space stuff
-    s = str[4:].rstrip('\r\n')
-    i = s.find('\t')
+    s = str[4:].rstrip(b'\r\n')
+    i = s.find(b'\t')
     if i < 0:
-        i = s.find(' ')
+        i = s.find(b' ')
         if i < 0:
             return s
     return s[:i]
 
+
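Since parsefilename is short, it can be exercised nearly verbatim
(parameter renamed to avoid shadowing the builtin): the "--- "/"+++ "
marker is trimmed, then the name is cut at the first tab or space,
dropping any trailing timestamp::

    def parsefilename(s):
        s = s[4:].rstrip(b'\r\n')
        i = s.find(b'\t')
        if i < 0:
            i = s.find(b' ')
            if i < 0:
                return s
        return s[:i]

    assert parsefilename(b'--- a/foo.c\t2019-10-21 11:09:48\n') == b'a/foo.c'
    assert parsefilename(b'+++ b/foo.c\n') == b'b/foo.c'
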
 def reversehunks(hunks):
     '''reverse the signs in the hunks given as argument
 
@@ -1566,11 +1700,12 @@
 
     newhunks = []
     for c in hunks:
-        if util.safehasattr(c, 'reversehunk'):
+        if util.safehasattr(c, b'reversehunk'):
             c = c.reversehunk()
         newhunks.append(c)
     return newhunks
 
+
 def parsepatch(originalchunks, maxcontext=None):
     """patch -> [] of headers -> [] of hunks
 
@@ -1614,12 +1749,14 @@
      8
     +9
     """
+
     class parser(object):
         """patch parsing state machine"""
+
         def __init__(self):
             self.fromline = 0
             self.toline = 0
-            self.proc = ''
+            self.proc = b''
             self.header = None
             self.context = []
             self.before = []
@@ -1635,8 +1772,16 @@
 
         def addcontext(self, context):
             if self.hunk:
-                h = recordhunk(self.header, self.fromline, self.toline,
-                        self.proc, self.before, self.hunk, context, maxcontext)
+                h = recordhunk(
+                    self.header,
+                    self.fromline,
+                    self.toline,
+                    self.proc,
+                    self.before,
+                    self.hunk,
+                    context,
+                    maxcontext,
+                )
                 self.header.hunks.append(h)
                 self.fromline += len(self.before) + h.removed
                 self.toline += len(self.before) + h.added
@@ -1659,45 +1804,52 @@
             self.header = h
 
         def addother(self, line):
-            pass # 'other' lines are ignored
+            pass  # 'other' lines are ignored
 
         def finished(self):
             self.addcontext([])
             return self.headers
 
         transitions = {
-            'file': {'context': addcontext,
-                     'file': newfile,
-                     'hunk': addhunk,
-                     'range': addrange},
-            'context': {'file': newfile,
-                        'hunk': addhunk,
-                        'range': addrange,
-                        'other': addother},
-            'hunk': {'context': addcontext,
-                     'file': newfile,
-                     'range': addrange},
-            'range': {'context': addcontext,
-                      'hunk': addhunk},
-            'other': {'other': addother},
-            }
+            b'file': {
+                b'context': addcontext,
+                b'file': newfile,
+                b'hunk': addhunk,
+                b'range': addrange,
+            },
+            b'context': {
+                b'file': newfile,
+                b'hunk': addhunk,
+                b'range': addrange,
+                b'other': addother,
+            },
+            b'hunk': {
+                b'context': addcontext,
+                b'file': newfile,
+                b'range': addrange,
+            },
+            b'range': {b'context': addcontext, b'hunk': addhunk},
+            b'other': {b'other': addother},
+        }
 
     p = parser()
     fp = stringio()
-    fp.write(''.join(originalchunks))
+    fp.write(b''.join(originalchunks))
     fp.seek(0)
 
-    state = 'context'
+    state = b'context'
     for newstate, data in scanpatch(fp):
         try:
             p.transitions[state][newstate](p, data)
         except KeyError:
-            raise PatchError('unhandled transition: %s -> %s' %
-                                   (state, newstate))
+            raise PatchError(
+                b'unhandled transition: %s -> %s' % (state, newstate)
+            )
         state = newstate
     del fp
     return p.finished()
 
+
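The transition table doubles as a grammar for valid event sequences; a
toy walk over it (sets stand in for the bound-method values)::

    transitions = {
        b'file': {b'context', b'file', b'hunk', b'range'},
        b'context': {b'file', b'hunk', b'range', b'other'},
        b'hunk': {b'context', b'file', b'range'},
        b'range': {b'context', b'hunk'},
        b'other': {b'other'},
    }
    state = b'context'
    for newstate in (b'file', b'range', b'hunk', b'context'):
        assert newstate in transitions[state], (state, newstate)
        state = newstate
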
 def pathtransform(path, strip, prefix):
     '''turn a path from a patch into a path suitable for the repository
 
@@ -1722,23 +1874,26 @@
     pathlen = len(path)
     i = 0
     if strip == 0:
-        return '', prefix + path.rstrip()
+        return b'', prefix + path.rstrip()
     count = strip
     while count > 0:
-        i = path.find('/', i)
+        i = path.find(b'/', i)
         if i == -1:
-            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
-                             (count, strip, path))
+            raise PatchError(
+                _(b"unable to strip away %d of %d dirs from %s")
+                % (count, strip, path)
+            )
         i += 1
         # consume '//' in the path
-        while i < pathlen - 1 and path[i:i + 1] == '/':
+        while i < pathlen - 1 and path[i : i + 1] == b'/':
             i += 1
         count -= 1
     return path[:i].lstrip(), prefix + path[i:].rstrip()
 
+
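pathtransform can likewise be tried standalone (copied from above,
with PatchError replaced by ValueError so the snippet is
self-contained)::

    def pathtransform(path, strip, prefix):
        pathlen = len(path)
        i = 0
        if strip == 0:
            return b'', prefix + path.rstrip()
        count = strip
        while count > 0:
            i = path.find(b'/', i)
            if i == -1:
                raise ValueError('unable to strip %d dirs' % strip)
            i += 1
            # consume '//' in the path
            while i < pathlen - 1 and path[i : i + 1] == b'/':
                i += 1
            count -= 1
        return path[:i].lstrip(), prefix + path[i:].rstrip()

    assert pathtransform(b'a/b/c.txt', 1, b'') == (b'a/', b'b/c.txt')
    assert pathtransform(b'a/b/c.txt', 2, b'dst/') == (b'a/b/', b'dst/c.txt')
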
 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
-    nulla = afile_orig == "/dev/null"
-    nullb = bfile_orig == "/dev/null"
+    nulla = afile_orig == b"/dev/null"
+    nullb = bfile_orig == b"/dev/null"
     create = nulla and hunk.starta == 0 and hunk.lena == 0
     remove = nullb and hunk.startb == 0 and hunk.lenb == 0
     abase, afile = pathtransform(afile_orig, strip, prefix)
@@ -1752,17 +1907,22 @@
 
     # some diff programs apparently produce patches where the afile is
     # not /dev/null, but afile starts with bfile
-    abasedir = afile[:afile.rfind('/') + 1]
-    bbasedir = bfile[:bfile.rfind('/') + 1]
-    if (missing and abasedir == bbasedir and afile.startswith(bfile)
-        and hunk.starta == 0 and hunk.lena == 0):
+    abasedir = afile[: afile.rfind(b'/') + 1]
+    bbasedir = bfile[: bfile.rfind(b'/') + 1]
+    if (
+        missing
+        and abasedir == bbasedir
+        and afile.startswith(bfile)
+        and hunk.starta == 0
+        and hunk.lena == 0
+    ):
         create = True
         missing = False
 
     # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
     # diff is between a file and its backup. In this case, the original
     # file should be patched (see original mpatch code).
-    isbackup = (abase == bbase and bfile.startswith(afile))
+    isbackup = abase == bbase and bfile.startswith(afile)
     fname = None
     if not missing:
         if gooda and goodb:
@@ -1782,15 +1942,16 @@
         elif not nulla:
             fname = afile
         else:
-            raise PatchError(_("undefined source and destination files"))
+            raise PatchError(_(b"undefined source and destination files"))
 
     gp = patchmeta(fname)
     if create:
-        gp.op = 'ADD'
+        gp.op = b'ADD'
     elif remove:
-        gp.op = 'DELETE'
+        gp.op = b'DELETE'
     return gp
 
+
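The /dev/null convention drives the op choice: a null source with an
empty old range means a create, a null destination with an empty new
range means a delete. A condensed sketch of just those checks::

    def classify(afile, bfile, starta, lena, startb, lenb):
        nulla = afile == b'/dev/null'
        nullb = bfile == b'/dev/null'
        if nulla and starta == 0 and lena == 0:
            return b'ADD'
        if nullb and startb == 0 and lenb == 0:
            return b'DELETE'
        return b'MODIFY'

    assert classify(b'/dev/null', b'b/f', 0, 0, 1, 3) == b'ADD'
    assert classify(b'a/f', b'/dev/null', 1, 3, 0, 0) == b'DELETE'
    assert classify(b'a/f', b'b/f', 1, 3, 1, 4) == b'MODIFY'
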
 def scanpatch(fp):
     """like patch.iterhunks, but yield different events
 
@@ -1805,7 +1966,7 @@
     def scanwhile(first, p):
         """scan lr while predicate holds"""
         lines = [first]
-        for line in iter(lr.readline, ''):
+        for line in iter(lr.readline, b''):
             if p(line):
                 lines.append(line)
             else:
@@ -1813,31 +1974,34 @@
                 break
         return lines
 
-    for line in iter(lr.readline, ''):
-        if line.startswith('diff --git a/') or line.startswith('diff -r '):
+    for line in iter(lr.readline, b''):
+        if line.startswith(b'diff --git a/') or line.startswith(b'diff -r '):
+
             def notheader(line):
                 s = line.split(None, 1)
-                return not s or s[0] not in ('---', 'diff')
+                return not s or s[0] not in (b'---', b'diff')
+
             header = scanwhile(line, notheader)
             fromfile = lr.readline()
-            if fromfile.startswith('---'):
+            if fromfile.startswith(b'---'):
                 tofile = lr.readline()
                 header += [fromfile, tofile]
             else:
                 lr.push(fromfile)
-            yield 'file', header
-        elif line.startswith(' '):
-            cs = (' ', '\\')
-            yield 'context', scanwhile(line, lambda l: l.startswith(cs))
-        elif line.startswith(('-', '+')):
-            cs = ('-', '+', '\\')
-            yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
+            yield b'file', header
+        elif line.startswith(b' '):
+            cs = (b' ', b'\\')
+            yield b'context', scanwhile(line, lambda l: l.startswith(cs))
+        elif line.startswith((b'-', b'+')):
+            cs = (b'-', b'+', b'\\')
+            yield b'hunk', scanwhile(line, lambda l: l.startswith(cs))
         else:
             m = lines_re.match(line)
             if m:
-                yield 'range', m.groups()
+                yield b'range', m.groups()
             else:
-                yield 'other', line
+                yield b'other', line
+
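The classification boils down to the first bytes of each line; a
simplified per-line version (the real scanner also groups runs of
lines and matches ranges with lines_re)::

    def kind(line):
        if (line.startswith(b'diff --git a/')
                or line.startswith(b'diff -r ')):
            return b'file'
        if line.startswith(b' '):
            return b'context'
        if line.startswith((b'-', b'+')):
            return b'hunk'
        if line.startswith(b'@@ '):
            return b'range'
        return b'other'

    assert kind(b'diff --git a/f b/f\n') == b'file'
    assert kind(b'@@ -1,2 +1,3 @@\n') == b'range'
    assert kind(b'+new\n') == b'hunk'
    assert kind(b' ctx\n') == b'context'
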
 
 def scangitpatch(lr, firstline):
     """
@@ -1865,6 +2029,7 @@
     fp.seek(pos)
     return gitpatches
 
+
 def iterhunks(fp):
     """Read a patch and yield the following events:
     - ("file", afile, bfile, firsthunk): select a new target file.
@@ -1873,8 +2038,8 @@
     - ("git", gitchanges): current diff is in git format, gitchanges
     maps filenames to gitpatch records. Unique event.
     """
-    afile = ""
-    bfile = ""
+    afile = b""
+    bfile = b""
     state = None
     hunknum = 0
     emitfile = newfile = False
@@ -1885,64 +2050,71 @@
     context = None
     lr = linereader(fp)
 
-    for x in iter(lr.readline, ''):
+    for x in iter(lr.readline, b''):
         if state == BFILE and (
-            (not context and x.startswith('@'))
-            or (context is not False and x.startswith('***************'))
-            or x.startswith('GIT binary patch')):
+            (not context and x.startswith(b'@'))
+            or (context is not False and x.startswith(b'***************'))
+            or x.startswith(b'GIT binary patch')
+        ):
             gp = None
-            if (gitpatches and
-                gitpatches[-1].ispatching(afile, bfile)):
+            if gitpatches and gitpatches[-1].ispatching(afile, bfile):
                 gp = gitpatches.pop()
-            if x.startswith('GIT binary patch'):
+            if x.startswith(b'GIT binary patch'):
                 h = binhunk(lr, gp.path)
             else:
-                if context is None and x.startswith('***************'):
+                if context is None and x.startswith(b'***************'):
                     context = True
                 h = hunk(x, hunknum + 1, lr, context)
             hunknum += 1
             if emitfile:
                 emitfile = False
-                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
-            yield 'hunk', h
-        elif x.startswith('diff --git a/'):
-            m = gitre.match(x.rstrip(' \r\n'))
+                yield b'file', (afile, bfile, h, gp and gp.copy() or None)
+            yield b'hunk', h
+        elif x.startswith(b'diff --git a/'):
+            m = gitre.match(x.rstrip(b' \r\n'))
             if not m:
                 continue
             if gitpatches is None:
                 # scan whole input for git metadata
                 gitpatches = scangitpatch(lr, x)
-                yield 'git', [g.copy() for g in gitpatches
-                              if g.op in ('COPY', 'RENAME')]
+                yield b'git', [
+                    g.copy() for g in gitpatches if g.op in (b'COPY', b'RENAME')
+                ]
                 gitpatches.reverse()
-            afile = 'a/' + m.group(1)
-            bfile = 'b/' + m.group(2)
+            afile = b'a/' + m.group(1)
+            bfile = b'b/' + m.group(2)
             while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                 gp = gitpatches.pop()
-                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
+                yield b'file', (
+                    b'a/' + gp.path,
+                    b'b/' + gp.path,
+                    None,
+                    gp.copy(),
+                )
             if not gitpatches:
-                raise PatchError(_('failed to synchronize metadata for "%s"')
-                                 % afile[2:])
+                raise PatchError(
+                    _(b'failed to synchronize metadata for "%s"') % afile[2:]
+                )
             newfile = True
-        elif x.startswith('---'):
+        elif x.startswith(b'---'):
             # check for a unified diff
             l2 = lr.readline()
-            if not l2.startswith('+++'):
+            if not l2.startswith(b'+++'):
                 lr.push(l2)
                 continue
             newfile = True
             context = False
             afile = parsefilename(x)
             bfile = parsefilename(l2)
-        elif x.startswith('***'):
+        elif x.startswith(b'***'):
             # check for a context diff
             l2 = lr.readline()
-            if not l2.startswith('---'):
+            if not l2.startswith(b'---'):
                 lr.push(l2)
                 continue
             l3 = lr.readline()
             lr.push(l3)
-            if not l3.startswith("***************"):
+            if not l3.startswith(b"***************"):
                 lr.push(l2)
                 continue
             newfile = True
@@ -1958,12 +2130,14 @@
 
     while gitpatches:
         gp = gitpatches.pop()
-        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
+        yield b'file', (b'a/' + gp.path, b'b/' + gp.path, None, gp.copy())
+
 
 def applybindelta(binchunk, data):
     """Apply a binary delta hunk
     The algorithm used is the algorithm from git's patch-delta.c
     """
+
     def deltahead(binchunk):
         i = 0
         for c in pycompat.bytestr(binchunk):
@@ -1971,38 +2145,39 @@
             if not (ord(c) & 0x80):
                 return i
         return i
-    out = ""
+
+    out = b""
     s = deltahead(binchunk)
     binchunk = binchunk[s:]
     s = deltahead(binchunk)
     binchunk = binchunk[s:]
     i = 0
     while i < len(binchunk):
-        cmd = ord(binchunk[i:i + 1])
+        cmd = ord(binchunk[i : i + 1])
         i += 1
-        if (cmd & 0x80):
+        if cmd & 0x80:
             offset = 0
             size = 0
-            if (cmd & 0x01):
-                offset = ord(binchunk[i:i + 1])
+            if cmd & 0x01:
+                offset = ord(binchunk[i : i + 1])
                 i += 1
-            if (cmd & 0x02):
-                offset |= ord(binchunk[i:i + 1]) << 8
+            if cmd & 0x02:
+                offset |= ord(binchunk[i : i + 1]) << 8
                 i += 1
-            if (cmd & 0x04):
-                offset |= ord(binchunk[i:i + 1]) << 16
+            if cmd & 0x04:
+                offset |= ord(binchunk[i : i + 1]) << 16
                 i += 1
-            if (cmd & 0x08):
-                offset |= ord(binchunk[i:i + 1]) << 24
+            if cmd & 0x08:
+                offset |= ord(binchunk[i : i + 1]) << 24
                 i += 1
-            if (cmd & 0x10):
-                size = ord(binchunk[i:i + 1])
+            if cmd & 0x10:
+                size = ord(binchunk[i : i + 1])
                 i += 1
-            if (cmd & 0x20):
-                size |= ord(binchunk[i:i + 1]) << 8
+            if cmd & 0x20:
+                size |= ord(binchunk[i : i + 1]) << 8
                 i += 1
-            if (cmd & 0x40):
-                size |= ord(binchunk[i:i + 1]) << 16
+            if cmd & 0x40:
+                size |= ord(binchunk[i : i + 1]) << 16
                 i += 1
             if size == 0:
                 size = 0x10000
@@ -2013,10 +2188,11 @@
             out += binchunk[i:offset_end]
             i += cmd
         else:
-            raise PatchError(_('unexpected delta opcode 0'))
+            raise PatchError(_(b'unexpected delta opcode 0'))
     return out
 
-def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
+
+def applydiff(ui, fp, backend, store, strip=1, prefix=b'', eolmode=b'strict'):
     """Reads a patch from fp and tries to apply it.
 
     Returns 0 for a clean patch, -1 if any rejects were found and 1 if
@@ -2026,19 +2202,31 @@
     read in binary mode. Otherwise, line endings are ignored when
     patching, then normalized according to 'eolmode'.
     """
-    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
-                      prefix=prefix, eolmode=eolmode)
+    return _applydiff(
+        ui,
+        fp,
+        patchfile,
+        backend,
+        store,
+        strip=strip,
+        prefix=prefix,
+        eolmode=eolmode,
+    )
+
 
 def _canonprefix(repo, prefix):
     if prefix:
         prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
-        if prefix != '':
-            prefix += '/'
+        if prefix != b'':
+            prefix += b'/'
     return prefix
 
-def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
-               eolmode='strict'):
+
+def _applydiff(
+    ui, fp, patcher, backend, store, strip=1, prefix=b'', eolmode=b'strict'
+):
     prefix = _canonprefix(backend.repo, prefix)
+
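+    # gp paths carry no a/ or b/ prefix, so strip one component less than
+    # a plain "patch -p<strip>" would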
     def pstrip(p):
         return pathtransform(p, strip - 1, prefix)[1]
 
@@ -2047,13 +2235,13 @@
     current_file = None
 
     for state, values in iterhunks(fp):
-        if state == 'hunk':
+        if state == b'hunk':
             if not current_file:
                 continue
             ret = current_file.apply(values)
             if ret > 0:
                 err = 1
-        elif state == 'file':
+        elif state == b'file':
             if current_file:
                 rejects += current_file.close()
                 current_file = None
@@ -2063,43 +2251,50 @@
                 if gp.oldpath:
                     gp.oldpath = pstrip(gp.oldpath)
             else:
-                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
-                                   prefix)
-            if gp.op == 'RENAME':
+                gp = makepatchmeta(
+                    backend, afile, bfile, first_hunk, strip, prefix
+                )
+            if gp.op == b'RENAME':
                 backend.unlink(gp.oldpath)
             if not first_hunk:
-                if gp.op == 'DELETE':
+                if gp.op == b'DELETE':
                     backend.unlink(gp.path)
                     continue
                 data, mode = None, None
-                if gp.op in ('RENAME', 'COPY'):
+                if gp.op in (b'RENAME', b'COPY'):
                     data, mode = store.getfile(gp.oldpath)[:2]
                     if data is None:
                         # This means that the old path does not exist
-                        raise PatchError(_("source file '%s' does not exist")
-                                           % gp.oldpath)
+                        raise PatchError(
+                            _(b"source file '%s' does not exist") % gp.oldpath
+                        )
                 if gp.mode:
                     mode = gp.mode
-                    if gp.op == 'ADD':
+                    if gp.op == b'ADD':
                         # Added files without content have no hunk and
                         # must be created
-                        data = ''
+                        data = b''
                 if data or mode:
-                    if (gp.op in ('ADD', 'RENAME', 'COPY')
-                        and backend.exists(gp.path)):
-                        raise PatchError(_("cannot create %s: destination "
-                                           "already exists") % gp.path)
+                    if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists(
+                        gp.path
+                    ):
+                        raise PatchError(
+                            _(
+                                b"cannot create %s: destination "
+                                b"already exists"
+                            )
+                            % gp.path
+                        )
                     backend.setfile(gp.path, data, mode, gp.oldpath)
                 continue
             try:
-                current_file = patcher(ui, gp, backend, store,
-                                       eolmode=eolmode)
+                current_file = patcher(ui, gp, backend, store, eolmode=eolmode)
             except PatchError as inst:
-                ui.warn(str(inst) + '\n')
+                ui.warn(str(inst) + b'\n')
                 current_file = None
                 rejects += 1
                 continue
-        elif state == 'git':
+        elif state == b'git':
             for gp in values:
                 path = pstrip(gp.oldpath)
                 data, mode = backend.getfile(path)
@@ -2112,7 +2307,7 @@
                 else:
                     store.setfile(path, data, mode)
         else:
-            raise error.Abort(_('unsupported parser state: %s') % state)
+            raise error.Abort(_(b'unsupported parser state: %s') % state)
 
     if current_file:
         rejects += current_file.close()
@@ -2121,8 +2316,8 @@
         return -1
     return err
 
-def _externalpatch(ui, repo, patcher, patchname, strip, files,
-                   similarity):
+
+def _externalpatch(ui, repo, patcher, patchname, strip, files, similarity):
     """use <patcher> to apply <patchname> to the working directory.
     returns whether patch was applied with fuzz factor."""
 
@@ -2130,82 +2325,110 @@
     args = []
     cwd = repo.root
     if cwd:
-        args.append('-d %s' % procutil.shellquote(cwd))
-    cmd = ('%s %s -p%d < %s'
-           % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
-    ui.debug('Using external patch tool: %s\n' % cmd)
-    fp = procutil.popen(cmd, 'rb')
+        args.append(b'-d %s' % procutil.shellquote(cwd))
+    cmd = b'%s %s -p%d < %s' % (
+        patcher,
+        b' '.join(args),
+        strip,
+        procutil.shellquote(patchname),
+    )
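+    # e.g.: patch -d '/repo/root' -p1 < 'changes.diff'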
+    ui.debug(b'Using external patch tool: %s\n' % cmd)
+    fp = procutil.popen(cmd, b'rb')
     try:
         for line in util.iterfile(fp):
             line = line.rstrip()
-            ui.note(line + '\n')
-            if line.startswith('patching file '):
+            ui.note(line + b'\n')
+            if line.startswith(b'patching file '):
                 pf = util.parsepatchoutput(line)
                 printed_file = False
                 files.add(pf)
-            elif line.find('with fuzz') >= 0:
+            elif line.find(b'with fuzz') >= 0:
                 fuzz = True
                 if not printed_file:
-                    ui.warn(pf + '\n')
+                    ui.warn(pf + b'\n')
                     printed_file = True
-                ui.warn(line + '\n')
-            elif line.find('saving rejects to file') >= 0:
-                ui.warn(line + '\n')
-            elif line.find('FAILED') >= 0:
+                ui.warn(line + b'\n')
+            elif line.find(b'saving rejects to file') >= 0:
+                ui.warn(line + b'\n')
+            elif line.find(b'FAILED') >= 0:
                 if not printed_file:
-                    ui.warn(pf + '\n')
+                    ui.warn(pf + b'\n')
                     printed_file = True
-                ui.warn(line + '\n')
+                ui.warn(line + b'\n')
     finally:
         if files:
             scmutil.marktouched(repo, files, similarity)
     code = fp.close()
     if code:
-        raise PatchError(_("patch command failed: %s") %
-                         procutil.explainexit(code))
+        raise PatchError(
+            _(b"patch command failed: %s") % procutil.explainexit(code)
+        )
     return fuzz
 
-def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
-                 eolmode='strict'):
+
+def patchbackend(
+    ui, backend, patchobj, strip, prefix, files=None, eolmode=b'strict'
+):
     if files is None:
         files = set()
     if eolmode is None:
-        eolmode = ui.config('patch', 'eol')
+        eolmode = ui.config(b'patch', b'eol')
     if eolmode.lower() not in eolmodes:
-        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
+        raise error.Abort(_(b'unsupported line endings type: %s') % eolmode)
     eolmode = eolmode.lower()
 
     store = filestore()
     try:
-        fp = open(patchobj, 'rb')
+        fp = open(patchobj, b'rb')
     except TypeError:
         fp = patchobj
     try:
-        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
-                        eolmode=eolmode)
+        ret = applydiff(
+            ui, fp, backend, store, strip=strip, prefix=prefix, eolmode=eolmode
+        )
     finally:
         if fp != patchobj:
             fp.close()
         files.update(backend.close())
         store.close()
     if ret < 0:
-        raise PatchError(_('patch failed to apply'))
+        raise PatchError(_(b'patch failed to apply'))
     return ret > 0
 
-def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
-                  eolmode='strict', similarity=0):
+
+def internalpatch(
+    ui,
+    repo,
+    patchobj,
+    strip,
+    prefix=b'',
+    files=None,
+    eolmode=b'strict',
+    similarity=0,
+):
     """use builtin patch to apply <patchobj> to the working directory.
     returns whether patch was applied with fuzz factor."""
     backend = workingbackend(ui, repo, similarity)
     return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
 
-def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
-              eolmode='strict'):
+
+def patchrepo(
+    ui, repo, ctx, store, patchobj, strip, prefix, files=None, eolmode=b'strict'
+):
     backend = repobackend(ui, repo, ctx, store)
     return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
 
-def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
-          similarity=0):
+
+def patch(
+    ui,
+    repo,
+    patchname,
+    strip=1,
+    prefix=b'',
+    files=None,
+    eolmode=b'strict',
+    similarity=0,
+):
     """Apply <patchname> to the working directory.
 
     'eolmode' specifies how end of lines should be handled. It can be:
@@ -2217,48 +2440,66 @@
 
     Returns whether patch was applied with fuzz factor.
     """
-    patcher = ui.config('ui', 'patch')
+    patcher = ui.config(b'ui', b'patch')
     if files is None:
         files = set()
     if patcher:
-        return _externalpatch(ui, repo, patcher, patchname, strip,
-                              files, similarity)
-    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
-                         similarity)
-
-def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
+        return _externalpatch(
+            ui, repo, patcher, patchname, strip, files, similarity
+        )
+    return internalpatch(
+        ui, repo, patchname, strip, prefix, files, eolmode, similarity
+    )
+
+
+def changedfiles(ui, repo, patchpath, strip=1, prefix=b''):
     backend = fsbackend(ui, repo.root)
     prefix = _canonprefix(repo, prefix)
-    with open(patchpath, 'rb') as fp:
+    with open(patchpath, b'rb') as fp:
         changed = set()
         for state, values in iterhunks(fp):
-            if state == 'file':
+            if state == b'file':
                 afile, bfile, first_hunk, gp = values
                 if gp:
                     gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                     if gp.oldpath:
-                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
-                                                   prefix)[1]
+                        gp.oldpath = pathtransform(
+                            gp.oldpath, strip - 1, prefix
+                        )[1]
                 else:
-                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
-                                       prefix)
+                    gp = makepatchmeta(
+                        backend, afile, bfile, first_hunk, strip, prefix
+                    )
                 changed.add(gp.path)
-                if gp.op == 'RENAME':
+                if gp.op == b'RENAME':
                     changed.add(gp.oldpath)
-            elif state not in ('hunk', 'git'):
-                raise error.Abort(_('unsupported parser state: %s') % state)
+            elif state not in (b'hunk', b'git'):
+                raise error.Abort(_(b'unsupported parser state: %s') % state)
         return changed
 
+
 class GitDiffRequired(Exception):
     pass
 
+
 diffopts = diffutil.diffallopts
 diffallopts = diffutil.diffallopts
 difffeatureopts = diffutil.difffeatureopts
 
-def diff(repo, node1=None, node2=None, match=None, changes=None,
-         opts=None, losedatafn=None, pathfn=None, copy=None,
-         copysourcematch=None, hunksfilterfn=None):
+
+def diff(
+    repo,
+    node1=None,
+    node2=None,
+    match=None,
+    changes=None,
+    opts=None,
+    losedatafn=None,
+    pathfn=None,
+    copy=None,
+    copysourcematch=None,
+    hunksfilterfn=None,
+):
     '''yields diff of changes to files between two nodes, or node and
     working directory.
 
@@ -2295,24 +2536,44 @@
     ctx2 = repo[node2]
 
     for fctx1, fctx2, hdr, hunks in diffhunks(
-            repo, ctx1=ctx1, ctx2=ctx2, match=match, changes=changes, opts=opts,
-            losedatafn=losedatafn, pathfn=pathfn, copy=copy,
-            copysourcematch=copysourcematch):
+        repo,
+        ctx1=ctx1,
+        ctx2=ctx2,
+        match=match,
+        changes=changes,
+        opts=opts,
+        losedatafn=losedatafn,
+        pathfn=pathfn,
+        copy=copy,
+        copysourcematch=copysourcematch,
+    ):
         if hunksfilterfn is not None:
             # If the file has been removed, fctx2 is None; but this should
             # not occur here since we catch removed files early in
             # logcmdutil.getlinerangerevs() for 'hg log -L'.
-            assert fctx2 is not None, (
-                'fctx2 unexpectly None in diff hunks filtering')
+            assert (
+                fctx2 is not None
+            ), b'fctx2 unexpectedly None in diff hunks filtering'
             hunks = hunksfilterfn(fctx2, hunks)
-        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
+        text = b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
         if hdr and (text or len(hdr) > 1):
-            yield '\n'.join(hdr) + '\n'
+            yield b'\n'.join(hdr) + b'\n'
         if text:
             yield text
 
-def diffhunks(repo, ctx1, ctx2, match=None, changes=None, opts=None,
-              losedatafn=None, pathfn=None, copy=None, copysourcematch=None):
+
+def diffhunks(
+    repo,
+    ctx1,
+    ctx2,
+    match=None,
+    changes=None,
+    opts=None,
+    losedatafn=None,
+    pathfn=None,
+    copy=None,
+    copysourcematch=None,
+):
     """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
     where `header` is a list of diff headers and `hunks` is an iterable of
     (`hunkrange`, `hunklines`) tuples.
@@ -2326,6 +2587,7 @@
     def lrugetfilectx():
         cache = {}
         order = collections.deque()
+
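+        # cache filelogs by path so repeated lookups of the same file
+        # reuse the already-open revlog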
         def getfilectx(f, ctx):
             fctx = ctx.filectx(f, filelog=cache.get(f))
             if f not in cache:
@@ -2336,7 +2598,9 @@
                 order.remove(f)
             order.append(f)
             return fctx
+
         return getfilectx
+
     getfilectx = lrugetfilectx()
 
     if not changes:
@@ -2360,8 +2624,11 @@
     if copysourcematch:
         # filter out copies where source side isn't inside the matcher
         # (copies.pathcopies() already filtered out the destination)
-        copy = {dst: src for dst, src in copy.iteritems()
-                if copysourcematch(src)}
+        copy = {
+            dst: src
+            for dst, src in pycompat.iteritems(copy)
+            if copysourcematch(src)
+        }
 
     modifiedset = set(modified)
     addedset = set(added)
@@ -2387,17 +2654,33 @@
             del copy[dst]
 
     prefetchmatch = scmutil.matchfiles(
-        repo, list(modifiedset | addedset | removedset))
+        repo, list(modifiedset | addedset | removedset)
+    )
     scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
 
     def difffn(opts, losedata):
-        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
-                       copy, getfilectx, opts, losedata, pathfn)
+        return trydiff(
+            repo,
+            revs,
+            ctx1,
+            ctx2,
+            modified,
+            added,
+            removed,
+            copy,
+            getfilectx,
+            opts,
+            losedata,
+            pathfn,
+        )
+
     if opts.upgrade and not opts.git:
         try:
+
             def losedata(fn):
                 if not losedatafn or not losedatafn(fn=fn):
                     raise GitDiffRequired
+
             # Buffer the whole output until we are sure it can be generated
             return list(difffn(opts.copy(git=False), losedata))
         except GitDiffRequired:
@@ -2405,42 +2688,44 @@
     else:
         return difffn(opts, None)
 
+
 def diffsinglehunk(hunklines):
     """yield tokens for a list of lines in a single hunk"""
     for line in hunklines:
         # chomp
-        chompline = line.rstrip('\r\n')
+        chompline = line.rstrip(b'\r\n')
         # highlight tabs and trailing whitespace
         stripline = chompline.rstrip()
-        if line.startswith('-'):
-            label = 'diff.deleted'
-        elif line.startswith('+'):
-            label = 'diff.inserted'
+        if line.startswith(b'-'):
+            label = b'diff.deleted'
+        elif line.startswith(b'+'):
+            label = b'diff.inserted'
         else:
-            raise error.ProgrammingError('unexpected hunk line: %s' % line)
+            raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
         for token in tabsplitter.findall(stripline):
-            if token.startswith('\t'):
-                yield (token, 'diff.tab')
+            if token.startswith(b'\t'):
+                yield (token, b'diff.tab')
             else:
                 yield (token, label)
 
         if chompline != stripline:
-            yield (chompline[len(stripline):], 'diff.trailingwhitespace')
+            yield (chompline[len(stripline) :], b'diff.trailingwhitespace')
         if chompline != line:
-            yield (line[len(chompline):], '')
+            yield (line[len(chompline) :], b'')
+
 
 def diffsinglehunkinline(hunklines):
     """yield tokens for a list of lines in a single hunk, with inline colors"""
     # prepare deleted, and inserted content
-    a = ''
-    b = ''
+    a = b''
+    b = b''
     for line in hunklines:
-        if line[0:1] == '-':
+        if line[0:1] == b'-':
             a += line[1:]
-        elif line[0:1] == '+':
+        elif line[0:1] == b'+':
             b += line[1:]
         else:
-            raise error.ProgrammingError('unexpected hunk line: %s' % line)
+            raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
     # fast path: if either side is empty, use diffsinglehunk
     if not a or not b:
         for t in diffsinglehunk(hunklines):
@@ -2450,77 +2735,84 @@
     al = wordsplitter.findall(a)
     bl = wordsplitter.findall(b)
     # re-arrange the words to lines since the diff algorithm is line-based
-    aln = [s if s == '\n' else s + '\n' for s in al]
-    bln = [s if s == '\n' else s + '\n' for s in bl]
-    an = ''.join(aln)
-    bn = ''.join(bln)
+    aln = [s if s == b'\n' else s + b'\n' for s in al]
+    bln = [s if s == b'\n' else s + b'\n' for s in bl]
+    an = b''.join(aln)
+    bn = b''.join(bln)
     # run the diff algorithm, prepare atokens and btokens
     atokens = []
     btokens = []
     blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
     for (a1, a2, b1, b2), btype in blocks:
-        changed = btype == '!'
-        for token in mdiff.splitnewlines(''.join(al[a1:a2])):
+        changed = btype == b'!'
+        for token in mdiff.splitnewlines(b''.join(al[a1:a2])):
             atokens.append((changed, token))
-        for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
+        for token in mdiff.splitnewlines(b''.join(bl[b1:b2])):
             btokens.append((changed, token))
 
     # yield deleted tokens, then inserted ones
-    for prefix, label, tokens in [('-', 'diff.deleted', atokens),
-                                  ('+', 'diff.inserted', btokens)]:
+    for prefix, label, tokens in [
+        (b'-', b'diff.deleted', atokens),
+        (b'+', b'diff.inserted', btokens),
+    ]:
         nextisnewline = True
         for changed, token in tokens:
             if nextisnewline:
                 yield (prefix, label)
                 nextisnewline = False
             # special handling line end
-            isendofline = token.endswith('\n')
+            isendofline = token.endswith(b'\n')
             if isendofline:
-                chomp = token[:-1] # chomp
-                if chomp.endswith('\r'):
+                chomp = token[:-1]  # chomp
+                if chomp.endswith(b'\r'):
                     chomp = chomp[:-1]
-                endofline = token[len(chomp):]
-                token = chomp.rstrip() # detect spaces at the end
-                endspaces = chomp[len(token):]
+                endofline = token[len(chomp) :]
+                token = chomp.rstrip()  # detect spaces at the end
+                endspaces = chomp[len(token) :]
             # scan tabs
             for maybetab in tabsplitter.findall(token):
                 if b'\t' == maybetab[0:1]:
-                    currentlabel = 'diff.tab'
+                    currentlabel = b'diff.tab'
                 else:
                     if changed:
-                        currentlabel = label + '.changed'
+                        currentlabel = label + b'.changed'
                     else:
-                        currentlabel = label + '.unchanged'
+                        currentlabel = label + b'.unchanged'
                 yield (maybetab, currentlabel)
             if isendofline:
                 if endspaces:
-                    yield (endspaces, 'diff.trailingwhitespace')
-                yield (endofline, '')
+                    yield (endspaces, b'diff.trailingwhitespace')
+                yield (endofline, b'')
                 nextisnewline = True
 
+
 def difflabel(func, *args, **kw):
     '''yields 2-tuples of (output, label) based on the output of func()'''
     if kw.get(r'opts') and kw[r'opts'].worddiff:
         dodiffhunk = diffsinglehunkinline
     else:
         dodiffhunk = diffsinglehunk
-    headprefixes = [('diff', 'diff.diffline'),
-                    ('copy', 'diff.extended'),
-                    ('rename', 'diff.extended'),
-                    ('old', 'diff.extended'),
-                    ('new', 'diff.extended'),
-                    ('deleted', 'diff.extended'),
-                    ('index', 'diff.extended'),
-                    ('similarity', 'diff.extended'),
-                    ('---', 'diff.file_a'),
-                    ('+++', 'diff.file_b')]
-    textprefixes = [('@', 'diff.hunk'),
-                    # - and + are handled by diffsinglehunk
-                   ]
+    headprefixes = [
+        (b'diff', b'diff.diffline'),
+        (b'copy', b'diff.extended'),
+        (b'rename', b'diff.extended'),
+        (b'old', b'diff.extended'),
+        (b'new', b'diff.extended'),
+        (b'deleted', b'diff.extended'),
+        (b'index', b'diff.extended'),
+        (b'similarity', b'diff.extended'),
+        (b'---', b'diff.file_a'),
+        (b'+++', b'diff.file_b'),
+    ]
+    textprefixes = [
+        (b'@', b'diff.hunk'),
+        # - and + are handled by diffsinglehunk
+    ]
     head = False
 
     # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
     hunkbuffer = []
+
     def consumehunkbuffer():
         if hunkbuffer:
             for token in dodiffhunk(hunkbuffer):
@@ -2528,17 +2820,19 @@
             hunkbuffer[:] = []
 
     for chunk in func(*args, **kw):
-        lines = chunk.split('\n')
+        lines = chunk.split(b'\n')
         linecount = len(lines)
         for i, line in enumerate(lines):
             if head:
-                if line.startswith('@'):
+                if line.startswith(b'@'):
                     head = False
             else:
-                if line and not line.startswith((' ', '+', '-', '@', '\\')):
+                if line and not line.startswith(
+                    (b' ', b'+', b'-', b'@', b'\\')
+                ):
                     head = True
             diffline = False
-            if not head and line and line.startswith(('+', '-')):
+            if not head and line and line.startswith((b'+', b'-')):
                 diffline = True
 
             prefixes = textprefixes
@@ -2548,7 +2842,7 @@
                 # buffered
                 bufferedline = line
                 if i + 1 < linecount:
-                    bufferedline += "\n"
+                    bufferedline += b"\n"
                 hunkbuffer.append(bufferedline)
             else:
                 # unbuffered
@@ -2559,20 +2853,24 @@
                     if stripline.startswith(prefix):
                         yield (stripline, label)
                         if line != stripline:
-                            yield (line[len(stripline):],
-                                   'diff.trailingwhitespace')
+                            yield (
+                                line[len(stripline) :],
+                                b'diff.trailingwhitespace',
+                            )
                         break
                 else:
-                    yield (line, '')
+                    yield (line, b'')
                 if i + 1 < linecount:
-                    yield ('\n', '')
+                    yield (b'\n', b'')
         for token in consumehunkbuffer():
             yield token
 
+
 def diffui(*args, **kw):
     '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
     return difflabel(diff, *args, **kw)
 
+
 def _filepairs(modified, added, removed, copy, opts):
     '''generates tuples (f1, f2, copyop), where f1 is the name of the file
     before and f2 is the name after. For added files, f1 will be None,
@@ -2593,21 +2891,37 @@
                 if opts.git:
                     f1 = copy[f]
                     if f1 in removedset and f1 not in gone:
-                        copyop = 'rename'
+                        copyop = b'rename'
                         gone.add(f1)
                     else:
-                        copyop = 'copy'
+                        copyop = b'copy'
         elif f in removedset:
             f2 = None
             if opts.git:
                 # have we already reported a copy above?
-                if (f in copyto and copyto[f] in addedset
-                    and copy[copyto[f]] == f):
+                if (
+                    f in copyto
+                    and copyto[f] in addedset
+                    and copy[copyto[f]] == f
+                ):
                     continue
         yield f1, f2, copyop
 
-def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
-            copy, getfilectx, opts, losedatafn, pathfn):
+
+def trydiff(
+    repo,
+    revs,
+    ctx1,
+    ctx2,
+    modified,
+    added,
+    removed,
+    copy,
+    getfilectx,
+    opts,
+    losedatafn,
+    pathfn,
+):
     '''given input data, generate a diff and yield it in blocks
 
     If generating a diff would lose data like flags or binary data and
@@ -2618,21 +2932,21 @@
 
     def gitindex(text):
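+        # same scheme as `git hash-object`: SHA-1 of "blob <size>\0" + data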
         if not text:
-            text = ""
+            text = b""
         l = len(text)
-        s = hashlib.sha1('blob %d\0' % l)
+        s = hashlib.sha1(b'blob %d\0' % l)
         s.update(text)
         return hex(s.digest())
 
     if opts.noprefix:
-        aprefix = bprefix = ''
+        aprefix = bprefix = b''
     else:
-        aprefix = 'a/'
-        bprefix = 'b/'
+        aprefix = b'a/'
+        bprefix = b'b/'
 
     def diffline(f, revs):
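+        # builds the plain header "diff -r <rev> [-r <rev>] <file>"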
-        revinfo = ' '.join(["-r %s" % rev for rev in revs])
-        return 'diff %s %s' % (revinfo, f)
+        revinfo = b' '.join([b"-r %s" % rev for rev in revs])
+        return b'diff %s %s' % (revinfo, f)
 
     def isempty(fctx):
         return fctx is None or fctx.size() == 0
@@ -2640,7 +2954,7 @@
     date1 = dateutil.datestr(ctx1.date())
     date2 = dateutil.datestr(ctx2.date())
 
-    gitmode = {'l': '120000', 'x': '100755', '': '100644'}
+    gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
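+    # flag -> git mode: 'l' symlink, 'x' executable, '' regular file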
 
     if not pathfn:
         pathfn = lambda f: f
@@ -2667,40 +2981,48 @@
             binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
 
         if losedatafn and not opts.git:
-            if (binary or
+            if (
+                binary
+                or
                 # copy/rename
-                f2 in copy or
+                f2 in copy
+                or
                 # empty file creation
-                (not f1 and isempty(fctx2)) or
+                (not f1 and isempty(fctx2))
+                or
                 # empty file deletion
-                (isempty(fctx1) and not f2) or
+                (isempty(fctx1) and not f2)
+                or
                 # create with flags
-                (not f1 and flag2) or
+                (not f1 and flag2)
+                or
                 # change flags
-                (f1 and f2 and flag1 != flag2)):
+                (f1 and f2 and flag1 != flag2)
+            ):
                 losedatafn(f2 or f1)
 
         path1 = pathfn(f1 or f2)
         path2 = pathfn(f2 or f1)
         header = []
         if opts.git:
-            header.append('diff --git %s%s %s%s' %
-                          (aprefix, path1, bprefix, path2))
-            if not f1: # added
-                header.append('new file mode %s' % gitmode[flag2])
-            elif not f2: # removed
-                header.append('deleted file mode %s' % gitmode[flag1])
+            header.append(
+                b'diff --git %s%s %s%s' % (aprefix, path1, bprefix, path2)
+            )
+            if not f1:  # added
+                header.append(b'new file mode %s' % gitmode[flag2])
+            elif not f2:  # removed
+                header.append(b'deleted file mode %s' % gitmode[flag1])
             else:  # modified/copied/renamed
                 mode1, mode2 = gitmode[flag1], gitmode[flag2]
                 if mode1 != mode2:
-                    header.append('old mode %s' % mode1)
-                    header.append('new mode %s' % mode2)
+                    header.append(b'old mode %s' % mode1)
+                    header.append(b'new mode %s' % mode2)
                 if copyop is not None:
                     if opts.showsimilarity:
                         sim = similar.score(ctx1[path1], ctx2[path2]) * 100
-                        header.append('similarity index %d%%' % sim)
-                    header.append('%s from %s' % (copyop, path1))
-                    header.append('%s to %s' % (copyop, path2))
+                        header.append(b'similarity index %d%%' % sim)
+                    header.append(b'%s from %s' % (copyop, path1))
+                    header.append(b'%s to %s' % (copyop, path2))
         elif revs:
             header.append(diffline(path1, revs))
 
@@ -2715,8 +3037,9 @@
         #  yes      | yes  *        *   *     | text diff | yes
         #  no       | *    *        *   *     | text diff | yes
         # [1]: hash(fctx.data()) is output, so fctx.data() cannot be faked
-        if binary and (not opts.git or (opts.git and opts.nobinary and not
-                                        opts.index)):
+        if binary and (
+            not opts.git or (opts.git and opts.nobinary and not opts.index)
+        ):
             # fast path: no binary content will be displayed, content1 and
             # content2 are only used for equivalent test. cmp() could have a
             # fast path.
@@ -2724,7 +3047,7 @@
                 content1 = b'\0'
             if fctx2 is not None:
                 if fctx1 is not None and not fctx1.cmp(fctx2):
-                    content2 = b'\0' # not different
+                    content2 = b'\0'  # not different
                 else:
                     content2 = b'\0\0'
         else:
@@ -2737,26 +3060,38 @@
         if binary and opts.git and not opts.nobinary:
             text = mdiff.b85diff(content1, content2)
             if text:
-                header.append('index %s..%s' %
-                              (gitindex(content1), gitindex(content2)))
-            hunks = (None, [text]),
+                header.append(
+                    b'index %s..%s' % (gitindex(content1), gitindex(content2))
+                )
+            hunks = ((None, [text]),)
         else:
             if opts.git and opts.index > 0:
                 flag = flag1
                 if flag is None:
                     flag = flag2
-                header.append('index %s..%s %s' %
-                              (gitindex(content1)[0:opts.index],
-                               gitindex(content2)[0:opts.index],
-                               gitmode[flag]))
-
-            uheaders, hunks = mdiff.unidiff(content1, date1,
-                                            content2, date2,
-                                            path1, path2,
-                                            binary=binary, opts=opts)
+                header.append(
+                    b'index %s..%s %s'
+                    % (
+                        gitindex(content1)[0 : opts.index],
+                        gitindex(content2)[0 : opts.index],
+                        gitmode[flag],
+                    )
+                )
+
+            uheaders, hunks = mdiff.unidiff(
+                content1,
+                date1,
+                content2,
+                date2,
+                path1,
+                path2,
+                binary=binary,
+                opts=opts,
+            )
             header.extend(uheaders)
         yield fctx1, fctx2, header, hunks
 
+
 def diffstatsum(stats):
     maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
     for f, a, r, b in stats:
@@ -2768,6 +3103,7 @@
 
     return maxfile, maxtotal, addtotal, removetotal, binary
 
+
 def diffstatdata(lines):
     diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
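+    # matches the plain (non-git) header form "diff -r <rev> <file>"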
 
@@ -2784,33 +3120,35 @@
     inheader = False
 
     for line in lines:
-        if line.startswith('diff'):
+        if line.startswith(b'diff'):
             addresult()
             # starting a new file diff
             # set numbers to 0 and reset inheader
             inheader = True
             adds, removes, isbinary = 0, 0, False
-            if line.startswith('diff --git a/'):
+            if line.startswith(b'diff --git a/'):
                 filename = gitre.search(line).group(2)
-            elif line.startswith('diff -r'):
+            elif line.startswith(b'diff -r'):
                 # format: "diff -r ... -r ... filename"
                 filename = diffre.search(line).group(1)
-        elif line.startswith('@@'):
+        elif line.startswith(b'@@'):
             inheader = False
-        elif line.startswith('+') and not inheader:
+        elif line.startswith(b'+') and not inheader:
             adds += 1
-        elif line.startswith('-') and not inheader:
+        elif line.startswith(b'-') and not inheader:
             removes += 1
-        elif (line.startswith('GIT binary patch') or
-              line.startswith('Binary file')):
+        elif line.startswith(b'GIT binary patch') or line.startswith(
+            b'Binary file'
+        ):
             isbinary = True
-        elif line.startswith('rename from'):
+        elif line.startswith(b'rename from'):
             filename = line[12:]
-        elif line.startswith('rename to'):
-            filename += ' => %s' % line[10:]
+        elif line.startswith(b'rename to'):
+            filename += b' => %s' % line[10:]
     addresult()
     return results
 
+
 def diffstat(lines, width=80):
     output = []
     stats = diffstatdata(lines)
@@ -2833,21 +3171,31 @@
 
     for filename, adds, removes, isbinary in stats:
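+        # each row renders like " mercurial/patch.py |   12 +++++++-----"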
         if isbinary:
-            count = 'Bin'
+            count = b'Bin'
         else:
-            count = '%d' % (adds + removes)
-        pluses = '+' * scale(adds)
-        minuses = '-' * scale(removes)
-        output.append(' %s%s |  %*s %s%s\n' %
-                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
-                       countwidth, count, pluses, minuses))
+            count = b'%d' % (adds + removes)
+        pluses = b'+' * scale(adds)
+        minuses = b'-' * scale(removes)
+        output.append(
+            b' %s%s |  %*s %s%s\n'
+            % (
+                filename,
+                b' ' * (maxname - encoding.colwidth(filename)),
+                countwidth,
+                count,
+                pluses,
+                minuses,
+            )
+        )
 
     if stats:
-        output.append(_(' %d files changed, %d insertions(+), '
-                        '%d deletions(-)\n')
-                      % (len(stats), totaladds, totalremoves))
-
-    return ''.join(output)
+        output.append(
+            _(b' %d files changed, %d insertions(+), %d deletions(-)\n')
+            % (len(stats), totaladds, totalremoves)
+        )
+
+    return b''.join(output)
+
 
 def diffstatui(*args, **kw):
     '''like diffstat(), but yields 2-tuples of (output, label) for
@@ -2855,15 +3203,15 @@
     '''
 
     for line in diffstat(*args, **kw).splitlines():
-        if line and line[-1] in '+-':
-            name, graph = line.rsplit(' ', 1)
-            yield (name + ' ', '')
+        if line and line[-1] in b'+-':
+            name, graph = line.rsplit(b' ', 1)
+            yield (name + b' ', b'')
             m = re.search(br'\++', graph)
             if m:
-                yield (m.group(0), 'diffstat.inserted')
+                yield (m.group(0), b'diffstat.inserted')
             m = re.search(br'-+', graph)
             if m:
-                yield (m.group(0), 'diffstat.deleted')
+                yield (m.group(0), b'diffstat.deleted')
         else:
-            yield (line, '')
-        yield ('\n', '')
+            yield (line, b'')
+        yield (b'\n', b'')
--- a/mercurial/pathutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/pathutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,9 +13,11 @@
     util,
 )
 
+
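+# fold case and strip codepoints that HFS+ ignores, so look-alike names
+# cannot sneak a ".hg" component past the checks below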
 def _lowerclean(s):
     return encoding.hfsignoreclean(s.lower())
 
+
 class pathauditor(object):
     '''ensure that a filesystem path contains no banned components.
     the following properties of a path are checked:
@@ -61,27 +63,32 @@
             return
         # AIX ignores "/" at end of path, others raise EISDIR.
         if util.endswithsep(path):
-            raise error.Abort(_("path ends in directory separator: %s") % path)
+            raise error.Abort(_(b"path ends in directory separator: %s") % path)
         parts = util.splitpath(path)
-        if (os.path.splitdrive(path)[0]
-            or _lowerclean(parts[0]) in ('.hg', '.hg.', '')
-            or pycompat.ospardir in parts):
-            raise error.Abort(_("path contains illegal component: %s") % path)
+        if (
+            os.path.splitdrive(path)[0]
+            or _lowerclean(parts[0]) in (b'.hg', b'.hg.', b'')
+            or pycompat.ospardir in parts
+        ):
+            raise error.Abort(_(b"path contains illegal component: %s") % path)
         # Windows shortname aliases
         for p in parts:
-            if "~" in p:
-                first, last = p.split("~", 1)
-                if last.isdigit() and first.upper() in ["HG", "HG8B6C"]:
-                    raise error.Abort(_("path contains illegal component: %s")
-                                     % path)
-        if '.hg' in _lowerclean(path):
+            if b"~" in p:
+                first, last = p.split(b"~", 1)
+                if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
+                    raise error.Abort(
+                        _(b"path contains illegal component: %s") % path
+                    )
+        if b'.hg' in _lowerclean(path):
             lparts = [_lowerclean(p.lower()) for p in parts]
-            for p in '.hg', '.hg.':
+            for p in b'.hg', b'.hg.':
                 if p in lparts[1:]:
                     pos = lparts.index(p)
                     base = os.path.join(*parts[:pos])
-                    raise error.Abort(_("path '%s' is inside nested repo %r")
-                                      % (path, pycompat.bytestr(base)))
+                    raise error.Abort(
+                        _(b"path '%s' is inside nested repo %r")
+                        % (path, pycompat.bytestr(base))
+                    )
 
         normparts = util.splitpath(normpath)
         assert len(parts) == len(normparts)
@@ -93,8 +100,8 @@
         # This means we won't accidentally traverse a symlink into some other
         # filesystem (which is potentially expensive to access).
         for i in range(len(parts)):
-            prefix = pycompat.ossep.join(parts[:i + 1])
-            normprefix = pycompat.ossep.join(normparts[:i + 1])
+            prefix = pycompat.ossep.join(parts[: i + 1])
+            normprefix = pycompat.ossep.join(normparts[: i + 1])
             if normprefix in self.auditeddir:
                 continue
             if self._realfs:
@@ -119,13 +126,16 @@
                 raise
         else:
             if stat.S_ISLNK(st.st_mode):
-                msg = (_('path %r traverses symbolic link %r')
-                       % (pycompat.bytestr(path), pycompat.bytestr(prefix)))
+                msg = _(b'path %r traverses symbolic link %r') % (
+                    pycompat.bytestr(path),
+                    pycompat.bytestr(prefix),
+                )
                 raise error.Abort(msg)
-            elif (stat.S_ISDIR(st.st_mode) and
-                  os.path.isdir(os.path.join(curpath, '.hg'))):
+            elif stat.S_ISDIR(st.st_mode) and os.path.isdir(
+                os.path.join(curpath, b'.hg')
+            ):
                 if not self.callback or not self.callback(curpath):
-                    msg = _("path '%s' is inside nested repo %r")
+                    msg = _(b"path '%s' is inside nested repo %r")
                     raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
 
     def check(self, path):
@@ -135,6 +145,7 @@
         except (OSError, error.Abort):
             return False
 
+
 def canonpath(root, cwd, myname, auditor=None):
     '''return the canonical path of myname, given cwd and root
 
@@ -188,11 +199,11 @@
     if auditor is None:
         auditor = pathauditor(root)
     if name != rootsep and name.startswith(rootsep):
-        name = name[len(rootsep):]
+        name = name[len(rootsep) :]
         auditor(name)
         return util.pconvert(name)
     elif name == root:
-        return ''
+        return b''
     else:
         # Determine whether `name' is in the hierarchy at or beneath `root',
         # by iterating name=dirname(name) until that causes no change (can't
@@ -208,7 +219,7 @@
             if s:
                 if not rel:
                     # name was actually the same as root (maybe a symlink)
-                    return ''
+                    return b''
                 rel.reverse()
                 name = os.path.join(*rel)
                 auditor(name)
@@ -225,15 +236,17 @@
         try:
             if cwd != root:
                 canonpath(root, root, myname, auditor)
-                relpath = util.pathto(root, cwd, '')
+                relpath = util.pathto(root, cwd, b'')
                 if relpath.endswith(pycompat.ossep):
                     relpath = relpath[:-1]
-                hint = (_("consider using '--cwd %s'") % relpath)
+                hint = _(b"consider using '--cwd %s'") % relpath
         except error.Abort:
             pass
 
-        raise error.Abort(_("%s not under root '%s'") % (myname, root),
-                         hint=hint)
+        raise error.Abort(
+            _(b"%s not under root '%s'") % (myname, root), hint=hint
+        )
+
 
 def normasprefix(path):
     '''normalize the specified path as path prefix
@@ -257,6 +270,7 @@
     else:
         return path
 
+
 # forward two methods from posixpath that do what we need, but we'd
 # rather not let our internals know that we're thinking in posix terms
 # - instead we'll let them be oblivious.
--- a/mercurial/phases.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/phases.py	Mon Oct 21 11:09:48 2019 -0400
@@ -113,6 +113,10 @@
     nullrev,
     short,
 )
+from .pycompat import (
+    getattr,
+    setattr,
+)
 from . import (
     error,
     pycompat,
@@ -121,10 +125,10 @@
     util,
 )
 
-_fphasesentry = struct.Struct('>i20s')
+_fphasesentry = struct.Struct(b'>i20s')
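+# each phase-heads entry is 24 bytes: a big-endian int phase number
+# followed by a 20-byte binary node hash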
 
-INTERNAL_FLAG = 64 # Phases for mercurial internal usage only
-HIDEABLE_FLAG = 32 # Phases that are hideable
+INTERNAL_FLAG = 64  # Phases for mercurial internal usage only
+HIDEABLE_FLAG = 32  # Phases that are hideable
 
 # record phase index
 public, draft, secret = range(3)
@@ -133,19 +137,21 @@
 allphases = range(internal + 1)
 trackedphases = allphases[1:]
 # record phase names
-cmdphasenames = ['public', 'draft', 'secret']  # known to `hg phase` command
+cmdphasenames = [b'public', b'draft', b'secret']  # known to `hg phase` command
 phasenames = [None] * len(allphases)
-phasenames[:len(cmdphasenames)] = cmdphasenames
-phasenames[archived] = 'archived'
-phasenames[internal] = 'internal'
+phasenames[: len(cmdphasenames)] = cmdphasenames
+phasenames[archived] = b'archived'
+phasenames[internal] = b'internal'
 # record phase property
 mutablephases = tuple(allphases[1:])
 remotehiddenphases = tuple(allphases[2:])
 localhiddenphases = tuple(p for p in allphases if p & HIDEABLE_FLAG)
 
+
 def supportinternal(repo):
     """True if the internal phase can be used on a repository"""
-    return 'internal-phase' in repo.requirements
+    return b'internal-phase' in repo.requirements
+
 
 def _readroots(repo, phasedefaults=None):
     """Read phase roots from disk
@@ -162,7 +168,7 @@
     dirty = False
     roots = [set() for i in allphases]
     try:
-        f, pending = txnutil.trypending(repo.root, repo.svfs, 'phaseroots')
+        f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
         try:
             for line in f:
                 phase, nh = line.split()
@@ -178,6 +184,7 @@
         dirty = True
     return roots, dirty
 
+
 def binaryencode(phasemapping):
     """encode a 'phase -> nodes' mapping into a binary stream
 
@@ -188,7 +195,8 @@
     for phase, nodes in enumerate(phasemapping):
         for head in nodes:
             binarydata.append(_fphasesentry.pack(phase, head))
-    return ''.join(binarydata)
+    return b''.join(binarydata)
+
 
 def binarydecode(stream):
     """decode a binary stream into a 'phase -> nodes' mapping
@@ -200,12 +208,13 @@
         entry = stream.read(entrysize)
         if len(entry) < entrysize:
             if entry:
-                raise error.Abort(_('bad phase-heads stream'))
+                raise error.Abort(_(b'bad phase-heads stream'))
             break
         phase, node = _fphasesentry.unpack(entry)
         headsbyphase[phase].append(node)
     return headsbyphase
 
+
 def _trackphasechange(data, rev, old, new):
     """add a phase move the <data> dictionnary
 
@@ -218,6 +227,7 @@
         old = existing[0]
     data[rev] = (old, new)
 
+
 class phasecache(object):
     def __init__(self, repo, phasedefaults, _load=True):
         if _load:
@@ -230,7 +240,7 @@
 
     def getrevset(self, repo, phases, subset=None):
         """return a smartset for the given phases"""
-        self.loadphaserevs(repo) # ensure phase's sets are loaded
+        self.loadphaserevs(repo)  # ensure phase's sets are loaded
         phases = set(phases)
         if public not in phases:
             # fast path: _phasesets contains the interesting sets,
@@ -274,16 +284,22 @@
 
     def replace(self, phcache):
         """replace all values in 'self' with content of phcache"""
-        for a in ('phaseroots', 'dirty', 'opener', '_loadedrevslen',
-                  '_phasesets'):
+        for a in (
+            b'phaseroots',
+            b'dirty',
+            b'opener',
+            b'_loadedrevslen',
+            b'_phasesets',
+        ):
             setattr(self, a, getattr(phcache, a))
 
     def _getphaserevsnative(self, repo):
         repo = repo.unfiltered()
         nativeroots = []
         for phase in trackedphases:
-            nativeroots.append(pycompat.maplist(repo.changelog.rev,
-                                                self.phaseroots[phase]))
+            nativeroots.append(
+                pycompat.maplist(repo.changelog.rev, self.phaseroots[phase])
+            )
         return repo.changelog.computephases(nativeroots)
 
     def _computephaserevspure(self, repo):
@@ -324,7 +340,7 @@
         if rev == nullrev:
             return public
         if rev < nullrev:
-            raise ValueError(_('cannot lookup negative revision'))
+            raise ValueError(_(b'cannot lookup negative revision'))
         if rev >= self._loadedrevslen:
             self.invalidate()
             self.loadphaserevs(repo)
@@ -336,7 +352,7 @@
     def write(self):
         if not self.dirty:
             return
-        f = self.opener('phaseroots', 'w', atomictemp=True, checkambig=True)
+        f = self.opener(b'phaseroots', b'w', atomictemp=True, checkambig=True)
         try:
             self._write(f)
         finally:
@@ -345,7 +361,7 @@
     def _write(self, fp):
         for phase, roots in enumerate(self.phaseroots):
             for h in sorted(roots):
-                fp.write('%i %s\n' % (phase, hex(h)))
+                fp.write(b'%i %s\n' % (phase, hex(h)))
         self.dirty = False
 
     def _updateroots(self, phase, newroots, tr):
@@ -353,14 +369,14 @@
         self.invalidate()
         self.dirty = True
 
-        tr.addfilegenerator('phase', ('phaseroots',), self._write)
-        tr.hookargs['phases_moved'] = '1'
+        tr.addfilegenerator(b'phase', (b'phaseroots',), self._write)
+        tr.hookargs[b'phases_moved'] = b'1'
 
     def registernew(self, repo, tr, targetphase, nodes):
         repo = repo.unfiltered()
         self._retractboundary(repo, tr, targetphase, nodes)
-        if tr is not None and 'phases' in tr.changes:
-            phasetracking = tr.changes['phases']
+        if tr is not None and b'phases' in tr.changes:
+            phasetracking = tr.changes[b'phases']
             torev = repo.changelog.rev
             phase = self.phase
             for n in nodes:
@@ -383,31 +399,35 @@
         if tr is None:
             phasetracking = None
         else:
-            phasetracking = tr.changes.get('phases')
+            phasetracking = tr.changes.get(b'phases')
 
         repo = repo.unfiltered()
 
-        changes = set() # set of revisions to be changed
-        delroots = [] # set of root deleted by this path
+        changes = set()  # set of revisions to be changed
+        delroots = []  # roots deleted by this code path
         for phase in pycompat.xrange(targetphase + 1, len(allphases)):
             # filter nodes that are not in a compatible phase already
-            nodes = [n for n in nodes
-                     if self.phase(repo, repo[n].rev()) >= phase]
+            nodes = [
+                n for n in nodes if self.phase(repo, repo[n].rev()) >= phase
+            ]
             if not nodes:
-                break # no roots to move anymore
+                break  # no roots to move anymore
 
             olds = self.phaseroots[phase]
 
-            affected = repo.revs('%ln::%ln', olds, nodes)
+            affected = repo.revs(b'%ln::%ln', olds, nodes)
             changes.update(affected)
             if dryrun:
                 continue
             for r in affected:
-                _trackphasechange(phasetracking, r, self.phase(repo, r),
-                                  targetphase)
+                _trackphasechange(
+                    phasetracking, r, self.phase(repo, r), targetphase
+                )
 
-            roots = set(ctx.node() for ctx in repo.set(
-                    'roots((%ln::) - %ld)', olds, affected))
+            roots = set(
+                ctx.node()
+                for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected)
+            )
             if olds != roots:
                 self._updateroots(phase, roots, tr)
                 # some roots may need to be declared for lower phases
@@ -420,27 +440,29 @@
         return changes
 
     def retractboundary(self, repo, tr, targetphase, nodes):
-        oldroots = self.phaseroots[:targetphase + 1]
+        oldroots = self.phaseroots[: targetphase + 1]
         if tr is None:
             phasetracking = None
         else:
-            phasetracking = tr.changes.get('phases')
+            phasetracking = tr.changes.get(b'phases')
         repo = repo.unfiltered()
-        if (self._retractboundary(repo, tr, targetphase, nodes)
-            and phasetracking is not None):
+        if (
+            self._retractboundary(repo, tr, targetphase, nodes)
+            and phasetracking is not None
+        ):
 
             # find the affected revisions
             new = self.phaseroots[targetphase]
             old = oldroots[targetphase]
-            affected = set(repo.revs('(%ln::) - (%ln::)', new, old))
+            affected = set(repo.revs(b'(%ln::) - (%ln::)', new, old))
 
             # find the phase of the affected revision
             for phase in pycompat.xrange(targetphase, -1, -1):
                 if phase:
                     roots = oldroots[phase]
-                    revs = set(repo.revs('%ln::%ld', roots, affected))
+                    revs = set(repo.revs(b'%ln::%ld', roots, affected))
                     affected -= revs
-                else: # public phase
+                else:  # public phase
                     revs = affected
                 for r in revs:
                     _trackphasechange(phasetracking, r, phase, targetphase)
@@ -451,30 +473,33 @@
         # phaseroots values, replace them.
         if targetphase in (archived, internal) and not supportinternal(repo):
             name = phasenames[targetphase]
-            msg = 'this repository does not support the %s phase' % name
+            msg = b'this repository does not support the %s phase' % name
             raise error.ProgrammingError(msg)
 
         repo = repo.unfiltered()
         currentroots = self.phaseroots[targetphase]
         finalroots = oldroots = set(currentroots)
-        newroots = [n for n in nodes
-                    if self.phase(repo, repo[n].rev()) < targetphase]
+        newroots = [
+            n for n in nodes if self.phase(repo, repo[n].rev()) < targetphase
+        ]
         if newroots:
 
             if nullid in newroots:
-                raise error.Abort(_('cannot change null revision phase'))
+                raise error.Abort(_(b'cannot change null revision phase'))
             currentroots = currentroots.copy()
             currentroots.update(newroots)
 
             # Only compute new roots for revs above the roots that are being
             # retracted.
             minnewroot = min(repo[n].rev() for n in newroots)
-            aboveroots = [n for n in currentroots
-                          if repo[n].rev() >= minnewroot]
-            updatedroots = repo.set('roots(%ln::)', aboveroots)
+            aboveroots = [
+                n for n in currentroots if repo[n].rev() >= minnewroot
+            ]
+            updatedroots = repo.set(b'roots(%ln::)', aboveroots)
 
-            finalroots = set(n for n in currentroots if repo[n].rev() <
-                             minnewroot)
+            finalroots = set(
+                n for n in currentroots if repo[n].rev() < minnewroot
+            )
             finalroots.update(ctx.node() for ctx in updatedroots)
         if finalroots != oldroots:
             self._updateroots(targetphase, finalroots, tr)
@@ -487,14 +512,15 @@
         Nothing is lost as unknown nodes only hold data for their descendants.
         """
         filtered = False
-        nodemap = repo.changelog.nodemap # to filter unknown nodes
+        nodemap = repo.changelog.nodemap  # to filter unknown nodes
         for phase, nodes in enumerate(self.phaseroots):
             missing = sorted(node for node in nodes if node not in nodemap)
             if missing:
                 for mnode in missing:
                     repo.ui.debug(
-                        'removing unknown node %s from %i-phase boundary\n'
-                        % (short(mnode), phase))
+                        b'removing unknown node %s from %i-phase boundary\n'
+                        % (short(mnode), phase)
+                    )
                 nodes.symmetric_difference_update(missing)
                 filtered = True
         if filtered:
@@ -509,6 +535,7 @@
         # (see branchmap one)
         self.invalidate()
 
+
 def advanceboundary(repo, tr, targetphase, nodes, dryrun=None):
     """Add nodes to a phase changing other nodes phases if necessary.
 
@@ -522,12 +549,14 @@
     Returns a set of revs whose phase is changed or should be changed
     """
     phcache = repo._phasecache.copy()
-    changes = phcache.advanceboundary(repo, tr, targetphase, nodes,
-                                      dryrun=dryrun)
+    changes = phcache.advanceboundary(
+        repo, tr, targetphase, nodes, dryrun=dryrun
+    )
     if not dryrun:
         repo._phasecache.replace(phcache)
     return changes
 
+
 def retractboundary(repo, tr, targetphase, nodes):
     """Set nodes back to a phase changing other nodes phases if
     necessary.
@@ -540,6 +569,7 @@
     phcache.retractboundary(repo, tr, targetphase, nodes)
     repo._phasecache.replace(phcache)
 
+
 def registernew(repo, tr, targetphase, nodes):
     """register a new revision and its phase
 
@@ -550,11 +580,12 @@
     phcache.registernew(repo, tr, targetphase, nodes)
     repo._phasecache.replace(phcache)
 
+
 def listphases(repo):
     """List phases root for serialization over pushkey"""
     # Use ordered dictionary so behavior is deterministic.
     keys = util.sortdict()
-    value = '%i' % draft
+    value = b'%i' % draft
     cl = repo.unfiltered().changelog
     for root in repo._phasecache.phaseroots[draft]:
         if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
@@ -577,18 +608,19 @@
         #
         # The server can't handle it on its own as it has no idea of
         # client phase data.
-        keys['publishing'] = 'True'
+        keys[b'publishing'] = b'True'
     return keys
 
+
 def pushphase(repo, nhex, oldphasestr, newphasestr):
     """List phases root for serialization over pushkey"""
     repo = repo.unfiltered()
     with repo.lock():
         currentphase = repo[nhex].phase()
-        newphase = abs(int(newphasestr)) # let's avoid negative index surprise
-        oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
+        newphase = abs(int(newphasestr))  # let's avoid negative index surprise
+        oldphase = abs(int(oldphasestr))  # let's avoid negative index surprise
         if currentphase == oldphase and newphase < oldphase:
-            with repo.transaction('pushkey-phase') as tr:
+            with repo.transaction(b'pushkey-phase') as tr:
                 advanceboundary(repo, tr, newphase, [bin(nhex)])
             return True
         elif currentphase == newphase:
@@ -597,6 +629,7 @@
         else:
             return False
 
+
 def subsetphaseheads(repo, subset):
     """Finds the phase heads for a subset of a history
 
@@ -609,10 +642,11 @@
     # No need to keep track of secret phase; any heads in the subset that
     # are not mentioned are implicitly secret.
     for phase in allphases[:secret]:
-        revset = "heads(%%ln & %s())" % phasenames[phase]
+        revset = b"heads(%%ln & %s())" % phasenames[phase]
         headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
     return headsbyphase
 
+
 def updatephases(repo, trgetter, headsbyphase):
     """Updates the repo with the given phase heads"""
     # Now advance phase boundaries of all but secret phase
@@ -621,11 +655,12 @@
     # to update. This avoids creating empty transactions during no-op operations.
 
     for phase in allphases[:-1]:
-        revset = '%ln - _phase(%s)'
+        revset = b'%ln - _phase(%s)'
         heads = [c.node() for c in repo.set(revset, headsbyphase[phase], phase)]
         if heads:
             advanceboundary(repo, trgetter(), phase, heads)
 
+
 def analyzeremotephases(repo, subset, roots):
     """Compute phases heads and root in a subset of node from root dict
 
@@ -637,26 +672,34 @@
     repo = repo.unfiltered()
     # build list from dictionary
     draftroots = []
-    nodemap = repo.changelog.nodemap # to filter unknown nodes
-    for nhex, phase in roots.iteritems():
-        if nhex == 'publishing': # ignore data related to publish option
+    nodemap = repo.changelog.nodemap  # to filter unknown nodes
+    for nhex, phase in pycompat.iteritems(roots):
+        if nhex == b'publishing':  # ignore data related to publish option
             continue
         node = bin(nhex)
         phase = int(phase)
         if phase == public:
             if node != nullid:
-                repo.ui.warn(_('ignoring inconsistent public root'
-                               ' from remote: %s\n') % nhex)
+                repo.ui.warn(
+                    _(
+                        b'ignoring inconsistent public root'
+                        b' from remote: %s\n'
+                    )
+                    % nhex
+                )
         elif phase == draft:
             if node in nodemap:
                 draftroots.append(node)
         else:
-            repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
-                         % (phase, nhex))
+            repo.ui.warn(
+                _(b'ignoring unexpected root from remote: %i %s\n')
+                % (phase, nhex)
+            )
     # compute heads
     publicheads = newheads(repo, subset, draftroots)
     return publicheads, draftroots
 
+
 class remotephasessummary(object):
     """summarize phase information on the remote side
 
@@ -670,14 +713,15 @@
         unfi = repo.unfiltered()
         self._allremoteroots = remoteroots
 
-        self.publishing = remoteroots.get('publishing', False)
+        self.publishing = remoteroots.get(b'publishing', False)
 
         ana = analyzeremotephases(repo, remotesubset, remoteroots)
         self.publicheads, self.draftroots = ana
         # Get the list of all "heads" revs draft on remote
-        dheads = unfi.set('heads(%ln::%ln)', self.draftroots, remotesubset)
+        dheads = unfi.set(b'heads(%ln::%ln)', self.draftroots, remotesubset)
         self.draftheads = [c.node() for c in dheads]
 
+
 def newheads(repo, heads, roots):
     """compute new head of a subset minus another
 
@@ -698,48 +742,50 @@
     new_heads = set(rev(n) for n in heads if n != nullid)
     roots = [rev(n) for n in roots]
     # compute the area we need to remove
-    affected_zone = repo.revs("(%ld::%ld)", roots, new_heads)
+    affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
     # heads in the area are no longer heads
     new_heads.difference_update(affected_zone)
     # revisions in the area have children outside of it,
     # They might be new heads
-    candidates = repo.revs("parents(%ld + (%ld and merge())) and not null",
-                           roots, affected_zone)
+    candidates = repo.revs(
+        b"parents(%ld + (%ld and merge())) and not null", roots, affected_zone
+    )
     candidates -= affected_zone
     if new_heads or candidates:
         # remove candidate that are ancestors of other heads
         new_heads.update(candidates)
-        prunestart = repo.revs("parents(%ld) and not null", new_heads)
+        prunestart = repo.revs(b"parents(%ld) and not null", new_heads)
         pruned = dagop.reachableroots(repo, candidates, prunestart)
         new_heads.difference_update(pruned)
 
     return pycompat.maplist(cl.node, sorted(new_heads))
 
+
 def newcommitphase(ui):
     """helper to get the target phase of new commit
 
     Handle all possible values for the phases.new-commit options.
 
     """
-    v = ui.config('phases', 'new-commit')
+    v = ui.config(b'phases', b'new-commit')
     try:
         return phasenames.index(v)
     except ValueError:
         try:
             return int(v)
         except ValueError:
-            msg = _("phases.new-commit: not a valid phase name ('%s')")
+            msg = _(b"phases.new-commit: not a valid phase name ('%s')")
             raise error.ConfigError(msg % v)
 
+
 def hassecret(repo):
     """utility function that check if a repo have any secret changeset."""
     return bool(repo._phasecache.phaseroots[2])
 
+
 def preparehookargs(node, old, new):
     if old is None:
-        old = ''
+        old = b''
     else:
         old = phasenames[old]
-    return {'node': node,
-            'oldphase': old,
-            'phase': phasenames[new]}
+    return {b'node': node, b'oldphase': old, b'phase': phasenames[new]}
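
The advanceboundary()/retractboundary() pair reworked above is the machinery
behind the ``hg phase`` command: advancing moves phase roots toward public,
retracting moves them back toward secret. As a rough illustration of the
user-visible behavior (standard ``hg phase`` usage, not part of this
changeset)::

   $ hg phase -r .                  # report the phase of the working parent
   $ hg phase --public -r .         # advance the boundary: draft -> public
   $ hg phase --force --draft -r .  # retract it; --force is needed to move
                                    # a changeset out of the public phase
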
--- a/mercurial/policy.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/policy.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,6 +10,8 @@
 import os
 import sys
 
+from .pycompat import getattr
+
 # Rules for how modules can be loaded. Values are:
 #
 #    c - require C extensions
@@ -39,6 +41,7 @@
 
 try:
     from . import __modulepolicy__
+
     policy = __modulepolicy__.modulepolicy
 except ImportError:
     pass
@@ -57,6 +60,7 @@
 else:
     policy = os.environ.get(r'HGMODULEPOLICY', policy)
 
+
 def _importfrom(pkgname, modname):
     # from .<pkgname> import <modname> (where . is looked through this module)
     fakelocals = {}
@@ -66,9 +70,10 @@
     except AttributeError:
         raise ImportError(r'cannot import name %s' % modname)
     # force import; fakelocals[modname] may be replaced with the real module
-    getattr(mod, r'__doc__', None)
+    getattr(mod, '__doc__', None)
     return fakelocals[modname]
 
+
 # keep in sync with "version" in C modules
 _cextversions = {
     (r'cext', r'base85'): 1,
@@ -86,13 +91,17 @@
     (r'cffi', r'parsers'): (r'pure', r'parsers'),
 }
 
+
 def _checkmod(pkgname, modname, mod):
     expected = _cextversions.get((pkgname, modname))
-    actual = getattr(mod, r'version', None)
+    actual = getattr(mod, 'version', None)
     if actual != expected:
-        raise ImportError(r'cannot import module %s.%s '
-                          r'(expected version: %d, actual: %r)'
-                          % (pkgname, modname, expected, actual))
+        raise ImportError(
+            r'cannot import module %s.%s '
+            r'(expected version: %d, actual: %r)'
+            % (pkgname, modname, expected, actual)
+        )
+
 
 def importmod(modname):
     """Import module according to policy and check API version"""
@@ -114,10 +123,12 @@
     pn, mn = _modredirects.get((purepkg, modname), (purepkg, modname))
     return _importfrom(pn, mn)
 
+
 def _isrustpermissive():
     """Assuming the policy is a Rust one, tell if it's permissive."""
     return policy.endswith(b'-allow')
 
+
 def importrust(modname, member=None, default=None):
     """Import Rust module according to policy and availability.
 
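importmod() and importrust() consult the module policy set up earlier in this
file, and HGMODULEPOLICY can override that policy per invocation. Two standard
examples (assuming an installed ``hg``; not part of this changeset)::

   $ HGMODULEPOLICY=py hg version   # force the pure-Python implementations
   $ HGMODULEPOLICY=c hg version    # require the C extensions, or fail
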
--- a/mercurial/posix.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/posix.py	Mon Oct 21 11:09:48 2019 -0400
@@ -21,6 +21,10 @@
 import unicodedata
 
 from .i18n import _
+from .pycompat import (
+    getattr,
+    open,
+)
 from . import (
     encoding,
     error,
@@ -40,8 +44,11 @@
     # poor souls, just say we tried and that it failed so we fall back
     # to copies.
     def oslink(src, dst):
-        raise OSError(errno.EINVAL,
-                      'hardlinks not supported: %s to %s' % (src, dst))
+        raise OSError(
+            errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst)
+        )
+
+
 readlink = os.readlink
 unlink = os.unlink
 rename = os.rename
@@ -52,6 +59,7 @@
 os.umask(umask)
 
 if not pycompat.ispy3:
+
     def posixfile(name, mode=r'r', buffering=-1):
         fp = open(name, mode=mode, buffering=buffering)
         # The position when opening in append mode is implementation defined, so
@@ -59,11 +67,14 @@
         if r'a' in mode:
             fp.seek(0, os.SEEK_END)
         return fp
+
+
 else:
     # The underlying file object seeks as required in Python 3:
     # https://github.com/python/cpython/blob/v3.7.3/Modules/_io/fileio.c#L474
     posixfile = open
 
+
 def split(p):
     '''Same as posixpath.split, but faster
 
@@ -78,47 +89,54 @@
     ...           b'']:
     ...     assert split(f) == posixpath.split(f), f
     '''
-    ht = p.rsplit('/', 1)
+    ht = p.rsplit(b'/', 1)
     if len(ht) == 1:
-        return '', p
-    nh = ht[0].rstrip('/')
+        return b'', p
+    nh = ht[0].rstrip(b'/')
     if nh:
         return nh, ht[1]
-    return ht[0] + '/', ht[1]
+    return ht[0] + b'/', ht[1]
+
 
 def openhardlinks():
     '''return true if it is safe to hold open file handles to hardlinks'''
     return True
 
+
 def nlinks(name):
     '''return number of hardlinks for the given file'''
     return os.lstat(name).st_nlink
 
+
 def parsepatchoutput(output_line):
     """parses the output produced by patch and returns the filename"""
     pf = output_line[14:]
-    if pycompat.sysplatform == 'OpenVMS':
-        if pf[0] == '`':
-            pf = pf[1:-1] # Remove the quotes
+    if pycompat.sysplatform == b'OpenVMS':
+        if pf[0] == b'`':
+            pf = pf[1:-1]  # Remove the quotes
     else:
-        if pf.startswith("'") and pf.endswith("'") and " " in pf:
-            pf = pf[1:-1] # Remove the quotes
+        if pf.startswith(b"'") and pf.endswith(b"'") and b" " in pf:
+            pf = pf[1:-1]  # Remove the quotes
     return pf
 
+
 def sshargs(sshcmd, host, user, port):
     '''Build argument list for ssh'''
-    args = user and ("%s@%s" % (user, host)) or host
-    if '-' in args[:1]:
+    args = user and (b"%s@%s" % (user, host)) or host
+    if b'-' in args[:1]:
         raise error.Abort(
-            _('illegal ssh hostname or username starting with -: %s') % args)
+            _(b'illegal ssh hostname or username starting with -: %s') % args
+        )
     args = shellquote(args)
     if port:
-        args = '-p %s %s' % (shellquote(port), args)
+        args = b'-p %s %s' % (shellquote(port), args)
     return args
 
+
 def isexec(f):
     """check whether a file is executable"""
-    return (os.lstat(f).st_mode & 0o100 != 0)
+    return os.lstat(f).st_mode & 0o100 != 0
+
 
 def setflags(f, l, x):
     st = os.lstat(f)
@@ -126,7 +144,7 @@
     if l:
         if not stat.S_ISLNK(s):
             # switch file to link
-            fp = open(f, 'rb')
+            fp = open(f, b'rb')
             data = fp.read()
             fp.close()
             unlink(f)
@@ -134,7 +152,7 @@
                 os.symlink(data, f)
             except OSError:
                 # failed to make a link, rewrite file
-                fp = open(f, "wb")
+                fp = open(f, b"wb")
                 fp.write(data)
                 fp.close()
         # no chmod needed at this point
@@ -143,18 +161,18 @@
         # switch link to file
         data = os.readlink(f)
         unlink(f)
-        fp = open(f, "wb")
+        fp = open(f, b"wb")
         fp.write(data)
         fp.close()
-        s = 0o666 & ~umask # avoid restatting for chmod
+        s = 0o666 & ~umask  # avoid restatting for chmod
 
     sx = s & 0o100
     if st.st_nlink > 1 and bool(x) != bool(sx):
         # the file is a hardlink, break it
-        with open(f, "rb") as fp:
+        with open(f, b"rb") as fp:
             data = fp.read()
         unlink(f)
-        with open(f, "wb") as fp:
+        with open(f, b"wb") as fp:
             fp.write(data)
 
     if x and not sx:
@@ -165,6 +183,7 @@
         # Turn off all +x bits
         os.chmod(f, s & 0o666)
 
+
 def copymode(src, dst, mode=None, enforcewritable=False):
     '''Copy the file mode from the file at path src to dst.
     If src doesn't exist, we're using mode instead. If mode is None, we're
@@ -186,6 +205,7 @@
 
     os.chmod(dst, new_mode)
 
+
 def checkexec(path):
     """
     Check whether the given path is on a filesystem with UNIX-like exec flags
@@ -199,9 +219,9 @@
 
     try:
         EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
-        basedir = os.path.join(path, '.hg')
-        cachedir = os.path.join(basedir, 'wcache')
-        storedir = os.path.join(basedir, 'store')
+        basedir = os.path.join(path, b'.hg')
+        cachedir = os.path.join(basedir, b'wcache')
+        storedir = os.path.join(basedir, b'store')
         if not os.path.exists(cachedir):
             try:
                 # we want to create the 'cache' directory, not the '.hg' one.
@@ -216,8 +236,8 @@
                 # so that other fallback logic triggers
                 pass
         if os.path.isdir(cachedir):
-            checkisexec = os.path.join(cachedir, 'checkisexec')
-            checknoexec = os.path.join(cachedir, 'checknoexec')
+            checkisexec = os.path.join(cachedir, b'checkisexec')
+            checknoexec = os.path.join(cachedir, b'checknoexec')
 
             try:
                 m = os.stat(checkisexec).st_mode
@@ -234,7 +254,7 @@
                     except OSError as e:
                         if e.errno != errno.ENOENT:
                             raise
-                        open(checknoexec, 'w').close() # might fail
+                        open(checknoexec, b'w').close()  # might fail
                         m = os.stat(checknoexec).st_mode
                     if m & EXECFLAGS == 0:
                         # check-exec is exec and check-no-exec is not exec
@@ -250,7 +270,7 @@
             # check directly in path and don't leave checkisexec behind
             checkdir = path
             checkisexec = None
-        fh, fn = pycompat.mkstemp(dir=checkdir, prefix='hg-checkexec-')
+        fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-')
         try:
             os.close(fh)
             m = os.stat(fn).st_mode
@@ -268,13 +288,14 @@
         # we don't care, the user probably won't be able to commit anyway
         return False
 
+
 def checklink(path):
     """check whether the given path is on a symlink-capable filesystem"""
     # mktemp is not racy because symlink creation will fail if the
     # file already exists
     while True:
-        cachedir = os.path.join(path, '.hg', 'wcache')
-        checklink = os.path.join(cachedir, 'checklink')
+        cachedir = os.path.join(path, b'.hg', b'wcache')
+        checklink = os.path.join(cachedir, b'checklink')
         # try fast path, read only
         if os.path.islink(checklink):
             return True
@@ -283,22 +304,24 @@
         else:
             checkdir = path
             cachedir = None
-        name = tempfile.mktemp(dir=pycompat.fsdecode(checkdir),
-                               prefix=r'checklink-')
+        name = tempfile.mktemp(
+            dir=pycompat.fsdecode(checkdir), prefix=r'checklink-'
+        )
         name = pycompat.fsencode(name)
         try:
             fd = None
             if cachedir is None:
-                fd = pycompat.namedtempfile(dir=checkdir,
-                                            prefix='hg-checklink-')
+                fd = pycompat.namedtempfile(
+                    dir=checkdir, prefix=b'hg-checklink-'
+                )
                 target = os.path.basename(fd.name)
             else:
                 # create a fixed file to link to; doesn't matter if it
                 # already exists.
-                target = 'checklink-target'
+                target = b'checklink-target'
                 try:
                     fullpath = os.path.join(cachedir, target)
-                    open(fullpath, 'w').close()
+                    open(fullpath, b'w').close()
                 except IOError as inst:
                     if inst[0] == errno.EACCES:
                         # If we can't write to cachedir, just pretend
@@ -334,10 +357,12 @@
                 unlink(name)
             return False
 
+
 def checkosfilename(path):
     '''Check that the base-relative path is a valid filename on this platform.
     Returns None if the path is ok, or a UI string describing the problem.'''
-    return None # on posix platforms, every path is ok
+    return None  # on posix platforms, every path is ok
+
 
 def getfsmountpoint(dirpath):
     '''Get the filesystem mount point from a directory (best-effort)
@@ -346,6 +371,7 @@
     '''
     return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath)
 
+
 def getfstype(dirpath):
     '''Get the filesystem type name from a directory (best-effort)
 
@@ -353,20 +379,25 @@
     '''
     return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
 
+
 def setbinary(fd):
     pass
 
+
 def pconvert(path):
     return path
 
+
 def localpath(path):
     return path
 
+
 def samefile(fpath1, fpath2):
     """Returns whether path1 and path2 refer to the same file. This is only
     guaranteed to work for files, not directories."""
     return os.path.samefile(fpath1, fpath2)
 
+
 def samedevice(fpath1, fpath2):
     """Returns whether fpath1 and fpath2 are on the same device. This is only
     guaranteed to work for files, not directories."""
@@ -374,10 +405,12 @@
     st2 = os.lstat(fpath2)
     return st1.st_dev == st2.st_dev
 
+
 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
 def normcase(path):
     return path.lower()
 
+
 # what normcase does to ASCII strings
 normcasespec = encoding.normcasespecs.lower
 # fallback normcase function for non-ASCII strings
@@ -415,7 +448,7 @@
             u = path.decode('utf-8')
         except UnicodeDecodeError:
             # OS X percent-encodes any bytes that aren't valid utf-8
-            s = ''
+            s = b''
             pos = 0
             l = len(path)
             while pos < l:
@@ -423,7 +456,7 @@
                     c = encoding.getutf8char(path, pos)
                     pos += len(c)
                 except ValueError:
-                    c = '%%%02X' % ord(path[pos:pos + 1])
+                    c = b'%%%02X' % ord(path[pos : pos + 1])
                     pos += 1
                 s += c
 
@@ -434,17 +467,16 @@
         # drop HFS+ ignored characters
         return encoding.hfsignoreclean(enc)
 
-if pycompat.sysplatform == 'cygwin':
+
+if pycompat.sysplatform == b'cygwin':
     # workaround for cygwin, in which mount point part of path is
     # treated as case sensitive, even though underlying NTFS is case
     # insensitive.
 
     # default mount points
-    cygwinmountpoints = sorted([
-            "/usr/bin",
-            "/usr/lib",
-            "/cygdrive",
-            ], reverse=True)
+    cygwinmountpoints = sorted(
+        [b"/usr/bin", b"/usr/lib", b"/cygdrive",], reverse=True
+    )
 
     # use upper-ing as normcase as same as NTFS workaround
     def normcase(path):
@@ -459,7 +491,7 @@
                 continue
 
             mplen = len(mp)
-            if mplen == pathlen: # mount point itself
+            if mplen == pathlen:  # mount point itself
                 return mp
             if path[mplen] == pycompat.ossep:
                 return mp + encoding.upper(path[mplen:])
@@ -482,10 +514,13 @@
     def checklink(path):
         return False
 
+
 _needsshellquote = None
+
+
 def shellquote(s):
-    if pycompat.sysplatform == 'OpenVMS':
-        return '"%s"' % s
+    if pycompat.sysplatform == b'OpenVMS':
+        return b'"%s"' % s
     global _needsshellquote
     if _needsshellquote is None:
         _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search
@@ -493,18 +528,21 @@
         # "s" shouldn't have to be quoted
         return s
     else:
-        return "'%s'" % s.replace("'", "'\\''")
+        return b"'%s'" % s.replace(b"'", b"'\\''")
+
 
 def shellsplit(s):
     """Parse a command string in POSIX shell way (best-effort)"""
     return pycompat.shlexsplit(s, posix=True)
 
+
 def quotecommand(cmd):
     return cmd
 
+
 def testpid(pid):
     '''return False if pid dead, True if running or not sure'''
-    if pycompat.sysplatform == 'OpenVMS':
+    if pycompat.sysplatform == b'OpenVMS':
         return True
     try:
         os.kill(pid, 0)
@@ -512,20 +550,22 @@
     except OSError as inst:
         return inst.errno != errno.ESRCH
 
+
 def isowner(st):
     """Return True if the stat object st is from the current user."""
     return st.st_uid == os.getuid()
 
+
 def findexe(command):
     '''Find executable for command searching like which does.
     If command is a basename then PATH is searched for command.
     PATH isn't searched if command is an absolute or relative path.
     If command isn't found None is returned.'''
-    if pycompat.sysplatform == 'OpenVMS':
+    if pycompat.sysplatform == b'OpenVMS':
         return command
 
     def findexisting(executable):
-        'Will return executable if existing file'
+        b'Will return executable if existing file'
         if os.path.isfile(executable) and os.access(executable, os.X_OK):
             return executable
         return None
@@ -533,20 +573,23 @@
     if pycompat.ossep in command:
         return findexisting(command)
 
-    if pycompat.sysplatform == 'plan9':
-        return findexisting(os.path.join('/bin', command))
+    if pycompat.sysplatform == b'plan9':
+        return findexisting(os.path.join(b'/bin', command))
 
-    for path in encoding.environ.get('PATH', '').split(pycompat.ospathsep):
+    for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep):
         executable = findexisting(os.path.join(path, command))
         if executable is not None:
             return executable
     return None
 
+
 def setsignalhandler():
     pass
 
+
 _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}
 
+
 def statfiles(files):
     '''Stat each file in files. Yield each stat, or None if a file does not
     exist or has a type we don't care about.'''
@@ -563,10 +606,12 @@
             st = None
         yield st
 
+
 def getuser():
     '''return name of current user'''
     return pycompat.fsencode(getpass.getuser())
 
+
 def username(uid=None):
     """Return the name of the user with the given uid.
 
@@ -579,6 +624,7 @@
     except KeyError:
         return b'%d' % uid
 
+
 def groupname(gid=None):
     """Return the name of the group with the given gid.
 
@@ -591,6 +637,7 @@
     except KeyError:
         return pycompat.bytestr(gid)
 
+
 def groupmembers(name):
     """Return the list of members of the group with the given
     name, KeyError if the group does not exist.
@@ -598,19 +645,23 @@
     name = pycompat.fsdecode(name)
     return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem))
 
+
 def spawndetached(args):
-    return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
-                      args[0], args)
+    return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args)
+
 
 def gethgcmd():
     return sys.argv[:1]
 
+
 def makedir(path, notindexed):
     os.mkdir(path)
 
+
 def lookupreg(key, name=None, scope=None):
     return None
 
+
 def hidewindow():
     """Hide current shell window.
 
@@ -619,6 +670,7 @@
     """
     pass
 
+
 class cachestat(object):
     def __init__(self, path):
         self.stat = os.stat(path)
@@ -635,29 +687,34 @@
             # rest. However, one of the other fields changing indicates
             # something fishy going on, so return False if anything but atime
             # changes.
-            return (self.stat.st_mode == other.stat.st_mode and
-                    self.stat.st_ino == other.stat.st_ino and
-                    self.stat.st_dev == other.stat.st_dev and
-                    self.stat.st_nlink == other.stat.st_nlink and
-                    self.stat.st_uid == other.stat.st_uid and
-                    self.stat.st_gid == other.stat.st_gid and
-                    self.stat.st_size == other.stat.st_size and
-                    self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME] and
-                    self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME])
+            return (
+                self.stat.st_mode == other.stat.st_mode
+                and self.stat.st_ino == other.stat.st_ino
+                and self.stat.st_dev == other.stat.st_dev
+                and self.stat.st_nlink == other.stat.st_nlink
+                and self.stat.st_uid == other.stat.st_uid
+                and self.stat.st_gid == other.stat.st_gid
+                and self.stat.st_size == other.stat.st_size
+                and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME]
+                and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME]
+            )
         except AttributeError:
             return False
 
     def __ne__(self, other):
         return not self == other
 
+
 def statislink(st):
     '''check whether a stat result is a symlink'''
     return st and stat.S_ISLNK(st.st_mode)
 
+
 def statisexec(st):
     '''check whether a stat result is an executable file'''
     return st and (st.st_mode & 0o100 != 0)
 
+
 def poll(fds):
     """block until something happens on any file descriptor
 
@@ -674,10 +731,11 @@
                 if inst.args[0] == errno.EINTR:
                     continue
                 raise
-    except ValueError: # out of range file descriptor
+    except ValueError:  # out of range file descriptor
         raise NotImplementedError()
     return sorted(list(set(sum(res, []))))
 
+
 def readpipe(pipe):
     """Read all available data from a pipe."""
     # We can't fstat() a pipe because Linux will always report 0.
@@ -698,10 +756,11 @@
             except IOError:
                 break
 
-        return ''.join(chunks)
+        return b''.join(chunks)
     finally:
         fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
 
+
 def bindunixsocket(sock, path):
     """Bind the UNIX domain socket to the specified path"""
     # use relative path instead of full path at bind() if possible, since
@@ -710,7 +769,7 @@
     dirname, basename = os.path.split(path)
     bakwdfd = None
     if dirname:
-        bakwdfd = os.open('.', os.O_DIRECTORY)
+        bakwdfd = os.open(b'.', os.O_DIRECTORY)
         os.chdir(dirname)
     sock.bind(basename)
     if bakwdfd:
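
The shellquote() hunk above keeps the classic POSIX quoting rule: wrap the
argument in single quotes and rewrite each embedded single quote as ``'\''``.
A minimal standalone sketch of just that rule (the real function also
special-cases OpenVMS and skips quoting for obviously safe strings)::

   def shellquote(s):
       # close the quote, emit an escaped literal quote, reopen: '\''
       return "'%s'" % s.replace("'", "'\\''")

   print(shellquote("it's here"))  # prints 'it'\''s here'
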
--- a/mercurial/profiling.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/profiling.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,6 +10,10 @@
 import contextlib
 
 from .i18n import _
+from .pycompat import (
+    getattr,
+    open,
+)
 from . import (
     encoding,
     error,
@@ -18,6 +22,7 @@
     util,
 )
 
+
 def _loadprofiler(ui, profiler):
     """load profiler extension. return profile method, or None on failure"""
     extname = profiler
@@ -29,24 +34,27 @@
     else:
         return getattr(mod, 'profile', None)
 
+
 @contextlib.contextmanager
 def lsprofile(ui, fp):
-    format = ui.config('profiling', 'format')
-    field = ui.config('profiling', 'sort')
-    limit = ui.configint('profiling', 'limit')
-    climit = ui.configint('profiling', 'nested')
+    format = ui.config(b'profiling', b'format')
+    field = ui.config(b'profiling', b'sort')
+    limit = ui.configint(b'profiling', b'limit')
+    climit = ui.configint(b'profiling', b'nested')
 
-    if format not in ['text', 'kcachegrind']:
-        ui.warn(_("unrecognized profiling format '%s'"
-                    " - Ignored\n") % format)
-        format = 'text'
+    if format not in [b'text', b'kcachegrind']:
+        ui.warn(_(b"unrecognized profiling format '%s' - Ignored\n") % format)
+        format = b'text'
 
     try:
         from . import lsprof
     except ImportError:
-        raise error.Abort(_(
-            'lsprof not available - install from '
-            'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
+        raise error.Abort(
+            _(
+                b'lsprof not available - install from '
+                b'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'
+            )
+        )
     p = lsprof.Profiler()
     p.enable(subcalls=True)
     try:
@@ -54,8 +62,9 @@
     finally:
         p.disable()
 
-        if format == 'kcachegrind':
+        if format == b'kcachegrind':
             from . import lsprofcalltree
+
             calltree = lsprofcalltree.KCacheGrind(p)
             calltree.output(fp)
         else:
@@ -64,20 +73,25 @@
             stats.sort(pycompat.sysstr(field))
             stats.pprint(limit=limit, file=fp, climit=climit)
 
+
 @contextlib.contextmanager
 def flameprofile(ui, fp):
     try:
         from flamegraph import flamegraph
     except ImportError:
-        raise error.Abort(_(
-            'flamegraph not available - install from '
-            'https://github.com/evanhempel/python-flamegraph'))
+        raise error.Abort(
+            _(
+                b'flamegraph not available - install from '
+                b'https://github.com/evanhempel/python-flamegraph'
+            )
+        )
     # developer config: profiling.freq
-    freq = ui.configint('profiling', 'freq')
+    freq = ui.configint(b'profiling', b'freq')
     filter_ = None
     collapse_recursion = True
-    thread = flamegraph.ProfileThread(fp, 1.0 / freq,
-                                      filter_, collapse_recursion)
+    thread = flamegraph.ProfileThread(
+        fp, 1.0 / freq, filter_, collapse_recursion
+    )
     start_time = util.timer()
     try:
         thread.start()
@@ -85,45 +99,52 @@
     finally:
         thread.stop()
         thread.join()
-        print('Collected %d stack frames (%d unique) in %2.2f seconds.' % (
-            util.timer() - start_time, thread.num_frames(),
-            thread.num_frames(unique=True)))
+        print(
+            b'Collected %d stack frames (%d unique) in %2.2f seconds.'
+            % (
+                util.timer() - start_time,
+                thread.num_frames(),
+                thread.num_frames(unique=True),
+            )
+        )
+
 
 @contextlib.contextmanager
 def statprofile(ui, fp):
     from . import statprof
 
-    freq = ui.configint('profiling', 'freq')
+    freq = ui.configint(b'profiling', b'freq')
     if freq > 0:
         # Cannot reset when profiler is already active. So silently no-op.
         if statprof.state.profile_level == 0:
             statprof.reset(freq)
     else:
-        ui.warn(_("invalid sampling frequency '%s' - ignoring\n") % freq)
+        ui.warn(_(b"invalid sampling frequency '%s' - ignoring\n") % freq)
 
-    track = ui.config('profiling', 'time-track',
-                      pycompat.iswindows and 'cpu' or 'real')
-    statprof.start(mechanism='thread', track=track)
+    track = ui.config(
+        b'profiling', b'time-track', pycompat.iswindows and b'cpu' or b'real'
+    )
+    statprof.start(mechanism=b'thread', track=track)
 
     try:
         yield
     finally:
         data = statprof.stop()
 
-        profformat = ui.config('profiling', 'statformat')
+        profformat = ui.config(b'profiling', b'statformat')
 
         formats = {
-            'byline': statprof.DisplayFormats.ByLine,
-            'bymethod': statprof.DisplayFormats.ByMethod,
-            'hotpath': statprof.DisplayFormats.Hotpath,
-            'json': statprof.DisplayFormats.Json,
-            'chrome': statprof.DisplayFormats.Chrome,
+            b'byline': statprof.DisplayFormats.ByLine,
+            b'bymethod': statprof.DisplayFormats.ByMethod,
+            b'hotpath': statprof.DisplayFormats.Hotpath,
+            b'json': statprof.DisplayFormats.Json,
+            b'chrome': statprof.DisplayFormats.Chrome,
         }
 
         if profformat in formats:
             displayformat = formats[profformat]
         else:
-            ui.warn(_('unknown profiler output format: %s\n') % profformat)
+            ui.warn(_(b'unknown profiler output format: %s\n') % profformat)
             displayformat = statprof.DisplayFormats.Hotpath
 
         kwargs = {}
@@ -131,7 +152,7 @@
         def fraction(s):
             if isinstance(s, (float, int)):
                 return float(s)
-            if s.endswith('%'):
+            if s.endswith(b'%'):
                 v = float(s[:-1]) / 100
             else:
                 v = float(s)
@@ -139,25 +160,27 @@
                 return v
             raise ValueError(s)
 
-        if profformat == 'chrome':
-            showmin = ui.configwith(fraction, 'profiling', 'showmin', 0.005)
-            showmax = ui.configwith(fraction, 'profiling', 'showmax')
+        if profformat == b'chrome':
+            showmin = ui.configwith(fraction, b'profiling', b'showmin', 0.005)
+            showmax = ui.configwith(fraction, b'profiling', b'showmax')
             kwargs.update(minthreshold=showmin, maxthreshold=showmax)
-        elif profformat == 'hotpath':
+        elif profformat == b'hotpath':
             # inconsistent config: profiling.showmin
-            limit = ui.configwith(fraction, 'profiling', 'showmin', 0.05)
+            limit = ui.configwith(fraction, b'profiling', b'showmin', 0.05)
             kwargs[r'limit'] = limit
-            showtime = ui.configbool('profiling', 'showtime')
+            showtime = ui.configbool(b'profiling', b'showtime')
             kwargs[r'showtime'] = showtime
 
         statprof.display(fp, data=data, format=displayformat, **kwargs)
 
+
 class profile(object):
     """Start profiling.
 
     Profiling is active when the context manager is active. When the context
     manager exits, profiling results will be written to the configured output.
     """
+
     def __init__(self, ui, enabled=True):
         self._ui = ui
         self._output = None
@@ -185,35 +208,39 @@
         if self._started:
             return
         self._started = True
-        profiler = encoding.environ.get('HGPROF')
+        profiler = encoding.environ.get(b'HGPROF')
         proffn = None
         if profiler is None:
-            profiler = self._ui.config('profiling', 'type')
-        if profiler not in ('ls', 'stat', 'flame'):
+            profiler = self._ui.config(b'profiling', b'type')
+        if profiler not in (b'ls', b'stat', b'flame'):
             # try load profiler from extension with the same name
             proffn = _loadprofiler(self._ui, profiler)
             if proffn is None:
-                self._ui.warn(_("unrecognized profiler '%s' - ignored\n")
-                              % profiler)
-                profiler = 'stat'
+                self._ui.warn(
+                    _(b"unrecognized profiler '%s' - ignored\n") % profiler
+                )
+                profiler = b'stat'
 
-        self._output = self._ui.config('profiling', 'output')
+        self._output = self._ui.config(b'profiling', b'output')
 
         try:
-            if self._output == 'blackbox':
+            if self._output == b'blackbox':
                 self._fp = util.stringio()
             elif self._output:
                 path = self._ui.expandpath(self._output)
-                self._fp = open(path, 'wb')
+                self._fp = open(path, b'wb')
             elif pycompat.iswindows:
                 # parse escape sequence by win32print()
                 class uifp(object):
                     def __init__(self, ui):
                         self._ui = ui
+
                     def write(self, data):
                         self._ui.write_err(data)
+
                     def flush(self):
                         self._ui.flush()
+
                 self._fpdoclose = False
                 self._fp = uifp(self._ui)
             else:
@@ -222,30 +249,31 @@
 
             if proffn is not None:
                 pass
-            elif profiler == 'ls':
+            elif profiler == b'ls':
                 proffn = lsprofile
-            elif profiler == 'flame':
+            elif profiler == b'flame':
                 proffn = flameprofile
             else:
                 proffn = statprofile
 
             self._profiler = proffn(self._ui, self._fp)
             self._profiler.__enter__()
-        except: # re-raises
+        except:  # re-raises
             self._closefp()
             raise
 
     def __exit__(self, exception_type, exception_value, traceback):
         propagate = None
         if self._profiler is not None:
-            propagate = self._profiler.__exit__(exception_type, exception_value,
-                                                traceback)
-            if self._output == 'blackbox':
-                val = 'Profile:\n%s' % self._fp.getvalue()
+            propagate = self._profiler.__exit__(
+                exception_type, exception_value, traceback
+            )
+            if self._output == b'blackbox':
+                val = b'Profile:\n%s' % self._fp.getvalue()
                 # ui.log treats the input as a format string,
                 # so we need to escape any % signs.
-                val = val.replace('%', '%%')
-                self._ui.log('profile', val)
+                val = val.replace(b'%', b'%%')
+                self._ui.log(b'profile', val)
         self._closefp()
         return propagate
 
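Every knob read above lives in the ``[profiling]`` config section, and HGPROF
selects the profiler for a single run. A plausible configuration exercising
the options touched in this hunk (values are illustrative, not defaults)::

   [profiling]
   type = stat            # ls, stat, flame, or an extension providing profile()
   statformat = hotpath   # byline, bymethod, hotpath, json, or chrome
   showmin = 5%           # display threshold; fraction() accepts a trailing %
   output = ~/hg-profile.txt

or, equivalently for one command, ``HGPROF=stat hg --profile status``.
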
--- a/mercurial/progress.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/progress.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,12 +14,16 @@
 from .i18n import _
 from . import encoding
 
+
 def spacejoin(*args):
-    return ' '.join(s for s in args if s)
+    return b' '.join(s for s in args if s)
+
 
 def shouldprint(ui):
-    return not (ui.quiet or ui.plain('progress')) and (
-        ui._isatty(ui.ferr) or ui.configbool('progress', 'assume-tty'))
+    return not (ui.quiet or ui.plain(b'progress')) and (
+        ui._isatty(ui.ferr) or ui.configbool(b'progress', b'assume-tty')
+    )
+
 
 def fmtremaining(seconds):
     """format a number of remaining seconds in human readable way
@@ -27,39 +31,40 @@
     This will properly display seconds, minutes, hours, days if needed"""
     if seconds < 60:
         # i18n: format XX seconds as "XXs"
-        return _("%02ds") % (seconds)
+        return _(b"%02ds") % seconds
     minutes = seconds // 60
     if minutes < 60:
         seconds -= minutes * 60
         # i18n: format X minutes and YY seconds as "XmYYs"
-        return _("%dm%02ds") % (minutes, seconds)
+        return _(b"%dm%02ds") % (minutes, seconds)
     # we're going to ignore seconds in this case
     minutes += 1
     hours = minutes // 60
     minutes -= hours * 60
     if hours < 30:
         # i18n: format X hours and YY minutes as "XhYYm"
-        return _("%dh%02dm") % (hours, minutes)
+        return _(b"%dh%02dm") % (hours, minutes)
     # we're going to ignore minutes in this case
     hours += 1
     days = hours // 24
     hours -= days * 24
     if days < 15:
         # i18n: format X days and YY hours as "XdYYh"
-        return _("%dd%02dh") % (days, hours)
+        return _(b"%dd%02dh") % (days, hours)
     # we're going to ignore hours in this case
     days += 1
     weeks = days // 7
     days -= weeks * 7
     if weeks < 55:
         # i18n: format X weeks and YY days as "XwYYd"
-        return _("%dw%02dd") % (weeks, days)
+        return _(b"%dw%02dd") % (weeks, days)
     # we're going to ignore days and treat a year as 52 weeks
     weeks += 1
     years = weeks // 52
     weeks -= years * 52
     # i18n: format X years and YY weeks as "XyYYw"
-    return _("%dy%02dw") % (years, weeks)
+    return _(b"%dy%02dw") % (years, weeks)
+
 
 # file_write() and file_flush() of Python 2 do not restart on EINTR if
 # the file is attached to a "slow" device (e.g. a terminal) and raise
@@ -79,6 +84,7 @@
                 continue
             raise
 
+
 class progbar(object):
     def __init__(self, ui):
         self.ui = ui
@@ -91,59 +97,60 @@
         self.starttimes = {}
         self.startvals = {}
         self.printed = False
-        self.lastprint = time.time() + float(self.ui.config(
-            'progress', 'delay'))
+        self.lastprint = time.time() + float(
+            self.ui.config(b'progress', b'delay')
+        )
         self.curtopic = None
         self.lasttopic = None
         self.indetcount = 0
-        self.refresh = float(self.ui.config(
-            'progress', 'refresh'))
-        self.changedelay = max(3 * self.refresh,
-                               float(self.ui.config(
-                                   'progress', 'changedelay')))
-        self.order = self.ui.configlist('progress', 'format')
+        self.refresh = float(self.ui.config(b'progress', b'refresh'))
+        self.changedelay = max(
+            3 * self.refresh, float(self.ui.config(b'progress', b'changedelay'))
+        )
+        self.order = self.ui.configlist(b'progress', b'format')
         self.estimateinterval = self.ui.configwith(
-            float, 'progress', 'estimateinterval')
+            float, b'progress', b'estimateinterval'
+        )
 
     def show(self, now, topic, pos, item, unit, total):
         if not shouldprint(self.ui):
             return
         termwidth = self.width()
         self.printed = True
-        head = ''
+        head = b''
         needprogress = False
-        tail = ''
+        tail = b''
         for indicator in self.order:
-            add = ''
-            if indicator == 'topic':
+            add = b''
+            if indicator == b'topic':
                 add = topic
-            elif indicator == 'number':
+            elif indicator == b'number':
                 if total:
                     add = b'%*d/%d' % (len(str(total)), pos, total)
                 else:
                     add = b'%d' % pos
-            elif indicator.startswith('item') and item:
-                slice = 'end'
-                if '-' in indicator:
-                    wid = int(indicator.split('-')[1])
-                elif '+' in indicator:
-                    slice = 'beginning'
-                    wid = int(indicator.split('+')[1])
+            elif indicator.startswith(b'item') and item:
+                slice = b'end'
+                if b'-' in indicator:
+                    wid = int(indicator.split(b'-')[1])
+                elif b'+' in indicator:
+                    slice = b'beginning'
+                    wid = int(indicator.split(b'+')[1])
                 else:
                     wid = 20
-                if slice == 'end':
+                if slice == b'end':
                     add = encoding.trim(item, wid, leftside=True)
                 else:
                     add = encoding.trim(item, wid)
-                add += (wid - encoding.colwidth(add)) * ' '
-            elif indicator == 'bar':
-                add = ''
+                add += (wid - encoding.colwidth(add)) * b' '
+            elif indicator == b'bar':
+                add = b''
                 needprogress = True
-            elif indicator == 'unit' and unit:
+            elif indicator == b'unit' and unit:
                 add = unit
-            elif indicator == 'estimate':
+            elif indicator == b'estimate':
                 add = self.estimate(topic, pos, total, now)
-            elif indicator == 'speed':
+            elif indicator == b'speed':
                 add = self.speed(topic, pos, unit, now)
             if not needprogress:
                 head = spacejoin(head, add)
@@ -158,10 +165,10 @@
             progwidth = termwidth - used - 3
             if total and pos <= total:
                 amt = pos * progwidth // total
-                bar = '=' * (amt - 1)
+                bar = b'=' * (amt - 1)
                 if amt > 0:
-                    bar += '>'
-                bar += ' ' * (progwidth - amt)
+                    bar += b'>'
+                bar += b' ' * (progwidth - amt)
             else:
                 progwidth -= 3
                 self.indetcount += 1
@@ -169,20 +176,23 @@
                 # cursor bounce between the right and left sides
                 amt = self.indetcount % (2 * progwidth)
                 amt -= progwidth
-                bar = (' ' * int(progwidth - abs(amt)) + '<=>' +
-                       ' ' * int(abs(amt)))
-            prog = ''.join(('[', bar, ']'))
+                bar = (
+                    b' ' * int(progwidth - abs(amt))
+                    + b'<=>'
+                    + b' ' * int(abs(amt))
+                )
+            prog = b''.join((b'[', bar, b']'))
             out = spacejoin(head, prog, tail)
         else:
             out = spacejoin(head, tail)
-        self._writeerr('\r' + encoding.trim(out, termwidth))
+        self._writeerr(b'\r' + encoding.trim(out, termwidth))
         self.lasttopic = topic
         self._flusherr()
 
     def clear(self):
         if not self.printed or not self.lastprint or not shouldprint(self.ui):
             return
-        self._writeerr('\r%s\r' % (' ' * self.width()))
+        self._writeerr(b'\r%s\r' % (b' ' * self.width()))
         if self.printed:
             # force immediate re-paint of progress bar
             self.lastprint = 0
@@ -190,10 +200,10 @@
     def complete(self):
         if not shouldprint(self.ui):
             return
-        if self.ui.configbool('progress', 'clear-complete'):
+        if self.ui.configbool(b'progress', b'clear-complete'):
             self.clear()
         else:
-            self._writeerr('\n')
+            self._writeerr(b'\n')
         self._flusherr()
 
     def _flusherr(self):
@@ -204,11 +214,11 @@
 
     def width(self):
         tw = self.ui.termwidth()
-        return min(int(self.ui.config('progress', 'width', default=tw)), tw)
+        return min(int(self.ui.config(b'progress', b'width', default=tw)), tw)
 
     def estimate(self, topic, pos, total, now):
         if total is None:
-            return ''
+            return b''
         initialpos = self.startvals[topic]
         target = total - initialpos
         delta = pos - initialpos
@@ -216,23 +226,25 @@
             elapsed = now - self.starttimes[topic]
             seconds = (elapsed * (target - delta)) // delta + 1
             return fmtremaining(seconds)
-        return ''
+        return b''
 
     def speed(self, topic, pos, unit, now):
         initialpos = self.startvals[topic]
         delta = pos - initialpos
         elapsed = now - self.starttimes[topic]
         if elapsed > 0:
-            return _('%d %s/sec') % (delta / elapsed, unit)
-        return ''
+            return _(b'%d %s/sec') % (delta / elapsed, unit)
+        return b''
 
     def _oktoprint(self, now):
         '''Check if conditions are met to print - e.g. changedelay elapsed'''
-        if (self.lasttopic is None # first time we printed
+        if (
+            self.lasttopic is None  # first time we printed
             # not a topic change
             or self.curtopic == self.lasttopic
             # it's been long enough we should print anyway
-            or now - self.lastprint >= self.changedelay):
+            or now - self.lastprint >= self.changedelay
+        ):
             return True
         else:
             return False
@@ -263,7 +275,7 @@
             self.startvals[topic] = pos - newdelta
             self.starttimes[topic] = now - interval
 
-    def progress(self, topic, pos, item='', unit='', total=None):
+    def progress(self, topic, pos, item=b'', unit=b'', total=None):
         if pos is None:
             self.closetopic(topic)
             return
@@ -293,7 +305,7 @@
             # truncate the list of topics assuming all topics within
             # this one are also closed
             if topic in self.topics:
-                self.topics = self.topics[:self.topics.index(topic)]
+                self.topics = self.topics[: self.topics.index(topic)]
                 # reset the last topic to the one we just unwound to,
                 # so that higher-level topics will be stickier than
                 # lower-level topics
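
For reference, the fmtremaining() ladder above produces e.g.
``fmtremaining(90) == '1m30s'`` and rolls over to hours, days, weeks and years
as the estimate grows. The indicator names handled by show() map directly onto
the ``progress.format`` setting; a configuration using only indicators present
in this file (values illustrative)::

   [progress]
   delay = 1.5
   refresh = 0.1
   format = topic bar number estimate
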
--- a/mercurial/pure/base85.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/pure/base85.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,28 +11,34 @@
 
 from .. import pycompat
 
-_b85chars = pycompat.bytestr("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef"
-                             "ghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
+_b85chars = pycompat.bytestr(
+    b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef"
+    b"ghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"
+)
 _b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
 _b85dec = {}
 
+
 def _mkb85dec():
     for i, c in enumerate(_b85chars):
         _b85dec[c] = i
 
+
 def b85encode(text, pad=False):
     """encode text in base85 format"""
     l = len(text)
     r = l % 4
     if r:
-        text += '\0' * (4 - r)
+        text += b'\0' * (4 - r)
     longs = len(text) >> 2
-    words = struct.unpack('>%dL' % (longs), text)
+    words = struct.unpack(b'>%dL' % longs, text)
 
-    out = ''.join(_b85chars[(word // 52200625) % 85] +
-                  _b85chars2[(word // 7225) % 7225] +
-                  _b85chars2[word % 7225]
-                  for word in words)
+    out = b''.join(
+        _b85chars[(word // 52200625) % 85]
+        + _b85chars2[(word // 7225) % 7225]
+        + _b85chars2[word % 7225]
+        for word in words
+    )
 
     if pad:
         return out
@@ -44,6 +50,7 @@
     olen += l // 4 * 5
     return out[:olen]
 
+
 def b85decode(text):
     """decode base85-encoded text"""
     if not _b85dec:
@@ -52,17 +59,18 @@
     l = len(text)
     out = []
     for i in range(0, len(text), 5):
-        chunk = text[i:i + 5]
+        chunk = text[i : i + 5]
         chunk = pycompat.bytestr(chunk)
         acc = 0
         for j, c in enumerate(chunk):
             try:
                 acc = acc * 85 + _b85dec[c]
             except KeyError:
-                raise ValueError('bad base85 character at position %d'
-                                 % (i + j))
+                raise ValueError(
+                    b'bad base85 character at position %d' % (i + j)
+                )
         if acc > 4294967295:
-            raise ValueError('Base85 overflow in hunk starting at byte %d' % i)
+            raise ValueError(b'Base85 overflow in hunk starting at byte %d' % i)
         out.append(acc)
 
     # Pad final chunk if necessary
@@ -70,11 +78,11 @@
     if cl:
         acc *= 85 ** (5 - cl)
         if cl > 1:
-            acc += 0xffffff >> (cl - 2) * 8
+            acc += 0xFFFFFF >> (cl - 2) * 8
         out[-1] = acc
 
-    out = struct.pack('>%dL' % (len(out)), *out)
+    out = struct.pack(b'>%dL' % (len(out)), *out)
     if cl:
-        out = out[:-(5 - cl)]
+        out = out[: -(5 - cl)]
 
     return out
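
The _b85chars alphabet above is the same git-style base85 alphabet used by
``base64.b85encode`` in the Python standard library, which makes for an easy
cross-check of this pure-Python codec (a sketch, not part of the changeset)::

   import base64

   data = b'\x00\x01 some binary \xff payload'
   enc = base64.b85encode(data)          # same alphabet as _b85chars
   assert base64.b85decode(enc) == data  # lossless round trip
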
--- a/mercurial/pure/bdiff.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/pure/bdiff.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,16 +11,18 @@
 import re
 import struct
 
+
 def splitnewlines(text):
     '''like str.splitlines, but only split on newlines.'''
-    lines = [l + '\n' for l in text.split('\n')]
+    lines = [l + b'\n' for l in text.split(b'\n')]
     if lines:
-        if lines[-1] == '\n':
+        if lines[-1] == b'\n':
             lines.pop()
         else:
             lines[-1] = lines[-1][:-1]
     return lines
 
+
 def _normalizeblocks(a, b, blocks):
     prev = None
     r = []
@@ -38,25 +40,28 @@
         a2end = a2 + l2
         b2end = b2 + l2
         if a1end == a2:
-            while (a1end + shift < a2end and
-                   a[a1end + shift] == b[b1end + shift]):
+            while (
+                a1end + shift < a2end and a[a1end + shift] == b[b1end + shift]
+            ):
                 shift += 1
         elif b1end == b2:
-            while (b1end + shift < b2end and
-                   a[a1end + shift] == b[b1end + shift]):
+            while (
+                b1end + shift < b2end and a[a1end + shift] == b[b1end + shift]
+            ):
                 shift += 1
         r.append((a1, b1, l1 + shift))
         prev = a2 + shift, b2 + shift, l2 - shift
     r.append(prev)
     return r
 
+
 def bdiff(a, b):
     a = bytes(a).splitlines(True)
     b = bytes(b).splitlines(True)
 
     if not a:
-        s = "".join(b)
-        return s and (struct.pack(">lll", 0, 0, len(s)) + s)
+        s = b"".join(b)
+        return s and (struct.pack(b">lll", 0, 0, len(s)) + s)
 
     bin = []
     p = [0]
@@ -68,13 +73,14 @@
     la = 0
     lb = 0
     for am, bm, size in d:
-        s = "".join(b[lb:bm])
+        s = b"".join(b[lb:bm])
         if am > la or s:
-            bin.append(struct.pack(">lll", p[la], p[am], len(s)) + s)
+            bin.append(struct.pack(b">lll", p[la], p[am], len(s)) + s)
         la = am + size
         lb = bm + size
 
-    return "".join(bin)
+    return b"".join(bin)
+
 
 def blocks(a, b):
     an = splitnewlines(a)
@@ -83,10 +89,11 @@
     d = _normalizeblocks(an, bn, d)
     return [(i, i + n, j, j + n) for (i, j, n) in d]
 
+
 def fixws(text, allws):
     if allws:
-        text = re.sub('[ \t\r]+', '', text)
+        text = re.sub(b'[ \t\r]+', b'', text)
     else:
-        text = re.sub('[ \t\r]+', ' ', text)
-        text = text.replace(' \n', '\n')
+        text = re.sub(b'[ \t\r]+', b' ', text)
+        text = text.replace(b' \n', b'\n')
     return text
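
Here blocks() reports spans of matching lines and bdiff() emits the
binary delta (">lll" header plus replacement bytes) that mpatch
consumes. A small sketch using only the module above::

   from mercurial.pure.bdiff import bdiff, blocks

   old = b'a\nb\nc\n'
   new = b'a\nx\nc\n'
   print(blocks(old, new))   # [(a1, a2, b1, b2), ...] matching-line spans
   delta = bdiff(old, new)   # struct ">lll" framing + replacement data
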
--- a/mercurial/pure/charencode.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/pure/charencode.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,9 +9,8 @@
 
 import array
 
-from .. import (
-    pycompat,
-)
+from .. import pycompat
+
 
 def isasciistr(s):
     try:
@@ -20,6 +19,7 @@
     except UnicodeDecodeError:
         return False
 
+
 def asciilower(s):
     '''convert a string to lowercase if ASCII
 
@@ -27,6 +27,7 @@
     s.decode('ascii')
     return s.lower()
 
+
 def asciiupper(s):
     '''convert a string to uppercase if ASCII
 
@@ -34,22 +35,24 @@
     s.decode('ascii')
     return s.upper()
 
+
 _jsonmap = []
-_jsonmap.extend("\\u%04x" % x for x in range(32))
+_jsonmap.extend(b"\\u%04x" % x for x in range(32))
 _jsonmap.extend(pycompat.bytechr(x) for x in range(32, 127))
-_jsonmap.append('\\u007f')
-_jsonmap[0x09] = '\\t'
-_jsonmap[0x0a] = '\\n'
-_jsonmap[0x22] = '\\"'
-_jsonmap[0x5c] = '\\\\'
-_jsonmap[0x08] = '\\b'
-_jsonmap[0x0c] = '\\f'
-_jsonmap[0x0d] = '\\r'
+_jsonmap.append(b'\\u007f')
+_jsonmap[0x09] = b'\\t'
+_jsonmap[0x0A] = b'\\n'
+_jsonmap[0x22] = b'\\"'
+_jsonmap[0x5C] = b'\\\\'
+_jsonmap[0x08] = b'\\b'
+_jsonmap[0x0C] = b'\\f'
+_jsonmap[0x0D] = b'\\r'
 _paranoidjsonmap = _jsonmap[:]
-_paranoidjsonmap[0x3c] = '\\u003c'  # '<' (e.g. escape "</script>")
-_paranoidjsonmap[0x3e] = '\\u003e'  # '>'
+_paranoidjsonmap[0x3C] = b'\\u003c'  # '<' (e.g. escape "</script>")
+_paranoidjsonmap[0x3E] = b'\\u003e'  # '>'
 _jsonmap.extend(pycompat.bytechr(x) for x in range(128, 256))
 
+
 def jsonescapeu8fast(u8chars, paranoid):
     """Convert a UTF-8 byte string to JSON-escaped form (fast path)
 
@@ -60,15 +63,17 @@
     else:
         jm = _jsonmap
     try:
-        return ''.join(jm[x] for x in bytearray(u8chars))
+        return b''.join(jm[x] for x in bytearray(u8chars))
     except IndexError:
         raise ValueError
 
+
 if pycompat.ispy3:
     _utf8strict = r'surrogatepass'
 else:
     _utf8strict = r'strict'
 
+
 def jsonescapeu8fallback(u8chars, paranoid):
     """Convert a UTF-8 byte string to JSON-escaped form (slow path)
 
@@ -82,4 +87,4 @@
     u16b = u8chars.decode('utf-8', _utf8strict).encode('utf-16', _utf8strict)
     u16codes = array.array(r'H', u16b)
     u16codes.pop(0)  # drop BOM
-    return ''.join(jm[x] if x < 128 else '\\u%04x' % x for x in u16codes)
+    return b''.join(jm[x] if x < 128 else b'\\u%04x' % x for x in u16codes)
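
With the escape tables now holding bytes, both JSON paths stay
byte-oriented on Python 3. An illustrative call to the fast path::

   from mercurial.pure.charencode import jsonescapeu8fast

   print(jsonescapeu8fast(b'say "hi"\n', False))  # b'say \\"hi\\"\\n'
   print(jsonescapeu8fast(b'<script>', True))     # b'\\u003cscript\\u003e'
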
--- a/mercurial/pure/mpatch.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/pure/mpatch.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,12 +10,15 @@
 import struct
 
 from .. import pycompat
+
 stringio = pycompat.bytesio
 
+
 class mpatchError(Exception):
     """error raised when a delta cannot be decoded
     """
 
+
 # This attempts to apply a series of patches in time proportional to
 # the total size of the patches, rather than patches * len(text). This
 # means rather than shuffling strings around, we shuffle around
@@ -26,16 +29,18 @@
 # mmap and simply use memmove. This avoids creating a bunch of large
 # temporary string buffers.
 
-def _pull(dst, src, l): # pull l bytes from src
+
+def _pull(dst, src, l):  # pull l bytes from src
     while l:
         f = src.pop()
-        if f[0] > l: # do we need to split?
+        if f[0] > l:  # do we need to split?
             src.append((f[0] - l, f[1] + l))
             dst.append((l, f[1]))
             return
         dst.append(f)
         l -= f[0]
 
+
 def _move(m, dest, src, count):
     """move count bytes from src to dest
 
@@ -46,6 +51,7 @@
     m.seek(dest)
     m.write(buf)
 
+
 def _collect(m, buf, list):
     start = buf
     for l, p in reversed(list):
@@ -53,6 +59,7 @@
         buf += l
     return (buf - start, start)
 
+
 def patches(a, bins):
     if not bins:
         return a
@@ -60,7 +67,7 @@
     plens = [len(x) for x in bins]
     pl = sum(plens)
     bl = len(a) + pl
-    tl = bl + bl + pl # enough for the patches and two working texts
+    tl = bl + bl + pl  # enough for the patches and two working texts
     b1, b2 = 0, bl
 
     if not tl:
@@ -90,29 +97,30 @@
         while pos < end:
             m.seek(pos)
             try:
-                p1, p2, l = struct.unpack(">lll", m.read(12))
+                p1, p2, l = struct.unpack(b">lll", m.read(12))
             except struct.error:
-                raise mpatchError("patch cannot be decoded")
-            _pull(new, frags, p1 - last) # what didn't change
-            _pull([], frags, p2 - p1)    # what got deleted
-            new.append((l, pos + 12))   # what got added
+                raise mpatchError(b"patch cannot be decoded")
+            _pull(new, frags, p1 - last)  # what didn't change
+            _pull([], frags, p2 - p1)  # what got deleted
+            new.append((l, pos + 12))  # what got added
             pos += l + 12
             last = p2
-        frags.extend(reversed(new))     # what was left at the end
+        frags.extend(reversed(new))  # what was left at the end
 
     t = _collect(m, b2, frags)
 
     m.seek(t[1])
     return m.read(t[0])
 
+
 def patchedsize(orig, delta):
     outlen, last, bin = 0, 0, 0
     binend = len(delta)
     data = 12
 
     while data <= binend:
-        decode = delta[bin:bin + 12]
-        start, end, length = struct.unpack(">lll", decode)
+        decode = delta[bin : bin + 12]
+        start, end, length = struct.unpack(b">lll", decode)
         if start > end:
             break
         bin = data + length
@@ -122,7 +130,7 @@
         outlen += length
 
     if bin != binend:
-        raise mpatchError("patch cannot be decoded")
+        raise mpatchError(b"patch cannot be decoded")
 
     outlen += orig - last
     return outlen
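
patches() applies exactly the deltas that the pure bdiff module
produces, so the two can be exercised together. A round-trip sketch::

   from mercurial.pure import bdiff, mpatch

   old = b'a\nb\nc\n'
   new = b'a\nx\nc\n'
   delta = bdiff.bdiff(old, new)
   assert mpatch.patches(old, [delta]) == new
   assert mpatch.patchedsize(len(old), delta) == len(new)
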
--- a/mercurial/pure/osutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/pure/osutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,11 +13,13 @@
 import socket
 import stat as statmod
 
+from ..pycompat import getattr
 from .. import (
     encoding,
     pycompat,
 )
 
+
 def _mode_to_kind(mode):
     if statmod.S_ISREG(mode):
         return statmod.S_IFREG
@@ -35,6 +37,7 @@
         return statmod.S_IFSOCK
     return mode
 
+
 def listdir(path, stat=False, skip=None):
     '''listdir(path, stat=False) -> list_of_tuples
 
@@ -65,13 +68,14 @@
             result.append((fn, _mode_to_kind(st.st_mode)))
     return result
 
+
 if not pycompat.iswindows:
     posixfile = open
 
     _SCM_RIGHTS = 0x01
     _socklen_t = ctypes.c_uint
 
-    if pycompat.sysplatform.startswith('linux'):
+    if pycompat.sysplatform.startswith(b'linux'):
         # socket.h says "the type should be socklen_t but the definition of
         # the kernel is incompatible with this."
         _cmsg_len_t = ctypes.c_size_t
@@ -111,12 +115,15 @@
     _recvmsg = getattr(_libc, 'recvmsg', None)
     if _recvmsg:
         _recvmsg.restype = getattr(ctypes, 'c_ssize_t', ctypes.c_long)
-        _recvmsg.argtypes = (ctypes.c_int, ctypes.POINTER(_msghdr),
-                             ctypes.c_int)
+        _recvmsg.argtypes = (
+            ctypes.c_int,
+            ctypes.POINTER(_msghdr),
+            ctypes.c_int,
+        )
     else:
         # recvmsg isn't always provided by libc; such systems are unsupported
         def _recvmsg(sockfd, msg, flags):
-            raise NotImplementedError('unsupported platform')
+            raise NotImplementedError(b'unsupported platform')
 
     def _CMSG_FIRSTHDR(msgh):
         if msgh.msg_controllen < ctypes.sizeof(_cmsghdr):
@@ -132,10 +139,15 @@
         dummy = (ctypes.c_ubyte * 1)()
         iov = _iovec(ctypes.cast(dummy, ctypes.c_void_p), ctypes.sizeof(dummy))
         cbuf = ctypes.create_string_buffer(256)
-        msgh = _msghdr(None, 0,
-                       ctypes.pointer(iov), 1,
-                       ctypes.cast(cbuf, ctypes.c_void_p), ctypes.sizeof(cbuf),
-                       0)
+        msgh = _msghdr(
+            None,
+            0,
+            ctypes.pointer(iov),
+            1,
+            ctypes.cast(cbuf, ctypes.c_void_p),
+            ctypes.sizeof(cbuf),
+            0,
+        )
         r = _recvmsg(sockfd, ctypes.byref(msgh), 0)
         if r < 0:
             e = ctypes.get_errno()
@@ -145,14 +157,18 @@
         cmsg = _CMSG_FIRSTHDR(msgh)
         if not cmsg:
             return []
-        if (cmsg.cmsg_level != socket.SOL_SOCKET or
-            cmsg.cmsg_type != _SCM_RIGHTS):
+        if (
+            cmsg.cmsg_level != socket.SOL_SOCKET
+            or cmsg.cmsg_type != _SCM_RIGHTS
+        ):
             return []
         rfds = ctypes.cast(cmsg.cmsg_data, ctypes.POINTER(ctypes.c_int))
-        rfdscount = ((cmsg.cmsg_len - _cmsghdr.cmsg_data.offset) //
-                     ctypes.sizeof(ctypes.c_int))
+        rfdscount = (
+            cmsg.cmsg_len - _cmsghdr.cmsg_data.offset
+        ) // ctypes.sizeof(ctypes.c_int)
         return [rfds[i] for i in pycompat.xrange(rfdscount)]
 
+
 else:
     import msvcrt
 
@@ -188,14 +204,22 @@
 
     # types of parameters of C functions used (required by pypy)
 
-    _kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
-        _DWORD, _DWORD, _HANDLE]
+    _kernel32.CreateFileA.argtypes = [
+        _LPCSTR,
+        _DWORD,
+        _DWORD,
+        ctypes.c_void_p,
+        _DWORD,
+        _DWORD,
+        _HANDLE,
+    ]
     _kernel32.CreateFileA.restype = _HANDLE
 
     def _raiseioerror(name):
         err = ctypes.WinError()
-        raise IOError(err.errno, r'%s: %s' % (encoding.strfromlocal(name),
-                                              err.strerror))
+        raise IOError(
+            err.errno, r'%s: %s' % (encoding.strfromlocal(name), err.strerror)
+        )
 
     class posixfile(object):
         '''a file object aiming for POSIX-like semantics
@@ -235,9 +259,15 @@
             else:
                 raise ValueError(r"invalid mode: %s" % pycompat.sysstr(mode))
 
-            fh = _kernel32.CreateFileA(name, access,
-                    _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
-                    None, creation, _FILE_ATTRIBUTE_NORMAL, None)
+            fh = _kernel32.CreateFileA(
+                name,
+                access,
+                _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
+                None,
+                creation,
+                _FILE_ATTRIBUTE_NORMAL,
+                None,
+            )
             if fh == _INVALID_HANDLE_VALUE:
                 _raiseioerror(name)
 
--- a/mercurial/pure/parsers.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/pure/parsers.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,6 +12,7 @@
 
 from ..node import nullid
 from .. import pycompat
+
 stringio = pycompat.bytesio
 
 
@@ -26,17 +27,21 @@
     # x is a tuple
     return x
 
-indexformatng = ">Qiiiiii20s12x"
-indexfirst = struct.calcsize('Q')
-sizeint = struct.calcsize('i')
+
+indexformatng = b">Qiiiiii20s12x"
+indexfirst = struct.calcsize(b'Q')
+sizeint = struct.calcsize(b'i')
 indexsize = struct.calcsize(indexformatng)
 
+
 def gettype(q):
     return int(q & 0xFFFF)
 
+
 def offset_type(offset, type):
     return int(int(offset) << 16 | type)
 
+
 class BaseIndexObject(object):
     def __len__(self):
         return self._lgt + len(self._extra)
@@ -46,7 +51,7 @@
 
     def _check_index(self, i):
         if not isinstance(i, int):
-            raise TypeError("expecting int indexes")
+            raise TypeError(b"expecting int indexes")
         if i < 0 or i >= len(self):
             raise IndexError
 
@@ -57,7 +62,7 @@
         if i >= self._lgt:
             return self._extra[i - self._lgt]
         index = self._calculate_index(i)
-        r = struct.unpack(indexformatng, self._data[index:index + indexsize])
+        r = struct.unpack(indexformatng, self._data[index : index + indexsize])
         if i == 0:
             e = list(r)
             type = gettype(e[0])
@@ -65,6 +70,7 @@
             return tuple(e)
         return r
 
+
 class IndexObject(BaseIndexObject):
     def __init__(self, data):
         assert len(data) % indexsize == 0
@@ -77,15 +83,16 @@
 
     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
-            raise ValueError("deleting slices only supports a:-1 with step 1")
+            raise ValueError(b"deleting slices only supports a:-1 with step 1")
         i = i.start
         self._check_index(i)
         if i < self._lgt:
-            self._data = self._data[:i * indexsize]
+            self._data = self._data[: i * indexsize]
             self._lgt = i
             self._extra = []
         else:
-            self._extra = self._extra[:i - self._lgt]
+            self._extra = self._extra[: i - self._lgt]
+
 
 class InlinedIndexObject(BaseIndexObject):
     def __init__(self, data, inline=0):
@@ -100,19 +107,20 @@
             self._offsets = [0] * lgt
         count = 0
         while off <= len(self._data) - indexsize:
-            s, = struct.unpack('>i',
-                self._data[off + indexfirst:off + sizeint + indexfirst])
+            (s,) = struct.unpack(
+                b'>i', self._data[off + indexfirst : off + sizeint + indexfirst]
+            )
             if lgt is not None:
                 self._offsets[count] = off
             count += 1
             off += indexsize + s
         if off != len(self._data):
-            raise ValueError("corrupted data")
+            raise ValueError(b"corrupted data")
         return count
 
     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
-            raise ValueError("deleting slices only supports a:-1 with step 1")
+            raise ValueError(b"deleting slices only supports a:-1 with step 1")
         i = i.start
         self._check_index(i)
         if i < self._lgt:
@@ -120,20 +128,22 @@
             self._lgt = i
             self._extra = []
         else:
-            self._extra = self._extra[:i - self._lgt]
+            self._extra = self._extra[: i - self._lgt]
 
     def _calculate_index(self, i):
         return self._offsets[i]
 
+
 def parse_index2(data, inline):
     if not inline:
         return IndexObject(data), None
     return InlinedIndexObject(data, inline), (0, data)
 
+
 def parse_dirstate(dmap, copymap, st):
-    parents = [st[:20], st[20: 40]]
+    parents = [st[:20], st[20:40]]
     # dereference fields so they will be local in loop
-    format = ">cllll"
+    format = b">cllll"
     e_size = struct.calcsize(format)
     pos1 = 40
     l = len(st)
@@ -141,22 +151,23 @@
     # the inner loop
     while pos1 < l:
         pos2 = pos1 + e_size
-        e = _unpack(">cllll", st[pos1:pos2]) # a literal here is faster
+        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
         pos1 = pos2 + e[4]
         f = st[pos2:pos1]
-        if '\0' in f:
-            f, c = f.split('\0')
+        if b'\0' in f:
+            f, c = f.split(b'\0')
             copymap[f] = c
         dmap[f] = e[:4]
     return parents
 
+
 def pack_dirstate(dmap, copymap, pl, now):
     now = int(now)
     cs = stringio()
     write = cs.write
-    write("".join(pl))
-    for f, e in dmap.iteritems():
-        if e[0] == 'n' and e[3] == now:
+    write(b"".join(pl))
+    for f, e in pycompat.iteritems(dmap):
+        if e[0] == b'n' and e[3] == now:
             # The file was last modified "simultaneously" with the current
             # write to dirstate (i.e. within the same second for file-
             # systems with a granularity of 1 sec). This commonly happens
@@ -170,8 +181,8 @@
             dmap[f] = e
 
         if f in copymap:
-            f = "%s\0%s" % (f, copymap[f])
-        e = _pack(">cllll", e[0], e[1], e[2], e[3], len(f))
+            f = b"%s\0%s" % (f, copymap[f])
+        e = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
         write(e)
         write(f)
     return cs.getvalue()
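
A round-trip sketch for the dirstate (de)serializers above; the file
name and entry values here are made up for illustration::

   from mercurial.pure import parsers

   parents = [b'\x11' * 20, b'\x22' * 20]
   dmap = {b'foo': (b'n', 0o644, 12, 0)}  # state, mode, size, mtime
   raw = parsers.pack_dirstate(dmap, {}, parents, now=1)
   d2, c2 = {}, {}
   assert parsers.parse_dirstate(d2, c2, raw) == parents
   assert d2 == dmap
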
--- a/mercurial/pushkey.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/pushkey.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,48 +14,58 @@
     phases,
 )
 
+
 def _nslist(repo):
     n = {}
     for k in _namespaces:
-        n[k] = ""
+        n[k] = b""
     if not obsolete.isenabled(repo, obsolete.exchangeopt):
-        n.pop('obsolete')
+        n.pop(b'obsolete')
     return n
 
-_namespaces = {"namespaces": (lambda *x: False, _nslist),
-               "bookmarks": (bookmarks.pushbookmark, bookmarks.listbookmarks),
-               "phases": (phases.pushphase, phases.listphases),
-               "obsolete": (obsolete.pushmarker, obsolete.listmarkers),
-              }
+
+_namespaces = {
+    b"namespaces": (lambda *x: False, _nslist),
+    b"bookmarks": (bookmarks.pushbookmark, bookmarks.listbookmarks),
+    b"phases": (phases.pushphase, phases.listphases),
+    b"obsolete": (obsolete.pushmarker, obsolete.listmarkers),
+}
+
 
 def register(namespace, pushkey, listkeys):
     _namespaces[namespace] = (pushkey, listkeys)
 
+
 def _get(namespace):
     return _namespaces.get(namespace, (lambda *x: False, lambda *x: {}))
 
+
 def push(repo, namespace, key, old, new):
     '''should succeed iff value was old'''
     pk = _get(namespace)[0]
     return pk(repo, key, old, new)
 
+
 def list(repo, namespace):
     '''return a dict'''
     lk = _get(namespace)[1]
     return lk(repo)
 
+
 encode = encoding.fromlocal
 
 decode = encoding.tolocal
 
+
 def encodekeys(keys):
     """encode the content of a pushkey namespace for exchange over the wire"""
-    return '\n'.join(['%s\t%s' % (encode(k), encode(v)) for k, v in keys])
+    return b'\n'.join([b'%s\t%s' % (encode(k), encode(v)) for k, v in keys])
+
 
 def decodekeys(data):
     """decode the content of a pushkey namespace from exchange over the wire"""
     result = {}
     for l in data.splitlines():
-        k, v = l.split('\t')
+        k, v = l.split(b'\t')
         result[decode(k)] = decode(v)
     return result
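
encodekeys()/decodekeys() define the tab- and newline-separated wire
framing for pushkey namespaces, which means keys and values must not
themselves contain tabs or newlines. A round trip with ASCII data
(so encode/decode are effectively identities)::

   from mercurial import pushkey

   keys = [(b'bookmarks', b'abc123'), (b'phases', b'1')]
   wire = pushkey.encodekeys(keys)
   assert pushkey.decodekeys(wire) == dict(keys)
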
--- a/mercurial/pvec.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/pvec.py	Mon Oct 21 11:09:48 2019 -0400
@@ -56,13 +56,14 @@
     util,
 )
 
-_size = 448 # 70 chars b85-encoded
+_size = 448  # 70 chars b85-encoded
 _bytes = _size / 8
 _depthbits = 24
 _depthbytes = _depthbits / 8
 _vecbytes = _bytes - _depthbytes
 _vecbits = _vecbytes * 8
-_radius = (_vecbits - 30) / 2 # high probability vectors are related
+_radius = (_vecbits - 30) / 2  # high probability vectors are related
+
 
 def _bin(bs):
     '''convert a bytestring to a long'''
@@ -71,20 +72,24 @@
         v = v * 256 + ord(b)
     return v
 
+
 def _str(v, l):
-    bs = ""
+    bs = b""
     for p in pycompat.xrange(l):
         bs = chr(v & 255) + bs
         v >>= 8
     return bs
 
+
 def _split(b):
     '''depth and bitvec'''
     return _bin(b[:_depthbytes]), _bin(b[_depthbytes:])
 
+
 def _join(depth, bitvec):
     return _str(depth, _depthbytes) + _str(bitvec, _vecbytes)
 
+
 def _hweight(x):
     c = 0
     while x:
@@ -92,17 +97,21 @@
             c += 1
         x >>= 1
     return c
+
+
 _htab = [_hweight(x) for x in pycompat.xrange(256)]
 
+
 def _hamming(a, b):
     '''find the hamming distance between two longs'''
     d = a ^ b
     c = 0
     while d:
-        c += _htab[d & 0xff]
+        c += _htab[d & 0xFF]
         d >>= 8
     return c
 
+
 def _mergevec(x, y, c):
     # Ideally, this function would be x ^ y ^ ancestor, but finding
     # ancestors is a nuisance. So instead we find the minimal number
@@ -116,7 +125,7 @@
     hdist = _hamming(v1, v2)
     ddist = d1 - d2
     v = v1
-    m = v1 ^ v2 # mask of different bits
+    m = v1 ^ v2  # mask of different bits
     i = 1
 
     if hdist > ddist:
@@ -140,10 +149,12 @@
 
     return depth, v
 
+
 def _flipbit(v, node):
     # converting bit strings to longs is slow
-    bit = (hash(node) & 0xffffffff) % _vecbits
-    return v ^ (1<<bit)
+    bit = (hash(node) & 0xFFFFFFFF) % _vecbits
+    return v ^ (1 << bit)
+
 
 def ctxpvec(ctx):
     '''construct a pvec for ctx while filling in the cache'''
@@ -168,6 +179,7 @@
     bs = _join(*pvc[ctx.rev()])
     return pvec(util.b85encode(bs))
 
+
 class pvec(object):
     def __init__(self, hashorctx):
         if isinstance(hashorctx, str):
@@ -185,7 +197,7 @@
     def __lt__(self, b):
         delta = b._depth - self._depth
         if delta < 0:
-            return False # always correct
+            return False  # always correct
         if _hamming(self._vec, b._vec) > delta:
             return False
         return True
@@ -201,7 +213,7 @@
 
     def __sub__(self, b):
         if self | b:
-            raise ValueError("concurrent pvecs")
+            raise ValueError(b"concurrent pvecs")
         return self._depth - b._depth
 
     def distance(self, b):
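
The 256-entry _htab built from _hweight() lets _hamming() count
differing bits a byte at a time. A sketch of these private helpers::

   from mercurial.pvec import _hamming, _hweight

   assert _hweight(0b1011) == 3
   assert _hamming(0b1100, 0b1010) == 2  # two bits differ
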
--- a/mercurial/pycompat.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/pycompat.py	Mon Oct 21 11:09:48 2019 -0400
@@ -17,8 +17,8 @@
 import sys
 import tempfile
 
-ispy3 = (sys.version_info[0] >= 3)
-ispypy = (r'__pypy__' in sys.builtin_module_names)
+ispy3 = sys.version_info[0] >= 3
+ispypy = r'__pypy__' in sys.builtin_module_names
 
 if not ispy3:
     import cookielib
@@ -32,6 +32,8 @@
 
     def future_set_exception_info(f, exc_info):
         f.set_exception_info(*exc_info)
+
+
 else:
     import concurrent.futures as futures
     import http.cookiejar as cookielib
@@ -44,9 +46,11 @@
     def future_set_exception_info(f, exc_info):
         f.set_exception(exc_info[0])
 
+
 def identity(a):
     return a
 
+
 def _rapply(f, xs):
     if xs is None:
         # assume None means non-value of optional data
@@ -57,6 +61,7 @@
         return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
     return f(xs)
 
+
 def rapply(f, xs):
     """Apply function recursively to every item preserving the data structure
 
@@ -80,6 +85,7 @@
         return xs
     return _rapply(f, xs)
 
+
 if ispy3:
     import builtins
     import functools
@@ -195,9 +201,12 @@
         def __new__(cls, s=b''):
             if isinstance(s, bytestr):
                 return s
-            if (not isinstance(s, (bytes, bytearray))
-                and not hasattr(s, u'__bytes__')):  # hasattr-py3-only
-                s = str(s).encode(u'ascii')
+            if not isinstance(
+                s, (bytes, bytearray)
+            ) and not hasattr(  # hasattr-py3-only
+                s, u'__bytes__'
+            ):
+                s = str(s).encode('ascii')
             return bytes.__new__(cls, s)
 
         def __getitem__(self, key):
@@ -228,7 +237,7 @@
         This never raises UnicodeEncodeError, but only ASCII characters
         can be round-tripped by sysstr(sysbytes(s)).
         """
-        return s.encode(u'utf-8')
+        return s.encode('utf-8')
 
     def sysstr(s):
         """Return a keyword str to be passed to Python functions such as
@@ -240,18 +249,18 @@
         """
         if isinstance(s, builtins.str):
             return s
-        return s.decode(u'latin-1')
+        return s.decode('latin-1')
 
     def strurl(url):
         """Converts a bytes url back to str"""
         if isinstance(url, bytes):
-            return url.decode(u'ascii')
+            return url.decode('ascii')
         return url
 
     def bytesurl(url):
         """Converts a str url to bytes by encoding in ascii"""
         if isinstance(url, str):
-            return url.encode(u'ascii')
+            return url.encode('ascii')
         return url
 
     def raisewithtb(exc, tb):
@@ -261,7 +270,7 @@
     def getdoc(obj):
         """Get docstring as bytes; may be None so gettext() won't confuse it
         with _('')"""
-        doc = getattr(obj, u'__doc__', None)
+        doc = getattr(obj, '__doc__', None)
         if doc is None:
             return doc
         return sysbytes(doc)
@@ -270,6 +279,7 @@
         @functools.wraps(f)
         def w(object, name, *args):
             return f(object, sysstr(name), *args)
+
         return w
 
     # these wrappers are automagically imported by hgloader
@@ -296,8 +306,7 @@
         shortlist = shortlist.decode('latin-1')
         namelist = [a.decode('latin-1') for a in namelist]
         opts, args = orig(args, shortlist, namelist)
-        opts = [(a[0].encode('latin-1'), a[1].encode('latin-1'))
-                for a in opts]
+        opts = [(a[0].encode('latin-1'), a[1].encode('latin-1')) for a in opts]
         args = [a.encode('latin-1') for a in args]
         return opts, args
 
@@ -307,7 +316,7 @@
         they can be passed as keyword arguments as dictionaries with bytes keys
         can't be passed as keyword arguments to functions on Python 3.
         """
-        dic = dict((k.decode('latin-1'), v) for k, v in dic.iteritems())
+        dic = dict((k.decode('latin-1'), v) for k, v in dic.items())
         return dic
 
     def byteskwargs(dic):
@@ -315,7 +324,7 @@
         Converts keys of python dictionaries to bytes as they were converted to
         str to pass that dictionary as a keyword argument on Python 3.
         """
-        dic = dict((k.encode('latin-1'), v) for k, v in dic.iteritems())
+        dic = dict((k.encode('latin-1'), v) for k, v in dic.items())
         return dic
 
     # TODO: handle shlex.shlex().
@@ -328,6 +337,9 @@
         ret = shlex.split(s.decode('latin-1'), comments, posix)
         return [a.encode('latin-1') for a in ret]
 
+    iteritems = lambda x: x.items()
+    itervalues = lambda x: x.values()
+
 else:
     import cStringIO
 
@@ -342,10 +354,14 @@
     sysstr = identity
     strurl = identity
     bytesurl = identity
+    open = open
+    delattr = delattr
+    getattr = getattr
+    hasattr = hasattr
+    setattr = setattr
 
     # this can't be parsed on Python 3
-    exec('def raisewithtb(exc, tb):\n'
-         '    raise exc, None, tb\n')
+    exec(b'def raisewithtb(exc, tb):\n    raise exc, None, tb\n')
 
     def fsencode(filename):
         """
@@ -356,8 +372,7 @@
         if isinstance(filename, str):
             return filename
         else:
-            raise TypeError(
-                r"expect str, not %s" % type(filename).__name__)
+            raise TypeError(r"expect str, not %s" % type(filename).__name__)
 
     # In Python 2, fsdecode() is very likely to receive bytes. So it's
     # better not to touch the Python 2 part as it's already working fine.
@@ -400,6 +415,8 @@
     ziplist = zip
     rawinput = raw_input
     getargspec = inspect.getargspec
+    iteritems = lambda x: x.iteritems()
+    itervalues = lambda x: x.itervalues()
 
 isjython = sysplatform.startswith(b'java')
 
@@ -408,23 +425,30 @@
 isposix = osname == b'posix'
 iswindows = osname == b'nt'
 
+
 def getoptb(args, shortlist, namelist):
     return _getoptbwrapper(getopt.getopt, args, shortlist, namelist)
 
+
 def gnugetoptb(args, shortlist, namelist):
     return _getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist)
 
+
 def mkdtemp(suffix=b'', prefix=b'tmp', dir=None):
     return tempfile.mkdtemp(suffix, prefix, dir)
 
+
 # text=True is not supported; use util.from/tonativeeol() instead
 def mkstemp(suffix=b'', prefix=b'tmp', dir=None):
     return tempfile.mkstemp(suffix, prefix, dir)
 
+
 # mode must include 'b'ytes as encoding= is not supported
-def namedtempfile(mode=b'w+b', bufsize=-1, suffix=b'', prefix=b'tmp', dir=None,
-                  delete=True):
+def namedtempfile(
+    mode=b'w+b', bufsize=-1, suffix=b'', prefix=b'tmp', dir=None, delete=True
+):
     mode = sysstr(mode)
     assert r'b' in mode
-    return tempfile.NamedTemporaryFile(mode, bufsize, suffix=suffix,
-                                       prefix=prefix, dir=dir, delete=delete)
+    return tempfile.NamedTemporaryFile(
+        mode, bufsize, suffix=suffix, prefix=prefix, dir=dir, delete=delete
+    )
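
The new iteritems()/itervalues() shims added above give callers a
single spelling for both Pythons::

   from mercurial import pycompat

   d = {b'a': 1, b'b': 2}
   # d.iteritems() on Python 2, d.items() on Python 3
   for k, v in pycompat.iteritems(d):
       print(k, v)
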
--- a/mercurial/rcutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/rcutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -24,15 +24,18 @@
 systemrcpath = scmplatform.systemrcpath
 userrcpath = scmplatform.userrcpath
 
+
 def _expandrcpath(path):
     '''path could be a file or a directory. return a list of file paths'''
     p = util.expandpath(path)
     if os.path.isdir(p):
         join = os.path.join
-        return sorted(join(p, f) for f, k in util.listdir(p)
-                      if f.endswith('.rc'))
+        return sorted(
+            join(p, f) for f, k in util.listdir(p) if f.endswith(b'.rc')
+        )
     return [p]
 
+
 def envrcitems(env=None):
     '''Return [(section, name, value, source)] config items.
 
@@ -44,25 +47,27 @@
     if env is None:
         env = encoding.environ
     checklist = [
-        ('EDITOR', 'ui', 'editor'),
-        ('VISUAL', 'ui', 'editor'),
-        ('PAGER', 'pager', 'pager'),
+        (b'EDITOR', b'ui', b'editor'),
+        (b'VISUAL', b'ui', b'editor'),
+        (b'PAGER', b'pager', b'pager'),
     ]
     result = []
     for envname, section, configname in checklist:
         if envname not in env:
             continue
-        result.append((section, configname, env[envname], '$%s' % envname))
+        result.append((section, configname, env[envname], b'$%s' % envname))
     return result
 
+
 def defaultrcpath():
     '''return rc paths in default.d'''
     path = []
-    defaultpath = os.path.join(util.datapath, 'default.d')
+    defaultpath = os.path.join(util.datapath, b'default.d')
     if os.path.isdir(defaultpath):
         path = _expandrcpath(defaultpath)
     return path
 
+
 def rccomponents():
     '''return an ordered [(type, obj)] about where to load configs.
 
@@ -75,25 +80,28 @@
     and is the config file path. if type is 'items', obj is a list of (section,
     name, value, source) that should fill the config directly.
     '''
-    envrc = ('items', envrcitems())
+    envrc = (b'items', envrcitems())
 
-    if 'HGRCPATH' in encoding.environ:
+    if b'HGRCPATH' in encoding.environ:
         # assume HGRCPATH is all about user configs so environments can be
         # overridden.
         _rccomponents = [envrc]
-        for p in encoding.environ['HGRCPATH'].split(pycompat.ospathsep):
+        for p in encoding.environ[b'HGRCPATH'].split(pycompat.ospathsep):
             if not p:
                 continue
-            _rccomponents.extend(('path', p) for p in _expandrcpath(p))
+            _rccomponents.extend((b'path', p) for p in _expandrcpath(p))
     else:
-        normpaths = lambda paths: [('path', os.path.normpath(p)) for p in paths]
+        normpaths = lambda paths: [
+            (b'path', os.path.normpath(p)) for p in paths
+        ]
         _rccomponents = normpaths(defaultrcpath() + systemrcpath())
         _rccomponents.append(envrc)
         _rccomponents.extend(normpaths(userrcpath()))
     return _rccomponents
 
+
 def defaultpagerenv():
     '''return a dict of default environment variables and their values,
     intended to be set before starting a pager.
     '''
-    return {'LESS': 'FRX', 'LV': '-c'}
+    return {b'LESS': b'FRX', b'LV': b'-c'}
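
envrcitems() maps well-known environment variables onto config items;
a sketch with a synthetic environment::

   from mercurial import rcutil

   env = {b'EDITOR': b'vim', b'PAGER': b'less'}
   print(rcutil.envrcitems(env))
   # [(b'ui', b'editor', b'vim', b'$EDITOR'),
   #  (b'pager', b'pager', b'less', b'$PAGER')]
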
--- a/mercurial/registrar.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/registrar.py	Mon Oct 21 11:09:48 2019 -0400
@@ -21,6 +21,7 @@
 # the other items extensions might want to register.
 configitem = configitems.getitemregister
 
+
 class _funcregistrarbase(object):
     """Base of decorator to register a function for specific purpose
 
@@ -47,6 +48,7 @@
     - 'barfunc' is stored as 'bar' in '_table' of an instance 'keyword' above
     - 'barfunc.__doc__' becomes ":bar: Explanation of bar keyword"
     """
+
     def __init__(self, table=None):
         if table is None:
             self._table = {}
@@ -60,7 +62,7 @@
         name = self._getname(decl)
 
         if name in self._table:
-            msg = 'duplicate registration for name: "%s"' % name
+            msg = b'duplicate registration for name: "%s"' % name
             raise error.ProgrammingError(msg)
 
         if func.__doc__ and not util.safehasattr(func, '_origdoc'):
@@ -81,13 +83,13 @@
         of the two registrars must match.
         """
         if not isinstance(registrarbase, type(self)):
-            msg = "cannot merge different types of registrar"
+            msg = b"cannot merge different types of registrar"
             raise error.ProgrammingError(msg)
 
         dups = set(registrarbase._table).intersection(self._table)
 
         if dups:
-            msg = 'duplicate registration for names: "%s"' % '", "'.join(dups)
+            msg = b'duplicate registration for names: "%s"' % b'", "'.join(dups)
             raise error.ProgrammingError(msg)
 
         self._table.update(registrarbase._table)
@@ -95,7 +97,7 @@
     def _parsefuncdecl(self, decl):
         """Parse function declaration and return the name of function in it
         """
-        i = decl.find('(')
+        i = decl.find(b'(')
         if i >= 0:
             return decl[:i]
         else:
@@ -122,6 +124,7 @@
         """Execute exra setup for registered function, if needed
         """
 
+
 class command(_funcregistrarbase):
     """Decorator to register a command function to table
 
@@ -192,23 +195,33 @@
     # [alias]
     # myalias = something
     # myalias:category = repo
-    CATEGORY_REPO_CREATION = 'repo'
-    CATEGORY_REMOTE_REPO_MANAGEMENT = 'remote'
-    CATEGORY_COMMITTING = 'commit'
-    CATEGORY_CHANGE_MANAGEMENT = 'management'
-    CATEGORY_CHANGE_ORGANIZATION = 'organization'
-    CATEGORY_FILE_CONTENTS = 'files'
-    CATEGORY_CHANGE_NAVIGATION  = 'navigation'
-    CATEGORY_WORKING_DIRECTORY = 'wdir'
-    CATEGORY_IMPORT_EXPORT = 'import'
-    CATEGORY_MAINTENANCE = 'maintenance'
-    CATEGORY_HELP = 'help'
-    CATEGORY_MISC = 'misc'
-    CATEGORY_NONE = 'none'
+    CATEGORY_REPO_CREATION = b'repo'
+    CATEGORY_REMOTE_REPO_MANAGEMENT = b'remote'
+    CATEGORY_COMMITTING = b'commit'
+    CATEGORY_CHANGE_MANAGEMENT = b'management'
+    CATEGORY_CHANGE_ORGANIZATION = b'organization'
+    CATEGORY_FILE_CONTENTS = b'files'
+    CATEGORY_CHANGE_NAVIGATION = b'navigation'
+    CATEGORY_WORKING_DIRECTORY = b'wdir'
+    CATEGORY_IMPORT_EXPORT = b'import'
+    CATEGORY_MAINTENANCE = b'maintenance'
+    CATEGORY_HELP = b'help'
+    CATEGORY_MISC = b'misc'
+    CATEGORY_NONE = b'none'
 
-    def _doregister(self, func, name, options=(), synopsis=None,
-                    norepo=False, optionalrepo=False, inferrepo=False,
-                    intents=None, helpcategory=None, helpbasic=False):
+    def _doregister(
+        self,
+        func,
+        name,
+        options=(),
+        synopsis=None,
+        norepo=False,
+        optionalrepo=False,
+        inferrepo=False,
+        intents=None,
+        helpcategory=None,
+        helpbasic=False,
+    ):
         func.norepo = norepo
         func.optionalrepo = optionalrepo
         func.inferrepo = inferrepo
@@ -221,8 +234,10 @@
             self._table[name] = func, list(options)
         return func
 
+
 INTENT_READONLY = b'readonly'
 
+
 class revsetpredicate(_funcregistrarbase):
     """Decorator to register revset predicate
 
@@ -263,14 +278,16 @@
 
     Otherwise, explicit 'revset.loadpredicate()' is needed.
     """
+
     _getname = _funcregistrarbase._parsefuncdecl
-    _docformat = "``%s``\n    %s"
+    _docformat = b"``%s``\n    %s"
 
     def _extrasetup(self, name, func, safe=False, takeorder=False, weight=1):
         func._safe = safe
         func._takeorder = takeorder
         func._weight = weight
 
+
 class filesetpredicate(_funcregistrarbase):
     """Decorator to register fileset predicate
 
@@ -312,17 +329,21 @@
 
     Otherwise, explicit 'fileset.loadpredicate()' is needed.
     """
+
     _getname = _funcregistrarbase._parsefuncdecl
-    _docformat = "``%s``\n    %s"
+    _docformat = b"``%s``\n    %s"
 
     def _extrasetup(self, name, func, callstatus=False, weight=1):
         func._callstatus = callstatus
         func._weight = weight
 
+
 class _templateregistrarbase(_funcregistrarbase):
     """Base of decorator to register functions as template specific one
     """
-    _docformat = ":%s: %s"
+
+    _docformat = b":%s: %s"
+
 
 class templatekeyword(_templateregistrarbase):
     """Decorator to register template keyword
@@ -356,6 +377,7 @@
     def _extrasetup(self, name, func, requires=()):
         func._requires = requires
 
+
 class templatefilter(_templateregistrarbase):
     """Decorator to register template filer
 
@@ -387,6 +409,7 @@
     def _extrasetup(self, name, func, intype=None):
         func._intype = intype
 
+
 class templatefunc(_templateregistrarbase):
     """Decorator to register template function
 
@@ -419,12 +442,14 @@
 
     Otherwise, explicit 'templatefuncs.loadfunction()' is needed.
     """
+
     _getname = _funcregistrarbase._parsefuncdecl
 
     def _extrasetup(self, name, func, argspec=None, requires=()):
         func._argspec = argspec
         func._requires = requires
 
+
 class internalmerge(_funcregistrarbase):
     """Decorator to register in-process merge tool
 
@@ -480,16 +505,24 @@
 
     Otherwise, explicit 'filemerge.loadinternalmerge()' is needed.
     """
-    _docformat = "``:%s``\n    %s"
+
+    _docformat = b"``:%s``\n    %s"
 
     # merge type definitions:
     nomerge = None
-    mergeonly = 'mergeonly'  # just the full merge, no premerge
-    fullmerge = 'fullmerge'  # both premerge and merge
+    mergeonly = b'mergeonly'  # just the full merge, no premerge
+    fullmerge = b'fullmerge'  # both premerge and merge
 
-    def _extrasetup(self, name, func, mergetype,
-                    onfailure=None, precheck=None,
-                    binary=False, symlink=False):
+    def _extrasetup(
+        self,
+        name,
+        func,
+        mergetype,
+        onfailure=None,
+        precheck=None,
+        binary=False,
+        symlink=False,
+    ):
         func.mergetype = mergetype
         func.onfailure = onfailure
         func.precheck = precheck
@@ -498,4 +531,4 @@
         symlinkcap = symlink or mergetype == self.nomerge
 
         # actual capabilities, which this internal merge tool has
-        func.capabilities = {"binary": binarycap, "symlink": symlinkcap}
+        func.capabilities = {b"binary": binarycap, b"symlink": symlinkcap}
--- a/mercurial/repair.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/repair.py	Mon Oct 21 11:09:48 2019 -0400
@@ -28,45 +28,59 @@
     pycompat,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
+
 
-def backupbundle(repo, bases, heads, node, suffix, compress=True,
-                 obsolescence=True):
+def backupbundle(
+    repo, bases, heads, node, suffix, compress=True, obsolescence=True
+):
     """create a bundle with the specified revisions as a backup"""
 
-    backupdir = "strip-backup"
+    backupdir = b"strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)
 
     # Include a hash of all the nodes in the filename for uniqueness
-    allcommits = repo.set('%ln::%ln', bases, heads)
+    allcommits = repo.set(b'%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
-    totalhash = hashlib.sha1(''.join(allhashes)).digest()
-    name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
-                               hex(totalhash[:4]), suffix)
+    totalhash = hashlib.sha1(b''.join(allhashes)).digest()
+    name = b"%s/%s-%s-%s.hg" % (
+        backupdir,
+        short(node),
+        hex(totalhash[:4]),
+        suffix,
+    )
 
     cgversion = changegroup.localversion(repo)
     comp = None
-    if cgversion != '01':
-        bundletype = "HG20"
+    if cgversion != b'01':
+        bundletype = b"HG20"
         if compress:
-            comp = 'BZ'
+            comp = b'BZ'
     elif compress:
-        bundletype = "HG10BZ"
+        bundletype = b"HG10BZ"
     else:
-        bundletype = "HG10UN"
+        bundletype = b"HG10UN"
 
     outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
     contentopts = {
-        'cg.version': cgversion,
-        'obsolescence': obsolescence,
-        'phases': True,
+        b'cg.version': cgversion,
+        b'obsolescence': obsolescence,
+        b'phases': True,
     }
-    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
-                                  outgoing, contentopts, vfs, compression=comp)
+    return bundle2.writenewbundle(
+        repo.ui,
+        repo,
+        b'strip',
+        name,
+        bundletype,
+        outgoing,
+        contentopts,
+        vfs,
+        compression=comp,
+    )
+
 
 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
@@ -77,33 +91,34 @@
 
     return sorted(files)
 
+
 def _collectrevlog(revlog, striprev):
     _, brokenset = revlog.getstrippoint(striprev)
     return [revlog.linkrev(r) for r in brokenset]
 
-def _collectmanifest(repo, striprev):
-    return _collectrevlog(repo.manifestlog.getstorage(b''), striprev)
 
 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
 
-    s.update(_collectmanifest(repo, striprev))
+    for revlog in manifestrevlogs(repo):
+        s.update(_collectrevlog(revlog, striprev))
     for fname in files:
         s.update(_collectrevlog(repo.file(fname), striprev))
 
     return s
 
-def strip(ui, repo, nodelist, backup=True, topic='backup'):
+
+def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
     # This function requires the caller to lock the repo, but it operates
     # within a transaction of its own, and thus requires there to be no current
     # transaction when it is called.
     if repo.currenttransaction() is not None:
-        raise error.ProgrammingError('cannot strip from inside a transaction')
+        raise error.ProgrammingError(b'cannot strip from inside a transaction')
 
     # Simple way to maintain backwards compatibility for this
     # argument.
-    if backup in ['none', 'strip']:
+    if backup in [b'none', b'strip']:
         backup = False
 
     repo = repo.unfiltered()
@@ -150,11 +165,12 @@
     stripbases = [cl.node(r) for r in tostrip]
 
     stripobsidx = obsmarkers = ()
-    if repo.ui.configbool('devel', 'strip-obsmarkers'):
+    if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
         obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
     if obsmarkers:
-        stripobsidx = [i for i, m in enumerate(repo.obsstore)
-                       if m in obsmarkers]
+        stripobsidx = [
+            i for i, m in enumerate(repo.obsstore) if m in obsmarkers
+        ]
 
     newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
 
@@ -171,12 +187,19 @@
         # we are trying to strip.  This is harmless since the stripped markers
         # are already backed up and we did not touch the markers for the
         # saved changesets.
-        tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
-                                     compress=False, obsolescence=False)
+        tmpbundlefile = backupbundle(
+            repo,
+            savebases,
+            saveheads,
+            node,
+            b'temp',
+            compress=False,
+            obsolescence=False,
+        )
 
     with ui.uninterruptible():
         try:
-            with repo.transaction("strip") as tr:
+            with repo.transaction(b"strip") as tr:
                 # TODO this code violates the interface abstraction of the
                 # transaction and makes assumptions that file storage is
                 # using append-only files. We'll need some kind of storage
@@ -193,7 +216,7 @@
 
                 for i in pycompat.xrange(offset, len(tr._entries)):
                     file, troffset, ignore = tr._entries[i]
-                    with repo.svfs(file, 'a', checkambig=True) as fp:
+                    with repo.svfs(file, b'a', checkambig=True) as fp:
                         fp.truncate(troffset)
                     if troffset == 0:
                         repo.store.markremoved(file)
@@ -204,24 +227,25 @@
                 repo._phasecache.filterunknown(repo)
 
             if tmpbundlefile:
-                ui.note(_("adding branch\n"))
-                f = vfs.open(tmpbundlefile, "rb")
+                ui.note(_(b"adding branch\n"))
+                f = vfs.open(tmpbundlefile, b"rb")
                 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                 if not repo.ui.verbose:
                     # silence internal shuffling chatter
                     repo.ui.pushbuffer()
-                tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
-                txnname = 'strip'
+                tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
+                txnname = b'strip'
                 if not isinstance(gen, bundle2.unbundle20):
-                    txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
+                    txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl)
                 with repo.transaction(txnname) as tr:
-                    bundle2.applybundle(repo, gen, tr, source='strip',
-                                        url=tmpbundleurl)
+                    bundle2.applybundle(
+                        repo, gen, tr, source=b'strip', url=tmpbundleurl
+                    )
                 if not repo.ui.verbose:
                     repo.ui.popbuffer()
                 f.close()
 
-            with repo.transaction('repair') as tr:
+            with repo.transaction(b'repair') as tr:
                 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                 repo._bookmarks.applychanges(repo, tr, bmchanges)
 
@@ -231,19 +255,32 @@
                     undovfs.unlink(undofile)
                 except OSError as e:
                     if e.errno != errno.ENOENT:
-                        ui.warn(_('error removing %s: %s\n') %
-                                (undovfs.join(undofile),
-                                 stringutil.forcebytestr(e)))
+                        ui.warn(
+                            _(b'error removing %s: %s\n')
+                            % (
+                                undovfs.join(undofile),
+                                stringutil.forcebytestr(e),
+                            )
+                        )
 
-        except: # re-raises
+        except:  # re-raises
             if backupfile:
-                ui.warn(_("strip failed, backup bundle stored in '%s'\n")
-                        % vfs.join(backupfile))
+                ui.warn(
+                    _(b"strip failed, backup bundle stored in '%s'\n")
+                    % vfs.join(backupfile)
+                )
             if tmpbundlefile:
-                ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
-                        % vfs.join(tmpbundlefile))
-                ui.warn(_("(fix the problem, then recover the changesets with "
-                          "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
+                ui.warn(
+                    _(b"strip failed, unrecovered changes stored in '%s'\n")
+                    % vfs.join(tmpbundlefile)
+                )
+                ui.warn(
+                    _(
+                        b"(fix the problem, then recover the changesets with "
+                        b"\"hg unbundle '%s'\")\n"
+                    )
+                    % vfs.join(tmpbundlefile)
+                )
             raise
         else:
             if tmpbundlefile:
@@ -255,9 +292,10 @@
     # extensions can use it
     return backupfile
 
-def softstrip(ui, repo, nodelist, backup=True, topic='backup'):
+
+def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
     """perform a "soft" strip using the archived phase"""
-    tostrip = [c.node() for c in repo.set('sort(%ln::)', nodelist)]
+    tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
     if not tostrip:
         return None
 
@@ -266,7 +304,7 @@
         node = tostrip[0]
         backupfile = _createstripbackup(repo, tostrip, node, topic)
 
-    with repo.transaction('strip') as tr:
+    with repo.transaction(b'strip') as tr:
         phases.retractboundary(repo, tr, phases.archived, tostrip)
         bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
         repo._bookmarks.applychanges(repo, tr, bmchanges)
@@ -287,24 +325,26 @@
     if updatebm:
         # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
         # but is much faster
-        newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
+        newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
         if newbmtarget:
             newbmtarget = repo[newbmtarget.first()].node()
         else:
-            newbmtarget = '.'
+            newbmtarget = b'.'
     return newbmtarget, updatebm
 
+
 def _createstripbackup(repo, stripbases, node, topic):
     # backup the changeset we are about to strip
     vfs = repo.vfs
     cl = repo.changelog
     backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
-    repo.ui.status(_("saved backup bundle to %s\n") %
-                   vfs.join(backupfile))
-    repo.ui.log("backupbundle", "saved backup bundle to %s\n",
-                vfs.join(backupfile))
+    repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
+    repo.ui.log(
+        b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
+    )
     return backupfile
 
+
 def safestriproots(ui, repo, nodes):
     """return list of roots of nodes where descendants are covered by nodes"""
     torev = repo.unfiltered().changelog.rev
@@ -313,14 +353,17 @@
     # orphaned = affected - wanted
     # affected = descendants(roots(wanted))
     # wanted = revs
-    revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
+    revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
     tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
     notstrip = revs - tostrip
     if notstrip:
-        nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
-        ui.warn(_('warning: orphaned descendants detected, '
-                  'not stripping %s\n') % nodestr)
-    return [c.node() for c in repo.set('roots(%ld)', tostrip)]
+        nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
+        ui.warn(
+            _(b'warning: orphaned descendants detected, not stripping %s\n')
+            % nodestr
+        )
+    return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
+
 
 class stripcallback(object):
     """used as a transaction postclose callback"""
@@ -329,7 +372,7 @@
         self.ui = ui
         self.repo = repo
         self.backup = backup
-        self.topic = topic or 'backup'
+        self.topic = topic or b'backup'
         self.nodelist = []
 
     def addnodes(self, nodes):
@@ -340,6 +383,7 @@
         if roots:
             strip(self.ui, self.repo, roots, self.backup, self.topic)
 
+
 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
     """like strip, but works inside transaction and won't strip irreverent revs
 
@@ -355,28 +399,32 @@
         return strip(ui, repo, nodes, backup=backup, topic=topic)
     # transaction postclose callbacks are called in alphabet order.
     # use '\xff' as prefix so we are likely to be called last.
-    callback = tr.getpostclose('\xffstrip')
+    callback = tr.getpostclose(b'\xffstrip')
     if callback is None:
         callback = stripcallback(ui, repo, backup=backup, topic=topic)
-        tr.addpostclose('\xffstrip', callback)
+        tr.addpostclose(b'\xffstrip', callback)
     if topic:
         callback.topic = topic
     callback.addnodes(nodelist)
 
+
 def stripmanifest(repo, striprev, tr, files):
-    revlog = repo.manifestlog.getstorage(b'')
-    revlog.strip(striprev, tr)
-    striptrees(repo, tr, striprev, files)
+    for revlog in manifestrevlogs(repo):
+        revlog.strip(striprev, tr)
+
 
-def striptrees(repo, tr, striprev, files):
-    if 'treemanifest' in repo.requirements:
+def manifestrevlogs(repo):
+    yield repo.manifestlog.getstorage(b'')
+    if b'treemanifest' in repo.requirements:
         # This logic is safe if treemanifest isn't enabled, but also
         # pointless, so we skip it if treemanifest isn't enabled.
         for unencoded, encoded, size in repo.store.datafiles():
-            if (unencoded.startswith('meta/') and
-                unencoded.endswith('00manifest.i')):
+            if unencoded.startswith(b'meta/') and unencoded.endswith(
+                b'00manifest.i'
+            ):
                 dir = unencoded[5:-12]
-                repo.manifestlog.getstorage(dir).strip(striprev, tr)
+                yield repo.manifestlog.getstorage(dir)
+
 
 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.
@@ -385,9 +433,13 @@
     """
     repo = repo.unfiltered()
 
-    if 'fncache' not in repo.requirements:
-        ui.warn(_('(not rebuilding fncache because repository does not '
-                  'support fncache)\n'))
+    if b'fncache' not in repo.requirements:
+        ui.warn(
+            _(
+                b'(not rebuilding fncache because repository does not '
+                b'support fncache)\n'
+            )
+        )
         return
 
     with repo.lock():
@@ -398,8 +450,9 @@
         newentries = set()
         seenfiles = set()
 
-        progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
-                                   total=len(repo))
+        progress = ui.makeprogress(
+            _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
+        )
         for rev in repo:
             progress.update(rev)
 
@@ -410,8 +463,8 @@
                     continue
                 seenfiles.add(f)
 
-                i = 'data/%s.i' % f
-                d = 'data/%s.d' % f
+                i = b'data/%s.i' % f
+                d = b'data/%s.d' % f
 
                 if repo.store._exists(i):
                     newentries.add(i)
@@ -420,12 +473,12 @@
 
         progress.complete()
 
-        if 'treemanifest' in repo.requirements:
+        if b'treemanifest' in repo.requirements:
             # This logic is safe if treemanifest isn't enabled, but also
             # pointless, so we skip it if treemanifest isn't enabled.
             for dir in util.dirs(seenfiles):
-                i = 'meta/%s/00manifest.i' % dir
-                d = 'meta/%s/00manifest.d' % dir
+                i = b'meta/%s/00manifest.i' % dir
+                d = b'meta/%s/00manifest.d' % dir
 
                 if repo.store._exists(i):
                     newentries.add(i)
@@ -435,20 +488,23 @@
         addcount = len(newentries - oldentries)
         removecount = len(oldentries - newentries)
         for p in sorted(oldentries - newentries):
-            ui.write(_('removing %s\n') % p)
+            ui.write(_(b'removing %s\n') % p)
         for p in sorted(newentries - oldentries):
-            ui.write(_('adding %s\n') % p)
+            ui.write(_(b'adding %s\n') % p)
 
         if addcount or removecount:
-            ui.write(_('%d items added, %d removed from fncache\n') %
-                     (addcount, removecount))
+            ui.write(
+                _(b'%d items added, %d removed from fncache\n')
+                % (addcount, removecount)
+            )
             fnc.entries = newentries
             fnc._dirty = True
 
-            with repo.transaction('fncache') as tr:
+            with repo.transaction(b'fncache') as tr:
                 fnc.write(tr)
         else:
-            ui.write(_('fncache already up to date\n'))
+            ui.write(_(b'fncache already up to date\n'))
+
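+# For reference, a tracked path maps onto fncache entries as in this sketch
+# (hypothetical file ``foo/bar.txt``, mirroring the b'data/%s.i' logic above):
+#
+#     i = b'data/foo/bar.txt.i'  # revlog index
+#     d = b'data/foo/bar.txt.d'  # revlog data, absent for inline revlogs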
 
 def deleteobsmarkers(obsstore, indices):
     """Delete some obsmarkers from obsstore and return how many were deleted
@@ -473,7 +529,7 @@
             continue
         left.append(m)
 
-    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
+    newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
     for bytes in obsolete.encodemarkers(left, True, obsstore._version):
         newobsstorefile.write(bytes)
     newobsstorefile.close()
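 
 # Usage sketch (hedged): ``indices`` holds the integer positions of the
 # markers to delete, and the return value is how many were deleted:
 #
 #     n = deleteobsmarkers(repo.obsstore, {0, 2})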
--- a/mercurial/repocache.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/repocache.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,6 +19,7 @@
     util,
 )
 
+
 class repoloader(object):
     """Load repositories in background thread
 
@@ -68,8 +69,9 @@
         loader thread.
         """
         if self._thread and self._thread.is_alive():
-            raise error.ProgrammingError(b'cannot obtain cached repo while '
-                                         b'loader is active')
+            raise error.ProgrammingError(
+                b'cannot obtain cached repo while loader is active'
+            )
         return self._cache.peek(path, None)
 
     def _mainloop(self):
@@ -99,10 +101,15 @@
         except KeyError:
             repo = hg.repository(self._ui, path).unfiltered()
         _warmupcache(repo)
-        repo.ui.log(b'repocache', b'loaded repo into cache: %s (in %.3fs)\n',
-                    path, util.timer() - start)
+        repo.ui.log(
+            b'repocache',
+            b'loaded repo into cache: %s (in %.3fs)\n',
+            path,
+            util.timer() - start,
+        )
         self._cache.insert(path, repo)
 
+
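+# Usage sketch (assumed driver code; ``start``/``load``/``stop`` are the
+# loader-thread entry points implied by the class docstring):
+#
+#     loader = repoloader(ui, maxlen=4)
+#     loader.start()
+#     loader.load(path)  # queue a repository for background loading
+#     ...
+#     loader.stop()
+#     repo = loader.get(path)  # only valid while the loader is inactive
+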
 # TODO: think about proper API of preloading cache
 def _warmupcache(repo):
     repo.invalidateall()
@@ -115,17 +122,18 @@
         obsolete.getrevs(repo, name)
     repo._phasecache.loadphaserevs(repo)
 
+
 # TODO: think about proper API of attaching preloaded attributes
 def copycache(srcrepo, destrepo):
     """Copy cached attributes from srcrepo to destrepo"""
     destfilecache = destrepo._filecache
     srcfilecache = srcrepo._filecache
-    if 'changelog' in srcfilecache:
-        destfilecache['changelog'] = ce = srcfilecache['changelog']
+    if b'changelog' in srcfilecache:
+        destfilecache[b'changelog'] = ce = srcfilecache[b'changelog']
         ce.obj.opener = ce.obj._realopener = destrepo.svfs
-    if 'obsstore' in srcfilecache:
-        destfilecache['obsstore'] = ce = srcfilecache['obsstore']
+    if b'obsstore' in srcfilecache:
+        destfilecache[b'obsstore'] = ce = srcfilecache[b'obsstore']
         ce.obj.svfs = destrepo.svfs
-    if '_phasecache' in srcfilecache:
-        destfilecache['_phasecache'] = ce = srcfilecache['_phasecache']
+    if b'_phasecache' in srcfilecache:
+        destfilecache[b'_phasecache'] = ce = srcfilecache[b'_phasecache']
         ce.obj.opener = destrepo.svfs
--- a/mercurial/repository.py	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1870 +0,0 @@
-# repository.py - Interfaces and base classes for repositories and peers.
-#
-# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-from .i18n import _
-from . import (
-    error,
-)
-from .utils import (
-    interfaceutil,
-)
-
-# When narrowing is finalized and no longer subject to format changes,
-# we should move this to just "narrow" or similar.
-NARROW_REQUIREMENT = 'narrowhg-experimental'
-
-# Local repository feature string.
-
-# Revlogs are being used for file storage.
-REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
-# The storage part of the repository is shared from an external source.
-REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
-# LFS supported for backing file storage.
-REPO_FEATURE_LFS = b'lfs'
-# Repository supports being stream cloned.
-REPO_FEATURE_STREAM_CLONE = b'streamclone'
-# Files storage may lack data for all ancestors.
-REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
-
-REVISION_FLAG_CENSORED = 1 << 15
-REVISION_FLAG_ELLIPSIS = 1 << 14
-REVISION_FLAG_EXTSTORED = 1 << 13
-
-REVISION_FLAGS_KNOWN = (
-    REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
-
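-# Sketch (not part of the original constants): a storage backend can reject
-# unrecognized flags with
-#
-#     if flags & ~REVISION_FLAGS_KNOWN:
-#         raise error.Abort(_('unknown revision flags set'))
-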
-CG_DELTAMODE_STD = b'default'
-CG_DELTAMODE_PREV = b'previous'
-CG_DELTAMODE_FULL = b'fulltext'
-CG_DELTAMODE_P1 = b'p1'
-
-class ipeerconnection(interfaceutil.Interface):
-    """Represents a "connection" to a repository.
-
-    This is the base interface for representing a connection to a repository.
-    It holds basic properties and methods applicable to all peer types.
-
-    This is not a complete interface definition and should not be used
-    outside of this module.
-    """
-    ui = interfaceutil.Attribute("""ui.ui instance""")
-
-    def url():
-        """Returns a URL string representing this peer.
-
-        Currently, implementations expose the raw URL used to construct the
-        instance. It may contain credentials as part of the URL. The
-        expectations of the value aren't well-defined and this could lead to
-        data leakage.
-
-        TODO audit/clean consumers and more clearly define the contents of this
-        value.
-        """
-
-    def local():
-        """Returns a local repository instance.
-
-        If the peer represents a local repository, returns an object that
-        can be used to interface with it. Otherwise returns ``None``.
-        """
-
-    def peer():
-        """Returns an object conforming to this interface.
-
-        Most implementations will ``return self``.
-        """
-
-    def canpush():
-        """Returns a boolean indicating if this peer can be pushed to."""
-
-    def close():
-        """Close the connection to this peer.
-
-        This is called when the peer will no longer be used. Resources
-        associated with the peer should be cleaned up.
-        """
-
-class ipeercapabilities(interfaceutil.Interface):
-    """Peer sub-interface related to capabilities."""
-
-    def capable(name):
-        """Determine support for a named capability.
-
-        Returns ``False`` if capability not supported.
-
-        Returns ``True`` if boolean capability is supported. Returns a string
-        if capability support is non-boolean.
-
-        Capability strings may or may not map to wire protocol capabilities.
-        """
-
-    def requirecap(name, purpose):
-        """Require a capability to be present.
-
-        Raises a ``CapabilityError`` if the capability isn't present.
-        """
-
-class ipeercommands(interfaceutil.Interface):
-    """Client-side interface for communicating over the wire protocol.
-
-    This interface is used as a gateway to the Mercurial wire protocol.
-    Methods commonly call wire protocol commands of the same name.
-    """
-
-    def branchmap():
-        """Obtain heads in named branches.
-
-        Returns a dict mapping branch name to an iterable of nodes that are
-        heads on that branch.
-        """
-
-    def capabilities():
-        """Obtain capabilities of the peer.
-
-        Returns a set of string capabilities.
-        """
-
-    def clonebundles():
-        """Obtains the clone bundles manifest for the repo.
-
-        Returns the manifest as unparsed bytes.
-        """
-
-    def debugwireargs(one, two, three=None, four=None, five=None):
-        """Used to facilitate debugging of arguments passed over the wire."""
-
-    def getbundle(source, **kwargs):
-        """Obtain remote repository data as a bundle.
-
-        This command is how the bulk of repository data is transferred from
-        the peer to the local repository.
-
-        Returns a generator of bundle data.
-        """
-
-    def heads():
-        """Determine all known head revisions in the peer.
-
-        Returns an iterable of binary nodes.
-        """
-
-    def known(nodes):
-        """Determine whether multiple nodes are known.
-
-        Accepts an iterable of nodes whose presence to check for.
-
-        Returns an iterable of booleans indicating whether the corresponding
-        node at that index is known to the peer.
-        """
-
-    def listkeys(namespace):
-        """Obtain all keys in a pushkey namespace.
-
-        Returns an iterable of key names.
-        """
-
-    def lookup(key):
-        """Resolve a value to a known revision.
-
-        Returns a binary node of the resolved revision on success.
-        """
-
-    def pushkey(namespace, key, old, new):
-        """Set a value using the ``pushkey`` protocol.
-
-        Arguments correspond to the pushkey namespace and key to operate on and
-        the old and new values for that key.
-
-        Returns a string with the peer result. The value inside varies by the
-        namespace.
-        """
-
-    def stream_out():
-        """Obtain streaming clone data.
-
-        Successful result should be a generator of data chunks.
-        """
-
-    def unbundle(bundle, heads, url):
-        """Transfer repository data to the peer.
-
-        This is how the bulk of data during a push is transferred.
-
-        Returns the integer number of heads added to the peer.
-        """
-
-class ipeerlegacycommands(interfaceutil.Interface):
-    """Interface for implementing support for legacy wire protocol commands.
-
-    Wire protocol commands transition to legacy status when they are no longer
-    used by modern clients. To facilitate identifying which commands are
-    legacy, the interfaces are split.
-    """
-
-    def between(pairs):
-        """Obtain nodes between pairs of nodes.
-
-        ``pairs`` is an iterable of node pairs.
-
-        Returns an iterable of iterables of nodes corresponding to each
-        requested pair.
-        """
-
-    def branches(nodes):
-        """Obtain ancestor changesets of specific nodes back to a branch point.
-
-        For each requested node, the peer finds the first ancestor node that is
-        a DAG root or is a merge.
-
-        Returns an iterable of iterables with the resolved values for each node.
-        """
-
-    def changegroup(nodes, source):
-        """Obtain a changegroup with data for descendants of specified nodes."""
-
-    def changegroupsubset(bases, heads, source):
-        """Obtain a changegroup with data for descendants of ``bases``
-        that are ancestors of ``heads``.
-        """
-
-class ipeercommandexecutor(interfaceutil.Interface):
-    """Represents a mechanism to execute remote commands.
-
-    This is the primary interface for requesting that wire protocol commands
-    be executed. Instances of this interface are active in a context manager
-    and have a well-defined lifetime. When the context manager exits, all
-    outstanding requests are waited on.
-    """
-
-    def callcommand(name, args):
-        """Request that a named command be executed.
-
-        Receives the command name and a dictionary of command arguments.
-
-        Returns a ``concurrent.futures.Future`` that will resolve to the
-        result of that command request. That exact value is left up to
-        the implementation and possibly varies by command.
-
-        Not all commands can coexist with other commands in an executor
-        instance: it depends on the underlying wire protocol transport being
-        used and the command itself.
-
-        Implementations MAY call ``sendcommands()`` automatically if the
-        requested command cannot coexist with other commands in this executor.
-
-        Implementations MAY call ``sendcommands()`` automatically when the
-        future's ``result()`` is called. So, consumers using multiple
-        commands with an executor MUST ensure that ``result()`` is not called
-        until all command requests have been issued.
-        """
-
-    def sendcommands():
-        """Trigger submission of queued command requests.
-
-        Not all transports submit commands as soon as they are requested to
-        run. When called, this method forces queued command requests to be
-        issued. It will no-op if all commands have already been sent.
-
-        When called, no more new commands may be issued with this executor.
-        """
-
-    def close():
-        """Signal that this command request is finished.
-
-        When called, no more new commands may be issued. All outstanding
-        commands that have previously been issued are waited on before
-        returning. This not only includes waiting for the futures to resolve,
-        but also waiting for all response data to arrive. In other words,
-        calling this waits for all on-wire state for issued command requests
-        to finish.
-
-        When used as a context manager, this method is called when exiting the
-        context manager.
-
-        This method may call ``sendcommands()`` if there are buffered commands.
-        """
-
-class ipeerrequests(interfaceutil.Interface):
-    """Interface for executing commands on a peer."""
-
-    limitedarguments = interfaceutil.Attribute(
-        """True if the peer cannot receive large argument value for commands."""
-    )
-
-    def commandexecutor():
-        """A context manager that resolves to an ipeercommandexecutor.
-
-        The object this resolves to can be used to issue command requests
-        to the peer.
-
-        Callers should call its ``callcommand`` method to issue command
-        requests.
-
-        A new executor should be obtained for each distinct set of commands
-        (possibly just a single command) that the consumer wants to execute
-        as part of a single operation or round trip. This is because some
-        peers are half-duplex and/or don't support persistent connections.
-        e.g. in the case of HTTP peers, commands sent to an executor represent
-        a single HTTP request. While some peers may support multiple command
-        sends over the wire per executor, consumers need to code to the least
-        capable peer. So it should be assumed that command executors buffer
-        called commands until they are told to send them and that each
-        command executor could result in a new connection or wire-level request
-        being issued.
-        """
-
-class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
-    """Unified interface for peer repositories.
-
-    All peer instances must conform to this interface.
-    """
-
-class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
-    """Unified peer interface for wire protocol version 2 peers."""
-
-    apidescriptor = interfaceutil.Attribute(
-        """Data structure holding description of server API.""")
-
-@interfaceutil.implementer(ipeerbase)
-class peer(object):
-    """Base class for peer repositories."""
-
-    limitedarguments = False
-
-    def capable(self, name):
-        caps = self.capabilities()
-        if name in caps:
-            return True
-
-        name = '%s=' % name
-        for cap in caps:
-            if cap.startswith(name):
-                return cap[len(name):]
-
-        return False
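-
-    # Example: given capabilities() == {'lookup', 'bundle2=HG20'},
-    # capable('lookup') is True, capable('bundle2') returns 'HG20', and
-    # capable('unknown') is False.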
-
-    def requirecap(self, name, purpose):
-        if self.capable(name):
-            return
-
-        raise error.CapabilityError(
-            _('cannot %s; remote repository does not support the '
-              '\'%s\' capability') % (purpose, name))
-
-class iverifyproblem(interfaceutil.Interface):
-    """Represents a problem with the integrity of the repository.
-
-    Instances of this interface are emitted to describe an integrity issue
-    with a repository (e.g. corrupt storage, missing data, etc).
-
-    Instances are essentially messages associated with severity.
-    """
-    warning = interfaceutil.Attribute(
-        """Message indicating a non-fatal problem.""")
-
-    error = interfaceutil.Attribute(
-        """Message indicating a fatal problem.""")
-
-    node = interfaceutil.Attribute(
-        """Revision encountering the problem.
-
-        ``None`` means the problem doesn't apply to a single revision.
-        """)
-
-class irevisiondelta(interfaceutil.Interface):
-    """Represents a delta between one revision and another.
-
-    Instances convey enough information to allow a revision to be exchanged
-    with another repository.
-
-    Instances represent the fulltext revision data or a delta against
-    another revision. Therefore the ``revision`` and ``delta`` attributes
-    are mutually exclusive.
-
-    Typically used for changegroup generation.
-    """
-
-    node = interfaceutil.Attribute(
-        """20 byte node of this revision.""")
-
-    p1node = interfaceutil.Attribute(
-        """20 byte node of 1st parent of this revision.""")
-
-    p2node = interfaceutil.Attribute(
-        """20 byte node of 2nd parent of this revision.""")
-
-    linknode = interfaceutil.Attribute(
-        """20 byte node of the changelog revision this node is linked to.""")
-
-    flags = interfaceutil.Attribute(
-        """2 bytes of integer flags that apply to this revision.
-
-        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
-        """)
-
-    basenode = interfaceutil.Attribute(
-        """20 byte node of the revision this data is a delta against.
-
-        ``nullid`` indicates that the revision is a full revision and not
-        a delta.
-        """)
-
-    baserevisionsize = interfaceutil.Attribute(
-        """Size of base revision this delta is against.
-
-        May be ``None`` if ``basenode`` is ``nullid``.
-        """)
-
-    revision = interfaceutil.Attribute(
-        """Raw fulltext of revision data for this node.""")
-
-    delta = interfaceutil.Attribute(
-        """Delta between ``basenode`` and ``node``.
-
-        Stored in the bdiff delta format.
-        """)
-
-class ifilerevisionssequence(interfaceutil.Interface):
-    """Contains index data for all revisions of a file.
-
-    Types implementing this behave like lists of tuples. The index
-    in the list corresponds to the revision number. The values contain
-    index metadata.
-
-    The *null* revision (revision number -1) is always the last item
-    in the index.
-    """
-
-    def __len__():
-        """The total number of revisions."""
-
-    def __getitem__(rev):
-        """Returns the object having a specific revision number.
-
-        Returns an 8-tuple with the following fields:
-
-        offset+flags
-           Contains the offset and flags for the revision. 64-bit unsigned
-           integer where the first 6 bytes are the offset and the next 2 bytes
-           are flags. The offset can be 0 if it is not used by the store.
-        compressed size
-            Size of the revision data in the store. It can be 0 if it isn't
-            needed by the store.
-        uncompressed size
-            Fulltext size. It can be 0 if it isn't needed by the store.
-        base revision
-            Revision number of revision the delta for storage is encoded
-            against. -1 indicates not encoded against a base revision.
-        link revision
-            Revision number of changelog revision this entry is related to.
-        p1 revision
-            Revision number of 1st parent. -1 if no 1st parent.
-        p2 revision
-            Revision number of 2nd parent. -1 if no 2nd parent.
-        node
-            Binary node value for this revision number.
-
-        Negative values should index off the end of the sequence. ``-1``
-        should return the null revision. ``-2`` should return the most
-        recent revision.
-        """
-
-    def __contains__(rev):
-        """Whether a revision number exists."""
-
-    def insert(self, i, entry):
-        """Add an item to the index at specific revision."""
-
-class ifileindex(interfaceutil.Interface):
-    """Storage interface for index data of a single file.
-
-    File storage data is divided into index metadata and data storage.
-    This interface defines the index portion of the interface.
-
-    The index logically consists of:
-
-    * A mapping between revision numbers and nodes.
-    * DAG data (storing and querying the relationship between nodes).
-    * Metadata to facilitate storage.
-    """
-    def __len__():
-        """Obtain the number of revisions stored for this file."""
-
-    def __iter__():
-        """Iterate over revision numbers for this file."""
-
-    def hasnode(node):
-        """Returns a bool indicating if a node is known to this store.
-
-        Implementations must only return True for full, binary node values:
-        hex nodes, revision numbers, and partial node matches must be
-        rejected.
-
-        The null node is never present.
-        """
-
-    def revs(start=0, stop=None):
-        """Iterate over revision numbers for this file, with control."""
-
-    def parents(node):
-        """Returns a 2-tuple of parent nodes for a revision.
-
-        Values will be ``nullid`` if the parent is empty.
-        """
-
-    def parentrevs(rev):
-        """Like parents() but operates on revision numbers."""
-
-    def rev(node):
-        """Obtain the revision number given a node.
-
-        Raises ``error.LookupError`` if the node is not known.
-        """
-
-    def node(rev):
-        """Obtain the node value given a revision number.
-
-        Raises ``IndexError`` if the node is not known.
-        """
-
-    def lookup(node):
-        """Attempt to resolve a value to a node.
-
-        Value can be a binary node, hex node, revision number, or a string
-        that can be converted to an integer.
-
-        Raises ``error.LookupError`` if a node could not be resolved.
-        """
-
-    def linkrev(rev):
-        """Obtain the changeset revision number a revision is linked to."""
-
-    def iscensored(rev):
-        """Return whether a revision's content has been censored."""
-
-    def commonancestorsheads(node1, node2):
-        """Obtain an iterable of nodes containing heads of common ancestors.
-
-        See ``ancestor.commonancestorsheads()``.
-        """
-
-    def descendants(revs):
-        """Obtain descendant revision numbers for a set of revision numbers.
-
-        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
-        """
-
-    def heads(start=None, stop=None):
-        """Obtain a list of nodes that are DAG heads, with control.
-
-        The set of revisions examined can be limited by specifying
-        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
-        iterable of nodes. DAG traversal starts at earlier revision
-        ``start`` and iterates forward until any node in ``stop`` is
-        encountered.
-        """
-
-    def children(node):
-        """Obtain nodes that are children of a node.
-
-        Returns a list of nodes.
-        """
-
-class ifiledata(interfaceutil.Interface):
-    """Storage interface for data storage of a specific file.
-
-    This complements ``ifileindex`` and provides an interface for accessing
-    data for a tracked file.
-    """
-    def size(rev):
-        """Obtain the fulltext size of file data.
-
-        Any metadata is excluded from size measurements.
-        """
-
-    def revision(node, raw=False):
-        """"Obtain fulltext data for a node.
-
-        By default, any storage transformations are applied before the data
-        is returned. If ``raw`` is True, non-raw storage transformations
-        are not applied.
-
-        The fulltext data may contain a header containing metadata. Most
-        consumers should use ``read()`` to obtain the actual file data.
-        """
-
-    def read(node):
-        """Resolve file fulltext data.
-
-        This is similar to ``revision()`` except any metadata in the data
-        headers is stripped.
-        """
-
-    def renamed(node):
-        """Obtain copy metadata for a node.
-
-        Returns ``False`` if no copy metadata is stored or a 2-tuple of
-        (path, node) from which this revision was copied.
-        """
-
-    def cmp(node, fulltext):
-        """Compare fulltext to another revision.
-
-        Returns True if the fulltext is different from what is stored.
-
-        This takes copy metadata into account.
-
-        TODO better document the copy metadata and censoring logic.
-        """
-
-    def emitrevisions(nodes,
-                      nodesorder=None,
-                      revisiondata=False,
-                      assumehaveparentrevisions=False,
-                      deltamode=CG_DELTAMODE_STD):
-        """Produce ``irevisiondelta`` for revisions.
-
-        Given an iterable of nodes, emits objects conforming to the
-        ``irevisiondelta`` interface that describe revisions in storage.
-
-        This method is a generator.
-
-        The input nodes may be unordered. Implementations must ensure that a
-        node's parents are emitted before the node itself. Transitively, this
-        means that a node may only be emitted once all its ancestors in
-        ``nodes`` have also been emitted.
-
-        By default, emits "index" data (the ``node``, ``p1node``, and
-        ``p2node`` attributes). If ``revisiondata`` is set, revision data
-        will also be present on the emitted objects.
-
-        With default argument values, implementations can choose to emit
-        either fulltext revision data or a delta. When emitting deltas,
-        implementations must consider whether the delta's base revision
-        fulltext is available to the receiver.
-
-        The base revision fulltext is guaranteed to be available if any of
-        the following are met:
-
-        * Its fulltext revision was emitted by this method call.
-        * A delta for that revision was emitted by this method call.
-        * ``assumehaveparentrevisions`` is True and the base revision is a
-          parent of the node.
-
-        ``nodesorder`` can be used to control the order that revisions are
-        emitted. By default, revisions can be reordered as long as they are
-        in DAG topological order (see above). If the value is ``nodes``,
-        the iteration order from ``nodes`` should be used. If the value is
-        ``storage``, then the native order from the backing storage layer
-        is used. (Not all storage layers will have strong ordering and behavior
-        of this mode is storage-dependent.) ``nodes`` ordering can force
-        revisions to be emitted before their ancestors, so consumers should
-        use it with care.
-
-        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
-        be set and it is the caller's responsibility to resolve it, if needed.
-
-        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
-        all revision data should be emitted as deltas against the revision
-        emitted just prior. The initial revision should be a delta against its
-        1st parent.
-        """
-
-class ifilemutation(interfaceutil.Interface):
-    """Storage interface for mutation events of a tracked file."""
-
-    def add(filedata, meta, transaction, linkrev, p1, p2):
-        """Add a new revision to the store.
-
-        Takes file data, dictionary of metadata, a transaction, linkrev,
-        and parent nodes.
-
-        Returns the node that was added.
-
-        May no-op if a revision matching the supplied data is already stored.
-        """
-
-    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
-                    flags=0, cachedelta=None):
-        """Add a new revision to the store.
-
-        This is similar to ``add()`` except it operates at a lower level.
-
-        The data passed in already contains a metadata header, if any.
-
-        ``node`` and ``flags`` can be used to define the expected node and
-        the flags to use with storage. ``flags`` is a bitwise value composed
-        of the various ``REVISION_FLAG_*`` constants.
-
-        ``add()`` is usually called when adding files from e.g. the working
-        directory. ``addrevision()`` is often called by ``add()`` and for
-        scenarios where revision data has already been computed, such as when
-        applying raw data from a peer repo.
-        """
-
-    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
-                 maybemissingparents=False):
-        """Process a series of deltas for storage.
-
-        ``deltas`` is an iterable of 7-tuples of
-        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
-        to add.
-
-        The ``delta`` field contains ``mpatch`` data to apply to a base
-        revision, identified by ``deltabase``. The base node can be
-        ``nullid``, in which case the header from the delta can be ignored
-        and the delta used as the fulltext.
-
-        ``addrevisioncb`` should be called for each node as it is committed.
-
-        ``maybemissingparents`` is a bool indicating whether the incoming
-        data may reference parents/ancestor revisions that aren't present.
-        This flag is set when receiving data into a "shallow" store that
-        doesn't hold all history.
-
-        Returns a list of nodes that were processed. A node will be in the list
-        even if it existed in the store previously.
-        """
-
-    def censorrevision(tr, node, tombstone=b''):
-        """Remove the content of a single revision.
-
-        The specified ``node`` will have its content purged from storage.
-        Future attempts to access the revision data for this node will
-        result in failure.
-
-        A ``tombstone`` message can optionally be stored. This message may be
-        displayed to users when they attempt to access the missing revision
-        data.
-
-        Storage backends may have stored deltas against the previous content
-        in this revision. As part of censoring a revision, these storage
-        backends are expected to rewrite any internally stored deltas such
-        that they no longer reference the deleted content.
-        """
-
-    def getstrippoint(minlink):
-        """Find the minimum revision that must be stripped to strip a linkrev.
-
-        Returns a 2-tuple containing the minimum revision number and a set
-        of all revisions numbers that would be broken by this strip.
-
-        TODO this is highly revlog centric and should be abstracted into
-        a higher-level deletion API. ``repair.strip()`` relies on this.
-        """
-
-    def strip(minlink, transaction):
-        """Remove storage of items starting at a linkrev.
-
-        This uses ``getstrippoint()`` to determine the first node to remove.
-        Then it effectively truncates storage for all revisions after that.
-
-        TODO this is highly revlog centric and should be abstracted into a
-        higher-level deletion API.
-        """
-
-class ifilestorage(ifileindex, ifiledata, ifilemutation):
-    """Complete storage interface for a single tracked file."""
-
-    def files():
-        """Obtain paths that are backing storage for this file.
-
-        TODO this is used heavily by verify code and there should probably
-        be a better API for that.
-        """
-
-    def storageinfo(exclusivefiles=False, sharedfiles=False,
-                    revisionscount=False, trackedsize=False,
-                    storedsize=False):
-        """Obtain information about storage for this file's data.
-
-        Returns a dict describing storage for this tracked path. The keys
-        in the dict map to arguments of the same name. The arguments are bools
-        indicating whether to calculate and obtain that data.
-
-        exclusivefiles
-           Iterable of (vfs, path) describing files that are exclusively
-           used to back storage for this tracked path.
-
-        sharedfiles
-           Iterable of (vfs, path) describing files that are used to back
-           storage for this tracked path. Those files may also provide storage
-           for other stored entities.
-
-        revisionscount
-           Number of revisions available for retrieval.
-
-        trackedsize
-           Total size in bytes of all tracked revisions. This is a sum of the
-           length of the fulltext of all revisions.
-
-        storedsize
-           Total size in bytes used to store data for all tracked revisions.
-           This is commonly less than ``trackedsize`` due to internal usage
-           of deltas rather than fulltext revisions.
-
-        Not all storage backends may support all queries or have a reasonable
-        value to use. In that case, the value should be set to ``None`` and
-        callers are expected to handle this special value.
-        """
-
-    def verifyintegrity(state):
-        """Verifies the integrity of file storage.
-
-        ``state`` is a dict holding state of the verifier process. It can be
-        used to communicate data between invocations of multiple storage
-        primitives.
-
-        If individual revisions cannot have their revision content resolved,
-        the method is expected to set the ``skipread`` key to a set of nodes
-        that encountered problems.
-
-        The method yields objects conforming to the ``iverifyproblem``
-        interface.
-        """
-
-class idirs(interfaceutil.Interface):
-    """Interface representing a collection of directories from paths.
-
-    This interface is essentially a derived data structure representing
-    directories from a collection of paths.
-    """
-
-    def addpath(path):
-        """Add a path to the collection.
-
-        All directories in the path will be added to the collection.
-        """
-
-    def delpath(path):
-        """Remove a path from the collection.
-
-        If the removal was the last path in a particular directory, the
-        directory is removed from the collection.
-        """
-
-    def __iter__():
-        """Iterate over the directories in this collection of paths."""
-
-    def __contains__(path):
-        """Whether a specific directory is in this collection."""
-
-class imanifestdict(interfaceutil.Interface):
-    """Interface representing a manifest data structure.
-
-    A manifest is effectively a dict mapping paths to entries. Each entry
-    consists of a binary node and extra flags affecting that entry.
-    """
-
-    def __getitem__(path):
-        """Returns the binary node value for a path in the manifest.
-
-        Raises ``KeyError`` if the path does not exist in the manifest.
-
-        Equivalent to ``self.find(path)[0]``.
-        """
-
-    def find(path):
-        """Returns the entry for a path in the manifest.
-
-        Returns a 2-tuple of (node, flags).
-
-        Raises ``KeyError`` if the path does not exist in the manifest.
-        """
-
-    def __len__():
-        """Return the number of entries in the manifest."""
-
-    def __nonzero__():
-        """Returns True if the manifest has entries, False otherwise."""
-
-    __bool__ = __nonzero__
-
-    def __setitem__(path, node):
-        """Define the node value for a path in the manifest.
-
-        If the path is already in the manifest, its flags will be copied to
-        the new entry.
-        """
-
-    def __contains__(path):
-        """Whether a path exists in the manifest."""
-
-    def __delitem__(path):
-        """Remove a path from the manifest.
-
-        Raises ``KeyError`` if the path is not in the manifest.
-        """
-
-    def __iter__():
-        """Iterate over paths in the manifest."""
-
-    def iterkeys():
-        """Iterate over paths in the manifest."""
-
-    def keys():
-        """Obtain a list of paths in the manifest."""
-
-    def filesnotin(other, match=None):
-        """Obtain the set of paths in this manifest but not in another.
-
-        ``match`` is an optional matcher function to be applied to both
-        manifests.
-
-        Returns a set of paths.
-        """
-
-    def dirs():
-        """Returns an object implementing the ``idirs`` interface."""
-
-    def hasdir(dir):
-        """Returns a bool indicating if a directory is in this manifest."""
-
-    def matches(match):
-        """Generate a new manifest filtered through a matcher.
-
-        Returns an object conforming to the ``imanifestdict`` interface.
-        """
-
-    def walk(match):
-        """Generator of paths in manifest satisfying a matcher.
-
-        This is equivalent to ``self.matches(match).iterkeys()`` except a new
-        manifest object is not created.
-
-        If the matcher has explicit files listed and they don't exist in
-        the manifest, ``match.bad()`` is called for each missing file.
-        """
-
-    def diff(other, match=None, clean=False):
-        """Find differences between this manifest and another.
-
-        This manifest is compared to ``other``.
-
-        If ``match`` is provided, the two manifests are filtered against this
-        matcher and only entries satisfying the matcher are compared.
-
-        If ``clean`` is True, unchanged files are included in the returned
-        object.
-
-        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
-        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
-        represents the node and flags for this manifest and ``(node2, flag2)``
-        are the same for the other manifest.
-        """
-
-    def setflag(path, flag):
-        """Set the flag value for a given path.
-
-        Raises ``KeyError`` if the path is not already in the manifest.
-        """
-
-    def get(path, default=None):
-        """Obtain the node value for a path or a default value if missing."""
-
-    def flags(path, default=''):
-        """Return the flags value for a path or a default value if missing."""
-
-    def copy():
-        """Return a copy of this manifest."""
-
-    def items():
-        """Returns an iterable of (path, node) for items in this manifest."""
-
-    def iteritems():
-        """Identical to items()."""
-
-    def iterentries():
-        """Returns an iterable of (path, node, flags) for this manifest.
-
-        Similar to ``iteritems()`` except items are a 3-tuple and include
-        flags.
-        """
-
-    def text():
-        """Obtain the raw data representation for this manifest.
-
-        Result is used to create a manifest revision.
-        """
-
-    def fastdelta(base, changes):
-        """Obtain a delta between this manifest and another given changes.
-
-        ``base`` is the raw data representation of another manifest.
-
-        ``changes`` is an iterable of ``(path, to_delete)``.
-
-        Returns a 2-tuple containing ``bytearray(self.text())`` and the
-        delta between ``base`` and this manifest.
-        """
-
-class imanifestrevisionbase(interfaceutil.Interface):
-    """Base interface representing a single revision of a manifest.
-
-    Should not be used as a primary interface: should always be inherited
-    as part of a larger interface.
-    """
-
-    def new():
-        """Obtain a new manifest instance.
-
-        Returns an object conforming to the ``imanifestrevisionwritable``
-        interface. The instance will be associated with the same
-        ``imanifestlog`` collection as this instance.
-        """
-
-    def copy():
-        """Obtain a copy of this manifest instance.
-
-        Returns an object conforming to the ``imanifestrevisionwritable``
-        interface. The instance will be associated with the same
-        ``imanifestlog`` collection as this instance.
-        """
-
-    def read():
-        """Obtain the parsed manifest data structure.
-
-        The returned object conforms to the ``imanifestdict`` interface.
-        """
-
-class imanifestrevisionstored(imanifestrevisionbase):
-    """Interface representing a manifest revision committed to storage."""
-
-    def node():
-        """The binary node for this manifest."""
-
-    parents = interfaceutil.Attribute(
-        """List of binary nodes that are parents for this manifest revision."""
-    )
-
-    def readdelta(shallow=False):
-        """Obtain the manifest data structure representing changes from parent.
-
-        This manifest is compared to its 1st parent. A new manifest representing
-        those differences is constructed.
-
-        The returned object conforms to the ``imanifestdict`` interface.
-        """
-
-    def readfast(shallow=False):
-        """Calls either ``read()`` or ``readdelta()``.
-
-        The faster of the two options is called.
-        """
-
-    def find(key):
-        """Calls self.read().find(key)``.
-
-        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
-        """
-
-class imanifestrevisionwritable(imanifestrevisionbase):
-    """Interface representing a manifest revision that can be committed."""
-
-    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
-        """Add this revision to storage.
-
-        Takes a transaction object, the changeset revision number it will
-        be associated with, its parent nodes, and lists of added and
-        removed paths.
-
-        If match is provided, storage can choose not to inspect or write out
-        items that do not match. Storage is still required to be able to provide
-        the full manifest in the future for any directories written (these
-        manifests should not be "narrowed on disk").
-
-        Returns the binary node of the created revision.
-        """
-
-class imanifeststorage(interfaceutil.Interface):
-    """Storage interface for manifest data."""
-
-    tree = interfaceutil.Attribute(
-        """The path to the directory this manifest tracks.
-
-        The empty bytestring represents the root manifest.
-        """)
-
-    index = interfaceutil.Attribute(
-        """An ``ifilerevisionssequence`` instance.""")
-
-    indexfile = interfaceutil.Attribute(
-        """Path of revlog index file.
-
-        TODO this is revlog specific and should not be exposed.
-        """)
-
-    opener = interfaceutil.Attribute(
-        """VFS opener to use to access underlying files used for storage.
-
-        TODO this is revlog specific and should not be exposed.
-        """)
-
-    version = interfaceutil.Attribute(
-        """Revlog version number.
-
-        TODO this is revlog specific and should not be exposed.
-        """)
-
-    _generaldelta = interfaceutil.Attribute(
-        """Whether generaldelta storage is being used.
-
-        TODO this is revlog specific and should not be exposed.
-        """)
-
-    fulltextcache = interfaceutil.Attribute(
-        """Dict with cache of fulltexts.
-
-        TODO this doesn't feel appropriate for the storage interface.
-        """)
-
-    def __len__():
-        """Obtain the number of revisions stored for this manifest."""
-
-    def __iter__():
-        """Iterate over revision numbers for this manifest."""
-
-    def rev(node):
-        """Obtain the revision number given a binary node.
-
-        Raises ``error.LookupError`` if the node is not known.
-        """
-
-    def node(rev):
-        """Obtain the node value given a revision number.
-
-        Raises ``error.LookupError`` if the revision is not known.
-        """
-
-    def lookup(value):
-        """Attempt to resolve a value to a node.
-
-        Value can be a binary node, hex node, revision number, or a bytes
-        that can be converted to an integer.
-
-        Raises ``error.LookupError`` if a node could not be resolved.
-        """
-
-    def parents(node):
-        """Returns a 2-tuple of parent nodes for a node.
-
-        Values will be ``nullid`` if the parent is empty.
-        """
-
-    def parentrevs(rev):
-        """Like parents() but operates on revision numbers."""
-
-    def linkrev(rev):
-        """Obtain the changeset revision number a revision is linked to."""
-
-    def revision(node, _df=None, raw=False):
-        """Obtain fulltext data for a node."""
-
-    def revdiff(rev1, rev2):
-        """Obtain a delta between two revision numbers.
-
-        The returned data is the result of ``bdiff.bdiff()`` on the raw
-        revision data.
-        """
-
-    def cmp(node, fulltext):
-        """Compare fulltext to another revision.
-
-        Returns True if the fulltext is different from what is stored.
-        """
-
-    def emitrevisions(nodes,
-                      nodesorder=None,
-                      revisiondata=False,
-                      assumehaveparentrevisions=False):
-        """Produce ``irevisiondelta`` describing revisions.
-
-        See the documentation for ``ifiledata`` for more.
-        """
-
-    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
-        """Process a series of deltas for storage.
-
-        See the documentation in ``ifilemutation`` for more.
-        """
-
-    def rawsize(rev):
-        """Obtain the size of tracked data.
-
-        Is equivalent to ``len(m.revision(node, raw=True))``.
-
-        TODO this method is only used by upgrade code and may be removed.
-        """
-
-    def getstrippoint(minlink):
-        """Find minimum revision that must be stripped to strip a linkrev.
-
-        See the documentation in ``ifilemutation`` for more.
-        """
-
-    def strip(minlink, transaction):
-        """Remove storage of items starting at a linkrev.
-
-        See the documentation in ``ifilemutation`` for more.
-        """
-
-    def checksize():
-        """Obtain the expected sizes of backing files.
-
-        TODO this is used by verify and it should not be part of the interface.
-        """
-
-    def files():
-        """Obtain paths that are backing storage for this manifest.
-
-        TODO this is used by verify and there should probably be a better API
-        for this functionality.
-        """
-
-    def deltaparent(rev):
-        """Obtain the revision that a revision is delta'd against.
-
-        TODO delta encoding is an implementation detail of storage and should
-        not be exposed to the storage interface.
-        """
-
-    def clone(tr, dest, **kwargs):
-        """Clone this instance to another."""
-
-    def clearcaches(clear_persisted_data=False):
-        """Clear any caches associated with this instance."""
-
-    def dirlog(d):
-        """Obtain a manifest storage instance for a tree."""
-
-    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
-            match=None):
-        """Add a revision to storage.
-
-        ``m`` is an object conforming to ``imanifestdict``.
-
-        ``link`` is the linkrev revision number.
-
-        ``p1`` and ``p2`` are the parent revision numbers.
-
-        ``added`` and ``removed`` are iterables of added and removed paths,
-        respectively.
-
-        ``readtree`` is a function that can be used to read the child tree(s)
-        when recursively writing the full tree structure when using
-        treemanifests.
-
-        ``match`` is a matcher that can be used to hint to storage that not all
-        paths must be inspected; this is an optimization and can be safely
-        ignored. Note that the storage must still be able to reproduce a full
-        manifest including files that did not match.
-        """
-
-    def storageinfo(exclusivefiles=False, sharedfiles=False,
-                    revisionscount=False, trackedsize=False,
-                    storedsize=False):
-        """Obtain information about storage for this manifest's data.
-
-        See ``ifilestorage.storageinfo()`` for a description of this method.
-        This one behaves the same way, except for manifest data.
-        """
-
-class imanifestlog(interfaceutil.Interface):
-    """Interface representing a collection of manifest snapshots.
-
-    Represents the root manifest in a repository.
-
-    Also serves as a means to access nested tree manifests and to cache
-    tree manifests.
-    """
-
-    def __getitem__(node):
-        """Obtain a manifest instance for a given binary node.
-
-        Equivalent to calling ``self.get('', node)``.
-
-        The returned object conforms to the ``imanifestrevisionstored``
-        interface.
-        """
-
-    def get(tree, node, verify=True):
-        """Retrieve the manifest instance for a given directory and binary node.
-
-        ``node`` always refers to the node of the root manifest (which will be
-        the only manifest if flat manifests are being used).
-
-        If ``tree`` is the empty string, the root manifest is returned.
-        Otherwise the manifest for the specified directory will be returned
-        (requires tree manifests).
-
-        If ``verify`` is True, ``LookupError`` is raised if the node is not
-        known.
-
-        The returned object conforms to the ``imanifestrevisionstored``
-        interface.
-        """
-
-    def getstorage(tree):
-        """Retrieve an interface to storage for a particular tree.
-
-        If ``tree`` is the empty bytestring, storage for the root manifest will
-        be returned. Otherwise storage for a tree manifest is returned.
-
-        TODO formalize interface for returned object.
-        """
-
-    def clearcaches():
-        """Clear caches associated with this collection."""
-
-    def rev(node):
-        """Obtain the revision number for a binary node.
-
-        Raises ``error.LookupError`` if the node is not known.
-        """
-
-class ilocalrepositoryfilestorage(interfaceutil.Interface):
-    """Local repository sub-interface providing access to tracked file storage.
-
-    This interface defines how a repository accesses storage for a single
-    tracked file path.
-    """
-
-    def file(f):
-        """Obtain a filelog for a tracked path.
-
-        The returned type conforms to the ``ifilestorage`` interface.
-        """
-
-class ilocalrepositorymain(interfaceutil.Interface):
-    """Main interface for local repositories.
-
-    This currently captures the reality of things, not how things should be.
-    """
-
-    supportedformats = interfaceutil.Attribute(
-        """Set of requirements that apply to stream clone.
-
-        This is actually a class attribute and is shared among all instances.
-        """)
-
-    supported = interfaceutil.Attribute(
-        """Set of requirements that this repo is capable of opening.""")
-
-    requirements = interfaceutil.Attribute(
-        """Set of requirements this repo uses.""")
-
-    features = interfaceutil.Attribute(
-        """Set of "features" this repository supports.
-
-        A "feature" is a loosely-defined term. It can refer to a feature
-        in the classical sense or can describe an implementation detail
-        of the repository. For example, a ``readonly`` feature may denote
-        the repository as read-only. Or a ``revlogfilestore`` feature may
-        denote that the repository is using revlogs for file storage.
-
-        The intent of features is to provide a machine-queryable mechanism
-        for repo consumers to test for various repository characteristics.
-
-        Features are similar to ``requirements``. The main difference is that
-        requirements are stored on-disk and represent requirements to open the
-        repository. Features are more run-time capabilities of the repository
-        and more granular capabilities (which may be derived from requirements).
-        """)
-
-    filtername = interfaceutil.Attribute(
-        """Name of the repoview that is active on this repo.""")
-
-    wvfs = interfaceutil.Attribute(
-        """VFS used to access the working directory.""")
-
-    vfs = interfaceutil.Attribute(
-        """VFS rooted at the .hg directory.
-
-        Used to access repository data not in the store.
-        """)
-
-    svfs = interfaceutil.Attribute(
-        """VFS rooted at the store.
-
-        Used to access repository data in the store. Typically .hg/store.
-        But can point elsewhere if the store is shared.
-        """)
-
-    root = interfaceutil.Attribute(
-        """Path to the root of the working directory.""")
-
-    path = interfaceutil.Attribute(
-        """Path to the .hg directory.""")
-
-    origroot = interfaceutil.Attribute(
-        """The filesystem path that was used to construct the repo.""")
-
-    auditor = interfaceutil.Attribute(
-        """A pathauditor for the working directory.
-
-        This checks if a path refers to a nested repository.
-
-        Operates on the filesystem.
-        """)
-
-    nofsauditor = interfaceutil.Attribute(
-        """A pathauditor for the working directory.
-
-        This is like ``auditor`` except it doesn't do filesystem checks.
-        """)
-
-    baseui = interfaceutil.Attribute(
-        """Original ui instance passed into constructor.""")
-
-    ui = interfaceutil.Attribute(
-        """Main ui instance for this instance.""")
-
-    sharedpath = interfaceutil.Attribute(
-        """Path to the .hg directory of the repo this repo was shared from.""")
-
-    store = interfaceutil.Attribute(
-        """A store instance.""")
-
-    spath = interfaceutil.Attribute(
-        """Path to the store.""")
-
-    sjoin = interfaceutil.Attribute(
-        """Alias to self.store.join.""")
-
-    cachevfs = interfaceutil.Attribute(
-        """A VFS used to access the cache directory.
-
-        Typically .hg/cache.
-        """)
-
-    wcachevfs = interfaceutil.Attribute(
-        """A VFS used to access the cache directory dedicated to working copy
-
-        Typically .hg/wcache.
-        """)
-
-    filteredrevcache = interfaceutil.Attribute(
-        """Holds sets of revisions to be filtered.""")
-
-    names = interfaceutil.Attribute(
-        """A ``namespaces`` instance.""")
-
-    def close():
-        """Close the handle on this repository."""
-
-    def peer():
-        """Obtain an object conforming to the ``peer`` interface."""
-
-    def unfiltered():
-        """Obtain an unfiltered/raw view of this repo."""
-
-    def filtered(name, visibilityexceptions=None):
-        """Obtain a named view of this repository."""
-
-    obsstore = interfaceutil.Attribute(
-        """A store of obsolescence data.""")
-
-    changelog = interfaceutil.Attribute(
-        """A handle on the changelog revlog.""")
-
-    manifestlog = interfaceutil.Attribute(
-        """An instance conforming to the ``imanifestlog`` interface.
-
-        Provides access to manifests for the repository.
-        """)
-
-    dirstate = interfaceutil.Attribute(
-        """Working directory state.""")
-
-    narrowpats = interfaceutil.Attribute(
-        """Matcher patterns for this repository's narrowspec.""")
-
-    def narrowmatch(match=None, includeexact=False):
-        """Obtain a matcher for the narrowspec."""
-
-    def setnarrowpats(newincludes, newexcludes):
-        """Define the narrowspec for this repository."""
-
-    def __getitem__(changeid):
-        """Try to resolve a changectx."""
-
-    def __contains__(changeid):
-        """Whether a changeset exists."""
-
-    def __nonzero__():
-        """Always returns True."""
-        return True
-
-    __bool__ = __nonzero__
-
-    def __len__():
-        """Returns the number of changesets in the repo."""
-
-    def __iter__():
-        """Iterate over revisions in the changelog."""
-
-    def revs(expr, *args):
-        """Evaluate a revset.
-
-        Emits revisions.
-        """
-
-    def set(expr, *args):
-        """Evaluate a revset.
-
-        Emits changectx instances.
-        """
-
-    def anyrevs(specs, user=False, localalias=None):
-        """Find revisions matching one of the given revsets."""
-
-    def url():
-        """Returns a string representing the location of this repo."""
-
-    def hook(name, throw=False, **args):
-        """Call a hook."""
-
-    def tags():
-        """Return a mapping of tag to node."""
-
-    def tagtype(tagname):
-        """Return the type of a given tag."""
-
-    def tagslist():
-        """Return a list of tags ordered by revision."""
-
-    def nodetags(node):
-        """Return the tags associated with a node."""
-
-    def nodebookmarks(node):
-        """Return the list of bookmarks pointing to the specified node."""
-
-    def branchmap():
-        """Return a mapping of branch to heads in that branch."""
-
-    def revbranchcache():
-        """Obtain the cache used to map revision numbers to branch names."""
-
-    def branchtip(branchtip, ignoremissing=False):
-        """Return the tip node for a given branch."""
-
-    def lookup(key):
-        """Resolve the node for a revision."""
-
-    def lookupbranch(key):
-        """Look up the branch name of the given revision or branch name."""
-
-    def known(nodes):
-        """Determine whether a series of nodes is known.
-
-        Returns a list of bools.
-        """
-
-    def local():
-        """Whether the repository is local."""
-        return True
-
-    def publishing():
-        """Whether the repository is a publishing repository."""
-
-    def cancopy():
-        pass
-
-    def shared():
-        """The type of shared repository or None."""
-
-    def wjoin(f, *insidef):
-        """Calls self.vfs.reljoin(self.root, f, *insidef)"""
-
-    def setparents(p1, p2):
-        """Set the parent nodes of the working directory."""
-
-    def filectx(path, changeid=None, fileid=None):
-        """Obtain a filectx for the given file revision."""
-
-    def getcwd():
-        """Obtain the current working directory from the dirstate."""
-
-    def pathto(f, cwd=None):
-        """Obtain the relative path to a file."""
-
-    def adddatafilter(name, fltr):
-        pass
-
-    def wread(filename):
-        """Read a file from wvfs, using data filters."""
-
-    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
-        """Write data to a file in the wvfs, using data filters."""
-
-    def wwritedata(filename, data):
-        """Resolve data for writing to the wvfs, using data filters."""
-
-    def currenttransaction():
-        """Obtain the current transaction instance or None."""
-
-    def transaction(desc, report=None):
-        """Open a new transaction to write to the repository."""
-
-    def undofiles():
-        """Returns a list of (vfs, path) for files to undo transactions."""
-
-    def recover():
-        """Roll back an interrupted transaction."""
-
-    def rollback(dryrun=False, force=False):
-        """Undo the last transaction.
-
-        DANGEROUS.
-        """
-
-    def updatecaches(tr=None, full=False):
-        """Warm repo caches."""
-
-    def invalidatecaches():
-        """Invalidate cached data due to the repository mutating."""
-
-    def invalidatevolatilesets():
-        pass
-
-    def invalidatedirstate():
-        """Invalidate the dirstate."""
-
-    def invalidate(clearfilecache=False):
-        pass
-
-    def invalidateall():
-        pass
-
-    def lock(wait=True):
-        """Lock the repository store and return a lock instance."""
-
-    def wlock(wait=True):
-        """Lock the non-store parts of the repository."""
-
-    def currentwlock():
-        """Return the wlock if it's held or None."""
-
-    def checkcommitpatterns(wctx, vdirs, match, status, fail):
-        pass
-
-    def commit(text='', user=None, date=None, match=None, force=False,
-               editor=False, extra=None):
-        """Add a new revision to the repository."""
-
-    def commitctx(ctx, error=False, origctx=None):
-        """Commit a commitctx instance to the repository."""
-
-    def destroying():
-        """Inform the repository that nodes are about to be destroyed."""
-
-    def destroyed():
-        """Inform the repository that nodes have been destroyed."""
-
-    def status(node1='.', node2=None, match=None, ignored=False,
-               clean=False, unknown=False, listsubrepos=False):
-        """Convenience method to call repo[x].status()."""
-
-    def addpostdsstatus(ps):
-        pass
-
-    def postdsstatus():
-        pass
-
-    def clearpostdsstatus():
-        pass
-
-    def heads(start=None):
-        """Obtain list of nodes that are DAG heads."""
-
-    def branchheads(branch=None, start=None, closed=False):
-        pass
-
-    def branches(nodes):
-        pass
-
-    def between(pairs):
-        pass
-
-    def checkpush(pushop):
-        pass
-
-    prepushoutgoinghooks = interfaceutil.Attribute(
-        """util.hooks instance.""")
-
-    def pushkey(namespace, key, old, new):
-        pass
-
-    def listkeys(namespace):
-        pass
-
-    def debugwireargs(one, two, three=None, four=None, five=None):
-        pass
-
-    def savecommitmessage(text):
-        pass
-
-class completelocalrepository(ilocalrepositorymain,
-                              ilocalrepositoryfilestorage):
-    """Complete interface for a local repository."""
-
-class iwireprotocolcommandcacher(interfaceutil.Interface):
-    """Represents a caching backend for wire protocol commands.
-
-    Wire protocol version 2 supports transparent caching of many commands.
-    To leverage this caching, servers can activate objects that cache
-    command responses. Objects handle both cache writing and reading.
-    This interface defines how that response caching mechanism works.
-
-    Wire protocol version 2 commands emit a series of objects that are
-    serialized and sent to the client. The caching layer exists between
-    the invocation of the command function and the sending of its output
-    objects to an output layer.
-
-    Instances of this interface represent a binding to a cache that
-    can serve a response (in place of calling a command function) and/or
-    write responses to a cache for subsequent use.
-
-    When a command request arrives, the following happens with regard
-    to this interface:
-
-    1. The server determines whether the command request is cacheable.
-    2. If it is, an instance of this interface is spawned.
-    3. The cacher is activated in a context manager (``__enter__`` is called).
-    4. A cache *key* for that request is derived. This will call the
-       instance's ``adjustcachekeystate()`` method so the derivation
-       can be influenced.
-    5. The cacher is informed of the derived cache key via a call to
-       ``setcachekey()``.
-    6. The cacher's ``lookup()`` method is called to test for presence of
-       the derived key in the cache.
-    7. If ``lookup()`` returns a hit, that cached result is used in place
-       of invoking the command function. ``__exit__`` is called and the instance
-       is discarded.
-    8. Otherwise, the command function is invoked.
-    9. ``onobject()`` is called for each object emitted by the command
-       function.
-    10. After the final object is seen, ``onfinished()`` is called.
-    11. ``__exit__`` is called to signal the end of use of the instance.
-
-    Cache *key* derivation can be influenced by the instance.
-
-    Cache keys are initially derived from a deterministic representation of
-    the command request. This includes the command name, arguments, protocol
-    version, etc. This initial key derivation is performed by CBOR-encoding a
-    data structure and feeding that output into a hasher.
-
-    Instances of this interface can influence this initial key derivation
-    via ``adjustcachekeystate()``.
-
-    The instance is informed of the derived cache key via a call to
-    ``setcachekey()``. The instance must store the key locally so it can
-    be consulted on subsequent operations that may require it.
-
-    When constructed, the instance has access to a callable that can be used
-    for encoding response objects. This callable receives as its single
-    argument an object emitted by a command function. It returns an iterable
-    of bytes chunks representing the encoded object. Unless the cacher is
-    caching native Python objects in memory or has a way of reconstructing
-    the original Python objects, implementations typically call this function
-    to produce bytes from the output objects and then store those bytes in
-    the cache. When it comes time to re-emit those bytes, they are wrapped
-    in a ``wireprototypes.encodedresponse`` instance to tell the output
-    layer that they are pre-encoded.
-
-    When receiving the objects emitted by the command function, instances
-    can choose what to do with those objects. The simplest thing to do is
-    re-emit the original objects. They will be forwarded to the output
-    layer and will be processed as if the cacher did not exist.
-
-    Implementations could also choose not to emit objects, instead locally
-    buffering objects or their encoded representation. They could then emit
-    a single "coalesced" object when ``onfinished()`` is called. In
-    this way, the implementation would function as a filtering layer of
-    sorts.
-
-    When caching objects, typically the encoded form of the object will
-    be stored. Keep in mind that if the original object is forwarded to
-    the output layer, it will need to be encoded there as well. For large
-    output, this redundant encoding could add overhead. Implementations
-    could wrap the encoded object data in ``wireprototypes.encodedresponse``
-    instances to avoid this overhead.
-    """
-    def __enter__():
-        """Marks the instance as active.
-
-        Should return self.
-        """
-
-    def __exit__(exctype, excvalue, exctb):
-        """Called when cacher is no longer used.
-
-        This can be used by implementations to perform cleanup actions (e.g.
-        disconnecting network sockets, aborting a partially cached response).
-        """
-
-    def adjustcachekeystate(state):
-        """Influences cache key derivation by adjusting state to derive key.
-
-        A dict defining the state used to derive the cache key is passed.
-
-        Implementations can modify this dict to record additional state that
-        should influence key derivation.
-
-        Implementations are *highly* encouraged not to modify or delete
-        existing keys.
-        """
-
-    def setcachekey(key):
-        """Record the derived cache key for this request.
-
-        Instances may mutate the key for internal usage as desired. For
-        example, instances may wish to prepend the repo name, introduce path
-        components for filesystem or URL addressing, etc. Behavior is up to
-        the cache.
-
-        Returns a bool indicating if the request is cacheable by this
-        instance.
-        """
-
-    def lookup():
-        """Attempt to resolve an entry in the cache.
-
-        The instance is instructed to look for the cache key that it was
-        informed about via the call to ``setcachekey()``.
-
-        If there's no cache hit or the cacher doesn't wish to use the cached
-        entry, ``None`` should be returned.
-
-        Otherwise, a dict defining the cached result should be returned. The
-        dict may have the following keys:
-
-        objs
-           An iterable of objects that should be sent to the client. That
-           iterable of objects is expected to be what the command function
-           would return if invoked or an equivalent representation thereof.
-        """
-
-    def onobject(obj):
-        """Called when a new object is emitted from the command function.
-
-        Receives as its argument the object that was emitted from the
-        command function.
-
-        This method returns an iterator of objects to forward to the output
-        layer. The easiest implementation is a generator that just
-        ``yield obj``.
-        """
-
-    def onfinished():
-        """Called after all objects have been emitted from the command function.
-
-        Implementations should return an iterator of objects to forward to
-        the output layer.
-
-        This method can be a generator.
-        """
--- a/mercurial/repoview.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/repoview.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,6 +12,11 @@
 import weakref
 
 from .node import nullrev
+from .pycompat import (
+    delattr,
+    getattr,
+    setattr,
+)
 from . import (
     obsolete,
     phases,
@@ -19,9 +24,8 @@
     tags as tagsmod,
     util,
 )
-from .utils import (
-    repoviewutil,
-)
+from .utils import repoviewutil
+
 
 def hideablerevs(repo):
     """Revision candidates to be hidden
@@ -32,11 +36,12 @@
     branchmap (see mercurial.utils.repoviewutil.subsettable), you cannot set
     "public" changesets as "hideable". Doing so would break multiple code
     assertions and lead to crashes."""
-    obsoletes = obsolete.getrevs(repo, 'obsolete')
+    obsoletes = obsolete.getrevs(repo, b'obsolete')
     internals = repo._phasecache.getrevset(repo, phases.localhiddenphases)
     internals = frozenset(internals)
     return obsoletes | internals
 
+
 def pinnedrevs(repo):
     """revisions blocking hidden changesets from being filtered
     """
@@ -72,6 +77,7 @@
                 hidden.remove(p)
                 stack.append(p)
 
+
 def computehidden(repo, visibilityexceptions=None):
     """compute the set of hidden revision to filter
 
@@ -90,6 +96,7 @@
         _revealancestors(pfunc, hidden, visible)
     return frozenset(hidden)
 
+
 def computesecret(repo, visibilityexceptions=None):
     """compute the set of revision that can never be exposed through hgweb
 
@@ -98,28 +105,31 @@
     secrets = repo._phasecache.getrevset(repo, phases.remotehiddenphases)
     return frozenset(secrets)
 
+
 def computeunserved(repo, visibilityexceptions=None):
     """compute the set of revision that should be filtered when used a server
 
     Secret and hidden changeset should not pretend to be here."""
     assert not repo.changelog.filteredrevs
     # fast path in simple case to avoid impact of non-optimised code
-    hiddens = filterrevs(repo, 'visible')
-    secrets = filterrevs(repo, 'served.hidden')
+    hiddens = filterrevs(repo, b'visible')
+    secrets = filterrevs(repo, b'served.hidden')
     if secrets:
         return frozenset(hiddens | secrets)
     else:
         return hiddens
 
+
 def computemutable(repo, visibilityexceptions=None):
     assert not repo.changelog.filteredrevs
     # fast check to avoid revset call on huge repo
     if any(repo._phasecache.phaseroots[1:]):
         getphase = repo._phasecache.phase
-        maymutable = filterrevs(repo, 'base')
+        maymutable = filterrevs(repo, b'base')
         return frozenset(r for r in maymutable if getphase(repo, r))
     return frozenset()
 
+
 def computeimpactable(repo, visibilityexceptions=None):
     """Everything impactable by mutable revision
 
@@ -145,48 +155,55 @@
     firstmutable = max(0, firstmutable)
     return frozenset(pycompat.xrange(firstmutable, len(cl)))
 
+
 # function to compute filtered set
 #
 # When adding a new filter you MUST update the table at:
 #     mercurial.utils.repoviewutil.subsettable
 # Otherwise your filter will have to recompute all of its branch caches
 # from scratch (very slow).
-filtertable = {'visible': computehidden,
-               'visible-hidden': computehidden,
-               'served.hidden': computesecret,
-               'served': computeunserved,
-               'immutable':  computemutable,
-               'base':  computeimpactable}
+filtertable = {
+    b'visible': computehidden,
+    b'visible-hidden': computehidden,
+    b'served.hidden': computesecret,
+    b'served': computeunserved,
+    b'immutable': computemutable,
+    b'base': computeimpactable,
+}
 
 _basefiltername = list(filtertable)
 
+
 def extrafilter(ui):
     """initialize extra filter and return its id
 
     If extra filtering is configured, we make sure the associated filtered
     views are declared and return the associated id.
     """
-    frevs = ui.config('experimental', 'extra-filter-revs')
+    frevs = ui.config(b'experimental', b'extra-filter-revs')
     if frevs is None:
         return None
 
-    fid = pycompat.sysbytes(util.DIGESTS['sha1'](frevs).hexdigest())[:12]
+    fid = pycompat.sysbytes(util.DIGESTS[b'sha1'](frevs).hexdigest())[:12]
 
-    combine = lambda fname: fname + '%' + fid
+    combine = lambda fname: fname + b'%' + fid
 
     subsettable = repoviewutil.subsettable
 
-    if combine('base') not in filtertable:
+    if combine(b'base') not in filtertable:
         for name in _basefiltername:
+
             def extrafilteredrevs(repo, *args, **kwargs):
                 baserevs = filtertable[name](repo, *args, **kwargs)
                 extrarevs = frozenset(repo.revs(frevs))
                 return baserevs | extrarevs
+
             filtertable[combine(name)] = extrafilteredrevs
             if name in subsettable:
                 subsettable[combine(name)] = combine(subsettable[name])
     return fid
 
+
 def filterrevs(repo, filtername, visibilityexceptions=None):
     """returns set of filtered revision for this filter name
 
@@ -200,6 +217,7 @@
         repo.filteredrevcache[filtername] = func(repo.unfiltered())
     return repo.filteredrevcache[filtername]
 
+
 class repoview(object):
     """Provide a read/write view of a repo through a filtered changelog
 
@@ -241,8 +259,7 @@
         object.__setattr__(self, r'_clcachekey', None)
         object.__setattr__(self, r'_clcache', None)
         # revs which are exceptions and must not be hidden
-        object.__setattr__(self, r'_visibilityexceptions',
-                           visibilityexceptions)
+        object.__setattr__(self, r'_visibilityexceptions', visibilityexceptions)
 
     # not a propertycache on purpose; we shall implement a proper cache later
     @property
@@ -257,14 +274,15 @@
         unfiindex = unfichangelog.index
         unfilen = len(unfiindex)
         unfinode = unfiindex[unfilen - 1][7]
-
-        revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
+        with util.timedcm('repo filter for %s', self.filtername):
+            revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
         cl = self._clcache
         newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
         # if cl.index is not unfiindex, unfi.changelog would be
         # recreated, and our clcache refers to garbage object
-        if (cl is not None and
-            (cl.index is not unfiindex or newkey != self._clcachekey)):
+        if cl is not None and (
+            cl.index is not unfiindex or newkey != self._clcachekey
+        ):
             cl = None
         # could have been made None by the previous if
         if cl is None:
@@ -285,9 +303,11 @@
         return self.unfiltered().filtered(name, visibilityexceptions)
 
     def __repr__(self):
-        return r'<%s:%s %r>' % (self.__class__.__name__,
-                                pycompat.sysstr(self.filtername),
-                                self.unfiltered())
+        return r'<%s:%s %r>' % (
+            self.__class__.__name__,
+            pycompat.sysstr(self.filtername),
+            self.unfiltered(),
+        )
 
     # all attribute accesses are forwarded to the proxied repo
     def __getattr__(self, attr):
@@ -299,15 +319,19 @@
     def __delattr__(self, attr):
         return delattr(self._unfilteredrepo, attr)
 
+
 # Python <3.4 easily leaks types via __mro__. See
 # https://bugs.python.org/issue17950. We cache dynamically created types
 # so they won't be leaked on every invocation of repo.filtered().
 _filteredrepotypes = weakref.WeakKeyDictionary()
 
+
 def newtype(base):
     """Create a new type with the repoview mixin and the given base class"""
     if base not in _filteredrepotypes:
+
         class filteredrepo(repoview, base):
             pass
+
         _filteredrepotypes[base] = filteredrepo
     return _filteredrepotypes[base]
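
Aside: in the ``extrafilter()`` hunk above, the per-name
``extrafilteredrevs`` closures are defined inside a ``for`` loop, and
Python closures capture the loop variable by reference; as written, once
the loop finishes, every combined filter resolves ``name`` to the last
base filter name. Below is a reduced sketch of the same composition
pattern that freezes the loop variable through a default argument
(``name=name``); the toy ``filtertable`` contents and the ``addextra()``
helper are invented for illustration::

    filtertable = {
        b'visible': lambda repo: frozenset([0, 1]),
        b'served': lambda repo: frozenset([0, 1, 2]),
    }

    def addextra(extrarevs, fid):
        """Register a '<name>%<fid>' variant of each base filter that
        also filters ``extrarevs``, mirroring extrafilter() above."""
        for name in list(filtertable):
            # name=name binds the current value at definition time;
            # without it, every variant would consult the final ``name``
            def extrafilteredrevs(repo, name=name):
                return filtertable[name](repo) | extrarevs
            filtertable[name + b'%' + fid] = extrafilteredrevs

    addextra(frozenset([7]), b'c0ffee123456')
    print(sorted(filtertable[b'visible%c0ffee123456'](None)))  # [0, 1, 7]
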
--- a/mercurial/revlog.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/revlog.py	Mon Oct 21 11:09:48 2019 -0400
@@ -35,16 +35,10 @@
     wdirrev,
 )
 from .i18n import _
+from .pycompat import getattr
 from .revlogutils.constants import (
     FLAG_GENERALDELTA,
     FLAG_INLINE_DATA,
-    REVIDX_DEFAULT_FLAGS,
-    REVIDX_ELLIPSIS,
-    REVIDX_EXTSTORED,
-    REVIDX_FLAGS_ORDER,
-    REVIDX_ISCENSORED,
-    REVIDX_KNOWN_FLAGS,
-    REVIDX_RAWTEXT_CHANGING_FLAGS,
     REVLOGV0,
     REVLOGV1,
     REVLOGV1_FLAGS,
@@ -54,9 +48,16 @@
     REVLOG_DEFAULT_FORMAT,
     REVLOG_DEFAULT_VERSION,
 )
-from .thirdparty import (
-    attr,
+from .revlogutils.flagutil import (
+    REVIDX_DEFAULT_FLAGS,
+    REVIDX_ELLIPSIS,
+    REVIDX_EXTSTORED,
+    REVIDX_FLAGS_ORDER,
+    REVIDX_ISCENSORED,
+    REVIDX_RAWTEXT_CHANGING_FLAGS,
+    REVIDX_SIDEDATA,
 )
+from .thirdparty import attr
 from . import (
     ancestor,
     dagop,
@@ -64,15 +65,19 @@
     mdiff,
     policy,
     pycompat,
-    repository,
     templatefilters,
     util,
 )
+from .interfaces import (
+    repository,
+    util as interfaceutil,
+)
 from .revlogutils import (
     deltas as deltautil,
+    flagutil,
+    sidedata as sidedatautil,
 )
 from .utils import (
-    interfaceutil,
     storageutil,
     stringutil,
 )
@@ -91,10 +96,10 @@
 REVLOGV2_FLAGS
 REVIDX_ISCENSORED
 REVIDX_ELLIPSIS
+REVIDX_SIDEDATA
 REVIDX_EXTSTORED
 REVIDX_DEFAULT_FLAGS
 REVIDX_FLAGS_ORDER
-REVIDX_KNOWN_FLAGS
 REVIDX_RAWTEXT_CHANGING_FLAGS
 
 parsers = policy.importmod(r'parsers')
@@ -108,77 +113,40 @@
 _maxinline = 131072
 _chunksize = 1048576
 
-# Store flag processors (cf. 'addflagprocessor()' to register)
-_flagprocessors = {
-    REVIDX_ISCENSORED: None,
-}
-
 # Flag processors for REVIDX_ELLIPSIS.
 def ellipsisreadprocessor(rl, text):
+    return text, False, {}
+
+
+def ellipsiswriteprocessor(rl, text, sidedata):
     return text, False
 
-def ellipsiswriteprocessor(rl, text):
-    return text, False
 
 def ellipsisrawprocessor(rl, text):
     return False
 
+
 ellipsisprocessor = (
     ellipsisreadprocessor,
     ellipsiswriteprocessor,
     ellipsisrawprocessor,
 )
 
-def addflagprocessor(flag, processor):
-    """Register a flag processor on a revision data flag.
-
-    Invariant:
-    - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
-      and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
-    - Only one flag processor can be registered on a specific flag.
-    - flagprocessors must be 3-tuples of functions (read, write, raw) with the
-      following signatures:
-          - (read)  f(self, rawtext) -> text, bool
-          - (write) f(self, text) -> rawtext, bool
-          - (raw)   f(self, rawtext) -> bool
-      "text" is presented to the user. "rawtext" is stored in revlog data, not
-      directly visible to the user.
-      The boolean returned by these transforms is used to determine whether
-      the returned text can be used for hash integrity checking. For example,
-      if "write" returns False, then "text" is used to generate hash. If
-      "write" returns True, that basically means "rawtext" returned by "write"
-      should be used to generate hash. Usually, "write" and "read" return
-      different booleans. And "raw" returns a same boolean as "write".
-
-      Note: The 'raw' transform is used for changegroup generation and in some
-      debug commands. In this case the transform only indicates whether the
-      contents can be used for hash integrity checks.
-    """
-    _insertflagprocessor(flag, processor, _flagprocessors)
-
-def _insertflagprocessor(flag, processor, flagprocessors):
-    if not flag & REVIDX_KNOWN_FLAGS:
-        msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
-        raise error.ProgrammingError(msg)
-    if flag not in REVIDX_FLAGS_ORDER:
-        msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
-        raise error.ProgrammingError(msg)
-    if flag in flagprocessors:
-        msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
-        raise error.Abort(msg)
-    flagprocessors[flag] = processor
 
 def getoffset(q):
     return int(q >> 16)
 
+
 def gettype(q):
     return int(q & 0xFFFF)
 
+
 def offset_type(offset, type):
-    if (type & ~REVIDX_KNOWN_FLAGS) != 0:
-        raise ValueError('unknown revlog index flags')
+    if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
+        raise ValueError(b'unknown revlog index flags')
     return int(int(offset) << 16 | type)
 
+
 @attr.s(slots=True, frozen=True)
 class _revisioninfo(object):
     """Information about a revision that allows building its fulltext
@@ -190,6 +158,7 @@
 
     One of btext[0] or cachedelta must be set.
     """
+
     node = attr.ib()
     p1 = attr.ib()
     p2 = attr.ib()
@@ -198,6 +167,7 @@
     cachedelta = attr.ib()
     flags = attr.ib()
 
+
 @interfaceutil.implementer(repository.irevisiondelta)
 @attr.s(slots=True)
 class revlogrevisiondelta(object):
@@ -211,6 +181,7 @@
     delta = attr.ib()
     linknode = attr.ib(default=None)
 
+
 @interfaceutil.implementer(repository.iverifyproblem)
 @attr.s(frozen=True)
 class revlogproblem(object):
@@ -218,6 +189,7 @@
     error = attr.ib(default=None)
     node = attr.ib(default=None)
 
+
 # index v0:
 #  4 bytes: offset
 #  4 bytes: compressed length
@@ -226,16 +198,18 @@
 # 20 bytes: parent 1 nodeid
 # 20 bytes: parent 2 nodeid
 # 20 bytes: nodeid
-indexformatv0 = struct.Struct(">4l20s20s20s")
+indexformatv0 = struct.Struct(b">4l20s20s20s")
 indexformatv0_pack = indexformatv0.pack
 indexformatv0_unpack = indexformatv0.unpack
 
+
 class revlogoldindex(list):
     def __getitem__(self, i):
         if i == -1:
             return (0, 0, 0, -1, -1, -1, -1, nullid)
         return list.__getitem__(self, i)
 
+
 class revlogoldio(object):
     def __init__(self):
         self.size = indexformatv0.size
@@ -247,12 +221,20 @@
         n = off = 0
         l = len(data)
         while off + s <= l:
-            cur = data[off:off + s]
+            cur = data[off : off + s]
             off += s
             e = indexformatv0_unpack(cur)
             # transform to revlogv1 format
-            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
-                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
+            e2 = (
+                offset_type(e[0], 0),
+                e[1],
+                -1,
+                e[2],
+                e[3],
+                nodemap.get(e[4], nullrev),
+                nodemap.get(e[5], nullrev),
+                e[6],
+            )
             index.append(e2)
             nodemap[e[6]] = n
             n += 1
@@ -261,12 +243,21 @@
 
     def packentry(self, entry, node, version, rev):
         if gettype(entry[0]):
-            raise error.RevlogError(_('index entry flags need revlog '
-                                      'version 1'))
-        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
-              node(entry[5]), node(entry[6]), entry[7])
+            raise error.RevlogError(
+                _(b'index entry flags need revlog version 1')
+            )
+        e2 = (
+            getoffset(entry[0]),
+            entry[1],
+            entry[3],
+            entry[4],
+            node(entry[5]),
+            node(entry[6]),
+            entry[7],
+        )
         return indexformatv0_pack(*e2)
 
+
 # index ng:
 #  6 bytes: offset
 #  2 bytes: flags
@@ -277,15 +268,16 @@
 #  4 bytes: parent 1 rev
 #  4 bytes: parent 2 rev
 # 32 bytes: nodeid
-indexformatng = struct.Struct(">Qiiiiii20s12x")
+indexformatng = struct.Struct(b">Qiiiiii20s12x")
 indexformatng_pack = indexformatng.pack
-versionformat = struct.Struct(">I")
+versionformat = struct.Struct(b">I")
 versionformat_pack = versionformat.pack
 versionformat_unpack = versionformat.unpack
 
 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
 # signed integer)
-_maxentrysize = 0x7fffffff
+_maxentrysize = 0x7FFFFFFF
+
 
 class revlogio(object):
     def __init__(self):
@@ -302,6 +294,7 @@
             p = versionformat_pack(version) + p[4:]
         return p
 
+
 class revlog(object):
     """
     the underlying revision storage object
@@ -339,9 +332,19 @@
     If `upperboundcomp` is not None, this is the expected maximal gain from
     compression for the data content.
     """
-    def __init__(self, opener, indexfile, datafile=None, checkambig=False,
-                 mmaplargeindex=False, censorable=False,
-                 upperboundcomp=None):
+
+    _flagserrorclass = error.RevlogError
+
+    def __init__(
+        self,
+        opener,
+        indexfile,
+        datafile=None,
+        checkambig=False,
+        mmaplargeindex=False,
+        censorable=False,
+        upperboundcomp=None,
+    ):
         """
         create a revlog object
 
@@ -351,7 +354,7 @@
         """
         self.upperboundcomp = upperboundcomp
         self.indexfile = indexfile
-        self.datafile = datafile or (indexfile[:-2] + ".d")
+        self.datafile = datafile or (indexfile[:-2] + b".d")
         self.opener = opener
         #  When True, indexfile is opened with checkambig=True at writing, to
         #  avoid file stat ambiguity.
@@ -363,7 +366,7 @@
         # Maps rev to chain base rev.
         self._chainbasecache = util.lrucachedict(100)
         # 2-tuple of (offset, data) of raw data from the revlog at an offset.
-        self._chunkcache = (0, '')
+        self._chunkcache = (0, b'')
         # How much data to read and cache into the raw revlog data cache.
         self._chunkcachesize = 65536
         self._maxchainlen = None
@@ -374,7 +377,7 @@
         # Mapping of revision integer to full node.
         self._nodecache = {nullid: nullrev}
         self._nodepos = None
-        self._compengine = 'zlib'
+        self._compengine = b'zlib'
         self._compengineopts = {}
         self._maxdeltachainspan = -1
         self._withsparseread = False
@@ -384,7 +387,7 @@
 
         # Make copy of flag processors so each revlog instance can support
         # custom flags.
-        self._flagprocessors = dict(_flagprocessors)
+        self._flagprocessors = dict(flagutil.flagprocessors)
 
         # 2-tuple of file handles being used for active writing.
         self._writinghandles = None
@@ -393,70 +396,78 @@
 
     def _loadindex(self):
         mmapindexthreshold = None
-        opts = getattr(self.opener, 'options', {}) or {}
-
-        if 'revlogv2' in opts:
+        opts = self.opener.options
+
+        if b'revlogv2' in opts:
             newversionflags = REVLOGV2 | FLAG_INLINE_DATA
-        elif 'revlogv1' in opts:
+        elif b'revlogv1' in opts:
             newversionflags = REVLOGV1 | FLAG_INLINE_DATA
-            if 'generaldelta' in opts:
+            if b'generaldelta' in opts:
                 newversionflags |= FLAG_GENERALDELTA
-        elif getattr(self.opener, 'options', None) is not None:
-            # If options provided but no 'revlog*' found, the repository
-            # would have no 'requires' file in it, which means we have to
-            # stick to the old format.
+        elif b'revlogv0' in self.opener.options:
             newversionflags = REVLOGV0
         else:
             newversionflags = REVLOG_DEFAULT_VERSION
 
-        if 'chunkcachesize' in opts:
-            self._chunkcachesize = opts['chunkcachesize']
-        if 'maxchainlen' in opts:
-            self._maxchainlen = opts['maxchainlen']
-        if 'deltabothparents' in opts:
-            self._deltabothparents = opts['deltabothparents']
-        self._lazydelta = bool(opts.get('lazydelta', True))
+        if b'chunkcachesize' in opts:
+            self._chunkcachesize = opts[b'chunkcachesize']
+        if b'maxchainlen' in opts:
+            self._maxchainlen = opts[b'maxchainlen']
+        if b'deltabothparents' in opts:
+            self._deltabothparents = opts[b'deltabothparents']
+        self._lazydelta = bool(opts.get(b'lazydelta', True))
         self._lazydeltabase = False
         if self._lazydelta:
-            self._lazydeltabase = bool(opts.get('lazydeltabase', False))
-        if 'compengine' in opts:
-            self._compengine = opts['compengine']
-        if 'zlib.level' in opts:
-            self._compengineopts['zlib.level'] = opts['zlib.level']
-        if 'zstd.level' in opts:
-            self._compengineopts['zstd.level'] = opts['zstd.level']
-        if 'maxdeltachainspan' in opts:
-            self._maxdeltachainspan = opts['maxdeltachainspan']
-        if self._mmaplargeindex and 'mmapindexthreshold' in opts:
-            mmapindexthreshold = opts['mmapindexthreshold']
-        self._sparserevlog = bool(opts.get('sparse-revlog', False))
-        withsparseread = bool(opts.get('with-sparse-read', False))
+            self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
+        if b'compengine' in opts:
+            self._compengine = opts[b'compengine']
+        if b'zlib.level' in opts:
+            self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
+        if b'zstd.level' in opts:
+            self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
+        if b'maxdeltachainspan' in opts:
+            self._maxdeltachainspan = opts[b'maxdeltachainspan']
+        if self._mmaplargeindex and b'mmapindexthreshold' in opts:
+            mmapindexthreshold = opts[b'mmapindexthreshold']
+        self.hassidedata = bool(opts.get(b'side-data', False))
+        if self.hassidedata:
+            self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
+        self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
+        withsparseread = bool(opts.get(b'with-sparse-read', False))
         # sparse-revlog forces sparse-read
         self._withsparseread = self._sparserevlog or withsparseread
-        if 'sparse-read-density-threshold' in opts:
-            self._srdensitythreshold = opts['sparse-read-density-threshold']
-        if 'sparse-read-min-gap-size' in opts:
-            self._srmingapsize = opts['sparse-read-min-gap-size']
-        if opts.get('enableellipsis'):
+        if b'sparse-read-density-threshold' in opts:
+            self._srdensitythreshold = opts[b'sparse-read-density-threshold']
+        if b'sparse-read-min-gap-size' in opts:
+            self._srmingapsize = opts[b'sparse-read-min-gap-size']
+        if opts.get(b'enableellipsis'):
             self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
 
         # revlog v0 doesn't have flag processors
-        for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
-            _insertflagprocessor(flag, processor, self._flagprocessors)
+        for flag, processor in pycompat.iteritems(
+            opts.get(b'flagprocessors', {})
+        ):
+            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
 
         if self._chunkcachesize <= 0:
-            raise error.RevlogError(_('revlog chunk cache size %r is not '
-                                      'greater than 0') % self._chunkcachesize)
+            raise error.RevlogError(
+                _(b'revlog chunk cache size %r is not greater than 0')
+                % self._chunkcachesize
+            )
         elif self._chunkcachesize & (self._chunkcachesize - 1):
-            raise error.RevlogError(_('revlog chunk cache size %r is not a '
-                                      'power of 2') % self._chunkcachesize)
-
-        indexdata = ''
+            raise error.RevlogError(
+                _(b'revlog chunk cache size %r is not a power of 2')
+                % self._chunkcachesize
+            )
+
+        indexdata = b''
         self._initempty = True
         try:
             with self._indexfp() as f:
-                if (mmapindexthreshold is not None and
-                    self.opener.fstat(f).st_size >= mmapindexthreshold):
+                if (
+                    mmapindexthreshold is not None
+                    and self.opener.fstat(f).st_size >= mmapindexthreshold
+                ):
                     # TODO: should .close() to release resources without
                     # relying on Python GC
                     indexdata = util.buffer(util.mmapread(f))
@@ -480,35 +491,39 @@
 
         if fmt == REVLOGV0:
             if flags:
-                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
-                                          'revlog %s') %
-                                        (flags >> 16, fmt, self.indexfile))
+                raise error.RevlogError(
+                    _(b'unknown flags (%#04x) in version %d revlog %s')
+                    % (flags >> 16, fmt, self.indexfile)
+                )
 
             self._inline = False
             self._generaldelta = False
 
         elif fmt == REVLOGV1:
             if flags & ~REVLOGV1_FLAGS:
-                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
-                                          'revlog %s') %
-                                        (flags >> 16, fmt, self.indexfile))
+                raise error.RevlogError(
+                    _(b'unknown flags (%#04x) in version %d revlog %s')
+                    % (flags >> 16, fmt, self.indexfile)
+                )
 
             self._inline = versionflags & FLAG_INLINE_DATA
             self._generaldelta = versionflags & FLAG_GENERALDELTA
 
         elif fmt == REVLOGV2:
             if flags & ~REVLOGV2_FLAGS:
-                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
-                                          'revlog %s') %
-                                        (flags >> 16, fmt, self.indexfile))
+                raise error.RevlogError(
+                    _(b'unknown flags (%#04x) in version %d revlog %s')
+                    % (flags >> 16, fmt, self.indexfile)
+                )
 
             self._inline = versionflags & FLAG_INLINE_DATA
             # generaldelta implied by version 2 revlogs.
             self._generaldelta = True
 
         else:
-            raise error.RevlogError(_('unknown version (%d) in revlog %s') %
-                                    (fmt, self.indexfile))
+            raise error.RevlogError(
+                _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
+            )
         # sparse-revlog can't be on without general-delta (issue6056)
         if not self._generaldelta:
             self._sparserevlog = False
@@ -521,8 +536,9 @@
         try:
             d = self._io.parseindex(indexdata, self._inline)
         except (ValueError, IndexError):
-            raise error.RevlogError(_("index %s is corrupted") %
-                                    self.indexfile)
+            raise error.RevlogError(
+                _(b"index %s is corrupted") % self.indexfile
+            )
         self.index, nodemap, self._chunkcache = d
         if nodemap is not None:
             self.nodemap = self._nodecache = nodemap
@@ -538,16 +554,16 @@
         engine = util.compengines[self._compengine]
         return engine.revlogcompressor(self._compengineopts)
 
-    def _indexfp(self, mode='r'):
+    def _indexfp(self, mode=b'r'):
         """file object for the revlog's index file"""
         args = {r'mode': mode}
-        if mode != 'r':
+        if mode != b'r':
             args[r'checkambig'] = self._checkambig
-        if mode == 'w':
+        if mode == b'w':
             args[r'atomictemp'] = True
         return self.opener(self.indexfile, **args)
 
-    def _datafp(self, mode='r'):
+    def _datafp(self, mode=b'r'):
         """file object for the revlog's data file"""
         return self.opener(self.datafile, mode=mode)
 
@@ -579,12 +595,16 @@
 
     def tip(self):
         return self.node(len(self.index) - 1)
+
     def __contains__(self, rev):
         return 0 <= rev < len(self)
+
     def __len__(self):
         return len(self.index)
+
     def __iter__(self):
         return iter(pycompat.xrange(len(self)))
+
     def revs(self, start=0, stop=None):
         """iterate over all rev in this revlog (from start to stop)"""
         return storageutil.iterrevs(len(self), start=start, stop=stop)
@@ -611,15 +631,16 @@
         # the rawtext content that the delta will be based on, and two clients
         # could have a same revlog node with different flags (i.e. different
         # rawtext contents) and the delta could be incompatible.
-        if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
-            or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
+        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
+            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
+        ):
             return False
         return True
 
     def clearcaches(self):
         self._revisioncache = None
         self._chainbasecache.clear()
-        self._chunkcache = (0, '')
+        self._chunkcache = (0, b'')
         self._pcache = {}
 
         try:
@@ -640,7 +661,7 @@
             # parsers.c radix tree lookup failed
             if node == wdirid or node in wdirfilenodeids:
                 raise error.WdirUnsupported
-            raise error.LookupError(node, self.indexfile, _('no node'))
+            raise error.LookupError(node, self.indexfile, _(b'no node'))
         except KeyError:
             # pure python cache lookup failed
             n = self._nodecache
@@ -658,7 +679,7 @@
                     return r
             if node == wdirid or node in wdirfilenodeids:
                 raise error.WdirUnsupported
-            raise error.LookupError(node, self.indexfile, _('no node'))
+            raise error.LookupError(node, self.indexfile, _(b'no node'))
 
     # Accessors for index entries.
 
@@ -679,7 +700,7 @@
         if l >= 0:
             return l
 
-        t = self.revision(rev, raw=True)
+        t = self.rawdata(rev)
         return len(t)
 
     def size(self, rev):
@@ -687,7 +708,7 @@
         # fast path: if no "read" flag processor could change the content,
         # size is rawsize. note: ELLIPSIS is known to not change the content.
         flags = self.flags(rev)
-        if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
+        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
             return self.rawsize(rev)
 
         return len(self.revision(rev, raw=False))
@@ -739,7 +760,7 @@
     def parents(self, node):
         i = self.index
         d = i[self.rev(node)]
-        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
+        return i[d[5]][7], i[d[6]][7]  # map revisions to nodes inline
 
     def chainlen(self, rev):
         return self._chaininfo(rev)[0]
@@ -832,7 +853,7 @@
         if rustancestor is not None:
             lazyancestors = rustancestor.LazyAncestors
             arg = self.index
-        elif util.safehasattr(parsers, 'rustlazyancestors'):
+        elif util.safehasattr(parsers, b'rustlazyancestors'):
             lazyancestors = ancestor.rustlazyancestors
             arg = self.index
         else:
@@ -1001,7 +1022,7 @@
                 return nonodes
             lowestrev = min([self.rev(n) for n in roots])
         else:
-            roots = [nullid] # Everybody's a descendant of nullid
+            roots = [nullid]  # Everybody's a descendant of nullid
             lowestrev = nullrev
         if (lowestrev == nullrev) and (heads is None):
             # We want _all_ the nodes!
@@ -1040,11 +1061,12 @@
                     if n not in ancestors:
                         # If we are possibly a descendant of one of the roots
                         # and we haven't already been marked as an ancestor
-                        ancestors.add(n) # Mark as ancestor
+                        ancestors.add(n)  # Mark as ancestor
                         # Add non-nullid parents to list of nodes to tag.
-                        nodestotag.update([p for p in self.parents(n) if
-                                           p != nullid])
-                    elif n in heads: # We've seen it before, is it a fake head?
+                        nodestotag.update(
+                            [p for p in self.parents(n) if p != nullid]
+                        )
+                    elif n in heads:  # We've seen it before, is it a fake head?
                         # So it is, real heads should not be the ancestors of
                         # any other heads.
                         heads.pop(n)
@@ -1121,7 +1143,7 @@
                     # But, obviously its parents aren't.
                     for p in self.parents(n):
                         heads.pop(p, None)
-        heads = [head for head, flag in heads.iteritems() if flag]
+        heads = [head for head, flag in pycompat.iteritems(heads) if flag]
         roots = list(roots)
         assert orderedout
         assert roots
@@ -1174,8 +1196,9 @@
 
         stoprevs = set(self.rev(n) for n in stop or [])
 
-        revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
-                                    stoprevs=stoprevs)
+        revs = dagop.headrevssubset(
+            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
+        )
 
         return [self.node(rev) for rev in revs]
 
@@ -1203,7 +1226,7 @@
         """calculate all the heads of the common ancestors of revs"""
         try:
             ancs = self.index.commonancestorsheads(*revs)
-        except (AttributeError, OverflowError): # C implementation failed
+        except (AttributeError, OverflowError):  # C implementation failed
             ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
         return ancs
 
@@ -1234,11 +1257,13 @@
 
         If includepath is True, return (<roots>::<heads>)."""
         try:
-            return self.index.reachableroots2(minroot, heads, roots,
-                                              includepath)
+            return self.index.reachableroots2(
+                minroot, heads, roots, includepath
+            )
         except AttributeError:
-            return dagop._reachablerootspure(self.parentrevs,
-                                             minroot, roots, heads, includepath)
+            return dagop._reachablerootspure(
+                self.parentrevs, minroot, roots, heads, includepath
+            )
 
     def ancestor(self, a, b):
         """calculate the "best" common ancestor of nodes a and b"""
@@ -1262,14 +1287,14 @@
             # odds of a binary node being all hex in ASCII are 1 in 10**25
             try:
                 node = id
-                self.rev(node) # quick search the index
+                self.rev(node)  # quick search the index
                 return node
             except error.LookupError:
-                pass # may be partial hex id
+                pass  # may be partial hex id
         try:
             # str(rev)
             rev = int(id)
-            if "%d" % rev != id:
+            if b"%d" % rev != id:
                 raise ValueError
             if rev < 0:
                 rev = len(self) + rev
@@ -1306,7 +1331,8 @@
             # fast path: for unfiltered changelog, radix tree is accurate
             if not getattr(self, 'filteredrevs', None):
                 raise error.AmbiguousPrefixLookupError(
-                    id, self.indexfile, _('ambiguous identifier'))
+                    id, self.indexfile, _(b'ambiguous identifier')
+                )
             # fall through to slow path that filters hidden revisions
         except (AttributeError, ValueError):
             # we are pure python, or key was too short to search radix tree
@@ -1319,10 +1345,11 @@
             try:
                 # hex(node)[:...]
                 l = len(id) // 2  # grab an even number of digits
-                prefix = bin(id[:l * 2])
+                prefix = bin(id[: l * 2])
                 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
-                nl = [n for n in nl if hex(n).startswith(id) and
-                      self.hasnode(n)]
+                nl = [
+                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
+                ]
                 if nullhex.startswith(id):
                     nl.append(nullid)
                 if len(nl) > 0:
@@ -1330,7 +1357,8 @@
                         self._pcache[id] = nl[0]
                         return nl[0]
                     raise error.AmbiguousPrefixLookupError(
-                        id, self.indexfile, _('ambiguous identifier'))
+                        id, self.indexfile, _(b'ambiguous identifier')
+                    )
                 if maybewdir:
                     raise error.WdirUnsupported
                 return None
@@ -1349,10 +1377,11 @@
         if n:
             return n
 
-        raise error.LookupError(id, self.indexfile, _('no match found'))
+        raise error.LookupError(id, self.indexfile, _(b'no match found'))
 
     def shortest(self, node, minlength=1):
         """Find the shortest unambiguous prefix that matches node."""
+
         def isvalid(prefix):
             try:
                 matchednode = self._partialmatch(prefix)
@@ -1362,11 +1391,11 @@
                 # single 'ff...' match
                 return True
             if matchednode is None:
-                raise error.LookupError(node, self.indexfile, _('no node'))
+                raise error.LookupError(node, self.indexfile, _(b'no node'))
             return True
 
         def maybewdir(prefix):
-            return all(c == 'f' for c in pycompat.iterbytestr(prefix))
+            return all(c == b'f' for c in pycompat.iterbytestr(prefix))
 
         hexnode = hex(node)
 
@@ -1383,7 +1412,7 @@
                 return disambiguate(hexnode, length)
             except error.RevlogError:
                 if node != wdirid:
-                    raise error.LookupError(node, self.indexfile, _('no node'))
+                    raise error.LookupError(node, self.indexfile, _(b'no node'))
             except AttributeError:
                 # Fall through to pure code
                 pass
@@ -1437,8 +1466,9 @@
         # involving reading the revlog backwards.
         cachesize = self._chunkcachesize
         realoffset = offset & ~(cachesize - 1)
-        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
-                      - realoffset)
+        reallength = (
+            (offset + length + cachesize) & ~(cachesize - 1)
+        ) - realoffset
         with self._datareadfp(df) as df:
             df.seek(realoffset)
             d = df.read(reallength)
@@ -1448,19 +1478,33 @@
             startoffset = offset - realoffset
             if len(d) - startoffset < length:
                 raise error.RevlogError(
-                    _('partial read of revlog %s; expected %d bytes from '
-                      'offset %d, got %d') %
-                    (self.indexfile if self._inline else self.datafile,
-                     length, realoffset, len(d) - startoffset))
+                    _(
+                        b'partial read of revlog %s; expected %d bytes from '
+                        b'offset %d, got %d'
+                    )
+                    % (
+                        self.indexfile if self._inline else self.datafile,
+                        length,
+                        realoffset,
+                        len(d) - startoffset,
+                    )
+                )
 
             return util.buffer(d, startoffset, length)
 
         if len(d) < length:
             raise error.RevlogError(
-                _('partial read of revlog %s; expected %d bytes from offset '
-                  '%d, got %d') %
-                (self.indexfile if self._inline else self.datafile,
-                 length, offset, len(d)))
+                _(
+                    b'partial read of revlog %s; expected %d bytes from offset '
+                    b'%d, got %d'
+                )
+                % (
+                    self.indexfile if self._inline else self.datafile,
+                    length,
+                    offset,
+                    len(d),
+                )
+            )
 
         return d
 
@@ -1483,7 +1527,7 @@
         cacheend = cachestart + length
         if cachestart >= 0 and cacheend <= l:
             if cachestart == 0 and cacheend == l:
-                return d # avoid a copy
+                return d  # avoid a copy
             return util.buffer(d, cachestart, cacheend - cachestart)
 
         return self._readsegment(offset, length, df=df)
@@ -1560,8 +1604,9 @@
         if not self._withsparseread:
             slicedchunks = (revs,)
         else:
-            slicedchunks = deltautil.slicechunk(self, revs,
-                                                targetsize=targetsize)
+            slicedchunks = deltautil.slicechunk(
+                self, revs, targetsize=targetsize
+            )
 
         for revschunk in slicedchunks:
             firstrev = revschunk[0]
@@ -1589,7 +1634,7 @@
 
     def _chunkclear(self):
         """Clear the raw chunk cache."""
-        self._chunkcache = (0, '')
+        self._chunkcache = (0, b'')
 
     def deltaparent(self, rev):
         """return deltaparent of the given revision"""
@@ -1606,7 +1651,7 @@
         """
         if not self._sparserevlog:
             return self.deltaparent(rev) == nullrev
-        elif util.safehasattr(self.index, 'issnapshot'):
+        elif util.safehasattr(self.index, b'issnapshot'):
             # directly assign the method to cache the testing and access
             self.issnapshot = self.index.issnapshot
             return self.issnapshot(rev)
@@ -1627,7 +1672,7 @@
     def snapshotdepth(self, rev):
         """number of snapshot in the chain before this one"""
         if not self.issnapshot(rev):
-            raise error.ProgrammingError('revision %d not a snapshot')
+            raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
         return len(self._deltachain(rev)[0]) - 1
 
     def revdiff(self, rev1, rev2):
@@ -1639,8 +1684,18 @@
         if rev1 != nullrev and self.deltaparent(rev2) == rev1:
             return bytes(self._chunk(rev2))
 
-        return mdiff.textdiff(self.revision(rev1, raw=True),
-                              self.revision(rev2, raw=True))
+        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
+
+    def _processflags(self, text, flags, operation, raw=False):
+        """deprecated entry point to access flag processors"""
+        msg = b'_processflags(...) is deprecated, use the specialized variant'
+        util.nouideprecwarn(msg, b'5.2', stacklevel=2)
+        if raw:
+            return text, flagutil.processflagsraw(self, text, flags)
+        elif operation == b'read':
+            return flagutil.processflagsread(self, text, flags)
+        else:  # write operation
+            return flagutil.processflagswrite(self, text, flags)
 
     def revision(self, nodeorrev, _df=None, raw=False):
         """return an uncompressed revision of a given node or revision
@@ -1651,6 +1706,25 @@
         treated as raw data when applying flag transforms. 'raw' should be set
         to True when generating changegroups or in debug commands.
         """
+        if raw:
+            msg = (
+                b'revlog.revision(..., raw=True) is deprecated, '
+                b'use revlog.rawdata(...)'
+            )
+            util.nouideprecwarn(msg, b'5.2', stacklevel=2)
+        return self._revisiondata(nodeorrev, _df, raw=raw)[0]
+
+    def sidedata(self, nodeorrev, _df=None):
+        """a map of extra data related to the changeset but not part of the hash
+
+        This function currently returns a dictionary. However, a more
+        advanced mapping object will likely be used in the future for more
+        efficient/lazy code.
+        """
+        return self._revisiondata(nodeorrev, _df)[1]
+
+    def _revisiondata(self, nodeorrev, _df=None, raw=False):
+        # deal with <nodeorrev> argument type
         if isinstance(nodeorrev, int):
             rev = nodeorrev
             node = self.node(rev)
@@ -1658,64 +1732,98 @@
             node = nodeorrev
             rev = None
 
-        cachedrev = None
-        flags = None
+        # fast path the special `nullid` rev
+        if node == nullid:
+            return b"", {}
+
+        # The text as stored inside the revlog. Might be the revision or might
+        # need to be processed to retrieve the revision.
         rawtext = None
-        if node == nullid:
-            return ""
+
+        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
+
+        if raw and validated:
+            # if we don't want to process the raw text and that raw
+            # text is cached, we can exit early.
+            return rawtext, {}
+        if rev is None:
+            rev = self.rev(node)
+        # the revlog's flags for this revision
+        # (they usually alter its state or content)
+        flags = self.flags(rev)
+
+        if validated and flags == REVIDX_DEFAULT_FLAGS:
+            # no extra flags set, no flag processor runs, text = rawtext
+            return rawtext, {}
+
+        sidedata = {}
+        if raw:
+            validatehash = flagutil.processflagsraw(self, rawtext, flags)
+            text = rawtext
+        else:
+            try:
+                r = flagutil.processflagsread(self, rawtext, flags)
+            except error.SidedataHashError as exc:
+                msg = _(b"integrity check failed on %s:%s sidedata key %d")
+                msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
+                raise error.RevlogError(msg)
+            text, validatehash, sidedata = r
+        if validatehash:
+            self.checkhash(text, node, rev=rev)
+        if not validated:
+            self._revisioncache = (node, rev, rawtext)
+
+        return text, sidedata
+
+    def _rawtext(self, node, rev, _df=None):
+        """return the possibly unvalidated rawtext for a revision
+
+        returns (rev, rawtext, validated)
+        """
+
+        # revision in the cache (could be useful to apply delta)
+        cachedrev = None
+        # An intermediate text to apply deltas to
+        basetext = None
+
+        # Check if we have the entry in cache
+        # The cache entry looks like (node, rev, rawtext)
         if self._revisioncache:
             if self._revisioncache[0] == node:
-                # _cache only stores rawtext
-                if raw:
-                    return self._revisioncache[2]
-                # duplicated, but good for perf
-                if rev is None:
-                    rev = self.rev(node)
-                if flags is None:
-                    flags = self.flags(rev)
-                # no extra flags set, no flag processor runs, text = rawtext
-                if flags == REVIDX_DEFAULT_FLAGS:
-                    return self._revisioncache[2]
-                # rawtext is reusable. need to run flag processor
-                rawtext = self._revisioncache[2]
-
+                return (rev, self._revisioncache[2], True)
             cachedrev = self._revisioncache[1]
 
-        # look up what we need to read
-        if rawtext is None:
-            if rev is None:
-                rev = self.rev(node)
-
-            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
-            if stopped:
-                rawtext = self._revisioncache[2]
-
-            # drop cache to save memory
-            self._revisioncache = None
-
-            targetsize = None
-            rawsize = self.index[rev][2]
-            if 0 <= rawsize:
-                targetsize = 4 * rawsize
-
-            bins = self._chunks(chain, df=_df, targetsize=targetsize)
-            if rawtext is None:
-                rawtext = bytes(bins[0])
-                bins = bins[1:]
-
-            rawtext = mdiff.patches(rawtext, bins)
-            self._revisioncache = (node, rev, rawtext)
-
-        if flags is None:
-            if rev is None:
-                rev = self.rev(node)
-            flags = self.flags(rev)
-
-        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
-        if validatehash:
-            self.checkhash(text, node, rev=rev)
-
-        return text
+        if rev is None:
+            rev = self.rev(node)
+
+        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
+        if stopped:
+            basetext = self._revisioncache[2]
+
+        # drop the cache to save memory; the caller is expected to
+        # update self._revisioncache after validating the text
+        self._revisioncache = None
+
+        targetsize = None
+        rawsize = self.index[rev][2]
+        if 0 <= rawsize:
+            targetsize = 4 * rawsize
+
+        bins = self._chunks(chain, df=_df, targetsize=targetsize)
+        if basetext is None:
+            basetext = bytes(bins[0])
+            bins = bins[1:]
+
+        rawtext = mdiff.patches(basetext, bins)
+        del basetext  # let us have a chance to free memory early
+        return (rev, rawtext, False)
+
+    def rawdata(self, nodeorrev, _df=None):
+        """return an uncompressed raw data of a given node or revision number.
+
+        _df - an existing file handle to read from. (internal-only)
+        """
+        return self._revisiondata(nodeorrev, _df, raw=True)[0]
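
Taken together, ``revision``, ``rawdata`` and ``sidedata`` now split what
``revision(..., raw=...)`` used to multiplex. A hedged usage sketch
(``rl`` and ``node`` are placeholders)::

    def inspect(rl, node):
        text = rl.revision(node)   # flag-processed revision text
        raw = rl.rawdata(node)     # raw bytes as stored in the revlog
        extra = rl.sidedata(node)  # sidedata mapping, possibly empty
        return text, raw, extra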
 
     def hash(self, text, p1, p2):
         """Compute a node hash.
@@ -1725,69 +1833,6 @@
         """
         return storageutil.hashrevisionsha1(text, p1, p2)
 
-    def _processflags(self, text, flags, operation, raw=False):
-        """Inspect revision data flags and applies transforms defined by
-        registered flag processors.
-
-        ``text`` - the revision data to process
-        ``flags`` - the revision flags
-        ``operation`` - the operation being performed (read or write)
-        ``raw`` - an optional argument describing if the raw transform should be
-        applied.
-
-        This method processes the flags in the order (or reverse order if
-        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
-        flag processors registered for present flags. The order of flags defined
-        in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
-
-        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
-        processed text and ``validatehash`` is a bool indicating whether the
-        returned text should be checked for hash integrity.
-
-        Note: If the ``raw`` argument is set, it has precedence over the
-        operation and will only update the value of ``validatehash``.
-        """
-        # fast path: no flag processors will run
-        if flags == 0:
-            return text, True
-        if not operation in ('read', 'write'):
-            raise error.ProgrammingError(_("invalid '%s' operation") %
-                                         operation)
-        # Check all flags are known.
-        if flags & ~REVIDX_KNOWN_FLAGS:
-            raise error.RevlogError(_("incompatible revision flag '%#x'") %
-                                    (flags & ~REVIDX_KNOWN_FLAGS))
-        validatehash = True
-        # Depending on the operation (read or write), the order might be
-        # reversed due to non-commutative transforms.
-        orderedflags = REVIDX_FLAGS_ORDER
-        if operation == 'write':
-            orderedflags = reversed(orderedflags)
-
-        for flag in orderedflags:
-            # If a flagprocessor has been registered for a known flag, apply the
-            # related operation transform and update result tuple.
-            if flag & flags:
-                vhash = True
-
-                if flag not in self._flagprocessors:
-                    message = _("missing processor for flag '%#x'") % (flag)
-                    raise error.RevlogError(message)
-
-                processor = self._flagprocessors[flag]
-                if processor is not None:
-                    readtransform, writetransform, rawtransform = processor
-
-                    if raw:
-                        vhash = rawtransform(self, text)
-                    elif operation == 'read':
-                        text, vhash = readtransform(self, text)
-                    else: # write operation
-                        text, vhash = writetransform(self, text)
-                validatehash = validatehash and vhash
-
-        return text, validatehash
-
     def checkhash(self, text, node, p1=None, p2=None, rev=None):
         """Check node hash integrity.
 
@@ -1810,8 +1855,10 @@
                 revornode = rev
                 if revornode is None:
                     revornode = templatefilters.short(hex(node))
-                raise error.RevlogError(_("integrity check failed on %s:%s")
-                    % (self.indexfile, pycompat.bytestr(revornode)))
+                raise error.RevlogError(
+                    _(b"integrity check failed on %s:%s")
+                    % (self.indexfile, pycompat.bytestr(revornode))
+                )
         except error.RevlogError:
             if self._censorable and storageutil.iscensoredtext(text):
                 raise error.CensoredNodeError(self.indexfile, node, text)
@@ -1825,14 +1872,17 @@
         to use multiple index and data files.
         """
         tiprev = len(self) - 1
-        if (not self._inline or
-            (self.start(tiprev) + self.length(tiprev)) < _maxinline):
+        if (
+            not self._inline
+            or (self.start(tiprev) + self.length(tiprev)) < _maxinline
+        ):
             return
 
         trinfo = tr.find(self.indexfile)
         if trinfo is None:
-            raise error.RevlogError(_("%s not found in the transaction")
-                                    % self.indexfile)
+            raise error.RevlogError(
+                _(b"%s not found in the transaction") % self.indexfile
+            )
 
         trindex = trinfo[2]
         if trindex is not None:
@@ -1851,11 +1901,11 @@
             # its usage.
             self._writinghandles = None
 
-        with self._indexfp('r') as ifh, self._datafp('w') as dfh:
+        with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
             for r in self:
                 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
 
-        with self._indexfp('w') as fp:
+        with self._indexfp(b'w') as fp:
             self.version &= ~FLAG_INLINE_DATA
             self._inline = False
             io = self._io
@@ -1873,8 +1923,19 @@
         """called when trying to add a node already stored.
         """
 
-    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
-                    node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
+    def addrevision(
+        self,
+        text,
+        transaction,
+        link,
+        p1,
+        p2,
+        cachedelta=None,
+        node=None,
+        flags=REVIDX_DEFAULT_FLAGS,
+        deltacomputer=None,
+        sidedata=None,
+    ):
         """add a revision to the log
 
         text - the revision data to add
@@ -1890,13 +1951,26 @@
             multiple calls
         """
         if link == nullrev:
-            raise error.RevlogError(_("attempted to add linkrev -1 to %s")
-                                    % self.indexfile)
+            raise error.RevlogError(
+                _(b"attempted to add linkrev -1 to %s") % self.indexfile
+            )
+
+        if sidedata is None:
+            sidedata = {}
+            flags = flags & ~REVIDX_SIDEDATA
+        elif not self.hassidedata:
+            raise error.ProgrammingError(
+                _(b"trying to add sidedata to a revlog who don't support them")
+            )
+        else:
+            flags |= REVIDX_SIDEDATA
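
A sketch of how a caller might exercise the new ``sidedata`` parameter; the
key and value are hypothetical, and the revlog must have ``hassidedata``
set or the ``ProgrammingError`` above is raised::

    def store_with_sidedata(rl, tr, text, linkrev, p1, p2):
        sd = {1: b'extra-metadata'}  # hypothetical sidedata key/value
        return rl.addrevision(text, tr, linkrev, p1, p2, sidedata=sd)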
 
         if flags:
             node = node or self.hash(text, p1, p2)
 
-        rawtext, validatehash = self._processflags(text, flags, 'write')
+        rawtext, validatehash = flagutil.processflagswrite(
+            self, text, flags, sidedata=sidedata
+        )
 
         # If the flag processor modifies the revision data, ignore any provided
         # cachedelta.
@@ -1905,8 +1979,11 @@
 
         if len(rawtext) > _maxentrysize:
             raise error.RevlogError(
-                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
-                % (self.indexfile, len(rawtext)))
+                _(
+                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
+                )
+                % (self.indexfile, len(rawtext))
+            )
 
         node = node or self.hash(rawtext, p1, p2)
         if node in self.nodemap:
@@ -1915,24 +1992,52 @@
         if validatehash:
             self.checkhash(rawtext, node, p1=p1, p2=p2)
 
-        return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
-                                   flags, cachedelta=cachedelta,
-                                   deltacomputer=deltacomputer)
-
-    def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
-                       cachedelta=None, deltacomputer=None):
+        return self.addrawrevision(
+            rawtext,
+            transaction,
+            link,
+            p1,
+            p2,
+            node,
+            flags,
+            cachedelta=cachedelta,
+            deltacomputer=deltacomputer,
+        )
+
+    def addrawrevision(
+        self,
+        rawtext,
+        transaction,
+        link,
+        p1,
+        p2,
+        node,
+        flags,
+        cachedelta=None,
+        deltacomputer=None,
+    ):
         """add a raw revision with known flags, node and parents
         useful when reusing a revision not stored in this revlog (ex: received
         over wire, or read from an external bundle).
         """
         dfh = None
         if not self._inline:
-            dfh = self._datafp("a+")
-        ifh = self._indexfp("a+")
+            dfh = self._datafp(b"a+")
+        ifh = self._indexfp(b"a+")
         try:
-            return self._addrevision(node, rawtext, transaction, link, p1, p2,
-                                     flags, cachedelta, ifh, dfh,
-                                     deltacomputer=deltacomputer)
+            return self._addrevision(
+                node,
+                rawtext,
+                transaction,
+                link,
+                p1,
+                p2,
+                flags,
+                cachedelta,
+                ifh,
+                dfh,
+                deltacomputer=deltacomputer,
+            )
         finally:
             if dfh:
                 dfh.close()
@@ -1941,17 +2046,17 @@
     def compress(self, data):
         """Generate a possibly-compressed representation of data."""
         if not data:
-            return '', data
+            return b'', data
 
         compressed = self._compressor.compress(data)
 
         if compressed:
             # The revlog compressor added the header in the returned data.
-            return '', compressed
-
-        if data[0:1] == '\0':
-            return '', data
-        return 'u', data
+            return b'', compressed
+
+        if data[0:1] == b'\0':
+            return b'', data
+        return b'u', data
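
The one-byte header convention shared by ``compress`` and ``decompress``
can be summarized with an illustrative classifier (not code from this
patch)::

    def classify_chunk(data):
        t = data[0:1]
        if t == b'\0':
            return 'stored as-is (a leading NUL needs no marker)'
        elif t == b'u':
            return 'stored uncompressed behind an explicit marker'
        elif t == b'x':
            return 'zlib-compressed (the zlib header doubles as marker)'
        return 'handled by the engine registered for %r' % t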
 
     def decompress(self, data):
         """Decompress a revlog chunk.
@@ -1985,16 +2090,18 @@
         # compressed chunks. And this matters for changelog and manifest reads.
         t = data[0:1]
 
-        if t == 'x':
+        if t == b'x':
             try:
                 return _zlibdecompress(data)
             except zlib.error as e:
-                raise error.RevlogError(_('revlog decompress error: %s') %
-                                        stringutil.forcebytestr(e))
+                raise error.RevlogError(
+                    _(b'revlog decompress error: %s')
+                    % stringutil.forcebytestr(e)
+                )
         # '\0' is more common than 'u' so it goes first.
-        elif t == '\0':
+        elif t == b'\0':
             return data
-        elif t == 'u':
+        elif t == b'u':
             return util.buffer(data, 1)
 
         try:
@@ -2005,13 +2112,25 @@
                 compressor = engine.revlogcompressor(self._compengineopts)
                 self._decompressors[t] = compressor
             except KeyError:
-                raise error.RevlogError(_('unknown compression type %r') % t)
+                raise error.RevlogError(_(b'unknown compression type %r') % t)
 
         return compressor.decompress(data)
 
-    def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
-                     cachedelta, ifh, dfh, alwayscache=False,
-                     deltacomputer=None):
+    def _addrevision(
+        self,
+        node,
+        rawtext,
+        transaction,
+        link,
+        p1,
+        p2,
+        flags,
+        cachedelta,
+        ifh,
+        dfh,
+        alwayscache=False,
+        deltacomputer=None,
+    ):
         """internal function to add revisions to the log
 
         see addrevision for argument descriptions.
@@ -2026,11 +2145,13 @@
           if both are set, they must correspond to each other.
         """
         if node == nullid:
-            raise error.RevlogError(_("%s: attempt to add null revision") %
-                                    self.indexfile)
+            raise error.RevlogError(
+                _(b"%s: attempt to add null revision") % self.indexfile
+            )
         if node == wdirid or node in wdirfilenodeids:
-            raise error.RevlogError(_("%s: attempt to add wdir revision") %
-                                    self.indexfile)
+            raise error.RevlogError(
+                _(b"%s: attempt to add wdir revision") % self.indexfile
+            )
 
         if self._inline:
             fh = ifh
@@ -2050,8 +2171,9 @@
             # need rawtext size, before changed by flag processors, which is
             # the non-raw size. use revlog explicitly to avoid filelog's extra
             # logic that might remove metadata size.
-            textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
-                                        cachedelta[1])
+            textlen = mdiff.patchedsize(
+                revlog.size(self, cachedelta[0]), cachedelta[1]
+            )
         else:
             textlen = len(rawtext)
 
@@ -2062,8 +2184,16 @@
 
         deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
 
-        e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
-             deltainfo.base, link, p1r, p2r, node)
+        e = (
+            offset_type(offset, flags),
+            deltainfo.deltalen,
+            textlen,
+            deltainfo.base,
+            link,
+            p1r,
+            p2r,
+            node,
+        )
         self.index.append(e)
         self.nodemap[node] = curr
 
@@ -2073,15 +2203,16 @@
             self._nodepos = curr
 
         entry = self._io.packentry(e, self.node, self.version, curr)
-        self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
-                         link, offset)
+        self._writeentry(
+            transaction, ifh, dfh, entry, deltainfo.data, link, offset
+        )
 
         rawtext = btext[0]
 
         if alwayscache and rawtext is None:
             rawtext = deltacomputer.buildtext(revinfo, fh)
 
-        if type(rawtext) == bytes: # only accept immutable objects
+        if type(rawtext) == bytes:  # only accept immutable objects
             self._revisioncache = (node, curr, rawtext)
         self._chainbasecache[curr] = deltainfo.chainbase
         return node
@@ -2132,7 +2263,7 @@
         """
 
         if self._writinghandles:
-            raise error.ProgrammingError('cannot nest addgroup() calls')
+            raise error.ProgrammingError(b'cannot nest addgroup() calls')
 
         nodes = []
 
@@ -2140,7 +2271,7 @@
         end = 0
         if r:
             end = self.end(r - 1)
-        ifh = self._indexfp("a+")
+        ifh = self._indexfp(b"a+")
         isize = r * self._io.size
         if self._inline:
             transaction.add(self.indexfile, end + isize, r)
@@ -2148,7 +2279,8 @@
         else:
             transaction.add(self.indexfile, isize, r)
             transaction.add(self.datafile, end)
-            dfh = self._datafp("a+")
+            dfh = self._datafp(b"a+")
+
         def flush():
             if dfh:
                 dfh.flush()
@@ -2173,24 +2305,27 @@
 
                 for p in (p1, p2):
                     if p not in self.nodemap:
-                        raise error.LookupError(p, self.indexfile,
-                                                _('unknown parent'))
+                        raise error.LookupError(
+                            p, self.indexfile, _(b'unknown parent')
+                        )
 
                 if deltabase not in self.nodemap:
-                    raise error.LookupError(deltabase, self.indexfile,
-                                            _('unknown delta base'))
+                    raise error.LookupError(
+                        deltabase, self.indexfile, _(b'unknown delta base')
+                    )
 
                 baserev = self.rev(deltabase)
 
                 if baserev != nullrev and self.iscensored(baserev):
                     # if base is censored, delta must be full replacement in a
                     # single patch operation
-                    hlen = struct.calcsize(">lll")
+                    hlen = struct.calcsize(b">lll")
                     oldlen = self.rawsize(baserev)
                     newlen = len(delta) - hlen
                     if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
-                        raise error.CensoredBaseError(self.indexfile,
-                                                      self.node(baserev))
+                        raise error.CensoredBaseError(
+                            self.indexfile, self.node(baserev)
+                        )
 
                 if not flags and self._peek_iscensored(baserev, delta, flush):
                     flags |= REVIDX_ISCENSORED
@@ -2202,11 +2337,20 @@
                 # We're only using addgroup() in the context of changegroup
                 # generation so the revision data can always be handled as raw
                 # by the flagprocessor.
-                self._addrevision(node, None, transaction, link,
-                                  p1, p2, flags, (baserev, delta),
-                                  ifh, dfh,
-                                  alwayscache=bool(addrevisioncb),
-                                  deltacomputer=deltacomputer)
+                self._addrevision(
+                    node,
+                    None,
+                    transaction,
+                    link,
+                    p1,
+                    p2,
+                    flags,
+                    (baserev, delta),
+                    ifh,
+                    dfh,
+                    alwayscache=bool(addrevisioncb),
+                    deltacomputer=deltacomputer,
+                )
 
                 if addrevisioncb:
                     addrevisioncb(self, node)
@@ -2215,8 +2359,8 @@
                     # addrevision switched from inline to conventional
                     # reopen the index
                     ifh.close()
-                    dfh = self._datafp("a+")
-                    ifh = self._indexfp("a+")
+                    dfh = self._datafp(b"a+")
+                    ifh = self._indexfp(b"a+")
                     self._writinghandles = (ifh, dfh)
         finally:
             self._writinghandles = None
@@ -2247,9 +2391,13 @@
         Returns a tuple containing the minimum rev and a set of all revs that
         have linkrevs that will be broken by this strip.
         """
-        return storageutil.resolvestripinfo(minlink, len(self) - 1,
-                                            self.headrevs(),
-                                            self.linkrev, self.parentrevs)
+        return storageutil.resolvestripinfo(
+            minlink,
+            len(self) - 1,
+            self.headrevs(),
+            self.linkrev,
+            self.parentrevs,
+        )
 
     def strip(self, minlink, transaction):
         """truncate the revlog on the first revision with a linkrev >= minlink
@@ -2342,22 +2490,33 @@
             res.append(self.datafile)
         return res
 
-    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
-                      assumehaveparentrevisions=False,
-                      deltamode=repository.CG_DELTAMODE_STD):
-        if nodesorder not in ('nodes', 'storage', 'linear', None):
-            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
-                                         nodesorder)
+    def emitrevisions(
+        self,
+        nodes,
+        nodesorder=None,
+        revisiondata=False,
+        assumehaveparentrevisions=False,
+        deltamode=repository.CG_DELTAMODE_STD,
+    ):
+        if nodesorder not in (b'nodes', b'storage', b'linear', None):
+            raise error.ProgrammingError(
+                b'unhandled value for nodesorder: %s' % nodesorder
+            )
 
         if nodesorder is None and not self._generaldelta:
-            nodesorder = 'storage'
-
-        if (not self._storedeltachains and
-                deltamode != repository.CG_DELTAMODE_PREV):
+            nodesorder = b'storage'
+
+        if (
+            not self._storedeltachains
+            and deltamode != repository.CG_DELTAMODE_PREV
+        ):
             deltamode = repository.CG_DELTAMODE_FULL
 
         return storageutil.emitrevisions(
-            self, nodes, nodesorder, revlogrevisiondelta,
+            self,
+            nodes,
+            nodesorder,
+            revlogrevisiondelta,
             deltaparentfn=self.deltaparent,
             candeltafn=self.candelta,
             rawsizefn=self.rawsize,
@@ -2365,18 +2524,26 @@
             flagsfn=self.flags,
             deltamode=deltamode,
             revisiondata=revisiondata,
-            assumehaveparentrevisions=assumehaveparentrevisions)
-
-    DELTAREUSEALWAYS = 'always'
-    DELTAREUSESAMEREVS = 'samerevs'
-    DELTAREUSENEVER = 'never'
-
-    DELTAREUSEFULLADD = 'fulladd'
-
-    DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
-
-    def clone(self, tr, destrevlog, addrevisioncb=None,
-              deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
+            assumehaveparentrevisions=assumehaveparentrevisions,
+        )
+
+    DELTAREUSEALWAYS = b'always'
+    DELTAREUSESAMEREVS = b'samerevs'
+    DELTAREUSENEVER = b'never'
+
+    DELTAREUSEFULLADD = b'fulladd'
+
+    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
+
+    def clone(
+        self,
+        tr,
+        destrevlog,
+        addrevisioncb=None,
+        deltareuse=DELTAREUSESAMEREVS,
+        forcedeltabothparents=None,
+        sidedatacompanion=None,
+    ):
         """Copy this revlog to another, possibly with format changes.
 
         The destination revlog will contain the same revisions and nodes.
@@ -2399,6 +2566,10 @@
            Deltas will never be reused. This is the slowest mode of execution.
            This mode can be used to recompute deltas (e.g. if the diff/delta
            algorithm changes).
+        DELTAREUSEFULLADD
+           Revisions will be re-added as if they were new content. This is
+           slower than DELTAREUSEALWAYS but allows more mechanisms to kick
+           in, e.g. large file detection and handling.
 
         Delta computation can be slow, so the choice of delta reuse policy can
         significantly affect run time.
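
A hedged usage sketch of these policies (``tr``, ``srcrl`` and ``destrl``
are assumptions)::

    def recompute_deltas(srcrl, destrl, tr):
        # DELTAREUSENEVER forces every delta to be recomputed, the
        # slowest but most thorough policy
        srcrl.clone(tr, destrl, deltareuse=srcrl.DELTAREUSENEVER)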
@@ -2413,17 +2584,33 @@
         In addition to the delta policy, the ``forcedeltabothparents``
         argument controls whether to force compute deltas against both parents
         for merges. By default, the current default is used.
+
+        If not None, `sidedatacompanion` is a callable that accepts two
+        arguments:
+
+            (srcrevlog, rev)
+
+        and returns a triplet that controls changes to the sidedata content
+        from the old revision to the new clone result:
+
+            (dropall, filterout, update)
+
+        * if `dropall` is True, all sidedata should be dropped
+        * `filterout` is a set of sidedata keys that should be dropped
+        * `update` is a mapping of additional/new key -> value
         """
         if deltareuse not in self.DELTAREUSEALL:
-            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
+            raise ValueError(
+                _(b'value for deltareuse invalid: %s') % deltareuse
+            )
 
         if len(destrevlog):
-            raise ValueError(_('destination revlog is not empty'))
+            raise ValueError(_(b'destination revlog is not empty'))
 
         if getattr(self, 'filteredrevs', None):
-            raise ValueError(_('source revlog has filtered revisions'))
+            raise ValueError(_(b'source revlog has filtered revisions'))
         if getattr(destrevlog, 'filteredrevs', None):
-            raise ValueError(_('destination revlog has filtered revisions'))
+            raise ValueError(_(b'destination revlog has filtered revisions'))
 
         # lazydelta and lazydeltabase controls whether to reuse a cached delta,
         # if possible.
@@ -2444,70 +2631,123 @@
 
             destrevlog._deltabothparents = forcedeltabothparents or oldamd
 
-            deltacomputer = deltautil.deltacomputer(destrevlog)
-            index = self.index
-            for rev in self:
-                entry = index[rev]
-
-                # Some classes override linkrev to take filtered revs into
-                # account. Use raw entry from index.
-                flags = entry[0] & 0xffff
-                linkrev = entry[4]
-                p1 = index[entry[5]][7]
-                p2 = index[entry[6]][7]
-                node = entry[7]
-
-                # (Possibly) reuse the delta from the revlog if allowed and
-                # the revlog chunk is a delta.
-                cachedelta = None
-                rawtext = None
+            self._clone(
+                tr,
+                destrevlog,
+                addrevisioncb,
+                deltareuse,
+                forcedeltabothparents,
+                sidedatacompanion,
+            )
+
+        finally:
+            destrevlog._lazydelta = oldlazydelta
+            destrevlog._lazydeltabase = oldlazydeltabase
+            destrevlog._deltabothparents = oldamd
+
+    def _clone(
+        self,
+        tr,
+        destrevlog,
+        addrevisioncb,
+        deltareuse,
+        forcedeltabothparents,
+        sidedatacompanion,
+    ):
+        """perform the core duty of `revlog.clone` after parameter processing"""
+        deltacomputer = deltautil.deltacomputer(destrevlog)
+        index = self.index
+        for rev in self:
+            entry = index[rev]
+
+            # Some classes override linkrev to take filtered revs into
+            # account. Use raw entry from index.
+            flags = entry[0] & 0xFFFF
+            linkrev = entry[4]
+            p1 = index[entry[5]][7]
+            p2 = index[entry[6]][7]
+            node = entry[7]
+
+            sidedataactions = (False, [], {})
+            if sidedatacompanion is not None:
+                sidedataactions = sidedatacompanion(self, rev)
+
+            # (Possibly) reuse the delta from the revlog if allowed and
+            # the revlog chunk is a delta.
+            cachedelta = None
+            rawtext = None
+            if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
+                dropall, filterout, update = sidedataactions
+                text, sidedata = self._revisiondata(rev)
+                if dropall:
+                    sidedata = {}
+                for key in filterout:
+                    sidedata.pop(key, None)
+                sidedata.update(update)
+                if not sidedata:
+                    sidedata = None
+                destrevlog.addrevision(
+                    text,
+                    tr,
+                    linkrev,
+                    p1,
+                    p2,
+                    cachedelta=cachedelta,
+                    node=node,
+                    flags=flags,
+                    deltacomputer=deltacomputer,
+                    sidedata=sidedata,
+                )
+            else:
                 if destrevlog._lazydelta:
                     dp = self.deltaparent(rev)
                     if dp != nullrev:
                         cachedelta = (dp, bytes(self._chunk(rev)))
 
                 if not cachedelta:
-                    rawtext = self.revision(rev, raw=True)
-
-
-                if deltareuse == self.DELTAREUSEFULLADD:
-                    destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
-                                           cachedelta=cachedelta,
-                                           node=node, flags=flags,
-                                           deltacomputer=deltacomputer)
-                else:
-                    ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
-                                            checkambig=False)
-                    dfh = None
-                    if not destrevlog._inline:
-                        dfh = destrevlog.opener(destrevlog.datafile, 'a+')
-                    try:
-                        destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
-                                                p2, flags, cachedelta, ifh, dfh,
-                                                deltacomputer=deltacomputer)
-                    finally:
-                        if dfh:
-                            dfh.close()
-                        ifh.close()
-
-                if addrevisioncb:
-                    addrevisioncb(self, rev, node)
-        finally:
-            destrevlog._lazydelta = oldlazydelta
-            destrevlog._lazydeltabase = oldlazydeltabase
-            destrevlog._deltabothparents = oldamd
+                    rawtext = self.rawdata(rev)
+
+                ifh = destrevlog.opener(
+                    destrevlog.indexfile, b'a+', checkambig=False
+                )
+                dfh = None
+                if not destrevlog._inline:
+                    dfh = destrevlog.opener(destrevlog.datafile, b'a+')
+                try:
+                    destrevlog._addrevision(
+                        node,
+                        rawtext,
+                        tr,
+                        linkrev,
+                        p1,
+                        p2,
+                        flags,
+                        cachedelta,
+                        ifh,
+                        dfh,
+                        deltacomputer=deltacomputer,
+                    )
+                finally:
+                    if dfh:
+                        dfh.close()
+                    ifh.close()
+
+            if addrevisioncb:
+                addrevisioncb(self, rev, node)
 
     def censorrevision(self, tr, censornode, tombstone=b''):
         if (self.version & 0xFFFF) == REVLOGV0:
-            raise error.RevlogError(_('cannot censor with version %d revlogs') %
-                                    self.version)
+            raise error.RevlogError(
+                _(b'cannot censor with version %d revlogs') % self.version
+            )
 
         censorrev = self.rev(censornode)
         tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
 
         if len(tombstone) > self.rawsize(censorrev):
-            raise error.Abort(_('censor tombstone must be no longer than '
-                                'censored data'))
+            raise error.Abort(
+                _(b'censor tombstone must be no longer than censored data')
+            )
 
         # Rewriting the revlog in place is hard. Our strategy for censoring is
         # to create a new revlog, copy all revisions to it, then replace the
@@ -2517,8 +2757,7 @@
         newdatafile = self.datafile + b'.tmpcensored'
 
         # This is a bit dangerous. We could easily have a mismatch of state.
-        newrl = revlog(self.opener, newindexfile, newdatafile,
-                       censorable=True)
+        newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
         newrl.version = self.version
         newrl._generaldelta = self._generaldelta
         newrl._io = self._io
@@ -2528,31 +2767,49 @@
             p1, p2 = self.parents(node)
 
             if rev == censorrev:
-                newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
-                                     p1, p2, censornode, REVIDX_ISCENSORED)
+                newrl.addrawrevision(
+                    tombstone,
+                    tr,
+                    self.linkrev(censorrev),
+                    p1,
+                    p2,
+                    censornode,
+                    REVIDX_ISCENSORED,
+                )
 
                 if newrl.deltaparent(rev) != nullrev:
-                    raise error.Abort(_('censored revision stored as delta; '
-                                        'cannot censor'),
-                                      hint=_('censoring of revlogs is not '
-                                             'fully implemented; please report '
-                                             'this bug'))
+                    raise error.Abort(
+                        _(
+                            b'censored revision stored as delta; '
+                            b'cannot censor'
+                        ),
+                        hint=_(
+                            b'censoring of revlogs is not '
+                            b'fully implemented; please report '
+                            b'this bug'
+                        ),
+                    )
                 continue
 
             if self.iscensored(rev):
                 if self.deltaparent(rev) != nullrev:
-                    raise error.Abort(_('cannot censor due to censored '
-                                        'revision having delta stored'))
+                    raise error.Abort(
+                        _(
+                            b'cannot censor due to censored '
+                            b'revision having delta stored'
+                        )
+                    )
                 rawtext = self._chunk(rev)
             else:
-                rawtext = self.revision(rev, raw=True)
-
-            newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
-                                 self.flags(rev))
-
-        tr.addbackup(self.indexfile, location='store')
+                rawtext = self.rawdata(rev)
+
+            newrl.addrawrevision(
+                rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
+            )
+
+        tr.addbackup(self.indexfile, location=b'store')
         if not self._inline:
-            tr.addbackup(self.datafile, location='store')
+            tr.addbackup(self.datafile, location=b'store')
 
         self.opener.rename(newrl.indexfile, self.indexfile)
         if not self._inline:
@@ -2569,19 +2826,20 @@
         """
         dd, di = self.checksize()
         if dd:
-            yield revlogproblem(error=_('data length off by %d bytes') % dd)
+            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
         if di:
-            yield revlogproblem(error=_('index contains %d extra bytes') % di)
+            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
 
         version = self.version & 0xFFFF
 
         # The verifier tells us what version revlog we should be.
-        if version != state['expectedversion']:
+        if version != state[b'expectedversion']:
             yield revlogproblem(
-                warning=_("warning: '%s' uses revlog format %d; expected %d") %
-                        (self.indexfile, version, state['expectedversion']))
-
-        state['skipread'] = set()
+                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
+                % (self.indexfile, version, state[b'expectedversion'])
+            )
+
+        state[b'skipread'] = set()
 
         for rev in self:
             node = self.node(rev)
@@ -2603,8 +2861,8 @@
             #   rawtext[0:2]=='\1\n'| False  | True   | True  | ?
             #
             # "rawtext" means the raw text stored in revlog data, which
-            # could be retrieved by "revision(rev, raw=True)". "text"
-            # mentioned below is "revision(rev, raw=False)".
+            # could be retrieved by "rawdata(rev)". "text"
+            # mentioned below is "revision(rev)".
             #
             # There are 3 different lengths stored physically:
             #  1. L1: rawsize, stored in revlog index
@@ -2614,7 +2872,7 @@
             #
             # L1 should be equal to L2. L3 could be different from them.
             # "text" may or may not affect commit hash depending on flag
-            # processors (see revlog.addflagprocessor).
+            # processors (see flagutil.addflagprocessor).
             #
             #              | common  | rename | meta  | ext
             # -------------------------------------------------
@@ -2635,57 +2893,66 @@
             #     use either "text" (external), or "rawtext" (in revlog).
 
             try:
-                skipflags = state.get('skipflags', 0)
+                skipflags = state.get(b'skipflags', 0)
                 if skipflags:
                     skipflags &= self.flags(rev)
 
                 if skipflags:
-                    state['skipread'].add(node)
+                    state[b'skipread'].add(node)
                 else:
                     # Side-effect: read content and verify hash.
                     self.revision(node)
 
                 l1 = self.rawsize(rev)
-                l2 = len(self.revision(node, raw=True))
+                l2 = len(self.rawdata(node))
 
                 if l1 != l2:
                     yield revlogproblem(
-                        error=_('unpacked size is %d, %d expected') % (l2, l1),
-                        node=node)
+                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
+                        node=node,
+                    )
 
             except error.CensoredNodeError:
-                if state['erroroncensored']:
-                    yield revlogproblem(error=_('censored file data'),
-                                        node=node)
-                    state['skipread'].add(node)
+                if state[b'erroroncensored']:
+                    yield revlogproblem(
+                        error=_(b'censored file data'), node=node
+                    )
+                    state[b'skipread'].add(node)
             except Exception as e:
                 yield revlogproblem(
-                    error=_('unpacking %s: %s') % (short(node),
-                                                   stringutil.forcebytestr(e)),
-                    node=node)
-                state['skipread'].add(node)
-
-    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
-                    revisionscount=False, trackedsize=False,
-                    storedsize=False):
+                    error=_(b'unpacking %s: %s')
+                    % (short(node), stringutil.forcebytestr(e)),
+                    node=node,
+                )
+                state[b'skipread'].add(node)
+
+    def storageinfo(
+        self,
+        exclusivefiles=False,
+        sharedfiles=False,
+        revisionscount=False,
+        trackedsize=False,
+        storedsize=False,
+    ):
         d = {}
 
         if exclusivefiles:
-            d['exclusivefiles'] = [(self.opener, self.indexfile)]
+            d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
             if not self._inline:
-                d['exclusivefiles'].append((self.opener, self.datafile))
+                d[b'exclusivefiles'].append((self.opener, self.datafile))
 
         if sharedfiles:
-            d['sharedfiles'] = []
+            d[b'sharedfiles'] = []
 
         if revisionscount:
-            d['revisionscount'] = len(self)
+            d[b'revisionscount'] = len(self)
 
         if trackedsize:
-            d['trackedsize'] = sum(map(self.rawsize, iter(self)))
+            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
 
         if storedsize:
-            d['storedsize'] = sum(self.opener.stat(path).st_size
-                                  for path in self.files())
+            d[b'storedsize'] = sum(
+                self.opener.stat(path).st_size for path in self.files()
+            )
 
         return d
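
A small usage sketch of the byte-keyed result (``rl`` is assumed to be a
revlog instance)::

    def storage_summary(rl):
        info = rl.storageinfo(revisionscount=True, trackedsize=True)
        return info[b'revisionscount'], info[b'trackedsize']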
--- a/mercurial/revlogutils/constants.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/revlogutils/constants.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,10 +9,7 @@
 
 from __future__ import absolute_import
 
-from .. import (
-    repository,
-    util,
-)
+from ..interfaces import repository
 
 # revlog header flags
 REVLOGV0 = 0
@@ -21,9 +18,9 @@
 # Reminder: change the bounds check in revlog.__init__ when this is changed.
 REVLOGV2 = 0xDEAD
 # Shared across v1 and v2.
-FLAG_INLINE_DATA = (1 << 16)
+FLAG_INLINE_DATA = 1 << 16
 # Only used by v1, implied by v2.
-FLAG_GENERALDELTA = (1 << 17)
+FLAG_GENERALDELTA = 1 << 17
 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
 REVLOG_DEFAULT_FORMAT = REVLOGV1
 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
@@ -41,16 +38,20 @@
 REVIDX_ELLIPSIS = repository.REVISION_FLAG_ELLIPSIS
 # revision data is stored externally
 REVIDX_EXTSTORED = repository.REVISION_FLAG_EXTSTORED
+# revision data contains extra metadata not part of the official digest
+REVIDX_SIDEDATA = repository.REVISION_FLAG_SIDEDATA
 REVIDX_DEFAULT_FLAGS = 0
 # stable order in which flags need to be processed and their processors applied
 REVIDX_FLAGS_ORDER = [
     REVIDX_ISCENSORED,
     REVIDX_ELLIPSIS,
     REVIDX_EXTSTORED,
+    REVIDX_SIDEDATA,
 ]
-REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
+
 # bitmark for flags that could cause rawdata content change
-REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
+REVIDX_RAWTEXT_CHANGING_FLAGS = (
+    REVIDX_ISCENSORED | REVIDX_EXTSTORED | REVIDX_SIDEDATA
+)
 
 SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000
-
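
To illustrate how the new flag participates in the bit masks (a sketch
using only names defined in this module)::

    from mercurial.revlogutils import constants

    def may_change_rawtext(flags):
        # censored, externally stored and sidedata revisions all store
        # bytes that differ from the flag-processed text
        return bool(flags & constants.REVIDX_RAWTEXT_CHANGING_FLAGS)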
--- a/mercurial/revlogutils/deltas.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/revlogutils/deltas.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,19 +13,16 @@
 import struct
 
 # import stuff from node for others to import from revlog
-from ..node import (
-    nullrev,
-)
+from ..node import nullrev
 from ..i18n import _
+from ..pycompat import getattr
 
 from .constants import (
     REVIDX_ISCENSORED,
     REVIDX_RAWTEXT_CHANGING_FLAGS,
 )
 
-from ..thirdparty import (
-    attr,
-)
+from ..thirdparty import attr
 
 from .. import (
     error,
@@ -33,9 +30,12 @@
     util,
 )
 
+from . import flagutil
+
 # maximum <delta-chain-data>/<revision-text-length> ratio
 LIMIT_DELTA2TEXT = 2
 
+
 class _testrevlog(object):
     """minimalist fake revlog to use in doctests"""
 
@@ -70,6 +70,7 @@
             return True
         return rev in self._snapshot
 
+
 def slicechunk(revlog, revs, targetsize=None):
     """slice revs to reduce the amount of unrelated data to be read from disk.
 
@@ -137,12 +138,13 @@
     densityslicing = getattr(revlog.index, 'slicechunktodensity', None)
     if densityslicing is None:
         densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z)
-    for chunk in densityslicing(revs,
-                                revlog._srdensitythreshold,
-                                revlog._srmingapsize):
+    for chunk in densityslicing(
+        revs, revlog._srdensitythreshold, revlog._srmingapsize
+    ):
         for subchunk in _slicechunktosize(revlog, chunk, targetsize):
             yield subchunk
 
+
 def _slicechunktosize(revlog, revs, targetsize=None):
     """slice revs to match the target size
 
@@ -253,7 +255,7 @@
     startrevidx = 0
     endrevidx = 1
     iterrevs = enumerate(revs)
-    next(iterrevs) # skip first rev.
+    next(iterrevs)  # skip first rev.
     # first step: get snapshots out of the way
     for idx, r in iterrevs:
         span = revlog.end(r) - startdata
@@ -278,12 +280,12 @@
     while (enddata - startdata) > targetsize:
         endrevidx = nbitem
         if nbitem - startrevidx <= 1:
-            break # protect against individual chunk larger than limit
+            break  # protect against individual chunk larger than limit
         localenddata = revlog.end(revs[endrevidx - 1])
         span = localenddata - startdata
         while span > targetsize:
             if endrevidx - startrevidx <= 1:
-                break # protect against individual chunk larger than limit
+                break  # protect against individual chunk larger than limit
             endrevidx -= (endrevidx - startrevidx) // 2
             localenddata = revlog.end(revs[endrevidx - 1])
             span = localenddata - startdata
@@ -297,8 +299,8 @@
     if chunk:
         yield chunk
 
-def _slicechunktodensity(revlog, revs, targetdensity=0.5,
-                         mingapsize=0):
+
+def _slicechunktodensity(revlog, revs, targetdensity=0.5, mingapsize=0):
     """slice revs to reduce the amount of unrelated data to be read from disk.
 
     ``revs`` is sliced into groups that should be read in one time.
@@ -423,6 +425,7 @@
     if chunk:
         yield chunk
 
+
 def _trimchunk(revlog, revs, startidx, endidx=None):
     """returns revs[startidx:endidx] without empty trailing revs
 
@@ -469,13 +472,14 @@
     # If we have a non-empty delta candidate, there is nothing to trim
     if revs[endidx - 1] < len(revlog):
         # Trim empty revs at the end, except the very first revision of a chain
-        while (endidx > 1
-                and endidx > startidx
-                and length(revs[endidx - 1]) == 0):
+        while (
+            endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0
+        ):
             endidx -= 1
 
     return revs[startidx:endidx]
 
+
 def segmentspan(revlog, revs):
     """Get the byte span of a segment of revisions
 
@@ -505,14 +509,16 @@
     end = revlog.end(revs[-1])
     return end - revlog.start(revs[0])
 
+
 def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
     """build full text from a (base, delta) pair and other metadata"""
     # special case deltas which replace entire base; no need to decode
     # base revision. this neatly avoids censored bases, which throw when
     # they're decoded.
-    hlen = struct.calcsize(">lll")
-    if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
-                                               len(delta) - hlen):
+    hlen = struct.calcsize(b">lll")
+    if delta[:hlen] == mdiff.replacediffheader(
+        revlog.rawsize(baserev), len(delta) - hlen
+    ):
         fulltext = delta[hlen:]
     else:
         # deltabase is rawtext before changed by flag processors, which is
@@ -521,19 +527,20 @@
         fulltext = mdiff.patch(basetext, delta)
 
     try:
-        res = revlog._processflags(fulltext, flags, 'read', raw=True)
-        fulltext, validatehash = res
+        validatehash = flagutil.processflagsraw(revlog, fulltext, flags)
         if validatehash:
             revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
         if flags & REVIDX_ISCENSORED:
-            raise error.StorageError(_('node %s is not censored') %
-                                     expectednode)
+            raise error.StorageError(
+                _(b'node %s is not censored') % expectednode
+            )
     except error.CensoredNodeError:
         # must pass the censored index flag to add censored revisions
         if not flags & REVIDX_ISCENSORED:
             raise
     return fulltext
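
The full-replacement fast path above hinges on the fixed 12-byte hunk
header; a standalone sketch of the same check (mirroring the code, with
the revlog passed in)::

    import struct

    from mercurial import mdiff

    def is_full_replacement(revlog, baserev, delta):
        # a delta replacing the entire base is a single hunk spanning
        # [0, oldlen) followed by newlen bytes of literal content
        hlen = struct.calcsize(b">lll")
        header = mdiff.replacediffheader(
            revlog.rawsize(baserev), len(delta) - hlen
        )
        return delta[:hlen] == header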
 
+
 @attr.s(slots=True, frozen=True)
 class _deltainfo(object):
     distance = attr.ib()
@@ -545,6 +552,7 @@
     compresseddeltalen = attr.ib()
     snapshotdepth = attr.ib()
 
+
 def isgooddeltainfo(revlog, deltainfo, revinfo):
     """Returns True if the given delta is good. Good means that it is within
     the disk span, disk size, and chain length bounds that we know to be
@@ -562,7 +570,7 @@
     defaultmax = textlen * 4
     maxdist = revlog._maxdeltachainspan
     if not maxdist:
-        maxdist = deltainfo.distance # ensure the conditional pass
+        maxdist = deltainfo.distance  # ensure the conditional passes
     maxdist = max(maxdist, defaultmax)
 
     # Bad delta from read span:
@@ -593,8 +601,7 @@
     # Bad delta from chain length:
     #
     #   If the number of delta in the chain gets too high.
-    if (revlog._maxchainlen
-            and revlog._maxchainlen < deltainfo.chainlen):
+    if revlog._maxchainlen and revlog._maxchainlen < deltainfo.chainlen:
         return False
 
     # bad delta from intermediate snapshot size limit
@@ -602,23 +609,29 @@
     #   If an intermediate snapshot size is higher than the limit.  The
     #   limit exist to prevent endless chain of intermediate delta to be
     #   created.
-    if (deltainfo.snapshotdepth is not None and
-            (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen):
+    if (
+        deltainfo.snapshotdepth is not None
+        and (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen
+    ):
         return False
 
     # bad delta if new intermediate snapshot is larger than the previous
     # snapshot
-    if (deltainfo.snapshotdepth
-            and revlog.length(deltainfo.base) < deltainfo.deltalen):
+    if (
+        deltainfo.snapshotdepth
+        and revlog.length(deltainfo.base) < deltainfo.deltalen
+    ):
         return False
 
     return True
 
+
 # If a revision's full text is that much bigger than a base candidate full
 # text's, it is very unlikely that it will produce a valid delta. We no longer
 # consider these candidates.
 LIMIT_BASE2TEXT = 500
 
+
 def _candidategroups(revlog, textlen, p1, p2, cachedelta):
     """Provides group of revision to be tested as delta base
 
@@ -646,10 +659,9 @@
         group = []
         for rev in temptative:
             # skip over empty delta (no need to include them in a chain)
-            while (revlog._generaldelta
-                   and not (rev == nullrev
-                            or rev in tested
-                            or deltalength(rev))):
+            while revlog._generaldelta and not (
+                rev == nullrev or rev in tested or deltalength(rev)
+            ):
                 tested.add(rev)
                 rev = deltaparent(rev)
             # no need to try a delta against nullrev, this will be done as a
@@ -712,9 +724,10 @@
             good = yield tuple(group)
     yield None
 
+
 def _findsnapshots(revlog, cache, start_rev):
     """find snapshot from start_rev to tip"""
-    if util.safehasattr(revlog.index, 'findsnapshots'):
+    if util.safehasattr(revlog.index, b'findsnapshots'):
         revlog.index.findsnapshots(cache, start_rev)
     else:
         deltaparent = revlog.deltaparent
@@ -723,6 +736,7 @@
             if issnapshot(rev):
                 cache[deltaparent(rev)].append(rev)
 
+
 def _refinedgroups(revlog, p1, p2, cachedelta):
     good = None
     # First we try to reuse the delta contained in the bundle.
@@ -771,6 +785,7 @@
     # we have found nothing
     yield None
 
+
 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
     """Provides group of revision to be tested as delta base
 
@@ -891,6 +906,7 @@
         # fulltext.
         yield (prev,)
 
+
 class deltacomputer(object):
     def __init__(self, revlog):
         self.revlog = revlog
@@ -911,9 +927,16 @@
         baserev = cachedelta[0]
         delta = cachedelta[1]
 
-        fulltext = btext[0] = _textfromdelta(fh, revlog, baserev, delta,
-                                             revinfo.p1, revinfo.p2,
-                                             revinfo.flags, revinfo.node)
+        fulltext = btext[0] = _textfromdelta(
+            fh,
+            revlog,
+            baserev,
+            delta,
+            revinfo.p1,
+            revinfo.p2,
+            revinfo.flags,
+            revinfo.node,
+        )
         return fulltext
 
     def _builddeltadiff(self, base, revinfo, fh):
@@ -925,7 +948,7 @@
             header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
             delta = header + t
         else:
-            ptext = revlog.revision(base, _df=fh, raw=True)
+            ptext = revlog.rawdata(base, _df=fh)
             delta = mdiff.textdiff(ptext, t)
 
         return delta
@@ -950,11 +973,13 @@
         delta = None
         if revinfo.cachedelta:
             cachebase, cachediff = revinfo.cachedelta
-            #check if the diff still apply
+            # check if the diff still applies
             currentbase = cachebase
-            while (currentbase != nullrev
-                    and currentbase != base
-                    and self.revlog.length(currentbase) == 0):
+            while (
+                currentbase != nullrev
+                and currentbase != base
+                and self.revlog.length(currentbase) == 0
+            ):
                 currentbase = self.revlog.deltaparent(currentbase)
             if self.revlog._lazydelta and currentbase == base:
                 delta = revinfo.cachedelta[1]
@@ -976,9 +1001,16 @@
         chainlen += 1
         compresseddeltalen += deltalen
 
-        return _deltainfo(dist, deltalen, (header, data), deltabase,
-                          chainbase, chainlen, compresseddeltalen,
-                          snapshotdepth)
+        return _deltainfo(
+            dist,
+            deltalen,
+            (header, data),
+            deltabase,
+            chainbase,
+            chainlen,
+            compresseddeltalen,
+            snapshotdepth,
+        )
 
     def _fullsnapshotinfo(self, fh, revinfo):
         curr = len(self.revlog)
@@ -989,9 +1021,16 @@
         snapshotdepth = 0
         chainlen = 1
 
-        return _deltainfo(dist, deltalen, data, deltabase,
-                          chainbase, chainlen, compresseddeltalen,
-                          snapshotdepth)
+        return _deltainfo(
+            dist,
+            deltalen,
+            data,
+            deltabase,
+            chainbase,
+            chainlen,
+            compresseddeltalen,
+            snapshotdepth,
+        )
 
     def finddeltainfo(self, revinfo, fh):
         """Find an acceptable delta against a candidate revision
@@ -1022,8 +1061,9 @@
 
         deltainfo = None
         p1r, p2r = revlog.rev(p1), revlog.rev(p2)
-        groups = _candidategroups(self.revlog, revinfo.textlen,
-                                             p1r, p2r, cachedelta)
+        groups = _candidategroups(
+            self.revlog, revinfo.textlen, p1r, p2r, cachedelta
+        )
         candidaterevs = next(groups)
         while candidaterevs is not None:
             nominateddeltas = []
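
The search loop above is driven by a generator protocol: ``_candidategroups``
yields tuples of candidate base revisions and receives the best delta found so
far back through ``send()`` (the ``good = yield tuple(group)`` line earlier in
this file). A minimal sketch of that protocol, using hypothetical candidate
names and a stand-in scoring rule instead of real revlog deltas::

    def _candidates():
        # Yield successive groups of candidate bases, cheapest first; after
        # each group the caller sends back the best result so far (or None).
        good = yield ('p1', 'p2')
        if good is None:
            # nothing acceptable yet: widen the search
            good = yield ('snap-a', 'snap-b')
        # exhausted (or satisfied): tell the caller to stop
        yield None

    def _search():
        groups = _candidates()
        group = next(groups)  # prime the generator
        best = None
        while group is not None:
            for candidate in group:
                # stand-in for "compute a delta and keep the smallest one"
                if best is None or candidate < best:
                    best = candidate
            group = groups.send(best)  # feed the verdict back in
        return best

    print(_search())  # -> 'p1'
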
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/flagutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,192 @@
+# flagutil.py - code to deal with revlog flags and their processors
+#
+# Copyright 2016 Remi Chaintron <remi@fb.com>
+# Copyright 2016-2019 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from ..i18n import _
+
+from .constants import (
+    REVIDX_DEFAULT_FLAGS,
+    REVIDX_ELLIPSIS,
+    REVIDX_EXTSTORED,
+    REVIDX_FLAGS_ORDER,
+    REVIDX_ISCENSORED,
+    REVIDX_RAWTEXT_CHANGING_FLAGS,
+    REVIDX_SIDEDATA,
+)
+
+from .. import error, util
+
+# blanked usage of all the names to silence pyflakes warnings
+# We need these names available in the module for extensions.
+REVIDX_ISCENSORED
+REVIDX_ELLIPSIS
+REVIDX_EXTSTORED
+REVIDX_SIDEDATA
+REVIDX_DEFAULT_FLAGS
+REVIDX_FLAGS_ORDER
+REVIDX_RAWTEXT_CHANGING_FLAGS
+
+REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
+
+# Store flag processors (cf. 'addflagprocessor()' to register)
+flagprocessors = {
+    REVIDX_ISCENSORED: None,
+}
+
+
+def addflagprocessor(flag, processor):
+    """Register a flag processor on a revision data flag.
+
+    Invariant:
+    - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
+      and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
+    - Only one flag processor can be registered on a specific flag.
+    - flagprocessors must be 3-tuples of functions (read, write, raw) with the
+      following signatures:
+          - (read)  f(self, rawtext) -> text, bool, sidedata
+          - (write) f(self, text) -> rawtext, bool
+          - (raw)   f(self, rawtext) -> bool
+      "text" is presented to the user. "rawtext" is stored in revlog data, not
+      directly visible to the user.
+      The boolean returned by these transforms indicates whether the
+      returned text can be used for hash integrity checking. For example,
+      if "write" returns False, then "text" is used to generate the hash;
+      if "write" returns True, the "rawtext" returned by "write" should be
+      used instead. Usually, "write" and "read" return opposite booleans,
+      while "raw" returns the same boolean as "write".
+
+      Note: The 'raw' transform is used for changegroup generation and in some
+      debug commands. In this case the transform only indicates whether the
+      contents can be used for hash integrity checks.
+    """
+    insertflagprocessor(flag, processor, flagprocessors)
+
+
+def insertflagprocessor(flag, processor, flagprocessors):
+    if not flag & REVIDX_KNOWN_FLAGS:
+        msg = _(b"cannot register processor on unknown flag '%#x'.") % flag
+        raise error.ProgrammingError(msg)
+    if flag not in REVIDX_FLAGS_ORDER:
+        msg = _(b"flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % flag
+        raise error.ProgrammingError(msg)
+    if flag in flagprocessors:
+        msg = _(b"cannot register multiple processors on flag '%#x'.") % flag
+        raise error.Abort(msg)
+    flagprocessors[flag] = processor
+
+
+def processflagswrite(revlog, text, flags, sidedata):
+    """Inspect revision data flags and applies write transformations defined
+    by registered flag processors.
+
+    ``text`` - the revision data to process
+    ``flags`` - the revision flags
+    ``sidedata`` - the sidedata map for this revision
+
+    This function processes the flags in reverse of the order defined by
+    REVIDX_FLAGS_ORDER, applying the flag processors registered for the
+    flags that are present. The order must be stable because the transforms
+    are not commutative.
+
+    Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
+    processed text and ``validatehash`` is a bool indicating whether the
+    returned text should be checked for hash integrity.
+    """
+    return _processflagsfunc(revlog, text, flags, b'write', sidedata=sidedata)[
+        :2
+    ]
+
+
+def processflagsread(revlog, text, flags):
+    """Inspect revision data flags and applies read transformations defined
+    by registered flag processors.
+
+    ``text`` - the revision data to process
+    ``flags`` - the revision flags
+
+    This function processes the flags in the order defined by
+    REVIDX_FLAGS_ORDER, applying the flag processors registered for the
+    flags that are present. The order must be stable because the transforms
+    are not commutative.
+
+    Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
+    processed text and ``validatehash`` is a bool indicating whether the
+    returned text should be checked for hash integrity.
+    """
+    return _processflagsfunc(revlog, text, flags, b'read')
+
+
+def processflagsraw(revlog, text, flags):
+    """Inspect revision data flags to check is the content hash should be
+    validated.
+
+    ``text`` - the revision data to process
+    ``flags`` - the revision flags
+
+    This function processes the flags in the order defined by
+    REVIDX_FLAGS_ORDER, applying the raw transforms registered for the
+    flags that are present.
+
+    Returns a bool indicating whether the rawtext can be used for hash
+    integrity checking.
+    """
+    return _processflagsfunc(revlog, text, flags, b'raw')[1]
+
+
+def _processflagsfunc(revlog, text, flags, operation, sidedata=None):
+    """internal function to process flag on a revlog
+
+    This function is private to this module, code should never needs to call it
+    directly."""
+    # fast path: no flag processors will run
+    if flags == 0:
+        return text, True, {}
+    if operation not in (b'read', b'write', b'raw'):
+        raise error.ProgrammingError(_(b"invalid '%s' operation") % operation)
+    # Check all flags are known.
+    if flags & ~REVIDX_KNOWN_FLAGS:
+        raise revlog._flagserrorclass(
+            _(b"incompatible revision flag '%#x'")
+            % (flags & ~REVIDX_KNOWN_FLAGS)
+        )
+    validatehash = True
+    # Depending on the operation (read or write), the order might be
+    # reversed due to non-commutative transforms.
+    orderedflags = REVIDX_FLAGS_ORDER
+    if operation == b'write':
+        orderedflags = reversed(orderedflags)
+
+    outsidedata = {}
+    for flag in orderedflags:
+        # If a flagprocessor has been registered for a known flag, apply the
+        # related operation transform and update result tuple.
+        if flag & flags:
+            vhash = True
+
+            if flag not in revlog._flagprocessors:
+                message = _(b"missing processor for flag '%#x'") % flag
+                raise revlog._flagserrorclass(message)
+
+            processor = revlog._flagprocessors[flag]
+            if processor is not None:
+                readtransform, writetransform, rawtransform = processor
+
+                if operation == b'raw':
+                    vhash = rawtransform(revlog, text)
+                elif operation == b'read':
+                    text, vhash, s = readtransform(revlog, text)
+                    outsidedata.update(s)
+                else:  # write operation
+                    text, vhash = writetransform(revlog, text, sidedata)
+            validatehash = validatehash and vhash
+
+    return text, validatehash, outsidedata
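
The contract ``_processflagsfunc`` enforces on each registered triple is:
``read`` returns ``(text, validatehash, sidedata)``, ``write`` returns
``(text, validatehash)``, and ``raw`` returns a bare bool. A runnable sketch
of that dispatch with one hypothetical flag (none of these names belong to
Mercurial's API; only the shapes matter)::

    REVIDX_EXAMPLE = 1 << 0  # hypothetical flag bit

    def _read(rl, rawtext):
        # stored rawtext -> user-visible text, hash hint, extracted sidedata
        return rawtext.replace(b'raw:', b'', 1), True, {}

    def _write(rl, text, sidedata):
        # user-visible text -> stored rawtext, hash hint
        return b'raw:' + text, False

    def _raw(rl, rawtext):
        # may the stored rawtext be hash-checked as-is?
        return False

    flagprocessors = {REVIDX_EXAMPLE: (_read, _write, _raw)}

    def process(text, flags, operation, sidedata=None):
        validatehash, outsidedata = True, {}
        for flag, (read, write, raw) in flagprocessors.items():
            if flag & flags:
                if operation == 'read':
                    text, vhash, s = read(None, text)
                    outsidedata.update(s)
                elif operation == 'write':
                    text, vhash = write(None, text, sidedata)
                else:  # 'raw'
                    vhash = raw(None, text)
                validatehash = validatehash and vhash
        return text, validatehash, outsidedata

    print(process(b'raw:hello', REVIDX_EXAMPLE, 'read'))
    # -> (b'hello', True, {})
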
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/sidedata.py	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,106 @@
+# sidedata.py - Logic around storing extra data alongside revlog revisions
+#
+# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""core code for "sidedata" support
+
+The "sidedata" are stored alongside the revision without actually being part of
+its content and not affecting its hash. It's main use cases is to cache
+important information related to a changesets.
+
+The current implementation is experimental and subject to changes. Do not rely
+on it in production.
+
+Sidedata are stored in the revlog itself, within the revision rawtext. They
+are inserted into and removed from it using the flagprocessors mechanism.
+The following format is currently used::
+
+    initial header:
+        <number of sidedata; 2 bytes>
+    sidedata (repeated N times):
+        <sidedata-key; 2 bytes>
+        <sidedata-entry-length: 4 bytes>
+        <sidedata-content-sha1-digest: 20 bytes>
+        <sidedata-content; X bytes>
+    normal raw text:
+        <all bytes remaining in the rawtext>
+
+This is a simple and effective format. It should be enough to experiment with
+the concept.
+"""
+
+from __future__ import absolute_import
+
+import hashlib
+import struct
+
+from .. import error
+
+## sidedata type constants
+# reserve a block for testing purposes.
+SD_TEST1 = 1
+SD_TEST2 = 2
+SD_TEST3 = 3
+SD_TEST4 = 4
+SD_TEST5 = 5
+SD_TEST6 = 6
+SD_TEST7 = 7
+
+# keys to store copies-related information
+SD_P1COPIES = 8
+SD_P2COPIES = 9
+SD_FILESADDED = 10
+SD_FILESREMOVED = 11
+
+# internal format constants
+SIDEDATA_HEADER = struct.Struct(r'>H')
+SIDEDATA_ENTRY = struct.Struct(r'>HL20s')
+
+
+def sidedatawriteprocessor(rl, text, sidedata):
+    sidedata = list(sidedata.items())
+    sidedata.sort()
+    rawtext = [SIDEDATA_HEADER.pack(len(sidedata))]
+    for key, value in sidedata:
+        digest = hashlib.sha1(value).digest()
+        rawtext.append(SIDEDATA_ENTRY.pack(key, len(value), digest))
+    for key, value in sidedata:
+        rawtext.append(value)
+    rawtext.append(bytes(text))
+    return b''.join(rawtext), False
+
+
+def sidedatareadprocessor(rl, text):
+    sidedata = {}
+    offset = 0
+    (nbentry,) = SIDEDATA_HEADER.unpack(text[: SIDEDATA_HEADER.size])
+    offset += SIDEDATA_HEADER.size
+    dataoffset = SIDEDATA_HEADER.size + (SIDEDATA_ENTRY.size * nbentry)
+    for i in range(nbentry):
+        nextoffset = offset + SIDEDATA_ENTRY.size
+        key, size, storeddigest = SIDEDATA_ENTRY.unpack(text[offset:nextoffset])
+        offset = nextoffset
+        # read the data associated with that entry
+        nextdataoffset = dataoffset + size
+        entrytext = text[dataoffset:nextdataoffset]
+        readdigest = hashlib.sha1(entrytext).digest()
+        if storeddigest != readdigest:
+            raise error.SidedataHashError(key, storeddigest, readdigest)
+        sidedata[key] = entrytext
+        dataoffset = nextdataoffset
+    text = text[dataoffset:]
+    return text, True, sidedata
+
+
+def sidedatarawprocessor(rl, text):
+    # sidedata modifies the rawtext and prevents rawtext hash validation
+    return False
+
+
+processors = (
+    sidedatareadprocessor,
+    sidedatawriteprocessor,
+    sidedatarawprocessor,
+)
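
To make the layout described in the module docstring concrete, here is a
standalone roundtrip of one sidedata entry using the same struct formats
(illustrative only; it redoes the read/write logic outside the revlog, with
key 8 standing in for SD_P1COPIES)::

    import hashlib
    import struct

    SIDEDATA_HEADER = struct.Struct('>H')     # number of entries
    SIDEDATA_ENTRY = struct.Struct('>HL20s')  # key, length, sha1 digest

    def encode(sidedata, text):
        items = sorted(sidedata.items())
        chunks = [SIDEDATA_HEADER.pack(len(items))]
        chunks += [SIDEDATA_ENTRY.pack(k, len(v), hashlib.sha1(v).digest())
                   for k, v in items]
        chunks += [v for _, v in items]
        return b''.join(chunks) + text

    def decode(rawtext):
        (nbentry,) = SIDEDATA_HEADER.unpack_from(rawtext, 0)
        offset = SIDEDATA_HEADER.size
        dataoffset = offset + SIDEDATA_ENTRY.size * nbentry
        sidedata = {}
        for _ in range(nbentry):
            key, size, digest = SIDEDATA_ENTRY.unpack_from(rawtext, offset)
            offset += SIDEDATA_ENTRY.size
            value = rawtext[dataoffset:dataoffset + size]
            assert hashlib.sha1(value).digest() == digest
            sidedata[key] = value
            dataoffset += size
        return sidedata, rawtext[dataoffset:]

    raw = encode({8: b'copied-from-a'}, b'file content')
    print(decode(raw))  # -> ({8: b'copied-from-a'}, b'file content')
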
--- a/mercurial/revset.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/revset.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,6 +10,7 @@
 import re
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     dagop,
     destutil,
@@ -93,20 +94,22 @@
 #
 # There are a few revsets that always redefine the order if 'define' is
 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
-anyorder = 'any'        # don't care the order, could be even random-shuffled
-defineorder = 'define'  # ALWAYS redefine, or ALWAYS follow the current order
-followorder = 'follow'  # MUST follow the current order
+anyorder = b'any'  # don't care about the order; could even be random-shuffled
+defineorder = b'define'  # ALWAYS redefine, or ALWAYS follow the current order
+followorder = b'follow'  # MUST follow the current order
 
 # helpers
 
+
 def getset(repo, subset, x, order=defineorder):
     if not x:
-        raise error.ParseError(_("missing argument"))
+        raise error.ParseError(_(b"missing argument"))
     return methods[x[0]](repo, subset, *x[1:], order=order)
 
+
 def _getrevsource(repo, r):
     extra = repo[r].extra()
-    for label in ('source', 'transplant_source', 'rebase_source'):
+    for label in (b'source', b'transplant_source', b'rebase_source'):
         if label in extra:
             try:
                 return repo[extra[label]].rev()
@@ -114,19 +117,23 @@
                 pass
     return None
 
+
 def _sortedb(xs):
     return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
 
+
 # operator methods
 
+
 def stringset(repo, subset, x, order):
     if not x:
-        raise error.ParseError(_("empty string is not a valid revision"))
+        raise error.ParseError(_(b"empty string is not a valid revision"))
     x = scmutil.intrev(scmutil.revsymbol(repo, x))
     if x in subset or x in _virtualrevs and isinstance(subset, fullreposet):
         return baseset([x])
     return baseset()
 
+
 def rawsmartset(repo, subset, x, order):
     """argument is already a smartset, use that directly"""
     if order == followorder:
@@ -134,6 +141,7 @@
     else:
         return x & subset
 
+
 def rangeset(repo, subset, x, y, order):
     m = getset(repo, fullreposet(repo), x)
     n = getset(repo, fullreposet(repo), y)
@@ -142,10 +150,12 @@
         return baseset()
     return _makerangeset(repo, subset, m.first(), n.last(), order)
 
+
 def rangeall(repo, subset, x, order):
     assert x is None
     return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
 
+
 def rangepre(repo, subset, y, order):
     # ':y' can't be rewritten to '0:y' since '0' may be hidden
     n = getset(repo, fullreposet(repo), y)
@@ -153,12 +163,15 @@
         return baseset()
     return _makerangeset(repo, subset, 0, n.last(), order)
 
+
 def rangepost(repo, subset, x, order):
     m = getset(repo, fullreposet(repo), x)
     if not m:
         return baseset()
-    return _makerangeset(repo, subset, m.first(), repo.changelog.tiprev(),
-                         order)
+    return _makerangeset(
+        repo, subset, m.first(), repo.changelog.tiprev(), order
+    )
+
 
 def _makerangeset(repo, subset, m, n, order):
     if m == n:
@@ -178,12 +191,15 @@
         # carrying the sorting over when possible would be more efficient
         return subset & r
 
+
 def dagrange(repo, subset, x, y, order):
     r = fullreposet(repo)
-    xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
-                              includepath=True)
+    xs = dagop.reachableroots(
+        repo, getset(repo, r, x), getset(repo, r, y), includepath=True
+    )
     return subset & xs
 
+
 def andset(repo, subset, x, y, order):
     if order == anyorder:
         yorder = anyorder
@@ -191,6 +207,7 @@
         yorder = followorder
     return getset(repo, getset(repo, subset, x, order), y, yorder)
 
+
 def andsmallyset(repo, subset, x, y, order):
     # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
     if order == anyorder:
@@ -199,9 +216,11 @@
         yorder = followorder
     return getset(repo, getset(repo, subset, y, yorder), x, order)
 
+
 def differenceset(repo, subset, x, y, order):
     return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
 
+
 def _orsetlist(repo, subset, xs, order):
     assert xs
     if len(xs) == 1:
@@ -211,6 +230,7 @@
     b = _orsetlist(repo, subset, xs[p:], order)
     return a + b
 
+
 def orset(repo, subset, x, order):
     xs = getlist(x)
     if not xs:
@@ -221,11 +241,14 @@
     else:
         return _orsetlist(repo, subset, xs, order)
 
+
 def notset(repo, subset, x, order):
     return subset - getset(repo, subset, x, anyorder)
 
+
 def relationset(repo, subset, x, y, order):
-    raise error.ParseError(_("can't use a relation in this context"))
+    raise error.ParseError(_(b"can't use a relation in this context"))
+
 
 def _splitrange(a, b):
     """Split range with bounds a and b into two ranges at 0 and return two
@@ -257,14 +280,17 @@
         descdepths = (max(a, 0), b + 1)
     return ancdepths, descdepths
 
+
 def generationsrel(repo, subset, x, rel, z, order):
     # TODO: rewrite tests, and drop startdepth argument from ancestors() and
     # descendants() predicates
-    a, b = getintrange(z,
-                       _('relation subscript must be an integer or a range'),
-                       _('relation subscript bounds must be integers'),
-                       deffirst=-(dagop.maxlogdepth - 1),
-                       deflast=+(dagop.maxlogdepth - 1))
+    a, b = getintrange(
+        z,
+        _(b'relation subscript must be an integer or a range'),
+        _(b'relation subscript bounds must be integers'),
+        deffirst=-(dagop.maxlogdepth - 1),
+        deflast=+(dagop.maxlogdepth - 1),
+    )
     (ancstart, ancstop), (descstart, descstop) = _splitrange(a, b)
 
     if ancstart is None and descstart is None:
@@ -284,6 +310,7 @@
 
     return subset & s
 
+
 def relsubscriptset(repo, subset, x, y, z, order):
     # this is a pretty basic implementation of the 'x#y[z]' operator, still
     # experimental and thus undocumented. see the wiki for further ideas.
@@ -295,15 +322,21 @@
     relnames = [r for r in subscriptrelations.keys() if len(r) > 1]
     raise error.UnknownIdentifier(rel, relnames)
 
+
 def subscriptset(repo, subset, x, y, order):
-    raise error.ParseError(_("can't use a subscript in this context"))
+    raise error.ParseError(_(b"can't use a subscript in this context"))
+
 
 def listset(repo, subset, *xs, **opts):
-    raise error.ParseError(_("can't use a list in this context"),
-                           hint=_('see \'hg help "revsets.x or y"\''))
+    raise error.ParseError(
+        _(b"can't use a list in this context"),
+        hint=_(b'see \'hg help "revsets.x or y"\''),
+    )
+
 
 def keyvaluepair(repo, subset, k, v, order):
-    raise error.ParseError(_("can't use a key-value pair in this context"))
+    raise error.ParseError(_(b"can't use a key-value pair in this context"))
+
 
 def func(repo, subset, a, b, order):
     f = getsymbol(a)
@@ -318,6 +351,7 @@
     syms = [s for (s, fn) in symbols.items() if keep(fn)]
     raise error.UnknownIdentifier(f, syms)
 
+
 # functions
 
 # symbols are callables like:
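
The ``@predicate(...)`` registrations that follow come from
``registrar.revsetpredicate()``. A rough, hypothetical sketch of such a
decorator-based registry (the real registrar also keeps the full declaration
for help text; ``safe``, ``takeorder`` and ``weight`` are the options visible
in this diff)::

    class revsetpredicate(dict):
        """Toy registry mapping predicate name -> implementation."""

        def __call__(self, decl, safe=False, takeorder=False, weight=1):
            name = decl.split(b'(', 1)[0]

            def register(func):
                # remember the options the dispatcher will consult later
                func._safe = safe
                func._takeorder = takeorder
                func._weight = weight
                self[name] = func
                return func

            return register

    predicate = revsetpredicate()

    @predicate(b'example(set)', safe=True, weight=10)
    def example(repo, subset, x):
        return subset

    print(predicate[b'example'] is example)  # -> True
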
@@ -335,14 +369,17 @@
 
 predicate = registrar.revsetpredicate()
 
-@predicate('_destupdate')
+
+@predicate(b'_destupdate')
 def _destupdate(repo, subset, x):
     # experimental revset for update destination
-    args = getargsdict(x, 'limit', 'clean')
-    return subset & baseset([destutil.destupdate(repo,
-                            **pycompat.strkwargs(args))[0]])
-
-@predicate('_destmerge')
+    args = getargsdict(x, b'limit', b'clean')
+    return subset & baseset(
+        [destutil.destupdate(repo, **pycompat.strkwargs(args))[0]]
+    )
+
+
+@predicate(b'_destmerge')
 def _destmerge(repo, subset, x):
     # experimental revset for merge destination
     sourceset = None
@@ -350,7 +387,8 @@
         sourceset = getset(repo, fullreposet(repo), x)
     return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
 
-@predicate('adds(pattern)', safe=True, weight=30)
+
+@predicate(b'adds(pattern)', safe=True, weight=30)
 def adds(repo, subset, x):
     """Changesets that add a file matching pattern.
 
@@ -359,10 +397,11 @@
     directory.
     """
     # i18n: "adds" is a keyword
-    pat = getstring(x, _("adds requires a pattern"))
+    pat = getstring(x, _(b"adds requires a pattern"))
     return checkstatus(repo, subset, pat, 1)
 
-@predicate('ancestor(*changeset)', safe=True, weight=0.5)
+
+@predicate(b'ancestor(*changeset)', safe=True, weight=0.5)
 def ancestor(repo, subset, x):
     """A greatest common ancestor of the changesets.
 
@@ -383,15 +422,18 @@
         return baseset([r])
     return baseset()
 
-def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
-               stopdepth=None):
+
+def _ancestors(
+    repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
+):
     heads = getset(repo, fullreposet(repo), x)
     if not heads:
         return baseset()
     s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
     return subset & s
 
-@predicate('ancestors(set[, depth])', safe=True)
+
+@predicate(b'ancestors(set[, depth])', safe=True)
 def ancestors(repo, subset, x):
     """Changesets that are ancestors of changesets in set, including the
     given changesets themselves.
@@ -400,32 +442,36 @@
     the specified generation.
     """
     # startdepth is for internal use only until we can decide the UI
-    args = getargsdict(x, 'ancestors', 'set depth startdepth')
-    if 'set' not in args:
+    args = getargsdict(x, b'ancestors', b'set depth startdepth')
+    if b'set' not in args:
         # i18n: "ancestors" is a keyword
-        raise error.ParseError(_('ancestors takes at least 1 argument'))
+        raise error.ParseError(_(b'ancestors takes at least 1 argument'))
     startdepth = stopdepth = None
-    if 'startdepth' in args:
-        n = getinteger(args['startdepth'],
-                       "ancestors expects an integer startdepth")
+    if b'startdepth' in args:
+        n = getinteger(
+            args[b'startdepth'], b"ancestors expects an integer startdepth"
+        )
         if n < 0:
-            raise error.ParseError("negative startdepth")
+            raise error.ParseError(b"negative startdepth")
         startdepth = n
-    if 'depth' in args:
+    if b'depth' in args:
         # i18n: "ancestors" is a keyword
-        n = getinteger(args['depth'], _("ancestors expects an integer depth"))
+        n = getinteger(args[b'depth'], _(b"ancestors expects an integer depth"))
         if n < 0:
-            raise error.ParseError(_("negative depth"))
+            raise error.ParseError(_(b"negative depth"))
         stopdepth = n + 1
-    return _ancestors(repo, subset, args['set'],
-                      startdepth=startdepth, stopdepth=stopdepth)
-
-@predicate('_firstancestors', safe=True)
+    return _ancestors(
+        repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
+    )
+
+
+@predicate(b'_firstancestors', safe=True)
 def _firstancestors(repo, subset, x):
     # ``_firstancestors(set)``
     # Like ``ancestors(set)`` but follows only the first parents.
     return _ancestors(repo, subset, x, followfirst=True)
 
+
 def _childrenspec(repo, subset, x, n, order):
     """Changesets that are the Nth child of a changeset
     in set.
@@ -438,18 +484,20 @@
                 break
             if len(c) > 1:
                 raise error.RepoLookupError(
-                    _("revision in set has more than one child"))
+                    _(b"revision in set has more than one child")
+                )
             r = c[0].rev()
         else:
             cs.add(r)
     return subset & cs
 
+
 def ancestorspec(repo, subset, x, n, order):
     """``set~n``
     Changesets that are the Nth ancestor (first parents only) of a changeset
     in set.
     """
-    n = getinteger(n, _("~ expects a number"))
+    n = getinteger(n, _(b"~ expects a number"))
     if n < 0:
         # children lookup
         return _childrenspec(repo, subset, x, -n, order)
@@ -464,17 +512,20 @@
         ps.add(r)
     return subset & ps
 
-@predicate('author(string)', safe=True, weight=10)
+
+@predicate(b'author(string)', safe=True, weight=10)
 def author(repo, subset, x):
     """Alias for ``user(string)``.
     """
     # i18n: "author" is a keyword
-    n = getstring(x, _("author requires a string"))
+    n = getstring(x, _(b"author requires a string"))
     kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
-    return subset.filter(lambda x: matcher(repo[x].user()),
-                         condrepr=('<user %r>', n))
-
-@predicate('bisect(string)', safe=True)
+    return subset.filter(
+        lambda x: matcher(repo[x].user()), condrepr=(b'<user %r>', n)
+    )
+
+
+@predicate(b'bisect(string)', safe=True)
 def bisect(repo, subset, x):
     """Changesets marked in the specified bisect status:
 
@@ -487,41 +538,46 @@
     - ``current``            : the cset currently being bisected
     """
     # i18n: "bisect" is a keyword
-    status = getstring(x, _("bisect requires a string")).lower()
+    status = getstring(x, _(b"bisect requires a string")).lower()
     state = set(hbisect.get(repo, status))
     return subset & state
 
+
 # Backward-compatibility
 # - no help entry so that we do not advertise it any more
-@predicate('bisected', safe=True)
+@predicate(b'bisected', safe=True)
 def bisected(repo, subset, x):
     return bisect(repo, subset, x)
 
-@predicate('bookmark([name])', safe=True)
+
+@predicate(b'bookmark([name])', safe=True)
 def bookmark(repo, subset, x):
     """The named bookmark or all bookmarks.
 
     Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
     """
     # i18n: "bookmark" is a keyword
-    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
+    args = getargs(x, 0, 1, _(b'bookmark takes one or no arguments'))
     if args:
-        bm = getstring(args[0],
-                       # i18n: "bookmark" is a keyword
-                       _('the argument to bookmark must be a string'))
+        bm = getstring(
+            args[0],
+            # i18n: "bookmark" is a keyword
+            _(b'the argument to bookmark must be a string'),
+        )
         kind, pattern, matcher = stringutil.stringmatcher(bm)
         bms = set()
-        if kind == 'literal':
+        if kind == b'literal':
             if bm == pattern:
                 pattern = repo._bookmarks.expandname(pattern)
             bmrev = repo._bookmarks.get(pattern, None)
             if not bmrev:
-                raise error.RepoLookupError(_("bookmark '%s' does not exist")
-                                            % pattern)
+                raise error.RepoLookupError(
+                    _(b"bookmark '%s' does not exist") % pattern
+                )
             bms.add(repo[bmrev].rev())
         else:
             matchrevs = set()
-            for name, bmrev in repo._bookmarks.iteritems():
+            for name, bmrev in pycompat.iteritems(repo._bookmarks):
                 if matcher(name):
                     matchrevs.add(bmrev)
             for bmrev in matchrevs:
@@ -531,7 +587,8 @@
     bms -= {node.nullrev}
     return subset & bms
 
-@predicate('branch(string or set)', safe=True, weight=10)
+
+@predicate(b'branch(string or set)', safe=True, weight=10)
 def branch(repo, subset, x):
     """
     All changesets belonging to the given branch or the branches of the given
@@ -541,6 +598,7 @@
     :hg:`help revisions.patterns`.
     """
     getbi = repo.revbranchcache().branchinfo
+
     def getbranch(r):
         try:
             return getbi(r)[0]
@@ -548,34 +606,41 @@
             return repo[r].branch()
 
     try:
-        b = getstring(x, '')
+        b = getstring(x, b'')
     except error.ParseError:
         # not a string, but another revspec, e.g. tip()
         pass
     else:
         kind, pattern, matcher = stringutil.stringmatcher(b)
-        if kind == 'literal':
+        if kind == b'literal':
             # note: falls through to the revspec case if no branch with
             # this name exists and pattern kind is not specified explicitly
             if repo.branchmap().hasbranch(pattern):
-                return subset.filter(lambda r: matcher(getbranch(r)),
-                                     condrepr=('<branch %r>', b))
-            if b.startswith('literal:'):
-                raise error.RepoLookupError(_("branch '%s' does not exist")
-                                            % pattern)
+                return subset.filter(
+                    lambda r: matcher(getbranch(r)),
+                    condrepr=(b'<branch %r>', b),
+                )
+            if b.startswith(b'literal:'):
+                raise error.RepoLookupError(
+                    _(b"branch '%s' does not exist") % pattern
+                )
         else:
-            return subset.filter(lambda r: matcher(getbranch(r)),
-                                 condrepr=('<branch %r>', b))
+            return subset.filter(
+                lambda r: matcher(getbranch(r)), condrepr=(b'<branch %r>', b)
+            )
 
     s = getset(repo, fullreposet(repo), x)
     b = set()
     for r in s:
         b.add(getbranch(r))
     c = s.__contains__
-    return subset.filter(lambda r: c(r) or getbranch(r) in b,
-                         condrepr=lambda: '<branch %r>' % _sortedb(b))
-
-@predicate('phasedivergent()', safe=True)
+    return subset.filter(
+        lambda r: c(r) or getbranch(r) in b,
+        condrepr=lambda: b'<branch %r>' % _sortedb(b),
+    )
+
+
+@predicate(b'phasedivergent()', safe=True)
 def phasedivergent(repo, subset, x):
     """Mutable changesets marked as successors of public changesets.
 
@@ -583,11 +648,12 @@
     (EXPERIMENTAL)
     """
     # i18n: "phasedivergent" is a keyword
-    getargs(x, 0, 0, _("phasedivergent takes no arguments"))
-    phasedivergent = obsmod.getrevs(repo, 'phasedivergent')
+    getargs(x, 0, 0, _(b"phasedivergent takes no arguments"))
+    phasedivergent = obsmod.getrevs(repo, b'phasedivergent')
     return subset & phasedivergent
 
-@predicate('bundle()', safe=True)
+
+@predicate(b'bundle()', safe=True)
 def bundle(repo, subset, x):
     """Changesets in the bundle.
 
@@ -596,9 +662,10 @@
     try:
         bundlerevs = repo.changelog.bundlerevs
     except AttributeError:
-        raise error.Abort(_("no bundle provided - specify with -R"))
+        raise error.Abort(_(b"no bundle provided - specify with -R"))
     return subset & bundlerevs
 
+
 def checkstatus(repo, subset, pat, field):
     """Helper for status-related revsets (adds, removes, modifies).
     The field parameter says which kind is desired:
@@ -606,9 +673,10 @@
     1: added
     2: removed
     """
-    hasset = matchmod.patkind(pat) == 'set'
+    hasset = matchmod.patkind(pat) == b'set'
 
     mcache = [None]
+
     def matches(x):
         c = repo[x]
         if not mcache[0] or hasset:
@@ -635,7 +703,8 @@
                 if m(f):
                     return True
 
-    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
+    return subset.filter(matches, condrepr=(b'<status[%r] %r>', field, pat))
+
 
 def _children(repo, subset, parentset):
     if not parentset:
@@ -654,7 +723,8 @@
             cs.add(r)
     return baseset(cs)
 
-@predicate('children(set)', safe=True)
+
+@predicate(b'children(set)', safe=True)
 def children(repo, subset, x):
     """Child changesets of changesets in set.
     """
@@ -662,17 +732,20 @@
     cs = _children(repo, subset, s)
     return subset & cs
 
-@predicate('closed()', safe=True, weight=10)
+
+@predicate(b'closed()', safe=True, weight=10)
 def closed(repo, subset, x):
     """Changeset is closed.
     """
     # i18n: "closed" is a keyword
-    getargs(x, 0, 0, _("closed takes no arguments"))
-    return subset.filter(lambda r: repo[r].closesbranch(),
-                         condrepr='<branch closed>')
+    getargs(x, 0, 0, _(b"closed takes no arguments"))
+    return subset.filter(
+        lambda r: repo[r].closesbranch(), condrepr=b'<branch closed>'
+    )
+
 
 # for internal use
-@predicate('_commonancestorheads(set)', safe=True)
+@predicate(b'_commonancestorheads(set)', safe=True)
 def _commonancestorheads(repo, subset, x):
     # This is an internal method for quickly calculating "heads(::x and
     # ::y)"
@@ -684,7 +757,8 @@
     ancs = repo.changelog._commonancestorsheads(*list(startrevs))
     return subset & baseset(ancs)
 
-@predicate('commonancestors(set)', safe=True)
+
+@predicate(b'commonancestors(set)', safe=True)
 def commonancestors(repo, subset, x):
     """Changesets that are ancestors of every changeset in set.
     """
@@ -695,7 +769,8 @@
         subset &= dagop.revancestors(repo, baseset([r]))
     return subset
 
-@predicate('contains(pattern)', weight=100)
+
+@predicate(b'contains(pattern)', weight=100)
 def contains(repo, subset, x):
     """The revision's manifest contains a file matching pattern (but might not
     modify it). See :hg:`help patterns` for information about file patterns.
@@ -705,7 +780,7 @@
     for efficiency.
     """
     # i18n: "contains" is a keyword
-    pat = getstring(x, _("contains requires a pattern"))
+    pat = getstring(x, _(b"contains requires a pattern"))
 
     def matches(x):
         if not matchmod.patkind(pat):
@@ -720,9 +795,10 @@
                     return True
         return False
 
-    return subset.filter(matches, condrepr=('<contains %r>', pat))
-
-@predicate('converted([id])', safe=True)
+    return subset.filter(matches, condrepr=(b'<contains %r>', pat))
+
+
+@predicate(b'converted([id])', safe=True)
 def converted(repo, subset, x):
     """Changesets converted from the given identifier in the old repository if
     present, or all converted changesets if no identifier is specified.
@@ -733,29 +809,33 @@
 
     rev = None
     # i18n: "converted" is a keyword
-    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
+    l = getargs(x, 0, 1, _(b'converted takes one or no arguments'))
     if l:
         # i18n: "converted" is a keyword
-        rev = getstring(l[0], _('converted requires a revision'))
+        rev = getstring(l[0], _(b'converted requires a revision'))
 
     def _matchvalue(r):
-        source = repo[r].extra().get('convert_revision', None)
+        source = repo[r].extra().get(b'convert_revision', None)
         return source is not None and (rev is None or source.startswith(rev))
 
-    return subset.filter(lambda r: _matchvalue(r),
-                         condrepr=('<converted %r>', rev))
-
-@predicate('date(interval)', safe=True, weight=10)
+    return subset.filter(
+        lambda r: _matchvalue(r), condrepr=(b'<converted %r>', rev)
+    )
+
+
+@predicate(b'date(interval)', safe=True, weight=10)
 def date(repo, subset, x):
     """Changesets within the interval, see :hg:`help dates`.
     """
     # i18n: "date" is a keyword
-    ds = getstring(x, _("date requires a string"))
+    ds = getstring(x, _(b"date requires a string"))
     dm = dateutil.matchdate(ds)
-    return subset.filter(lambda x: dm(repo[x].date()[0]),
-                         condrepr=('<date %r>', ds))
-
-@predicate('desc(string)', safe=True, weight=10)
+    return subset.filter(
+        lambda x: dm(repo[x].date()[0]), condrepr=(b'<date %r>', ds)
+    )
+
+
+@predicate(b'desc(string)', safe=True, weight=10)
 def desc(repo, subset, x):
     """Search commit message for string. The match is case-insensitive.
 
@@ -763,22 +843,26 @@
     :hg:`help revisions.patterns`.
     """
     # i18n: "desc" is a keyword
-    ds = getstring(x, _("desc requires a string"))
+    ds = getstring(x, _(b"desc requires a string"))
 
     kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
 
-    return subset.filter(lambda r: matcher(repo[r].description()),
-                         condrepr=('<desc %r>', ds))
-
-def _descendants(repo, subset, x, followfirst=False, startdepth=None,
-                 stopdepth=None):
+    return subset.filter(
+        lambda r: matcher(repo[r].description()), condrepr=(b'<desc %r>', ds)
+    )
+
+
+def _descendants(
+    repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
+):
     roots = getset(repo, fullreposet(repo), x)
     if not roots:
         return baseset()
     s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
     return subset & s
 
-@predicate('descendants(set[, depth])', safe=True)
+
+@predicate(b'descendants(set[, depth])', safe=True)
 def descendants(repo, subset, x):
     """Changesets which are descendants of changesets in set, including the
     given changesets themselves.
@@ -787,33 +871,39 @@
     the specified generation.
     """
     # startdepth is for internal use only until we can decide the UI
-    args = getargsdict(x, 'descendants', 'set depth startdepth')
-    if 'set' not in args:
+    args = getargsdict(x, b'descendants', b'set depth startdepth')
+    if b'set' not in args:
         # i18n: "descendants" is a keyword
-        raise error.ParseError(_('descendants takes at least 1 argument'))
+        raise error.ParseError(_(b'descendants takes at least 1 argument'))
     startdepth = stopdepth = None
-    if 'startdepth' in args:
-        n = getinteger(args['startdepth'],
-                       "descendants expects an integer startdepth")
+    if b'startdepth' in args:
+        n = getinteger(
+            args[b'startdepth'], b"descendants expects an integer startdepth"
+        )
         if n < 0:
-            raise error.ParseError("negative startdepth")
+            raise error.ParseError(b"negative startdepth")
         startdepth = n
-    if 'depth' in args:
+    if b'depth' in args:
         # i18n: "descendants" is a keyword
-        n = getinteger(args['depth'], _("descendants expects an integer depth"))
+        n = getinteger(
+            args[b'depth'], _(b"descendants expects an integer depth")
+        )
         if n < 0:
-            raise error.ParseError(_("negative depth"))
+            raise error.ParseError(_(b"negative depth"))
         stopdepth = n + 1
-    return _descendants(repo, subset, args['set'],
-                        startdepth=startdepth, stopdepth=stopdepth)
-
-@predicate('_firstdescendants', safe=True)
+    return _descendants(
+        repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
+    )
+
+
+@predicate(b'_firstdescendants', safe=True)
 def _firstdescendants(repo, subset, x):
     # ``_firstdescendants(set)``
     # Like ``descendants(set)`` but follows only the first parents.
     return _descendants(repo, subset, x, followfirst=True)
 
-@predicate('destination([set])', safe=True, weight=10)
+
+@predicate(b'destination([set])', safe=True, weight=10)
 def destination(repo, subset, x):
     """Changesets that were created by a graft, transplant or rebase operation,
     with the given revisions specified as the source.  Omitting the optional set
@@ -855,21 +945,25 @@
             r = src
             src = _getrevsource(repo, r)
 
-    return subset.filter(dests.__contains__,
-                         condrepr=lambda: '<destination %r>' % _sortedb(dests))
-
-@predicate('contentdivergent()', safe=True)
+    return subset.filter(
+        dests.__contains__,
+        condrepr=lambda: b'<destination %r>' % _sortedb(dests),
+    )
+
+
+@predicate(b'contentdivergent()', safe=True)
 def contentdivergent(repo, subset, x):
     """
     Final successors of changesets with an alternative set of final
     successors. (EXPERIMENTAL)
     """
     # i18n: "contentdivergent" is a keyword
-    getargs(x, 0, 0, _("contentdivergent takes no arguments"))
-    contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
+    getargs(x, 0, 0, _(b"contentdivergent takes no arguments"))
+    contentdivergent = obsmod.getrevs(repo, b'contentdivergent')
     return subset & contentdivergent
 
-@predicate('expectsize(set[, size])', safe=True, takeorder=True)
+
+@predicate(b'expectsize(set[, size])', safe=True, takeorder=True)
 def expectsize(repo, subset, x, order):
     """Return the given revset if size matches the revset size.
     Abort if the revset doesn't expect given size.
@@ -878,27 +972,33 @@
     For example, ``expectsize(0:1, 3:5)`` will abort as revset size is 2 and
     2 is not between 3 and 5 inclusive."""
 
-    args = getargsdict(x, 'expectsize', 'set size')
+    args = getargsdict(x, b'expectsize', b'set size')
     minsize = 0
     maxsize = len(repo) + 1
-    err = ''
-    if 'size' not in args or 'set' not in args:
-        raise error.ParseError(_('invalid set of arguments'))
-    minsize, maxsize = getintrange(args['size'],
-                                   _('expectsize requires a size range'
-                                     ' or a positive integer'),
-                                   _('size range bounds must be integers'),
-                                   minsize, maxsize)
+    err = b''
+    if b'size' not in args or b'set' not in args:
+        raise error.ParseError(_(b'invalid set of arguments'))
+    minsize, maxsize = getintrange(
+        args[b'size'],
+        _(b'expectsize requires a size range or a positive integer'),
+        _(b'size range bounds must be integers'),
+        minsize,
+        maxsize,
+    )
     if minsize < 0 or maxsize < 0:
-        raise error.ParseError(_('negative size'))
-    rev = getset(repo, fullreposet(repo), args['set'], order=order)
+        raise error.ParseError(_(b'negative size'))
+    rev = getset(repo, fullreposet(repo), args[b'set'], order=order)
     if minsize != maxsize and (len(rev) < minsize or len(rev) > maxsize):
-        err = _('revset size mismatch.'
-                ' expected between %d and %d, got %d') % (minsize, maxsize,
-                                                          len(rev))
+        err = _(b'revset size mismatch. expected between %d and %d, got %d') % (
+            minsize,
+            maxsize,
+            len(rev),
+        )
     elif minsize == maxsize and len(rev) != minsize:
-        err = _('revset size mismatch.'
-                ' expected %d, got %d') % (minsize, len(rev))
+        err = _(b'revset size mismatch. expected %d, got %d') % (
+            minsize,
+            len(rev),
+        )
     if err:
         raise error.RepoLookupError(err)
     if order == followorder:
@@ -906,27 +1006,32 @@
     else:
         return rev & subset
 
-@predicate('extdata(source)', safe=False, weight=100)
+
+@predicate(b'extdata(source)', safe=False, weight=100)
 def extdata(repo, subset, x):
     """Changesets in the specified extdata source. (EXPERIMENTAL)"""
     # i18n: "extdata" is a keyword
-    args = getargsdict(x, 'extdata', 'source')
-    source = getstring(args.get('source'),
-                       # i18n: "extdata" is a keyword
-                       _('extdata takes at least 1 string argument'))
+    args = getargsdict(x, b'extdata', b'source')
+    source = getstring(
+        args.get(b'source'),
+        # i18n: "extdata" is a keyword
+        _(b'extdata takes at least 1 string argument'),
+    )
     data = scmutil.extdatasource(repo, source)
     return subset & baseset(data)
 
-@predicate('extinct()', safe=True)
+
+@predicate(b'extinct()', safe=True)
 def extinct(repo, subset, x):
     """Obsolete changesets with obsolete descendants only.
     """
     # i18n: "extinct" is a keyword
-    getargs(x, 0, 0, _("extinct takes no arguments"))
-    extincts = obsmod.getrevs(repo, 'extinct')
+    getargs(x, 0, 0, _(b"extinct takes no arguments"))
+    extincts = obsmod.getrevs(repo, b'extinct')
     return subset & extincts
 
-@predicate('extra(label, [value])', safe=True)
+
+@predicate(b'extra(label, [value])', safe=True)
 def extra(repo, subset, x):
     """Changesets with the given label in the extra metadata, with the given
     optional value.
@@ -934,29 +1039,33 @@
     Pattern matching is supported for `value`. See
     :hg:`help revisions.patterns`.
     """
-    args = getargsdict(x, 'extra', 'label value')
-    if 'label' not in args:
+    args = getargsdict(x, b'extra', b'label value')
+    if b'label' not in args:
         # i18n: "extra" is a keyword
-        raise error.ParseError(_('extra takes at least 1 argument'))
+        raise error.ParseError(_(b'extra takes at least 1 argument'))
     # i18n: "extra" is a keyword
-    label = getstring(args['label'], _('first argument to extra must be '
-                                       'a string'))
+    label = getstring(
+        args[b'label'], _(b'first argument to extra must be a string')
+    )
     value = None
 
-    if 'value' in args:
+    if b'value' in args:
         # i18n: "extra" is a keyword
-        value = getstring(args['value'], _('second argument to extra must be '
-                                           'a string'))
+        value = getstring(
+            args[b'value'], _(b'second argument to extra must be a string')
+        )
         kind, value, matcher = stringutil.stringmatcher(value)
 
     def _matchvalue(r):
         extra = repo[r].extra()
         return label in extra and (value is None or matcher(extra[label]))
 
-    return subset.filter(lambda r: _matchvalue(r),
-                         condrepr=('<extra[%r] %r>', label, value))
-
-@predicate('filelog(pattern)', safe=True)
+    return subset.filter(
+        lambda r: _matchvalue(r), condrepr=(b'<extra[%r] %r>', label, value)
+    )
+
+
+@predicate(b'filelog(pattern)', safe=True)
 def filelog(repo, subset, x):
     """Changesets connected to the specified filelog.
 
@@ -971,7 +1080,7 @@
     """
 
     # i18n: "filelog" is a keyword
-    pat = getstring(x, _("filelog requires a pattern"))
+    pat = getstring(x, _(b"filelog requires a pattern"))
     s = set()
     cl = repo.changelog
 
@@ -1019,55 +1128,63 @@
 
     return subset & s
 
-@predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
+
+@predicate(b'first(set, [n])', safe=True, takeorder=True, weight=0)
 def first(repo, subset, x, order):
     """An alias for limit().
     """
     return limit(repo, subset, x, order)
 
+
 def _follow(repo, subset, x, name, followfirst=False):
-    args = getargsdict(x, name, 'file startrev')
+    args = getargsdict(x, name, b'file startrev')
     revs = None
-    if 'startrev' in args:
-        revs = getset(repo, fullreposet(repo), args['startrev'])
-    if 'file' in args:
-        x = getstring(args['file'], _("%s expected a pattern") % name)
+    if b'startrev' in args:
+        revs = getset(repo, fullreposet(repo), args[b'startrev'])
+    if b'file' in args:
+        x = getstring(args[b'file'], _(b"%s expected a pattern") % name)
         if revs is None:
             revs = [None]
         fctxs = []
         for r in revs:
             ctx = mctx = repo[r]
             if r is None:
-                ctx = repo['.']
-            m = matchmod.match(repo.root, repo.getcwd(), [x],
-                               ctx=mctx, default='path')
+                ctx = repo[b'.']
+            m = matchmod.match(
+                repo.root, repo.getcwd(), [x], ctx=mctx, default=b'path'
+            )
             fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
         s = dagop.filerevancestors(fctxs, followfirst)
     else:
         if revs is None:
-            revs = baseset([repo['.'].rev()])
+            revs = baseset([repo[b'.'].rev()])
         s = dagop.revancestors(repo, revs, followfirst)
 
     return subset & s
 
-@predicate('follow([file[, startrev]])', safe=True)
+
+@predicate(b'follow([file[, startrev]])', safe=True)
 def follow(repo, subset, x):
     """
     An alias for ``::.`` (ancestors of the working directory's first parent).
     If file pattern is specified, the histories of files matching given
     pattern in the revision given by startrev are followed, including copies.
     """
-    return _follow(repo, subset, x, 'follow')
-
-@predicate('_followfirst', safe=True)
+    return _follow(repo, subset, x, b'follow')
+
+
+@predicate(b'_followfirst', safe=True)
 def _followfirst(repo, subset, x):
     # ``followfirst([file[, startrev]])``
     # Like ``follow([file[, startrev]])`` but follows only the first parent
     # of every revisions or files revisions.
-    return _follow(repo, subset, x, '_followfirst', followfirst=True)
-
-@predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
-           safe=True)
+    return _follow(repo, subset, x, b'_followfirst', followfirst=True)
+
+
+@predicate(
+    b'followlines(file, fromline:toline[, startrev=., descend=False])',
+    safe=True,
+)
 def followlines(repo, subset, x):
     """Changesets modifying `file` in line range ('fromline', 'toline').
 
@@ -1079,56 +1196,74 @@
     descendants of 'startrev' are returned though renames are (currently) not
     followed in this direction.
     """
-    args = getargsdict(x, 'followlines', 'file *lines startrev descend')
-    if len(args['lines']) != 1:
-        raise error.ParseError(_("followlines requires a line range"))
-
-    rev = '.'
-    if 'startrev' in args:
-        revs = getset(repo, fullreposet(repo), args['startrev'])
+    args = getargsdict(x, b'followlines', b'file *lines startrev descend')
+    if len(args[b'lines']) != 1:
+        raise error.ParseError(_(b"followlines requires a line range"))
+
+    rev = b'.'
+    if b'startrev' in args:
+        revs = getset(repo, fullreposet(repo), args[b'startrev'])
         if len(revs) != 1:
             raise error.ParseError(
                 # i18n: "followlines" is a keyword
-                _("followlines expects exactly one revision"))
+                _(b"followlines expects exactly one revision")
+            )
         rev = revs.last()
 
-    pat = getstring(args['file'], _("followlines requires a pattern"))
+    pat = getstring(args[b'file'], _(b"followlines requires a pattern"))
     # i18n: "followlines" is a keyword
-    msg = _("followlines expects exactly one file")
+    msg = _(b"followlines expects exactly one file")
     fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
     fromline, toline = util.processlinerange(
-        *getintrange(args['lines'][0],
-                     # i18n: "followlines" is a keyword
-                     _("followlines expects a line number or a range"),
-                     _("line range bounds must be integers")))
+        *getintrange(
+            args[b'lines'][0],
+            # i18n: "followlines" is a keyword
+            _(b"followlines expects a line number or a range"),
+            _(b"line range bounds must be integers"),
+        )
+    )
 
     fctx = repo[rev].filectx(fname)
     descend = False
-    if 'descend' in args:
-        descend = getboolean(args['descend'],
-                             # i18n: "descend" is a keyword
-                             _("descend argument must be a boolean"))
+    if b'descend' in args:
+        descend = getboolean(
+            args[b'descend'],
+            # i18n: "descend" is a keyword
+            _(b"descend argument must be a boolean"),
+        )
     if descend:
         rs = generatorset(
-            (c.rev() for c, _linerange
-             in dagop.blockdescendants(fctx, fromline, toline)),
-            iterasc=True)
+            (
+                c.rev()
+                for c, _linerange in dagop.blockdescendants(
+                    fctx, fromline, toline
+                )
+            ),
+            iterasc=True,
+        )
     else:
         rs = generatorset(
-            (c.rev() for c, _linerange
-             in dagop.blockancestors(fctx, fromline, toline)),
-            iterasc=False)
+            (
+                c.rev()
+                for c, _linerange in dagop.blockancestors(
+                    fctx, fromline, toline
+                )
+            ),
+            iterasc=False,
+        )
     return subset & rs
 
-@predicate('all()', safe=True)
+
+@predicate(b'all()', safe=True)
 def getall(repo, subset, x):
     """All changesets, the same as ``0:tip``.
     """
     # i18n: "all" is a keyword
-    getargs(x, 0, 0, _("all takes no arguments"))
+    getargs(x, 0, 0, _(b"all takes no arguments"))
     return subset & spanset(repo)  # drop "null" if any
 
-@predicate('grep(regex)', weight=10)
+
+@predicate(b'grep(regex)', weight=10)
 def grep(repo, subset, x):
     """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
     to ensure special escape characters are handled correctly. Unlike
@@ -1136,10 +1271,11 @@
     """
     try:
         # i18n: "grep" is a keyword
-        gr = re.compile(getstring(x, _("grep requires a string")))
+        gr = re.compile(getstring(x, _(b"grep requires a string")))
     except re.error as e:
         raise error.ParseError(
-            _('invalid match pattern: %s') % stringutil.forcebytestr(e))
+            _(b'invalid match pattern: %s') % stringutil.forcebytestr(e)
+        )
 
     def matches(x):
         c = repo[x]
@@ -1148,9 +1284,10 @@
                 return True
         return False
 
-    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
-
-@predicate('_matchfiles', safe=True)
+    return subset.filter(matches, condrepr=(b'<grep %r>', gr.pattern))
+
+
+@predicate(b'_matchfiles', safe=True)
 def _matchfiles(repo, subset, x):
     # _matchfiles takes a revset list of prefixed arguments:
     #
@@ -1164,36 +1301,38 @@
     # initialized. Use 'd:' to set the default matching mode, default
     # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
 
-    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
+    l = getargs(x, 1, -1, b"_matchfiles requires at least one argument")
     pats, inc, exc = [], [], []
     rev, default = None, None
     for arg in l:
-        s = getstring(arg, "_matchfiles requires string arguments")
+        s = getstring(arg, b"_matchfiles requires string arguments")
         prefix, value = s[:2], s[2:]
-        if prefix == 'p:':
+        if prefix == b'p:':
             pats.append(value)
-        elif prefix == 'i:':
+        elif prefix == b'i:':
             inc.append(value)
-        elif prefix == 'x:':
+        elif prefix == b'x:':
             exc.append(value)
-        elif prefix == 'r:':
+        elif prefix == b'r:':
             if rev is not None:
-                raise error.ParseError('_matchfiles expected at most one '
-                                       'revision')
-            if value == '': # empty means working directory
+                raise error.ParseError(
+                    b'_matchfiles expected at most one revision'
+                )
+            if value == b'':  # empty means working directory
                 rev = node.wdirrev
             else:
                 rev = value
-        elif prefix == 'd:':
+        elif prefix == b'd:':
             if default is not None:
-                raise error.ParseError('_matchfiles expected at most one '
-                                       'default mode')
+                raise error.ParseError(
+                    b'_matchfiles expected at most one default mode'
+                )
             default = value
         else:
-            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
+            raise error.ParseError(b'invalid _matchfiles prefix: %s' % prefix)
     if not default:
-        default = 'glob'
-    hasset = any(matchmod.patkind(p) == 'set' for p in pats + inc + exc)
+        default = b'glob'
+    hasset = any(matchmod.patkind(p) == b'set' for p in pats + inc + exc)
 
     mcache = [None]
 
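
The prefix mini-language parsed above ('p:' pattern, 'i:' include, 'x:'
exclude, 'r:' revision, 'd:' default kind) is easy to exercise on its own.
A condensed, standalone version of just the argument parsing (a hypothetical
helper mirroring the branches in ``_matchfiles``)::

    def parsematchfileargs(args):
        pats, inc, exc = [], [], []
        rev = default = None
        for s in args:
            prefix, value = s[:2], s[2:]
            if prefix == b'p:':
                pats.append(value)
            elif prefix == b'i:':
                inc.append(value)
            elif prefix == b'x:':
                exc.append(value)
            elif prefix == b'r:':
                if rev is not None:
                    raise ValueError('at most one revision')
                rev = value
            elif prefix == b'd:':
                if default is not None:
                    raise ValueError('at most one default mode')
                default = value
            else:
                raise ValueError('invalid prefix: %r' % prefix)
        return pats, inc, exc, rev, default or b'glob'

    print(parsematchfileargs([b'p:*.py', b'i:src/**', b'd:glob']))
    # -> ([b'*.py'], [b'src/**'], [], None, b'glob')
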
@@ -1201,6 +1340,7 @@
     # revisions is quite expensive.
     getfiles = repo.changelog.readfiles
     wdirrev = node.wdirrev
+
     def matches(x):
         if x == wdirrev:
             files = repo[x].files()
@@ -1209,9 +1349,15 @@
 
         if not mcache[0] or (hasset and rev is None):
             r = x if rev is None else rev
-            mcache[0] = matchmod.match(repo.root, repo.getcwd(), pats,
-                                       include=inc, exclude=exc, ctx=repo[r],
-                                       default=default)
+            mcache[0] = matchmod.match(
+                repo.root,
+                repo.getcwd(),
+                pats,
+                include=inc,
+                exclude=exc,
+                ctx=repo[r],
+                default=default,
+            )
         m = mcache[0]
 
         for f in files:
@@ -1219,12 +1365,21 @@
                 return True
         return False
 
-    return subset.filter(matches,
-                         condrepr=('<matchfiles patterns=%r, include=%r '
-                                   'exclude=%r, default=%r, rev=%r>',
-                                   pats, inc, exc, default, rev))
-
-@predicate('file(pattern)', safe=True, weight=10)
+    return subset.filter(
+        matches,
+        condrepr=(
+            b'<matchfiles patterns=%r, include=%r '
+            b'exclude=%r, default=%r, rev=%r>',
+            pats,
+            inc,
+            exc,
+            default,
+            rev,
+        ),
+    )
+
+
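The prefixed-argument convention spelled out in the comment above (``p:``,
``i:``, ``x:``, ``r:``, ``d:``) is easy to lose track of in diff form. Here is
a minimal standalone sketch of the same dispatch, with illustrative names and
none of Mercurial's matcher machinery::

   def parse_matchfiles_args(args):
       # 'p:' pattern, 'i:' include, 'x:' exclude, 'r:' revision,
       # 'd:' default kind; at most one 'r:' and one 'd:' allowed.
       pats, inc, exc = [], [], []
       rev = default = None
       for s in args:
           prefix, value = s[:2], s[2:]
           if prefix == 'p:':
               pats.append(value)
           elif prefix == 'i:':
               inc.append(value)
           elif prefix == 'x:':
               exc.append(value)
           elif prefix == 'r:':
               if rev is not None:
                   raise ValueError('at most one revision')
               rev = value
           elif prefix == 'd:':
               if default is not None:
                   raise ValueError('at most one default mode')
               default = value
           else:
               raise ValueError('invalid prefix: %s' % prefix)
       return pats, inc, exc, rev, default or 'glob'

   assert parse_matchfiles_args(['p:*.py', 'i:src/', 'd:relpath']) == (
       ['*.py'], ['src/'], [], None, 'relpath'
   )

The ``file(pattern)`` predicate just below delegates to this machinery by
prepending ``p:`` to its argument.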
+@predicate(b'file(pattern)', safe=True, weight=10)
 def hasfile(repo, subset, x):
     """Changesets affecting files matched by pattern.
 
@@ -1234,22 +1389,24 @@
     This predicate uses ``glob:`` as the default kind of pattern.
     """
     # i18n: "file" is a keyword
-    pat = getstring(x, _("file requires a pattern"))
-    return _matchfiles(repo, subset, ('string', 'p:' + pat))
-
-@predicate('head()', safe=True)
+    pat = getstring(x, _(b"file requires a pattern"))
+    return _matchfiles(repo, subset, (b'string', b'p:' + pat))
+
+
+@predicate(b'head()', safe=True)
 def head(repo, subset, x):
     """Changeset is a named branch head.
     """
     # i18n: "head" is a keyword
-    getargs(x, 0, 0, _("head takes no arguments"))
+    getargs(x, 0, 0, _(b"head takes no arguments"))
     hs = set()
     cl = repo.changelog
     for ls in repo.branchmap().iterheads():
         hs.update(cl.rev(h) for h in ls)
     return subset & baseset(hs)
 
-@predicate('heads(set)', safe=True, takeorder=True)
+
+@predicate(b'heads(set)', safe=True, takeorder=True)
 def heads(repo, subset, x, order):
     """Members of set with no children in set.
     """
@@ -1270,16 +1427,18 @@
     heads = baseset(heads)
     return subset & heads
 
-@predicate('hidden()', safe=True)
+
+@predicate(b'hidden()', safe=True)
 def hidden(repo, subset, x):
     """Hidden changesets.
     """
     # i18n: "hidden" is a keyword
-    getargs(x, 0, 0, _("hidden takes no arguments"))
-    hiddenrevs = repoview.filterrevs(repo, 'visible')
+    getargs(x, 0, 0, _(b"hidden takes no arguments"))
+    hiddenrevs = repoview.filterrevs(repo, b'visible')
     return subset & hiddenrevs
 
-@predicate('keyword(string)', safe=True, weight=10)
+
+@predicate(b'keyword(string)', safe=True, weight=10)
 def keyword(repo, subset, x):
     """Search commit message, user name, and names of changed files for
     string. The match is case-insensitive.
@@ -1288,49 +1447,55 @@
     ``grep(regex)``.
     """
     # i18n: "keyword" is a keyword
-    kw = encoding.lower(getstring(x, _("keyword requires a string")))
+    kw = encoding.lower(getstring(x, _(b"keyword requires a string")))
 
     def matches(r):
         c = repo[r]
-        return any(kw in encoding.lower(t)
-                   for t in c.files() + [c.user(), c.description()])
-
-    return subset.filter(matches, condrepr=('<keyword %r>', kw))
-
-@predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
+        return any(
+            kw in encoding.lower(t)
+            for t in c.files() + [c.user(), c.description()]
+        )
+
+    return subset.filter(matches, condrepr=(b'<keyword %r>', kw))
+
+
+@predicate(b'limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
 def limit(repo, subset, x, order):
     """First n members of set, defaulting to 1, starting from offset.
     """
-    args = getargsdict(x, 'limit', 'set n offset')
-    if 'set' not in args:
+    args = getargsdict(x, b'limit', b'set n offset')
+    if b'set' not in args:
         # i18n: "limit" is a keyword
-        raise error.ParseError(_("limit requires one to three arguments"))
+        raise error.ParseError(_(b"limit requires one to three arguments"))
     # i18n: "limit" is a keyword
-    lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
+    lim = getinteger(args.get(b'n'), _(b"limit expects a number"), default=1)
     if lim < 0:
-        raise error.ParseError(_("negative number to select"))
+        raise error.ParseError(_(b"negative number to select"))
     # i18n: "limit" is a keyword
-    ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
+    ofs = getinteger(
+        args.get(b'offset'), _(b"limit expects a number"), default=0
+    )
     if ofs < 0:
-        raise error.ParseError(_("negative offset"))
-    os = getset(repo, fullreposet(repo), args['set'])
+        raise error.ParseError(_(b"negative offset"))
+    os = getset(repo, fullreposet(repo), args[b'set'])
     ls = os.slice(ofs, ofs + lim)
     if order == followorder and lim > 1:
         return subset & ls
     return ls & subset
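As a reading aid, the slice arithmetic above (``os.slice(ofs, ofs + lim)``
plus the negative-value guards) behaves like this toy version; illustrative
code over a plain list, not the smartset API::

   def limit_sketch(revs, n=1, offset=0):
       if n < 0:
           raise ValueError('negative number to select')
       if offset < 0:
           raise ValueError('negative offset')
       return revs[offset:offset + n]

   assert limit_sketch([10, 11, 12, 13], 2, 1) == [11, 12]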
 
-@predicate('last(set, [n])', safe=True, takeorder=True)
+
+@predicate(b'last(set, [n])', safe=True, takeorder=True)
 def last(repo, subset, x, order):
     """Last n members of set, defaulting to 1.
     """
     # i18n: "last" is a keyword
-    l = getargs(x, 1, 2, _("last requires one or two arguments"))
+    l = getargs(x, 1, 2, _(b"last requires one or two arguments"))
     lim = 1
     if len(l) == 2:
         # i18n: "last" is a keyword
-        lim = getinteger(l[1], _("last expects a number"))
+        lim = getinteger(l[1], _(b"last expects a number"))
     if lim < 0:
-        raise error.ParseError(_("negative number to select"))
+        raise error.ParseError(_(b"negative number to select"))
     os = getset(repo, fullreposet(repo), l[0])
     os.reverse()
     ls = os.slice(0, lim)
@@ -1339,7 +1504,8 @@
     ls.reverse()
     return ls & subset
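``last`` gets its result by reversing, slicing off the first ``lim`` entries,
then reversing back to restore ascending order; in plain-list terms this is
roughly (hypothetical helper, for illustration only)::

   def last_sketch(revs, n=1):
       if n < 0:
           raise ValueError('negative number to select')
       rs = list(reversed(revs))[:n]   # take n from the reversed order
       rs.reverse()                    # restore ascending order
       return rs

   assert last_sketch([10, 11, 12, 13], 2) == [12, 13]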
 
-@predicate('max(set)', safe=True)
+
+@predicate(b'max(set)', safe=True)
 def maxrev(repo, subset, x):
     """Changeset with highest revision number in set.
     """
@@ -1347,49 +1513,55 @@
     try:
         m = os.max()
         if m in subset:
-            return baseset([m], datarepr=('<max %r, %r>', subset, os))
+            return baseset([m], datarepr=(b'<max %r, %r>', subset, os))
     except ValueError:
         # os.max() throws a ValueError when the collection is empty.
         # Same as python's max().
         pass
-    return baseset(datarepr=('<max %r, %r>', subset, os))
-
-@predicate('merge()', safe=True)
+    return baseset(datarepr=(b'<max %r, %r>', subset, os))
+
+
+@predicate(b'merge()', safe=True)
 def merge(repo, subset, x):
     """Changeset is a merge changeset.
     """
     # i18n: "merge" is a keyword
-    getargs(x, 0, 0, _("merge takes no arguments"))
+    getargs(x, 0, 0, _(b"merge takes no arguments"))
     cl = repo.changelog
     nullrev = node.nullrev
+
     def ismerge(r):
         try:
             return cl.parentrevs(r)[1] != nullrev
         except error.WdirUnsupported:
             return bool(repo[r].p2())
-    return subset.filter(ismerge, condrepr='<merge>')
-
-@predicate('branchpoint()', safe=True)
+
+    return subset.filter(ismerge, condrepr=b'<merge>')
+
+
+@predicate(b'branchpoint()', safe=True)
 def branchpoint(repo, subset, x):
     """Changesets with more than one child.
     """
     # i18n: "branchpoint" is a keyword
-    getargs(x, 0, 0, _("branchpoint takes no arguments"))
+    getargs(x, 0, 0, _(b"branchpoint takes no arguments"))
     cl = repo.changelog
     if not subset:
         return baseset()
     # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
     # (and if it is not, it should be.)
     baserev = min(subset)
-    parentscount = [0]*(len(repo) - baserev)
+    parentscount = [0] * (len(repo) - baserev)
     for r in cl.revs(start=baserev + 1):
         for p in cl.parentrevs(r):
             if p >= baserev:
                 parentscount[p - baserev] += 1
-    return subset.filter(lambda r: parentscount[r - baserev] > 1,
-                         condrepr='<branchpoint>')
-
-@predicate('min(set)', safe=True)
+    return subset.filter(
+        lambda r: parentscount[r - baserev] > 1, condrepr=b'<branchpoint>'
+    )
+
+
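The counting trick above allocates one slot per revision at or after
``min(subset)`` and bumps a slot each time that revision appears as a parent;
a revision with more than one child is a branch point. A self-contained
sketch on a toy DAG (assumed data, not a real repo)::

   parents = {0: [], 1: [0], 2: [1], 3: [1], 4: [2]}  # rev -> parent revs
   baserev = 0
   parentscount = [0] * len(parents)
   for r in range(baserev + 1, len(parents)):
       for p in parents[r]:
           if p >= baserev:
               parentscount[p - baserev] += 1

   branchpoints = [r for r in parents if parentscount[r - baserev] > 1]
   assert branchpoints == [1]   # rev 1 has two children (2 and 3)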
+@predicate(b'min(set)', safe=True)
 def minrev(repo, subset, x):
     """Changeset with lowest revision number in set.
     """
@@ -1397,14 +1569,15 @@
     try:
         m = os.min()
         if m in subset:
-            return baseset([m], datarepr=('<min %r, %r>', subset, os))
+            return baseset([m], datarepr=(b'<min %r, %r>', subset, os))
     except ValueError:
         # os.min() throws a ValueError when the collection is empty.
         # Same as python's min().
         pass
-    return baseset(datarepr=('<min %r, %r>', subset, os))
-
-@predicate('modifies(pattern)', safe=True, weight=30)
+    return baseset(datarepr=(b'<min %r, %r>', subset, os))
+
+
+@predicate(b'modifies(pattern)', safe=True, weight=30)
 def modifies(repo, subset, x):
     """Changesets modifying files matched by pattern.
 
@@ -1413,10 +1586,11 @@
     directory.
     """
     # i18n: "modifies" is a keyword
-    pat = getstring(x, _("modifies requires a pattern"))
+    pat = getstring(x, _(b"modifies requires a pattern"))
     return checkstatus(repo, subset, pat, 0)
 
-@predicate('named(namespace)')
+
+@predicate(b'named(namespace)')
 def named(repo, subset, x):
     """The changesets in a given namespace.
 
@@ -1424,20 +1598,23 @@
     :hg:`help revisions.patterns`.
     """
     # i18n: "named" is a keyword
-    args = getargs(x, 1, 1, _('named requires a namespace argument'))
-
-    ns = getstring(args[0],
-                   # i18n: "named" is a keyword
-                   _('the argument to named must be a string'))
+    args = getargs(x, 1, 1, _(b'named requires a namespace argument'))
+
+    ns = getstring(
+        args[0],
+        # i18n: "named" is a keyword
+        _(b'the argument to named must be a string'),
+    )
     kind, pattern, matcher = stringutil.stringmatcher(ns)
     namespaces = set()
-    if kind == 'literal':
+    if kind == b'literal':
         if pattern not in repo.names:
-            raise error.RepoLookupError(_("namespace '%s' does not exist")
-                                        % ns)
+            raise error.RepoLookupError(
+                _(b"namespace '%s' does not exist") % ns
+            )
         namespaces.add(repo.names[pattern])
     else:
-        for name, ns in repo.names.iteritems():
+        for name, ns in pycompat.iteritems(repo.names):
             if matcher(name):
                 namespaces.add(ns)
 
@@ -1450,14 +1627,15 @@
     names -= {node.nullrev}
     return subset & names
 
-@predicate('id(string)', safe=True)
+
+@predicate(b'id(string)', safe=True)
 def node_(repo, subset, x):
     """Revision non-ambiguously specified by the given hex string prefix.
     """
     # i18n: "id" is a keyword
-    l = getargs(x, 1, 1, _("id requires one argument"))
+    l = getargs(x, 1, 1, _(b"id requires one argument"))
     # i18n: "id" is a keyword
-    n = getstring(l[0], _("id requires a string"))
+    n = getstring(l[0], _(b"id requires a string"))
     if len(n) == 40:
         try:
             rn = repo.changelog.rev(node.bin(n))
@@ -1481,23 +1659,26 @@
     result = baseset([rn])
     return result & subset
 
-@predicate('none()', safe=True)
+
+@predicate(b'none()', safe=True)
 def none(repo, subset, x):
     """No changesets.
     """
     # i18n: "none" is a keyword
-    getargs(x, 0, 0, _("none takes no arguments"))
+    getargs(x, 0, 0, _(b"none takes no arguments"))
     return baseset()
 
-@predicate('obsolete()', safe=True)
+
+@predicate(b'obsolete()', safe=True)
 def obsolete(repo, subset, x):
     """Mutable changeset with a newer version."""
     # i18n: "obsolete" is a keyword
-    getargs(x, 0, 0, _("obsolete takes no arguments"))
-    obsoletes = obsmod.getrevs(repo, 'obsolete')
+    getargs(x, 0, 0, _(b"obsolete takes no arguments"))
+    obsoletes = obsmod.getrevs(repo, b'obsolete')
     return subset & obsoletes
 
-@predicate('only(set, [set])', safe=True)
+
+@predicate(b'only(set, [set])', safe=True)
 def only(repo, subset, x):
     """Changesets that are ancestors of the first set that are not ancestors
     of any other head in the repo. If a second set is specified, the result
@@ -1506,15 +1687,18 @@
     """
     cl = repo.changelog
     # i18n: "only" is a keyword
-    args = getargs(x, 1, 2, _('only takes one or two arguments'))
+    args = getargs(x, 1, 2, _(b'only takes one or two arguments'))
     include = getset(repo, fullreposet(repo), args[0])
     if len(args) == 1:
         if not include:
             return baseset()
 
         descendants = set(dagop.revdescendants(repo, include, False))
-        exclude = [rev for rev in cl.headrevs()
-            if not rev in descendants and not rev in include]
+        exclude = [
+            rev
+            for rev in cl.headrevs()
+            if rev not in descendants and rev not in include
+        ]
     else:
         exclude = getset(repo, fullreposet(repo), args[1])
 
@@ -1523,7 +1707,8 @@
     # some optimizations from the fact this is a baseset.
     return subset & results
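Set-algebra view of ``only``: with two arguments it is the ancestors of the
first set minus the ancestors of the second. A toy model, with a hypothetical
DAG and helper names::

   parents = {0: [], 1: [0], 2: [1], 3: [1]}

   def ancestors_of(r):                     # r plus all of its ancestors
       seen, stack = set(), [r]
       while stack:
           n = stack.pop()
           if n not in seen:
               seen.add(n)
               stack.extend(parents[n])
       return seen

   def only_sketch(include, exclude):
       anc = lambda revs: set().union(*map(ancestors_of, revs)) if revs else set()
       return anc(include) - anc(exclude)

   assert only_sketch([2], [3]) == {2}      # '::2 - ::3'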
 
-@predicate('origin([set])', safe=True)
+
+@predicate(b'origin([set])', safe=True)
 def origin(repo, subset, x):
     """
     Changesets that were specified as a source for the grafts, transplants or
@@ -1555,7 +1740,8 @@
     # some optimizations from the fact this is a baseset.
     return subset & o
 
-@predicate('outgoing([path])', safe=False, weight=10)
+
+@predicate(b'outgoing([path])', safe=False, weight=10)
 def outgoing(repo, subset, x):
     """Changesets not found in the specified destination repository, or the
     default push location.
@@ -1565,17 +1751,22 @@
         discovery,
         hg,
     )
+
     # i18n: "outgoing" is a keyword
-    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
+    l = getargs(x, 0, 1, _(b"outgoing takes one or no arguments"))
     # i18n: "outgoing" is a keyword
-    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
+    dest = (
+        l and getstring(l[0], _(b"outgoing requires a repository path")) or b''
+    )
     if not dest:
         # ui.paths.getpath() explicitly tests for None, not just a boolean
         dest = None
-    path = repo.ui.paths.getpath(dest, default=('default-push', 'default'))
+    path = repo.ui.paths.getpath(dest, default=(b'default-push', b'default'))
     if not path:
-        raise error.Abort(_('default repository not configured!'),
-                hint=_("see 'hg help config.paths'"))
+        raise error.Abort(
+            _(b'default repository not configured!'),
+            hint=_(b"see 'hg help config.paths'"),
+        )
     dest = path.pushloc or path.loc
     branches = path.branch, []
 
@@ -1590,7 +1781,8 @@
     o = {cl.rev(r) for r in outgoing.missing}
     return subset & o
 
-@predicate('p1([set])', safe=True)
+
+@predicate(b'p1([set])', safe=True)
 def p1(repo, subset, x):
     """First parent of changesets in set, or the working directory.
     """
@@ -1612,7 +1804,8 @@
     # some optimizations from the fact this is a baseset.
     return subset & ps
 
-@predicate('p2([set])', safe=True)
+
+@predicate(b'p2([set])', safe=True)
 def p2(repo, subset, x):
     """Second parent of changesets in set, or the working directory.
     """
@@ -1640,10 +1833,12 @@
     # some optimizations from the fact this is a baseset.
     return subset & ps
 
+
 def parentpost(repo, subset, x, order):
     return p1(repo, subset, x)
 
-@predicate('parents([set])', safe=True)
+
+@predicate(b'parents([set])', safe=True)
 def parents(repo, subset, x):
     """
     The set of all parents for all changesets in set, or the working directory.
@@ -1663,39 +1858,44 @@
     ps -= {node.nullrev}
     return subset & ps
 
+
 def _phase(repo, subset, *targets):
     """helper to select all rev in <targets> phases"""
     return repo._phasecache.getrevset(repo, targets, subset)
 
-@predicate('_phase(idx)', safe=True)
+
+@predicate(b'_phase(idx)', safe=True)
 def phase(repo, subset, x):
-    l = getargs(x, 1, 1, ("_phase requires one argument"))
-    target = getinteger(l[0], ("_phase expects a number"))
+    l = getargs(x, 1, 1, b"_phase requires one argument")
+    target = getinteger(l[0], b"_phase expects a number")
     return _phase(repo, subset, target)
 
-@predicate('draft()', safe=True)
+
+@predicate(b'draft()', safe=True)
 def draft(repo, subset, x):
     """Changeset in draft phase."""
     # i18n: "draft" is a keyword
-    getargs(x, 0, 0, _("draft takes no arguments"))
+    getargs(x, 0, 0, _(b"draft takes no arguments"))
     target = phases.draft
     return _phase(repo, subset, target)
 
-@predicate('secret()', safe=True)
+
+@predicate(b'secret()', safe=True)
 def secret(repo, subset, x):
     """Changeset in secret phase."""
     # i18n: "secret" is a keyword
-    getargs(x, 0, 0, _("secret takes no arguments"))
+    getargs(x, 0, 0, _(b"secret takes no arguments"))
     target = phases.secret
     return _phase(repo, subset, target)
 
-@predicate('stack([revs])', safe=True)
+
+@predicate(b'stack([revs])', safe=True)
 def stack(repo, subset, x):
     """Experimental revset for the stack of changesets or working directory
     parent. (EXPERIMENTAL)
     """
     if x is None:
-        stacks = stackmod.getstack(repo, x)
+        stacks = stackmod.getstack(repo)
     else:
         stacks = smartset.baseset([])
         for revision in getset(repo, fullreposet(repo), x):
@@ -1704,6 +1904,7 @@
 
     return subset & stacks
 
+
 def parentspec(repo, subset, x, n, order):
     """``set^0``
     The set.
@@ -1715,7 +1916,7 @@
         if n not in (0, 1, 2):
             raise ValueError
     except (TypeError, ValueError):
-        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
+        raise error.ParseError(_(b"^ expects a number 0, 1, or 2"))
     ps = set()
     cl = repo.changelog
     for r in getset(repo, fullreposet(repo), x):
@@ -1737,7 +1938,8 @@
                     ps.add(parents[1].rev())
     return subset & ps
 
-@predicate('present(set)', safe=True, takeorder=True)
+
+@predicate(b'present(set)', safe=True, takeorder=True)
 def present(repo, subset, x, order):
     """An empty set, if any revision in set isn't found; otherwise,
     all revisions in set.
@@ -1751,30 +1953,32 @@
     except error.RepoLookupError:
         return baseset()
 
-# for internal use
-@predicate('_notpublic', safe=True)
-def _notpublic(repo, subset, x):
-    getargs(x, 0, 0, "_notpublic takes no arguments")
-    return _phase(repo, subset, phases.draft, phases.secret)
 
 # for internal use
-@predicate('_phaseandancestors(phasename, set)', safe=True)
+@predicate(b'_notpublic', safe=True)
+def _notpublic(repo, subset, x):
+    getargs(x, 0, 0, b"_notpublic takes no arguments")
+    return _phase(repo, subset, phases.draft, phases.secret)
+
+
+# for internal use
+@predicate(b'_phaseandancestors(phasename, set)', safe=True)
 def _phaseandancestors(repo, subset, x):
     # equivalent to (phasename() & ancestors(set)) but more efficient
     # phasename could be one of 'draft', 'secret', or '_notpublic'
-    args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
+    args = getargs(x, 2, 2, b"_phaseandancestors requires two arguments")
     phasename = getsymbol(args[0])
     s = getset(repo, fullreposet(repo), args[1])
 
     draft = phases.draft
     secret = phases.secret
     phasenamemap = {
-        '_notpublic': draft,
-        'draft': draft, # follow secret's ancestors
-        'secret': secret,
+        b'_notpublic': draft,
+        b'draft': draft,  # follow secret's ancestors
+        b'secret': secret,
     }
     if phasename not in phasenamemap:
-        raise error.ParseError('%r is not a valid phasename' % phasename)
+        raise error.ParseError(b'%r is not a valid phasename' % phasename)
 
     minimalphase = phasenamemap[phasename]
     getphase = repo._phasecache.phase
@@ -1784,40 +1988,43 @@
 
     revs = dagop.revancestors(repo, s, cutfunc=cutfunc)
 
-    if phasename == 'draft': # need to remove secret changesets
+    if phasename == b'draft':  # need to remove secret changesets
         revs = revs.filter(lambda r: getphase(repo, r) == draft)
     return subset & revs
 
-@predicate('public()', safe=True)
+
+@predicate(b'public()', safe=True)
 def public(repo, subset, x):
     """Changeset in public phase."""
     # i18n: "public" is a keyword
-    getargs(x, 0, 0, _("public takes no arguments"))
+    getargs(x, 0, 0, _(b"public takes no arguments"))
     return _phase(repo, subset, phases.public)
 
-@predicate('remote([id [,path]])', safe=False)
+
+@predicate(b'remote([id [,path]])', safe=False)
 def remote(repo, subset, x):
     """Local revision that corresponds to the given identifier in a
     remote repository, if present. Here, the '.' identifier is a
     synonym for the current local branch.
     """
 
-    from . import hg # avoid start-up nasties
+    from . import hg  # avoid start-up nasties
+
     # i18n: "remote" is a keyword
-    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
-
-    q = '.'
+    l = getargs(x, 0, 2, _(b"remote takes zero, one, or two arguments"))
+
+    q = b'.'
     if len(l) > 0:
-    # i18n: "remote" is a keyword
-        q = getstring(l[0], _("remote requires a string id"))
-    if q == '.':
-        q = repo['.'].branch()
-
-    dest = ''
+        # i18n: "remote" is a keyword
+        q = getstring(l[0], _(b"remote requires a string id"))
+    if q == b'.':
+        q = repo[b'.'].branch()
+
+    dest = b''
     if len(l) > 1:
         # i18n: "remote" is a keyword
-        dest = getstring(l[1], _("remote requires a repository path"))
-    dest = repo.ui.expandpath(dest or 'default')
+        dest = getstring(l[1], _(b"remote requires a repository path"))
+    dest = repo.ui.expandpath(dest or b'default')
     dest, branches = hg.parseurl(dest)
     revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
     if revs:
@@ -1830,7 +2037,8 @@
             return baseset([r])
     return baseset()
 
-@predicate('removes(pattern)', safe=True, weight=30)
+
+@predicate(b'removes(pattern)', safe=True, weight=30)
 def removes(repo, subset, x):
     """Changesets which remove files matching pattern.
 
@@ -1839,40 +2047,43 @@
     directory.
     """
     # i18n: "removes" is a keyword
-    pat = getstring(x, _("removes requires a pattern"))
+    pat = getstring(x, _(b"removes requires a pattern"))
     return checkstatus(repo, subset, pat, 2)
 
-@predicate('rev(number)', safe=True)
+
+@predicate(b'rev(number)', safe=True)
 def rev(repo, subset, x):
     """Revision with the given numeric identifier.
     """
     # i18n: "rev" is a keyword
-    l = getargs(x, 1, 1, _("rev requires one argument"))
+    l = getargs(x, 1, 1, _(b"rev requires one argument"))
     try:
         # i18n: "rev" is a keyword
-        l = int(getstring(l[0], _("rev requires a number")))
+        l = int(getstring(l[0], _(b"rev requires a number")))
     except (TypeError, ValueError):
         # i18n: "rev" is a keyword
-        raise error.ParseError(_("rev expects a number"))
+        raise error.ParseError(_(b"rev expects a number"))
     if l not in repo.changelog and l not in _virtualrevs:
         return baseset()
     return subset & baseset([l])
 
-@predicate('_rev(number)', safe=True)
+
+@predicate(b'_rev(number)', safe=True)
 def _rev(repo, subset, x):
     # internal version of "rev(x)" that raise error if "x" is invalid
     # i18n: "rev" is a keyword
-    l = getargs(x, 1, 1, _("rev requires one argument"))
+    l = getargs(x, 1, 1, _(b"rev requires one argument"))
     try:
         # i18n: "rev" is a keyword
-        l = int(getstring(l[0], _("rev requires a number")))
+        l = int(getstring(l[0], _(b"rev requires a number")))
     except (TypeError, ValueError):
         # i18n: "rev" is a keyword
-        raise error.ParseError(_("rev expects a number"))
-    repo.changelog.node(l) # check that the rev exists
+        raise error.ParseError(_(b"rev expects a number"))
+    repo.changelog.node(l)  # check that the rev exists
     return subset & baseset([l])
 
-@predicate('revset(set)', safe=True, takeorder=True)
+
+@predicate(b'revset(set)', safe=True, takeorder=True)
 def revsetpredicate(repo, subset, x, order):
     """Strictly interpret the content as a revset.
 
@@ -1882,7 +2093,8 @@
     """
     return getset(repo, subset, x, order)
 
-@predicate('matching(revision [, field])', safe=True)
+
+@predicate(b'matching(revision [, field])', safe=True)
 def matching(repo, subset, x):
     """Changesets in which a given set of fields match the set of fields in the
     selected revision or set.
@@ -1908,49 +2120,62 @@
     specified. You can match more than one field at a time.
     """
     # i18n: "matching" is a keyword
-    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
+    l = getargs(x, 1, 2, _(b"matching takes 1 or 2 arguments"))
 
     revs = getset(repo, fullreposet(repo), l[0])
 
-    fieldlist = ['metadata']
+    fieldlist = [b'metadata']
     if len(l) > 1:
-            fieldlist = getstring(l[1],
-                # i18n: "matching" is a keyword
-                _("matching requires a string "
-                "as its second argument")).split()
+        fieldlist = getstring(
+            l[1],
+            # i18n: "matching" is a keyword
+            _(b"matching requires a string as its second argument"),
+        ).split()
 
     # Make sure that there are no repeated fields,
     # expand the 'special' 'metadata' field type
     # and check the 'files' whenever we check the 'diff'
     fields = []
     for field in fieldlist:
-        if field == 'metadata':
-            fields += ['user', 'description', 'date']
-        elif field == 'diff':
+        if field == b'metadata':
+            fields += [b'user', b'description', b'date']
+        elif field == b'diff':
             # a revision matching the diff must also match the files
             # since matching the diff is very costly, make sure to
             # also match the files first
-            fields += ['files', 'diff']
+            fields += [b'files', b'diff']
         else:
-            if field == 'author':
-                field = 'user'
+            if field == b'author':
+                field = b'user'
             fields.append(field)
     fields = set(fields)
-    if 'summary' in fields and 'description' in fields:
+    if b'summary' in fields and b'description' in fields:
         # If a revision matches its description it also matches its summary
-        fields.discard('summary')
+        fields.discard(b'summary')
 
     # We may want to match more than one field
     # Not all fields take the same amount of time to be matched
     # Sort the selected fields in order of increasing matching cost
-    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
-        'files', 'description', 'substate', 'diff']
+    fieldorder = [
+        b'phase',
+        b'parents',
+        b'user',
+        b'date',
+        b'branch',
+        b'summary',
+        b'files',
+        b'description',
+        b'substate',
+        b'diff',
+    ]
+
     def fieldkeyfunc(f):
         try:
             return fieldorder.index(f)
         except ValueError:
             # assume an unknown field is very costly
             return len(fieldorder)
+
     fields = list(fields)
     fields.sort(key=fieldkeyfunc)
 
@@ -1958,24 +2183,27 @@
     # which will be added to the getfieldfuncs array of functions
     getfieldfuncs = []
     _funcs = {
-        'user': lambda r: repo[r].user(),
-        'branch': lambda r: repo[r].branch(),
-        'date': lambda r: repo[r].date(),
-        'description': lambda r: repo[r].description(),
-        'files': lambda r: repo[r].files(),
-        'parents': lambda r: repo[r].parents(),
-        'phase': lambda r: repo[r].phase(),
-        'substate': lambda r: repo[r].substate,
-        'summary': lambda r: repo[r].description().splitlines()[0],
-        'diff': lambda r: list(repo[r].diff(
-            opts=diffutil.diffallopts(repo.ui, {'git': True}))),
+        b'user': lambda r: repo[r].user(),
+        b'branch': lambda r: repo[r].branch(),
+        b'date': lambda r: repo[r].date(),
+        b'description': lambda r: repo[r].description(),
+        b'files': lambda r: repo[r].files(),
+        b'parents': lambda r: repo[r].parents(),
+        b'phase': lambda r: repo[r].phase(),
+        b'substate': lambda r: repo[r].substate,
+        b'summary': lambda r: repo[r].description().splitlines()[0],
+        b'diff': lambda r: list(
+            repo[r].diff(opts=diffutil.diffallopts(repo.ui, {b'git': True}))
+        ),
     }
     for info in fields:
         getfield = _funcs.get(info, None)
         if getfield is None:
             raise error.ParseError(
                 # i18n: "matching" is a keyword
-                _("unexpected field name passed to matching: %s") % info)
+                _(b"unexpected field name passed to matching: %s")
+                % info
+            )
         getfieldfuncs.append(getfield)
     # convert the getfield array of functions into a "getinfo" function
     # which returns an array of field values (or a single value if there
@@ -1993,9 +2221,10 @@
                 return True
         return False
 
-    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
-
-@predicate('reverse(set)', safe=True, takeorder=True, weight=0)
+    return subset.filter(matches, condrepr=(b'<matching%r %r>', fields, revs))
+
+
+@predicate(b'reverse(set)', safe=True, takeorder=True, weight=0)
 def reverse(repo, subset, x, order):
     """Reverse order of set.
     """
@@ -2004,68 +2233,81 @@
         l.reverse()
     return l
 
-@predicate('roots(set)', safe=True)
+
+@predicate(b'roots(set)', safe=True)
 def roots(repo, subset, x):
     """Changesets in set with no parent changeset in set.
     """
     s = getset(repo, fullreposet(repo), x)
     parents = repo.changelog.parentrevs
+
     def filter(r):
         for p in parents(r):
             if 0 <= p and p in s:
                 return False
         return True
-    return subset & s.filter(filter, condrepr='<roots>')
+
+    return subset & s.filter(filter, condrepr=b'<roots>')
+
 
 _sortkeyfuncs = {
-    'rev': lambda c: c.rev(),
-    'branch': lambda c: c.branch(),
-    'desc': lambda c: c.description(),
-    'user': lambda c: c.user(),
-    'author': lambda c: c.user(),
-    'date': lambda c: c.date()[0],
+    b'rev': lambda c: c.rev(),
+    b'branch': lambda c: c.branch(),
+    b'desc': lambda c: c.description(),
+    b'user': lambda c: c.user(),
+    b'author': lambda c: c.user(),
+    b'date': lambda c: c.date()[0],
 }
 
+
 def _getsortargs(x):
     """Parse sort options into (set, [(key, reverse)], opts)"""
-    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
-    if 'set' not in args:
+    args = getargsdict(x, b'sort', b'set keys topo.firstbranch')
+    if b'set' not in args:
         # i18n: "sort" is a keyword
-        raise error.ParseError(_('sort requires one or two arguments'))
-    keys = "rev"
-    if 'keys' in args:
+        raise error.ParseError(_(b'sort requires one or two arguments'))
+    keys = b"rev"
+    if b'keys' in args:
         # i18n: "sort" is a keyword
-        keys = getstring(args['keys'], _("sort spec must be a string"))
+        keys = getstring(args[b'keys'], _(b"sort spec must be a string"))
 
     keyflags = []
     for k in keys.split():
         fk = k
-        reverse = (k.startswith('-'))
+        reverse = k.startswith(b'-')
         if reverse:
             k = k[1:]
-        if k not in _sortkeyfuncs and k != 'topo':
+        if k not in _sortkeyfuncs and k != b'topo':
             raise error.ParseError(
-                _("unknown sort key %r") % pycompat.bytestr(fk))
+                _(b"unknown sort key %r") % pycompat.bytestr(fk)
+            )
         keyflags.append((k, reverse))
 
-    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
+    if len(keyflags) > 1 and any(k == b'topo' for k, reverse in keyflags):
         # i18n: "topo" is a keyword
-        raise error.ParseError(_('topo sort order cannot be combined '
-                                 'with other sort keys'))
+        raise error.ParseError(
+            _(b'topo sort order cannot be combined with other sort keys')
+        )
 
     opts = {}
-    if 'topo.firstbranch' in args:
-        if any(k == 'topo' for k, reverse in keyflags):
-            opts['topo.firstbranch'] = args['topo.firstbranch']
+    if b'topo.firstbranch' in args:
+        if any(k == b'topo' for k, reverse in keyflags):
+            opts[b'topo.firstbranch'] = args[b'topo.firstbranch']
         else:
             # i18n: "topo" and "topo.firstbranch" are keywords
-            raise error.ParseError(_('topo.firstbranch can only be used '
-                                     'when using the topo sort key'))
-
-    return args['set'], keyflags, opts
-
-@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
-           weight=10)
+            raise error.ParseError(
+                _(
+                    b'topo.firstbranch can only be used '
+                    b'when using the topo sort key'
+                )
+            )
+
+    return args[b'set'], keyflags, opts
+
+
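A quick illustration of what ``_getsortargs`` produces for a key spec; this
is a pure-Python rendering of the loop above, minus the validation against
``_sortkeyfuncs`` and the ``topo`` handling::

   def parse_keyflags(keys='rev'):
       keyflags = []
       for k in keys.split():
           reverse = k.startswith('-')
           if reverse:
               k = k[1:]
           keyflags.append((k, reverse))
       return keyflags

   assert parse_keyflags('-date branch') == [('date', True), ('branch', False)]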
+@predicate(
+    b'sort(set[, [-]key... [, ...]])', safe=True, takeorder=True, weight=10
+)
 def sort(repo, subset, x, order):
     """Sort set by keys. The default sort order is ascending, specify a key
     as ``-key`` to sort in descending order.
@@ -2089,16 +2331,17 @@
 
     if not keyflags or order != defineorder:
         return revs
-    if len(keyflags) == 1 and keyflags[0][0] == "rev":
+    if len(keyflags) == 1 and keyflags[0][0] == b"rev":
         revs.sort(reverse=keyflags[0][1])
         return revs
-    elif keyflags[0][0] == "topo":
+    elif keyflags[0][0] == b"topo":
         firstbranch = ()
-        if 'topo.firstbranch' in opts:
-            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
-        revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
-                                      firstbranch),
-                       istopo=True)
+        if b'topo.firstbranch' in opts:
+            firstbranch = getset(repo, subset, opts[b'topo.firstbranch'])
+        revs = baseset(
+            dagop.toposort(revs, repo.changelog.parentrevs, firstbranch),
+            istopo=True,
+        )
         if keyflags[0][1]:
             revs.reverse()
         return revs
@@ -2109,18 +2352,19 @@
         ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
     return baseset([c.rev() for c in ctxs])
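The loop right above sorts once per key, last key first; because Python's
sort is stable, earlier keys end up dominating. A toy demonstration of the
same idiom, with illustrative records rather than changectx objects::

   rows = [('default', 3), ('stable', 1), ('default', 2)]
   keyflags = [('branch', False), ('rev', True)]   # by branch, then -rev
   keyfuncs = {'branch': lambda r: r[0], 'rev': lambda r: r[1]}

   for k, reverse in keyflags[::-1]:               # apply last key first
       rows.sort(key=keyfuncs[k], reverse=reverse)

   assert rows == [('default', 3), ('default', 2), ('stable', 1)]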
 
-@predicate('subrepo([pattern])')
+
+@predicate(b'subrepo([pattern])')
 def subrepo(repo, subset, x):
     """Changesets that add, modify or remove the given subrepo.  If no subrepo
     pattern is named, any subrepo changes are returned.
     """
     # i18n: "subrepo" is a keyword
-    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
+    args = getargs(x, 0, 1, _(b'subrepo takes at most one argument'))
     pat = None
     if len(args) != 0:
-        pat = getstring(args[0], _("subrepo requires a pattern"))
-
-    m = matchmod.exact(['.hgsubstate'])
+        pat = getstring(args[0], _(b"subrepo requires a pattern"))
+
+    m = matchmod.exact([b'.hgsubstate'])
 
     def submatches(names):
         k, p, m = stringutil.stringmatcher(pat)
@@ -2151,7 +2395,8 @@
 
         return False
 
-    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
+    return subset.filter(matches, condrepr=(b'<subrepo %r>', pat))
+
 
 def _mapbynodefunc(repo, s, f):
     """(repo, smartset, [node] -> [node]) -> smartset
@@ -2167,7 +2412,8 @@
     result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
     return smartset.baseset(result - repo.changelog.filteredrevs)
 
-@predicate('successors(set)', safe=True)
+
+@predicate(b'successors(set)', safe=True)
 def successors(repo, subset, x):
     """All successors for set, including the given set themselves"""
     s = getset(repo, fullreposet(repo), x)
@@ -2175,10 +2421,12 @@
     d = _mapbynodefunc(repo, s, f)
     return subset & d
 
+
 def _substringmatcher(pattern, casesensitive=True):
     kind, pattern, matcher = stringutil.stringmatcher(
-        pattern, casesensitive=casesensitive)
-    if kind == 'literal':
+        pattern, casesensitive=casesensitive
+    )
+    if kind == b'literal':
         if not casesensitive:
             pattern = encoding.lower(pattern)
             matcher = lambda s: pattern in encoding.lower(s)
@@ -2186,7 +2434,8 @@
             matcher = lambda s: pattern in s
     return kind, pattern, matcher
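In the literal case ``_substringmatcher`` degrades to a plain substring test,
lower-casing both sides when case-insensitive; sketched here without the
``encoding`` module, so it assumes ASCII-ish input::

   def substringmatcher(pattern, casesensitive=True):
       if not casesensitive:
           pattern = pattern.lower()
           return lambda s: pattern in s.lower()
       return lambda s: pattern in s

   m = substringmatcher('Bob', casesensitive=False)
   assert m('bob@example.com') and not m('alice')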
 
-@predicate('tag([name])', safe=True)
+
+@predicate(b'tag([name])', safe=True)
 def tag(repo, subset, x):
     """The specified tag by name, or all tagged revisions if no name is given.
 
@@ -2194,41 +2443,46 @@
     :hg:`help revisions.patterns`.
     """
     # i18n: "tag" is a keyword
-    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
+    args = getargs(x, 0, 1, _(b"tag takes one or no arguments"))
     cl = repo.changelog
     if args:
-        pattern = getstring(args[0],
-                            # i18n: "tag" is a keyword
-                            _('the argument to tag must be a string'))
+        pattern = getstring(
+            args[0],
+            # i18n: "tag" is a keyword
+            _(b'the argument to tag must be a string'),
+        )
         kind, pattern, matcher = stringutil.stringmatcher(pattern)
-        if kind == 'literal':
+        if kind == b'literal':
             # avoid resolving all tags
             tn = repo._tagscache.tags.get(pattern, None)
             if tn is None:
-                raise error.RepoLookupError(_("tag '%s' does not exist")
-                                            % pattern)
+                raise error.RepoLookupError(
+                    _(b"tag '%s' does not exist") % pattern
+                )
             s = {repo[tn].rev()}
         else:
             s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
     else:
-        s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
+        s = {cl.rev(n) for t, n in repo.tagslist() if t != b'tip'}
     return subset & s
 
-@predicate('tagged', safe=True)
+
+@predicate(b'tagged', safe=True)
 def tagged(repo, subset, x):
     return tag(repo, subset, x)
 
-@predicate('orphan()', safe=True)
+
+@predicate(b'orphan()', safe=True)
 def orphan(repo, subset, x):
     """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
     """
     # i18n: "orphan" is a keyword
-    getargs(x, 0, 0, _("orphan takes no arguments"))
-    orphan = obsmod.getrevs(repo, 'orphan')
+    getargs(x, 0, 0, _(b"orphan takes no arguments"))
+    orphan = obsmod.getrevs(repo, b'orphan')
     return subset & orphan
 
 
-@predicate('user(string)', safe=True, weight=10)
+@predicate(b'user(string)', safe=True, weight=10)
 def user(repo, subset, x):
     """User name contains string. The match is case-insensitive.
 
@@ -2237,17 +2491,19 @@
     """
     return author(repo, subset, x)
 
-@predicate('wdir()', safe=True, weight=0)
+
+@predicate(b'wdir()', safe=True, weight=0)
 def wdir(repo, subset, x):
     """Working directory. (EXPERIMENTAL)"""
     # i18n: "wdir" is a keyword
-    getargs(x, 0, 0, _("wdir takes no arguments"))
+    getargs(x, 0, 0, _(b"wdir takes no arguments"))
     if node.wdirrev in subset or isinstance(subset, fullreposet):
         return baseset([node.wdirrev])
     return baseset()
 
+
 def _orderedlist(repo, subset, x):
-    s = getstring(x, "internal error")
+    s = getstring(x, b"internal error")
     if not s:
         return baseset()
     # remove duplicates here. it's difficult for caller to deduplicate sets
@@ -2255,11 +2511,11 @@
     cl = repo.changelog
     ls = []
     seen = set()
-    for t in s.split('\0'):
+    for t in s.split(b'\0'):
         try:
             # fast path for integer revision
             r = int(t)
-            if ('%d' % r) != t or r not in cl:
+            if (b'%d' % r) != t or r not in cl:
                 raise ValueError
             revs = [r]
         except ValueError:
@@ -2268,14 +2524,18 @@
         for r in revs:
             if r in seen:
                 continue
-            if (r in subset
-                or r in _virtualrevs and isinstance(subset, fullreposet)):
+            if (
+                r in subset
+                or r in _virtualrevs
+                and isinstance(subset, fullreposet)
+            ):
                 ls.append(r)
             seen.add(r)
     return baseset(ls)
 
+
 # for internal use
-@predicate('_list', safe=True, takeorder=True)
+@predicate(b'_list', safe=True, takeorder=True)
 def _list(repo, subset, x, order):
     if order == followorder:
         # slow path to take the subset order
@@ -2283,16 +2543,18 @@
     else:
         return _orderedlist(repo, subset, x)
 
+
 def _orderedintlist(repo, subset, x):
-    s = getstring(x, "internal error")
+    s = getstring(x, b"internal error")
     if not s:
         return baseset()
-    ls = [int(r) for r in s.split('\0')]
+    ls = [int(r) for r in s.split(b'\0')]
     s = subset
     return baseset([r for r in ls if r in s])
 
+
 # for internal use
-@predicate('_intlist', safe=True, takeorder=True, weight=0)
+@predicate(b'_intlist', safe=True, takeorder=True, weight=0)
 def _intlist(repo, subset, x, order):
     if order == followorder:
         # slow path to take the subset order
@@ -2300,17 +2562,19 @@
     else:
         return _orderedintlist(repo, subset, x)
 
+
 def _orderedhexlist(repo, subset, x):
-    s = getstring(x, "internal error")
+    s = getstring(x, b"internal error")
     if not s:
         return baseset()
     cl = repo.changelog
-    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
+    ls = [cl.rev(node.bin(r)) for r in s.split(b'\0')]
     s = subset
     return baseset([r for r in ls if r in s])
 
+
 # for internal use
-@predicate('_hexlist', safe=True, takeorder=True)
+@predicate(b'_hexlist', safe=True, takeorder=True)
 def _hexlist(repo, subset, x, order):
     if order == followorder:
         # slow path to take the subset order
@@ -2318,43 +2582,47 @@
     else:
         return _orderedhexlist(repo, subset, x)
 
+
 methods = {
-    "range": rangeset,
-    "rangeall": rangeall,
-    "rangepre": rangepre,
-    "rangepost": rangepost,
-    "dagrange": dagrange,
-    "string": stringset,
-    "symbol": stringset,
-    "and": andset,
-    "andsmally": andsmallyset,
-    "or": orset,
-    "not": notset,
-    "difference": differenceset,
-    "relation": relationset,
-    "relsubscript": relsubscriptset,
-    "subscript": subscriptset,
-    "list": listset,
-    "keyvalue": keyvaluepair,
-    "func": func,
-    "ancestor": ancestorspec,
-    "parent": parentspec,
-    "parentpost": parentpost,
-    "smartset": rawsmartset,
+    b"range": rangeset,
+    b"rangeall": rangeall,
+    b"rangepre": rangepre,
+    b"rangepost": rangepost,
+    b"dagrange": dagrange,
+    b"string": stringset,
+    b"symbol": stringset,
+    b"and": andset,
+    b"andsmally": andsmallyset,
+    b"or": orset,
+    b"not": notset,
+    b"difference": differenceset,
+    b"relation": relationset,
+    b"relsubscript": relsubscriptset,
+    b"subscript": subscriptset,
+    b"list": listset,
+    b"keyvalue": keyvaluepair,
+    b"func": func,
+    b"ancestor": ancestorspec,
+    b"parent": parentspec,
+    b"parentpost": parentpost,
+    b"smartset": rawsmartset,
 }
 
 subscriptrelations = {
-    "g": generationsrel,
-    "generations": generationsrel,
+    b"g": generationsrel,
+    b"generations": generationsrel,
 }
 
+
 def lookupfn(repo):
     return lambda symbol: scmutil.isrevsymbol(repo, symbol)
 
+
 def match(ui, spec, lookup=None):
     """Create a matcher for a single revision spec"""
     return matchany(ui, [spec], lookup=lookup)
 
+
 def matchany(ui, specs, lookup=None, localalias=None):
     """Create a matcher that will include any revisions matching one of the
     given specs
@@ -2366,21 +2634,25 @@
     precedence over [revsetalias] config section.
     """
     if not specs:
+
         def mfunc(repo, subset=None):
             return baseset()
+
         return mfunc
     if not all(specs):
-        raise error.ParseError(_("empty query"))
+        raise error.ParseError(_(b"empty query"))
     if len(specs) == 1:
         tree = revsetlang.parse(specs[0], lookup)
     else:
-        tree = ('or',
-                ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
+        tree = (
+            b'or',
+            (b'list',) + tuple(revsetlang.parse(s, lookup) for s in specs),
+        )
 
     aliases = []
     warn = None
     if ui:
-        aliases.extend(ui.configitems('revsetalias'))
+        aliases.extend(ui.configitems(b'revsetalias'))
         warn = ui.warn
     if localalias:
         aliases.extend(localalias.items())
@@ -2391,8 +2663,10 @@
     tree = revsetlang.optimize(tree)
     return makematcher(tree)
 
+
 def makematcher(tree):
     """Create a matcher from an evaluatable tree"""
+
     def mfunc(repo, subset=None, order=None):
         if order is None:
             if subset is None:
@@ -2402,16 +2676,19 @@
         if subset is None:
             subset = fullreposet(repo)
         return getset(repo, subset, tree, order)
+
     return mfunc
 
+
 def loadpredicate(ui, extname, registrarobj):
     """Load revset predicates from specified registrarobj
     """
-    for name, func in registrarobj._table.iteritems():
+    for name, func in pycompat.iteritems(registrarobj._table):
         symbols[name] = func
         if func._safe:
             safesymbols.add(name)
 
+
 # load built-in predicates explicitly to setup safesymbols
 loadpredicate(None, None, predicate)
 
--- a/mercurial/revsetlang.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/revsetlang.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,6 +10,7 @@
 import string
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     error,
     node,
@@ -18,56 +19,68 @@
     smartset,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
 elements = {
     # token-type: binding-strength, primary, prefix, infix, suffix
-    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
-    "[": (21, None, None, ("subscript", 1, "]"), None),
-    "#": (21, None, None, ("relation", 21), None),
-    "##": (20, None, None, ("_concat", 20), None),
-    "~": (18, None, None, ("ancestor", 18), None),
-    "^": (18, None, None, ("parent", 18), "parentpost"),
-    "-": (5, None, ("negate", 19), ("minus", 5), None),
-    "::": (17, "dagrangeall", ("dagrangepre", 17), ("dagrange", 17),
-           "dagrangepost"),
-    "..": (17, "dagrangeall", ("dagrangepre", 17), ("dagrange", 17),
-           "dagrangepost"),
-    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
-    "not": (10, None, ("not", 10), None, None),
-    "!": (10, None, ("not", 10), None, None),
-    "and": (5, None, None, ("and", 5), None),
-    "&": (5, None, None, ("and", 5), None),
-    "%": (5, None, None, ("only", 5), "onlypost"),
-    "or": (4, None, None, ("or", 4), None),
-    "|": (4, None, None, ("or", 4), None),
-    "+": (4, None, None, ("or", 4), None),
-    "=": (3, None, None, ("keyvalue", 3), None),
-    ",": (2, None, None, ("list", 2), None),
-    ")": (0, None, None, None, None),
-    "]": (0, None, None, None, None),
-    "symbol": (0, "symbol", None, None, None),
-    "string": (0, "string", None, None, None),
-    "end": (0, None, None, None, None),
+    b"(": (21, None, (b"group", 1, b")"), (b"func", 1, b")"), None),
+    b"[": (21, None, None, (b"subscript", 1, b"]"), None),
+    b"#": (21, None, None, (b"relation", 21), None),
+    b"##": (20, None, None, (b"_concat", 20), None),
+    b"~": (18, None, None, (b"ancestor", 18), None),
+    b"^": (18, None, None, (b"parent", 18), b"parentpost"),
+    b"-": (5, None, (b"negate", 19), (b"minus", 5), None),
+    b"::": (
+        17,
+        b"dagrangeall",
+        (b"dagrangepre", 17),
+        (b"dagrange", 17),
+        b"dagrangepost",
+    ),
+    b"..": (
+        17,
+        b"dagrangeall",
+        (b"dagrangepre", 17),
+        (b"dagrange", 17),
+        b"dagrangepost",
+    ),
+    b":": (15, b"rangeall", (b"rangepre", 15), (b"range", 15), b"rangepost"),
+    b"not": (10, None, (b"not", 10), None, None),
+    b"!": (10, None, (b"not", 10), None, None),
+    b"and": (5, None, None, (b"and", 5), None),
+    b"&": (5, None, None, (b"and", 5), None),
+    b"%": (5, None, None, (b"only", 5), b"onlypost"),
+    b"or": (4, None, None, (b"or", 4), None),
+    b"|": (4, None, None, (b"or", 4), None),
+    b"+": (4, None, None, (b"or", 4), None),
+    b"=": (3, None, None, (b"keyvalue", 3), None),
+    b",": (2, None, None, (b"list", 2), None),
+    b")": (0, None, None, None, None),
+    b"]": (0, None, None, None, None),
+    b"symbol": (0, b"symbol", None, None, None),
+    b"string": (0, b"string", None, None, None),
+    b"end": (0, None, None, None, None),
 }
 
-keywords = {'and', 'or', 'not'}
+keywords = {b'and', b'or', b'not'}
 
 symbols = {}
 
-_quoteletters = {'"', "'"}
-_simpleopletters = set(pycompat.iterbytestr("()[]#:=,-|&+!~^%"))
+_quoteletters = {b'"', b"'"}
+_simpleopletters = set(pycompat.iterbytestr(b"()[]#:=,-|&+!~^%"))
 
 # default set of valid characters for the initial letter of symbols
-_syminitletters = set(pycompat.iterbytestr(
-    pycompat.sysbytes(string.ascii_letters) +
-    pycompat.sysbytes(string.digits) +
-    '._@')) | set(map(pycompat.bytechr, pycompat.xrange(128, 256)))
+_syminitletters = set(
+    pycompat.iterbytestr(
+        pycompat.sysbytes(string.ascii_letters)
+        + pycompat.sysbytes(string.digits)
+        + b'._@'
+    )
+) | set(map(pycompat.bytechr, pycompat.xrange(128, 256)))
 
 # default set of valid characters for non-initial letters of symbols
-_symletters = _syminitletters | set(pycompat.iterbytestr('-/'))
+_symletters = _syminitletters | set(pycompat.iterbytestr(b'-/'))
+
 
 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
     '''
@@ -91,8 +104,9 @@
 
     '''
     if not isinstance(program, bytes):
-        raise error.ProgrammingError('revset statement must be bytes, got %r'
-                                     % program)
+        raise error.ProgrammingError(
+            b'revset statement must be bytes, got %r' % program
+        )
     program = pycompat.bytestr(program)
     if syminitletters is None:
         syminitletters = _syminitletters
@@ -102,37 +116,46 @@
     if program and lookup:
         # attempt to parse old-style ranges first to deal with
         # things like old-tag which contain query metacharacters
-        parts = program.split(':', 1)
+        parts = program.split(b':', 1)
         if all(lookup(sym) for sym in parts if sym):
             if parts[0]:
-                yield ('symbol', parts[0], 0)
+                yield (b'symbol', parts[0], 0)
             if len(parts) > 1:
                 s = len(parts[0])
-                yield (':', None, s)
+                yield (b':', None, s)
                 if parts[1]:
-                    yield ('symbol', parts[1], s + 1)
-            yield ('end', None, len(program))
+                    yield (b'symbol', parts[1], s + 1)
+            yield (b'end', None, len(program))
             return
 
     pos, l = 0, len(program)
     while pos < l:
         c = program[pos]
-        if c.isspace(): # skip inter-token whitespace
+        if c.isspace():  # skip inter-token whitespace
             pass
-        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
-            yield ('::', None, pos)
-            pos += 1 # skip ahead
-        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
-            yield ('..', None, pos)
-            pos += 1 # skip ahead
-        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
-            yield ('##', None, pos)
-            pos += 1 # skip ahead
-        elif c in _simpleopletters: # handle simple operators
+        elif (
+            c == b':' and program[pos : pos + 2] == b'::'
+        ):  # look ahead carefully
+            yield (b'::', None, pos)
+            pos += 1  # skip ahead
+        elif (
+            c == b'.' and program[pos : pos + 2] == b'..'
+        ):  # look ahead carefully
+            yield (b'..', None, pos)
+            pos += 1  # skip ahead
+        elif (
+            c == b'#' and program[pos : pos + 2] == b'##'
+        ):  # look ahead carefully
+            yield (b'##', None, pos)
+            pos += 1  # skip ahead
+        elif c in _simpleopletters:  # handle simple operators
             yield (c, None, pos)
-        elif (c in _quoteletters or c == 'r' and
-              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
-            if c == 'r':
+        elif (
+            c in _quoteletters
+            or c == b'r'
+            and program[pos : pos + 2] in (b"r'", b'r"')
+        ):  # handle quoted strings
+            if c == b'r':
                 pos += 1
                 c = program[pos]
                 decode = lambda x: x
@@ -140,71 +163,78 @@
                 decode = parser.unescapestr
             pos += 1
             s = pos
-            while pos < l: # find closing quote
+            while pos < l:  # find closing quote
                 d = program[pos]
-                if d == '\\': # skip over escaped characters
+                if d == b'\\':  # skip over escaped characters
                     pos += 2
                     continue
                 if d == c:
-                    yield ('string', decode(program[s:pos]), s)
+                    yield (b'string', decode(program[s:pos]), s)
                     break
                 pos += 1
             else:
-                raise error.ParseError(_("unterminated string"), s)
+                raise error.ParseError(_(b"unterminated string"), s)
         # gather up a symbol/keyword
         elif c in syminitletters:
             s = pos
             pos += 1
-            while pos < l: # find end of symbol
+            while pos < l:  # find end of symbol
                 d = program[pos]
                 if d not in symletters:
                     break
-                if d == '.' and program[pos - 1] == '.': # special case for ..
+                if (
+                    d == b'.' and program[pos - 1] == b'.'
+                ):  # special case for ..
                     pos -= 1
                     break
                 pos += 1
             sym = program[s:pos]
-            if sym in keywords: # operator keywords
+            if sym in keywords:  # operator keywords
                 yield (sym, None, s)
-            elif '-' in sym:
+            elif b'-' in sym:
                 # some jerk gave us foo-bar-baz, try to check if it's a symbol
                 if lookup and lookup(sym):
                     # looks like a real symbol
-                    yield ('symbol', sym, s)
+                    yield (b'symbol', sym, s)
                 else:
                     # looks like an expression
-                    parts = sym.split('-')
+                    parts = sym.split(b'-')
                     for p in parts[:-1]:
-                        if p: # possible consecutive -
-                            yield ('symbol', p, s)
+                        if p:  # possible consecutive -
+                            yield (b'symbol', p, s)
                         s += len(p)
-                        yield ('-', None, s)
+                        yield (b'-', None, s)
                         s += 1
-                    if parts[-1]: # possible trailing -
-                        yield ('symbol', parts[-1], s)
+                    if parts[-1]:  # possible trailing -
+                        yield (b'symbol', parts[-1], s)
             else:
-                yield ('symbol', sym, s)
+                yield (b'symbol', sym, s)
             pos -= 1
         else:
-            raise error.ParseError(_("syntax error in revset '%s'") %
-                                   program, pos)
+            raise error.ParseError(
+                _(b"syntax error in revset '%s'") % program, pos
+            )
         pos += 1
-    yield ('end', None, pos)
+    yield (b'end', None, pos)
+
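The two-character look-ahead above (``::``, ``..``, ``##``) is the only
stateful part of the scanner; a reduced sketch of just that dispatch, as a
toy scanner for illustration only::

   def scan_ops(program):
       pos, out = 0, []
       while pos < len(program):
           c = program[pos]
           if c in ':.#' and program[pos:pos + 2] == c * 2:
               out.append(c * 2)   # '::', '..' or '##'
               pos += 1            # consume the second character too
           else:
               out.append(c)
           pos += 1
       return out

   assert scan_ops('a::b..c#d') == ['a', '::', 'b', '..', 'c', '#', 'd']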
 
 # helpers
 
 _notset = object()
 
+
 def getsymbol(x):
-    if x and x[0] == 'symbol':
+    if x and x[0] == b'symbol':
         return x[1]
-    raise error.ParseError(_('not a symbol'))
+    raise error.ParseError(_(b'not a symbol'))
+
 
 def getstring(x, err):
-    if x and (x[0] == 'string' or x[0] == 'symbol'):
+    if x and (x[0] == b'string' or x[0] == b'symbol'):
         return x[1]
     raise error.ParseError(err)
 
+
 def getinteger(x, err, default=_notset):
     if not x and default is not _notset:
         return default
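
The byteified comparisons in the tokenizer hunk above (``d == b'\\'`` and
friends) only work because the function wraps its input in
``pycompat.bytestr`` earlier (outside this hunk), whose indexing returns a
1-byte string instead of the integer that plain Python 3 ``bytes`` indexing
yields. A minimal sketch of that pitfall, using a hypothetical stand-in
class::

    # Plain Python 3, no Mercurial imports; `bytestr` here is a
    # hypothetical stand-in for mercurial.pycompat.bytestr.
    class bytestr(bytes):
        def __getitem__(self, key):
            # return a 1-byte slice rather than an int
            return bytes.__getitem__(self, slice(key, key + 1))

    program = bytestr(b'a\\b')
    assert b'a\\b'[1] == 92      # plain bytes indexing gives an int
    assert program[1] == b'\\'   # bytestr gives b'\\', as the loop expects
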
@@ -213,58 +243,71 @@
     except ValueError:
         raise error.ParseError(err)
 
+
 def getboolean(x, err):
     value = stringutil.parsebool(getsymbol(x))
     if value is not None:
         return value
     raise error.ParseError(err)
 
+
 def getlist(x):
     if not x:
         return []
-    if x[0] == 'list':
+    if x[0] == b'list':
         return list(x[1:])
     return [x]
 
+
 def getrange(x, err):
     if not x:
         raise error.ParseError(err)
     op = x[0]
-    if op == 'range':
+    if op == b'range':
         return x[1], x[2]
-    elif op == 'rangepre':
+    elif op == b'rangepre':
         return None, x[1]
-    elif op == 'rangepost':
+    elif op == b'rangepost':
         return x[1], None
-    elif op == 'rangeall':
+    elif op == b'rangeall':
         return None, None
     raise error.ParseError(err)
 
+
 def getintrange(x, err1, err2, deffirst=_notset, deflast=_notset):
     """Get [first, last] integer range (both inclusive) from a parsed tree
 
     If either side is omitted and no default is provided, ParseError will
     be raised.
     """
-    if x and (x[0] == 'string' or x[0] == 'symbol'):
+    if x and (x[0] == b'string' or x[0] == b'symbol'):
         n = getinteger(x, err1)
         return n, n
     a, b = getrange(x, err1)
     return getinteger(a, err2, deffirst), getinteger(b, err2, deflast)
 
+
 def getargs(x, min, max, err):
     l = getlist(x)
     if len(l) < min or (max >= 0 and len(l) > max):
         raise error.ParseError(err)
     return l
 
+
 def getargsdict(x, funcname, keys):
-    return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
-                                keyvaluenode='keyvalue', keynode='symbol')
+    return parser.buildargsdict(
+        getlist(x),
+        funcname,
+        parser.splitargspec(keys),
+        keyvaluenode=b'keyvalue',
+        keynode=b'symbol',
+    )
+
 
 # cache of {spec: raw parsed tree} built internally
 _treecache = {}
 
+
 def _cachedtree(spec):
     # thread safe because parse() is reentrant and dict.__setitem__() is atomic
     tree = _treecache.get(spec)
@@ -272,6 +315,7 @@
         _treecache[spec] = tree = parse(spec)
     return tree
 
+
 def _build(tmplspec, *repls):
     """Create raw parsed tree from a template revset statement
 
@@ -279,7 +323,8 @@
     ('and', ('func', ('symbol', 'f'), ('string', '1')), ('symbol', '2'))
     """
     template = _cachedtree(tmplspec)
-    return parser.buildtree(template, ('symbol', '_'), *repls)
+    return parser.buildtree(template, (b'symbol', b'_'), *repls)
+
 
 def _match(patspec, tree):
     """Test if a tree matches the given pattern statement; return the matches
@@ -290,11 +335,14 @@
     >>> _match(b'f(_)', parse(b'f(1, 2)'))
     """
     pattern = _cachedtree(patspec)
-    return parser.matchtree(pattern, tree, ('symbol', '_'),
-                            {'keyvalue', 'list'})
+    return parser.matchtree(
+        pattern, tree, (b'symbol', b'_'), {b'keyvalue', b'list'}
+    )
+
 
 def _matchonly(revs, bases):
-    return _match('ancestors(_) and not ancestors(_)', ('and', revs, bases))
+    return _match(b'ancestors(_) and not ancestors(_)', (b'and', revs, bases))
+
 
 def _fixops(x):
     """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
@@ -303,73 +351,82 @@
         return x
 
     op = x[0]
-    if op == 'parent':
+    if op == b'parent':
         # x^:y means (x^) : y, not x ^ (:y)
         # x^:  means (x^) :,   not x ^ (:)
-        post = ('parentpost', x[1])
-        if x[2][0] == 'dagrangepre':
-            return _fixops(('dagrange', post, x[2][1]))
-        elif x[2][0] == 'dagrangeall':
-            return _fixops(('dagrangepost', post))
-        elif x[2][0] == 'rangepre':
-            return _fixops(('range', post, x[2][1]))
-        elif x[2][0] == 'rangeall':
-            return _fixops(('rangepost', post))
-    elif op == 'or':
+        post = (b'parentpost', x[1])
+        if x[2][0] == b'dagrangepre':
+            return _fixops((b'dagrange', post, x[2][1]))
+        elif x[2][0] == b'dagrangeall':
+            return _fixops((b'dagrangepost', post))
+        elif x[2][0] == b'rangepre':
+            return _fixops((b'range', post, x[2][1]))
+        elif x[2][0] == b'rangeall':
+            return _fixops((b'rangepost', post))
+    elif op == b'or':
         # make number of arguments deterministic:
         # x + y + z -> (or x y z) -> (or (list x y z))
-        return (op, _fixops(('list',) + x[1:]))
-    elif op == 'subscript' and x[1][0] == 'relation':
+        return (op, _fixops((b'list',) + x[1:]))
+    elif op == b'subscript' and x[1][0] == b'relation':
         # x#y[z] ternary
-        return _fixops(('relsubscript', x[1][1], x[1][2], x[2]))
+        return _fixops((b'relsubscript', x[1][1], x[1][2], x[2]))
 
     return (op,) + tuple(_fixops(y) for y in x[1:])
 
+
 def _analyze(x):
     if x is None:
         return x
 
     op = x[0]
-    if op == 'minus':
-        return _analyze(_build('_ and not _', *x[1:]))
-    elif op == 'only':
-        return _analyze(_build('only(_, _)', *x[1:]))
-    elif op == 'onlypost':
-        return _analyze(_build('only(_)', x[1]))
-    elif op == 'dagrangeall':
-        raise error.ParseError(_("can't use '::' in this context"))
-    elif op == 'dagrangepre':
-        return _analyze(_build('ancestors(_)', x[1]))
-    elif op == 'dagrangepost':
-        return _analyze(_build('descendants(_)', x[1]))
-    elif op == 'negate':
-        s = getstring(x[1], _("can't negate that"))
-        return _analyze(('string', '-' + s))
-    elif op in ('string', 'symbol', 'smartset'):
+    if op == b'minus':
+        return _analyze(_build(b'_ and not _', *x[1:]))
+    elif op == b'only':
+        return _analyze(_build(b'only(_, _)', *x[1:]))
+    elif op == b'onlypost':
+        return _analyze(_build(b'only(_)', x[1]))
+    elif op == b'dagrangeall':
+        raise error.ParseError(_(b"can't use '::' in this context"))
+    elif op == b'dagrangepre':
+        return _analyze(_build(b'ancestors(_)', x[1]))
+    elif op == b'dagrangepost':
+        return _analyze(_build(b'descendants(_)', x[1]))
+    elif op == b'negate':
+        s = getstring(x[1], _(b"can't negate that"))
+        return _analyze((b'string', b'-' + s))
+    elif op in (b'string', b'symbol', b'smartset'):
         return x
-    elif op == 'rangeall':
+    elif op == b'rangeall':
         return (op, None)
-    elif op in {'or', 'not', 'rangepre', 'rangepost', 'parentpost'}:
+    elif op in {b'or', b'not', b'rangepre', b'rangepost', b'parentpost'}:
         return (op, _analyze(x[1]))
-    elif op == 'group':
+    elif op == b'group':
         return _analyze(x[1])
-    elif op in {'and', 'dagrange', 'range', 'parent', 'ancestor', 'relation',
-                'subscript'}:
+    elif op in {
+        b'and',
+        b'dagrange',
+        b'range',
+        b'parent',
+        b'ancestor',
+        b'relation',
+        b'subscript',
+    }:
         ta = _analyze(x[1])
         tb = _analyze(x[2])
         return (op, ta, tb)
-    elif op == 'relsubscript':
+    elif op == b'relsubscript':
         ta = _analyze(x[1])
         tb = _analyze(x[2])
         tc = _analyze(x[3])
         return (op, ta, tb, tc)
-    elif op == 'list':
+    elif op == b'list':
         return (op,) + tuple(_analyze(y) for y in x[1:])
-    elif op == 'keyvalue':
+    elif op == b'keyvalue':
         return (op, x[1], _analyze(x[2]))
-    elif op == 'func':
+    elif op == b'func':
         return (op, x[1], _analyze(x[2]))
-    raise ValueError('invalid operator %r' % op)
+    raise ValueError(b'invalid operator %r' % op)
+
 
 def analyze(x):
     """Transform raw parsed tree to evaluatable tree which can be fed to
@@ -380,53 +437,56 @@
     """
     return _analyze(x)
 
+
 def _optimize(x):
     if x is None:
         return 0, x
 
     op = x[0]
-    if op in ('string', 'symbol', 'smartset'):
-        return 0.5, x # single revisions are small
-    elif op == 'and':
+    if op in (b'string', b'symbol', b'smartset'):
+        return 0.5, x  # single revisions are small
+    elif op == b'and':
         wa, ta = _optimize(x[1])
         wb, tb = _optimize(x[2])
         w = min(wa, wb)
 
         # (draft/secret/_notpublic() & ::x) have a fast path
-        m = _match('_() & ancestors(_)', ('and', ta, tb))
-        if m and getsymbol(m[1]) in {'draft', 'secret', '_notpublic'}:
-            return w, _build('_phaseandancestors(_, _)', m[1], m[2])
+        m = _match(b'_() & ancestors(_)', (b'and', ta, tb))
+        if m and getsymbol(m[1]) in {b'draft', b'secret', b'_notpublic'}:
+            return w, _build(b'_phaseandancestors(_, _)', m[1], m[2])
 
         # (::x and not ::y)/(not ::y and ::x) have a fast path
         m = _matchonly(ta, tb) or _matchonly(tb, ta)
         if m:
-            return w, _build('only(_, _)', *m[1:])
+            return w, _build(b'only(_, _)', *m[1:])
 
-        m = _match('not _', tb)
+        m = _match(b'not _', tb)
         if m:
-            return wa, ('difference', ta, m[1])
+            return wa, (b'difference', ta, m[1])
         if wa > wb:
-            op = 'andsmally'
+            op = b'andsmally'
         return w, (op, ta, tb)
-    elif op == 'or':
+    elif op == b'or':
         # fast path for machine-generated expressions, which are likely to have
         # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
         ws, ts, ss = [], [], []
+
         def flushss():
             if not ss:
                 return
             if len(ss) == 1:
                 w, t = ss[0]
             else:
-                s = '\0'.join(t[1] for w, t in ss)
-                y = _build('_list(_)', ('string', s))
+                s = b'\0'.join(t[1] for w, t in ss)
+                y = _build(b'_list(_)', (b'string', s))
                 w, t = _optimize(y)
             ws.append(w)
             ts.append(t)
             del ss[:]
+
         for y in getlist(x[1]):
             w, t = _optimize(y)
-            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
+            if t is not None and (t[0] == b'string' or t[0] == b'symbol'):
                 ss.append((w, t))
                 continue
             flushss()
@@ -434,49 +494,50 @@
             ts.append(t)
         flushss()
         if len(ts) == 1:
-            return ws[0], ts[0] # 'or' operation is fully optimized out
-        return max(ws), (op, ('list',) + tuple(ts))
-    elif op == 'not':
+            return ws[0], ts[0]  # 'or' operation is fully optimized out
+        return max(ws), (op, (b'list',) + tuple(ts))
+    elif op == b'not':
         # Optimize not public() to _notpublic() because we have a fast version
-        if _match('public()', x[1]):
-            o = _optimize(_build('_notpublic()'))
+        if _match(b'public()', x[1]):
+            o = _optimize(_build(b'_notpublic()'))
             return o[0], o[1]
         else:
             o = _optimize(x[1])
             return o[0], (op, o[1])
-    elif op == 'rangeall':
+    elif op == b'rangeall':
         return 1, x
-    elif op in ('rangepre', 'rangepost', 'parentpost'):
+    elif op in (b'rangepre', b'rangepost', b'parentpost'):
         o = _optimize(x[1])
         return o[0], (op, o[1])
-    elif op in ('dagrange', 'range'):
+    elif op in (b'dagrange', b'range'):
         wa, ta = _optimize(x[1])
         wb, tb = _optimize(x[2])
         return wa + wb, (op, ta, tb)
-    elif op in ('parent', 'ancestor', 'relation', 'subscript'):
+    elif op in (b'parent', b'ancestor', b'relation', b'subscript'):
         w, t = _optimize(x[1])
         return w, (op, t, x[2])
-    elif op == 'relsubscript':
+    elif op == b'relsubscript':
         w, t = _optimize(x[1])
         return w, (op, t, x[2], x[3])
-    elif op == 'list':
+    elif op == b'list':
         ws, ts = zip(*(_optimize(y) for y in x[1:]))
         return sum(ws), (op,) + ts
-    elif op == 'keyvalue':
+    elif op == b'keyvalue':
         w, t = _optimize(x[2])
         return w, (op, x[1], t)
-    elif op == 'func':
+    elif op == b'func':
         f = getsymbol(x[1])
         wa, ta = _optimize(x[2])
         w = getattr(symbols.get(f), '_weight', 1)
-        m = _match('commonancestors(_)', ta)
+        m = _match(b'commonancestors(_)', ta)
 
         # Optimize heads(commonancestors(_)) because we have a fast version
-        if f == 'heads' and m:
-            return w + wa, _build('_commonancestorheads(_)', m[1])
+        if f == b'heads' and m:
+            return w + wa, _build(b'_commonancestorheads(_)', m[1])
 
         return w + wa, (op, x[1], ta)
-    raise ValueError('invalid operator %r' % op)
+    raise ValueError(b'invalid operator %r' % op)
+
 
 def optimize(tree):
     """Optimize evaluatable tree
@@ -486,9 +547,11 @@
     _weight, newtree = _optimize(tree)
     return newtree
 
+
 # the set of valid characters for the initial letter of symbols in
 # alias declarations and definitions
-_aliassyminitletters = _syminitletters | {'$'}
+_aliassyminitletters = _syminitletters | {b'$'}
+
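
As a quick reference for the tree shapes these helpers pass around, a hedged
sketch of parsing a trivial revset, printed via the ``prettyformat()`` helper
defined further down (output shown roughly)::

    from mercurial import revsetlang

    tree = revsetlang.parse(b'heads(default)')
    print(revsetlang.prettyformat(tree))
    # (func
    #   (symbol 'heads')
    #   (symbol 'default'))
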
 
 def _parsewith(spec, lookup=None, syminitletters=None):
     """Generate a parse tree of given spec with given tokenizing options
@@ -504,18 +567,21 @@
       ...
     ParseError: ('invalid token', 4)
     """
-    if lookup and spec.startswith('revset(') and spec.endswith(')'):
+    if lookup and spec.startswith(b'revset(') and spec.endswith(b')'):
         lookup = None
     p = parser.parser(elements)
-    tree, pos = p.parse(tokenize(spec, lookup=lookup,
-                                 syminitletters=syminitletters))
+    tree, pos = p.parse(
+        tokenize(spec, lookup=lookup, syminitletters=syminitletters)
+    )
     if pos != len(spec):
-        raise error.ParseError(_('invalid token'), pos)
-    return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
+        raise error.ParseError(_(b'invalid token'), pos)
+    return _fixops(parser.simplifyinfixops(tree, (b'list', b'or')))
+
 
 class _aliasrules(parser.basealiasrules):
     """Parsing and expansion rule set of revset aliases"""
-    _section = _('revset alias')
+
+    _section = _(b'revset alias')
 
     @staticmethod
     def _parse(spec):
@@ -529,43 +595,49 @@
 
     @staticmethod
     def _trygetfunc(tree):
-        if tree[0] == 'func' and tree[1][0] == 'symbol':
+        if tree[0] == b'func' and tree[1][0] == b'symbol':
             return tree[1][1], getlist(tree[2])
 
+
 def expandaliases(tree, aliases, warn=None):
     """Expand aliases in a tree, aliases is a list of (name, value) tuples"""
     aliases = _aliasrules.buildmap(aliases)
     tree = _aliasrules.expand(aliases, tree)
     # warn about problematic (but not referred) aliases
     if warn is not None:
-        for name, alias in sorted(aliases.iteritems()):
+        for name, alias in sorted(pycompat.iteritems(aliases)):
             if alias.error and not alias.warned:
-                warn(_('warning: %s\n') % (alias.error))
+                warn(_(b'warning: %s\n') % (alias.error))
                 alias.warned = True
     return tree
 
+
 def foldconcat(tree):
     """Fold elements to be concatenated by `##`
     """
-    if (not isinstance(tree, tuple)
-        or tree[0] in ('string', 'symbol', 'smartset')):
+    if not isinstance(tree, tuple) or tree[0] in (
+        b'string',
+        b'symbol',
+        b'smartset',
+    ):
         return tree
-    if tree[0] == '_concat':
+    if tree[0] == b'_concat':
         pending = [tree]
         l = []
         while pending:
             e = pending.pop()
-            if e[0] == '_concat':
+            if e[0] == b'_concat':
                 pending.extend(reversed(e[1:]))
-            elif e[0] in ('string', 'symbol'):
+            elif e[0] in (b'string', b'symbol'):
                 l.append(e[1])
             else:
-                msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
+                msg = _(b"\"##\" can't concatenate \"%s\" element") % (e[0])
                 raise error.ParseError(msg)
-        return ('string', ''.join(l))
+        return (b'string', b''.join(l))
     else:
         return tuple(foldconcat(t) for t in tree)
 
+
 def parse(spec, lookup=None):
     try:
         return _parsewith(spec, lookup=lookup)
@@ -573,14 +645,15 @@
         if len(inst.args) > 1:  # has location
             loc = inst.args[1]
             # Remove newlines -- spaces are equivalent whitespace.
-            spec = spec.replace('\n', ' ')
+            spec = spec.replace(b'\n', b' ')
             # We want the caret to point to the place in the template that
             # failed to parse, but in a hint we get an open paren at the
             # start. Therefore, we print "loc + 1" spaces (instead of "loc")
             # to line up the caret with the location of the error.
-            inst.hint = spec + '\n' + ' ' * (loc + 1) + '^ ' + _('here')
+            inst.hint = spec + b'\n' + b' ' * (loc + 1) + b'^ ' + _(b'here')
         raise
 
+
 def _quote(s):
     r"""Quote a value in order to make it safe for the revset engine.
 
@@ -593,67 +666,73 @@
     >>> _quote(1)
     "'1'"
     """
-    return "'%s'" % stringutil.escapestr(pycompat.bytestr(s))
+    return b"'%s'" % stringutil.escapestr(pycompat.bytestr(s))
+
 
 def _formatargtype(c, arg):
-    if c == 'd':
-        return '_rev(%d)' % int(arg)
-    elif c == 's':
+    if c == b'd':
+        return b'_rev(%d)' % int(arg)
+    elif c == b's':
         return _quote(arg)
-    elif c == 'r':
+    elif c == b'r':
         if not isinstance(arg, bytes):
             raise TypeError
-        parse(arg) # make sure syntax errors are confined
-        return '(%s)' % arg
-    elif c == 'n':
+        parse(arg)  # make sure syntax errors are confined
+        return b'(%s)' % arg
+    elif c == b'n':
         return _quote(node.hex(arg))
-    elif c == 'b':
+    elif c == b'b':
         try:
             return _quote(arg.branch())
         except AttributeError:
             raise TypeError
-    raise error.ParseError(_('unexpected revspec format character %s') % c)
+    raise error.ParseError(_(b'unexpected revspec format character %s') % c)
+
 
 def _formatlistexp(s, t):
     l = len(s)
     if l == 0:
-        return "_list('')"
+        return b"_list('')"
     elif l == 1:
         return _formatargtype(t, s[0])
-    elif t == 'd':
+    elif t == b'd':
         return _formatintlist(s)
-    elif t == 's':
-        return "_list(%s)" % _quote("\0".join(s))
-    elif t == 'n':
-        return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
-    elif t == 'b':
+    elif t == b's':
+        return b"_list(%s)" % _quote(b"\0".join(s))
+    elif t == b'n':
+        return b"_hexlist('%s')" % b"\0".join(node.hex(a) for a in s)
+    elif t == b'b':
         try:
-            return "_list('%s')" % "\0".join(a.branch() for a in s)
+            return b"_list('%s')" % b"\0".join(a.branch() for a in s)
         except AttributeError:
             raise TypeError
 
     m = l // 2
-    return '(%s or %s)' % (_formatlistexp(s[:m], t), _formatlistexp(s[m:], t))
+    return b'(%s or %s)' % (_formatlistexp(s[:m], t), _formatlistexp(s[m:], t))
+
 
 def _formatintlist(data):
     try:
         l = len(data)
         if l == 0:
-            return "_list('')"
+            return b"_list('')"
         elif l == 1:
-            return _formatargtype('d', data[0])
-        return "_intlist('%s')" % "\0".join('%d' % int(a) for a in data)
+            return _formatargtype(b'd', data[0])
+        return b"_intlist('%s')" % b"\0".join(b'%d' % int(a) for a in data)
     except (TypeError, ValueError):
-        raise error.ParseError(_('invalid argument for revspec'))
+        raise error.ParseError(_(b'invalid argument for revspec'))
+
 
 def _formatparamexp(args, t):
-    return ', '.join(_formatargtype(t, a) for a in args)
+    return b', '.join(_formatargtype(t, a) for a in args)
+
 
 _formatlistfuncs = {
-    'l': _formatlistexp,
-    'p': _formatparamexp,
+    b'l': _formatlistexp,
+    b'p': _formatparamexp,
 }
 
+
 def formatspec(expr, *args):
     '''
     This is a convenience function for using revsets internally, and
@@ -696,14 +775,15 @@
     for t, arg in parsed:
         if t is None:
             ret.append(arg)
-        elif t == 'baseset':
+        elif t == b'baseset':
             if isinstance(arg, set):
                 arg = sorted(arg)
             ret.append(_formatintlist(list(arg)))
         else:
-            raise error.ProgrammingError("unknown revspec item type: %r" % t)
+            raise error.ProgrammingError(b"unknown revspec item type: %r" % t)
     return b''.join(ret)
 
+
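
A hedged usage sketch of ``formatspec()`` as exercised by the format
characters handled above (``%d`` revision number, ``%s`` string, ``%ld`` list
of revisions); the resulting bytes are safe to hand to the revset engine
without manual quoting::

    from mercurial import revsetlang

    spec = revsetlang.formatspec(b'%ld and keyword(%s)', [10, 11, 12], b'bug')
    # roughly b"_intlist('10\x0011\x0012') and keyword('bug')"
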
 def spectree(expr, *args):
     """similar to formatspec but return a parsed and optimized tree"""
     parsed = _parseargs(expr, args)
@@ -712,20 +792,21 @@
     for t, arg in parsed:
         if t is None:
             ret.append(arg)
-        elif t == 'baseset':
-            newtree = ('smartset', smartset.baseset(arg))
+        elif t == b'baseset':
+            newtree = (b'smartset', smartset.baseset(arg))
             inputs.append(newtree)
-            ret.append("$")
+            ret.append(b"$")
         else:
-            raise error.ProgrammingError("unknown revspec item type: %r" % t)
+            raise error.ProgrammingError(b"unknown revspec item type: %r" % t)
     expr = b''.join(ret)
     tree = _parsewith(expr, syminitletters=_aliassyminitletters)
-    tree = parser.buildtree(tree, ('symbol', '$'), *inputs)
+    tree = parser.buildtree(tree, (b'symbol', b'$'), *inputs)
     tree = foldconcat(tree)
     tree = analyze(tree)
     tree = optimize(tree)
     return tree
 
+
 def _parseargs(expr, args):
     """parse the expression and replace all inexpensive args
 
@@ -740,7 +821,7 @@
     ret = []
     pos = 0
     while pos < len(expr):
-        q = expr.find('%', pos)
+        q = expr.find(b'%', pos)
         if q < 0:
             ret.append((None, expr[pos:]))
             break
@@ -749,8 +830,8 @@
         try:
             d = expr[pos]
         except IndexError:
-            raise error.ParseError(_('incomplete revspec format character'))
-        if d == '%':
+            raise error.ParseError(_(b'incomplete revspec format character'))
+        if d == b'%':
             ret.append((None, d))
             pos += 1
             continue
@@ -758,44 +839,48 @@
         try:
             arg = next(argiter)
         except StopIteration:
-            raise error.ParseError(_('missing argument for revspec'))
+            raise error.ParseError(_(b'missing argument for revspec'))
         f = _formatlistfuncs.get(d)
         if f:
             # a list of some type, might be expensive, do not replace
             pos += 1
-            islist = (d == 'l')
+            islist = d == b'l'
             try:
                 d = expr[pos]
             except IndexError:
-                raise error.ParseError(_('incomplete revspec format character'))
-            if islist and d == 'd' and arg:
+                raise error.ParseError(
+                    _(b'incomplete revspec format character')
+                )
+            if islist and d == b'd' and arg:
                 # we don't create a baseset yet, because it comes with an
                 # extra cost. If we are going to serialize it, we'd better
                 # skip it.
-                ret.append(('baseset', arg))
+                ret.append((b'baseset', arg))
                 pos += 1
                 continue
             try:
                 ret.append((None, f(list(arg), d)))
             except (TypeError, ValueError):
-                raise error.ParseError(_('invalid argument for revspec'))
+                raise error.ParseError(_(b'invalid argument for revspec'))
         else:
             # a single entry, not expensive, replace
             try:
                 ret.append((None, _formatargtype(d, arg)))
             except (TypeError, ValueError):
-                raise error.ParseError(_('invalid argument for revspec'))
+                raise error.ParseError(_(b'invalid argument for revspec'))
         pos += 1
 
     try:
         next(argiter)
-        raise error.ParseError(_('too many revspec arguments specified'))
+        raise error.ParseError(_(b'too many revspec arguments specified'))
     except StopIteration:
         pass
     return ret
 
+
 def prettyformat(tree):
-    return parser.prettyformat(tree, ('string', 'symbol'))
+    return parser.prettyformat(tree, (b'string', b'symbol'))
+
 
 def depth(tree):
     if isinstance(tree, tuple):
@@ -803,23 +888,27 @@
     else:
         return 0
 
+
 def funcsused(tree):
-    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
+    if not isinstance(tree, tuple) or tree[0] in (b'string', b'symbol'):
         return set()
     else:
         funcs = set()
         for s in tree[1:]:
             funcs |= funcsused(s)
-        if tree[0] == 'func':
+        if tree[0] == b'func':
             funcs.add(tree[1][1])
         return funcs
 
-_hashre = util.re.compile('[0-9a-fA-F]{1,40}$')
+
+_hashre = util.re.compile(b'[0-9a-fA-F]{1,40}$')
+
 
 def _ishashlikesymbol(symbol):
     """returns true if the symbol looks like a hash"""
     return _hashre.match(symbol)
 
+
 def gethashlikesymbols(tree):
     """returns the list of symbols of the tree that look like hashes
 
@@ -835,7 +924,7 @@
     if not tree:
         return []
 
-    if tree[0] == "symbol":
+    if tree[0] == b"symbol":
         if _ishashlikesymbol(tree[1]):
             return [tree[1]]
     elif len(tree) >= 3:
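
For the hash-detection helpers above, a hedged behaviour sketch based on
``_hashre`` (any 1-to-40 character hex run qualifies, so short decimal
symbols match too; output shown roughly)::

    from mercurial import revsetlang

    tree = revsetlang.parse(b'cafe1234::default')
    print(revsetlang.gethashlikesymbols(tree))  # roughly [b'cafe1234']
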
--- a/mercurial/rewriteutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/rewriteutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,29 +16,31 @@
     revset,
 )
 
-def precheck(repo, revs, action='rewrite'):
+
+def precheck(repo, revs, action=b'rewrite'):
     """check if revs can be rewritten
     action is used to control the error message.
 
     Make sure this function is called after taking the lock.
     """
     if node.nullrev in revs:
-        msg = _("cannot %s null changeset") % (action)
-        hint = _("no changeset checked out")
+        msg = _(b"cannot %s null changeset") % action
+        hint = _(b"no changeset checked out")
         raise error.Abort(msg, hint=hint)
 
     if len(repo[None].parents()) > 1:
-        raise error.Abort(_("cannot %s while merging") % action)
+        raise error.Abort(_(b"cannot %s while merging") % action)
 
-    publicrevs = repo.revs('%ld and public()', revs)
+    publicrevs = repo.revs(b'%ld and public()', revs)
     if publicrevs:
-        msg = _("cannot %s public changesets") % (action)
-        hint = _("see 'hg help phases' for details")
+        msg = _(b"cannot %s public changesets") % action
+        hint = _(b"see 'hg help phases' for details")
         raise error.Abort(msg, hint=hint)
 
     newunstable = disallowednewunstable(repo, revs)
     if newunstable:
-        raise error.Abort(_("cannot %s changeset with children") % action)
+        raise error.Abort(_(b"cannot %s changeset with children") % action)
+
 
 def disallowednewunstable(repo, revs):
     """Checks whether editing the revs will create new unstable changesets and
@@ -50,4 +52,4 @@
     allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
     if allowunstable:
         return revset.baseset()
-    return repo.revs("(%ld::) - %ld", revs, revs)
+    return repo.revs(b"(%ld::) - %ld", revs, revs)
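
A hedged sketch of the calling convention ``precheck()`` documents (take the
locks first, then let it veto rewrites of null, public, merging, or
would-be-unstable revisions); the ``b'amend'`` action string is only an
example, and ``revs`` is assumed to be integer revision numbers::

    from mercurial import error, rewriteutil

    def canrewrite(repo, revs):
        with repo.wlock(), repo.lock():
            try:
                rewriteutil.precheck(repo, revs, action=b'amend')
            except error.Abort:
                return False
            return True
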
--- a/mercurial/scmposix.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/scmposix.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,6 +6,7 @@
 import os
 import sys
 
+from .pycompat import getattr
 from . import (
     encoding,
     pycompat,
@@ -16,49 +17,60 @@
 # $MORE variable, but there's no compatible option with Linux 'more'. Given
 # OS X is widely used and most modern Unix systems would have 'less', setting
 # 'less' as the default seems reasonable.
-fallbackpager = 'less'
+fallbackpager = b'less'
+
 
 def _rcfiles(path):
-    rcs = [os.path.join(path, 'hgrc')]
-    rcdir = os.path.join(path, 'hgrc.d')
+    rcs = [os.path.join(path, b'hgrc')]
+    rcdir = os.path.join(path, b'hgrc.d')
     try:
-        rcs.extend([os.path.join(rcdir, f)
-                    for f, kind in util.listdir(rcdir)
-                    if f.endswith(".rc")])
+        rcs.extend(
+            [
+                os.path.join(rcdir, f)
+                for f, kind in util.listdir(rcdir)
+                if f.endswith(b".rc")
+            ]
+        )
     except OSError:
         pass
     return rcs
 
+
 def systemrcpath():
     path = []
-    if pycompat.sysplatform == 'plan9':
-        root = 'lib/mercurial'
+    if pycompat.sysplatform == b'plan9':
+        root = b'lib/mercurial'
     else:
-        root = 'etc/mercurial'
+        root = b'etc/mercurial'
     # old mod_python does not set sys.argv
     if len(getattr(sys, 'argv', [])) > 0:
         p = os.path.dirname(os.path.dirname(pycompat.sysargv[0]))
-        if p != '/':
+        if p != b'/':
             path.extend(_rcfiles(os.path.join(p, root)))
-    path.extend(_rcfiles('/' + root))
+    path.extend(_rcfiles(b'/' + root))
     return path
 
+
 def userrcpath():
-    if pycompat.sysplatform == 'plan9':
-        return [encoding.environ['home'] + '/lib/hgrc']
+    if pycompat.sysplatform == b'plan9':
+        return [encoding.environ[b'home'] + b'/lib/hgrc']
     elif pycompat.isdarwin:
-        return [os.path.expanduser('~/.hgrc')]
+        return [os.path.expanduser(b'~/.hgrc')]
     else:
-        confighome = encoding.environ.get('XDG_CONFIG_HOME')
+        confighome = encoding.environ.get(b'XDG_CONFIG_HOME')
         if confighome is None or not os.path.isabs(confighome):
-            confighome = os.path.expanduser('~/.config')
+            confighome = os.path.expanduser(b'~/.config')
 
-        return [os.path.expanduser('~/.hgrc'),
-                os.path.join(confighome, 'hg', 'hgrc')]
+        return [
+            os.path.expanduser(b'~/.hgrc'),
+            os.path.join(confighome, b'hg', b'hgrc'),
+        ]
+
 
 def termsize(ui):
     try:
         import termios
+
         TIOCGWINSZ = termios.TIOCGWINSZ  # unavailable on IRIX (issue3449)
     except (AttributeError, ImportError):
         return 80, 24
@@ -71,7 +83,7 @@
                 continue
             if not os.isatty(fd):
                 continue
-            arri = fcntl.ioctl(fd, TIOCGWINSZ, '\0' * 8)
+            arri = fcntl.ioctl(fd, TIOCGWINSZ, b'\0' * 8)
             height, width = array.array(r'h', arri)[:2]
             if width > 0 and height > 0:
                 return width, height
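
For reference, a standalone sketch of the ``TIOCGWINSZ`` probe used by
``termsize()`` above: the kernel fills a ``struct winsize`` of four unsigned
shorts, hence the 8 NUL bytes passed in and the ``array.array('h')`` decode::

    import array
    import fcntl
    import termios

    def tty_size(fd=1):
        # assumes fd may not be a tty; fall back like the code above
        try:
            arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, b'\0' * 8)
            height, width = array.array('h', arri)[:2]
            if width > 0 and height > 0:
                return width, height
        except OSError:
            pass
        return 80, 24
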
--- a/mercurial/scmutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/scmutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -26,6 +26,7 @@
     wdirid,
     wdirrev,
 )
+from .pycompat import getattr
 
 from . import (
     copies as copiesmod,
@@ -60,6 +61,7 @@
 
 termsize = scmplatform.termsize
 
+
 class status(tuple):
     '''Named tuple with a list of files per status. The 'deleted', 'unknown'
        and 'ignored' properties are only relevant to the working copy.
@@ -67,10 +69,12 @@
 
     __slots__ = ()
 
-    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
-                clean):
-        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
-                                   ignored, clean))
+    def __new__(
+        cls, modified, added, removed, deleted, unknown, ignored, clean
+    ):
+        return tuple.__new__(
+            cls, (modified, added, removed, deleted, unknown, ignored, clean)
+        )
 
     @property
     def modified(self):
@@ -110,9 +114,11 @@
         return self[6]
 
     def __repr__(self, *args, **kwargs):
-        return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
-                 r'unknown=%s, ignored=%s, clean=%s>') %
-                tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
+        return (
+            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
+            r'unknown=%s, ignored=%s, clean=%s>'
+        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
+
 
 def itersubrepos(ctx1, ctx2):
     """find subrepos in ctx1 or ctx2"""
@@ -129,7 +135,7 @@
             del subpaths[subpath]
             missing.add(subpath)
 
-    for subpath, ctx in sorted(subpaths.iteritems()):
+    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
         yield subpath, ctx.sub(subpath)
 
     # Yield an empty subrepo based on ctx1 for anything only in ctx2.  That way,
@@ -139,6 +145,7 @@
     for subpath in missing:
         yield subpath, ctx2.nullsub(subpath, ctx1)
 
+
 def nochangesfound(ui, repo, excluded=None):
     '''Report no changes for push/pull; excluded is None or a list of
     nodes excluded from the push/pull.
@@ -151,10 +158,13 @@
                 secretlist.append(n)
 
     if secretlist:
-        ui.status(_("no changes found (ignored %d secret changesets)\n")
-                  % len(secretlist))
+        ui.status(
+            _(b"no changes found (ignored %d secret changesets)\n")
+            % len(secretlist)
+        )
     else:
-        ui.status(_("no changes found\n"))
+        ui.status(_(b"no changes found\n"))
+
 
 def callcatch(ui, func):
     """call func() with global exception handling
@@ -165,79 +175,86 @@
     try:
         try:
             return func()
-        except: # re-raises
+        except:  # re-raises
             ui.traceback()
             raise
     # Global exception handling, alphabetically
     # Mercurial-specific first, followed by built-in and library exceptions
     except error.LockHeld as inst:
         if inst.errno == errno.ETIMEDOUT:
-            reason = _('timed out waiting for lock held by %r') % (
-                pycompat.bytestr(inst.locker))
+            reason = _(b'timed out waiting for lock held by %r') % (
+                pycompat.bytestr(inst.locker)
+            )
         else:
-            reason = _('lock held by %r') % inst.locker
-        ui.error(_("abort: %s: %s\n") % (
-            inst.desc or stringutil.forcebytestr(inst.filename), reason))
+            reason = _(b'lock held by %r') % inst.locker
+        ui.error(
+            _(b"abort: %s: %s\n")
+            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
+        )
         if not inst.locker:
-            ui.error(_("(lock might be very busy)\n"))
+            ui.error(_(b"(lock might be very busy)\n"))
     except error.LockUnavailable as inst:
-        ui.error(_("abort: could not lock %s: %s\n") %
-                 (inst.desc or stringutil.forcebytestr(inst.filename),
-                  encoding.strtolocal(inst.strerror)))
+        ui.error(
+            _(b"abort: could not lock %s: %s\n")
+            % (
+                inst.desc or stringutil.forcebytestr(inst.filename),
+                encoding.strtolocal(inst.strerror),
+            )
+        )
     except error.OutOfBandError as inst:
         if inst.args:
-            msg = _("abort: remote error:\n")
+            msg = _(b"abort: remote error:\n")
         else:
-            msg = _("abort: remote error\n")
+            msg = _(b"abort: remote error\n")
         ui.error(msg)
         if inst.args:
-            ui.error(''.join(inst.args))
+            ui.error(b''.join(inst.args))
         if inst.hint:
-            ui.error('(%s)\n' % inst.hint)
+            ui.error(b'(%s)\n' % inst.hint)
     except error.RepoError as inst:
-        ui.error(_("abort: %s!\n") % inst)
+        ui.error(_(b"abort: %s!\n") % inst)
         if inst.hint:
-            ui.error(_("(%s)\n") % inst.hint)
+            ui.error(_(b"(%s)\n") % inst.hint)
     except error.ResponseError as inst:
-        ui.error(_("abort: %s") % inst.args[0])
+        ui.error(_(b"abort: %s") % inst.args[0])
         msg = inst.args[1]
         if isinstance(msg, type(u'')):
             msg = pycompat.sysbytes(msg)
         if not isinstance(msg, bytes):
-            ui.error(" %r\n" % (msg,))
+            ui.error(b" %r\n" % (msg,))
         elif not msg:
-            ui.error(_(" empty string\n"))
+            ui.error(_(b" empty string\n"))
         else:
-            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
+            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
     except error.CensoredNodeError as inst:
-        ui.error(_("abort: file censored %s!\n") % inst)
+        ui.error(_(b"abort: file censored %s!\n") % inst)
     except error.StorageError as inst:
-        ui.error(_("abort: %s!\n") % inst)
+        ui.error(_(b"abort: %s!\n") % inst)
         if inst.hint:
-            ui.error(_("(%s)\n") % inst.hint)
+            ui.error(_(b"(%s)\n") % inst.hint)
     except error.InterventionRequired as inst:
-        ui.error("%s\n" % inst)
+        ui.error(b"%s\n" % inst)
         if inst.hint:
-            ui.error(_("(%s)\n") % inst.hint)
+            ui.error(_(b"(%s)\n") % inst.hint)
         return 1
     except error.WdirUnsupported:
-        ui.error(_("abort: working directory revision cannot be specified\n"))
+        ui.error(_(b"abort: working directory revision cannot be specified\n"))
     except error.Abort as inst:
-        ui.error(_("abort: %s\n") % inst)
+        ui.error(_(b"abort: %s\n") % inst)
         if inst.hint:
-            ui.error(_("(%s)\n") % inst.hint)
+            ui.error(_(b"(%s)\n") % inst.hint)
     except ImportError as inst:
-        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
+        ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
         m = stringutil.forcebytestr(inst).split()[-1]
-        if m in "mpatch bdiff".split():
-            ui.error(_("(did you forget to compile extensions?)\n"))
-        elif m in "zlib".split():
-            ui.error(_("(is your Python install correct?)\n"))
+        if m in b"mpatch bdiff".split():
+            ui.error(_(b"(did you forget to compile extensions?)\n"))
+        elif m in b"zlib".split():
+            ui.error(_(b"(is your Python install correct?)\n"))
     except (IOError, OSError) as inst:
-        if util.safehasattr(inst, "code"): # HTTPError
-            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
-        elif util.safehasattr(inst, "reason"): # URLError or SSLError
-            try: # usually it is in the form (errno, strerror)
+        if util.safehasattr(inst, b"code"):  # HTTPError
+            ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
+        elif util.safehasattr(inst, b"reason"):  # URLError or SSLError
+            try:  # usually it is in the form (errno, strerror)
                 reason = inst.reason.args[1]
             except (AttributeError, IndexError):
                 # it might be anything, for example a string
@@ -245,21 +262,28 @@
             if isinstance(reason, pycompat.unicode):
                 # SSLError of Python 2.7.9 contains a unicode
                 reason = encoding.unitolocal(reason)
-            ui.error(_("abort: error: %s\n") % reason)
-        elif (util.safehasattr(inst, "args")
-              and inst.args and inst.args[0] == errno.EPIPE):
+            ui.error(_(b"abort: error: %s\n") % reason)
+        elif (
+            util.safehasattr(inst, b"args")
+            and inst.args
+            and inst.args[0] == errno.EPIPE
+        ):
             pass
-        elif getattr(inst, "strerror", None): # common IOError or OSError
+        elif getattr(inst, "strerror", None):  # common IOError or OSError
             if getattr(inst, "filename", None) is not None:
-                ui.error(_("abort: %s: '%s'\n") % (
-                    encoding.strtolocal(inst.strerror),
-                    stringutil.forcebytestr(inst.filename)))
+                ui.error(
+                    _(b"abort: %s: '%s'\n")
+                    % (
+                        encoding.strtolocal(inst.strerror),
+                        stringutil.forcebytestr(inst.filename),
+                    )
+                )
             else:
-                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
-        else: # suspicious IOError
+                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
+        else:  # suspicious IOError
             raise
     except MemoryError:
-        ui.error(_("abort: out of memory\n"))
+        ui.error(_(b"abort: out of memory\n"))
     except SystemExit as inst:
         # Commands shouldn't sys.exit directly, but give a return code.
         # Just in case, catch this and pass the exit code to the caller.
@@ -267,28 +291,34 @@
 
     return -1
 
+
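
A hedged sketch of how a dispatch-style caller is expected to use
``callcatch()``; ``func`` is a no-argument callable performing the actual
command::

    from mercurial import scmutil

    def runcommand(ui, func):
        # every error family handled above becomes a single translated
        # "abort: ..." message plus a return code instead of a traceback
        return scmutil.callcatch(ui, func)
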
 def checknewlabel(repo, lbl, kind):
     # Do not use the "kind" parameter in ui output.
     # It makes strings difficult to translate.
-    if lbl in ['tip', '.', 'null']:
-        raise error.Abort(_("the name '%s' is reserved") % lbl)
-    for c in (':', '\0', '\n', '\r'):
+    if lbl in [b'tip', b'.', b'null']:
+        raise error.Abort(_(b"the name '%s' is reserved") % lbl)
+    for c in (b':', b'\0', b'\n', b'\r'):
         if c in lbl:
             raise error.Abort(
-                _("%r cannot be used in a name") % pycompat.bytestr(c))
+                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
+            )
     try:
         int(lbl)
-        raise error.Abort(_("cannot use an integer as a name"))
+        raise error.Abort(_(b"cannot use an integer as a name"))
     except ValueError:
         pass
     if lbl.strip() != lbl:
-        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
+        raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
+
 
 def checkfilename(f):
     '''Check that the filename f is an acceptable filename for a tracked file'''
-    if '\r' in f or '\n' in f:
-        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
-                          % pycompat.bytestr(f))
+    if b'\r' in f or b'\n' in f:
+        raise error.Abort(
+            _(b"'\\n' and '\\r' disallowed in filenames: %r")
+            % pycompat.bytestr(f)
+        )
+
 
 def checkportable(ui, f):
     '''Check if filename f is portable and warn or abort depending on config'''
@@ -297,30 +327,33 @@
     if abort or warn:
         msg = util.checkwinfilename(f)
         if msg:
-            msg = "%s: %s" % (msg, procutil.shellquote(f))
+            msg = b"%s: %s" % (msg, procutil.shellquote(f))
             if abort:
                 raise error.Abort(msg)
-            ui.warn(_("warning: %s\n") % msg)
+            ui.warn(_(b"warning: %s\n") % msg)
+
 
 def checkportabilityalert(ui):
     '''check if the user's config requests nothing, a warning, or abort for
     non-portable filenames'''
-    val = ui.config('ui', 'portablefilenames')
+    val = ui.config(b'ui', b'portablefilenames')
     lval = val.lower()
     bval = stringutil.parsebool(val)
-    abort = pycompat.iswindows or lval == 'abort'
-    warn = bval or lval == 'warn'
-    if bval is None and not (warn or abort or lval == 'ignore'):
+    abort = pycompat.iswindows or lval == b'abort'
+    warn = bval or lval == b'warn'
+    if bval is None and not (warn or abort or lval == b'ignore'):
         raise error.ConfigError(
-            _("ui.portablefilenames value is invalid ('%s')") % val)
+            _(b"ui.portablefilenames value is invalid ('%s')") % val
+        )
     return abort, warn
 
+
 class casecollisionauditor(object):
     def __init__(self, ui, abort, dirstate):
         self._ui = ui
         self._abort = abort
-        allfiles = '\0'.join(dirstate._map)
-        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
+        allfiles = b'\0'.join(dirstate)
+        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
         self._dirstate = dirstate
         # The purpose of _newfiles is so that we don't complain about
         # case collisions if someone were to call this object with the
@@ -332,13 +365,14 @@
             return
         fl = encoding.lower(f)
         if fl in self._loweredfiles and f not in self._dirstate:
-            msg = _('possible case-folding collision for %s') % f
+            msg = _(b'possible case-folding collision for %s') % f
             if self._abort:
                 raise error.Abort(msg)
-            self._ui.warn(_("warning: %s\n") % msg)
+            self._ui.warn(_(b"warning: %s\n") % msg)
         self._loweredfiles.add(fl)
         self._newfiles.add(f)
 
+
 def filteredhash(repo, maxrev):
     """build hash of filtered revisions in the current repoview.
 
@@ -359,24 +393,29 @@
     if revs:
         s = hashlib.sha1()
         for rev in revs:
-            s.update('%d;' % rev)
+            s.update(b'%d;' % rev)
         key = s.digest()
     return key
 
+
 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
     '''yield every hg repository under path, always recursively.
     The recurse flag only controls recursion into repo working dirs.'''
+
     def errhandler(err):
         if err.filename == path:
             raise err
+
     samestat = getattr(os.path, 'samestat', None)
     if followsym and samestat is not None:
+
         def adddir(dirlst, dirname):
             dirstat = os.stat(dirname)
             match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
             if not match:
                 dirlst.append(dirstat)
             return not match
+
     else:
         followsym = False
 
@@ -385,16 +424,16 @@
         adddir(seen_dirs, path)
     for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
         dirs.sort()
-        if '.hg' in dirs:
-            yield root # found a repository
-            qroot = os.path.join(root, '.hg', 'patches')
-            if os.path.isdir(os.path.join(qroot, '.hg')):
-                yield qroot # we have a patch queue repo here
+        if b'.hg' in dirs:
+            yield root  # found a repository
+            qroot = os.path.join(root, b'.hg', b'patches')
+            if os.path.isdir(os.path.join(qroot, b'.hg')):
+                yield qroot  # we have a patch queue repo here
             if recurse:
                 # avoid recursing inside the .hg directory
-                dirs.remove('.hg')
+                dirs.remove(b'.hg')
             else:
-                dirs[:] = [] # don't descend further
+                dirs[:] = []  # don't descend further
         elif followsym:
             newdirs = []
             for d in dirs:
@@ -407,6 +446,7 @@
                         newdirs.append(d)
             dirs[:] = newdirs
 
+
 def binnode(ctx):
     """Return binary node id for a given basectx"""
     node = ctx.node()
@@ -414,6 +454,7 @@
         return wdirid
     return node
 
+
 def intrev(ctx):
     """Return integer for a given basectx that can be used in comparison or
     arithmetic operation"""
@@ -422,34 +463,41 @@
         return wdirrev
     return rev
 
+
 def formatchangeid(ctx):
     """Format changectx as '{rev}:{node|formatnode}', which is the default
     template provided by logcmdutil.changesettemplater"""
     repo = ctx.repo()
     return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
 
+
 def formatrevnode(ui, rev, node):
     """Format given revision and node depending on the current verbosity"""
     if ui.debugflag:
         hexfunc = hex
     else:
         hexfunc = short
-    return '%d:%s' % (rev, hexfunc(node))
+    return b'%d:%s' % (rev, hexfunc(node))
+
 
 def resolvehexnodeidprefix(repo, prefix):
-    if (prefix.startswith('x') and
-        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
+    if prefix.startswith(b'x') and repo.ui.configbool(
+        b'experimental', b'revisions.prefixhexnode'
+    ):
         prefix = prefix[1:]
     try:
         # Uses unfiltered repo because it's faster when prefix is ambiguous/
         # This matches the shortesthexnodeidprefix() function below.
         node = repo.unfiltered().changelog._partialmatch(prefix)
     except error.AmbiguousPrefixLookupError:
-        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
+        revset = repo.ui.config(
+            b'experimental', b'revisions.disambiguatewithin'
+        )
         if revset:
             # Clear config to avoid infinite recursion
-            configoverrides = {('experimental',
-                                'revisions.disambiguatewithin'): None}
+            configoverrides = {
+                (b'experimental', b'revisions.disambiguatewithin'): None
+            }
             with repo.ui.configoverride(configoverrides):
                 revs = repo.anyrevs([revset], user=True)
                 matches = []
@@ -465,6 +513,7 @@
     repo.changelog.rev(node)  # make sure node isn't filtered
     return node
 
+
 def mayberevnum(repo, prefix):
     """Checks if the given prefix may be mistaken for a revision number"""
     try:
@@ -479,6 +528,7 @@
     except ValueError:
         return False
 
+
 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
     """Find the shortest unambiguous prefix that matches hexnode.
 
@@ -489,13 +539,13 @@
     # which would be unacceptably slow, so we look for hash collisions in
     # unfiltered space, which means some hashes may be slightly longer.
 
-    minlength=max(minlength, 1)
+    minlength = max(minlength, 1)
 
     def disambiguate(prefix):
         """Disambiguate against revnums."""
-        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
+        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
             if mayberevnum(repo, prefix):
-                return 'x' + prefix
+                return b'x' + prefix
             else:
                 return prefix
 
@@ -506,20 +556,20 @@
                 return prefix
 
     cl = repo.unfiltered().changelog
-    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
+    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
     if revset:
         revs = None
         if cache is not None:
-            revs = cache.get('disambiguationrevset')
+            revs = cache.get(b'disambiguationrevset')
         if revs is None:
             revs = repo.anyrevs([revset], user=True)
             if cache is not None:
-                cache['disambiguationrevset'] = revs
+                cache[b'disambiguationrevset'] = revs
         if cl.rev(node) in revs:
             hexnode = hex(node)
             nodetree = None
             if cache is not None:
-                nodetree = cache.get('disambiguationnodetree')
+                nodetree = cache.get(b'disambiguationnodetree')
             if not nodetree:
                 try:
                     nodetree = parsers.nodetree(cl.index, len(revs))
@@ -530,7 +580,7 @@
                     for r in revs:
                         nodetree.insert(r)
                     if cache is not None:
-                        cache['disambiguationnodetree'] = nodetree
+                        cache[b'disambiguationnodetree'] = nodetree
             if nodetree is not None:
                 length = max(nodetree.shortest(node), minlength)
                 prefix = hexnode[:length]
@@ -550,6 +600,7 @@
     except error.LookupError:
         raise error.RepoLookupError()
 
+
 def isrevsymbol(repo, symbol):
     """Checks if a symbol exists in the repo.
 
@@ -562,6 +613,7 @@
     except error.RepoLookupError:
         return False
 
+
 def revsymbol(repo, symbol):
     """Returns a context given a single revision symbol (as string).
 
@@ -570,16 +622,18 @@
     not "max(public())".
     """
     if not isinstance(symbol, bytes):
-        msg = ("symbol (%s of type %s) was not a string, did you mean "
-               "repo[symbol]?" % (symbol, type(symbol)))
+        msg = (
+            b"symbol (%s of type %s) was not a string, did you mean "
+            b"repo[symbol]?" % (symbol, type(symbol))
+        )
         raise error.ProgrammingError(msg)
     try:
-        if symbol in ('.', 'tip', 'null'):
+        if symbol in (b'.', b'tip', b'null'):
             return repo[symbol]
 
         try:
             r = int(symbol)
-            if '%d' % r != symbol:
+            if b'%d' % r != symbol:
                 raise ValueError
             l = len(repo.changelog)
             if r < 0:
@@ -615,20 +669,24 @@
             rev = repo.changelog.rev(node)
             return repo[rev]
 
-        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
+        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)
 
     except error.WdirUnsupported:
         return repo[None]
-    except (error.FilteredIndexError, error.FilteredLookupError,
-            error.FilteredRepoLookupError):
+    except (
+        error.FilteredIndexError,
+        error.FilteredLookupError,
+        error.FilteredRepoLookupError,
+    ):
         raise _filterederror(repo, symbol)
 
+
 def _filterederror(repo, changeid):
     """build an exception to be raised about a filtered changeid
 
     This is extracted in a function to help extensions (eg: evolve) to
     experiment with various message variants."""
-    if repo.filtername.startswith('visible'):
+    if repo.filtername.startswith(b'visible'):
 
         # Check if the changeset is obsolete
         unfilteredrepo = repo.unfiltered()
@@ -639,43 +697,54 @@
         if ctx.obsolete():
             msg = obsutil._getfilteredreason(repo, changeid, ctx)
         else:
-            msg = _("hidden revision '%s'") % changeid
+            msg = _(b"hidden revision '%s'") % changeid
 
-        hint = _('use --hidden to access hidden revisions')
+        hint = _(b'use --hidden to access hidden revisions')
 
         return error.FilteredRepoLookupError(msg, hint=hint)
-    msg = _("filtered revision '%s' (not in '%s' subset)")
+    msg = _(b"filtered revision '%s' (not in '%s' subset)")
     msg %= (changeid, repo.filtername)
     return error.FilteredRepoLookupError(msg)
 
-def revsingle(repo, revspec, default='.', localalias=None):
+
+def revsingle(repo, revspec, default=b'.', localalias=None):
     if not revspec and revspec != 0:
         return repo[default]
 
     l = revrange(repo, [revspec], localalias=localalias)
     if not l:
-        raise error.Abort(_('empty revision set'))
+        raise error.Abort(_(b'empty revision set'))
     return repo[l.last()]
 
+
 def _pairspec(revspec):
     tree = revsetlang.parse(revspec)
-    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
+    return tree and tree[0] in (
+        b'range',
+        b'rangepre',
+        b'rangepost',
+        b'rangeall',
+    )
+
 
 def revpair(repo, revs):
     if not revs:
-        return repo['.'], repo[None]
+        return repo[b'.'], repo[None]
 
     l = revrange(repo, revs)
 
     if not l:
-        raise error.Abort(_('empty revision range'))
+        raise error.Abort(_(b'empty revision range'))
 
     first = l.first()
     second = l.last()
 
-    if (first == second and len(revs) >= 2
-        and not all(revrange(repo, [r]) for r in revs)):
-        raise error.Abort(_('empty revision on one side of range'))
+    if (
+        first == second
+        and len(revs) >= 2
+        and not all(revrange(repo, [r]) for r in revs)
+    ):
+        raise error.Abort(_(b'empty revision on one side of range'))
 
     # if top-level is range expression, the result must always be a pair
     if first == second and len(revs) == 1 and not _pairspec(revs[0]):
@@ -683,6 +752,7 @@
 
     return repo[first], repo[second]
 
+
 def revrange(repo, specs, localalias=None):
     """Execute 1 to many revsets and return the union.
 
@@ -707,10 +777,11 @@
     allspecs = []
     for spec in specs:
         if isinstance(spec, int):
-            spec = revsetlang.formatspec('%d', spec)
+            spec = revsetlang.formatspec(b'%d', spec)
         allspecs.append(spec)
     return repo.anyrevs(allspecs, user=True, localalias=localalias)
 
+
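
A hedged usage sketch for the revision-resolution helpers above (``repo`` is
assumed to be an open ``localrepository``)::

    from mercurial import scmutil

    # union of several user-supplied revsets; bare ints are routed
    # through revsetlang.formatspec(b'%d', ...) as shown above
    revs = scmutil.revrange(repo, [b'draft()', 42])
    # a single revision: the last member of the spec, defaulting to b'.'
    ctx = scmutil.revsingle(repo, b'tip')
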
 def meaningfulparents(repo, ctx):
     """Return list of meaningful (or all if debug) parentrevs for rev.
 
@@ -727,6 +798,7 @@
         return []
     return parents
 
+
 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
     """Return a function that produced paths for presenting to the user.
 
@@ -744,35 +816,39 @@
     if forcerelativevalue is not None:
         relative = forcerelativevalue
     else:
-        config = repo.ui.config('ui', 'relative-paths')
-        if config == 'legacy':
+        config = repo.ui.config(b'ui', b'relative-paths')
+        if config == b'legacy':
             relative = legacyrelativevalue
         else:
             relative = stringutil.parsebool(config)
             if relative is None:
                 raise error.ConfigError(
-                    _("ui.relative-paths is not a boolean ('%s')") % config)
+                    _(b"ui.relative-paths is not a boolean ('%s')") % config
+                )
 
     if relative:
         cwd = repo.getcwd()
         pathto = repo.pathto
         return lambda f: pathto(f, cwd)
-    elif repo.ui.configbool('ui', 'slash'):
+    elif repo.ui.configbool(b'ui', b'slash'):
         return lambda f: f
     else:
         return util.localpath
 
+
 def subdiruipathfn(subpath, uipathfn):
     '''Create a new uipathfn that treats the file as relative to subpath.'''
     return lambda f: uipathfn(posixpath.join(subpath, f))
 
+
 def anypats(pats, opts):
     '''Checks if any patterns, including --include and --exclude, were given.
 
     Some commands (e.g. addremove) use this condition for deciding whether to
     print absolute or relative paths.
     '''
-    return bool(pats or opts.get('include') or opts.get('exclude'))
+    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
+
 
 def expandpats(pats):
     '''Expand bare globs when running on windows.
@@ -793,43 +869,57 @@
         ret.append(kindpat)
     return ret
 
-def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
-                 badfn=None):
+
+def matchandpats(
+    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
+):
     '''Return a matcher and the patterns that were used.
     The matcher will warn about bad matches, unless an alternate badfn callback
     is provided.'''
     if opts is None:
         opts = {}
-    if not globbed and default == 'relpath':
+    if not globbed and default == b'relpath':
         pats = expandpats(pats or [])
 
     uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
+
     def bad(f, msg):
-        ctx.repo().ui.warn("%s: %s\n" % (uipathfn(f), msg))
+        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
 
     if badfn is None:
         badfn = bad
 
-    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
-                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)
+    m = ctx.match(
+        pats,
+        opts.get(b'include'),
+        opts.get(b'exclude'),
+        default,
+        listsubrepos=opts.get(b'subrepos'),
+        badfn=badfn,
+    )
 
     if m.always():
         pats = []
     return m, pats
 
-def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
-          badfn=None):
+
+def match(
+    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
+):
     '''Return a matcher that will warn about bad matches.'''
     return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
 
+
 def matchall(repo):
     '''Return a matcher that will efficiently match everything.'''
     return matchmod.always()
 
+
 def matchfiles(repo, files, badfn=None):
     '''Return a matcher that will efficiently match exactly these files.'''
     return matchmod.exact(files, badfn=badfn)
 
+
 def parsefollowlinespattern(repo, rev, pat, msg):
     """Return a file name from `pat` pattern suitable for usage in followlines
     logic.
@@ -844,15 +934,17 @@
             raise error.ParseError(msg)
         return files[0]
 
+
 def getorigvfs(ui, repo):
     """return a vfs suitable to save 'orig' file
 
     return None if no special directory is configured"""
-    origbackuppath = ui.config('ui', 'origbackuppath')
+    origbackuppath = ui.config(b'ui', b'origbackuppath')
     if not origbackuppath:
         return None
     return vfs.vfs(repo.wvfs.join(origbackuppath))
 
+
 def backuppath(ui, repo, filepath):
     '''customize where working copy backup files (.orig files) are created
 
@@ -865,29 +957,30 @@
     '''
     origvfs = getorigvfs(ui, repo)
     if origvfs is None:
-        return repo.wjoin(filepath + ".orig")
+        return repo.wjoin(filepath + b".orig")
 
     origbackupdir = origvfs.dirname(filepath)
     if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
-        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
+        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
 
         # Remove any files that conflict with the backup file's path
         for f in reversed(list(util.finddirs(filepath))):
             if origvfs.isfileorlink(f):
-                ui.note(_('removing conflicting file: %s\n')
-                        % origvfs.join(f))
+                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                 origvfs.unlink(f)
                 break
 
         origvfs.makedirs(origbackupdir)
 
     if origvfs.isdir(filepath) and not origvfs.islink(filepath):
-        ui.note(_('removing conflicting directory: %s\n')
-                % origvfs.join(filepath))
+        ui.note(
+            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
+        )
         origvfs.rmtree(filepath, forcibly=True)
 
     return origvfs.join(filepath)
 
+
 class _containsnode(object):
     """proxy __contains__(node) to container.__contains__ which accepts revs"""
 
@@ -898,8 +991,17 @@
     def __contains__(self, node):
         return self._revcontains(self._torev(node))
 
-def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
-                 fixphase=False, targetphase=None, backup=True):
+
+def cleanupnodes(
+    repo,
+    replacements,
+    operation,
+    moves=None,
+    metadata=None,
+    fixphase=False,
+    targetphase=None,
+    backup=True,
+):
     """do common cleanups when old nodes are replaced by new nodes
 
     That includes writing obsmarkers or stripping nodes, and moving bookmarks.
@@ -920,7 +1022,7 @@
         return
 
     # translate mapping's other forms
-    if not util.safehasattr(replacements, 'items'):
+    if not util.safehasattr(replacements, b'items'):
         replacements = {(n,): () for n in replacements}
     else:
         # upgrading non tuple "source" to tuple ones for BC
@@ -943,14 +1045,15 @@
                     continue
                 if len(newnodes) > 1:
                     # usually a split, take the one with biggest rev number
-                    newnode = next(unfi.set('max(%ln)', newnodes)).node()
+                    newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
                 elif len(newnodes) == 0:
                     # move bookmark backwards
                     allreplaced = []
                     for rep in replacements:
                         allreplaced.extend(rep)
-                    roots = list(unfi.set('max((::%n) - %ln)', oldnode,
-                                          allreplaced))
+                    roots = list(
+                        unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
+                    )
                     if roots:
                         newnode = roots[0].node()
                     else:
@@ -971,14 +1074,17 @@
 
         allnewnodes.sort(key=lambda n: unfi[n].rev())
         newphases = {}
+
         def phase(ctx):
             return newphases.get(ctx.node(), ctx.phase())
+
         for newnode in allnewnodes:
             ctx = unfi[newnode]
             parentphase = max(phase(p) for p in ctx.parents())
             if targetphase is None:
-                oldphase = max(unfi[oldnode].phase()
-                               for oldnode in precursors[newnode])
+                oldphase = max(
+                    unfi[oldnode].phase() for oldnode in precursors[newnode]
+                )
                 newphase = max(oldphase, parentphase)
             else:
                 newphase = max(targetphase, parentphase)
@@ -988,7 +1094,7 @@
             elif newphase < ctx.phase():
                 toadvance.setdefault(newphase, []).append(newnode)
 
-    with repo.transaction('cleanup') as tr:
+    with repo.transaction(b'cleanup') as tr:
         # Move bookmarks
         bmarks = repo._bookmarks
         bmarkchanges = []
@@ -996,13 +1102,23 @@
             oldbmarks = repo.nodebookmarks(oldnode)
             if not oldbmarks:
                 continue
-            from . import bookmarks # avoid import cycle
-            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
-                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
-                           hex(oldnode), hex(newnode)))
+            from . import bookmarks  # avoid import cycle
+
+            repo.ui.debug(
+                b'moving bookmarks %r from %s to %s\n'
+                % (
+                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
+                    hex(oldnode),
+                    hex(newnode),
+                )
+            )
             # Delete divergent bookmarks being parents of related newnodes
-            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
-                                   allnewnodes, newnode, oldnode)
+            deleterevs = repo.revs(
+                b'parents(roots(%ln & (::%n))) - parents(%n)',
+                allnewnodes,
+                newnode,
+                oldnode,
+            )
             deletenodes = _containsnode(repo, deleterevs)
             for name in oldbmarks:
                 bmarkchanges.append((name, newnode))
@@ -1017,7 +1133,7 @@
         for phase, nodes in toadvance.items():
             phases.advanceboundary(repo, tr, phase, nodes)
 
-        mayusearchived = repo.ui.config('experimental', 'cleanup-as-archived')
+        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
         # Obsolete or strip nodes
         if obsolete.isenabled(repo, obsolete.createmarkersopt):
             # If a node is already obsoleted, and we want to obsolete it
@@ -1033,37 +1149,43 @@
                 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                 rels.append(rel)
             if rels:
-                obsolete.createmarkers(repo, rels, operation=operation,
-                                       metadata=metadata)
+                obsolete.createmarkers(
+                    repo, rels, operation=operation, metadata=metadata
+                )
         elif phases.supportinternal(repo) and mayusearchived:
             # this assumes we do not have "unstable" nodes above the cleaned ones
             allreplaced = set()
             for ns in replacements.keys():
                 allreplaced.update(ns)
             if backup:
-                from . import repair # avoid import cycle
+                from . import repair  # avoid import cycle
+
                 node = min(allreplaced, key=repo.changelog.rev)
-                repair.backupbundle(repo, allreplaced, allreplaced, node,
-                                    operation)
+                repair.backupbundle(
+                    repo, allreplaced, allreplaced, node, operation
+                )
             phases.retractboundary(repo, tr, phases.archived, allreplaced)
         else:
-            from . import repair # avoid import cycle
+            from . import repair  # avoid import cycle
+
             tostrip = list(n for ns in replacements for n in ns)
             if tostrip:
-                repair.delayedstrip(repo.ui, repo, tostrip, operation,
-                                    backup=backup)
+                repair.delayedstrip(
+                    repo.ui, repo, tostrip, operation, backup=backup
+                )
+
 
 def addremove(repo, matcher, prefix, uipathfn, opts=None):
     if opts is None:
         opts = {}
     m = matcher
-    dry_run = opts.get('dry_run')
+    dry_run = opts.get(b'dry_run')
     try:
-        similarity = float(opts.get('similarity') or 0)
+        similarity = float(opts.get(b'similarity') or 0)
     except ValueError:
-        raise error.Abort(_('similarity must be a number'))
+        raise error.Abort(_(b'similarity must be a number'))
     if similarity < 0 or similarity > 100:
-        raise error.Abort(_('similarity must be between 0 and 100'))
+        raise error.Abort(_(b'similarity must be between 0 and 100'))
     similarity /= 100.0
 
     ret = 0
@@ -1071,7 +1193,7 @@
     wctx = repo[None]
     for subpath in sorted(wctx.substate):
         submatch = matchmod.subdirmatcher(subpath, m)
-        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
+        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
             sub = wctx.sub(subpath)
             subprefix = repo.wvfs.reljoin(prefix, subpath)
             subuipathfn = subdiruipathfn(subpath, uipathfn)
@@ -1079,18 +1201,22 @@
                 if sub.addremove(submatch, subprefix, subuipathfn, opts):
                     ret = 1
             except error.LookupError:
-                repo.ui.status(_("skipping missing subrepository: %s\n")
-                                 % uipathfn(subpath))
+                repo.ui.status(
+                    _(b"skipping missing subrepository: %s\n")
+                    % uipathfn(subpath)
+                )
 
     rejected = []
+
     def badfn(f, msg):
         if f in m.files():
             m.bad(f, msg)
         rejected.append(f)
 
     badmatch = matchmod.badmatch(m, badfn)
-    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
-                                                                    badmatch)
+    added, unknown, deleted, removed, forgotten = _interestingfiles(
+        repo, badmatch
+    )
 
     unknownset = set(unknown + forgotten)
     toprint = unknownset.copy()
@@ -1098,15 +1224,16 @@
     for abs in sorted(toprint):
         if repo.ui.verbose or not m.exact(abs):
             if abs in unknownset:
-                status = _('adding %s\n') % uipathfn(abs)
-                label = 'ui.addremove.added'
+                status = _(b'adding %s\n') % uipathfn(abs)
+                label = b'ui.addremove.added'
             else:
-                status = _('removing %s\n') % uipathfn(abs)
-                label = 'ui.addremove.removed'
+                status = _(b'removing %s\n') % uipathfn(abs)
+                label = b'ui.addremove.removed'
             repo.ui.status(status, label=label)
 
-    renames = _findrenames(repo, m, added + unknown, removed + deleted,
-                           similarity, uipathfn)
+    renames = _findrenames(
+        repo, m, added + unknown, removed + deleted, similarity, uipathfn
+    )
 
     if not dry_run:
         _markchanges(repo, unknown + forgotten, deleted, renames)
@@ -1116,6 +1243,7 @@
             return 1
     return ret
 
+
 def marktouched(repo, files, similarity=0.0):
     '''Assert that files have somehow been operated upon. Files are relative to
     the repo root.'''
@@ -1130,17 +1258,18 @@
         toprint.update(deleted)
         for abs in sorted(toprint):
             if abs in unknownset:
-                status = _('adding %s\n') % abs
+                status = _(b'adding %s\n') % abs
             else:
-                status = _('removing %s\n') % abs
+                status = _(b'removing %s\n') % abs
             repo.ui.status(status)
 
     # TODO: We should probably have the caller pass in uipathfn and apply it to
     # the messages above too. legacyrelativevalue=True is consistent with how
     # it used to work.
     uipathfn = getuipathfn(repo, legacyrelativevalue=True)
-    renames = _findrenames(repo, m, added + unknown, removed + deleted,
-                           similarity, uipathfn)
+    renames = _findrenames(
+        repo, m, added + unknown, removed + deleted, similarity, uipathfn
+    )
 
     _markchanges(repo, unknown + forgotten, deleted, renames)
 
@@ -1149,6 +1278,7 @@
             return 1
     return 0
 
+
 def _interestingfiles(repo, matcher):
     '''Walk dirstate with matcher, looking for files that addremove would care
     about.
@@ -1161,39 +1291,53 @@
     ctx = repo[None]
     dirstate = repo.dirstate
     matcher = repo.narrowmatch(matcher, includeexact=True)
-    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
-                                unknown=True, ignored=False, full=False)
-    for abs, st in walkresults.iteritems():
+    walkresults = dirstate.walk(
+        matcher,
+        subrepos=sorted(ctx.substate),
+        unknown=True,
+        ignored=False,
+        full=False,
+    )
+    for abs, st in pycompat.iteritems(walkresults):
         dstate = dirstate[abs]
-        if dstate == '?' and audit_path.check(abs):
+        if dstate == b'?' and audit_path.check(abs):
             unknown.append(abs)
-        elif dstate != 'r' and not st:
+        elif dstate != b'r' and not st:
             deleted.append(abs)
-        elif dstate == 'r' and st:
+        elif dstate == b'r' and st:
             forgotten.append(abs)
         # for finding renames
-        elif dstate == 'r' and not st:
+        elif dstate == b'r' and not st:
             removed.append(abs)
-        elif dstate == 'a':
+        elif dstate == b'a':
             added.append(abs)
 
     return added, unknown, deleted, removed, forgotten
 
+
 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
     '''Find renames from removed files to added ones.'''
     renames = {}
     if similarity > 0:
-        for old, new, score in similar.findrenames(repo, added, removed,
-                                                   similarity):
-            if (repo.ui.verbose or not matcher.exact(old)
-                or not matcher.exact(new)):
-                repo.ui.status(_('recording removal of %s as rename to %s '
-                                 '(%d%% similar)\n') %
-                               (uipathfn(old), uipathfn(new),
-                                score * 100))
+        for old, new, score in similar.findrenames(
+            repo, added, removed, similarity
+        ):
+            if (
+                repo.ui.verbose
+                or not matcher.exact(old)
+                or not matcher.exact(new)
+            ):
+                repo.ui.status(
+                    _(
+                        b'recording removal of %s as rename to %s '
+                        b'(%d%% similar)\n'
+                    )
+                    % (uipathfn(old), uipathfn(new), score * 100)
+                )
             renames[new] = old
     return renames
 
+
 def _markchanges(repo, unknown, deleted, renames):
     '''Marks the files in unknown as added, the files in deleted as removed,
     and the files in renames as copied.'''
@@ -1201,11 +1345,13 @@
     with repo.wlock():
         wctx.forget(deleted)
         wctx.add(unknown)
-        for new, old in renames.iteritems():
+        for new, old in pycompat.iteritems(renames):
             wctx.copy(old, new)
 
+
 def getrenamedfn(repo, endrev=None):
     if copiesmod.usechangesetcentricalgo(repo):
+
         def getrenamed(fn, rev):
             ctx = repo[rev]
             p1copies = ctx.p1copies()
@@ -1215,6 +1361,7 @@
             if fn in p2copies:
                 return p2copies[fn]
             return None
+
         return getrenamed
 
     rcache = {}
@@ -1247,8 +1394,10 @@
 
     return getrenamed
 
+
 def getcopiesfn(repo, endrev=None):
     if copiesmod.usechangesetcentricalgo(repo):
+
         def copiesfn(ctx):
             if ctx.p2copies():
                 allcopies = ctx.p1copies().copy()
@@ -1257,8 +1406,10 @@
                 return sorted(allcopies.items())
             else:
                 return sorted(ctx.p1copies().items())
+
     else:
         getrenamed = getrenamedfn(repo, endrev)
+
         def copiesfn(ctx):
             copies = []
             for fn in ctx.files():
@@ -1269,25 +1420,31 @@
 
     return copiesfn
 
+
 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
     """Update the dirstate to reflect the intent of copying src to dst. For
     various reasons, dst might not end up marked as copied from src.
     """
     origsrc = repo.dirstate.copied(src) or src
-    if dst == origsrc: # copying back a copy?
-        if repo.dirstate[dst] not in 'mn' and not dryrun:
+    if dst == origsrc:  # copying back a copy?
+        if repo.dirstate[dst] not in b'mn' and not dryrun:
             repo.dirstate.normallookup(dst)
     else:
-        if repo.dirstate[origsrc] == 'a' and origsrc == src:
+        if repo.dirstate[origsrc] == b'a' and origsrc == src:
             if not ui.quiet:
-                ui.warn(_("%s has not been committed yet, so no copy "
-                          "data will be stored for %s.\n")
-                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
-            if repo.dirstate[dst] in '?r' and not dryrun:
+                ui.warn(
+                    _(
+                        b"%s has not been committed yet, so no copy "
+                        b"data will be stored for %s.\n"
+                    )
+                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
+                )
+            if repo.dirstate[dst] in b'?r' and not dryrun:
                 wctx.add([dst])
         elif not dryrun:
             wctx.copy(origsrc, dst)
 
+
 def movedirstate(repo, newctx, match=None):
     """Move the dirstate to newctx and adjust it as necessary.
 
@@ -1295,46 +1452,50 @@
     a matcher that doesn't match all the differences between the parent of the
     working copy and newctx.
     """
-    oldctx = repo['.']
+    oldctx = repo[b'.']
     ds = repo.dirstate
     ds.setparents(newctx.node(), nullid)
     copies = dict(ds.copies())
     s = newctx.status(oldctx, match=match)
     for f in s.modified:
-        if ds[f] == 'r':
+        if ds[f] == b'r':
             # modified + removed -> removed
             continue
         ds.normallookup(f)
 
     for f in s.added:
-        if ds[f] == 'r':
+        if ds[f] == b'r':
             # added + removed -> unknown
             ds.drop(f)
-        elif ds[f] != 'a':
+        elif ds[f] != b'a':
             ds.add(f)
 
     for f in s.removed:
-        if ds[f] == 'a':
+        if ds[f] == b'a':
             # removed + added -> normal
             ds.normallookup(f)
-        elif ds[f] != 'r':
+        elif ds[f] != b'r':
             ds.remove(f)
 
     # Merge old parent and old working dir copies
     oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
     oldcopies.update(copies)
-    copies = dict((dst, oldcopies.get(src, src))
-                  for dst, src in oldcopies.iteritems())
+    copies = dict(
+        (dst, oldcopies.get(src, src))
+        for dst, src in pycompat.iteritems(oldcopies)
+    )
     # Adjust the dirstate copies
-    for dst, src in copies.iteritems():
-        if (src not in newctx or dst in newctx or ds[dst] != 'a'):
+    for dst, src in pycompat.iteritems(copies):
+        if src not in newctx or dst in newctx or ds[dst] != b'a':
             src = None
         ds.copy(src, dst)
 
+
 def writerequires(opener, requirements):
-    with opener('requires', 'w', atomictemp=True) as fp:
+    with opener(b'requires', b'w', atomictemp=True) as fp:
         for r in sorted(requirements):
-            fp.write("%s\n" % r)
+            fp.write(b"%s\n" % r)
+
 
 class filecachesubentry(object):
     def __init__(self, path, stat):
@@ -1391,6 +1552,7 @@
             if e.errno != errno.ENOENT:
                 raise
 
+
 class filecacheentry(object):
     def __init__(self, paths, stat=True):
         self._entries = []
@@ -1408,6 +1570,7 @@
         for entry in self._entries:
             entry.refresh()
 
+
 class filecache(object):
     """A property like decorator that tracks files under .hg/ for updates.
 
@@ -1490,8 +1653,9 @@
         else:
             ce = obj._filecache[self.name]
 
-        ce.obj = value # update cached copy
-        obj.__dict__[self.sname] = value # update copy returned by obj.x
+        ce.obj = value  # update cached copy
+        obj.__dict__[self.sname] = value  # update copy returned by obj.x
+
 
 def extdatasource(repo, source):
     """Gather a map of rev -> value dict from the specified source
@@ -1509,36 +1673,39 @@
     remote data sources.
     """
 
-    spec = repo.ui.config("extdata", source)
+    spec = repo.ui.config(b"extdata", source)
     if not spec:
-        raise error.Abort(_("unknown extdata source '%s'") % source)
+        raise error.Abort(_(b"unknown extdata source '%s'") % source)
 
     data = {}
     src = proc = None
     try:
-        if spec.startswith("shell:"):
+        if spec.startswith(b"shell:"):
             # external commands should be run relative to the repo root
             cmd = spec[6:]
-            proc = subprocess.Popen(procutil.tonativestr(cmd),
-                                    shell=True, bufsize=-1,
-                                    close_fds=procutil.closefds,
-                                    stdout=subprocess.PIPE,
-                                    cwd=procutil.tonativestr(repo.root))
+            proc = subprocess.Popen(
+                procutil.tonativestr(cmd),
+                shell=True,
+                bufsize=-1,
+                close_fds=procutil.closefds,
+                stdout=subprocess.PIPE,
+                cwd=procutil.tonativestr(repo.root),
+            )
             src = proc.stdout
         else:
             # treat as a URL or file
             src = url.open(repo.ui, spec)
         for l in src:
-            if " " in l:
-                k, v = l.strip().split(" ", 1)
+            if b" " in l:
+                k, v = l.strip().split(b" ", 1)
             else:
-                k, v = l.strip(), ""
+                k, v = l.strip(), b""
 
             k = encoding.tolocal(k)
             try:
                 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
             except (error.LookupError, error.RepoLookupError):
-                pass # we ignore data for nodes that don't exist locally
+                pass  # we ignore data for nodes that don't exist locally
     finally:
         if proc:
             try:
@@ -1550,38 +1717,45 @@
         if src:
             src.close()
     if proc and proc.returncode != 0:
-        raise error.Abort(_("extdata command '%s' failed: %s")
-                          % (cmd, procutil.explainexit(proc.returncode)))
+        raise error.Abort(
+            _(b"extdata command '%s' failed: %s")
+            % (cmd, procutil.explainexit(proc.returncode))
+        )
 
     return data
 
+
 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
     if lock is None:
         raise error.LockInheritanceContractViolation(
-            'lock can only be inherited while held')
+            b'lock can only be inherited while held'
+        )
     if environ is None:
         environ = {}
     with lock.inherit() as locker:
         environ[envvar] = locker
         return repo.ui.system(cmd, environ=environ, *args, **kwargs)
 
+
 def wlocksub(repo, cmd, *args, **kwargs):
     """run cmd as a subprocess that allows inheriting repo's wlock
 
     This can only be called while the wlock is held. This takes all the
     arguments that ui.system does, and returns the exit code of the
     subprocess."""
-    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
-                    **kwargs)
+    return _locksub(
+        repo, repo.currentwlock(), b'HG_WLOCK_LOCKER', cmd, *args, **kwargs
+    )
+
 
 class progress(object):
-    def __init__(self, ui, updatebar, topic, unit="", total=None):
+    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
         self.ui = ui
         self.pos = 0
         self.topic = topic
         self.unit = unit
         self.total = total
-        self.debug = ui.configbool('progress', 'debug')
+        self.debug = ui.configbool(b'progress', b'debug')
         self._updatebar = updatebar
 
     def __enter__(self):
@@ -1590,7 +1764,7 @@
     def __exit__(self, exc_type, exc_value, exc_tb):
         self.complete()
 
-    def update(self, pos, item="", total=None):
+    def update(self, pos, item=b"", total=None):
         assert pos is not None
         if total:
             self.total = total
@@ -1599,47 +1773,54 @@
         if self.debug:
             self._printdebug(item)
 
-    def increment(self, step=1, item="", total=None):
+    def increment(self, step=1, item=b"", total=None):
         self.update(self.pos + step, item, total)
 
     def complete(self):
         self.pos = None
-        self.unit = ""
+        self.unit = b""
         self.total = None
-        self._updatebar(self.topic, self.pos, "", self.unit, self.total)
+        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
 
     def _printdebug(self, item):
         if self.unit:
-            unit = ' ' + self.unit
+            unit = b' ' + self.unit
         if item:
-            item = ' ' + item
+            item = b' ' + item
 
         if self.total:
             pct = 100.0 * self.pos / self.total
-            self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n'
-                       % (self.topic, item, self.pos, self.total, unit, pct))
+            self.ui.debug(
+                b'%s:%s %d/%d%s (%4.2f%%)\n'
+                % (self.topic, item, self.pos, self.total, unit, pct)
+            )
         else:
-            self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
+            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
+
 
 def gdinitconfig(ui):
     """helper function to know if a repo should be created as general delta
     """
     # experimental config: format.generaldelta
-    return (ui.configbool('format', 'generaldelta')
-            or ui.configbool('format', 'usegeneraldelta'))
+    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
+        b'format', b'usegeneraldelta'
+    )
+
 
 def gddeltaconfig(ui):
     """helper function to know if incoming delta should be optimised
     """
     # experimental config: format.generaldelta
-    return ui.configbool('format', 'generaldelta')
+    return ui.configbool(b'format', b'generaldelta')
+
 
 class simplekeyvaluefile(object):
     """A simple file with key=value lines
 
     Keys must be alphanumeric and start with a letter; values must not
     contain '\n' characters"""
-    firstlinekey = '__firstline'
+
+    firstlinekey = b'__firstline'
 
     def __init__(self, vfs, path, keys=None):
         self.vfs = vfs
@@ -1655,7 +1836,7 @@
         d = {}
         if firstlinenonkeyval:
             if not lines:
-                e = _("empty simplekeyvalue file")
+                e = _(b"empty simplekeyvalue file")
                 raise error.CorruptedState(e)
             # we don't want to include '\n' in the __firstline
             d[self.firstlinekey] = lines[0][:-1]
@@ -1665,10 +1846,11 @@
             # the 'if line.strip()' part prevents us from failing on empty
             # lines which only contain '\n' and therefore are not skipped
             # by 'if line'
-            updatedict = dict(line[:-1].split('=', 1) for line in lines
-                                                      if line.strip())
+            updatedict = dict(
+                line[:-1].split(b'=', 1) for line in lines if line.strip()
+            )
             if self.firstlinekey in updatedict:
-                e = _("%r can't be used as a key")
+                e = _(b"%r can't be used as a key")
                 raise error.CorruptedState(e % self.firstlinekey)
             d.update(updatedict)
         except ValueError as e:
@@ -1684,38 +1866,40 @@
         everything else, as it is, not in a key=value form"""
         lines = []
         if firstline is not None:
-            lines.append('%s\n' % firstline)
+            lines.append(b'%s\n' % firstline)
 
         for k, v in data.items():
             if k == self.firstlinekey:
-                e = "key name '%s' is reserved" % self.firstlinekey
+                e = b"key name '%s' is reserved" % self.firstlinekey
                 raise error.ProgrammingError(e)
             if not k[0:1].isalpha():
-                e = "keys must start with a letter in a key-value file"
+                e = b"keys must start with a letter in a key-value file"
                 raise error.ProgrammingError(e)
             if not k.isalnum():
-                e = "invalid key name in a simple key-value file"
+                e = b"invalid key name in a simple key-value file"
+                raise error.ProgrammingError(e)
+            if b'\n' in v:
+                e = b"invalid value in a simple key-value file"
                 raise error.ProgrammingError(e)
-            if '\n' in v:
-                e = "invalid value in a simple key-value file"
-                raise error.ProgrammingError(e)
-            lines.append("%s=%s\n" % (k, v))
-        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
-            fp.write(''.join(lines))
+            lines.append(b"%s=%s\n" % (k, v))
+        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
+            fp.write(b''.join(lines))
+
 
 _reportobsoletedsource = [
-    'debugobsolete',
-    'pull',
-    'push',
-    'serve',
-    'unbundle',
+    b'debugobsolete',
+    b'pull',
+    b'push',
+    b'serve',
+    b'unbundle',
 ]
 
 _reportnewcssource = [
-    'pull',
-    'unbundle',
+    b'pull',
+    b'unbundle',
 ]
 
+
 def prefetchfiles(repo, revs, match):
     """Invokes the registered file prefetch functions, allowing extensions to
     ensure the corresponding files are available locally, before the command
@@ -1729,15 +1913,18 @@
 
     fileprefetchhooks(repo, revs, match)
 
+
 # a list of (repo, revs, match) prefetch functions
 fileprefetchhooks = util.hooks()
 
 # A marker that tells the evolve extension to suppress its own reporting
 _reportstroubledchangesets = True
 
-def registersummarycallback(repo, otr, txnname=''):
+
+def registersummarycallback(repo, otr, txnname=b''):
     """register a callback to issue a summary after the transaction is closed
     """
+
     def txmatch(sources):
         return any(txnname.startswith(source) for source in sources)
 
@@ -1752,56 +1939,82 @@
         # repository through the weakref.
         filtername = repo.filtername
         reporef = weakref.ref(repo.unfiltered())
+
         def wrapped(tr):
             repo = reporef()
             if filtername:
                 repo = repo.filtered(filtername)
             func(repo, tr)
-        newcat = '%02i-txnreport' % len(categories)
+
+        newcat = b'%02i-txnreport' % len(categories)
         otr.addpostclose(newcat, wrapped)
         categories.append(newcat)
         return wrapped
 
+    @reportsummary
+    def reportchangegroup(repo, tr):
+        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
+        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
+        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
+        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
+        if cgchangesets or cgrevisions or cgfiles:
+            htext = b""
+            if cgheads:
+                htext = _(b" (%+d heads)") % cgheads
+            msg = _(b"added %d changesets with %d changes to %d files%s\n")
+            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
+
     if txmatch(_reportobsoletedsource):
+
         @reportsummary
         def reportobsoleted(repo, tr):
             obsoleted = obsutil.getobsoleted(repo, tr)
+            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
+            if newmarkers:
+                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
             if obsoleted:
-                repo.ui.status(_('obsoleted %i changesets\n')
-                               % len(obsoleted))
+                repo.ui.status(_(b'obsoleted %i changesets\n') % len(obsoleted))
 
-    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
-        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
+    if obsolete.isenabled(
+        repo, obsolete.createmarkersopt
+    ) and repo.ui.configbool(
+        b'experimental', b'evolution.report-instabilities'
+    ):
         instabilitytypes = [
-            ('orphan', 'orphan'),
-            ('phase-divergent', 'phasedivergent'),
-            ('content-divergent', 'contentdivergent'),
+            (b'orphan', b'orphan'),
+            (b'phase-divergent', b'phasedivergent'),
+            (b'content-divergent', b'contentdivergent'),
         ]
 
         def getinstabilitycounts(repo):
             filtered = repo.changelog.filteredrevs
             counts = {}
             for instability, revset in instabilitytypes:
-                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
-                                          filtered)
+                counts[instability] = len(
+                    set(obsolete.getrevs(repo, revset)) - filtered
+                )
             return counts
 
         oldinstabilitycounts = getinstabilitycounts(repo)
+
         @reportsummary
         def reportnewinstabilities(repo, tr):
             newinstabilitycounts = getinstabilitycounts(repo)
             for instability, revset in instabilitytypes:
-                delta = (newinstabilitycounts[instability] -
-                         oldinstabilitycounts[instability])
+                delta = (
+                    newinstabilitycounts[instability]
+                    - oldinstabilitycounts[instability]
+                )
                 msg = getinstabilitymessage(delta, instability)
                 if msg:
                     repo.ui.warn(msg)
 
     if txmatch(_reportnewcssource):
+
         @reportsummary
         def reportnewcs(repo, tr):
             """Report the range of new revisions pulled/unbundled."""
-            origrepolen = tr.changes.get('origrepolen', len(repo))
+            origrepolen = tr.changes.get(b'origrepolen', len(repo))
             unfi = repo.unfiltered()
             if origrepolen >= len(unfi):
                 return
@@ -1814,36 +2027,37 @@
                 if minrev == maxrev:
                     revrange = minrev
                 else:
-                    revrange = '%s:%s' % (minrev, maxrev)
-                draft = len(repo.revs('%ld and draft()', revs))
-                secret = len(repo.revs('%ld and secret()', revs))
+                    revrange = b'%s:%s' % (minrev, maxrev)
+                draft = len(repo.revs(b'%ld and draft()', revs))
+                secret = len(repo.revs(b'%ld and secret()', revs))
                 if not (draft or secret):
-                    msg = _('new changesets %s\n') % revrange
+                    msg = _(b'new changesets %s\n') % revrange
                 elif draft and secret:
-                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
+                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                     msg %= (revrange, draft, secret)
                 elif draft:
-                    msg = _('new changesets %s (%d drafts)\n')
+                    msg = _(b'new changesets %s (%d drafts)\n')
                     msg %= (revrange, draft)
                 elif secret:
-                    msg = _('new changesets %s (%d secrets)\n')
+                    msg = _(b'new changesets %s (%d secrets)\n')
                     msg %= (revrange, secret)
                 else:
-                    errormsg = 'entered unreachable condition'
+                    errormsg = b'entered unreachable condition'
                     raise error.ProgrammingError(errormsg)
                 repo.ui.status(msg)
 
             # search new changesets directly pulled as obsolete
-            duplicates = tr.changes.get('revduplicates', ())
-            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
-                                 origrepolen, duplicates)
+            duplicates = tr.changes.get(b'revduplicates', ())
+            obsadded = unfi.revs(
+                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
+            )
             cl = repo.changelog
             extinctadded = [r for r in obsadded if r not in cl]
             if extinctadded:
                 # They are not just obsolete, but obsolete and invisible
                 # we call them "extinct" internally but the terms have not been
                 # exposed to users.
-                msg = '(%d other changesets obsolete on arrival)\n'
+                msg = b'(%d other changesets obsolete on arrival)\n'
                 repo.ui.status(msg % len(extinctadded))
 
         @reportsummary
@@ -1851,18 +2065,21 @@
             """Report statistics of phase changes for changesets pre-existing
             pull/unbundle.
             """
-            origrepolen = tr.changes.get('origrepolen', len(repo))
-            phasetracking = tr.changes.get('phases', {})
+            origrepolen = tr.changes.get(b'origrepolen', len(repo))
+            phasetracking = tr.changes.get(b'phases', {})
             if not phasetracking:
                 return
             published = [
-                rev for rev, (old, new) in phasetracking.iteritems()
+                rev
+                for rev, (old, new) in pycompat.iteritems(phasetracking)
                 if new == phases.public and rev < origrepolen
             ]
             if not published:
                 return
-            repo.ui.status(_('%d local changesets published\n')
-                           % len(published))
+            repo.ui.status(
+                _(b'%d local changesets published\n') % len(published)
+            )
+
 
 def getinstabilitymessage(delta, instability):
     """function to return the message to show warning about new instabilities
@@ -1870,35 +2087,41 @@
     exists as a separate function so that extensions can wrap it to show more
     information like how to fix instabilities"""
     if delta > 0:
-        return _('%i new %s changesets\n') % (delta, instability)
+        return _(b'%i new %s changesets\n') % (delta, instability)
+
 
 def nodesummaries(repo, nodes, maxnumnodes=4):
     if len(nodes) <= maxnumnodes or repo.ui.verbose:
-        return ' '.join(short(h) for h in nodes)
-    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
-    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
+        return b' '.join(short(h) for h in nodes)
+    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
+    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
 
-def enforcesinglehead(repo, tr, desc):
+
+def enforcesinglehead(repo, tr, desc, accountclosed=False):
     """check that no named branch has multiple heads"""
-    if desc in ('strip', 'repair'):
+    if desc in (b'strip', b'repair'):
         # skip the logic during strip
         return
-    visible = repo.filtered('visible')
+    visible = repo.filtered(b'visible')
     # possible improvement: we could restrict the check to affected branch
-    for name, heads in visible.branchmap().iteritems():
+    bm = visible.branchmap()
+    for name in bm:
+        heads = bm.branchheads(name, closed=accountclosed)
         if len(heads) > 1:
-            msg = _('rejecting multiple heads on branch "%s"')
+            msg = _(b'rejecting multiple heads on branch "%s"')
             msg %= name
-            hint = _('%d heads: %s')
+            hint = _(b'%d heads: %s')
             hint %= (len(heads), nodesummaries(repo, heads))
             raise error.Abort(msg, hint=hint)
 
+
 def wrapconvertsink(sink):
     """Allow extensions to wrap the sink returned by convcmd.convertsink()
     before it is used, whether or not the convert extension was formally loaded.
     """
     return sink
 
+
 def unhidehashlikerevs(repo, specs, hiddentype):
     """parse the user specs and unhide changesets whose hash or revision number
     is passed.
@@ -1908,18 +2131,19 @@
 
     returns a repo object with the required changesets unhidden
     """
-    if not repo.filtername or not repo.ui.configbool('experimental',
-                                                     'directaccess'):
+    if not repo.filtername or not repo.ui.configbool(
+        b'experimental', b'directaccess'
+    ):
         return repo
 
-    if repo.filtername not in ('visible', 'visible-hidden'):
+    if repo.filtername not in (b'visible', b'visible-hidden'):
         return repo
 
     symbols = set()
     for spec in specs:
         try:
             tree = revsetlang.parse(spec)
-        except error.ParseError: # will be reported by scmutil.revrange()
+        except error.ParseError:  # will be reported by scmutil.revrange()
             continue
 
         symbols.update(revsetlang.gethashlikesymbols(tree))
@@ -1932,15 +2156,21 @@
     if not revs:
         return repo
 
-    if hiddentype == 'warn':
+    if hiddentype == b'warn':
         unfi = repo.unfiltered()
-        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
-        repo.ui.warn(_("warning: accessing hidden changesets for write "
-                       "operation: %s\n") % revstr)
+        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
+        repo.ui.warn(
+            _(
+                b"warning: accessing hidden changesets for write "
+                b"operation: %s\n"
+            )
+            % revstr
+        )
 
     # we have to use a new filtername to separate the branch/tags caches until
     # we can disable these caches when revisions are dynamically pinned.
-    return repo.filtered('visible-hidden', revs)
+    return repo.filtered(b'visible-hidden', revs)
+
 
 def _getrevsfromsymbols(repo, symbols):
     """parse the list of symbols and returns a set of revision numbers of hidden
@@ -1950,7 +2180,7 @@
     unficl = unfi.changelog
     cl = repo.changelog
     tiprev = len(unficl)
-    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
+    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
     for s in symbols:
         try:
             n = int(s)
@@ -1976,11 +2206,16 @@
 
     return revs
 
+
 def bookmarkrevs(repo, mark):
     """
     Select revisions reachable by a given bookmark
     """
-    return repo.revs("ancestors(bookmark(%s)) - "
-                     "ancestors(head() and not bookmark(%s)) - "
-                     "ancestors(bookmark() and not bookmark(%s))",
-                     mark, mark, mark)
+    return repo.revs(
+        b"ancestors(bookmark(%s)) - "
+        b"ancestors(head() and not bookmark(%s)) - "
+        b"ancestors(bookmark() and not bookmark(%s))",
+        mark,
+        mark,
+        mark,
+    )
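
The scmutil.py hunks above all apply one mechanical porting pattern: string
literals naming Mercurial-internal values gain a b'' prefix (Mercurial
operates on bytes, and an unprefixed literal is unicode on Python 3),
dict.iteritems() calls go through pycompat.iteritems(), and long expressions
are reflowed by an automatic formatter. A minimal sketch of such an
iteritems() shim, assuming a simplified version of what
mercurial/pycompat.py provides::

    import sys

    if sys.version_info[0] >= 3:
        def iteritems(d):
            # Python 3: items() already returns a cheap view object
            return iter(d.items())
    else:
        def iteritems(d):
            # Python 2: iteritems() avoids materializing a full list
            return d.iteritems()

    config = {b'ui': b'relative-paths'}
    for k, v in iteritems(config):
        assert isinstance(k, bytes) and isinstance(v, bytes)
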
--- a/mercurial/scmwindows.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/scmwindows.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,51 +11,55 @@
 
 try:
     import _winreg as winreg
+
     winreg.CloseKey
 except ImportError:
     import winreg
 
 # MS-DOS 'more' is the only pager available by default on Windows.
-fallbackpager = 'more'
+fallbackpager = b'more'
+
 
 def systemrcpath():
     '''return default os-specific hgrc search path'''
     rcpath = []
     filename = win32.executablepath()
     # Use mercurial.ini found in directory with hg.exe
-    progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
+    progrc = os.path.join(os.path.dirname(filename), b'mercurial.ini')
     rcpath.append(progrc)
     # Use hgrc.d found in directory with hg.exe
-    progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
+    progrcd = os.path.join(os.path.dirname(filename), b'hgrc.d')
     if os.path.isdir(progrcd):
         for f, kind in util.listdir(progrcd):
-            if f.endswith('.rc'):
+            if f.endswith(b'.rc'):
                 rcpath.append(os.path.join(progrcd, f))
     # else look for a system rcpath in the registry
-    value = util.lookupreg('SOFTWARE\\Mercurial', None,
-                           winreg.HKEY_LOCAL_MACHINE)
+    value = util.lookupreg(
+        b'SOFTWARE\\Mercurial', None, winreg.HKEY_LOCAL_MACHINE
+    )
     if not isinstance(value, str) or not value:
         return rcpath
     value = util.localpath(value)
     for p in value.split(pycompat.ospathsep):
-        if p.lower().endswith('mercurial.ini'):
+        if p.lower().endswith(b'mercurial.ini'):
             rcpath.append(p)
         elif os.path.isdir(p):
             for f, kind in util.listdir(p):
-                if f.endswith('.rc'):
+                if f.endswith(b'.rc'):
                     rcpath.append(os.path.join(p, f))
     return rcpath
 
+
 def userrcpath():
     '''return os-specific hgrc search path to the user dir'''
-    home = os.path.expanduser('~')
-    path = [os.path.join(home, 'mercurial.ini'),
-            os.path.join(home, '.hgrc')]
-    userprofile = encoding.environ.get('USERPROFILE')
+    home = os.path.expanduser(b'~')
+    path = [os.path.join(home, b'mercurial.ini'), os.path.join(home, b'.hgrc')]
+    userprofile = encoding.environ.get(b'USERPROFILE')
     if userprofile and userprofile != home:
-        path.append(os.path.join(userprofile, 'mercurial.ini'))
-        path.append(os.path.join(userprofile, '.hgrc'))
+        path.append(os.path.join(userprofile, b'mercurial.ini'))
+        path.append(os.path.join(userprofile, b'.hgrc'))
     return path
 
+
 def termsize(ui):
     return win32.termsize()
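
The rewritten systemrcpath() and userrcpath() preserve the existing Windows
lookup order: configuration next to hg.exe is consulted first, then the
user's home directory and, when different, %USERPROFILE%. A rough sketch of
the combined order, using illustrative inputs (the real functions also
consult the registry and hgrc.d directories)::

    import os

    def examplercpath(exedir, home, userprofile=None):
        # system: mercurial.ini sitting next to hg.exe
        paths = [os.path.join(exedir, b'mercurial.ini')]
        # user: mercurial.ini and .hgrc in the home directory
        paths += [os.path.join(home, b'mercurial.ini'),
                  os.path.join(home, b'.hgrc')]
        # plus %USERPROFILE% when it differs from the home directory
        if userprofile and userprofile != home:
            paths += [os.path.join(userprofile, b'mercurial.ini'),
                      os.path.join(userprofile, b'.hgrc')]
        return paths
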
--- a/mercurial/server.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/server.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,6 +10,7 @@
 import os
 
 from .i18n import _
+from .pycompat import open
 
 from . import (
     chgserver,
@@ -21,38 +22,48 @@
     util,
 )
 
-from .utils import (
-    procutil,
-)
+from .utils import procutil
+
 
-def runservice(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
-               runargs=None, appendpid=False):
+def runservice(
+    opts,
+    parentfn=None,
+    initfn=None,
+    runfn=None,
+    logfile=None,
+    runargs=None,
+    appendpid=False,
+):
     '''Run a command as a service.'''
 
     postexecargs = {}
 
-    if opts['daemon_postexec']:
-        for inst in opts['daemon_postexec']:
-            if inst.startswith('unlink:'):
-                postexecargs['unlink'] = inst[7:]
-            elif inst.startswith('chdir:'):
-                postexecargs['chdir'] = inst[6:]
-            elif inst != 'none':
-                raise error.Abort(_('invalid value for --daemon-postexec: %s')
-                                  % inst)
+    if opts[b'daemon_postexec']:
+        for inst in opts[b'daemon_postexec']:
+            if inst.startswith(b'unlink:'):
+                postexecargs[b'unlink'] = inst[7:]
+            elif inst.startswith(b'chdir:'):
+                postexecargs[b'chdir'] = inst[6:]
+            elif inst != b'none':
+                raise error.Abort(
+                    _(b'invalid value for --daemon-postexec: %s') % inst
+                )
 
     # When daemonized on Windows, redirect stdout/stderr to the lockfile (which
     # gets cleaned up after the child is up and running), so that the parent can
     # read and print the error if this child dies early.  See 594dd384803c.  On
     # other platforms, the child can write to the parent's stdio directly, until
     # it is redirected prior to runfn().
-    if pycompat.iswindows and opts['daemon_postexec']:
-        if 'unlink' in postexecargs and os.path.exists(postexecargs['unlink']):
+    if pycompat.iswindows and opts[b'daemon_postexec']:
+        if b'unlink' in postexecargs and os.path.exists(
+            postexecargs[b'unlink']
+        ):
             procutil.stdout.flush()
             procutil.stderr.flush()
 
-            fd = os.open(postexecargs['unlink'],
-                         os.O_WRONLY | os.O_APPEND | os.O_BINARY)
+            fd = os.open(
+                postexecargs[b'unlink'], os.O_WRONLY | os.O_APPEND | os.O_BINARY
+            )
             try:
                 os.dup2(fd, procutil.stdout.fileno())
                 os.dup2(fd, procutil.stderr.fileno())
@@ -60,43 +71,45 @@
                 os.close(fd)
 
     def writepid(pid):
-        if opts['pid_file']:
+        if opts[b'pid_file']:
             if appendpid:
-                mode = 'ab'
+                mode = b'ab'
             else:
-                mode = 'wb'
-            fp = open(opts['pid_file'], mode)
-            fp.write('%d\n' % pid)
+                mode = b'wb'
+            fp = open(opts[b'pid_file'], mode)
+            fp.write(b'%d\n' % pid)
             fp.close()
 
-    if opts['daemon'] and not opts['daemon_postexec']:
+    if opts[b'daemon'] and not opts[b'daemon_postexec']:
         # Signal child process startup with file removal
-        lockfd, lockpath = pycompat.mkstemp(prefix='hg-service-')
+        lockfd, lockpath = pycompat.mkstemp(prefix=b'hg-service-')
         os.close(lockfd)
         try:
             if not runargs:
                 runargs = procutil.hgcmd() + pycompat.sysargv[1:]
-            runargs.append('--daemon-postexec=unlink:%s' % lockpath)
+            runargs.append(b'--daemon-postexec=unlink:%s' % lockpath)
             # Don't pass --cwd to the child process, because we've already
             # changed directory.
             for i in pycompat.xrange(1, len(runargs)):
-                if runargs[i].startswith('--cwd='):
+                if runargs[i].startswith(b'--cwd='):
                     del runargs[i]
                     break
-                elif runargs[i].startswith('--cwd'):
-                    del runargs[i:i + 2]
+                elif runargs[i].startswith(b'--cwd'):
+                    del runargs[i : i + 2]
                     break
+
             def condfn():
                 return not os.path.exists(lockpath)
+
             pid = procutil.rundetached(runargs, condfn)
             if pid < 0:
                 # If the daemonized process managed to write out an error msg,
                 # report it.
                 if pycompat.iswindows and os.path.exists(lockpath):
-                    with open(lockpath, 'rb') as log:
+                    with open(lockpath, b'rb') as log:
                         for line in log:
                             procutil.stderr.write(line)
-                raise error.Abort(_('child process failed to start'))
+                raise error.Abort(_(b'child process failed to start'))
             writepid(pid)
         finally:
             util.tryunlink(lockpath)
@@ -108,17 +121,17 @@
     if initfn:
         initfn()
 
-    if not opts['daemon']:
+    if not opts[b'daemon']:
         writepid(procutil.getpid())
 
-    if opts['daemon_postexec']:
+    if opts[b'daemon_postexec']:
         try:
             os.setsid()
         except AttributeError:
             pass
 
-        if 'chdir' in postexecargs:
-            os.chdir(postexecargs['chdir'])
+        if b'chdir' in postexecargs:
+            os.chdir(postexecargs[b'chdir'])
         procutil.hidewindow()
         procutil.stdout.flush()
         procutil.stderr.flush()
@@ -126,13 +139,17 @@
         nullfd = os.open(os.devnull, os.O_RDWR)
         logfilefd = nullfd
         if logfile:
-            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND,
-                                0o666)
+            logfilefd = os.open(
+                logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND, 0o666
+            )
         os.dup2(nullfd, procutil.stdin.fileno())
         os.dup2(logfilefd, procutil.stdout.fileno())
         os.dup2(logfilefd, procutil.stderr.fileno())
-        stdio = (procutil.stdin.fileno(), procutil.stdout.fileno(),
-                 procutil.stderr.fileno())
+        stdio = (
+            procutil.stdin.fileno(),
+            procutil.stdout.fileno(),
+            procutil.stderr.fileno(),
+        )
         if nullfd not in stdio:
             os.close(nullfd)
         if logfile and logfilefd not in stdio:
@@ -140,31 +157,34 @@
 
         # Only unlink after redirecting stdout/stderr, so Windows doesn't
         # complain about a sharing violation.
-        if 'unlink' in postexecargs:
-            os.unlink(postexecargs['unlink'])
+        if b'unlink' in postexecargs:
+            os.unlink(postexecargs[b'unlink'])
 
     if runfn:
         return runfn()
 
+
 _cmdservicemap = {
-    'chgunix': chgserver.chgunixservice,
-    'pipe': commandserver.pipeservice,
-    'unix': commandserver.unixforkingservice,
+    b'chgunix': chgserver.chgunixservice,
+    b'pipe': commandserver.pipeservice,
+    b'unix': commandserver.unixforkingservice,
 }
 
+
 def _createcmdservice(ui, repo, opts):
-    mode = opts['cmdserver']
+    mode = opts[b'cmdserver']
     try:
         servicefn = _cmdservicemap[mode]
     except KeyError:
-        raise error.Abort(_('unknown mode %s') % mode)
+        raise error.Abort(_(b'unknown mode %s') % mode)
     commandserver.setuplogging(ui, repo)
     return servicefn(ui, repo, opts)
 
+
 def _createhgwebservice(ui, repo, opts):
     # this way we can check if something was given on the command line
-    if opts.get('port'):
-        opts['port'] = util.getport(opts.get('port'))
+    if opts.get(b'port'):
+        opts[b'port'] = util.getport(opts.get(b'port'))
 
     alluis = {ui}
     if repo:
@@ -172,41 +192,44 @@
         alluis.update([repo.baseui, repo.ui])
     else:
         baseui = ui
-    webconf = opts.get('web_conf') or opts.get('webdir_conf')
+    webconf = opts.get(b'web_conf') or opts.get(b'webdir_conf')
     if webconf:
-        if opts.get('subrepos'):
-            raise error.Abort(_('--web-conf cannot be used with --subrepos'))
+        if opts.get(b'subrepos'):
+            raise error.Abort(_(b'--web-conf cannot be used with --subrepos'))
 
         # load server settings (e.g. web.port) to "copied" ui, which allows
         # hgwebdir to reload webconf cleanly
         servui = ui.copy()
-        servui.readconfig(webconf, sections=['web'])
+        servui.readconfig(webconf, sections=[b'web'])
         alluis.add(servui)
-    elif opts.get('subrepos'):
+    elif opts.get(b'subrepos'):
         servui = ui
 
         # If repo is None, hgweb.createapp() already raises a proper abort
         # message as long as webconf is None.
         if repo:
             webconf = dict()
-            cmdutil.addwebdirpath(repo, "", webconf)
+            cmdutil.addwebdirpath(repo, b"", webconf)
     else:
         servui = ui
 
-    optlist = ("name templates style address port prefix ipv6"
-               " accesslog errorlog certificate encoding")
+    optlist = (
+        b"name templates style address port prefix ipv6"
+        b" accesslog errorlog certificate encoding"
+    )
     for o in optlist.split():
-        val = opts.get(o, '')
-        if val in (None, ''): # should check against default options instead
+        val = opts.get(o, b'')
+        if val in (None, b''):  # should check against default options instead
             continue
         for u in alluis:
-            u.setconfig("web", o, val, 'serve')
+            u.setconfig(b"web", o, val, b'serve')
 
     app = hgweb.createapp(baseui, repo, webconf)
     return hgweb.httpservice(servui, app, opts)
 
+
 def createservice(ui, repo, opts):
-    if opts["cmdserver"]:
+    if opts[b"cmdserver"]:
         return _createcmdservice(ui, repo, opts)
     else:
         return _createhgwebservice(ui, repo, opts)
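# Illustrative sketch of the dispatch used by createservice above: a mode
# string selects a factory from a map, and anything else aborts.  The map
# contents here are hypothetical stand-ins for _cmdservicemap.
def pickservice(servicemap, mode):
    try:
        return servicemap[mode]
    except KeyError:
        raise LookupError('unknown mode %s' % mode)

assert pickservice({b'pipe': tuple}, b'pipe') is tuple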
--- a/mercurial/setdiscovery.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/setdiscovery.py	Mon Oct 21 11:09:48 2019 -0400
@@ -52,9 +52,11 @@
 )
 from . import (
     error,
+    policy,
     util,
 )
 
+
 def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
     """update an existing sample to match the expected size
 
@@ -92,11 +94,21 @@
                 dist.setdefault(p, d + 1)
                 visit.append(p)
 
-def _limitsample(sample, desiredlen):
-    """return a random subset of sample of at most desiredlen item"""
-    if len(sample) > desiredlen:
-        sample = set(random.sample(sample, desiredlen))
-    return sample
+
+def _limitsample(sample, desiredlen, randomize=True):
+    """return a random subset of sample of at most desiredlen item.
+
+    If randomize is False, though, a deterministic subset is returned.
+    This is meant for integration tests.
+    """
+    if len(sample) <= desiredlen:
+        return sample
+    if randomize:
+        return set(random.sample(sample, desiredlen))
+    sample = list(sample)
+    sample.sort()
+    return set(sample[:desiredlen])
+
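# Illustrative sketch of the behavior added above: with randomize=False the
# subset is obtained by sorting and slicing, so repeated runs (and therefore
# integration tests) see identical discovery samples.
import random

def limitsample(sample, desiredlen, randomize=True):
    if len(sample) <= desiredlen:
        return sample
    if randomize:
        return set(random.sample(list(sample), desiredlen))
    return set(sorted(sample)[:desiredlen])

assert limitsample({9, 3, 5, 1}, 2, randomize=False) == {1, 3}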
 
 class partialdiscovery(object):
     """an object representing ongoing discovery
@@ -110,7 +122,7 @@
     (all tracked revisions are known locally)
     """
 
-    def __init__(self, repo, targetheads, respectsize):
+    def __init__(self, repo, targetheads, respectsize, randomize=True):
         self._repo = repo
         self._targetheads = targetheads
         self._common = repo.changelog.incrementalmissingrevs()
@@ -118,6 +130,7 @@
         self.missing = set()
         self._childrenmap = None
         self._respectsize = respectsize
+        self.randomize = randomize
 
     def addcommons(self, commons):
         """register nodes known as common"""
@@ -127,7 +140,7 @@
 
     def addmissings(self, missings):
         """register some nodes as missing"""
-        newmissing = self._repo.revs('%ld::%ld', missings, self.undecided)
+        newmissing = self._repo.revs(b'%ld::%ld', missings, self.undecided)
         if newmissing:
             self.missing.update(newmissing)
             self.undecided.difference_update(newmissing)
@@ -174,8 +187,10 @@
 
     def _parentsgetter(self):
         getrev = self._repo.changelog.index.__getitem__
+
         def getparents(r):
             return getrev(r)[5:7]
+
         return getparents
 
     def _childrengetter(self):
@@ -219,13 +234,14 @@
         revs = self.undecided
         if len(revs) <= size:
             return list(revs)
-        sample = set(self._repo.revs('heads(%ld)', revs))
+        sample = set(self._repo.revs(b'heads(%ld)', revs))
 
         if len(sample) >= size:
-            return _limitsample(sample, size)
+            return _limitsample(sample, size, randomize=self.randomize)
 
-        _updatesample(None, headrevs, sample, self._parentsgetter(),
-                      quicksamplesize=size)
+        _updatesample(
+            None, headrevs, sample, self._parentsgetter(), quicksamplesize=size
+        )
         return sample
 
     def takefullsample(self, headrevs, size):
@@ -233,7 +249,7 @@
         if len(revs) <= size:
             return list(revs)
         repo = self._repo
-        sample = set(repo.revs('heads(%ld)', revs))
+        sample = set(repo.revs(b'heads(%ld)', revs))
         parentrevs = self._parentsgetter()
 
         # update from heads
@@ -241,7 +257,7 @@
         _updatesample(revs, revsheads, sample, parentrevs)
 
         # update from roots
-        revsroots = set(repo.revs('roots(%ld)', revs))
+        revsroots = set(repo.revs(b'roots(%ld)', revs))
         childrenrevs = self._childrengetter()
         _updatesample(revs, revsroots, sample, childrenrevs)
         assert sample
@@ -249,18 +265,33 @@
         if not self._respectsize:
             size = max(size, min(len(revsroots), len(revsheads)))
 
-        sample = _limitsample(sample, size)
+        sample = _limitsample(sample, size, randomize=self.randomize)
         if len(sample) < size:
             more = size - len(sample)
-            sample.update(random.sample(list(revs - sample), more))
+            takefrom = list(revs - sample)
+            if self.randomize:
+                sample.update(random.sample(takefrom, more))
+            else:
+                takefrom.sort()
+                sample.update(takefrom[:more])
         return sample
 
-def findcommonheads(ui, local, remote,
-                    initialsamplesize=100,
-                    fullsamplesize=200,
-                    abortwhenunrelated=True,
-                    ancestorsof=None,
-                    samplegrowth=1.05):
+
+partialdiscovery = policy.importrust(
+    r'discovery', member=r'PartialDiscovery', default=partialdiscovery
+)
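# Illustrative sketch of the importrust pattern used above: prefer a
# Rust-backed implementation when the compiled module is available, and
# fall back to the pure Python class otherwise.  The module name below is
# a hypothetical stand-in.
def importrust_like(member, default):
    try:
        import rustext_stub  # hypothetical compiled module
        return getattr(rustext_stub, member, default)
    except ImportError:
        return default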
+
+
+def findcommonheads(
+    ui,
+    local,
+    remote,
+    initialsamplesize=100,
+    fullsamplesize=200,
+    abortwhenunrelated=True,
+    ancestorsof=None,
+    samplegrowth=1.05,
+):
     '''Return a tuple (common, anyincoming, remoteheads) used to identify
     missing nodes from or in remote.
     '''
@@ -277,7 +308,7 @@
         ownheads = [rev for rev in cl.headrevs() if rev != nullrev]
 
     # early exit if we know all the specified remote heads already
-    ui.debug("query 1; heads\n")
+    ui.debug(b"query 1; heads\n")
     roundtrips += 1
     # We also ask remote about all the local heads. That set can be arbitrarily
     # large, so we used to limit its size to `initialsamplesize`. We no longer
@@ -338,10 +369,10 @@
         sample = ownheads
 
     with remote.commandexecutor() as e:
-        fheads = e.callcommand('heads', {})
-        fknown = e.callcommand('known', {
-            'nodes': [clnode(r) for r in sample],
-        })
+        fheads = e.callcommand(b'heads', {})
+        fknown = e.callcommand(
+            b'known', {b'nodes': [clnode(r) for r in sample],}
+        )
 
     srvheadhashes, yesno = fheads.result(), fknown.result()
 
@@ -352,7 +383,7 @@
 
     # start actual discovery (we note this before the next "if" for
     # compatibility reasons)
-    ui.status(_("searching for changes\n"))
+    ui.status(_(b"searching for changes\n"))
 
     knownsrvheads = []  # revnos of remote heads that are known locally
     for node in srvheadhashes:
@@ -366,38 +397,41 @@
             continue
 
     if len(knownsrvheads) == len(srvheadhashes):
-        ui.debug("all remote heads known locally\n")
+        ui.debug(b"all remote heads known locally\n")
         return srvheadhashes, False, srvheadhashes
 
     if len(sample) == len(ownheads) and all(yesno):
-        ui.note(_("all local heads known remotely\n"))
+        ui.note(_(b"all local changesets known remotely\n"))
         ownheadhashes = [clnode(r) for r in ownheads]
         return ownheadhashes, True, srvheadhashes
 
     # full blown discovery
 
-    disco = partialdiscovery(local, ownheads, remote.limitedarguments)
+    randomize = ui.configbool(b'devel', b'discovery.randomize')
+    disco = partialdiscovery(
+        local, ownheads, remote.limitedarguments, randomize=randomize
+    )
     # treat remote heads (and maybe own heads) as a first implicit sample
     # response
     disco.addcommons(knownsrvheads)
     disco.addinfo(zip(sample, yesno))
 
     full = False
-    progress = ui.makeprogress(_('searching'), unit=_('queries'))
+    progress = ui.makeprogress(_(b'searching'), unit=_(b'queries'))
     while not disco.iscomplete():
 
         if full or disco.hasinfo():
             if full:
-                ui.note(_("sampling from both directions\n"))
+                ui.note(_(b"sampling from both directions\n"))
             else:
-                ui.debug("taking initial sample\n")
+                ui.debug(b"taking initial sample\n")
             samplefunc = disco.takefullsample
             targetsize = fullsamplesize
             if not remote.limitedarguments:
                 fullsamplesize = int(fullsamplesize * samplegrowth)
         else:
             # use even cheaper initial sample
-            ui.debug("taking quick initial sample\n")
+            ui.debug(b"taking quick initial sample\n")
             samplefunc = disco.takequicksample
             targetsize = initialsamplesize
         sample = samplefunc(ownheads, targetsize)
@@ -405,16 +439,18 @@
         roundtrips += 1
         progress.update(roundtrips)
         stats = disco.stats()
-        ui.debug("query %i; still undecided: %i, sample size is: %i\n"
-                 % (roundtrips, stats['undecided'], len(sample)))
+        ui.debug(
+            b"query %i; still undecided: %i, sample size is: %i\n"
+            % (roundtrips, stats['undecided'], len(sample))
+        )
 
         # indices between sample and externalized version must match
         sample = list(sample)
 
         with remote.commandexecutor() as e:
-            yesno = e.callcommand('known', {
-                'nodes': [clnode(r) for r in sample],
-            }).result()
+            yesno = e.callcommand(
+                b'known', {b'nodes': [clnode(r) for r in sample],}
+            ).result()
 
         full = True
 
@@ -423,20 +459,25 @@
     result = disco.commonheads()
     elapsed = util.timer() - start
     progress.complete()
-    ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
-    msg = ('found %d common and %d unknown server heads,'
-           ' %d roundtrips in %.4fs\n')
+    ui.debug(b"%d total queries in %.4fs\n" % (roundtrips, elapsed))
+    msg = (
+        b'found %d common and %d unknown server heads,'
+        b' %d roundtrips in %.4fs\n'
+    )
     missing = set(result) - set(knownsrvheads)
-    ui.log('discovery', msg, len(result), len(missing), roundtrips,
-           elapsed)
+    ui.log(b'discovery', msg, len(result), len(missing), roundtrips, elapsed)
 
     if not result and srvheadhashes != [nullid]:
         if abortwhenunrelated:
-            raise error.Abort(_("repository is unrelated"))
+            raise error.Abort(_(b"repository is unrelated"))
         else:
-            ui.warn(_("warning: repository is unrelated\n"))
-        return ({nullid}, True, srvheadhashes,)
+            ui.warn(_(b"warning: repository is unrelated\n"))
+        return (
+            {nullid},
+            True,
+            srvheadhashes,
+        )
 
-    anyincoming = (srvheadhashes != [nullid])
+    anyincoming = srvheadhashes != [nullid]
     result = {clnode(r) for r in result}
     return result, anyincoming, srvheadhashes
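# Illustrative sketch of the loop above, with the wire protocol and sampling
# abstracted into assumed callables: each round samples undecided revisions,
# asks the remote which of them it knows, and feeds the answers back until
# everything is classified common or missing.
def discoverloop(disco, samplefn, queryremote, size):
    roundtrips = 0
    while not disco.iscomplete():
        sample = list(samplefn(size))
        yesno = queryremote(sample)  # one wire round trip
        disco.addinfo(zip(sample, yesno))
        roundtrips += 1
    return disco.commonheads(), roundtrips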
--- a/mercurial/shelve.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/shelve.py	Mon Oct 21 11:09:48 2019 -0400
@@ -28,6 +28,7 @@
 import stat
 
 from .i18n import _
+from .pycompat import open
 from . import (
     bookmarks,
     bundle2,
@@ -56,21 +57,23 @@
     stringutil,
 )
 
-backupdir = 'shelve-backup'
-shelvedir = 'shelved'
-shelvefileextensions = ['hg', 'patch', 'shelve']
+backupdir = b'shelve-backup'
+shelvedir = b'shelved'
+shelvefileextensions = [b'hg', b'patch', b'shelve']
 # universal extension is present in all types of shelves
-patchextension = 'patch'
+patchextension = b'patch'
 
 # we never need the user, so we use a
 # generic user for all shelve operations
-shelveuser = 'shelve@localhost'
+shelveuser = b'shelve@localhost'
+
 
 class shelvedfile(object):
     """Helper for the file storing a single shelve
 
     Handles common functions on shelve files (.hg/.patch) using
     the vfs layer"""
+
     def __init__(self, repo, name, filetype=None):
         self.repo = repo
         self.name = name
@@ -78,7 +81,7 @@
         self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
         self.ui = self.repo.ui
         if filetype:
-            self.fname = name + '.' + filetype
+            self.fname = name + b'.' + filetype
         else:
             self.fname = name
 
@@ -91,9 +94,9 @@
     def backupfilename(self):
         def gennames(base):
             yield base
-            base, ext = base.rsplit('.', 1)
+            base, ext = base.rsplit(b'.', 1)
             for i in itertools.count(1):
-                yield '%s-%d.%s' % (base, i, ext)
+                yield b'%s-%d.%s' % (base, i, ext)
 
         name = self.backupvfs.join(self.fname)
         for n in gennames(name):
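# Illustrative sketch of the gennames scheme above: the original backup name
# is tried first, then numbered variants until movetobackup finds a free one.
import itertools

def gennames(base):
    yield base
    stem, ext = base.rsplit('.', 1)
    for i in itertools.count(1):
        yield '%s-%d.%s' % (stem, i, ext)

names = gennames('default.patch')
assert next(names) == 'default.patch'
assert next(names) == 'default-1.patch'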
@@ -108,13 +111,13 @@
     def stat(self):
         return self.vfs.stat(self.fname)
 
-    def opener(self, mode='rb'):
+    def opener(self, mode=b'rb'):
         try:
             return self.vfs(self.fname, mode)
         except IOError as err:
             if err.errno != errno.ENOENT:
                 raise
-            raise error.Abort(_("shelved change '%s' not found") % self.name)
+            raise error.Abort(_(b"shelved change '%s' not found") % self.name)
 
     def applybundle(self, tr):
         fp = self.opener()
@@ -123,14 +126,18 @@
             if not phases.supportinternal(self.repo):
                 targetphase = phases.secret
             gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
-            pretip = self.repo['tip']
-            bundle2.applybundle(self.repo, gen, tr,
-                                source='unshelve',
-                                url='bundle:' + self.vfs.join(self.fname),
-                                targetphase=targetphase)
-            shelvectx = self.repo['tip']
+            pretip = self.repo[b'tip']
+            bundle2.applybundle(
+                self.repo,
+                gen,
+                tr,
+                source=b'unshelve',
+                url=b'bundle:' + self.vfs.join(self.fname),
+                targetphase=targetphase,
+            )
+            shelvectx = self.repo[b'tip']
             if pretip == shelvectx:
-                shelverev = tr.changes['revduplicates'][-1]
+                shelverev = tr.changes[b'revduplicates'][-1]
                 shelvectx = self.repo[shelverev]
             return shelvectx
         finally:
@@ -138,26 +145,29 @@
 
     def bundlerepo(self):
         path = self.vfs.join(self.fname)
-        return bundlerepo.instance(self.repo.baseui,
-                                   'bundle://%s+%s' % (self.repo.root, path))
+        return bundlerepo.instance(
+            self.repo.baseui, b'bundle://%s+%s' % (self.repo.root, path)
+        )
 
     def writebundle(self, bases, node):
         cgversion = changegroup.safeversion(self.repo)
-        if cgversion == '01':
-            btype = 'HG10BZ'
+        if cgversion == b'01':
+            btype = b'HG10BZ'
             compression = None
         else:
-            btype = 'HG20'
-            compression = 'BZ'
+            btype = b'HG20'
+            compression = b'BZ'
 
         repo = self.repo.unfiltered()
 
-        outgoing = discovery.outgoing(repo, missingroots=bases,
-                                      missingheads=[node])
-        cg = changegroup.makechangegroup(repo, outgoing, cgversion, 'shelve')
+        outgoing = discovery.outgoing(
+            repo, missingroots=bases, missingheads=[node]
+        )
+        cg = changegroup.makechangegroup(repo, outgoing, cgversion, b'shelve')
 
-        bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
-                                compression=compression)
+        bundle2.writebundle(
+            self.ui, cg, self.fname, btype, self.vfs, compression=compression
+        )
 
     def writeinfo(self, info):
         scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
@@ -165,29 +175,32 @@
     def readinfo(self):
         return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
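# Illustrative sketch of the flat key=value serialization that
# simplekeyvaluefile provides for writeinfo/readinfo (escaping and the
# optional first line are omitted here).
def dumpkv(info):
    return b''.join(b'%s=%s\n' % (k, v) for k, v in sorted(info.items()))

assert dumpkv({b'node': b'ff' * 4}) == b'node=ffffffff\n'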
 
+
 class shelvedstate(object):
     """Handle persistence during unshelving operations.
 
     Handles saving and restoring a shelved state. Ensures that different
     versions of a shelved state are possible and handles them appropriately.
     """
+
     _version = 2
-    _filename = 'shelvedstate'
-    _keep = 'keep'
-    _nokeep = 'nokeep'
+    _filename = b'shelvedstate'
+    _keep = b'keep'
+    _nokeep = b'nokeep'
     # colon is essential to differentiate from a real bookmark name
-    _noactivebook = ':no-active-bookmark'
+    _noactivebook = b':no-active-bookmark'
+    _interactive = b'interactive'
 
     @classmethod
     def _verifyandtransform(cls, d):
         """Some basic shelvestate syntactic verification and transformation"""
         try:
-            d['originalwctx'] = nodemod.bin(d['originalwctx'])
-            d['pendingctx'] = nodemod.bin(d['pendingctx'])
-            d['parents'] = [nodemod.bin(h)
-                            for h in d['parents'].split(' ')]
-            d['nodestoremove'] = [nodemod.bin(h)
-                                  for h in d['nodestoremove'].split(' ')]
+            d[b'originalwctx'] = nodemod.bin(d[b'originalwctx'])
+            d[b'pendingctx'] = nodemod.bin(d[b'pendingctx'])
+            d[b'parents'] = [nodemod.bin(h) for h in d[b'parents'].split(b' ')]
+            d[b'nodestoremove'] = [
+                nodemod.bin(h) for h in d[b'nodestoremove'].split(b' ')
+            ]
         except (ValueError, TypeError, KeyError) as err:
             raise error.CorruptedState(pycompat.bytestr(err))
 
@@ -209,8 +222,17 @@
         # Order is important, because the old shelvestate file uses it
         # to determine values of fields (e.g. name is on the second line,
         # originalwctx is on the third and so forth). Please do not change.
-        keys = ['version', 'name', 'originalwctx', 'pendingctx', 'parents',
-                'nodestoremove', 'branchtorestore', 'keep', 'activebook']
+        keys = [
+            b'version',
+            b'name',
+            b'originalwctx',
+            b'pendingctx',
+            b'parents',
+            b'nodestoremove',
+            b'branchtorestore',
+            b'keep',
+            b'activebook',
+        ]
         # this is executed only rarely, so it is not a big deal
         # that we open this file twice
         fp = repo.vfs(cls._filename)
@@ -228,70 +250,91 @@
         if version < cls._version:
             d = cls._readold(repo)
         elif version == cls._version:
-            d = scmutil.simplekeyvaluefile(
-                repo.vfs, cls._filename).read(firstlinenonkeyval=True)
+            d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename).read(
+                firstlinenonkeyval=True
+            )
         else:
-            raise error.Abort(_('this version of shelve is incompatible '
-                                'with the version used in this repo'))
+            raise error.Abort(
+                _(
+                    b'this version of shelve is incompatible '
+                    b'with the version used in this repo'
+                )
+            )
 
         cls._verifyandtransform(d)
         try:
             obj = cls()
-            obj.name = d['name']
-            obj.wctx = repo[d['originalwctx']]
-            obj.pendingctx = repo[d['pendingctx']]
-            obj.parents = d['parents']
-            obj.nodestoremove = d['nodestoremove']
-            obj.branchtorestore = d.get('branchtorestore', '')
-            obj.keep = d.get('keep') == cls._keep
-            obj.activebookmark = ''
-            if d.get('activebook', '') != cls._noactivebook:
-                obj.activebookmark = d.get('activebook', '')
+            obj.name = d[b'name']
+            obj.wctx = repo[d[b'originalwctx']]
+            obj.pendingctx = repo[d[b'pendingctx']]
+            obj.parents = d[b'parents']
+            obj.nodestoremove = d[b'nodestoremove']
+            obj.branchtorestore = d.get(b'branchtorestore', b'')
+            obj.keep = d.get(b'keep') == cls._keep
+            obj.activebookmark = b''
+            if d.get(b'activebook', b'') != cls._noactivebook:
+                obj.activebookmark = d.get(b'activebook', b'')
+            obj.interactive = d.get(b'interactive') == cls._interactive
         except (error.RepoLookupError, KeyError) as err:
             raise error.CorruptedState(pycompat.bytestr(err))
 
         return obj
 
     @classmethod
-    def save(cls, repo, name, originalwctx, pendingctx, nodestoremove,
-             branchtorestore, keep=False, activebook=''):
+    def save(
+        cls,
+        repo,
+        name,
+        originalwctx,
+        pendingctx,
+        nodestoremove,
+        branchtorestore,
+        keep=False,
+        activebook=b'',
+        interactive=False,
+    ):
         info = {
-            "name": name,
-            "originalwctx": nodemod.hex(originalwctx.node()),
-            "pendingctx": nodemod.hex(pendingctx.node()),
-            "parents": ' '.join([nodemod.hex(p)
-                                 for p in repo.dirstate.parents()]),
-            "nodestoremove": ' '.join([nodemod.hex(n)
-                                      for n in nodestoremove]),
-            "branchtorestore": branchtorestore,
-            "keep": cls._keep if keep else cls._nokeep,
-            "activebook": activebook or cls._noactivebook
+            b"name": name,
+            b"originalwctx": nodemod.hex(originalwctx.node()),
+            b"pendingctx": nodemod.hex(pendingctx.node()),
+            b"parents": b' '.join(
+                [nodemod.hex(p) for p in repo.dirstate.parents()]
+            ),
+            b"nodestoremove": b' '.join(
+                [nodemod.hex(n) for n in nodestoremove]
+            ),
+            b"branchtorestore": branchtorestore,
+            b"keep": cls._keep if keep else cls._nokeep,
+            b"activebook": activebook or cls._noactivebook,
         }
-        scmutil.simplekeyvaluefile(
-            repo.vfs, cls._filename).write(info,
-                                           firstline=("%d" % cls._version))
+        if interactive:
+            info[b'interactive'] = cls._interactive
+        scmutil.simplekeyvaluefile(repo.vfs, cls._filename).write(
+            info, firstline=(b"%d" % cls._version)
+        )
 
     @classmethod
     def clear(cls, repo):
         repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
 
+
 def cleanupoldbackups(repo):
     vfs = vfsmod.vfs(repo.vfs.join(backupdir))
-    maxbackups = repo.ui.configint('shelve', 'maxbackups')
-    hgfiles = [f for f in vfs.listdir()
-               if f.endswith('.' + patchextension)]
+    maxbackups = repo.ui.configint(b'shelve', b'maxbackups')
+    hgfiles = [f for f in vfs.listdir() if f.endswith(b'.' + patchextension)]
     hgfiles = sorted([(vfs.stat(f)[stat.ST_MTIME], f) for f in hgfiles])
     if maxbackups > 0 and maxbackups < len(hgfiles):
         bordermtime = hgfiles[-maxbackups][0]
     else:
         bordermtime = None
-    for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
+    for mtime, f in hgfiles[: len(hgfiles) - maxbackups]:
         if mtime == bordermtime:
             # keep it, because timestamp can't decide exact order of backups
             continue
-        base = f[:-(1 + len(patchextension))]
+        base = f[: -(1 + len(patchextension))]
         for ext in shelvefileextensions:
-            vfs.tryunlink(base + '.' + ext)
+            vfs.tryunlink(base + b'.' + ext)
+
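# Illustrative sketch of the pruning rule above: drop everything older than
# the newest maxbackups files, but never a file sharing the border mtime,
# since equal timestamps cannot order backups reliably.
def prunable(files, maxbackups):
    files = sorted(files)  # (mtime, name) pairs, oldest first
    if maxbackups >= len(files):
        return []
    border = files[-maxbackups][0] if maxbackups > 0 else None
    return [name for mtime, name in files[:len(files) - maxbackups]
            if mtime != border]

assert prunable([(1, 'a'), (2, 'b'), (2, 'c'), (3, 'd')], 2) == ['a']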
 
 def _backupactivebookmark(repo):
     activebookmark = repo._activebookmark
@@ -299,43 +342,50 @@
         bookmarks.deactivate(repo)
     return activebookmark
 
+
 def _restoreactivebookmark(repo, mark):
     if mark:
         bookmarks.activate(repo, mark)
 
+
 def _aborttransaction(repo, tr):
     '''Abort current transaction for shelve/unshelve, but keep dirstate
     '''
-    dirstatebackupname = 'dirstate.shelve'
+    dirstatebackupname = b'dirstate.shelve'
     repo.dirstate.savebackup(tr, dirstatebackupname)
     tr.abort()
     repo.dirstate.restorebackup(None, dirstatebackupname)
 
+
 def getshelvename(repo, parent, opts):
     """Decide on the name this shelve is going to have"""
+
     def gennames():
         yield label
         for i in itertools.count(1):
-            yield '%s-%02d' % (label, i)
-    name = opts.get('name')
-    label = repo._activebookmark or parent.branch() or 'default'
+            yield b'%s-%02d' % (label, i)
+
+    name = opts.get(b'name')
+    label = repo._activebookmark or parent.branch() or b'default'
     # slashes aren't allowed in filenames, therefore we replace them
-    label = label.replace('/', '_')
-    label = label.replace('\\', '_')
+    label = label.replace(b'/', b'_')
+    label = label.replace(b'\\', b'_')
     # filenames must not start with '.' so that the file is not hidden
-    if label.startswith('.'):
-        label = label.replace('.', '_', 1)
+    if label.startswith(b'.'):
+        label = label.replace(b'.', b'_', 1)
 
     if name:
         if shelvedfile(repo, name, patchextension).exists():
-            e = _("a shelved change named '%s' already exists") % name
+            e = _(b"a shelved change named '%s' already exists") % name
             raise error.Abort(e)
 
         # ensure we are not creating a subdirectory or a hidden file
-        if '/' in name or '\\' in name:
-            raise error.Abort(_('shelved change names can not contain slashes'))
-        if name.startswith('.'):
-            raise error.Abort(_("shelved change names can not start with '.'"))
+        if b'/' in name or b'\\' in name:
+            raise error.Abort(
+                _(b'shelved change names can not contain slashes')
+            )
+        if name.startswith(b'.'):
+            raise error.Abort(_(b"shelved change names can not start with '.'"))
 
     else:
         for n in gennames():
@@ -345,6 +395,7 @@
 
     return name
 
+
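# Illustrative sketch of the default naming above: the label (active
# bookmark, branch, or b'default') is sanitized, then numbered suffixes
# are generated until a name without an existing .patch file is found.
import itertools

def shelvenames(label):
    label = label.replace('/', '_').replace('\\', '_')
    if label.startswith('.'):
        label = label.replace('.', '_', 1)
    yield label
    for i in itertools.count(1):
        yield '%s-%02d' % (label, i)

g = shelvenames('feature/x')
assert next(g) == 'feature_x'
assert next(g) == 'feature_x-01'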
 def mutableancestors(ctx):
     """return all mutable ancestors for ctx (included)
 
@@ -362,72 +413,88 @@
                 if parent.mutable():
                     visit.append(parent)
 
+
 def getcommitfunc(extra, interactive, editor=False):
     def commitfunc(ui, repo, message, match, opts):
-        hasmq = util.safehasattr(repo, 'mq')
+        hasmq = util.safehasattr(repo, b'mq')
         if hasmq:
             saved, repo.mq.checkapplied = repo.mq.checkapplied, False
 
         targetphase = phases.internal
         if not phases.supportinternal(repo):
             targetphase = phases.secret
-        overrides = {('phases', 'new-commit'): targetphase}
+        overrides = {(b'phases', b'new-commit'): targetphase}
         try:
             editor_ = False
             if editor:
-                editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
-                                                  **pycompat.strkwargs(opts))
+                editor_ = cmdutil.getcommiteditor(
+                    editform=b'shelve.shelve', **pycompat.strkwargs(opts)
+                )
             with repo.ui.configoverride(overrides):
-                return repo.commit(message, shelveuser, opts.get('date'),
-                                   match, editor=editor_, extra=extra)
+                return repo.commit(
+                    message,
+                    shelveuser,
+                    opts.get(b'date'),
+                    match,
+                    editor=editor_,
+                    extra=extra,
+                )
         finally:
             if hasmq:
                 repo.mq.checkapplied = saved
 
     def interactivecommitfunc(ui, repo, *pats, **opts):
         opts = pycompat.byteskwargs(opts)
-        match = scmutil.match(repo['.'], pats, {})
-        message = opts['message']
+        match = scmutil.match(repo[b'.'], pats, {})
+        message = opts[b'message']
         return commitfunc(ui, repo, message, match, opts)
 
     return interactivecommitfunc if interactive else commitfunc
 
+
 def _nothingtoshelvemessaging(ui, repo, pats, opts):
     stat = repo.status(match=scmutil.match(repo[None], pats, opts))
     if stat.deleted:
-        ui.status(_("nothing changed (%d missing files, see "
-                    "'hg status')\n") % len(stat.deleted))
+        ui.status(
+            _(b"nothing changed (%d missing files, see 'hg status')\n")
+            % len(stat.deleted)
+        )
     else:
-        ui.status(_("nothing changed\n"))
+        ui.status(_(b"nothing changed\n"))
+
 
 def _shelvecreatedcommit(repo, node, name, match):
-    info = {'node': nodemod.hex(node)}
-    shelvedfile(repo, name, 'shelve').writeinfo(info)
+    info = {b'node': nodemod.hex(node)}
+    shelvedfile(repo, name, b'shelve').writeinfo(info)
     bases = list(mutableancestors(repo[node]))
-    shelvedfile(repo, name, 'hg').writebundle(bases, node)
-    with shelvedfile(repo, name, patchextension).opener('wb') as fp:
-        cmdutil.exportfile(repo, [node], fp, opts=mdiff.diffopts(git=True),
-                           match=match)
+    shelvedfile(repo, name, b'hg').writebundle(bases, node)
+    with shelvedfile(repo, name, patchextension).opener(b'wb') as fp:
+        cmdutil.exportfile(
+            repo, [node], fp, opts=mdiff.diffopts(git=True), match=match
+        )
+
 
 def _includeunknownfiles(repo, pats, opts, extra):
-    s = repo.status(match=scmutil.match(repo[None], pats, opts),
-                    unknown=True)
+    s = repo.status(match=scmutil.match(repo[None], pats, opts), unknown=True)
     if s.unknown:
-        extra['shelve_unknown'] = '\0'.join(s.unknown)
+        extra[b'shelve_unknown'] = b'\0'.join(s.unknown)
         repo[None].add(s.unknown)
 
+
 def _finishshelve(repo, tr):
     if phases.supportinternal(repo):
         tr.close()
     else:
         _aborttransaction(repo, tr)
 
+
 def createcmd(ui, repo, pats, opts):
     """subcommand that creates a new shelve"""
     with repo.wlock():
         cmdutil.checkunfinished(repo)
         return _docreatecmd(ui, repo, pats, opts)
 
+
 def _docreatecmd(ui, repo, pats, opts):
     wctx = repo[None]
     parents = wctx.parents()
@@ -435,12 +502,12 @@
     origbranch = wctx.branch()
 
     if parent.node() != nodemod.nullid:
-        desc = "changes to: %s" % parent.description().split('\n', 1)[0]
+        desc = b"changes to: %s" % parent.description().split(b'\n', 1)[0]
     else:
-        desc = '(changes in empty repository)'
+        desc = b'(changes in empty repository)'
 
-    if not opts.get('message'):
-        opts['message'] = desc
+    if not opts.get(b'message'):
+        opts[b'message'] = desc
 
     lock = tr = activebookmark = None
     try:
@@ -448,30 +515,38 @@
 
         # use an uncommitted transaction to generate the bundle to avoid
         # pull races. ensure we don't print the abort message to stderr.
-        tr = repo.transaction('shelve', report=lambda x: None)
+        tr = repo.transaction(b'shelve', report=lambda x: None)
 
-        interactive = opts.get('interactive', False)
-        includeunknown = (opts.get('unknown', False) and
-                          not opts.get('addremove', False))
+        interactive = opts.get(b'interactive', False)
+        includeunknown = opts.get(b'unknown', False) and not opts.get(
+            b'addremove', False
+        )
 
         name = getshelvename(repo, parent, opts)
         activebookmark = _backupactivebookmark(repo)
-        extra = {'internal': 'shelve'}
+        extra = {b'internal': b'shelve'}
         if includeunknown:
             _includeunknownfiles(repo, pats, opts, extra)
 
         if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
             # In a non-bare shelve we don't store the newly created branch
             # in the bundled commit
-            repo.dirstate.setbranch(repo['.'].branch())
+            repo.dirstate.setbranch(repo[b'.'].branch())
 
         commitfunc = getcommitfunc(extra, interactive, editor=True)
         if not interactive:
             node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
         else:
-            node = cmdutil.dorecord(ui, repo, commitfunc, None,
-                                    False, cmdutil.recordfilter, *pats,
-                                    **pycompat.strkwargs(opts))
+            node = cmdutil.dorecord(
+                ui,
+                repo,
+                commitfunc,
+                None,
+                False,
+                cmdutil.recordfilter,
+                *pats,
+                **pycompat.strkwargs(opts)
+            )
         if not node:
             _nothingtoshelvemessaging(ui, repo, pats, opts)
             return 1
@@ -484,13 +559,13 @@
 
         if ui.formatted():
             desc = stringutil.ellipsis(desc, ui.termwidth())
-        ui.status(_('shelved as %s\n') % name)
-        if opts['keep']:
+        ui.status(_(b'shelved as %s\n') % name)
+        if opts[b'keep']:
             with repo.dirstate.parentchange():
                 scmutil.movedirstate(repo, parent, match)
         else:
             hg.update(repo, parent.node())
-        if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
+        if origbranch != repo[b'.'].branch() and not _isbareshelve(pats, opts):
             repo.dirstate.setbranch(origbranch)
 
         _finishshelve(repo, tr)
@@ -498,29 +573,35 @@
         _restoreactivebookmark(repo, activebookmark)
         lockmod.release(tr, lock)
 
+
 def _isbareshelve(pats, opts):
-    return (not pats
-            and not opts.get('interactive', False)
-            and not opts.get('include', False)
-            and not opts.get('exclude', False))
+    return (
+        not pats
+        and not opts.get(b'interactive', False)
+        and not opts.get(b'include', False)
+        and not opts.get(b'exclude', False)
+    )
+
 
 def _iswctxonnewbranch(repo):
-    return repo[None].branch() != repo['.'].branch()
+    return repo[None].branch() != repo[b'.'].branch()
+
 
 def cleanupcmd(ui, repo):
     """subcommand that deletes all shelves"""
 
     with repo.wlock():
         for (name, _type) in repo.vfs.readdir(shelvedir):
-            suffix = name.rsplit('.', 1)[-1]
+            suffix = name.rsplit(b'.', 1)[-1]
             if suffix in shelvefileextensions:
                 shelvedfile(repo, name).movetobackup()
             cleanupoldbackups(repo)
 
+
 def deletecmd(ui, repo, pats):
     """subcommand that deletes a specific shelve"""
     if not pats:
-        raise error.Abort(_('no shelved changes specified!'))
+        raise error.Abort(_(b'no shelved changes specified!'))
     with repo.wlock():
         try:
             for name in pats:
@@ -537,7 +618,8 @@
         except OSError as err:
             if err.errno != errno.ENOENT:
                 raise
-            raise error.Abort(_("shelved change '%s' not found") % name)
+            raise error.Abort(_(b"shelved change '%s' not found") % name)
+
 
 def listshelves(repo):
     """return all shelves in repo as list of (time, filename)"""
@@ -549,104 +631,116 @@
         return []
     info = []
     for (name, _type) in names:
-        pfx, sfx = name.rsplit('.', 1)
+        pfx, sfx = name.rsplit(b'.', 1)
         if not pfx or sfx != patchextension:
             continue
         st = shelvedfile(repo, name).stat()
         info.append((st[stat.ST_MTIME], shelvedfile(repo, pfx).filename()))
     return sorted(info, reverse=True)
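# Illustrative sketch of the ordering contract above: entries are
# (mtime, filename) pairs sorted newest first, which is why patchcmds can
# take shelves[0] as the most recent shelve.
shelves = sorted([(100, 'old'), (300, 'new'), (200, 'mid')], reverse=True)
assert [name for _, name in shelves] == ['new', 'mid', 'old']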
 
+
 def listcmd(ui, repo, pats, opts):
     """subcommand that displays the list of shelves"""
     pats = set(pats)
     width = 80
     if not ui.plain():
         width = ui.termwidth()
-    namelabel = 'shelve.newest'
-    ui.pager('shelve')
+    namelabel = b'shelve.newest'
+    ui.pager(b'shelve')
     for mtime, name in listshelves(repo):
         sname = util.split(name)[1]
         if pats and sname not in pats:
             continue
         ui.write(sname, label=namelabel)
-        namelabel = 'shelve.name'
+        namelabel = b'shelve.name'
         if ui.quiet:
-            ui.write('\n')
+            ui.write(b'\n')
             continue
-        ui.write(' ' * (16 - len(sname)))
+        ui.write(b' ' * (16 - len(sname)))
         used = 16
         date = dateutil.makedate(mtime)
-        age = '(%s)' % templatefilters.age(date, abbrev=True)
-        ui.write(age, label='shelve.age')
-        ui.write(' ' * (12 - len(age)))
+        age = b'(%s)' % templatefilters.age(date, abbrev=True)
+        ui.write(age, label=b'shelve.age')
+        ui.write(b' ' * (12 - len(age)))
         used += 12
-        with open(name + '.' + patchextension, 'rb') as fp:
+        with open(name + b'.' + patchextension, b'rb') as fp:
             while True:
                 line = fp.readline()
                 if not line:
                     break
-                if not line.startswith('#'):
+                if not line.startswith(b'#'):
                     desc = line.rstrip()
                     if ui.formatted():
                         desc = stringutil.ellipsis(desc, width - used)
                     ui.write(desc)
                     break
-            ui.write('\n')
-            if not (opts['patch'] or opts['stat']):
+            ui.write(b'\n')
+            if not (opts[b'patch'] or opts[b'stat']):
                 continue
             difflines = fp.readlines()
-            if opts['patch']:
+            if opts[b'patch']:
                 for chunk, label in patch.difflabel(iter, difflines):
                     ui.write(chunk, label=label)
-            if opts['stat']:
+            if opts[b'stat']:
                 for chunk, label in patch.diffstatui(difflines, width=width):
                     ui.write(chunk, label=label)
 
+
 def patchcmds(ui, repo, pats, opts):
     """subcommand that displays shelves"""
     if len(pats) == 0:
         shelves = listshelves(repo)
         if not shelves:
-            raise error.Abort(_("there are no shelves to show"))
+            raise error.Abort(_(b"there are no shelves to show"))
         mtime, name = shelves[0]
         sname = util.split(name)[1]
         pats = [sname]
 
     for shelfname in pats:
         if not shelvedfile(repo, shelfname, patchextension).exists():
-            raise error.Abort(_("cannot find shelf %s") % shelfname)
+            raise error.Abort(_(b"cannot find shelf %s") % shelfname)
 
     listcmd(ui, repo, pats, opts)
 
+
 def checkparents(repo, state):
     """check parent while resuming an unshelve"""
     if state.parents != repo.dirstate.parents():
-        raise error.Abort(_('working directory parents do not match unshelve '
-                           'state'))
+        raise error.Abort(
+            _(b'working directory parents do not match unshelve state')
+        )
+
 
 def _loadshelvedstate(ui, repo, opts):
     try:
         state = shelvedstate.load(repo)
-        if opts.get('keep') is None:
-            opts['keep'] = state.keep
+        if opts.get(b'keep') is None:
+            opts[b'keep'] = state.keep
     except IOError as err:
         if err.errno != errno.ENOENT:
             raise
-        cmdutil.wrongtooltocontinue(repo, _('unshelve'))
+        cmdutil.wrongtooltocontinue(repo, _(b'unshelve'))
     except error.CorruptedState as err:
-        ui.debug(pycompat.bytestr(err) + '\n')
-        if opts.get('continue'):
-            msg = _('corrupted shelved state file')
-            hint = _('please run hg unshelve --abort to abort unshelve '
-                     'operation')
+        ui.debug(pycompat.bytestr(err) + b'\n')
+        if opts.get(b'continue'):
+            msg = _(b'corrupted shelved state file')
+            hint = _(
+                b'please run hg unshelve --abort to abort unshelve '
+                b'operation'
+            )
             raise error.Abort(msg, hint=hint)
-        elif opts.get('abort'):
+        elif opts.get(b'abort'):
             shelvedstate.clear(repo)
-            raise error.Abort(_('could not read shelved state file, your '
-                                'working copy may be in an unexpected state\n'
-                                'please update to some commit\n'))
+            raise error.Abort(
+                _(
+                    b'could not read shelved state file, your '
+                    b'working copy may be in an unexpected state\n'
+                    b'please update to some commit\n'
+                )
+            )
     return state
 
+
 def unshelveabort(ui, repo, state):
     """subcommand that abort an in-progress unshelve"""
     with repo.lock():
@@ -654,58 +748,67 @@
             checkparents(repo, state)
 
             merge.update(repo, state.pendingctx, branchmerge=False, force=True)
-            if (state.activebookmark
-                    and state.activebookmark in repo._bookmarks):
+            if state.activebookmark and state.activebookmark in repo._bookmarks:
                 bookmarks.activate(repo, state.activebookmark)
             mergefiles(ui, repo, state.wctx, state.pendingctx)
             if not phases.supportinternal(repo):
-                repair.strip(ui, repo, state.nodestoremove, backup=False,
-                             topic='shelve')
+                repair.strip(
+                    ui, repo, state.nodestoremove, backup=False, topic=b'shelve'
+                )
         finally:
             shelvedstate.clear(repo)
-            ui.warn(_("unshelve of '%s' aborted\n") % state.name)
+            ui.warn(_(b"unshelve of '%s' aborted\n") % state.name)
+
 
 def hgabortunshelve(ui, repo):
     """logic to  abort unshelve using 'hg abort"""
     with repo.wlock():
-        state = _loadshelvedstate(ui, repo, {'abort' : True})
+        state = _loadshelvedstate(ui, repo, {b'abort': True})
         return unshelveabort(ui, repo, state)
 
+
 def mergefiles(ui, repo, wctx, shelvectx):
     """updates to wctx and merges the changes from shelvectx into the
     dirstate."""
-    with ui.configoverride({('ui', 'quiet'): True}):
+    with ui.configoverride({(b'ui', b'quiet'): True}):
         hg.update(repo, wctx.node())
         ui.pushbuffer(True)
         cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents())
         ui.popbuffer()
 
+
 def restorebranch(ui, repo, branchtorestore):
     if branchtorestore and branchtorestore != repo.dirstate.branch():
         repo.dirstate.setbranch(branchtorestore)
-        ui.status(_('marked working directory as branch %s\n')
-                  % branchtorestore)
+        ui.status(
+            _(b'marked working directory as branch %s\n') % branchtorestore
+        )
+
 
 def unshelvecleanup(ui, repo, name, opts):
     """remove related files after an unshelve"""
-    if not opts.get('keep'):
+    if not opts.get(b'keep'):
         for filetype in shelvefileextensions:
             shfile = shelvedfile(repo, name, filetype)
             if shfile.exists():
                 shfile.movetobackup()
         cleanupoldbackups(repo)
-def unshelvecontinue(ui, repo, state, opts, basename=None):
+
+
+def unshelvecontinue(ui, repo, state, opts):
     """subcommand to continue an in-progress unshelve"""
     # We're finishing off a merge. First parent is our original
     # parent, second is the temporary "fake" commit we're unshelving.
-    interactive = opts.get('interactive')
+    interactive = state.interactive
+    basename = state.name
     with repo.lock():
         checkparents(repo, state)
         ms = merge.mergestate.read(repo)
         if list(ms.unresolved()):
             raise error.Abort(
-                _("unresolved conflicts, can't continue"),
-                hint=_("see 'hg resolve', then 'hg unshelve --continue'"))
+                _(b"unresolved conflicts, can't continue"),
+                hint=_(b"see 'hg resolve', then 'hg unshelve --continue'"),
+            )
 
         shelvectx = repo[state.parents[1]]
         pendingctx = state.pendingctx
@@ -717,19 +820,13 @@
         targetphase = phases.internal
         if not phases.supportinternal(repo):
             targetphase = phases.secret
-        overrides = {('phases', 'new-commit'): targetphase}
-        with repo.ui.configoverride(overrides, 'unshelve'):
+        overrides = {(b'phases', b'new-commit'): targetphase}
+        with repo.ui.configoverride(overrides, b'unshelve'):
             with repo.dirstate.parentchange():
                 repo.setparents(state.parents[0], nodemod.nullid)
-                if not interactive:
-                    ispartialunshelve = False
-                    newnode = repo.commit(text=shelvectx.description(),
-                                        extra=shelvectx.extra(),
-                                        user=shelvectx.user(),
-                                        date=shelvectx.date())
-                else:
-                    newnode, ispartialunshelve = _dounshelveinteractive(ui,
-                        repo, shelvectx, basename, opts)
+                newnode, ispartialunshelve = _createunshelvectx(
+                    ui, repo, shelvectx, basename, interactive, opts
+                )
 
         if newnode is None:
             # If it ended up being a no-op commit, then the normal
@@ -737,8 +834,10 @@
             # here. Fix issue5494
             merge.mergestate.clean(repo)
             shelvectx = state.pendingctx
-            msg = _('note: unshelved changes already existed '
-                    'in the working copy\n')
+            msg = _(
+                b'note: unshelved changes already existed '
+                b'in the working copy\n'
+            )
             ui.status(msg)
         else:
             # only strip the shelvectx if we produced one
@@ -749,20 +848,23 @@
         mergefiles(ui, repo, state.wctx, shelvectx)
         restorebranch(ui, repo, state.branchtorestore)
 
+        if not phases.supportinternal(repo):
+            repair.strip(
+                ui, repo, state.nodestoremove, backup=False, topic=b'shelve'
+            )
+        shelvedstate.clear(repo)
         if not ispartialunshelve:
-            if not phases.supportinternal(repo):
-                repair.strip(ui, repo, state.nodestoremove, backup=False,
-                            topic='shelve')
-            shelvedstate.clear(repo)
             unshelvecleanup(ui, repo, state.name, opts)
         _restoreactivebookmark(repo, state.activebookmark)
-        ui.status(_("unshelve of '%s' complete\n") % state.name)
+        ui.status(_(b"unshelve of '%s' complete\n") % state.name)
+
 
 def hgcontinueunshelve(ui, repo):
     """logic to resume unshelve using 'hg continue'"""
     with repo.wlock():
-        state = _loadshelvedstate(ui, repo, {'continue' : True})
-        return unshelvecontinue(ui, repo, state, {'keep' : state.keep})
+        state = _loadshelvedstate(ui, repo, {b'continue': True})
+        return unshelvecontinue(ui, repo, state, {b'keep': state.keep})
+
 
 def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
     """Temporarily commit working copy changes before moving unshelve commit"""
@@ -772,104 +874,164 @@
     addedbefore = frozenset(s.added)
     if not (s.modified or s.added or s.removed):
         return tmpwctx, addedbefore
-    ui.status(_("temporarily committing pending changes "
-                "(restore with 'hg unshelve --abort')\n"))
-    extra = {'internal': 'shelve'}
-    commitfunc = getcommitfunc(extra=extra, interactive=False,
-                               editor=False)
+    ui.status(
+        _(
+            b"temporarily committing pending changes "
+            b"(restore with 'hg unshelve --abort')\n"
+        )
+    )
+    extra = {b'internal': b'shelve'}
+    commitfunc = getcommitfunc(extra=extra, interactive=False, editor=False)
     tempopts = {}
-    tempopts['message'] = "pending changes temporary commit"
-    tempopts['date'] = opts.get('date')
-    with ui.configoverride({('ui', 'quiet'): True}):
+    tempopts[b'message'] = b"pending changes temporary commit"
+    tempopts[b'date'] = opts.get(b'date')
+    with ui.configoverride({(b'ui', b'quiet'): True}):
         node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
     tmpwctx = repo[node]
     return tmpwctx, addedbefore
 
+
 def _unshelverestorecommit(ui, repo, tr, basename):
     """Recreate commit in the repository during the unshelve"""
     repo = repo.unfiltered()
     node = None
-    if shelvedfile(repo, basename, 'shelve').exists():
-        node = shelvedfile(repo, basename, 'shelve').readinfo()['node']
+    if shelvedfile(repo, basename, b'shelve').exists():
+        node = shelvedfile(repo, basename, b'shelve').readinfo()[b'node']
     if node is None or node not in repo:
-        with ui.configoverride({('ui', 'quiet'): True}):
-            shelvectx = shelvedfile(repo, basename, 'hg').applybundle(tr)
+        with ui.configoverride({(b'ui', b'quiet'): True}):
+            shelvectx = shelvedfile(repo, basename, b'hg').applybundle(tr)
         # We might not strip the unbundled changeset, so we should keep track of
         # the unshelve node in case we need to reuse it (e.g. unshelve --keep)
         if node is None:
-            info = {'node': nodemod.hex(shelvectx.node())}
-            shelvedfile(repo, basename, 'shelve').writeinfo(info)
+            info = {b'node': nodemod.hex(shelvectx.node())}
+            shelvedfile(repo, basename, b'shelve').writeinfo(info)
     else:
         shelvectx = repo[node]
 
     return repo, shelvectx
 
-def _dounshelveinteractive(ui, repo, shelvectx, basename, opts):
-    """The user might want to unshelve certain changes only from the stored
-    shelve. So, we would create two commits. One with requested changes to
-    unshelve at that time and the latter is shelved for future.
+
+def _createunshelvectx(ui, repo, shelvectx, basename, interactive, opts):
+    """Handles the creation of unshelve commit and updates the shelve if it
+    was partially unshelved.
+
+    If interactive is:
+
+      * False: Commits all the changes in the working directory.
+      * True: Prompts the user to select changes to unshelve and commit them.
+              Updates the shelve with the remaining changes.
+
+    Returns the node of the new commit formed and a bool indicating whether
+    the shelve was only partially unshelved.
+
+    In interactive mode the user might want to unshelve only some of the
+    changes in the stored shelve, so two commits may be created: one with
+    the changes requested now, and a second that re-shelves whatever
+    remains for later.
     """
-    opts['message'] = shelvectx.description()
-    opts['interactive-unshelve'] = True
+    opts[b'message'] = shelvectx.description()
+    opts[b'interactive-unshelve'] = True
     pats = []
-    commitfunc = getcommitfunc(shelvectx.extra(), interactive=True,
-                               editor=True)
-    newnode = cmdutil.dorecord(ui, repo, commitfunc, None, False,
-                               cmdutil.recordfilter, *pats,
-                               **pycompat.strkwargs(opts))
-    snode = repo.commit(text=shelvectx.description(),
-                        extra=shelvectx.extra(),
-                        user=shelvectx.user(),
-                        date=shelvectx.date())
-    m = scmutil.matchfiles(repo, repo[snode].files())
+    if not interactive:
+        newnode = repo.commit(
+            text=shelvectx.description(),
+            extra=shelvectx.extra(),
+            user=shelvectx.user(),
+            date=shelvectx.date(),
+        )
+        return newnode, False
+
+    commitfunc = getcommitfunc(shelvectx.extra(), interactive=True, editor=True)
+    newnode = cmdutil.dorecord(
+        ui,
+        repo,
+        commitfunc,
+        None,
+        False,
+        cmdutil.recordfilter,
+        *pats,
+        **pycompat.strkwargs(opts)
+    )
+    snode = repo.commit(
+        text=shelvectx.description(),
+        extra=shelvectx.extra(),
+        user=shelvectx.user(),
+    )
     if snode:
+        m = scmutil.matchfiles(repo, repo[snode].files())
         _shelvecreatedcommit(repo, snode, basename, m)
 
     return newnode, bool(snode)
 
-def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
-                          tmpwctx, shelvectx, branchtorestore,
-                          activebookmark):
+
+def _rebaserestoredcommit(
+    ui,
+    repo,
+    opts,
+    tr,
+    oldtiprev,
+    basename,
+    pctx,
+    tmpwctx,
+    shelvectx,
+    branchtorestore,
+    activebookmark,
+):
     """Rebase restored commit from its original location to a destination"""
     # If the shelve is not immediately on top of the commit
     # we'll be merging with, rebase it to be on top.
-    interactive = opts.get('interactive')
+    interactive = opts.get(b'interactive')
     if tmpwctx.node() == shelvectx.p1().node() and not interactive:
         # We won't skip in interactive mode because the user might want to
         # unshelve certain changes only.
         return shelvectx, False
 
     overrides = {
-        ('ui', 'forcemerge'): opts.get('tool', ''),
-        ('phases', 'new-commit'): phases.secret,
+        (b'ui', b'forcemerge'): opts.get(b'tool', b''),
+        (b'phases', b'new-commit'): phases.secret,
     }
-    with repo.ui.configoverride(overrides, 'unshelve'):
-        ui.status(_('rebasing shelved changes\n'))
-        stats = merge.graft(repo, shelvectx, shelvectx.p1(),
-                           labels=['shelve', 'working-copy'],
-                           keepconflictparent=True)
+    with repo.ui.configoverride(overrides, b'unshelve'):
+        ui.status(_(b'rebasing shelved changes\n'))
+        stats = merge.graft(
+            repo,
+            shelvectx,
+            shelvectx.p1(),
+            labels=[b'shelve', b'working-copy'],
+            keepconflictparent=True,
+        )
         if stats.unresolvedcount:
             tr.close()
 
-            nodestoremove = [repo.changelog.node(rev)
-                             for rev in pycompat.xrange(oldtiprev, len(repo))]
-            shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
-                              branchtorestore, opts.get('keep'), activebookmark)
+            nodestoremove = [
+                repo.changelog.node(rev)
+                for rev in pycompat.xrange(oldtiprev, len(repo))
+            ]
+            shelvedstate.save(
+                repo,
+                basename,
+                pctx,
+                tmpwctx,
+                nodestoremove,
+                branchtorestore,
+                opts.get(b'keep'),
+                activebookmark,
+                interactive,
+            )
             raise error.InterventionRequired(
-                _("unresolved conflicts (see 'hg resolve', then "
-                  "'hg unshelve --continue')"))
+                _(
+                    b"unresolved conflicts (see 'hg resolve', then "
+                    b"'hg unshelve --continue')"
+                )
+            )
 
         with repo.dirstate.parentchange():
             repo.setparents(tmpwctx.node(), nodemod.nullid)
-            if not interactive:
-                ispartialunshelve = False
-                newnode = repo.commit(text=shelvectx.description(),
-                                      extra=shelvectx.extra(),
-                                      user=shelvectx.user(),
-                                      date=shelvectx.date())
-            else:
-                newnode, ispartialunshelve = _dounshelveinteractive(ui, repo,
-                                                shelvectx, basename, opts)
+            newnode, ispartialunshelve = _createunshelvectx(
+                ui, repo, shelvectx, basename, interactive, opts
+            )
 
         if newnode is None:
             # If it ended up being a no-op commit, then the normal
@@ -877,8 +1039,10 @@
             # here. Fix issue5494
             merge.mergestate.clean(repo)
             shelvectx = tmpwctx
-            msg = _('note: unshelved changes already existed '
-                    'in the working copy\n')
+            msg = _(
+                b'note: unshelved changes already existed '
+                b'in the working copy\n'
+            )
             ui.status(msg)
         else:
             shelvectx = repo[newnode]
@@ -886,17 +1050,19 @@
 
     return shelvectx, ispartialunshelve
 
+
 def _forgetunknownfiles(repo, shelvectx, addedbefore):
     # Forget any files that were unknown before the shelve and before the
     # unshelve started, but are now added.
-    shelveunknown = shelvectx.extra().get('shelve_unknown')
+    shelveunknown = shelvectx.extra().get(b'shelve_unknown')
     if not shelveunknown:
         return
-    shelveunknown = frozenset(shelveunknown.split('\0'))
+    shelveunknown = frozenset(shelveunknown.split(b'\0'))
     addedafter = frozenset(repo.status().added)
     toforget = (addedafter & shelveunknown) - addedbefore
     repo[None].forget(toforget)
 
+
 def _finishunshelve(repo, oldtiprev, tr, activebookmark):
     _restoreactivebookmark(repo, activebookmark)
     # The transaction aborting will strip all the commits for us,
@@ -906,6 +1072,7 @@
     repo.unfiltered().changelog.strip(oldtiprev, tr)
     _aborttransaction(repo, tr)
 
+
 def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
     """Check potential problems which may result from working
     copy having untracked changes."""
@@ -913,60 +1080,66 @@
     shelvetouched = set(shelvectx.files())
     intersection = wcdeleted.intersection(shelvetouched)
     if intersection:
-        m = _("shelved change touches missing files")
-        hint = _("run hg status to see which files are missing")
+        m = _(b"shelved change touches missing files")
+        hint = _(b"run hg status to see which files are missing")
         raise error.Abort(m, hint=hint)
 
+
 def dounshelve(ui, repo, *shelved, **opts):
     opts = pycompat.byteskwargs(opts)
-    abortf = opts.get('abort')
-    continuef = opts.get('continue')
-    interactive = opts.get('interactive')
+    abortf = opts.get(b'abort')
+    continuef = opts.get(b'continue')
+    interactive = opts.get(b'interactive')
     if not abortf and not continuef:
         cmdutil.checkunfinished(repo)
     shelved = list(shelved)
-    if opts.get("name"):
-        shelved.append(opts["name"])
+    if opts.get(b"name"):
+        shelved.append(opts[b"name"])
 
-    if abortf or continuef and not interactive:
+    if interactive and opts.get(b'keep'):
+        raise error.Abort(_(b'--keep on --interactive is not yet supported'))
+    if abortf or continuef:
         if abortf and continuef:
-            raise error.Abort(_('cannot use both abort and continue'))
+            raise error.Abort(_(b'cannot use both abort and continue'))
         if shelved:
-            raise error.Abort(_('cannot combine abort/continue with '
-                               'naming a shelved change'))
-        if abortf and opts.get('tool', False):
-            ui.warn(_('tool option will be ignored\n'))
+            raise error.Abort(
+                _(
+                    b'cannot combine abort/continue with '
+                    b'naming a shelved change'
+                )
+            )
+        if abortf and opts.get(b'tool', False):
+            ui.warn(_(b'tool option will be ignored\n'))
 
         state = _loadshelvedstate(ui, repo, opts)
         if abortf:
             return unshelveabort(ui, repo, state)
+        elif continuef and interactive:
+            raise error.Abort(_(b'cannot use both continue and interactive'))
         elif continuef:
             return unshelvecontinue(ui, repo, state, opts)
     elif len(shelved) > 1:
-        raise error.Abort(_('can only unshelve one change at a time'))
+        raise error.Abort(_(b'can only unshelve one change at a time'))
     elif not shelved:
         shelved = listshelves(repo)
         if not shelved:
-            raise error.Abort(_('no shelved changes to apply!'))
+            raise error.Abort(_(b'no shelved changes to apply!'))
         basename = util.split(shelved[0][1])[1]
-        ui.status(_("unshelving change '%s'\n") % basename)
-    elif shelved:
+        ui.status(_(b"unshelving change '%s'\n") % basename)
+    else:
         basename = shelved[0]
-    if continuef and interactive:
-        state = _loadshelvedstate(ui, repo, opts)
-        return unshelvecontinue(ui, repo, state, opts, basename)
 
     if not shelvedfile(repo, basename, patchextension).exists():
-        raise error.Abort(_("shelved change '%s' not found") % basename)
+        raise error.Abort(_(b"shelved change '%s' not found") % basename)
 
     repo = repo.unfiltered()
     lock = tr = None
     try:
         lock = repo.lock()
-        tr = repo.transaction('unshelve', report=lambda x: None)
+        tr = repo.transaction(b'unshelve', report=lambda x: None)
         oldtiprev = len(repo)
 
-        pctx = repo['.']
+        pctx = repo[b'.']
         tmpwctx = pctx
         # The goal is to have a commit structure like so:
         # ...-> pctx -> tmpwctx -> shelvectx
@@ -975,26 +1148,36 @@
         # to the original pctx.
 
         activebookmark = _backupactivebookmark(repo)
-        tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
-                                                         tmpwctx)
+        tmpwctx, addedbefore = _commitworkingcopychanges(
+            ui, repo, opts, tmpwctx
+        )
         repo, shelvectx = _unshelverestorecommit(ui, repo, tr, basename)
         _checkunshelveuntrackedproblems(ui, repo, shelvectx)
-        branchtorestore = ''
+        branchtorestore = b''
         if shelvectx.branch() != shelvectx.p1().branch():
             branchtorestore = shelvectx.branch()
 
-        shelvectx, ispartialunshelve = _rebaserestoredcommit(ui, repo, opts,
-            tr, oldtiprev, basename, pctx, tmpwctx, shelvectx,
-            branchtorestore, activebookmark)
-        overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
-        with ui.configoverride(overrides, 'unshelve'):
+        shelvectx, ispartialunshelve = _rebaserestoredcommit(
+            ui,
+            repo,
+            opts,
+            tr,
+            oldtiprev,
+            basename,
+            pctx,
+            tmpwctx,
+            shelvectx,
+            branchtorestore,
+            activebookmark,
+        )
+        overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+        with ui.configoverride(overrides, b'unshelve'):
             mergefiles(ui, repo, pctx, shelvectx)
         restorebranch(ui, repo, branchtorestore)
+        shelvedstate.clear(repo)
+        _finishunshelve(repo, oldtiprev, tr, activebookmark)
+        _forgetunknownfiles(repo, shelvectx, addedbefore)
         if not ispartialunshelve:
-            _forgetunknownfiles(repo, shelvectx, addedbefore)
-
-            shelvedstate.clear(repo)
-            _finishunshelve(repo, oldtiprev, tr, activebookmark)
             unshelvecleanup(ui, repo, basename, opts)
     finally:
         if tr:
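
The dominant change across these files is the Python 3 string migration:
literals naming config keys, options, and messages become bytes
(``b'...'``), while ``pycompat.byteskwargs`` and ``pycompat.strkwargs``
convert option dictionaries at the boundary where ``**kwargs`` cross
between native-``str`` and bytes keyspaces. Below is a minimal,
self-contained sketch of that round-trip; the two helpers are simplified
stand-ins for the real ``mercurial.pycompat`` functions::

   def byteskwargs(dic):
       # simplified stand-in for mercurial.pycompat.byteskwargs:
       # Python 3 keyword arguments arrive with str keys, but internal
       # code looks them up with bytes keys such as b'interactive'
       return {k.encode('latin-1'): v for k, v in dic.items()}

   def strkwargs(dic):
       # simplified stand-in for mercurial.pycompat.strkwargs: the
       # reverse conversion, needed before re-expanding as **kwargs
       return {k.decode('latin-1'): v for k, v in dic.items()}

   def dounshelve_sketch(**opts):
       opts = byteskwargs(opts)
       interactive = opts.get(b'interactive')  # bytes key lookup
       # an API that re-expands **opts needs str keys back
       return interactive, strkwargs(opts)

   print(dounshelve_sketch(interactive=True))
   # (True, {'interactive': True})
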
--- a/mercurial/similar.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/similar.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,8 +10,10 @@
 from .i18n import _
 from . import (
     mdiff,
+    pycompat,
 )
 
+
 def _findexactmatches(repo, added, removed):
     '''find renamed files that have no changes
 
@@ -21,9 +23,11 @@
     # Build table of removed files: {hash(fctx.data()): [fctx, ...]}.
     # We use hash() to discard fctx.data() from memory.
     hashes = {}
-    progress = repo.ui.makeprogress(_('searching for exact renames'),
-                                    total=(len(added) + len(removed)),
-                                    unit=_('files'))
+    progress = repo.ui.makeprogress(
+        _(b'searching for exact renames'),
+        total=(len(added) + len(removed)),
+        unit=_(b'files'),
+    )
     for fctx in removed:
         progress.increment()
         h = hash(fctx.data())
@@ -46,11 +50,13 @@
     # Done
     progress.complete()
 
+
 def _ctxdata(fctx):
     # lazily load text
     orig = fctx.data()
     return orig, mdiff.splitnewlines(orig)
 
+
 def _score(fctx, otherdata):
     orig, lines = otherdata
     text = fctx.data()
@@ -65,9 +71,11 @@
     lengths = len(text) + len(orig)
     return equal * 2.0 / lengths
 
+
 def score(fctx1, fctx2):
     return _score(fctx1, _ctxdata(fctx2))
 
+
 def _findsimilarmatches(repo, added, removed, threshold):
     '''find potentially renamed files based on similar file content
 
@@ -75,8 +83,9 @@
     (before, after, score) tuples of partial matches.
     '''
     copies = {}
-    progress = repo.ui.makeprogress(_('searching for similar files'),
-                         unit=_('files'), total=len(removed))
+    progress = repo.ui.makeprogress(
+        _(b'searching for similar files'), unit=_(b'files'), total=len(removed)
+    )
     for r in removed:
         progress.increment()
         data = None
@@ -89,13 +98,15 @@
                 copies[a] = (r, myscore)
     progress.complete()
 
-    for dest, v in copies.iteritems():
+    for dest, v in pycompat.iteritems(copies):
         source, bscore = v
         yield source, dest, bscore
 
+
 def _dropempty(fctxs):
     return [x for x in fctxs if x.size() > 0]
 
+
 def findrenames(repo, added, removed, threshold):
     '''find renamed files -- yields (before, after, score) tuples'''
     wctx = repo[None]
@@ -116,6 +127,7 @@
     # If the user requested similar files to be matched, search for them also.
     if threshold < 1.0:
         addedfiles = [x for x in addedfiles if x not in matchedfiles]
-        for (a, b, score) in _findsimilarmatches(repo, addedfiles,
-                                                 removedfiles, threshold):
+        for (a, b, score) in _findsimilarmatches(
+            repo, addedfiles, removedfiles, threshold
+        ):
             yield (a.path(), b.path(), score)
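
As the comments in ``_findexactmatches`` note, removed files are bucketed
by ``hash(fctx.data())`` so whole file contents need not stay resident,
and candidates are confirmed with a full comparison. A standalone sketch
of the same strategy; plain dicts mapping path to bytes stand in for
Mercurial's file contexts, and unlike the real code this sketch keeps
the removed contents in memory for simplicity::

   def find_exact_renames(added, removed):
       # bucket removed files by content hash; hash collisions are
       # possible, so each candidate is confirmed byte-for-byte
       buckets = {}
       for path, data in removed.items():
           buckets.setdefault(hash(data), []).append((path, data))
       for path, data in added.items():
           for oldpath, olddata in buckets.get(hash(data), ()):
               if olddata == data:
                   yield oldpath, path

   removed = {'old.txt': b'same bytes'}
   added = {'new.txt': b'same bytes', 'extra.txt': b'other'}
   print(list(find_exact_renames(added, removed)))
   # [('old.txt', 'new.txt')]
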
--- a/mercurial/simplemerge.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/simplemerge.py	Mon Oct 21 11:09:48 2019 -0400
@@ -24,13 +24,13 @@
     mdiff,
     pycompat,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
+
 
 class CantReprocessAndShowBase(Exception):
     pass
 
+
 def intersect(ra, rb):
     """Given two ranges return the range where they intersect or None.
 
@@ -53,23 +53,27 @@
     else:
         return None
 
+
 def compare_range(a, astart, aend, b, bstart, bend):
     """Compare a[astart:aend] == b[bstart:bend], without slicing.
     """
     if (aend - astart) != (bend - bstart):
         return False
-    for ia, ib in zip(pycompat.xrange(astart, aend),
-                      pycompat.xrange(bstart, bend)):
+    for ia, ib in zip(
+        pycompat.xrange(astart, aend), pycompat.xrange(bstart, bend)
+    ):
         if a[ia] != b[ib]:
             return False
     else:
         return True
 
+
 class Merge3Text(object):
     """3-way merge of texts.
 
     Given strings BASE, OTHER, THIS, tries to produce a combined text
     incorporating the changes from both BASE->OTHER and BASE->THIS."""
+
     def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
         self.basetext = basetext
         self.atext = atext
@@ -84,50 +88,52 @@
         self.a = a
         self.b = b
 
-    def merge_lines(self,
-                    name_a=None,
-                    name_b=None,
-                    name_base=None,
-                    start_marker='<<<<<<<',
-                    mid_marker='=======',
-                    end_marker='>>>>>>>',
-                    base_marker=None,
-                    localorother=None,
-                    minimize=False):
+    def merge_lines(
+        self,
+        name_a=None,
+        name_b=None,
+        name_base=None,
+        start_marker=b'<<<<<<<',
+        mid_marker=b'=======',
+        end_marker=b'>>>>>>>',
+        base_marker=None,
+        localorother=None,
+        minimize=False,
+    ):
         """Return merge in cvs-like form.
         """
         self.conflicts = False
-        newline = '\n'
+        newline = b'\n'
         if len(self.a) > 0:
-            if self.a[0].endswith('\r\n'):
-                newline = '\r\n'
-            elif self.a[0].endswith('\r'):
-                newline = '\r'
+            if self.a[0].endswith(b'\r\n'):
+                newline = b'\r\n'
+            elif self.a[0].endswith(b'\r'):
+                newline = b'\r'
         if name_a and start_marker:
-            start_marker = start_marker + ' ' + name_a
+            start_marker = start_marker + b' ' + name_a
         if name_b and end_marker:
-            end_marker = end_marker + ' ' + name_b
+            end_marker = end_marker + b' ' + name_b
         if name_base and base_marker:
-            base_marker = base_marker + ' ' + name_base
+            base_marker = base_marker + b' ' + name_base
         merge_regions = self.merge_regions()
         if minimize:
             merge_regions = self.minimize(merge_regions)
         for t in merge_regions:
             what = t[0]
-            if what == 'unchanged':
+            if what == b'unchanged':
                 for i in range(t[1], t[2]):
                     yield self.base[i]
-            elif what == 'a' or what == 'same':
+            elif what == b'a' or what == b'same':
                 for i in range(t[1], t[2]):
                     yield self.a[i]
-            elif what == 'b':
+            elif what == b'b':
                 for i in range(t[1], t[2]):
                     yield self.b[i]
-            elif what == 'conflict':
-                if localorother == 'local':
+            elif what == b'conflict':
+                if localorother == b'local':
                     for i in range(t[3], t[4]):
                         yield self.a[i]
-                elif localorother == 'other':
+                elif localorother == b'other':
                     for i in range(t[5], t[6]):
                         yield self.b[i]
                 else:
@@ -169,17 +175,19 @@
         """
         for t in self.merge_regions():
             what = t[0]
-            if what == 'unchanged':
-                yield what, self.base[t[1]:t[2]]
-            elif what == 'a' or what == 'same':
-                yield what, self.a[t[1]:t[2]]
-            elif what == 'b':
-                yield what, self.b[t[1]:t[2]]
-            elif what == 'conflict':
-                yield (what,
-                       self.base[t[1]:t[2]],
-                       self.a[t[3]:t[4]],
-                       self.b[t[5]:t[6]])
+            if what == b'unchanged':
+                yield what, self.base[t[1] : t[2]]
+            elif what == b'a' or what == b'same':
+                yield what, self.a[t[1] : t[2]]
+            elif what == b'b':
+                yield what, self.b[t[1] : t[2]]
+            elif what == b'conflict':
+                yield (
+                    what,
+                    self.base[t[1] : t[2]],
+                    self.a[t[3] : t[4]],
+                    self.b[t[5] : t[6]],
+                )
             else:
                 raise ValueError(what)
 
@@ -218,7 +226,7 @@
 
         for region in self.find_sync_regions():
             zmatch, zend, amatch, aend, bmatch, bend = region
-            #print 'match base [%d:%d]' % (zmatch, zend)
+            # print 'match base [%d:%d]' % (zmatch, zend)
 
             matchlen = zend - zmatch
             assert matchlen >= 0
@@ -232,27 +240,28 @@
             assert len_b >= 0
             assert len_base >= 0
 
-            #print 'unmatched a=%d, b=%d' % (len_a, len_b)
+            # print 'unmatched a=%d, b=%d' % (len_a, len_b)
 
             if len_a or len_b:
                 # try to avoid actually slicing the lists
-                equal_a = compare_range(self.a, ia, amatch,
-                                        self.base, iz, zmatch)
-                equal_b = compare_range(self.b, ib, bmatch,
-                                        self.base, iz, zmatch)
-                same = compare_range(self.a, ia, amatch,
-                                     self.b, ib, bmatch)
+                equal_a = compare_range(
+                    self.a, ia, amatch, self.base, iz, zmatch
+                )
+                equal_b = compare_range(
+                    self.b, ib, bmatch, self.base, iz, zmatch
+                )
+                same = compare_range(self.a, ia, amatch, self.b, ib, bmatch)
 
                 if same:
-                    yield 'same', ia, amatch
+                    yield b'same', ia, amatch
                 elif equal_a and not equal_b:
-                    yield 'b', ib, bmatch
+                    yield b'b', ib, bmatch
                 elif equal_b and not equal_a:
-                    yield 'a', ia, amatch
+                    yield b'a', ia, amatch
                 elif not equal_a and not equal_b:
-                    yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
+                    yield b'conflict', iz, zmatch, ia, amatch, ib, bmatch
                 else:
-                    raise AssertionError("can't handle a=b=base but unmatched")
+                    raise AssertionError(b"can't handle a=b=base but unmatched")
 
                 ia = amatch
                 ib = bmatch
@@ -261,13 +270,12 @@
             # if the same part of the base was deleted on both sides
             # that's OK, we can just skip it.
 
-
             if matchlen > 0:
                 assert ia == amatch
                 assert ib == bmatch
                 assert iz == zmatch
 
-                yield 'unchanged', zmatch, zend
+                yield b'unchanged', zmatch, zend
                 iz = zend
                 ia = aend
                 ib = bend
@@ -280,7 +288,7 @@
         region and are instead considered the same.
         """
         for region in merge_regions:
-            if region[0] != "conflict":
+            if region[0] != b"conflict":
                 yield region
                 continue
             issue, z1, z2, a1, a2, b1, b2 = region
@@ -289,27 +297,37 @@
 
             # find matches at the front
             ii = 0
-            while (ii < alen and ii < blen and
-                   self.a[a1 + ii] == self.b[b1 + ii]):
+            while (
+                ii < alen and ii < blen and self.a[a1 + ii] == self.b[b1 + ii]
+            ):
                 ii += 1
             startmatches = ii
 
             # find matches at the end
             ii = 0
-            while (ii < alen and ii < blen and
-                   self.a[a2 - ii - 1] == self.b[b2 - ii - 1]):
+            while (
+                ii < alen
+                and ii < blen
+                and self.a[a2 - ii - 1] == self.b[b2 - ii - 1]
+            ):
                 ii += 1
             endmatches = ii
 
             if startmatches > 0:
-                yield 'same', a1, a1 + startmatches
+                yield b'same', a1, a1 + startmatches
 
-            yield ('conflict', z1, z2,
-                    a1 + startmatches, a2 - endmatches,
-                    b1 + startmatches, b2 - endmatches)
+            yield (
+                b'conflict',
+                z1,
+                z2,
+                a1 + startmatches,
+                a2 - endmatches,
+                b1 + startmatches,
+                b2 - endmatches,
+            )
 
             if endmatches > 0:
-                yield 'same', a2 - endmatches, a2
+                yield b'same', a2 - endmatches, a2
 
     def find_sync_regions(self):
         """Return a list of sync regions, where both descendants match the base.
@@ -351,13 +369,13 @@
                 bend = bsub + intlen
 
                 assert self.base[intbase:intend] == self.a[asub:aend], (
-                        (self.base[intbase:intend], self.a[asub:aend]))
+                    self.base[intbase:intend],
+                    self.a[asub:aend],
+                )
 
                 assert self.base[intbase:intend] == self.b[bsub:bend]
 
-                sl.append((intbase, intend,
-                           asub, aend,
-                           bsub, bend))
+                sl.append((intbase, intend, asub, aend, bsub, bend))
 
             # advance whichever one ends first in the base text
             if (abase + alen) < (bbase + blen):
@@ -397,25 +415,28 @@
 
         return unc
 
+
 def _verifytext(text, path, ui, opts):
     """verifies that text is non-binary (unless opts[text] is passed,
     then we just warn)"""
     if stringutil.binary(text):
-        msg = _("%s looks like a binary file.") % path
-        if not opts.get('quiet'):
-            ui.warn(_('warning: %s\n') % msg)
-        if not opts.get('text'):
+        msg = _(b"%s looks like a binary file.") % path
+        if not opts.get(b'quiet'):
+            ui.warn(_(b'warning: %s\n') % msg)
+        if not opts.get(b'text'):
             raise error.Abort(msg)
     return text
 
+
 def _picklabels(defaults, overrides):
     if len(overrides) > 3:
-        raise error.Abort(_("can only specify three labels."))
+        raise error.Abort(_(b"can only specify three labels."))
     result = defaults[:]
     for i, override in enumerate(overrides):
         result[i] = override
     return result
 
+
 def simplemerge(ui, localctx, basectx, otherctx, **opts):
     """Performs the simplemerge algorithm.
 
@@ -433,12 +454,12 @@
         # repository usually sees) might be more useful.
         return _verifytext(ctx.decodeddata(), ctx.path(), ui, opts)
 
-    mode = opts.get('mode','merge')
+    mode = opts.get(b'mode', b'merge')
     name_a, name_b, name_base = None, None, None
-    if mode != 'union':
-        name_a, name_b, name_base = _picklabels([localctx.path(),
-                                                 otherctx.path(), None],
-                                                opts.get('label', []))
+    if mode != b'union':
+        name_a, name_b, name_base = _picklabels(
+            [localctx.path(), otherctx.path(), None], opts.get(b'label', [])
+        )
 
     try:
         localtext = readctx(localctx)
@@ -449,28 +470,29 @@
 
     m3 = Merge3Text(basetext, localtext, othertext)
     extrakwargs = {
-            "localorother": opts.get("localorother", None),
-            'minimize': True,
-        }
-    if mode == 'union':
-        extrakwargs['start_marker'] = None
-        extrakwargs['mid_marker'] = None
-        extrakwargs['end_marker'] = None
+        b"localorother": opts.get(b"localorother", None),
+        b'minimize': True,
+    }
+    if mode == b'union':
+        extrakwargs[b'start_marker'] = None
+        extrakwargs[b'mid_marker'] = None
+        extrakwargs[b'end_marker'] = None
     elif name_base is not None:
-        extrakwargs['base_marker'] = '|||||||'
-        extrakwargs['name_base'] = name_base
-        extrakwargs['minimize'] = False
+        extrakwargs[b'base_marker'] = b'|||||||'
+        extrakwargs[b'name_base'] = name_base
+        extrakwargs[b'minimize'] = False
 
-    mergedtext = ""
-    for line in m3.merge_lines(name_a=name_a, name_b=name_b,
-                               **pycompat.strkwargs(extrakwargs)):
-        if opts.get('print'):
+    mergedtext = b""
+    for line in m3.merge_lines(
+        name_a=name_a, name_b=name_b, **pycompat.strkwargs(extrakwargs)
+    ):
+        if opts.get(b'print'):
             ui.fout.write(line)
         else:
             mergedtext += line
 
-    if not opts.get('print'):
+    if not opts.get(b'print'):
         localctx.write(mergedtext, localctx.flags())
 
-    if m3.conflicts and not mode == 'union':
+    if m3.conflicts and not mode == b'union':
         return 1
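
After this migration every input to ``Merge3Text`` is bytes: the three
texts, the conflict markers, and the label names. A hedged usage sketch,
assuming a Mercurial 5.2 source tree is importable; ``merge_lines``
yields the merged lines and sets ``conflicts`` as a side effect of
iteration::

   from mercurial.simplemerge import Merge3Text

   base = b'one\ntwo\n'
   local = b'one local\ntwo\n'
   other = b'one other\ntwo\n'

   m3 = Merge3Text(base, local, other)
   # labels and markers must be bytes, matching the b'<<<<<<<' defaults
   merged = b''.join(m3.merge_lines(name_a=b'local', name_b=b'other'))
   print(merged.decode())  # conflict markers around the first line
   print(m3.conflicts)     # True: both sides changed line one
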
--- a/mercurial/smartset.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/smartset.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,21 +7,21 @@
 
 from __future__ import absolute_import
 
+from .pycompat import getattr
 from . import (
     encoding,
     error,
     pycompat,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
+
 
 def _typename(o):
-    return pycompat.sysbytes(type(o).__name__).lstrip('_')
+    return pycompat.sysbytes(type(o).__name__).lstrip(b'_')
+
 
 class abstractsmartset(object):
-
     def __nonzero__(self):
         """True if the smartset is not empty"""
         raise NotImplementedError()
@@ -64,7 +64,7 @@
             for v in self.fastasc():
                 break
             else:
-                raise ValueError('arg is an empty sequence')
+                raise ValueError(b'arg is an empty sequence')
         self.min = lambda: v
         return v
 
@@ -76,7 +76,7 @@
             for v in self.fastdesc():
                 break
             else:
-                raise ValueError('arg is an empty sequence')
+                raise ValueError(b'arg is an empty sequence')
         self.max = lambda: v
         return v
 
@@ -125,8 +125,9 @@
 
         This is part of the mandatory API for smartset."""
         c = other.__contains__
-        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
-                           cache=False)
+        return self.filter(
+            lambda r: not c(r), condrepr=(b'<not %r>', other), cache=False
+        )
 
     def filter(self, condition, condrepr=None, cache=True):
         """Returns this smartset filtered by condition as a new smartset.
@@ -137,14 +138,14 @@
 
         This is part of the mandatory API for smartset."""
         # builtins cannot be cached, but they do not need to be
-        if cache and util.safehasattr(condition, '__code__'):
+        if cache and util.safehasattr(condition, b'__code__'):
             condition = util.cachefunc(condition)
         return filteredset(self, condition, condrepr)
 
     def slice(self, start, stop):
         """Return new smartset that contains selected elements from this set"""
         if start < 0 or stop < 0:
-            raise error.ProgrammingError('negative index not allowed')
+            raise error.ProgrammingError(b'negative index not allowed')
         return self._slice(start, stop)
 
     def _slice(self, start, stop):
@@ -161,7 +162,8 @@
             if y is None:
                 break
             ys.append(y)
-        return baseset(ys, datarepr=('slice=%d:%d %r', start, stop, self))
+        return baseset(ys, datarepr=(b'slice=%d:%d %r', start, stop, self))
+
 
 class baseset(abstractsmartset):
     """Basic data structure that represents a revset and contains the basic
@@ -222,6 +224,7 @@
     >>> rs._istopo
     True
     """
+
     def __init__(self, data=(), datarepr=None, istopo=False):
         """
         datarepr: a tuple of (format, obj, ...), a function or an object that
@@ -342,20 +345,25 @@
 
     def _fastsetop(self, other, op):
         # try to use native set operations as fast paths
-        if (type(other) is baseset and r'_set' in other.__dict__ and r'_set' in
-            self.__dict__ and self._ascending is not None):
-            s = baseset(data=getattr(self._set, op)(other._set),
-                        istopo=self._istopo)
+        if (
+            type(other) is baseset
+            and r'_set' in other.__dict__
+            and r'_set' in self.__dict__
+            and self._ascending is not None
+        ):
+            s = baseset(
+                data=getattr(self._set, op)(other._set), istopo=self._istopo
+            )
             s._ascending = self._ascending
         else:
             s = getattr(super(baseset, self), op)(other)
         return s
 
     def __and__(self, other):
-        return self._fastsetop(other, '__and__')
+        return self._fastsetop(other, b'__and__')
 
     def __sub__(self, other):
-        return self._fastsetop(other, '__sub__')
+        return self._fastsetop(other, b'__sub__')
 
     def _slice(self, start, stop):
         # creating new list should be generally cheaper than iterating items
@@ -371,7 +379,7 @@
 
     @encoding.strmethod
     def __repr__(self):
-        d = {None: '', False: '-', True: '+'}[self._ascending]
+        d = {None: b'', False: b'-', True: b'+'}[self._ascending]
         s = stringutil.buildrepr(self._datarepr)
         if not s:
             l = self._list
@@ -381,13 +389,15 @@
             if self._ascending is not None:
                 l = self._asclist
             s = pycompat.byterepr(l)
-        return '<%s%s %s>' % (_typename(self), d, s)
+        return b'<%s%s %s>' % (_typename(self), d, s)
+
 
 class filteredset(abstractsmartset):
     """Duck type for baseset class which iterates lazily over the revisions in
     the subset and contains a function which tests for membership in the
     revset
     """
+
     def __init__(self, subset, condition=lambda x: True, condrepr=None):
         """
         condition: a function that decide whether a revision in the subset
@@ -427,10 +437,12 @@
 
     def __nonzero__(self):
         fast = None
-        candidates = [self.fastasc if self.isascending() else None,
-                      self.fastdesc if self.isdescending() else None,
-                      self.fastasc,
-                      self.fastdesc]
+        candidates = [
+            self.fastasc if self.isascending() else None,
+            self.fastdesc if self.isdescending() else None,
+            self.fastasc,
+            self.fastdesc,
+        ]
         for candidate in candidates:
             if candidate is not None:
                 fast = candidate
@@ -484,7 +496,7 @@
         if it is not None:
             for x in it():
                 return x
-            return None #empty case
+            return None  # empty case
         else:
             x = None
             for x in self:
@@ -497,7 +509,8 @@
         s = stringutil.buildrepr(self._condrepr)
         if s:
             xs.append(s)
-        return '<%s %s>' % (_typename(self), ', '.join(xs))
+        return b'<%s %s>' % (_typename(self), b', '.join(xs))
+
 
 def _iterordered(ascending, iter1, iter2):
     """produce an ordered iteration from two iterators with the same order
@@ -535,6 +548,7 @@
         for val in it:
             yield val
 
+
 class addset(abstractsmartset):
     """Represent the addition of two sets
 
@@ -606,6 +620,7 @@
     >>> [x for x in rs]
     [5, 4, 3, 2, 0]
     """
+
     def __init__(self, revs1, revs2, ascending=None):
         self._r1 = revs1
         self._r2 = revs2
@@ -641,6 +656,7 @@
         if self._ascending is None:
             if self._genlist:
                 return iter(self._genlist)
+
             def arbitraryordergen():
                 for r in self._r1:
                     yield r
@@ -648,13 +664,14 @@
                 for r in self._r2:
                     if not inr1(r):
                         yield r
+
             return arbitraryordergen()
         # try to use our own fast iterator if it exists
         self._trysetasclist()
         if self._ascending:
-            attr = 'fastasc'
+            attr = b'fastasc'
         else:
-            attr = 'fastdesc'
+            attr = b'fastdesc'
         it = getattr(self, attr)
         if it is not None:
             return it()
@@ -744,8 +761,9 @@
 
     @encoding.strmethod
     def __repr__(self):
-        d = {None: '', False: '-', True: '+'}[self._ascending]
-        return '<%s%s %r, %r>' % (_typename(self), d, self._r1, self._r2)
+        d = {None: b'', False: b'-', True: b'+'}[self._ascending]
+        return b'<%s%s %r, %r>' % (_typename(self), d, self._r1, self._r2)
+
 
 class generatorset(abstractsmartset):
     """Wrap a generator for lazy iteration
@@ -760,6 +778,7 @@
     >>> xs.last()  # cached
     4
     """
+
     def __new__(cls, gen, iterasc=None):
         if iterasc is None:
             typ = cls
@@ -830,7 +849,8 @@
         # iteration.
         genlist = self._genlist
         nextgen = self._consumegen()
-        _len, _next = len, next # cache global lookup
+        _len, _next = len, next  # cache global lookup
+
         def gen():
             i = 0
             while True:
@@ -842,6 +862,7 @@
                     except StopIteration:
                         return
                 i += 1
+
         return gen()
 
     def _consumegen(self):
@@ -908,8 +929,9 @@
 
     @encoding.strmethod
     def __repr__(self):
-        d = {False: '-', True: '+'}[self._ascending]
-        return '<%s%s>' % (_typename(self), d)
+        d = {False: b'-', True: b'+'}[self._ascending]
+        return b'<%s%s>' % (_typename(self), d)
+
 
 class _generatorsetasc(generatorset):
     """Special case of generatorset optimized for ascending generators."""
@@ -930,6 +952,7 @@
         self._cache[x] = False
         return False
 
+
 class _generatorsetdesc(generatorset):
     """Special case of generatorset optimized for descending generators."""
 
@@ -949,6 +972,7 @@
         self._cache[x] = False
         return False
 
+
 def spanset(repo, start=0, end=None):
     """Create a spanset that represents a range of repository revisions
 
@@ -964,6 +988,7 @@
         start, end = end + 1, start + 1
     return _spanset(start, end, ascending, repo.changelog.filteredrevs)
 
+
 class _spanset(abstractsmartset):
     """Duck type for baseset class which represents a range of revisions and
     can work lazily and without having all the range in memory
@@ -974,6 +999,7 @@
     - revisions filtered with this repoview will be skipped.
 
     """
+
     def __init__(self, start, end, ascending, hiddenrevs):
         self._start = start
         self._end = end
@@ -1018,8 +1044,9 @@
 
     def __contains__(self, rev):
         hidden = self._hiddenrevs
-        return ((self._start <= rev < self._end)
-                and not (hidden and rev in hidden))
+        return (self._start <= rev < self._end) and not (
+            hidden and rev in hidden
+        )
 
     def __nonzero__(self):
         for r in self:
@@ -1078,8 +1105,9 @@
 
     @encoding.strmethod
     def __repr__(self):
-        d = {False: '-', True: '+'}[self._ascending]
-        return '<%s%s %d:%d>' % (_typename(self), d, self._start, self._end)
+        d = {False: b'-', True: b'+'}[self._ascending]
+        return b'<%s%s %d:%d>' % (_typename(self), d, self._start, self._end)
+
 
 class fullreposet(_spanset):
     """a set containing all revisions in the repo
@@ -1089,8 +1117,9 @@
     """
 
     def __init__(self, repo):
-        super(fullreposet, self).__init__(0, len(repo), True,
-                                          repo.changelog.filteredrevs)
+        super(fullreposet, self).__init__(
+            0, len(repo), True, repo.changelog.filteredrevs
+        )
 
     def __and__(self, other):
         """As self contains the whole repo, all of the other set should also be
@@ -1099,7 +1128,7 @@
         This boldly assumes the other contains valid revs only.
         """
         # other is not a smartset, make it so
-        if not util.safehasattr(other, 'isascending'):
+        if not util.safehasattr(other, b'isascending'):
             # filter out hidden revision
             # (this boldly assumes all smartset are pure)
             #
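
The classes above all implement the ``abstractsmartset`` contract:
ordered iteration, lazy ``filter()``, and cached ``first()``/``last()``.
A hedged sketch, again assuming an importable Mercurial 5.2 tree; the
``generatorset`` values mirror its doctest above::

   from mercurial.smartset import baseset, generatorset

   xs = generatorset(iter([0, 1, 4]), iterasc=True)
   print(xs.last())   # 4 -- the generator is consumed on first call
   print(xs.last())   # 4 -- answered from the cache afterwards

   ys = baseset([3, 1, 2])
   ys.sort()          # fixes an ascending iteration order
   print(list(ys))    # [1, 2, 3]
   print(ys.first())  # 1
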
--- a/mercurial/sparse.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/sparse.py	Mon Oct 21 11:09:48 2019 -0400
@@ -30,6 +30,7 @@
 # a per-repo option, possibly a repo requirement.
 enabled = False
 
+
 def parseconfig(ui, raw, action):
     """Parse sparse config file content.
 
@@ -43,43 +44,59 @@
     current = None
     havesection = False
 
-    for line in raw.split('\n'):
+    for line in raw.split(b'\n'):
         line = line.strip()
-        if not line or line.startswith('#'):
+        if not line or line.startswith(b'#'):
             # empty or comment line, skip
             continue
-        elif line.startswith('%include '):
+        elif line.startswith(b'%include '):
             line = line[9:].strip()
             if line:
                 profiles.add(line)
-        elif line == '[include]':
+        elif line == b'[include]':
             if havesection and current != includes:
                 # TODO pass filename into this API so we can report it.
-                raise error.Abort(_('%(action)s config cannot have includes '
-                                    'after excludes') % {'action': action})
+                raise error.Abort(
+                    _(
+                        b'%(action)s config cannot have includes '
+                        b'after excludes'
+                    )
+                    % {b'action': action}
+                )
             havesection = True
             current = includes
             continue
-        elif line == '[exclude]':
+        elif line == b'[exclude]':
             havesection = True
             current = excludes
         elif line:
             if current is None:
-                raise error.Abort(_('%(action)s config entry outside of '
-                                    'section: %(line)s')
-                                  % {'action': action, 'line': line},
-                                  hint=_('add an [include] or [exclude] line '
-                                         'to declare the entry type'))
+                raise error.Abort(
+                    _(
+                        b'%(action)s config entry outside of '
+                        b'section: %(line)s'
+                    )
+                    % {b'action': action, b'line': line},
+                    hint=_(
+                        b'add an [include] or [exclude] line '
+                        b'to declare the entry type'
+                    ),
+                )
 
-            if line.strip().startswith('/'):
-                ui.warn(_('warning: %(action)s profile cannot use'
-                          ' paths starting with /, ignoring %(line)s\n')
-                        % {'action': action, 'line': line})
+            if line.strip().startswith(b'/'):
+                ui.warn(
+                    _(
+                        b'warning: %(action)s profile cannot use'
+                        b' paths starting with /, ignoring %(line)s\n'
+                    )
+                    % {b'action': action, b'line': line}
+                )
                 continue
             current.add(line)
 
     return includes, excludes, profiles
 
+
 # Exists as separate function to facilitate monkeypatching.
 def readprofile(repo, profile, changeid):
     """Resolve the raw content of a sparse profile file."""
@@ -87,6 +104,7 @@
     # resolve and can be slow.
     return repo.filectx(profile, changeid=changeid).data()
 
+
 def patternsforrev(repo, rev):
     """Obtain sparse checkout patterns for the given rev.
 
@@ -97,15 +115,16 @@
     if not enabled:
         return set(), set(), set()
 
-    raw = repo.vfs.tryread('sparse')
+    raw = repo.vfs.tryread(b'sparse')
     if not raw:
         return set(), set(), set()
 
     if rev is None:
-        raise error.Abort(_('cannot parse sparse patterns from working '
-                            'directory'))
+        raise error.Abort(
+            _(b'cannot parse sparse patterns from working directory')
+        )
 
-    includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
+    includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
     ctx = repo[rev]
 
     if profiles:
@@ -121,17 +140,19 @@
                 raw = readprofile(repo, profile, rev)
             except error.ManifestLookupError:
                 msg = (
-                    "warning: sparse profile '%s' not found "
-                    "in rev %s - ignoring it\n" % (profile, ctx))
+                    b"warning: sparse profile '%s' not found "
+                    b"in rev %s - ignoring it\n" % (profile, ctx)
+                )
                 # experimental config: sparse.missingwarning
-                if repo.ui.configbool(
-                        'sparse', 'missingwarning'):
+                if repo.ui.configbool(b'sparse', b'missingwarning'):
                     repo.ui.warn(msg)
                 else:
                     repo.ui.debug(msg)
                 continue
 
-            pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw, 'sparse')
+            pincludes, pexcludes, subprofs = parseconfig(
+                repo.ui, raw, b'sparse'
+            )
             includes.update(pincludes)
             excludes.update(pexcludes)
             profiles.update(subprofs)
@@ -139,18 +160,22 @@
         profiles = visited
 
     if includes:
-        includes.add('.hg*')
+        includes.add(b'.hg*')
 
     return includes, excludes, profiles
 
+
 def activeconfig(repo):
     """Determine the active sparse config rules.
 
     Rules are constructed by reading the current sparse config and bringing in
     referenced profiles from parents of the working directory.
     """
-    revs = [repo.changelog.rev(node) for node in
-            repo.dirstate.parents() if node != nullid]
+    revs = [
+        repo.changelog.rev(node)
+        for node in repo.dirstate.parents()
+        if node != nullid
+    ]
 
     allincludes = set()
     allexcludes = set()
@@ -164,6 +189,7 @@
 
     return allincludes, allexcludes, allprofiles
 
+
 def configsignature(repo, includetemp=True):
     """Obtain the signature string for the current sparse configuration.
 
@@ -171,63 +197,68 @@
     """
     cache = repo._sparsesignaturecache
 
-    signature = cache.get('signature')
+    signature = cache.get(b'signature')
 
     if includetemp:
-        tempsignature = cache.get('tempsignature')
+        tempsignature = cache.get(b'tempsignature')
     else:
-        tempsignature = '0'
+        tempsignature = b'0'
 
     if signature is None or (includetemp and tempsignature is None):
-        signature = hex(hashlib.sha1(repo.vfs.tryread('sparse')).digest())
-        cache['signature'] = signature
+        signature = hex(hashlib.sha1(repo.vfs.tryread(b'sparse')).digest())
+        cache[b'signature'] = signature
 
         if includetemp:
-            raw = repo.vfs.tryread('tempsparse')
+            raw = repo.vfs.tryread(b'tempsparse')
             tempsignature = hex(hashlib.sha1(raw).digest())
-            cache['tempsignature'] = tempsignature
+            cache[b'tempsignature'] = tempsignature
 
-    return '%s %s' % (signature, tempsignature)
+    return b'%s %s' % (signature, tempsignature)
+
 
 def writeconfig(repo, includes, excludes, profiles):
     """Write the sparse config file given a sparse configuration."""
-    with repo.vfs('sparse', 'wb') as fh:
+    with repo.vfs(b'sparse', b'wb') as fh:
         for p in sorted(profiles):
-            fh.write('%%include %s\n' % p)
+            fh.write(b'%%include %s\n' % p)
 
         if includes:
-            fh.write('[include]\n')
+            fh.write(b'[include]\n')
             for i in sorted(includes):
                 fh.write(i)
-                fh.write('\n')
+                fh.write(b'\n')
 
         if excludes:
-            fh.write('[exclude]\n')
+            fh.write(b'[exclude]\n')
             for e in sorted(excludes):
                 fh.write(e)
-                fh.write('\n')
+                fh.write(b'\n')
 
     repo._sparsesignaturecache.clear()
 
+
 def readtemporaryincludes(repo):
-    raw = repo.vfs.tryread('tempsparse')
+    raw = repo.vfs.tryread(b'tempsparse')
     if not raw:
         return set()
 
-    return set(raw.split('\n'))
+    return set(raw.split(b'\n'))
+
 
 def writetemporaryincludes(repo, includes):
-    repo.vfs.write('tempsparse', '\n'.join(sorted(includes)))
+    repo.vfs.write(b'tempsparse', b'\n'.join(sorted(includes)))
     repo._sparsesignaturecache.clear()
 
+
 def addtemporaryincludes(repo, additional):
     includes = readtemporaryincludes(repo)
     for i in additional:
         includes.add(i)
     writetemporaryincludes(repo, includes)
 
+
 def prunetemporaryincludes(repo):
-    if not enabled or not repo.vfs.exists('tempsparse'):
+    if not enabled or not repo.vfs.exists(b'tempsparse'):
         return
 
     s = repo.status()
@@ -242,32 +273,37 @@
     tempincludes = readtemporaryincludes(repo)
     for file in tempincludes:
         if file in dirstate and not sparsematch(file):
-            message = _('dropping temporarily included sparse files')
+            message = _(b'dropping temporarily included sparse files')
             actions.append((file, None, message))
             dropped.append(file)
 
     typeactions = mergemod.emptyactions()
-    typeactions['r'] = actions
-    mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False,
-                          wantfiledata=False)
+    typeactions[b'r'] = actions
+    mergemod.applyupdates(
+        repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False
+    )
 
     # Fix dirstate
     for file in dropped:
         dirstate.drop(file)
 
-    repo.vfs.unlink('tempsparse')
+    repo.vfs.unlink(b'tempsparse')
     repo._sparsesignaturecache.clear()
-    msg = _('cleaned up %d temporarily added file(s) from the '
-            'sparse checkout\n')
+    msg = _(
+        b'cleaned up %d temporarily added file(s) from the '
+        b'sparse checkout\n'
+    )
     repo.ui.status(msg % len(tempincludes))
 
+
 def forceincludematcher(matcher, includes):
     """Returns a matcher that returns true for any of the forced includes
     before testing against the actual matcher."""
-    kindpats = [('path', include, '') for include in includes]
-    includematcher = matchmod.includematcher('', kindpats)
+    kindpats = [(b'path', include, b'') for include in includes]
+    includematcher = matchmod.includematcher(b'', kindpats)
     return matchmod.unionmatcher([includematcher, matcher])
 
+
 def matcher(repo, revs=None, includetemp=True):
     """Obtain a matcher for sparse working directories for the given revs.
 
@@ -281,12 +317,15 @@
         return matchmod.always()
 
     if not revs or revs == [None]:
-        revs = [repo.changelog.rev(node)
-                for node in repo.dirstate.parents() if node != nullid]
+        revs = [
+            repo.changelog.rev(node)
+            for node in repo.dirstate.parents()
+            if node != nullid
+        ]
 
     signature = configsignature(repo, includetemp=includetemp)
 
-    key = '%s %s' % (signature, ' '.join(map(pycompat.bytestr, revs)))
+    key = b'%s %s' % (signature, b' '.join(map(pycompat.bytestr, revs)))
 
     result = repo._sparsematchercache.get(key)
     if result:
@@ -298,9 +337,14 @@
             includes, excludes, profiles = patternsforrev(repo, rev)
 
             if includes or excludes:
-                matcher = matchmod.match(repo.root, '', [],
-                                         include=includes, exclude=excludes,
-                                         default='relpath')
+                matcher = matchmod.match(
+                    repo.root,
+                    b'',
+                    [],
+                    include=includes,
+                    exclude=excludes,
+                    default=b'relpath',
+                )
                 matchers.append(matcher)
         except IOError:
             pass
@@ -320,6 +364,7 @@
 
     return result
 
+
 def filterupdatesactions(repo, wctx, mctx, branchmerge, actions):
     """Filter updates to only lay out files that match the sparse rules."""
     if not enabled:
@@ -344,22 +389,22 @@
         sparsematch = matcher(repo, [mctx.rev()])
 
     temporaryfiles = []
-    for file, action in actions.iteritems():
+    for file, action in pycompat.iteritems(actions):
         type, args, msg = action
         files.add(file)
         if sparsematch(file):
             prunedactions[file] = action
-        elif type == 'm':
+        elif type == b'm':
             temporaryfiles.append(file)
             prunedactions[file] = action
         elif branchmerge:
-            if type != 'k':
+            if type != b'k':
                 temporaryfiles.append(file)
                 prunedactions[file] = action
-        elif type == 'f':
+        elif type == b'f':
             prunedactions[file] = action
         elif file in wctx:
-            prunedactions[file] = ('r', args, msg)
+            prunedactions[file] = (b'r', args, msg)
 
         if branchmerge and type == mergemod.ACTION_MERGE:
             f1, f2, fa, move, anc = args
@@ -367,13 +412,18 @@
                 temporaryfiles.append(f1)
 
     if len(temporaryfiles) > 0:
-        repo.ui.status(_('temporarily included %d file(s) in the sparse '
-                         'checkout for merging\n') % len(temporaryfiles))
+        repo.ui.status(
+            _(
+                b'temporarily included %d file(s) in the sparse '
+                b'checkout for merging\n'
+            )
+            % len(temporaryfiles)
+        )
         addtemporaryincludes(repo, temporaryfiles)
 
         # Add the new files to the working copy so they can be merged, etc
         actions = []
-        message = 'temporarily adding to sparse checkout'
+        message = b'temporarily adding to sparse checkout'
         wctxmanifest = repo[None].manifest()
         for file in temporaryfiles:
             if file in wctxmanifest:
@@ -381,9 +431,10 @@
                 actions.append((file, (fctx.flags(), False), message))
 
         typeactions = mergemod.emptyactions()
-        typeactions['g'] = actions
-        mergemod.applyupdates(repo, typeactions, repo[None], repo['.'],
-                              False, wantfiledata=False)
+        typeactions[b'g'] = actions
+        mergemod.applyupdates(
+            repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False
+        )
 
         dirstate = repo.dirstate
         for file, flags, msg in actions:
@@ -401,12 +452,13 @@
             new = sparsematch(file)
             if not old and new:
                 flags = mf.flags(file)
-                prunedactions[file] = ('g', (flags, False), '')
+                prunedactions[file] = (b'g', (flags, False), b'')
             elif old and not new:
-                prunedactions[file] = ('r', [], '')
+                prunedactions[file] = (b'r', [], b'')
 
     return prunedactions
 
+
 def refreshwdir(repo, origstatus, origsparsematch, force=False):
     """Refreshes working directory by taking sparse config into account.
 
@@ -426,16 +478,17 @@
 
     for f in pending:
         if not sparsematch(f):
-            repo.ui.warn(_("pending changes to '%s'\n") % f)
+            repo.ui.warn(_(b"pending changes to '%s'\n") % f)
             abort = not force
 
     if abort:
-        raise error.Abort(_('could not update sparseness due to pending '
-                            'changes'))
+        raise error.Abort(
+            _(b'could not update sparseness due to pending changes')
+        )
 
     # Calculate actions
     dirstate = repo.dirstate
-    ctx = repo['.']
+    ctx = repo[b'.']
     added = []
     lookup = []
     dropped = []
@@ -452,30 +505,34 @@
         if (new and not old) or (old and new and not file in dirstate):
             fl = mf.flags(file)
             if repo.wvfs.exists(file):
-                actions[file] = ('e', (fl,), '')
+                actions[file] = (b'e', (fl,), b'')
                 lookup.append(file)
             else:
-                actions[file] = ('g', (fl, False), '')
+                actions[file] = (b'g', (fl, False), b'')
                 added.append(file)
         # Drop files that are newly excluded, or that still exist in
         # the dirstate.
         elif (old and not new) or (not old and not new and file in dirstate):
             dropped.append(file)
             if file not in pending:
-                actions[file] = ('r', [], '')
+                actions[file] = (b'r', [], b'')
 
     # Verify there are no pending changes in newly included files
     abort = False
     for file in lookup:
-        repo.ui.warn(_("pending changes to '%s'\n") % file)
+        repo.ui.warn(_(b"pending changes to '%s'\n") % file)
         abort = not force
     if abort:
-        raise error.Abort(_('cannot change sparseness due to pending '
-                            'changes (delete the files or use '
-                            '--force to bring them back dirty)'))
+        raise error.Abort(
+            _(
+                b'cannot change sparseness due to pending '
+                b'changes (delete the files or use '
+                b'--force to bring them back dirty)'
+            )
+        )
 
     # Check for files that were only in the dirstate.
-    for file, state in dirstate.iteritems():
+    for file, state in pycompat.iteritems(dirstate):
         if not file in files:
             old = origsparsematch(file)
             new = sparsematch(file)
@@ -484,11 +541,12 @@
 
     # Apply changes to disk
     typeactions = mergemod.emptyactions()
-    for f, (m, args, msg) in actions.iteritems():
+    for f, (m, args, msg) in pycompat.iteritems(actions):
         typeactions[m].append((f, args, msg))
 
-    mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False,
-                          wantfiledata=False)
+    mergemod.applyupdates(
+        repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False
+    )
 
     # Fix dirstate
     for file in added:
@@ -503,6 +561,7 @@
 
     return added, dropped, lookup
 
+
 def aftercommit(repo, node):
     """Perform actions after a working directory commit."""
     # This function is called unconditionally, even if sparse isn't
@@ -519,11 +578,13 @@
 
     prunetemporaryincludes(repo)
 
-def _updateconfigandrefreshwdir(repo, includes, excludes, profiles,
-                                force=False, removing=False):
+
+def _updateconfigandrefreshwdir(
+    repo, includes, excludes, profiles, force=False, removing=False
+):
     """Update the sparse config and working directory state."""
-    raw = repo.vfs.tryread('sparse')
-    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, 'sparse')
+    raw = repo.vfs.tryread(b'sparse')
+    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, b'sparse')
 
     oldstatus = repo.status()
     oldmatch = matcher(repo)
@@ -537,11 +598,11 @@
     # updated. But this requires massive rework to matcher() and its
     # consumers.
 
-    if 'exp-sparse' in oldrequires and removing:
-        repo.requirements.discard('exp-sparse')
+    if b'exp-sparse' in oldrequires and removing:
+        repo.requirements.discard(b'exp-sparse')
         scmutil.writerequires(repo.vfs, repo.requirements)
-    elif 'exp-sparse' not in oldrequires:
-        repo.requirements.add('exp-sparse')
+    elif b'exp-sparse' not in oldrequires:
+        repo.requirements.add(b'exp-sparse')
         scmutil.writerequires(repo.vfs, repo.requirements)
 
     try:
@@ -555,6 +616,7 @@
         writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
         raise
 
+
 def clearrules(repo, force=False):
     """Clears include/exclude rules from the sparse config.
 
@@ -562,14 +624,15 @@
     directory is refreshed, as needed.
     """
     with repo.wlock():
-        raw = repo.vfs.tryread('sparse')
-        includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
+        raw = repo.vfs.tryread(b'sparse')
+        includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
 
         if not includes and not excludes:
             return
 
         _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)
 
+
 def importfromfiles(repo, opts, paths, force=False):
     """Import sparse config rules from files.
 
@@ -578,19 +641,20 @@
     """
     with repo.wlock():
         # read current configuration
-        raw = repo.vfs.tryread('sparse')
-        includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
+        raw = repo.vfs.tryread(b'sparse')
+        includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
         aincludes, aexcludes, aprofiles = activeconfig(repo)
 
         # Import rules on top; only take in rules that are not yet
         # part of the active rules.
         changed = False
         for p in paths:
-            with util.posixfile(util.expandpath(p), mode='rb') as fh:
+            with util.posixfile(util.expandpath(p), mode=b'rb') as fh:
                 raw = fh.read()
 
-            iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw,
-                                                          'sparse')
+            iincludes, iexcludes, iprofiles = parseconfig(
+                repo.ui, raw, b'sparse'
+            )
             oldsize = len(includes) + len(excludes) + len(profiles)
             includes.update(iincludes - aincludes)
             excludes.update(iexcludes - aexcludes)
@@ -606,15 +670,31 @@
             includecount = len(includes - aincludes)
             excludecount = len(excludes - aexcludes)
 
-            fcounts = map(len, _updateconfigandrefreshwdir(
-                repo, includes, excludes, profiles, force=force))
+            fcounts = map(
+                len,
+                _updateconfigandrefreshwdir(
+                    repo, includes, excludes, profiles, force=force
+                ),
+            )
+
+        printchanges(
+            repo.ui, opts, profilecount, includecount, excludecount, *fcounts
+        )
+
 
-        printchanges(repo.ui, opts, profilecount, includecount, excludecount,
-                     *fcounts)
-
-def updateconfig(repo, pats, opts, include=False, exclude=False, reset=False,
-                 delete=False, enableprofile=False, disableprofile=False,
-                 force=False, usereporootpaths=False):
+def updateconfig(
+    repo,
+    pats,
+    opts,
+    include=False,
+    exclude=False,
+    reset=False,
+    delete=False,
+    enableprofile=False,
+    disableprofile=False,
+    force=False,
+    usereporootpaths=False,
+):
     """Perform a sparse config update.
 
     Only one of the actions may be performed.
@@ -622,9 +702,10 @@
     The new config is written out and a working directory refresh is performed.
     """
     with repo.wlock():
-        raw = repo.vfs.tryread('sparse')
-        oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw,
-                                                          'sparse')
+        raw = repo.vfs.tryread(b'sparse')
+        oldinclude, oldexclude, oldprofiles = parseconfig(
+            repo.ui, raw, b'sparse'
+        )
 
         if reset:
             newinclude = set()
@@ -636,7 +717,7 @@
             newprofiles = set(oldprofiles)
 
         if any(os.path.isabs(pat) for pat in pats):
-            raise error.Abort(_('paths cannot be absolute'))
+            raise error.Abort(_(b'paths cannot be absolute'))
 
         if not usereporootpaths:
             # let's treat paths as relative to cwd
@@ -645,8 +726,9 @@
             for kindpat in pats:
                 kind, pat = matchmod._patsplit(kindpat, None)
                 if kind in matchmod.cwdrelativepatternkinds or kind is None:
-                    ap = ((kind + ':' if kind else '') +
-                          pathutil.canonpath(root, cwd, pat))
+                    ap = (kind + b':' if kind else b'') + pathutil.canonpath(
+                        root, cwd, pat
+                    )
                     abspats.append(ap)
                 else:
                     abspats.append(kindpat)
@@ -664,39 +746,78 @@
             newinclude.difference_update(pats)
             newexclude.difference_update(pats)
 
-        profilecount = (len(newprofiles - oldprofiles) -
-                        len(oldprofiles - newprofiles))
-        includecount = (len(newinclude - oldinclude) -
-                        len(oldinclude - newinclude))
-        excludecount = (len(newexclude - oldexclude) -
-                        len(oldexclude - newexclude))
+        profilecount = len(newprofiles - oldprofiles) - len(
+            oldprofiles - newprofiles
+        )
+        includecount = len(newinclude - oldinclude) - len(
+            oldinclude - newinclude
+        )
+        excludecount = len(newexclude - oldexclude) - len(
+            oldexclude - newexclude
+        )
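
The three counts above are net changes (additions minus removals). A quick
worked example:

    old = {b'a', b'b'}
    new = {b'b', b'c', b'd'}
    # two rules added (c, d), one removed (a): net +1
    assert len(new - old) - len(old - new) == 1
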
 
-        fcounts = map(len, _updateconfigandrefreshwdir(
-            repo, newinclude, newexclude, newprofiles, force=force,
-            removing=reset))
+        fcounts = map(
+            len,
+            _updateconfigandrefreshwdir(
+                repo,
+                newinclude,
+                newexclude,
+                newprofiles,
+                force=force,
+                removing=reset,
+            ),
+        )
+
+        printchanges(
+            repo.ui, opts, profilecount, includecount, excludecount, *fcounts
+        )
+
 
-        printchanges(repo.ui, opts, profilecount, includecount,
-                     excludecount, *fcounts)
-
-def printchanges(ui, opts, profilecount=0, includecount=0, excludecount=0,
-                 added=0, dropped=0, conflicting=0):
+def printchanges(
+    ui,
+    opts,
+    profilecount=0,
+    includecount=0,
+    excludecount=0,
+    added=0,
+    dropped=0,
+    conflicting=0,
+):
     """Print output summarizing sparse config changes."""
-    with ui.formatter('sparse', opts) as fm:
+    with ui.formatter(b'sparse', opts) as fm:
         fm.startitem()
-        fm.condwrite(ui.verbose, 'profiles_added', _('Profiles changed: %d\n'),
-                     profilecount)
-        fm.condwrite(ui.verbose, 'include_rules_added',
-                     _('Include rules changed: %d\n'), includecount)
-        fm.condwrite(ui.verbose, 'exclude_rules_added',
-                     _('Exclude rules changed: %d\n'), excludecount)
+        fm.condwrite(
+            ui.verbose,
+            b'profiles_added',
+            _(b'Profiles changed: %d\n'),
+            profilecount,
+        )
+        fm.condwrite(
+            ui.verbose,
+            b'include_rules_added',
+            _(b'Include rules changed: %d\n'),
+            includecount,
+        )
+        fm.condwrite(
+            ui.verbose,
+            b'exclude_rules_added',
+            _(b'Exclude rules changed: %d\n'),
+            excludecount,
+        )
 
         # In 'plain' verbose mode, mergemod.applyupdates already outputs what
         # files are added or removed outside of the templating formatter
         # framework. No point in repeating ourselves in that case.
         if not fm.isplain():
-            fm.condwrite(ui.verbose, 'files_added', _('Files added: %d\n'),
-                         added)
-            fm.condwrite(ui.verbose, 'files_dropped', _('Files dropped: %d\n'),
-                         dropped)
-            fm.condwrite(ui.verbose, 'files_conflicting',
-                         _('Files conflicting: %d\n'), conflicting)
+            fm.condwrite(
+                ui.verbose, b'files_added', _(b'Files added: %d\n'), added
+            )
+            fm.condwrite(
+                ui.verbose, b'files_dropped', _(b'Files dropped: %d\n'), dropped
+            )
+            fm.condwrite(
+                ui.verbose,
+                b'files_conflicting',
+                _(b'Files conflicting: %d\n'),
+                conflicting,
+            )
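
The wholesale b'' prefixing in this file follows from Python 3 separating
str and bytes: Mercurial keeps config keys, vfs paths, and messages as
bytes end to end. An illustrative snippet (not from the source) of what
breaks without the prefixes:

    raw = b'path:foo\npath:bar\n'
    assert raw.split(b'\n')[0] == b'path:foo'  # bytes separator: fine
    # raw.split('\n') raises TypeError on Python 3: a str separator
    # cannot be applied to bytes, hence _(b'...') and b'sparse'.
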
--- a/mercurial/sshpeer.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/sshpeer.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,6 +11,7 @@
 import uuid
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     error,
     pycompat,
@@ -25,13 +26,15 @@
     stringutil,
 )
 
+
 def _serverquote(s):
     """quote a string for the remote shell ... which we assume is sh"""
     if not s:
         return s
-    if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
+    if re.match(b'[a-zA-Z0-9@%_+=:,./-]*$', s):
         return s
-    return "'%s'" % s.replace("'", "'\\''")
+    return b"'%s'" % s.replace(b"'", b"'\\''")
+
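
The quoting rule applied above is the standard sh idiom: to embed a single
quote inside a single-quoted word, close the quote, emit an escaped quote,
and reopen. A small standalone check of the transformation:

    def sh_single_quote(s):
        # don't  ->  'don'\''t'
        return b"'%s'" % s.replace(b"'", b"'\\''")

    assert sh_single_quote(b"don't") == b"'don'\\''t'"
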
 
 def _forwardoutput(ui, pipe):
     """display all data currently available on pipe as remote output.
@@ -41,7 +44,8 @@
         s = procutil.readpipe(pipe)
         if s:
             for l in s.splitlines():
-                ui.status(_("remote: "), l, '\n')
+                ui.status(_(b"remote: "), l, b'\n')
+
 
 class doublepipe(object):
     """Operate a side-channel pipe in addition of a main one
@@ -72,8 +76,10 @@
 
         (This will only wait for data if the setup is supported by `util.poll`)
         """
-        if (isinstance(self._main, util.bufferedinputpipe) and
-            self._main.hasbuffer):
+        if (
+            isinstance(self._main, util.bufferedinputpipe)
+            and self._main.hasbuffer
+        ):
             # Main has data. Assume side is worth poking at.
             return True, True
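
When the main pipe has no buffered data, ``_wait`` falls back to polling
both descriptors. A minimal sketch of that step using the stdlib directly
(the real code goes through ``util.poll``):

    import select

    def wait_readable(main_fd, side_fd, timeout=None):
        # Block until at least one pipe is readable, then report which.
        ready, _, _ = select.select([main_fd, side_fd], [], [], timeout)
        return main_fd in ready, side_fd in ready
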
 
@@ -86,10 +92,10 @@
         return (self._main.fileno() in act, self._side.fileno() in act)
 
     def write(self, data):
-        return self._call('write', data)
+        return self._call(b'write', data)
 
     def read(self, size):
-        r = self._call('read', size)
+        r = self._call(b'read', size)
         if size != 0 and not r:
             # We've observed a condition that indicates stdout
             # closed unexpectedly. Check stderr one
@@ -100,7 +106,7 @@
         return r
 
     def unbufferedread(self, size):
-        r = self._call('unbufferedread', size)
+        r = self._call(b'unbufferedread', size)
         if size != 0 and not r:
             # We've observed a condition that indicates stdout
             # closed unexpectedly. Check stderr one
@@ -111,7 +117,7 @@
         return r
 
     def readline(self):
-        return self._call('readline')
+        return self._call(b'readline')
 
     def _call(self, methname, data=None):
         """call <methname> on "main", forward output of "side" while blocking
@@ -119,7 +125,7 @@
         # data can be '' or 0
         if (data is not None and not data) or self._main.closed:
             _forwardoutput(self._ui, self._side)
-            return ''
+            return b''
         while True:
             mainready, sideready = self._wait()
             if sideready:
@@ -137,6 +143,7 @@
     def flush(self):
         return self._main.flush()
 
+
 def _cleanuppipes(ui, pipei, pipeo, pipee):
     """Clean up pipes used by an SSH connection."""
     if pipeo:
@@ -148,25 +155,29 @@
         # Try to read from the err descriptor until EOF.
         try:
             for l in pipee:
-                ui.status(_('remote: '), l)
+                ui.status(_(b'remote: '), l)
         except (IOError, ValueError):
             pass
 
         pipee.close()
 
+
 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
     """Create an SSH connection to a server.
 
     Returns a tuple of (process, stdin, stdout, stderr) for the
     spawned process.
     """
-    cmd = '%s %s %s' % (
+    cmd = b'%s %s %s' % (
         sshcmd,
         args,
-        procutil.shellquote('%s -R %s serve --stdio' % (
-            _serverquote(remotecmd), _serverquote(path))))
+        procutil.shellquote(
+            b'%s -R %s serve --stdio'
+            % (_serverquote(remotecmd), _serverquote(path))
+        ),
+    )
 
-    ui.debug('running %s\n' % cmd)
+    ui.debug(b'running %s\n' % cmd)
     cmd = procutil.quotecommand(cmd)
 
     # no buffer allow the use of 'select'
@@ -176,24 +187,28 @@
 
     return proc, stdin, stdout, stderr
 
+
 def _clientcapabilities():
     """Return list of capabilities of this client.
 
     Returns a list of capabilities that are supported by this client.
     """
-    protoparams = {'partial-pull'}
-    comps = [e.wireprotosupport().name for e in
-             util.compengines.supportedwireengines(util.CLIENTROLE)]
-    protoparams.add('comp=%s' % ','.join(comps))
+    protoparams = {b'partial-pull'}
+    comps = [
+        e.wireprotosupport().name
+        for e in util.compengines.supportedwireengines(util.CLIENTROLE)
+    ]
+    protoparams.add(b'comp=%s' % b','.join(comps))
     return protoparams
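
For illustration, assuming zstd, zlib, and bz2 are the supported wire
compression engines (the actual set depends on the build), the returned
set would be:

    comps = [b'zstd', b'zlib', b'bz2']
    protoparams = {b'partial-pull', b'comp=%s' % b','.join(comps)}
    # -> {b'partial-pull', b'comp=zstd,zlib,bz2'}
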
 
+
 def _performhandshake(ui, stdin, stdout, stderr):
     def badresponse():
         # Flush any output on stderr.
         _forwardoutput(ui, stderr)
 
-        msg = _('no suitable response from remote hg')
-        hint = ui.config('ui', 'ssherrorhint')
+        msg = _(b'no suitable response from remote hg')
+        hint = ui.config(b'ui', b'ssherrorhint')
         raise error.RepoError(msg, hint=hint)
 
     # The handshake consists of sending wire protocol commands in reverse
@@ -248,37 +263,37 @@
     # for output to our well-known ``between`` command. Of course, if
     # the banner contains ``1\n\n``, this will throw off our detection.
 
-    requestlog = ui.configbool('devel', 'debug.peer-request')
+    requestlog = ui.configbool(b'devel', b'debug.peer-request')
 
     # Generate a random token to help identify responses to version 2
     # upgrade request.
     token = pycompat.sysbytes(str(uuid.uuid4()))
     upgradecaps = [
-        ('proto', wireprotoserver.SSHV2),
+        (b'proto', wireprotoserver.SSHV2),
     ]
     upgradecaps = util.urlreq.urlencode(upgradecaps)
 
     try:
-        pairsarg = '%s-%s' % ('0' * 40, '0' * 40)
+        pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40)
         handshake = [
-            'hello\n',
-            'between\n',
-            'pairs %d\n' % len(pairsarg),
+            b'hello\n',
+            b'between\n',
+            b'pairs %d\n' % len(pairsarg),
             pairsarg,
         ]
 
         # Request upgrade to version 2 if configured.
-        if ui.configbool('experimental', 'sshpeer.advertise-v2'):
-            ui.debug('sending upgrade request: %s %s\n' % (token, upgradecaps))
-            handshake.insert(0, 'upgrade %s %s\n' % (token, upgradecaps))
+        if ui.configbool(b'experimental', b'sshpeer.advertise-v2'):
+            ui.debug(b'sending upgrade request: %s %s\n' % (token, upgradecaps))
+            handshake.insert(0, b'upgrade %s %s\n' % (token, upgradecaps))
 
         if requestlog:
-            ui.debug('devel-peer-request: hello+between\n')
-            ui.debug('devel-peer-request:   pairs: %d bytes\n' % len(pairsarg))
-        ui.debug('sending hello command\n')
-        ui.debug('sending between command\n')
+            ui.debug(b'devel-peer-request: hello+between\n')
+            ui.debug(b'devel-peer-request:   pairs: %d bytes\n' % len(pairsarg))
+        ui.debug(b'sending hello command\n')
+        ui.debug(b'sending between command\n')
 
-        stdin.write(''.join(handshake))
+        stdin.write(b''.join(handshake))
         stdin.flush()
     except IOError:
         badresponse()
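
Concretely, with no upgrade request the single write above carries the
following bytes; the ``pairs`` argument is a dummy pair of all-zero hashes
(40 + 1 + 40 = 81 bytes):

    pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40)
    payload = b''.join([
        b'hello\n',
        b'between\n',
        b'pairs %d\n' % len(pairsarg),
        pairsarg,
    ])
    assert payload.startswith(b'hello\nbetween\npairs 81\n')
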
@@ -287,7 +302,7 @@
     protoname = wireprototypes.SSHV1
     reupgraded = re.compile(b'^upgraded %s (.*)$' % stringutil.reescape(token))
 
-    lines = ['', 'dummy']
+    lines = [b'', b'dummy']
     max_noise = 500
     while lines[-1] and max_noise:
         try:
@@ -299,7 +314,7 @@
             m = reupgraded.match(l)
             if m:
                 protoname = m.group(1)
-                ui.debug('protocol upgraded to %s\n' % protoname)
+                ui.debug(b'protocol upgraded to %s\n' % protoname)
                 # If an upgrade was handled, the ``hello`` and ``between``
                 # requests are ignored. The next output belongs to the
                 # protocol, so stop scanning lines.
@@ -308,10 +323,10 @@
             # Otherwise it could be a banner or a ``0\n`` response if the
             # server doesn't support the upgrade.
 
-            if lines[-1] == '1\n' and l == '\n':
+            if lines[-1] == b'1\n' and l == b'\n':
                 break
             if l:
-                ui.debug('remote: ', l)
+                ui.debug(b'remote: ', l)
             lines.append(l)
             max_noise -= 1
         except IOError:
@@ -327,8 +342,8 @@
         for l in reversed(lines):
             # Look for response to ``hello`` command. Scan from the back so
             # we don't misinterpret banner output as the command reply.
-            if l.startswith('capabilities:'):
-                caps.update(l[:-1].split(':')[1].split())
+            if l.startswith(b'capabilities:'):
+                caps.update(l[:-1].split(b':')[1].split())
                 break
     elif protoname == wireprotoserver.SSHV2:
         # We see a line with the number of bytes to follow and then a value
@@ -340,12 +355,12 @@
             badresponse()
 
         capsline = stdout.read(valuelen)
-        if not capsline.startswith('capabilities: '):
+        if not capsline.startswith(b'capabilities: '):
             badresponse()
 
-        ui.debug('remote: %s\n' % capsline)
+        ui.debug(b'remote: %s\n' % capsline)
 
-        caps.update(capsline.split(':')[1].split())
+        caps.update(capsline.split(b':')[1].split())
         # Trailing newline.
         stdout.read(1)
 
@@ -363,9 +378,11 @@
 
     return protoname, caps
 
+
 class sshv1peer(wireprotov1peer.wirepeer):
-    def __init__(self, ui, url, proc, stdin, stdout, stderr, caps,
-                 autoreadstderr=True):
+    def __init__(
+        self, ui, url, proc, stdin, stdout, stderr, caps, autoreadstderr=True
+    ):
         """Create a peer from an existing SSH connection.
 
         ``proc`` is a handle on the underlying SSH process.
@@ -396,7 +413,7 @@
     # Commands that have a "framed" response where the first line of the
     # response contains the length of that response.
     _FRAMED_COMMANDS = {
-        'batch',
+        b'batch',
     }
 
     # Begin of ipeerconnection interface.
@@ -438,34 +455,35 @@
     __del__ = _cleanup
 
     def _sendrequest(self, cmd, args, framed=False):
-        if (self.ui.debugflag
-            and self.ui.configbool('devel', 'debug.peer-request')):
+        if self.ui.debugflag and self.ui.configbool(
+            b'devel', b'debug.peer-request'
+        ):
             dbg = self.ui.debug
-            line = 'devel-peer-request: %s\n'
+            line = b'devel-peer-request: %s\n'
             dbg(line % cmd)
             for key, value in sorted(args.items()):
                 if not isinstance(value, dict):
-                    dbg(line % '  %s: %d bytes' % (key, len(value)))
+                    dbg(line % b'  %s: %d bytes' % (key, len(value)))
                 else:
                     for dk, dv in sorted(value.items()):
-                        dbg(line % '  %s-%s: %d' % (key, dk, len(dv)))
-        self.ui.debug("sending %s command\n" % cmd)
-        self._pipeo.write("%s\n" % cmd)
+                        dbg(line % b'  %s-%s: %d' % (key, dk, len(dv)))
+        self.ui.debug(b"sending %s command\n" % cmd)
+        self._pipeo.write(b"%s\n" % cmd)
         _func, names = wireprotov1server.commands[cmd]
         keys = names.split()
         wireargs = {}
         for k in keys:
-            if k == '*':
-                wireargs['*'] = args
+            if k == b'*':
+                wireargs[b'*'] = args
                 break
             else:
                 wireargs[k] = args[k]
                 del args[k]
-        for k, v in sorted(wireargs.iteritems()):
-            self._pipeo.write("%s %d\n" % (k, len(v)))
+        for k, v in sorted(pycompat.iteritems(wireargs)):
+            self._pipeo.write(b"%s %d\n" % (k, len(v)))
             if isinstance(v, dict):
-                for dk, dv in v.iteritems():
-                    self._pipeo.write("%s %d\n" % (dk, len(dv)))
+                for dk, dv in pycompat.iteritems(v):
+                    self._pipeo.write(b"%s %d\n" % (dk, len(dv)))
                     self._pipeo.write(dv)
             else:
                 self._pipeo.write(v)
@@ -498,21 +516,21 @@
         # continue submitting the payload.
         r = self._call(cmd, **args)
         if r:
-            return '', r
+            return b'', r
 
         # The payload consists of frames with content followed by an empty
         # frame.
-        for d in iter(lambda: fp.read(4096), ''):
+        for d in iter(lambda: fp.read(4096), b''):
             self._writeframed(d)
-        self._writeframed("", flush=True)
+        self._writeframed(b"", flush=True)
 
         # In case of success, there is an empty frame and a frame containing
         # the integer result (as a string).
         # In case of error, there is a non-empty frame containing the error.
         r = self._readframed()
         if r:
-            return '', r
-        return self._readframed(), ''
+            return b'', r
+        return self._readframed(), b''
 
     def _calltwowaystream(self, cmd, fp, **args):
         # The server responds with an empty frame if the client should
@@ -520,29 +538,29 @@
         r = self._call(cmd, **args)
         if r:
             # XXX needs to be made better
-            raise error.Abort(_('unexpected remote reply: %s') % r)
+            raise error.Abort(_(b'unexpected remote reply: %s') % r)
 
         # The payload consists of frames with content followed by an empty
         # frame.
-        for d in iter(lambda: fp.read(4096), ''):
+        for d in iter(lambda: fp.read(4096), b''):
             self._writeframed(d)
-        self._writeframed("", flush=True)
+        self._writeframed(b"", flush=True)
 
         return self._pipei
 
     def _getamount(self):
         l = self._pipei.readline()
-        if l == '\n':
+        if l == b'\n':
             if self._autoreadstderr:
                 self._readerr()
-            msg = _('check previous remote output')
+            msg = _(b'check previous remote output')
             self._abort(error.OutOfBandError(hint=msg))
         if self._autoreadstderr:
             self._readerr()
         try:
             return int(l)
         except ValueError:
-            self._abort(error.ResponseError(_("unexpected response:"), l))
+            self._abort(error.ResponseError(_(b"unexpected response:"), l))
 
     def _readframed(self):
         size = self._getamount()
@@ -552,7 +570,7 @@
         return self._pipei.read(size)
 
     def _writeframed(self, data, flush=False):
-        self._pipeo.write("%d\n" % len(data))
+        self._pipeo.write(b"%d\n" % len(data))
         if data:
             self._pipeo.write(data)
         if flush:
@@ -560,12 +578,15 @@
         if self._autoreadstderr:
             self._readerr()
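
The frame format written by ``_writeframed`` and read back by
``_readframed``/``_getamount`` is an ASCII decimal length, a newline, then
that many bytes; an empty frame (``0\n``) terminates a payload:

    def encode_frame(data):
        # Length header, newline, then the raw bytes.
        return b"%d\n" % len(data) + data

    assert encode_frame(b'abc') == b'3\nabc'
    assert encode_frame(b'') == b'0\n'  # empty frame: end of payload
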
 
+
 class sshv2peer(sshv1peer):
     """A peer that speakers version 2 of the transport protocol."""
+
     # Currently version 2 is identical to version 1 post handshake.
     # And handshake is performed before the peer is instantiated. So
     # we need no custom code.
 
+
 def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
     """Make a peer instance from existing pipes.
 
@@ -587,15 +608,33 @@
         raise
 
     if protoname == wireprototypes.SSHV1:
-        return sshv1peer(ui, path, proc, stdin, stdout, stderr, caps,
-                         autoreadstderr=autoreadstderr)
+        return sshv1peer(
+            ui,
+            path,
+            proc,
+            stdin,
+            stdout,
+            stderr,
+            caps,
+            autoreadstderr=autoreadstderr,
+        )
     elif protoname == wireprototypes.SSHV2:
-        return sshv2peer(ui, path, proc, stdin, stdout, stderr, caps,
-                         autoreadstderr=autoreadstderr)
+        return sshv2peer(
+            ui,
+            path,
+            proc,
+            stdin,
+            stdout,
+            stderr,
+            caps,
+            autoreadstderr=autoreadstderr,
+        )
     else:
         _cleanuppipes(ui, stdout, stdin, stderr)
-        raise error.RepoError(_('unknown version of SSH protocol: %s') %
-                              protoname)
+        raise error.RepoError(
+            _(b'unknown version of SSH protocol: %s') % protoname
+        )
+
 
 def instance(ui, path, create, intents=None, createopts=None):
     """Create an SSH peer.
@@ -603,19 +642,19 @@
     The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
     """
     u = util.url(path, parsequery=False, parsefragment=False)
-    if u.scheme != 'ssh' or not u.host or u.path is None:
-        raise error.RepoError(_("couldn't parse location %s") % path)
+    if u.scheme != b'ssh' or not u.host or u.path is None:
+        raise error.RepoError(_(b"couldn't parse location %s") % path)
 
     util.checksafessh(path)
 
     if u.passwd is not None:
-        raise error.RepoError(_('password in URL not supported'))
+        raise error.RepoError(_(b'password in URL not supported'))
 
-    sshcmd = ui.config('ui', 'ssh')
-    remotecmd = ui.config('ui', 'remotecmd')
-    sshaddenv = dict(ui.configitems('sshenv'))
+    sshcmd = ui.config(b'ui', b'ssh')
+    remotecmd = ui.config(b'ui', b'remotecmd')
+    sshaddenv = dict(ui.configitems(b'sshenv'))
     sshenv = procutil.shellenviron(sshaddenv)
-    remotepath = u.path or '.'
+    remotepath = u.path or b'.'
 
     args = procutil.sshargs(sshcmd, u.host, u.user, u.port)
 
@@ -625,30 +664,41 @@
         # querying the remote, there's no way of knowing if the remote even
         # supports said requested feature.
         if createopts:
-            raise error.RepoError(_('cannot create remote SSH repositories '
-                                    'with extra options'))
+            raise error.RepoError(
+                _(
+                    b'cannot create remote SSH repositories '
+                    b'with extra options'
+                )
+            )
 
-        cmd = '%s %s %s' % (sshcmd, args,
-            procutil.shellquote('%s init %s' %
-                (_serverquote(remotecmd), _serverquote(remotepath))))
-        ui.debug('running %s\n' % cmd)
-        res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
+        cmd = b'%s %s %s' % (
+            sshcmd,
+            args,
+            procutil.shellquote(
+                b'%s init %s'
+                % (_serverquote(remotecmd), _serverquote(remotepath))
+            ),
+        )
+        ui.debug(b'running %s\n' % cmd)
+        res = ui.system(cmd, blockedtag=b'sshpeer', environ=sshenv)
         if res != 0:
-            raise error.RepoError(_('could not create remote repo'))
+            raise error.RepoError(_(b'could not create remote repo'))
 
-    proc, stdin, stdout, stderr = _makeconnection(ui, sshcmd, args, remotecmd,
-                                                  remotepath, sshenv)
+    proc, stdin, stdout, stderr = _makeconnection(
+        ui, sshcmd, args, remotecmd, remotepath, sshenv
+    )
 
     peer = makepeer(ui, path, proc, stdin, stdout, stderr)
 
     # Finally, if supported by the server, notify it about our own
     # capabilities.
-    if 'protocaps' in peer.capabilities():
+    if b'protocaps' in peer.capabilities():
         try:
-            peer._call("protocaps",
-                       caps=' '.join(sorted(_clientcapabilities())))
+            peer._call(
+                b"protocaps", caps=b' '.join(sorted(_clientcapabilities()))
+            )
         except IOError:
             peer._cleanup()
-            raise error.RepoError(_('capability exchange failed'))
+            raise error.RepoError(_(b'capability exchange failed'))
 
     return peer
--- a/mercurial/sslutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/sslutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,6 +15,7 @@
 import ssl
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     encoding,
     error,
@@ -36,27 +37,27 @@
 # separate code paths depending on support in Python.
 
 configprotocols = {
-    'tls1.0',
-    'tls1.1',
-    'tls1.2',
+    b'tls1.0',
+    b'tls1.1',
+    b'tls1.2',
 }
 
 hassni = getattr(ssl, 'HAS_SNI', False)
 
 # TLS 1.1 and 1.2 may not be supported if the OpenSSL Python is compiled
 # against doesn't support them.
-supportedprotocols = {'tls1.0'}
-if util.safehasattr(ssl, 'PROTOCOL_TLSv1_1'):
-    supportedprotocols.add('tls1.1')
-if util.safehasattr(ssl, 'PROTOCOL_TLSv1_2'):
-    supportedprotocols.add('tls1.2')
+supportedprotocols = {b'tls1.0'}
+if util.safehasattr(ssl, b'PROTOCOL_TLSv1_1'):
+    supportedprotocols.add(b'tls1.1')
+if util.safehasattr(ssl, b'PROTOCOL_TLSv1_2'):
+    supportedprotocols.add(b'tls1.2')
 
 try:
     # ssl.SSLContext was added in 2.7.9 and presence indicates modern
     # SSL/TLS features are available.
     SSLContext = ssl.SSLContext
     modernssl = True
-    _canloaddefaultcerts = util.safehasattr(SSLContext, 'load_default_certs')
+    _canloaddefaultcerts = util.safehasattr(SSLContext, b'load_default_certs')
 except AttributeError:
     modernssl = False
     _canloaddefaultcerts = False
@@ -87,9 +88,9 @@
 
         def load_verify_locations(self, cafile=None, capath=None, cadata=None):
             if capath:
-                raise error.Abort(_('capath not supported'))
+                raise error.Abort(_(b'capath not supported'))
             if cadata:
-                raise error.Abort(_('cadata not supported'))
+                raise error.Abort(_(b'cadata not supported'))
 
             self._cacerts = cafile
 
@@ -113,6 +114,7 @@
 
             return ssl.wrap_socket(socket, **args)
 
+
 def _hostsettings(ui, hostname):
     """Obtain security settings for a hostname.
 
@@ -122,165 +124,186 @@
     s = {
         # Whether we should attempt to load default/available CA certs
         # if an explicit ``cafile`` is not defined.
-        'allowloaddefaultcerts': True,
+        b'allowloaddefaultcerts': True,
         # List of 2-tuple of (hash algorithm, hash).
-        'certfingerprints': [],
+        b'certfingerprints': [],
         # Path to file containing concatenated CA certs. Used by
         # SSLContext.load_verify_locations().
-        'cafile': None,
+        b'cafile': None,
         # Whether certificate verification should be disabled.
-        'disablecertverification': False,
+        b'disablecertverification': False,
         # Whether the legacy [hostfingerprints] section has data for this host.
-        'legacyfingerprint': False,
+        b'legacyfingerprint': False,
         # PROTOCOL_* constant to use for SSLContext.__init__.
-        'protocol': None,
+        b'protocol': None,
         # String representation of minimum protocol to be used for UI
         # presentation.
-        'protocolui': None,
+        b'protocolui': None,
         # ssl.CERT_* constant used by SSLContext.verify_mode.
-        'verifymode': None,
+        b'verifymode': None,
         # Defines extra ssl.OP* bitwise options to set.
-        'ctxoptions': None,
+        b'ctxoptions': None,
         # OpenSSL Cipher List to use (instead of default).
-        'ciphers': None,
+        b'ciphers': None,
     }
 
     # Allow minimum TLS protocol to be specified in the config.
     def validateprotocol(protocol, key):
         if protocol not in configprotocols:
             raise error.Abort(
-                _('unsupported protocol from hostsecurity.%s: %s') %
-                (key, protocol),
-                hint=_('valid protocols: %s') %
-                     ' '.join(sorted(configprotocols)))
+                _(b'unsupported protocol from hostsecurity.%s: %s')
+                % (key, protocol),
+                hint=_(b'valid protocols: %s')
+                % b' '.join(sorted(configprotocols)),
+            )
 
     # We default to TLS 1.1+ where we can because TLS 1.0 has known
     # vulnerabilities (like BEAST and POODLE). We allow users to downgrade to
     # TLS 1.0+ via config options in case a legacy server is encountered.
-    if 'tls1.1' in supportedprotocols:
-        defaultprotocol = 'tls1.1'
+    if b'tls1.1' in supportedprotocols:
+        defaultprotocol = b'tls1.1'
     else:
         # Let people know they are borderline secure.
         # We don't document this config option because we want people to see
         # the bold warnings on the web site.
         # internal config: hostsecurity.disabletls10warning
-        if not ui.configbool('hostsecurity', 'disabletls10warning'):
-            ui.warn(_('warning: connecting to %s using legacy security '
-                      'technology (TLS 1.0); see '
-                      'https://mercurial-scm.org/wiki/SecureConnections for '
-                      'more info\n') % bhostname)
-        defaultprotocol = 'tls1.0'
+        if not ui.configbool(b'hostsecurity', b'disabletls10warning'):
+            ui.warn(
+                _(
+                    b'warning: connecting to %s using legacy security '
+                    b'technology (TLS 1.0); see '
+                    b'https://mercurial-scm.org/wiki/SecureConnections for '
+                    b'more info\n'
+                )
+                % bhostname
+            )
+        defaultprotocol = b'tls1.0'
 
-    key = 'minimumprotocol'
-    protocol = ui.config('hostsecurity', key, defaultprotocol)
+    key = b'minimumprotocol'
+    protocol = ui.config(b'hostsecurity', key, defaultprotocol)
     validateprotocol(protocol, key)
 
-    key = '%s:minimumprotocol' % bhostname
-    protocol = ui.config('hostsecurity', key, protocol)
+    key = b'%s:minimumprotocol' % bhostname
+    protocol = ui.config(b'hostsecurity', key, protocol)
     validateprotocol(protocol, key)
 
     # If --insecure is used, we allow the use of TLS 1.0 despite config options.
     # We always print a "connection security to %s is disabled..." message when
     # --insecure is used. So no need to print anything more here.
     if ui.insecureconnections:
-        protocol = 'tls1.0'
-
-    s['protocol'], s['ctxoptions'], s['protocolui'] = protocolsettings(protocol)
+        protocol = b'tls1.0'
 
-    ciphers = ui.config('hostsecurity', 'ciphers')
-    ciphers = ui.config('hostsecurity', '%s:ciphers' % bhostname, ciphers)
-    s['ciphers'] = ciphers
+    s[b'protocol'], s[b'ctxoptions'], s[b'protocolui'] = protocolsettings(
+        protocol
+    )
+
+    ciphers = ui.config(b'hostsecurity', b'ciphers')
+    ciphers = ui.config(b'hostsecurity', b'%s:ciphers' % bhostname, ciphers)
+    s[b'ciphers'] = ciphers
 
     # Look for fingerprints in [hostsecurity] section. Value is a list
     # of <alg>:<fingerprint> strings.
-    fingerprints = ui.configlist('hostsecurity', '%s:fingerprints' % bhostname)
+    fingerprints = ui.configlist(
+        b'hostsecurity', b'%s:fingerprints' % bhostname
+    )
     for fingerprint in fingerprints:
-        if not (fingerprint.startswith(('sha1:', 'sha256:', 'sha512:'))):
-            raise error.Abort(_('invalid fingerprint for %s: %s') % (
-                                bhostname, fingerprint),
-                              hint=_('must begin with "sha1:", "sha256:", '
-                                     'or "sha512:"'))
+        if not (fingerprint.startswith((b'sha1:', b'sha256:', b'sha512:'))):
+            raise error.Abort(
+                _(b'invalid fingerprint for %s: %s') % (bhostname, fingerprint),
+                hint=_(b'must begin with "sha1:", "sha256:", or "sha512:"'),
+            )
 
-        alg, fingerprint = fingerprint.split(':', 1)
-        fingerprint = fingerprint.replace(':', '').lower()
-        s['certfingerprints'].append((alg, fingerprint))
+        alg, fingerprint = fingerprint.split(b':', 1)
+        fingerprint = fingerprint.replace(b':', b'').lower()
+        s[b'certfingerprints'].append((alg, fingerprint))
 
     # Fingerprints from [hostfingerprints] are always SHA-1.
-    for fingerprint in ui.configlist('hostfingerprints', bhostname):
-        fingerprint = fingerprint.replace(':', '').lower()
-        s['certfingerprints'].append(('sha1', fingerprint))
-        s['legacyfingerprint'] = True
+    for fingerprint in ui.configlist(b'hostfingerprints', bhostname):
+        fingerprint = fingerprint.replace(b':', b'').lower()
+        s[b'certfingerprints'].append((b'sha1', fingerprint))
+        s[b'legacyfingerprint'] = True
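
Both fingerprint sources end up normalized the same way: the algorithm
prefix is split off (implicitly sha1 for the legacy section) and colons
are stripped from the lowercased hex digest. For example:

    fingerprint = b'sha256:AB:CD:EF:01'
    alg, fp = fingerprint.split(b':', 1)
    fp = fp.replace(b':', b'').lower()
    assert (alg, fp) == (b'sha256', b'abcdef01')
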
 
     # If a host cert fingerprint is defined, it is the only thing that
     # matters. No need to validate CA certs.
-    if s['certfingerprints']:
-        s['verifymode'] = ssl.CERT_NONE
-        s['allowloaddefaultcerts'] = False
+    if s[b'certfingerprints']:
+        s[b'verifymode'] = ssl.CERT_NONE
+        s[b'allowloaddefaultcerts'] = False
 
     # If --insecure is used, don't take CAs into consideration.
     elif ui.insecureconnections:
-        s['disablecertverification'] = True
-        s['verifymode'] = ssl.CERT_NONE
-        s['allowloaddefaultcerts'] = False
+        s[b'disablecertverification'] = True
+        s[b'verifymode'] = ssl.CERT_NONE
+        s[b'allowloaddefaultcerts'] = False
 
-    if ui.configbool('devel', 'disableloaddefaultcerts'):
-        s['allowloaddefaultcerts'] = False
+    if ui.configbool(b'devel', b'disableloaddefaultcerts'):
+        s[b'allowloaddefaultcerts'] = False
 
     # If both fingerprints and a per-host ca file are specified, issue a warning
     # because users should not be surprised about what security is or isn't
     # being performed.
-    cafile = ui.config('hostsecurity', '%s:verifycertsfile' % bhostname)
-    if s['certfingerprints'] and cafile:
-        ui.warn(_('(hostsecurity.%s:verifycertsfile ignored when host '
-                  'fingerprints defined; using host fingerprints for '
-                  'verification)\n') % bhostname)
+    cafile = ui.config(b'hostsecurity', b'%s:verifycertsfile' % bhostname)
+    if s[b'certfingerprints'] and cafile:
+        ui.warn(
+            _(
+                b'(hostsecurity.%s:verifycertsfile ignored when host '
+                b'fingerprints defined; using host fingerprints for '
+                b'verification)\n'
+            )
+            % bhostname
+        )
 
     # Try to hook up CA certificate validation unless something above
     # makes it not necessary.
-    if s['verifymode'] is None:
+    if s[b'verifymode'] is None:
         # Look at per-host ca file first.
         if cafile:
             cafile = util.expandpath(cafile)
             if not os.path.exists(cafile):
-                raise error.Abort(_('path specified by %s does not exist: %s') %
-                                  ('hostsecurity.%s:verifycertsfile' % (
-                                      bhostname,), cafile))
-            s['cafile'] = cafile
+                raise error.Abort(
+                    _(b'path specified by %s does not exist: %s')
+                    % (
+                        b'hostsecurity.%s:verifycertsfile' % (bhostname,),
+                        cafile,
+                    )
+                )
+            s[b'cafile'] = cafile
         else:
             # Find global certificates file in config.
-            cafile = ui.config('web', 'cacerts')
+            cafile = ui.config(b'web', b'cacerts')
 
             if cafile:
                 cafile = util.expandpath(cafile)
                 if not os.path.exists(cafile):
-                    raise error.Abort(_('could not find web.cacerts: %s') %
-                                      cafile)
-            elif s['allowloaddefaultcerts']:
+                    raise error.Abort(
+                        _(b'could not find web.cacerts: %s') % cafile
+                    )
+            elif s[b'allowloaddefaultcerts']:
                 # CAs not defined in config. Try to find system bundles.
                 cafile = _defaultcacerts(ui)
                 if cafile:
-                    ui.debug('using %s for CA file\n' % cafile)
+                    ui.debug(b'using %s for CA file\n' % cafile)
 
-            s['cafile'] = cafile
+            s[b'cafile'] = cafile
 
         # Require certificate validation if CA certs are being loaded and
         # verification hasn't been disabled above.
-        if cafile or (_canloaddefaultcerts and s['allowloaddefaultcerts']):
-            s['verifymode'] = ssl.CERT_REQUIRED
+        if cafile or (_canloaddefaultcerts and s[b'allowloaddefaultcerts']):
+            s[b'verifymode'] = ssl.CERT_REQUIRED
         else:
             # At this point we don't have a fingerprint, aren't being
             # explicitly insecure, and can't load CA certs. Connecting
             # is insecure. We allow the connection and abort during
             # validation (once we have the fingerprint to print to the
             # user).
-            s['verifymode'] = ssl.CERT_NONE
+            s[b'verifymode'] = ssl.CERT_NONE
 
-    assert s['protocol'] is not None
-    assert s['ctxoptions'] is not None
-    assert s['verifymode'] is not None
+    assert s[b'protocol'] is not None
+    assert s[b'ctxoptions'] is not None
+    assert s[b'verifymode'] is not None
 
     return s
 
+
 def protocolsettings(protocol):
     """Resolve the protocol for a config value.
 
@@ -289,7 +312,7 @@
     of the ``minimumprotocol`` config option equivalent.
     """
     if protocol not in configprotocols:
-        raise ValueError('protocol value not supported: %s' % protocol)
+        raise ValueError(b'protocol value not supported: %s' % protocol)
 
     # Despite its name, PROTOCOL_SSLv23 selects the highest protocol
     # that both ends support, including TLS protocols. On legacy stacks,
@@ -302,14 +325,18 @@
     # disable protocols via SSLContext.options and OP_NO_* constants.
     # However, SSLContext.options doesn't work unless we have the
     # full/real SSLContext available to us.
-    if supportedprotocols == {'tls1.0'}:
-        if protocol != 'tls1.0':
-            raise error.Abort(_('current Python does not support protocol '
-                                'setting %s') % protocol,
-                              hint=_('upgrade Python or disable setting since '
-                                     'only TLS 1.0 is supported'))
+    if supportedprotocols == {b'tls1.0'}:
+        if protocol != b'tls1.0':
+            raise error.Abort(
+                _(b'current Python does not support protocol setting %s')
+                % protocol,
+                hint=_(
+                    b'upgrade Python or disable setting since '
+                    b'only TLS 1.0 is supported'
+                ),
+            )
 
-        return ssl.PROTOCOL_TLSv1, 0, 'tls1.0'
+        return ssl.PROTOCOL_TLSv1, 0, b'tls1.0'
 
     # WARNING: returned options don't work unless the modern ssl module
     # is available. Be careful when adding options here.
@@ -317,15 +344,15 @@
     # SSLv2 and SSLv3 are broken. We ban them outright.
     options = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
 
-    if protocol == 'tls1.0':
+    if protocol == b'tls1.0':
         # Defaults above are to use TLS 1.0+
         pass
-    elif protocol == 'tls1.1':
+    elif protocol == b'tls1.1':
         options |= ssl.OP_NO_TLSv1
-    elif protocol == 'tls1.2':
+    elif protocol == b'tls1.2':
         options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
     else:
-        raise error.Abort(_('this should not happen'))
+        raise error.Abort(_(b'this should not happen'))
 
     # Prevent CRIME.
     # There is no guarantee this attribute is defined on the module.
@@ -333,6 +360,7 @@
 
     return ssl.PROTOCOL_SSLv23, options, protocol
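
On a modern ssl module, the options returned above enforce the configured
minimum by masking off the protocol versions below it; condensed, the
mapping is:

    import ssl

    def min_tls_options(minimum):
        # SSLv2/SSLv3 are always banned; higher minimums also mask
        # the TLS versions beneath them.
        options = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
        if minimum == b'tls1.1':
            options |= ssl.OP_NO_TLSv1
        elif minimum == b'tls1.2':
            options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
        return options
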
 
+
 def wrapsocket(sock, keyfile, certfile, ui, serverhostname=None):
     """Add SSL/TLS to a socket.
 
@@ -347,26 +375,34 @@
       to use.
     """
     if not serverhostname:
-        raise error.Abort(_('serverhostname argument is required'))
+        raise error.Abort(_(b'serverhostname argument is required'))
 
     if b'SSLKEYLOGFILE' in encoding.environ:
         try:
             import sslkeylog
-            sslkeylog.set_keylog(pycompat.fsdecode(
-                encoding.environ[b'SSLKEYLOGFILE']))
-            ui.warn(
-                b'sslkeylog enabled by SSLKEYLOGFILE environment variable\n')
+
+            sslkeylog.set_keylog(
+                pycompat.fsdecode(encoding.environ[b'SSLKEYLOGFILE'])
+            )
+            ui.warnnoi18n(
+                b'sslkeylog enabled by SSLKEYLOGFILE environment variable\n'
+            )
         except ImportError:
-            ui.warn(b'sslkeylog module missing, '
-                    b'but SSLKEYLOGFILE set in environment\n')
+            ui.warnnoi18n(
+                b'sslkeylog module missing, '
+                b'but SSLKEYLOGFILE set in environment\n'
+            )
 
     for f in (keyfile, certfile):
         if f and not os.path.exists(f):
             raise error.Abort(
-                _('certificate file (%s) does not exist; cannot connect to %s')
+                _(b'certificate file (%s) does not exist; cannot connect to %s')
                 % (f, pycompat.bytesurl(serverhostname)),
-                hint=_('restore missing file or fix references '
-                       'in Mercurial config'))
+                hint=_(
+                    b'restore missing file or fix references '
+                    b'in Mercurial config'
+                ),
+            )
 
     settings = _hostsettings(ui, serverhostname)
 
@@ -377,43 +413,48 @@
     # bundle with a specific CA cert removed. If the system/default CA bundle
     # is loaded and contains that removed CA, you've just undone the user's
     # choice.
-    sslcontext = SSLContext(settings['protocol'])
+    sslcontext = SSLContext(settings[b'protocol'])
 
     # This is a no-op unless using modern ssl.
-    sslcontext.options |= settings['ctxoptions']
+    sslcontext.options |= settings[b'ctxoptions']
 
     # This still works on our fake SSLContext.
-    sslcontext.verify_mode = settings['verifymode']
+    sslcontext.verify_mode = settings[b'verifymode']
 
-    if settings['ciphers']:
+    if settings[b'ciphers']:
         try:
-            sslcontext.set_ciphers(pycompat.sysstr(settings['ciphers']))
+            sslcontext.set_ciphers(pycompat.sysstr(settings[b'ciphers']))
         except ssl.SSLError as e:
             raise error.Abort(
-                _('could not set ciphers: %s')
+                _(b'could not set ciphers: %s')
                 % stringutil.forcebytestr(e.args[0]),
-                hint=_('change cipher string (%s) in config') %
-                settings['ciphers'])
+                hint=_(b'change cipher string (%s) in config')
+                % settings[b'ciphers'],
+            )
 
     if certfile is not None:
+
         def password():
             f = keyfile or certfile
-            return ui.getpass(_('passphrase for %s: ') % f, '')
+            return ui.getpass(_(b'passphrase for %s: ') % f, b'')
+
         sslcontext.load_cert_chain(certfile, keyfile, password)
 
-    if settings['cafile'] is not None:
+    if settings[b'cafile'] is not None:
         try:
-            sslcontext.load_verify_locations(cafile=settings['cafile'])
+            sslcontext.load_verify_locations(cafile=settings[b'cafile'])
         except ssl.SSLError as e:
-            if len(e.args) == 1: # pypy has different SSLError args
+            if len(e.args) == 1:  # pypy has different SSLError args
                 msg = e.args[0]
             else:
                 msg = e.args[1]
-            raise error.Abort(_('error loading CA file %s: %s') % (
-                              settings['cafile'], stringutil.forcebytestr(msg)),
-                              hint=_('file is empty or malformed?'))
+            raise error.Abort(
+                _(b'error loading CA file %s: %s')
+                % (settings[b'cafile'], stringutil.forcebytestr(msg)),
+                hint=_(b'file is empty or malformed?'),
+            )
         caloaded = True
-    elif settings['allowloaddefaultcerts']:
+    elif settings[b'allowloaddefaultcerts']:
         # This is a no-op on old Python.
         sslcontext.load_default_certs()
         caloaded = True
@@ -433,18 +474,26 @@
         # When the main 20916 bug occurs, 'sslcontext.get_ca_certs()' is a
         # non-empty list, but the following conditional is otherwise True.
         try:
-            if (caloaded and settings['verifymode'] == ssl.CERT_REQUIRED and
-                modernssl and not sslcontext.get_ca_certs()):
-                ui.warn(_('(an attempt was made to load CA certificates but '
-                          'none were loaded; see '
-                          'https://mercurial-scm.org/wiki/SecureConnections '
-                          'for how to configure Mercurial to avoid this '
-                          'error)\n'))
+            if (
+                caloaded
+                and settings[b'verifymode'] == ssl.CERT_REQUIRED
+                and modernssl
+                and not sslcontext.get_ca_certs()
+            ):
+                ui.warn(
+                    _(
+                        b'(an attempt was made to load CA certificates but '
+                        b'none were loaded; see '
+                        b'https://mercurial-scm.org/wiki/SecureConnections '
+                        b'for how to configure Mercurial to avoid this '
+                        b'error)\n'
+                    )
+                )
         except ssl.SSLError:
             pass
 
         # Try to print more helpful error messages for known failures.
-        if util.safehasattr(e, 'reason'):
+        if util.safehasattr(e, b'reason'):
             # This error occurs when the client and server don't share a
             # common/supported SSL/TLS protocol. We've disabled SSLv2 and SSLv3
             # outright. Hopefully the reason for this error is that we require
@@ -452,77 +501,104 @@
             # reason, try to emit an actionable warning.
             if e.reason == r'UNSUPPORTED_PROTOCOL':
                 # We attempted TLS 1.0+.
-                if settings['protocolui'] == 'tls1.0':
+                if settings[b'protocolui'] == b'tls1.0':
                     # We support more than just TLS 1.0+. If this happens,
                     # the likely scenario is either the client or the server
                     # is really old. (e.g. server doesn't support TLS 1.0+ or
                     # client doesn't support modern TLS versions introduced
                     # several years from when this comment was written).
-                    if supportedprotocols != {'tls1.0'}:
-                        ui.warn(_(
-                            '(could not communicate with %s using security '
-                            'protocols %s; if you are using a modern Mercurial '
-                            'version, consider contacting the operator of this '
-                            'server; see '
-                            'https://mercurial-scm.org/wiki/SecureConnections '
-                            'for more info)\n') % (
+                    if supportedprotocols != {b'tls1.0'}:
+                        ui.warn(
+                            _(
+                                b'(could not communicate with %s using security '
+                                b'protocols %s; if you are using a modern Mercurial '
+                                b'version, consider contacting the operator of this '
+                                b'server; see '
+                                b'https://mercurial-scm.org/wiki/SecureConnections '
+                                b'for more info)\n'
+                            )
+                            % (
                                 pycompat.bytesurl(serverhostname),
-                                ', '.join(sorted(supportedprotocols))))
+                                b', '.join(sorted(supportedprotocols)),
+                            )
+                        )
                     else:
-                        ui.warn(_(
-                            '(could not communicate with %s using TLS 1.0; the '
-                            'likely cause of this is the server no longer '
-                            'supports TLS 1.0 because it has known security '
-                            'vulnerabilities; see '
-                            'https://mercurial-scm.org/wiki/SecureConnections '
-                            'for more info)\n') %
-                                pycompat.bytesurl(serverhostname))
+                        ui.warn(
+                            _(
+                                b'(could not communicate with %s using TLS 1.0; the '
+                                b'likely cause of this is the server no longer '
+                                b'supports TLS 1.0 because it has known security '
+                                b'vulnerabilities; see '
+                                b'https://mercurial-scm.org/wiki/SecureConnections '
+                                b'for more info)\n'
+                            )
+                            % pycompat.bytesurl(serverhostname)
+                        )
                 else:
                     # We attempted TLS 1.1+. We can only get here if the client
                     # supports the configured protocol. So the likely reason is
                     # the client wants better security than the server can
                     # offer.
-                    ui.warn(_(
-                        '(could not negotiate a common security protocol (%s+) '
-                        'with %s; the likely cause is Mercurial is configured '
-                        'to be more secure than the server can support)\n') % (
-                        settings['protocolui'],
-                        pycompat.bytesurl(serverhostname)))
-                    ui.warn(_('(consider contacting the operator of this '
-                              'server and ask them to support modern TLS '
-                              'protocol versions; or, set '
-                              'hostsecurity.%s:minimumprotocol=tls1.0 to allow '
-                              'use of legacy, less secure protocols when '
-                              'communicating with this server)\n') %
-                            pycompat.bytesurl(serverhostname))
-                    ui.warn(_(
-                        '(see https://mercurial-scm.org/wiki/SecureConnections '
-                        'for more info)\n'))
+                    ui.warn(
+                        _(
+                            b'(could not negotiate a common security protocol (%s+) '
+                            b'with %s; the likely cause is Mercurial is configured '
+                            b'to be more secure than the server can support)\n'
+                        )
+                        % (
+                            settings[b'protocolui'],
+                            pycompat.bytesurl(serverhostname),
+                        )
+                    )
+                    ui.warn(
+                        _(
+                            b'(consider contacting the operator of this '
+                            b'server and ask them to support modern TLS '
+                            b'protocol versions; or, set '
+                            b'hostsecurity.%s:minimumprotocol=tls1.0 to allow '
+                            b'use of legacy, less secure protocols when '
+                            b'communicating with this server)\n'
+                        )
+                        % pycompat.bytesurl(serverhostname)
+                    )
+                    ui.warn(
+                        _(
+                            b'(see https://mercurial-scm.org/wiki/SecureConnections '
+                            b'for more info)\n'
+                        )
+                    )
 
-            elif (e.reason == r'CERTIFICATE_VERIFY_FAILED' and
-                pycompat.iswindows):
+            elif (
+                e.reason == r'CERTIFICATE_VERIFY_FAILED' and pycompat.iswindows
+            ):
 
-                ui.warn(_('(the full certificate chain may not be available '
-                          'locally; see "hg help debugssl")\n'))
+                ui.warn(
+                    _(
+                        b'(the full certificate chain may not be available '
+                        b'locally; see "hg help debugssl")\n'
+                    )
+                )
         raise
 
     # check if wrap_socket failed silently because socket had been
     # closed
     # - see http://bugs.python.org/issue13721
     if not sslsocket.cipher():
-        raise error.Abort(_('ssl connection failed'))
+        raise error.Abort(_(b'ssl connection failed'))
 
     sslsocket._hgstate = {
-        'caloaded': caloaded,
-        'hostname': serverhostname,
-        'settings': settings,
-        'ui': ui,
+        b'caloaded': caloaded,
+        b'hostname': serverhostname,
+        b'settings': settings,
+        b'ui': ui,
     }
 
     return sslsocket
 
-def wrapserversocket(sock, ui, certfile=None, keyfile=None, cafile=None,
-                     requireclientcert=False):
+
+def wrapserversocket(
+    sock, ui, certfile=None, keyfile=None, cafile=None, requireclientcert=False
+):
     """Wrap a socket for use by servers.
 
     ``certfile`` and ``keyfile`` specify the files containing the certificate's
@@ -539,27 +615,29 @@
     # doesn't have to be as detailed as for wrapsocket().
     for f in (certfile, keyfile, cafile):
         if f and not os.path.exists(f):
-            raise error.Abort(_('referenced certificate file (%s) does not '
-                                'exist') % f)
+            raise error.Abort(
+                _(b'referenced certificate file (%s) does not exist') % f
+            )
 
-    protocol, options, _protocolui = protocolsettings('tls1.0')
+    protocol, options, _protocolui = protocolsettings(b'tls1.0')
 
     # This config option is intended for use in tests only. It is a giant
     # footgun to kill security. Don't define it.
-    exactprotocol = ui.config('devel', 'serverexactprotocol')
-    if exactprotocol == 'tls1.0':
+    exactprotocol = ui.config(b'devel', b'serverexactprotocol')
+    if exactprotocol == b'tls1.0':
         protocol = ssl.PROTOCOL_TLSv1
-    elif exactprotocol == 'tls1.1':
-        if 'tls1.1' not in supportedprotocols:
-            raise error.Abort(_('TLS 1.1 not supported by this Python'))
+    elif exactprotocol == b'tls1.1':
+        if b'tls1.1' not in supportedprotocols:
+            raise error.Abort(_(b'TLS 1.1 not supported by this Python'))
         protocol = ssl.PROTOCOL_TLSv1_1
-    elif exactprotocol == 'tls1.2':
-        if 'tls1.2' not in supportedprotocols:
-            raise error.Abort(_('TLS 1.2 not supported by this Python'))
+    elif exactprotocol == b'tls1.2':
+        if b'tls1.2' not in supportedprotocols:
+            raise error.Abort(_(b'TLS 1.2 not supported by this Python'))
         protocol = ssl.PROTOCOL_TLSv1_2
     elif exactprotocol:
-        raise error.Abort(_('invalid value for serverexactprotocol: %s') %
-                          exactprotocol)
+        raise error.Abort(
+            _(b'invalid value for serverexactprotocol: %s') % exactprotocol
+        )
 
     if modernssl:
         # We /could/ use create_default_context() here since it doesn't load
@@ -573,7 +651,7 @@
         sslcontext.options |= getattr(ssl, 'OP_SINGLE_ECDH_USE', 0)
 
         # Use the list of more secure ciphers if found in the ssl module.
-        if util.safehasattr(ssl, '_RESTRICTED_SERVER_CIPHERS'):
+        if util.safehasattr(ssl, b'_RESTRICTED_SERVER_CIPHERS'):
             sslcontext.options |= getattr(ssl, 'OP_CIPHER_SERVER_PREFERENCE', 0)
             sslcontext.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)
     else:
@@ -592,9 +670,11 @@
 
     return sslcontext.wrap_socket(sock, server_side=True)
 
+
 class wildcarderror(Exception):
     """Represents an error parsing wildcards in DNS name."""
 
+
 def _dnsnamematch(dn, hostname, maxwildcards=1):
     """Match DNS names according RFC 6125 section 6.4.3.
 
@@ -609,13 +689,14 @@
     dn = pycompat.bytesurl(dn)
     hostname = pycompat.bytesurl(hostname)
 
-    pieces = dn.split('.')
+    pieces = dn.split(b'.')
     leftmost = pieces[0]
     remainder = pieces[1:]
-    wildcards = leftmost.count('*')
+    wildcards = leftmost.count(b'*')
     if wildcards > maxwildcards:
         raise wildcarderror(
-            _('too many wildcards in certificate DNS name: %s') % dn)
+            _(b'too many wildcards in certificate DNS name: %s') % dn
+        )
 
     # speed up common case w/o wildcards
     if not wildcards:
@@ -624,11 +705,11 @@
     # RFC 6125, section 6.4.3, subitem 1.
     # The client SHOULD NOT attempt to match a presented identifier in which
     # the wildcard character comprises a label other than the left-most label.
-    if leftmost == '*':
+    if leftmost == b'*':
         # When '*' is a fragment by itself, it matches a non-empty dotless
         # fragment.
-        pats.append('[^.]+')
-    elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+        pats.append(b'[^.]+')
+    elif leftmost.startswith(b'xn--') or hostname.startswith(b'xn--'):
         # RFC 6125, section 6.4.3, subitem 3.
         # The client SHOULD NOT attempt to match a presented identifier
         # where the wildcard character is embedded within an A-label or
@@ -636,7 +717,7 @@
         pats.append(stringutil.reescape(leftmost))
     else:
         # Otherwise, '*' matches any dotless string, e.g. www*
-        pats.append(stringutil.reescape(leftmost).replace(br'\*', '[^.]*'))
+        pats.append(stringutil.reescape(leftmost).replace(br'\*', b'[^.]*'))
 
     # add the remaining fragments, ignore any wildcards
     for frag in remainder:
@@ -645,6 +726,7 @@
     pat = re.compile(br'\A' + br'\.'.join(pats) + br'\Z', re.IGNORECASE)
     return pat.match(hostname) is not None
 
+
 def _verifycert(cert, hostname):
     '''Verify that cert (in socket.getpeercert() format) matches hostname.
     CRLs are not handled.
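
The ``_dnsnamematch()`` hunks above compile the certificate's DNS name
into a regular expression, expanding a lone leftmost ``*`` into
``[^.]+`` and an embedded ``*`` into ``[^.]*``. A simplified sketch of
the same RFC 6125 matching on plain ``str``, omitting the IDN/``xn--``
special case::

    import re

    def dnsnamematch(dn, hostname, maxwildcards=1):
        # Match a certificate DNS name per RFC 6125 section 6.4.3.
        pieces = dn.split('.')
        leftmost, remainder = pieces[0], pieces[1:]
        if leftmost.count('*') > maxwildcards:
            raise ValueError('too many wildcards in DNS name: %s' % dn)
        if '*' not in leftmost:
            # Common case: no wildcard, compare whole names directly.
            return dn.lower() == hostname.lower()
        if leftmost == '*':
            # A lone '*' matches exactly one non-empty, dotless label.
            pats = ['[^.]+']
        else:
            # An embedded '*' matches any dotless string, e.g. 'www*'.
            pats = [re.escape(leftmost).replace(r'\*', '[^.]*')]
        # Remaining labels must match literally; wildcards are ignored.
        pats.extend(re.escape(frag) for frag in remainder)
        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
        return pat.match(hostname) is not None

    assert dnsnamematch('*.example.com', 'www.example.com')
    assert not dnsnamematch('*.example.com', 'a.b.example.com')
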
@@ -652,7 +734,7 @@
     Returns error message if any problems are found and None on success.
     '''
     if not cert:
-        return _('no certificate received')
+        return _(b'no certificate received')
 
     dnsnames = []
     san = cert.get(r'subjectAltName', [])
@@ -677,7 +759,7 @@
                     try:
                         value = value.encode('ascii')
                     except UnicodeEncodeError:
-                        return _('IDN in certificate not supported')
+                        return _(b'IDN in certificate not supported')
 
                     try:
                         if _dnsnamematch(value, hostname):
@@ -689,11 +771,12 @@
 
     dnsnames = [pycompat.bytesurl(d) for d in dnsnames]
     if len(dnsnames) > 1:
-        return _('certificate is for %s') % ', '.join(dnsnames)
+        return _(b'certificate is for %s') % b', '.join(dnsnames)
     elif len(dnsnames) == 1:
-        return _('certificate is for %s') % dnsnames[0]
+        return _(b'certificate is for %s') % dnsnames[0]
     else:
-        return _('no commonName or subjectAltName found in certificate')
+        return _(b'no commonName or subjectAltName found in certificate')
+
 
 def _plainapplepython():
     """return true if this seems to be a pure Apple Python that
@@ -703,20 +786,26 @@
       for using system certificate store CAs in addition to the provided
       cacerts file
     """
-    if (not pycompat.isdarwin or procutil.mainfrozen() or
-        not pycompat.sysexecutable):
+    if (
+        not pycompat.isdarwin
+        or procutil.mainfrozen()
+        or not pycompat.sysexecutable
+    ):
         return False
     exe = os.path.realpath(pycompat.sysexecutable).lower()
-    return (exe.startswith('/usr/bin/python') or
-            exe.startswith('/system/library/frameworks/python.framework/'))
+    return exe.startswith(b'/usr/bin/python') or exe.startswith(
+        b'/system/library/frameworks/python.framework/'
+    )
+
 
 _systemcacertpaths = [
     # RHEL, CentOS, and Fedora
-    '/etc/pki/tls/certs/ca-bundle.trust.crt',
+    b'/etc/pki/tls/certs/ca-bundle.trust.crt',
     # Debian, Ubuntu, Gentoo
-    '/etc/ssl/certs/ca-certificates.crt',
+    b'/etc/ssl/certs/ca-certificates.crt',
 ]
 
+
 def _defaultcacerts(ui):
     """return path to default CA certificates or None.
 
@@ -731,9 +820,10 @@
     # and usable, assume the user intends it to be used and use it.
     try:
         import certifi
+
         certs = certifi.where()
         if os.path.exists(certs):
-            ui.debug('using ca certificates from certifi\n')
+            ui.debug(b'using ca certificates from certifi\n')
             return pycompat.fsencode(certs)
     except (ImportError, AttributeError):
         pass
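
The hunk above makes ``_defaultcacerts()`` prefer the ``certifi`` bundle
whenever it is importable; the probe is just the ordinary certifi API,
with the ``AttributeError`` arm tolerating partially broken installs::

    try:
        import certifi

        cacerts = certifi.where()  # absolute path to certifi's CA bundle
    except (ImportError, AttributeError):
        cacerts = None  # fall back to platform trust stores
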
@@ -745,9 +835,13 @@
     # Assertion: this code is only called if certificates are being verified.
     if pycompat.iswindows:
         if not _canloaddefaultcerts:
-            ui.warn(_('(unable to load Windows CA certificates; see '
-                      'https://mercurial-scm.org/wiki/SecureConnections for '
-                      'how to configure Mercurial to avoid this message)\n'))
+            ui.warn(
+                _(
+                    b'(unable to load Windows CA certificates; see '
+                    b'https://mercurial-scm.org/wiki/SecureConnections for '
+                    b'how to configure Mercurial to avoid this message)\n'
+                )
+            )
 
         return None
 
@@ -756,7 +850,8 @@
     # trick.
     if _plainapplepython():
         dummycert = os.path.join(
-            os.path.dirname(pycompat.fsencode(__file__)), 'dummycert.pem')
+            os.path.dirname(pycompat.fsencode(__file__)), b'dummycert.pem'
+        )
         if os.path.exists(dummycert):
             return dummycert
 
@@ -767,9 +862,13 @@
         # files. Also consider exporting the keychain certs to a file during
         # Mercurial install.
         if not _canloaddefaultcerts:
-            ui.warn(_('(unable to load CA certificates; see '
-                      'https://mercurial-scm.org/wiki/SecureConnections for '
-                      'how to configure Mercurial to avoid this message)\n'))
+            ui.warn(
+                _(
+                    b'(unable to load CA certificates; see '
+                    b'https://mercurial-scm.org/wiki/SecureConnections for '
+                    b'how to configure Mercurial to avoid this message)\n'
+                )
+            )
         return None
 
     # / is writable on Windows. Out of an abundance of caution make sure
@@ -787,106 +886,140 @@
     if not _canloaddefaultcerts:
         for path in _systemcacertpaths:
             if os.path.isfile(path):
-                ui.warn(_('(using CA certificates from %s; if you see this '
-                          'message, your Mercurial install is not properly '
-                          'configured; see '
-                          'https://mercurial-scm.org/wiki/SecureConnections '
-                          'for how to configure Mercurial to avoid this '
-                          'message)\n') % path)
+                ui.warn(
+                    _(
+                        b'(using CA certificates from %s; if you see this '
+                        b'message, your Mercurial install is not properly '
+                        b'configured; see '
+                        b'https://mercurial-scm.org/wiki/SecureConnections '
+                        b'for how to configure Mercurial to avoid this '
+                        b'message)\n'
+                    )
+                    % path
+                )
                 return path
 
-        ui.warn(_('(unable to load CA certificates; see '
-                  'https://mercurial-scm.org/wiki/SecureConnections for '
-                  'how to configure Mercurial to avoid this message)\n'))
+        ui.warn(
+            _(
+                b'(unable to load CA certificates; see '
+                b'https://mercurial-scm.org/wiki/SecureConnections for '
+                b'how to configure Mercurial to avoid this message)\n'
+            )
+        )
 
     return None
 
+
 def validatesocket(sock):
     """Validate a socket meets security requirements.
 
     The passed socket must have been created with ``wrapsocket()``.
     """
-    shost = sock._hgstate['hostname']
+    shost = sock._hgstate[b'hostname']
     host = pycompat.bytesurl(shost)
-    ui = sock._hgstate['ui']
-    settings = sock._hgstate['settings']
+    ui = sock._hgstate[b'ui']
+    settings = sock._hgstate[b'settings']
 
     try:
         peercert = sock.getpeercert(True)
         peercert2 = sock.getpeercert()
     except AttributeError:
-        raise error.Abort(_('%s ssl connection error') % host)
+        raise error.Abort(_(b'%s ssl connection error') % host)
 
     if not peercert:
-        raise error.Abort(_('%s certificate error: '
-                           'no certificate received') % host)
+        raise error.Abort(
+            _(b'%s certificate error: no certificate received') % host
+        )
 
-    if settings['disablecertverification']:
+    if settings[b'disablecertverification']:
         # We don't print the certificate fingerprint because it shouldn't
         # be necessary: if the user requested certificate verification be
         # disabled, they presumably already saw a message about the inability
         # to verify the certificate and this message would have printed the
         # fingerprint. So printing the fingerprint here adds little to no
         # value.
-        ui.warn(_('warning: connection security to %s is disabled per current '
-                  'settings; communication is susceptible to eavesdropping '
-                  'and tampering\n') % host)
+        ui.warn(
+            _(
+                b'warning: connection security to %s is disabled per current '
+                b'settings; communication is susceptible to eavesdropping '
+                b'and tampering\n'
+            )
+            % host
+        )
         return
 
     # If a certificate fingerprint is pinned, use it and only it to
     # validate the remote cert.
     peerfingerprints = {
-        'sha1': node.hex(hashlib.sha1(peercert).digest()),
-        'sha256': node.hex(hashlib.sha256(peercert).digest()),
-        'sha512': node.hex(hashlib.sha512(peercert).digest()),
+        b'sha1': node.hex(hashlib.sha1(peercert).digest()),
+        b'sha256': node.hex(hashlib.sha256(peercert).digest()),
+        b'sha512': node.hex(hashlib.sha512(peercert).digest()),
     }
 
     def fmtfingerprint(s):
-        return ':'.join([s[x:x + 2] for x in range(0, len(s), 2)])
+        return b':'.join([s[x : x + 2] for x in range(0, len(s), 2)])
 
-    nicefingerprint = 'sha256:%s' % fmtfingerprint(peerfingerprints['sha256'])
+    nicefingerprint = b'sha256:%s' % fmtfingerprint(peerfingerprints[b'sha256'])
 
-    if settings['certfingerprints']:
-        for hash, fingerprint in settings['certfingerprints']:
+    if settings[b'certfingerprints']:
+        for hash, fingerprint in settings[b'certfingerprints']:
             if peerfingerprints[hash].lower() == fingerprint:
-                ui.debug('%s certificate matched fingerprint %s:%s\n' %
-                         (host, hash, fmtfingerprint(fingerprint)))
-                if settings['legacyfingerprint']:
-                    ui.warn(_('(SHA-1 fingerprint for %s found in legacy '
-                              '[hostfingerprints] section; '
-                              'if you trust this fingerprint, remove the old '
-                              'SHA-1 fingerprint from [hostfingerprints] and '
-                              'add the following entry to the new '
-                              '[hostsecurity] section: %s:fingerprints=%s)\n') %
-                            (host, host, nicefingerprint))
+                ui.debug(
+                    b'%s certificate matched fingerprint %s:%s\n'
+                    % (host, hash, fmtfingerprint(fingerprint))
+                )
+                if settings[b'legacyfingerprint']:
+                    ui.warn(
+                        _(
+                            b'(SHA-1 fingerprint for %s found in legacy '
+                            b'[hostfingerprints] section; '
+                            b'if you trust this fingerprint, remove the old '
+                            b'SHA-1 fingerprint from [hostfingerprints] and '
+                            b'add the following entry to the new '
+                            b'[hostsecurity] section: %s:fingerprints=%s)\n'
+                        )
+                        % (host, host, nicefingerprint)
+                    )
                 return
 
         # Pinned fingerprint didn't match. This is a fatal error.
-        if settings['legacyfingerprint']:
-            section = 'hostfingerprint'
-            nice = fmtfingerprint(peerfingerprints['sha1'])
+        if settings[b'legacyfingerprint']:
+            section = b'hostfingerprint'
+            nice = fmtfingerprint(peerfingerprints[b'sha1'])
         else:
-            section = 'hostsecurity'
-            nice = '%s:%s' % (hash, fmtfingerprint(peerfingerprints[hash]))
-        raise error.Abort(_('certificate for %s has unexpected '
-                            'fingerprint %s') % (host, nice),
-                          hint=_('check %s configuration') % section)
+            section = b'hostsecurity'
+            nice = b'%s:%s' % (hash, fmtfingerprint(peerfingerprints[hash]))
+        raise error.Abort(
+            _(b'certificate for %s has unexpected fingerprint %s')
+            % (host, nice),
+            hint=_(b'check %s configuration') % section,
+        )
 
     # Security is enabled but no CAs are loaded. We can't establish trust
     # for the cert so abort.
-    if not sock._hgstate['caloaded']:
+    if not sock._hgstate[b'caloaded']:
         raise error.Abort(
-            _('unable to verify security of %s (no loaded CA certificates); '
-              'refusing to connect') % host,
-            hint=_('see https://mercurial-scm.org/wiki/SecureConnections for '
-                   'how to configure Mercurial to avoid this error or set '
-                   'hostsecurity.%s:fingerprints=%s to trust this server') %
-                   (host, nicefingerprint))
+            _(
+                b'unable to verify security of %s (no loaded CA certificates); '
+                b'refusing to connect'
+            )
+            % host,
+            hint=_(
+                b'see https://mercurial-scm.org/wiki/SecureConnections for '
+                b'how to configure Mercurial to avoid this error or set '
+                b'hostsecurity.%s:fingerprints=%s to trust this server'
+            )
+            % (host, nicefingerprint),
+        )
 
     msg = _verifycert(peercert2, shost)
     if msg:
-        raise error.Abort(_('%s certificate error: %s') % (host, msg),
-                         hint=_('set hostsecurity.%s:certfingerprints=%s '
-                                'config setting or use --insecure to connect '
-                                'insecurely') %
-                              (host, nicefingerprint))
+        raise error.Abort(
+            _(b'%s certificate error: %s') % (host, msg),
+            hint=_(
+                b'set hostsecurity.%s:certfingerprints=%s '
+                b'config setting or use --insecure to connect '
+                b'insecurely'
+            )
+            % (host, nicefingerprint),
+        )
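
The ``validatesocket()`` hunks above pin certificates by hashing the raw
DER blob from ``sock.getpeercert(True)`` and comparing the digests, not
by chasing a trust chain. A standalone sketch of the same digesting and
display formatting::

    import hashlib

    def fmtfingerprint(s):
        # 'a1b2c3...' -> 'a1:b2:c3:...', the format Mercurial prints.
        return ':'.join(s[x:x + 2] for x in range(0, len(s), 2))

    def fingerprints(der):
        # der is a DER-encoded certificate, e.g. sock.getpeercert(True).
        return {
            algo: fmtfingerprint(getattr(hashlib, algo)(der).hexdigest())
            for algo in ('sha1', 'sha256', 'sha512')
        }

A pinned value such as ``sha256:a1:b2:...`` under
``hostsecurity.<host>:fingerprints`` then reduces to a case-insensitive
comparison against this table.
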
--- a/mercurial/stack.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/stack.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,10 +7,6 @@
 
 from __future__ import absolute_import
 
-from . import (
-    revsetlang,
-    scmutil,
-)
 
 def getstack(repo, rev=None):
     """return a sorted smartrev of the stack containing either rev if it is
@@ -20,10 +16,9 @@
     the revision and are not merges.
     """
     if rev is None:
-        rev = '.'
+        rev = b'.'
 
-    revspec = 'reverse(only(%s) and not public() and not ::merge())'
-    revset = revsetlang.formatspec(revspec, rev)
-    revisions = scmutil.revrange(repo, [revset])
+    revspec = b'only(%s) and not public() and not ::merge()'
+    revisions = repo.revs(revspec, rev)
     revisions.sort()
     return revisions
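
The ``getstack()`` rewrite works because ``localrepository.revs()`` runs
``revsetlang.formatspec()`` internally, so callers hand it the template
and arguments directly instead of formatting a revset string for
``scmutil.revrange()``. A hedged usage sketch, with ``repo`` standing in
for any local repository object::

    # draft ancestors of the working parent, excluding merges
    revs = repo.revs(b'only(%s) and not public() and not ::merge()', b'.')
    revs.sort()
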
--- a/mercurial/state.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/state.py	Mon Oct 21 11:09:48 2019 -0400
@@ -25,9 +25,8 @@
     error,
     util,
 )
-from .utils import (
-    cborutil,
-)
+from .utils import cborutil
+
 
 class cmdstate(object):
     """a wrapper class to store the state of commands like `rebase`, `graft`,
@@ -60,23 +59,25 @@
         we use third-party library cbor to serialize data to write in the file.
         """
         if not isinstance(version, int):
-            raise error.ProgrammingError("version of state file should be"
-                                         " an integer")
+            raise error.ProgrammingError(
+                b"version of state file should be an integer"
+            )
 
-        with self._repo.vfs(self.fname, 'wb', atomictemp=True) as fp:
-            fp.write('%d\n' % version)
+        with self._repo.vfs(self.fname, b'wb', atomictemp=True) as fp:
+            fp.write(b'%d\n' % version)
             for chunk in cborutil.streamencode(data):
                 fp.write(chunk)
 
     def _read(self):
         """reads the state file and returns a dictionary which contain
         data in the same format as it was before storing"""
-        with self._repo.vfs(self.fname, 'rb') as fp:
+        with self._repo.vfs(self.fname, b'rb') as fp:
             try:
                 int(fp.readline())
             except ValueError:
-                raise error.CorruptedState("unknown version of state file"
-                                           " found")
+                raise error.CorruptedState(
+                    b"unknown version of state file found"
+                )
 
             return cborutil.decodeall(fp.read())[0]
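
The ``save()``/``_read()`` pair above fixes the on-disk layout of state
files: a decimal version, a newline, then one CBOR-encoded value. A
minimal round trip mirroring that layout on ordinary files, reusing the
same cborutil helpers but skipping the ``repo.vfs``/atomictemp
machinery::

    from mercurial.utils import cborutil

    def save_state(path, version, data):
        with open(path, 'wb') as fp:
            fp.write(b'%d\n' % version)  # version line comes first
            for chunk in cborutil.streamencode(data):
                fp.write(chunk)  # CBOR payload follows

    def read_state(path):
        with open(path, 'rb') as fp:
            int(fp.readline())  # raises ValueError on a corrupt header
            return cborutil.decodeall(fp.read())[0]
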
 
@@ -88,6 +89,7 @@
         """check whether the state file exists or not"""
         return self._repo.vfs.exists(self.fname)
 
+
 class _statecheck(object):
     """a utility class that deals with multistep operations like graft,
        histedit, bisect, update, etc., and checks whether such commands
@@ -97,9 +99,21 @@
        multistep operation or multistep command extension.
     """
 
-    def __init__(self, opname, fname, clearable, allowcommit, reportonly,
-                 continueflag, stopflag, cmdmsg, cmdhint, statushint,
-                 abortfunc, continuefunc):
+    def __init__(
+        self,
+        opname,
+        fname,
+        clearable,
+        allowcommit,
+        reportonly,
+        continueflag,
+        stopflag,
+        cmdmsg,
+        cmdhint,
+        statushint,
+        abortfunc,
+        continuefunc,
+    ):
         self._opname = opname
         self._fname = fname
         self._clearable = clearable
@@ -118,12 +132,14 @@
         hg status --verbose
         """
         if not self._statushint:
-            hint = (_('To continue:    hg %s --continue\n'
-                      'To abort:       hg %s --abort') % (self._opname,
-                       self._opname))
+            hint = _(
+                b'To continue:    hg %s --continue\n'
+                b'To abort:       hg %s --abort'
+            ) % (self._opname, self._opname)
             if self._stopflag:
-                hint = hint + (_('\nTo stop:        hg %s --stop') %
-                            (self._opname))
+                hint = hint + (
+                    _(b'\nTo stop:        hg %s --stop') % (self._opname)
+                )
             return hint
         return self._statushint
 
@@ -132,36 +148,50 @@
         operation
         """
         if not self._cmdhint:
-                return (_("use 'hg %s --continue' or 'hg %s --abort'") %
-                        (self._opname, self._opname))
+            return _(b"use 'hg %s --continue' or 'hg %s --abort'") % (
+                self._opname,
+                self._opname,
+            )
         return self._cmdhint
 
     def msg(self):
         """returns the status message corresponding to the command"""
         if not self._cmdmsg:
-            return _('%s in progress') % (self._opname)
+            return _(b'%s in progress') % (self._opname)
         return self._cmdmsg
 
     def continuemsg(self):
         """ returns appropriate continue message corresponding to command"""
-        return _('hg %s --continue') % (self._opname)
+        return _(b'hg %s --continue') % (self._opname)
 
     def isunfinished(self, repo):
         """determines whether a multi-step operation is in progress
         or not
         """
-        if self._opname == 'merge':
+        if self._opname == b'merge':
             return len(repo[None].parents()) > 1
         else:
             return repo.vfs.exists(self._fname)
 
+
 # A list of statecheck objects for multistep operations like graft.
 _unfinishedstates = []
 
-def addunfinished(opname, fname, clearable=False, allowcommit=False,
-                  reportonly=False, continueflag=False, stopflag=False,
-                  cmdmsg="", cmdhint="", statushint="", abortfunc=None,
-                  continuefunc=None):
+
+def addunfinished(
+    opname,
+    fname,
+    clearable=False,
+    allowcommit=False,
+    reportonly=False,
+    continueflag=False,
+    stopflag=False,
+    cmdmsg=b"",
+    cmdhint=b"",
+    statushint=b"",
+    abortfunc=None,
+    continuefunc=None,
+):
     """this registers a new command or operation to unfinishedstates
     opname is the name of the command or operation
     fname is the file name in which data should be stored in the .hg directory.
@@ -189,30 +219,50 @@
     continuefunc stores the function required to finish an interrupted
     operation.
     """
-    statecheckobj = _statecheck(opname, fname, clearable, allowcommit,
-                                reportonly, continueflag, stopflag, cmdmsg,
-                                cmdhint, statushint, abortfunc, continuefunc)
-    if opname == 'merge':
+    statecheckobj = _statecheck(
+        opname,
+        fname,
+        clearable,
+        allowcommit,
+        reportonly,
+        continueflag,
+        stopflag,
+        cmdmsg,
+        cmdhint,
+        statushint,
+        abortfunc,
+        continuefunc,
+    )
+    if opname == b'merge':
         _unfinishedstates.append(statecheckobj)
     else:
         _unfinishedstates.insert(0, statecheckobj)
 
+
 addunfinished(
-    'update', fname='updatestate', clearable=True,
-    cmdmsg=_('last update was interrupted'),
-    cmdhint=_("use 'hg update' to get a consistent checkout"),
-    statushint=_("To continue:    hg update")
+    b'update',
+    fname=b'updatestate',
+    clearable=True,
+    cmdmsg=_(b'last update was interrupted'),
+    cmdhint=_(b"use 'hg update' to get a consistent checkout"),
+    statushint=_(b"To continue:    hg update ."),
 )
 addunfinished(
-    'bisect', fname='bisect.state', allowcommit=True, reportonly=True,
-    statushint=_('To mark the changeset good:    hg bisect --good\n'
-                 'To mark the changeset bad:     hg bisect --bad\n'
-                 'To abort:                      hg bisect --reset\n')
+    b'bisect',
+    fname=b'bisect.state',
+    allowcommit=True,
+    reportonly=True,
+    statushint=_(
+        b'To mark the changeset good:    hg bisect --good\n'
+        b'To mark the changeset bad:     hg bisect --bad\n'
+        b'To abort:                      hg bisect --reset\n'
+    ),
 )
 
+
 def getrepostate(repo):
     # experimental config: commands.status.skipstates
-    skip = set(repo.ui.configlist('commands', 'status.skipstates'))
+    skip = set(repo.ui.configlist(b'commands', b'status.skipstates'))
     for state in _unfinishedstates:
         if state._opname in skip:
             continue
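
With the keyword-driven signature above, extensions register their own
multistep operations compactly. A hypothetical registration for a
made-up ``frobnicate`` command::

    from mercurial import state
    from mercurial.i18n import _

    state.addunfinished(
        b'frobnicate',
        fname=b'frobnicatestate',  # stored under .hg/
        continueflag=True,  # advertise 'hg frobnicate --continue'
        cmdmsg=_(b'frobnicate in progress'),
    )
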
--- a/mercurial/statichttprepo.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/statichttprepo.py	Mon Oct 21 11:09:48 2019 -0400
@@ -29,6 +29,7 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
+
 class httprangereader(object):
     def __init__(self, url, opener):
         # we assume opener has HTTPRangeHandler
@@ -45,9 +46,10 @@
 
     def seek(self, pos):
         self.pos = pos
+
     def read(self, bytes=None):
         req = urlreq.request(pycompat.strurl(self.url))
-        end = ''
+        end = b''
         if bytes:
             end = self.pos + bytes - 1
         if self.pos or end:
@@ -67,26 +69,31 @@
             # HTTPRangeHandler does nothing if remote does not support
             # Range headers and returns the full entity. Let's slice it.
             if bytes:
-                data = data[self.pos:self.pos + bytes]
+                data = data[self.pos : self.pos + bytes]
             else:
-                data = data[self.pos:]
+                data = data[self.pos :]
         elif bytes:
             data = data[:bytes]
         self.pos += len(data)
         return data
+
     def readlines(self):
         return self.read().splitlines(True)
+
     def __iter__(self):
         return iter(self.readlines())
+
     def close(self):
         pass
 
+
 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
 # which was itself extracted from urlgrabber. See the last version of
 # byterange.py from history if you need more information.
 class _RangeError(IOError):
     """Error raised when an unsatisfiable range is requested."""
 
+
 class _HTTPRangeHandler(urlreq.basehandler):
     """Handler that enables HTTP Range headers.
 
@@ -105,7 +112,8 @@
 
     def http_error_416(self, req, fp, code, msg, hdrs):
         # HTTP's Range Not Satisfiable error
-        raise _RangeError('Requested Range Not Satisfiable')
+        raise _RangeError(b'Requested Range Not Satisfiable')
+
 
 def build_opener(ui, authinfo):
     # urllib cannot handle URLs with embedded user or passwd
@@ -115,11 +123,12 @@
     class statichttpvfs(vfsmod.abstractvfs):
         def __init__(self, base):
             self.base = base
+            self.options = {}
 
-        def __call__(self, path, mode='r', *args, **kw):
-            if mode not in ('r', 'rb'):
-                raise IOError('Permission denied')
-            f = "/".join((self.base, urlreq.quote(path)))
+        def __call__(self, path, mode=b'r', *args, **kw):
+            if mode not in (b'r', b'rb'):
+                raise IOError(b'Permission denied')
+            f = b"/".join((self.base, urlreq.quote(path)))
             return httprangereader(f, urlopener)
 
         def join(self, path):
@@ -130,14 +139,18 @@
 
     return statichttpvfs
 
+
 class statichttppeer(localrepo.localpeer):
     def local(self):
         return None
+
     def canpush(self):
         return False
 
-class statichttprepository(localrepo.localrepository,
-                           localrepo.revlogfilestorage):
+
+class statichttprepository(
+    localrepo.localrepository, localrepo.revlogfilestorage
+):
     supported = localrepo.localrepository._basesupported
 
     def __init__(self, ui, path):
@@ -145,12 +158,12 @@
         self.ui = ui
 
         self.root = path
-        u = util.url(path.rstrip('/') + "/.hg")
+        u = util.url(path.rstrip(b'/') + b"/.hg")
         self.path, authinfo = u.authinfo()
 
         vfsclass = build_opener(ui, authinfo)
         self.vfs = vfsclass(self.path)
-        self.cachevfs = vfsclass(self.vfs.join('cache'))
+        self.cachevfs = vfsclass(self.vfs.join(b'cache'))
         self._phasedefaults = []
 
         self.names = namespaces.namespaces()
@@ -166,19 +179,20 @@
 
             # check if it is a non-empty old-style repository
             try:
-                fp = self.vfs("00changelog.i")
+                fp = self.vfs(b"00changelog.i")
                 fp.read(1)
                 fp.close()
             except IOError as inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 # we do not care about empty old-style repositories here
-                msg = _("'%s' does not appear to be an hg repository") % path
+                msg = _(b"'%s' does not appear to be an hg repository") % path
                 raise error.RepoError(msg)
 
         supportedrequirements = localrepo.gathersupportedrequirements(ui)
-        localrepo.ensurerequirementsrecognized(requirements,
-                                               supportedrequirements)
+        localrepo.ensurerequirementsrecognized(
+            requirements, supportedrequirements
+        )
         localrepo.ensurerequirementscompatible(ui, requirements)
 
         # setup store
@@ -190,8 +204,9 @@
         self.requirements = requirements
 
         rootmanifest = manifest.manifestrevlog(self.svfs)
-        self.manifestlog = manifest.manifestlog(self.svfs, self, rootmanifest,
-                                                self.narrowmatch())
+        self.manifestlog = manifest.manifestlog(
+            self.svfs, self, rootmanifest, self.narrowmatch()
+        )
         self.changelog = changelog.changelog(self.svfs)
         self._tags = None
         self.nodetagscache = None
@@ -203,7 +218,7 @@
 
     def _restrictcapabilities(self, caps):
         caps = super(statichttprepository, self)._restrictcapabilities(caps)
-        return caps.difference(["pushkey"])
+        return caps.difference([b"pushkey"])
 
     def url(self):
         return self._url
@@ -215,16 +230,21 @@
         return statichttppeer(self)
 
     def wlock(self, wait=True):
-        raise error.LockUnavailable(0, _('lock not available'), 'lock',
-                                    _('cannot lock static-http repository'))
+        raise error.LockUnavailable(
+            0,
+            _(b'lock not available'),
+            b'lock',
+            _(b'cannot lock static-http repository'),
+        )
 
     def lock(self, wait=True):
-        raise error.Abort(_('cannot lock static-http repository'))
+        raise error.Abort(_(b'cannot lock static-http repository'))
 
     def _writecaches(self):
-        pass # statichttprepository are read only
+        pass  # statichttprepository is read-only
+
 
 def instance(ui, path, create, intents=None, createopts=None):
     if create:
-        raise error.Abort(_('cannot create new static-http repository'))
+        raise error.Abort(_(b'cannot create new static-http repository'))
     return statichttprepository(ui, path[7:])
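
``httprangereader`` issues one HTTP request per ``read()``, asking for
``bytes=<pos>-<end>`` and falling back to slicing when the server
ignores the Range header and replies 200 with the full entity. The same
pattern in plain urllib, with a hypothetical URL and no error handling::

    import urllib.request

    def rangeread(url, pos, nbytes):
        req = urllib.request.Request(url)
        # HTTP ranges are inclusive, hence the trailing - 1.
        req.add_header('Range', 'bytes=%d-%d' % (pos, pos + nbytes - 1))
        with urllib.request.urlopen(req) as resp:
            data = resp.read()
            if resp.status == 200:
                # Range ignored; the full entity came back, so slice it.
                data = data[pos:pos + nbytes]
        return data
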
--- a/mercurial/statprof.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/statprof.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 ## statprof.py
 ## Copyright (C) 2012 Bryan O'Sullivan <bos@serpentine.com>
 ## Copyright (C) 2011 Alex Fraser <alex at phatcore dot com>
@@ -115,6 +114,7 @@
 import threading
 import time
 
+from .pycompat import open
 from . import (
     encoding,
     pycompat,
@@ -123,7 +123,7 @@
 defaultdict = collections.defaultdict
 contextmanager = contextlib.contextmanager
 
-__all__ = ['start', 'stop', 'reset', 'display', 'profile']
+__all__ = [b'start', b'stop', b'reset', b'display', b'profile']
 
 skips = {
     r"util.py:check",
@@ -145,6 +145,7 @@
 ###########################################################################
 ## Utils
 
+
 def clock():
     times = os.times()
     return (times[0] + times[1], times[4])
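
``clock()`` captures both time bases at once: ``os.times()`` reports
user CPU seconds in slot 0, system CPU seconds in slot 1, and elapsed
real time in slot 4, which is what ``ProfileState.timeidx`` later picks
between for ``track='cpu'`` versus ``track='real'``. For example::

    import os

    times = os.times()
    cpu = times[0] + times[1]  # user + system CPU seconds
    real = times[4]  # wall-clock seconds since an arbitrary epoch
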
@@ -153,10 +154,11 @@
 ###########################################################################
 ## Collection data structures
 
+
 class ProfileState(object):
     def __init__(self, frequency=None):
         self.reset(frequency)
-        self.track = 'cpu'
+        self.track = b'cpu'
 
     def reset(self, frequency=None):
         # total so far
@@ -166,7 +168,7 @@
         # a float
         if frequency:
             self.sample_interval = 1.0 / frequency
-        elif not hasattr(self, 'sample_interval'):
+        elif not pycompat.hasattr(self, 'sample_interval'):
             # default to 1000 Hz
             self.sample_interval = 1.0 / 1000.0
         else:
@@ -193,10 +195,11 @@
 
     @property
     def timeidx(self):
-        if self.track == 'real':
+        if self.track == b'real':
             return 1
         return 0
 
+
 state = ProfileState()
 
 
@@ -215,8 +218,7 @@
 
     def __eq__(self, other):
         try:
-            return (self.lineno == other.lineno and
-                    self.path == other.path)
+            return self.lineno == other.lineno and self.path == other.path
         except:
             return False
 
@@ -236,24 +238,20 @@
     def getsource(self, length):
         if self.source is None:
             lineno = self.lineno - 1
-            fp = None
             try:
-                fp = open(self.path, 'rb')
-                for i, line in enumerate(fp):
-                    if i == lineno:
-                        self.source = line.strip()
-                        break
+                with open(self.path, b'rb') as fp:
+                    for i, line in enumerate(fp):
+                        if i == lineno:
+                            self.source = line.strip()
+                            break
             except:
                 pass
-            finally:
-                if fp:
-                    fp.close()
             if self.source is None:
-                self.source = ''
+                self.source = b''
 
         source = self.source
         if len(source) > length:
-            source = source[:(length - 3)] + "..."
+            source = source[: (length - 3)] + b"..."
         return source
 
     def filename(self):
@@ -262,6 +260,7 @@
     def skipname(self):
         return r'%s:%s' % (self.filename(), self.function)
 
+
 class Sample(object):
     __slots__ = (r'stack', r'time')
 
@@ -274,17 +273,22 @@
         stack = []
 
         while frame:
-            stack.append(CodeSite.get(
-                pycompat.sysbytes(frame.f_code.co_filename),
-                frame.f_lineno,
-                pycompat.sysbytes(frame.f_code.co_name)))
+            stack.append(
+                CodeSite.get(
+                    pycompat.sysbytes(frame.f_code.co_filename),
+                    frame.f_lineno,
+                    pycompat.sysbytes(frame.f_code.co_name),
+                )
+            )
             frame = frame.f_back
 
         return Sample(stack, time)
 
+
 ###########################################################################
 ## SIGPROF handler
 
+
 def profile_signal_handler(signum, frame):
     if state.profile_level > 0:
         now = clock()
@@ -293,11 +297,13 @@
         timestamp = state.accumulated_time[state.timeidx]
         state.samples.append(Sample.from_frame(frame, timestamp))
 
-        signal.setitimer(signal.ITIMER_PROF,
-            state.sample_interval, 0.0)
+        signal.setitimer(signal.ITIMER_PROF, state.sample_interval, 0.0)
         state.last_start_time = now
 
+
 stopthread = threading.Event()
+
+
 def samplerthread(tid):
     while not stopthread.is_set():
         now = clock()
@@ -313,16 +319,21 @@
 
     stopthread.clear()
 
+
 ###########################################################################
 ## Profiling API
 
+
 def is_active():
     return state.profile_level > 0
 
+
 lastmechanism = None
-def start(mechanism='thread', track='cpu'):
+
+
+def start(mechanism=b'thread', track=b'cpu'):
     '''Install the profiling signal handler, and start profiling.'''
-    state.track = track # note: nesting different mode won't work
+    state.track = track  # note: nesting different mode won't work
     state.profile_level += 1
     if state.profile_level == 1:
         state.last_start_time = clock()
@@ -332,73 +343,80 @@
         global lastmechanism
         lastmechanism = mechanism
 
-        if mechanism == 'signal':
+        if mechanism == b'signal':
             signal.signal(signal.SIGPROF, profile_signal_handler)
-            signal.setitimer(signal.ITIMER_PROF,
-                rpt or state.sample_interval, 0.0)
-        elif mechanism == 'thread':
+            signal.setitimer(
+                signal.ITIMER_PROF, rpt or state.sample_interval, 0.0
+            )
+        elif mechanism == b'thread':
             frame = inspect.currentframe()
             tid = [k for k, f in sys._current_frames().items() if f == frame][0]
-            state.thread = threading.Thread(target=samplerthread,
-                                 args=(tid,), name="samplerthread")
+            state.thread = threading.Thread(
+                target=samplerthread, args=(tid,), name=b"samplerthread"
+            )
             state.thread.start()
 
+
 def stop():
     '''Stop profiling, and uninstall the profiling signal handler.'''
     state.profile_level -= 1
     if state.profile_level == 0:
-        if lastmechanism == 'signal':
+        if lastmechanism == b'signal':
             rpt = signal.setitimer(signal.ITIMER_PROF, 0.0, 0.0)
             signal.signal(signal.SIGPROF, signal.SIG_IGN)
             state.remaining_prof_time = rpt[0]
-        elif lastmechanism == 'thread':
+        elif lastmechanism == b'thread':
             stopthread.set()
             state.thread.join()
 
         state.accumulate_time(clock())
         state.last_start_time = None
-        statprofpath = encoding.environ.get('STATPROF_DEST')
+        statprofpath = encoding.environ.get(b'STATPROF_DEST')
         if statprofpath:
             save_data(statprofpath)
 
     return state
 
+
 def save_data(path):
-    with open(path, 'w+') as file:
-        file.write("%f %f\n" % state.accumulated_time)
+    with open(path, b'w+') as file:
+        file.write(b"%f %f\n" % state.accumulated_time)
         for sample in state.samples:
             time = sample.time
             stack = sample.stack
-            sites = ['\1'.join([s.path, b'%d' % s.lineno, s.function])
-                     for s in stack]
-            file.write("%d\0%s\n" % (time, '\0'.join(sites)))
+            sites = [
+                b'\1'.join([s.path, b'%d' % s.lineno, s.function])
+                for s in stack
+            ]
+            file.write(b"%d\0%s\n" % (time, b'\0'.join(sites)))
+
 
 def load_data(path):
-    lines = open(path, 'rb').read().splitlines()
+    lines = open(path, b'rb').read().splitlines()
 
     state.accumulated_time = [float(value) for value in lines[0].split()]
     state.samples = []
     for line in lines[1:]:
-        parts = line.split('\0')
+        parts = line.split(b'\0')
         time = float(parts[0])
         rawsites = parts[1:]
         sites = []
         for rawsite in rawsites:
-            siteparts = rawsite.split('\1')
-            sites.append(CodeSite.get(siteparts[0], int(siteparts[1]),
-                        siteparts[2]))
+            siteparts = rawsite.split(b'\1')
+            sites.append(
+                CodeSite.get(siteparts[0], int(siteparts[1]), siteparts[2])
+            )
 
         state.samples.append(Sample(sites, time))
 
 
-
 def reset(frequency=None):
     '''Clear out the state of the profiler.  Do not call while the
     profiler is running.
 
     The optional frequency argument specifies the number of samples to
     collect per second.'''
-    assert state.profile_level == 0, "Can't reset() while statprof is running"
+    assert state.profile_level == 0, b"Can't reset() while statprof is running"
     CodeSite.cache.clear()
     state.reset(frequency)
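
``save_data()`` and ``load_data()`` fix a simple text dump: the first
line carries the accumulated (cpu, real) times, and every following
line is ``<time>\0<site>\0<site>...`` with each site packed as
``path\1lineno\1function``. A round-trip sketch of one sample line, on
``str`` rather than bytes::

    def encode_sample(time, stack):
        # stack: (path, lineno, function) triples, innermost frame first
        sites = ['\1'.join([path, '%d' % lineno, func])
                 for path, lineno, func in stack]
        return '%d\0%s' % (time, '\0'.join(sites))

    def decode_sample(line):
        parts = line.split('\0')
        time = float(parts[0])
        stack = []
        for rawsite in parts[1:]:
            path, lineno, func = rawsite.split('\1')
            stack.append((path, int(lineno), func))
        return time, stack

    line = encode_sample(12, [('statprof.py', 270, 'from_frame')])
    assert decode_sample(line) == (12.0, [('statprof.py', 270, 'from_frame')])
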
 
@@ -416,6 +434,7 @@
 ###########################################################################
 ## Reporting API
 
+
 class SiteStats(object):
     def __init__(self, site):
         self.site = site
@@ -456,7 +475,8 @@
                 if i == 0:
                     sitestat.addself()
 
-        return [s for s in stats.itervalues()]
+        return [s for s in pycompat.itervalues(stats)]
+
 
 class DisplayFormats:
     ByLine = 0
@@ -467,6 +487,7 @@
     Json = 5
     Chrome = 6
 
+
 def display(fp=None, format=3, data=None, **kwargs):
     '''Print statistics, either to stdout or the given file object.'''
     if data is None:
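
Taken together, the entry points form a small API: ``reset()`` sets the
sampling frequency, ``start()``/``stop()`` bracket the workload, and
``display()`` renders one of the formats above while ``save_data()``
persists the raw samples. A hedged usage sketch; ``expensive_work`` and
the output path are made up::

    from mercurial import statprof

    statprof.reset(frequency=1000)  # 1000 samples per second
    statprof.start(mechanism=b'thread', track=b'cpu')
    try:
        expensive_work()  # hypothetical workload being profiled
    finally:
        statprof.stop()
    statprof.save_data(b'/tmp/profile.data')  # readable via load_data()
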
@@ -474,6 +495,7 @@
 
     if fp is None:
         import sys
+
         fp = sys.stdout
     if len(data.samples) == 0:
         fp.write(b'No samples recorded.\n')
@@ -494,41 +516,55 @@
     elif format == DisplayFormats.Chrome:
         write_to_chrome(data, fp, **kwargs)
     else:
-        raise Exception("Invalid display format")
+        raise Exception(b"Invalid display format")
 
     if format not in (DisplayFormats.Json, DisplayFormats.Chrome):
         fp.write(b'---\n')
         fp.write(b'Sample count: %d\n' % len(data.samples))
         fp.write(b'Total time: %f seconds (%f wall)\n' % data.accumulated_time)
 
+
 def display_by_line(data, fp):
     '''Print the profiler data with each sample line represented
     as one row in a table.  Sorted by self-time per line.'''
     stats = SiteStats.buildstats(data.samples)
     stats.sort(reverse=True, key=lambda x: x.selfseconds())
 
-    fp.write(b'%5.5s %10.10s   %7.7s  %-8.8s\n' % (
-        b'%  ', b'cumulative', b'self', b''))
-    fp.write(b'%5.5s  %9.9s  %8.8s  %-8.8s\n' % (
-        b"time", b"seconds", b"seconds", b"name"))
+    fp.write(
+        b'%5.5s %10.10s   %7.7s  %-8.8s\n'
+        % (b'%  ', b'cumulative', b'self', b'')
+    )
+    fp.write(
+        b'%5.5s  %9.9s  %8.8s  %-8.8s\n'
+        % (b"time", b"seconds", b"seconds", b"name")
+    )
 
     for stat in stats:
         site = stat.site
-        sitelabel = '%s:%d:%s' % (site.filename(),
-                                  site.lineno,
-                                  site.function)
-        fp.write(b'%6.2f %9.2f %9.2f  %s\n' % (
-            stat.selfpercent(), stat.totalseconds(),
-            stat.selfseconds(), sitelabel))
+        sitelabel = b'%s:%d:%s' % (site.filename(), site.lineno, site.function)
+        fp.write(
+            b'%6.2f %9.2f %9.2f  %s\n'
+            % (
+                stat.selfpercent(),
+                stat.totalseconds(),
+                stat.selfseconds(),
+                sitelabel,
+            )
+        )
+
 
 def display_by_method(data, fp):
     '''Print the profiler data with each sample function represented
     as one row in a table.  Important lines within that function are
     output as nested rows.  Sorted by self-time per line.'''
-    fp.write(b'%5.5s %10.10s   %7.7s  %-8.8s\n' %
-          ('%  ', 'cumulative', 'self', ''))
-    fp.write(b'%5.5s  %9.9s  %8.8s  %-8.8s\n' %
-          ("time", "seconds", "seconds", "name"))
+    fp.write(
+        b'%5.5s %10.10s   %7.7s  %-8.8s\n'
+        % (b'%  ', b'cumulative', b'self', b'')
+    )
+    fp.write(
+        b'%5.5s  %9.9s  %8.8s  %-8.8s\n'
+        % (b"time", b"seconds", b"seconds", b"name")
+    )
 
     stats = SiteStats.buildstats(data.samples)
 
@@ -538,7 +574,7 @@
 
     # compute sums for each function
     functiondata = []
-    for fname, sitestats in grouped.iteritems():
+    for fname, sitestats in pycompat.iteritems(grouped):
         total_cum_sec = 0
         total_self_sec = 0
         total_percent = 0
@@ -547,11 +583,9 @@
             total_self_sec += stat.selfseconds()
             total_percent += stat.selfpercent()
 
-        functiondata.append((fname,
-                             total_cum_sec,
-                             total_self_sec,
-                             total_percent,
-                             sitestats))
+        functiondata.append(
+            (fname, total_cum_sec, total_self_sec, total_percent, sitestats)
+        )
 
     # sort by total self sec
     functiondata.sort(reverse=True, key=lambda x: x[2])
@@ -559,32 +593,43 @@
     for function in functiondata:
         if function[3] < 0.05:
             continue
-        fp.write(b'%6.2f %9.2f %9.2f  %s\n' % (
-            function[3], # total percent
-            function[1], # total cum sec
-            function[2], # total self sec
-            function[0])) # file:function
+        fp.write(
+            b'%6.2f %9.2f %9.2f  %s\n'
+            % (
+                function[3],  # total percent
+                function[1],  # total cum sec
+                function[2],  # total self sec
+                function[0],
+            )
+        )  # file:function
 
         function[4].sort(reverse=True, key=lambda i: i.selfseconds())
         for stat in function[4]:
             # only show line numbers for significant locations (>1% time spent)
             if stat.selfpercent() > 1:
                 source = stat.site.getsource(25)
-                if sys.version_info.major >= 3 and not isinstance(source, bytes):
+                if sys.version_info.major >= 3 and not isinstance(
+                    source, bytes
+                ):
                     source = pycompat.bytestr(source)
 
-                stattuple = (stat.selfpercent(), stat.selfseconds(),
-                             stat.site.lineno, source)
+                stattuple = (
+                    stat.selfpercent(),
+                    stat.selfseconds(),
+                    stat.site.lineno,
+                    source,
+                )
 
                 fp.write(b'%33.0f%% %6.2f   line %d: %s\n' % stattuple)
 
+
 def display_about_method(data, fp, function=None, **kwargs):
     if function is None:
-        raise Exception("Invalid function")
+        raise Exception(b"Invalid function")
 
     filename = None
-    if ':' in function:
-        filename, function = function.split(':')
+    if b':' in function:
+        filename, function = function.split(b':')
 
     relevant_samples = 0
     parents = {}
@@ -592,8 +637,9 @@
 
     for sample in data.samples:
         for i, site in enumerate(sample.stack):
-            if site.function == function and (not filename
-                or site.filename() == filename):
+            if site.function == function and (
+                not filename or site.filename() == filename
+            ):
                 relevant_samples += 1
                 if i != len(sample.stack) - 1:
                     parent = sample.stack[i + 1]
@@ -607,20 +653,27 @@
                 else:
                     children[site] = 1
 
-    parents = [(parent, count) for parent, count in parents.iteritems()]
+    parents = [(parent, count) for parent, count in pycompat.iteritems(parents)]
     parents.sort(reverse=True, key=lambda x: x[1])
     for parent, count in parents:
-        fp.write(b'%6.2f%%   %s:%s   line %s: %s\n' %
-            (count / relevant_samples * 100,
-             pycompat.fsencode(parent.filename()),
-             pycompat.sysbytes(parent.function),
-             parent.lineno,
-             pycompat.sysbytes(parent.getsource(50))))
+        fp.write(
+            b'%6.2f%%   %s:%s   line %s: %s\n'
+            % (
+                count / relevant_samples * 100,
+                pycompat.fsencode(parent.filename()),
+                pycompat.sysbytes(parent.function),
+                parent.lineno,
+                pycompat.sysbytes(parent.getsource(50)),
+            )
+        )
 
     stats = SiteStats.buildstats(data.samples)
-    stats = [s for s in stats
-               if s.site.function == function and
-               (not filename or s.site.filename() == filename)]
+    stats = [
+        s
+        for s in stats
+        if s.site.function == function
+        and (not filename or s.site.filename() == filename)
+    ]
 
     total_cum_sec = 0
     total_self_sec = 0
@@ -635,20 +688,27 @@
     fp.write(
         b'\n    %s:%s    Total: %0.2fs (%0.2f%%)    Self: %0.2fs (%0.2f%%)\n\n'
         % (
-        pycompat.sysbytes(filename or '___'),
-        pycompat.sysbytes(function),
-        total_cum_sec,
-        total_cum_percent,
-        total_self_sec,
-        total_self_percent
-        ))
+            pycompat.sysbytes(filename or b'___'),
+            pycompat.sysbytes(function),
+            total_cum_sec,
+            total_cum_percent,
+            total_self_sec,
+            total_self_percent,
+        )
+    )
 
-    children = [(child, count) for child, count in children.iteritems()]
+    children = [(child, count) for child, count in pycompat.iteritems(children)]
     children.sort(reverse=True, key=lambda x: x[1])
     for child, count in children:
-        fp.write(b'        %6.2f%%   line %s: %s\n' %
-              (count / relevant_samples * 100, child.lineno,
-               pycompat.sysbytes(child.getsource(50))))
+        fp.write(
+            b'        %6.2f%%   line %s: %s\n'
+            % (
+                count / relevant_samples * 100,
+                child.lineno,
+                pycompat.sysbytes(child.getsource(50)),
+            )
+        )
+
 
 def display_hotpath(data, fp, limit=0.05, **kwargs):
     class HotNode(object):
@@ -682,36 +742,50 @@
 
     def _write(node, depth, multiple_siblings):
         site = node.site
-        visiblechildren = [c for c in node.children.itervalues()
-                             if c.count >= (limit * root.count)]
+        visiblechildren = [
+            c
+            for c in pycompat.itervalues(node.children)
+            if c.count >= (limit * root.count)
+        ]
         if site:
             indent = depth * 2 - 1
-            filename = ''
-            function = ''
+            filename = b''
+            function = b''
             if len(node.children) > 0:
-                childsite = list(node.children.itervalues())[0].site
-                filename = (childsite.filename() + ':').ljust(15)
+                childsite = list(pycompat.itervalues(node.children))[0].site
+                filename = (childsite.filename() + b':').ljust(15)
                 function = childsite.function
 
             # lots of string formatting
-            listpattern = ''.ljust(indent) +\
-                          ('\\' if multiple_siblings else '|') +\
-                          ' %4.1f%%' +\
-                          (' %5.2fs' % node.count if showtime else '') +\
-                          '  %s %s'
-            liststring = listpattern % (node.count / root.count * 100,
-                                        filename, function)
-            codepattern = '%' + ('%d' % (55 - len(liststring))) + 's %d:  %s'
-            codestring = codepattern % ('line', site.lineno, site.getsource(30))
+            listpattern = (
+                b''.ljust(indent)
+                + (b'\\' if multiple_siblings else b'|')
+                + b' %4.1f%%'
+                + (b' %5.2fs' % node.count if showtime else b'')
+                + b'  %s %s'
+            )
+            liststring = listpattern % (
+                node.count / root.count * 100,
+                filename,
+                function,
+            )
+            codepattern = b'%' + (b'%d' % (55 - len(liststring))) + b's %d:  %s'
+            codestring = codepattern % (
+                b'line',
+                site.lineno,
+                site.getsource(30),
+            )
 
             finalstring = liststring + codestring
-            childrensamples = sum([c.count for c in node.children.itervalues()])
+            childrensamples = sum(
+                [c.count for c in pycompat.itervalues(node.children)]
+            )
             # Make frames that performed more than 10% of the operation red
             if node.count - childrensamples > (0.1 * root.count):
-                finalstring = '\033[91m' + finalstring + '\033[0m'
+                finalstring = b'\033[91m' + finalstring + b'\033[0m'
             # Make frames that didn't actually perform work dark grey
             elif node.count - childrensamples == 0:
-                finalstring = '\033[90m' + finalstring + '\033[0m'
+                finalstring = b'\033[90m' + finalstring + b'\033[0m'
             fp.write(finalstring + b'\n')
 
         newdepth = depth
@@ -725,40 +799,41 @@
     if root.count > 0:
         _write(root, 0, False)
 
+
 def write_to_flame(data, fp, scriptpath=None, outputfile=None, **kwargs):
     if scriptpath is None:
-        scriptpath = encoding.environ['HOME'] + '/flamegraph.pl'
+        scriptpath = encoding.environ[b'HOME'] + b'/flamegraph.pl'
     if not os.path.exists(scriptpath):
         fp.write(b'error: missing %s\n' % scriptpath)
         fp.write(b'get it here: https://github.com/brendangregg/FlameGraph\n')
         return
 
-    fd, path = pycompat.mkstemp()
-
-    file = open(path, "w+")
-
     lines = {}
     for sample in data.samples:
         sites = [s.function for s in sample.stack]
         sites.reverse()
-        line = ';'.join(sites)
+        line = b';'.join(sites)
         if line in lines:
             lines[line] = lines[line] + 1
         else:
             lines[line] = 1
 
-    for line, count in lines.iteritems():
-        file.write("%s %d\n" % (line, count))
+    fd, path = pycompat.mkstemp()
 
-    file.close()
+    with open(path, b"w+") as file:
+        for line, count in pycompat.iteritems(lines):
+            file.write(b"%s %d\n" % (line, count))
 
     if outputfile is None:
-        outputfile = '~/flamegraph.svg'
+        outputfile = b'~/flamegraph.svg'
 
-    os.system("perl ~/flamegraph.pl %s > %s" % (path, outputfile))
+    os.system(b"perl ~/flamegraph.pl %s > %s" % (path, outputfile))
     fp.write(b'Written to %s\n' % outputfile)
 
+
 _pathcache = {}
+
+
 def simplifypath(path):
     '''Attempt to make the path to a Python module easier to read by
     removing whatever part of the Python search path it was found
@@ -766,15 +841,16 @@
 
     if path in _pathcache:
         return _pathcache[path]
-    hgpath = pycompat.fsencode(encoding.__file__).rsplit(os.sep, 2)[0]
+    hgpath = encoding.__file__.rsplit(os.sep, 2)[0]
     for p in [hgpath] + sys.path:
         prefix = p + os.sep
         if path.startswith(prefix):
-            path = path[len(prefix):]
+            path = path[len(prefix) :]
             break
     _pathcache[path] = path
     return path
 
+
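The search-path stripping above can be shown in isolation; a sketch where
``search_paths`` stands in for ``[hgpath] + sys.path`` (the real function
also memoizes results in ``_pathcache``)::

    import os

    def strip_prefix(path, search_paths):
        # drop the first matching search-path prefix, as simplifypath does
        for p in search_paths:
            prefix = p + os.sep
            if path.startswith(prefix):
                return path[len(prefix):]
        return path

    print(strip_prefix('/usr/lib/python3/json/decoder.py', ['/usr/lib/python3']))
    # json/decoder.py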
 def write_to_json(data, fp):
     samples = []
 
@@ -783,9 +859,12 @@
 
         for frame in sample.stack:
             stack.append(
-                (pycompat.sysstr(frame.path),
-                 frame.lineno,
-                 pycompat.sysstr(frame.function)))
+                (
+                    pycompat.sysstr(frame.path),
+                    frame.lineno,
+                    pycompat.sysstr(frame.function),
+                )
+            )
 
         samples.append((sample.time, stack))
 
@@ -795,6 +874,7 @@
 
     fp.write(data)
 
+
 def write_to_chrome(data, fp, minthreshold=0.005, maxthreshold=0.999):
     samples = []
     laststack = collections.deque()
@@ -804,7 +884,7 @@
     # representation to save space. It's fiddly but worth it.
     # We maintain a bijection between stack and ID.
     stack2id = {}
-    id2stack = [] # will eventually be rendered
+    id2stack = []  # will eventually be rendered
 
     def stackid(stack):
         if not stack:
@@ -814,7 +894,7 @@
         parent = stackid(stack[1:])
         myid = len(stack2id)
         stack2id[stack] = myid
-        id2stack.append(dict(category=stack[0][0], name='%s %s' % stack[0]))
+        id2stack.append(dict(category=stack[0][0], name=r'%s %s' % stack[0]))
         if parent is not None:
             id2stack[-1].update(parent=parent)
         return myid
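The bijection maintained by ``stackid`` above can be sketched on its own
(frame payloads simplified to a bare name)::

    stack2id = {}
    id2stack = []

    def intern_stack(stack):
        # give each unique stack tuple a small integer ID; each frame
        # points at its parent's ID, so every suffix is stored only once
        if not stack:
            return None
        if stack in stack2id:
            return stack2id[stack]
        parent = intern_stack(stack[1:])
        myid = len(stack2id)
        stack2id[stack] = myid
        frame = {'name': stack[0]}
        if parent is not None:
            frame['parent'] = parent
        id2stack.append(frame)
        return myid

    intern_stack(('inner', 'outer'))
    print(id2stack)  # [{'name': 'outer'}, {'name': 'inner', 'parent': 0}]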
@@ -849,8 +929,16 @@
         if minthreshold <= duration <= maxthreshold:
             # ensure no zero-duration events
             sampletime = max(oldtime + clamp, sample.time)
-            samples.append(dict(ph='E', name=oldfunc, cat=oldcat, sf=oldsid,
-                                ts=sampletime*1e6, pid=0))
+            samples.append(
+                dict(
+                    ph=r'E',
+                    name=oldfunc,
+                    cat=oldcat,
+                    sf=oldsid,
+                    ts=sampletime * 1e6,
+                    pid=0,
+                )
+            )
         else:
             blacklist.add(oldidx)
 
@@ -858,8 +946,16 @@
     # events given only stack snapshots.
 
     for sample in data.samples:
-        stack = tuple((('%s:%d' % (simplifypath(frame.path), frame.lineno),
-                        frame.function) for frame in sample.stack))
+        stack = tuple(
+            (
+                (
+                    r'%s:%d'
+                    % (simplifypath(pycompat.sysstr(frame.path)), frame.lineno),
+                    pycompat.sysstr(frame.function),
+                )
+                for frame in sample.stack
+            )
+        )
         qstack = collections.deque(stack)
         if laststack == qstack:
             continue
@@ -873,19 +969,35 @@
             laststack.appendleft(f)
             path, name = f
             sid = stackid(tuple(laststack))
-            samples.append(dict(ph='B', name=name, cat=path, ts=sample.time*1e6,
-                                sf=sid, pid=0))
+            samples.append(
+                dict(
+                    ph=r'B',
+                    name=name,
+                    cat=path,
+                    ts=sample.time * 1e6,
+                    sf=sid,
+                    pid=0,
+                )
+            )
         laststack = collections.deque(stack)
     while laststack:
         poplast()
-    events = [s[1] for s in enumerate(samples) if s[0] not in blacklist]
-    frames = collections.OrderedDict((str(k), v)
-                                     for (k,v) in enumerate(id2stack))
-    json.dump(dict(traceEvents=events, stackFrames=frames), fp, indent=1)
-    fp.write('\n')
+    events = [
+        sample for idx, sample in enumerate(samples) if idx not in blacklist
+    ]
+    frames = collections.OrderedDict(
+        (str(k), v) for (k, v) in enumerate(id2stack)
+    )
+    data = json.dumps(dict(traceEvents=events, stackFrames=frames), indent=1)
+    if not isinstance(data, bytes):
+        data = data.encode('utf-8')
+    fp.write(data)
+    fp.write(b'\n')
+
 
 def printusage():
-    print(r"""
+    print(
+        r"""
 The statprof command line allows you to inspect the last profile's results in
 the following forms:
 
@@ -902,7 +1014,9 @@
     flame [-s --script-path] [-o --output-file path]
         Writes out a flamegraph to output-file (defaults to ~/flamegraph.svg)
         Requires that ~/flamegraph.pl exist.
-        (Specify alternate script path with --script-path.)""")
+        (Specify alternate script path with --script-path.)"""
+    )
+
 
 def main(argv=None):
     if argv is None:
@@ -915,48 +1029,51 @@
     displayargs = {}
 
     optstart = 2
-    displayargs['function'] = None
+    displayargs[b'function'] = None
     if argv[1] == r'hotpath':
-        displayargs['format'] = DisplayFormats.Hotpath
+        displayargs[b'format'] = DisplayFormats.Hotpath
     elif argv[1] == r'lines':
-        displayargs['format'] = DisplayFormats.ByLine
+        displayargs[b'format'] = DisplayFormats.ByLine
     elif argv[1] == r'functions':
-        displayargs['format'] = DisplayFormats.ByMethod
+        displayargs[b'format'] = DisplayFormats.ByMethod
     elif argv[1] == r'function':
-        displayargs['format'] = DisplayFormats.AboutMethod
-        displayargs['function'] = argv[2]
+        displayargs[b'format'] = DisplayFormats.AboutMethod
+        displayargs[b'function'] = argv[2]
         optstart = 3
     elif argv[1] == r'flame':
-        displayargs['format'] = DisplayFormats.FlameGraph
+        displayargs[b'format'] = DisplayFormats.FlameGraph
     else:
         printusage()
         return 0
 
     # process options
     try:
-        opts, args = pycompat.getoptb(sys.argv[optstart:], "hl:f:o:p:",
-                                   ["help", "limit=", "file=", "output-file=", "script-path="])
+        opts, args = pycompat.getoptb(
+            sys.argv[optstart:],
+            b"hl:f:o:p:",
+            [b"help", b"limit=", b"file=", b"output-file=", b"script-path="],
+        )
     except getopt.error as msg:
         print(msg)
         printusage()
         return 2
 
-    displayargs['limit'] = 0.05
+    displayargs[b'limit'] = 0.05
     path = None
     for o, value in opts:
         if o in (r"-l", r"--limit"):
-            displayargs['limit'] = float(value)
+            displayargs[b'limit'] = float(value)
         elif o in (r"-f", r"--file"):
             path = value
         elif o in (r"-o", r"--output-file"):
-            displayargs['outputfile'] = value
+            displayargs[b'outputfile'] = value
         elif o in (r"-p", r"--script-path"):
-            displayargs['scriptpath'] = value
+            displayargs[b'scriptpath'] = value
         elif o in (r"-h", r"help"):
             printusage()
             return 0
         else:
-            assert False, "unhandled option %s" % o
+            assert False, b"unhandled option %s" % o
 
     if not path:
         print(r'must specify --file to load')
@@ -968,5 +1085,6 @@
 
     return 0
 
+
 if __name__ == r"__main__":
     sys.exit(main())
--- a/mercurial/store.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/store.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,8 +14,11 @@
 import stat
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
+    changelog,
     error,
+    manifest,
     node,
     policy,
     pycompat,
@@ -28,6 +31,7 @@
 # It is done to prevent loading large fncache files into memory
 fncache_chunksize = 10 ** 6
 
+
 def _matchtrackedpath(path, matcher):
     """parses a fncache entry and returns whether the entry is tracking a path
     matched by matcher or not.
@@ -37,12 +41,13 @@
     if matcher is None:
         return True
     path = decodedir(path)
-    if path.startswith('data/'):
-        return matcher(path[len('data/'):-len('.i')])
-    elif path.startswith('meta/'):
-        return matcher.visitdir(path[len('meta/'):-len('/00manifest.i')])
+    if path.startswith(b'data/'):
+        return matcher(path[len(b'data/') : -len(b'.i')])
+    elif path.startswith(b'meta/'):
+        return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
 
-    raise error.ProgrammingError("cannot decode path %s" % path)
+    raise error.ProgrammingError(b"cannot decode path %s" % path)
+
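The slicing above, shown on a typical fncache entry with plain bytes (the
real code hands the result to a matcher)::

    entry = b'data/foo/bar.i'
    print(entry[len(b'data/') : -len(b'.i')])  # b'foo/bar'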
 
 # This avoids a collision between a file named foo and a dir named
 # foo.i or foo.d
@@ -57,13 +62,16 @@
     >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
     'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
     '''
-    return (path
-            .replace(".hg/", ".hg.hg/")
-            .replace(".i/", ".i.hg/")
-            .replace(".d/", ".d.hg/"))
+    return (
+        path.replace(b".hg/", b".hg.hg/")
+        .replace(b".i/", b".i.hg/")
+        .replace(b".d/", b".d.hg/")
+    )
+
 
 encodedir = getattr(parsers, 'encodedir', _encodedir)
 
+
 def decodedir(path):
     '''
     >>> decodedir(b'data/foo.i')
@@ -73,12 +81,14 @@
     >>> decodedir(b'data/foo.i.hg.hg/bla.i')
     'data/foo.i.hg/bla.i'
     '''
-    if ".hg/" not in path:
+    if b".hg/" not in path:
         return path
-    return (path
-            .replace(".d.hg/", ".d/")
-            .replace(".i.hg/", ".i/")
-            .replace(".hg.hg/", ".hg/"))
+    return (
+        path.replace(b".d.hg/", b".d/")
+        .replace(b".i.hg/", b".i/")
+        .replace(b".hg.hg/", b".hg/")
+    )
+
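A small round-trip check of the escaping pair above, using standalone copies
of the pure-Python versions (``parsers.encodedir`` from the C extension is
used as a drop-in when available)::

    def encodedir_py(path):
        return (path.replace(b'.hg/', b'.hg.hg/')
                    .replace(b'.i/', b'.i.hg/')
                    .replace(b'.d/', b'.d.hg/'))

    def decodedir_py(path):
        return (path.replace(b'.d.hg/', b'.d/')
                    .replace(b'.i.hg/', b'.i/')
                    .replace(b'.hg.hg/', b'.hg/'))

    p = b'data/foo.i/bla.i'
    assert decodedir_py(encodedir_py(p)) == p
    print(encodedir_py(p))  # b'data/foo.i.hg/bla.i'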
 
 def _reserved():
     ''' characters that are problematic for filesystems
@@ -97,6 +107,7 @@
     for x in winreserved:
         yield x
 
+
 def _buildencodefun():
     '''
     >>> enc, dec = _buildencodefun()
@@ -121,38 +132,45 @@
     >>> dec(b'the~07quick~adshot')
     'the\\x07quick\\xadshot'
     '''
-    e = '_'
+    e = b'_'
     xchr = pycompat.bytechr
     asciistr = list(map(xchr, range(127)))
-    capitals = list(range(ord("A"), ord("Z") + 1))
+    capitals = list(range(ord(b"A"), ord(b"Z") + 1))
 
     cmap = dict((x, x) for x in asciistr)
     for x in _reserved():
-        cmap[xchr(x)] = "~%02x" % x
+        cmap[xchr(x)] = b"~%02x" % x
     for x in capitals + [ord(e)]:
         cmap[xchr(x)] = e + xchr(x).lower()
 
     dmap = {}
-    for k, v in cmap.iteritems():
+    for k, v in pycompat.iteritems(cmap):
         dmap[v] = k
+
     def decode(s):
         i = 0
         while i < len(s):
             for l in pycompat.xrange(1, 4):
                 try:
-                    yield dmap[s[i:i + l]]
+                    yield dmap[s[i : i + l]]
                     i += l
                     break
                 except KeyError:
                     pass
             else:
                 raise KeyError
-    return (lambda s: ''.join([cmap[s[c:c + 1]]
-                               for c in pycompat.xrange(len(s))]),
-            lambda s: ''.join(list(decode(s))))
+
+    return (
+        lambda s: b''.join(
+            [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
+        ),
+        lambda s: b''.join(list(decode(s))),
+    )
+
 
 _encodefname, _decodefname = _buildencodefun()
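A quick illustration of the capital-letter rule built above (capitals and
``_`` get a leading underscore; the ``~%02x`` branch for reserved bytes is
omitted from this sketch)::

    cmap = {}
    for x in range(ord('A'), ord('Z') + 1):
        cmap[chr(x)] = '_' + chr(x).lower()
    cmap['_'] = '__'
    print(''.join(cmap.get(c, c) for c in 'FOO_bar'))  # '_f_o_o__bar'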
 
+
 def encodefilename(s):
     '''
     >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
@@ -160,6 +178,7 @@
     '''
     return _encodefname(encodedir(s))
 
+
 def decodefilename(s):
     '''
     >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
@@ -167,6 +186,7 @@
     '''
     return decodedir(_decodefname(s))
 
+
 def _buildlowerencodefun():
     '''
     >>> f = _buildlowerencodefun()
@@ -182,18 +202,23 @@
     xchr = pycompat.bytechr
     cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
     for x in _reserved():
-        cmap[xchr(x)] = "~%02x" % x
-    for x in range(ord("A"), ord("Z") + 1):
+        cmap[xchr(x)] = b"~%02x" % x
+    for x in range(ord(b"A"), ord(b"Z") + 1):
         cmap[xchr(x)] = xchr(x).lower()
+
     def lowerencode(s):
-        return "".join([cmap[c] for c in pycompat.iterbytestr(s)])
+        return b"".join([cmap[c] for c in pycompat.iterbytestr(s)])
+
     return lowerencode
 
+
 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
 
 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
-_winres3 = ('aux', 'con', 'prn', 'nul') # length 3
-_winres4 = ('com', 'lpt')               # length 4 (with trailing 1..9)
+_winres3 = (b'aux', b'con', b'prn', b'nul')  # length 3
+_winres4 = (b'com', b'lpt')  # length 4 (with trailing 1..9)
+
+
 def _auxencode(path, dotencode):
     '''
     Encodes filenames containing names reserved by Windows or which end in
@@ -219,32 +244,37 @@
     for i, n in enumerate(path):
         if not n:
             continue
-        if dotencode and n[0] in '. ':
-            n = "~%02x" % ord(n[0:1]) + n[1:]
+        if dotencode and n[0] in b'. ':
+            n = b"~%02x" % ord(n[0:1]) + n[1:]
             path[i] = n
         else:
-            l = n.find('.')
+            l = n.find(b'.')
             if l == -1:
                 l = len(n)
-            if ((l == 3 and n[:3] in _winres3) or
-                (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
-                        and n[:3] in _winres4)):
+            if (l == 3 and n[:3] in _winres3) or (
+                l == 4
+                and n[3:4] <= b'9'
+                and n[3:4] >= b'1'
+                and n[:3] in _winres4
+            ):
                 # encode third letter ('aux' -> 'au~78')
-                ec = "~%02x" % ord(n[2:3])
+                ec = b"~%02x" % ord(n[2:3])
                 n = n[0:2] + ec + n[3:]
                 path[i] = n
-        if n[-1] in '. ':
+        if n[-1] in b'. ':
             # encode last period or space ('foo...' -> 'foo..~2e')
-            path[i] = n[:-1] + "~%02x" % ord(n[-1:])
+            path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
     return path
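A sketch of the two escapes applied above (not the exported API): the third
letter of a reserved name and any trailing dot or space become ``~`` plus
two hex digits of the byte::

    def escape_third(n):
        # b'aux' -> b'au~78', b'com1' -> b'co~6d1'
        return n[0:2] + b'~%02x' % ord(n[2:3]) + n[3:]

    def escape_last(n):
        # b'foo.' -> b'foo~2e'
        return n[:-1] + b'~%02x' % ord(n[-1:])

    print(escape_third(b'aux'), escape_last(b'foo.'))  # b'au~78' b'foo~2e'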
 
+
 _maxstorepathlen = 120
 _dirprefixlen = 8
 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
 
+
 def _hashencode(path, dotencode):
     digest = node.hex(hashlib.sha1(path).digest())
-    le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
+    le = lowerencode(path[5:]).split(b'/')  # skips prefix 'data/' or 'meta/'
     parts = _auxencode(le, dotencode)
     basename = parts[-1]
     _root, ext = os.path.splitext(basename)
@@ -252,9 +282,9 @@
     sdirslen = 0
     for p in parts[:-1]:
         d = p[:_dirprefixlen]
-        if d[-1] in '. ':
+        if d[-1] in b'. ':
             # Windows can't access dirs ending in period or space
-            d = d[:-1] + '_'
+            d = d[:-1] + b'_'
         if sdirslen == 0:
             t = len(d)
         else:
@@ -263,16 +293,17 @@
                 break
         sdirs.append(d)
         sdirslen = t
-    dirs = '/'.join(sdirs)
+    dirs = b'/'.join(sdirs)
     if len(dirs) > 0:
-        dirs += '/'
-    res = 'dh/' + dirs + digest + ext
+        dirs += b'/'
+    res = b'dh/' + dirs + digest + ext
     spaceleft = _maxstorepathlen - len(res)
     if spaceleft > 0:
         filler = basename[:spaceleft]
-        res = 'dh/' + dirs + filler + digest + ext
+        res = b'dh/' + dirs + filler + digest + ext
     return res
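Paths that fall back to hashing come out shaped like
``dh/<shortened dirs><basename filler><sha1 hex><ext>``, capped at
``_maxstorepathlen`` (120). A simplified sketch that omits the
directory-shortening step::

    import hashlib
    import os

    def hashed_name(path, basename, maxlen=120):
        digest = hashlib.sha1(path).hexdigest().encode('ascii')
        _root, ext = os.path.splitext(basename)
        res = b'dh/' + digest + ext
        spaceleft = maxlen - len(res)
        if spaceleft > 0:
            res = b'dh/' + basename[:spaceleft] + digest + ext
        return res

    print(hashed_name(b'data/a/very/long/path.i', b'path.i'))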
 
+
 def _hybridencode(path, dotencode):
     '''encodes path with a length limit
 
@@ -305,46 +336,56 @@
     encoding was used.
     '''
     path = encodedir(path)
-    ef = _encodefname(path).split('/')
-    res = '/'.join(_auxencode(ef, dotencode))
+    ef = _encodefname(path).split(b'/')
+    res = b'/'.join(_auxencode(ef, dotencode))
     if len(res) > _maxstorepathlen:
         res = _hashencode(path, dotencode)
     return res
 
+
 def _pathencode(path):
     de = encodedir(path)
     if len(path) > _maxstorepathlen:
         return _hashencode(de, True)
-    ef = _encodefname(de).split('/')
-    res = '/'.join(_auxencode(ef, True))
+    ef = _encodefname(de).split(b'/')
+    res = b'/'.join(_auxencode(ef, True))
     if len(res) > _maxstorepathlen:
         return _hashencode(de, True)
     return res
 
+
 _pathencode = getattr(parsers, 'pathencode', _pathencode)
 
+
 def _plainhybridencode(f):
     return _hybridencode(f, False)
 
+
 def _calcmode(vfs):
     try:
         # files in .hg/ will be created using this mode
         mode = vfs.stat().st_mode
-            # avoid some useless chmods
+        # avoid some useless chmods
         if (0o777 & ~util.umask) == (0o777 & mode):
             mode = None
     except OSError:
         mode = None
     return mode
 
-_data = ('bookmarks narrowspec data meta 00manifest.d 00manifest.i'
-         ' 00changelog.d 00changelog.i phaseroots obsstore')
+
+_data = (
+    b'bookmarks narrowspec data meta 00manifest.d 00manifest.i'
+    b' 00changelog.d 00changelog.i phaseroots obsstore'
+)
+
 
 def isrevlog(f, kind, st):
-    return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
+    return kind == stat.S_IFREG and f[-2:] in (b'.i', b'.d')
+
 
 class basicstore(object):
     '''base class for local repository stores'''
+
     def __init__(self, path, vfstype):
         vfs = vfstype(path)
         self.path = vfs.base
@@ -355,13 +396,13 @@
         self.opener = self.vfs
 
     def join(self, f):
-        return self.path + '/' + encodedir(f)
+        return self.path + b'/' + encodedir(f)
 
     def _walk(self, relpath, recurse, filefilter=isrevlog):
         '''yields (unencoded, encoded, size)'''
         path = self.path
         if relpath:
-            path += '/' + relpath
+            path += b'/' + relpath
         striplen = len(self.path) + 1
         l = []
         if self.rawvfs.isdir(path):
@@ -370,7 +411,7 @@
             while visit:
                 p = visit.pop()
                 for f, kind, st in readdir(p, stat=True):
-                    fp = p + '/' + f
+                    fp = p + b'/' + f
                     if filefilter(f, kind, st):
                         n = util.pconvert(fp[striplen:])
                         l.append((decodedir(n), n, st.st_size))
@@ -379,12 +420,19 @@
         l.sort()
         return l
 
+    def changelog(self, trypending):
+        return changelog.changelog(self.vfs, trypending=trypending)
+
+    def manifestlog(self, repo, storenarrowmatch):
+        rootstore = manifest.manifestrevlog(self.vfs)
+        return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
+
     def datafiles(self, matcher=None):
-        return self._walk('data', True) + self._walk('meta', True)
+        return self._walk(b'data', True) + self._walk(b'meta', True)
 
     def topfiles(self):
         # yield manifest before changelog
-        return reversed(self._walk('', False))
+        return reversed(self._walk(b'', False))
 
     def walk(self, matcher=None):
         '''yields (unencoded, encoded, size)
@@ -399,7 +447,7 @@
             yield x
 
     def copylist(self):
-        return ['requires'] + _data.split()
+        return [b'requires'] + _data.split()
 
     def write(self, tr):
         pass
@@ -412,18 +460,19 @@
 
     def __contains__(self, path):
         '''Checks if the store contains path'''
-        path = "/".join(("data", path))
+        path = b"/".join((b"data", path))
         # file?
-        if self.vfs.exists(path + ".i"):
+        if self.vfs.exists(path + b".i"):
             return True
         # dir?
-        if not path.endswith("/"):
-            path = path + "/"
+        if not path.endswith(b"/"):
+            path = path + b"/"
         return self.vfs.exists(path)
 
+
 class encodedstore(basicstore):
     def __init__(self, path, vfstype):
-        vfs = vfstype(path + '/store')
+        vfs = vfstype(path + b'/store')
         self.path = vfs.base
         self.createmode = _calcmode(vfs)
         vfs.createmode = self.createmode
@@ -442,11 +491,13 @@
             yield a, b, size
 
     def join(self, f):
-        return self.path + '/' + encodefilename(f)
+        return self.path + b'/' + encodefilename(f)
 
     def copylist(self):
-        return (['requires', '00changelog.i'] +
-                ['store/' + f for f in _data.split()])
+        return [b'requires', b'00changelog.i'] + [
+            b'store/' + f for f in _data.split()
+        ]
+
 
 class fncache(object):
     # the filename used to be partially encoded
@@ -470,7 +521,7 @@
         '''fill the entries from the fncache file'''
         self._dirty = False
         try:
-            fp = self.vfs('fncache', mode='rb')
+            fp = self.vfs(b'fncache', mode=b'rb')
         except IOError:
             # skip nonexistent file
             self.entries = set()
@@ -482,33 +533,37 @@
             chunk += c
             try:
                 p = chunk.rindex(b'\n')
-                self.entries.update(decodedir(chunk[:p + 1]).splitlines())
-                chunk = chunk[p + 1:]
+                self.entries.update(decodedir(chunk[: p + 1]).splitlines())
+                chunk = chunk[p + 1 :]
             except ValueError:
                 # substring '\n' not found, maybe the entry is bigger than the
                 # chunksize, so let's keep iterating
                 pass
 
         if chunk:
-            msg = _("fncache does not ends with a newline")
+            msg = _(b"fncache does not ends with a newline")
             if warn:
-                warn(msg + '\n')
+                warn(msg + b'\n')
             else:
-                raise error.Abort(msg,
-                                  hint=_("use 'hg debugrebuildfncache' to "
-                                         "rebuild the fncache"))
+                raise error.Abort(
+                    msg,
+                    hint=_(
+                        b"use 'hg debugrebuildfncache' to "
+                        b"rebuild the fncache"
+                    ),
+                )
         self._checkentries(fp, warn)
         fp.close()
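The loading loop above keeps at most about one chunk (``fncache_chunksize``,
10 ** 6 bytes) in memory at a time; a self-contained sketch of the same
split-at-the-last-newline pattern::

    import io

    def iterlines(fp, chunksize=10 ** 6):
        chunk = b''
        while True:
            c = fp.read(chunksize)
            if not c:
                break
            chunk += c
            try:
                p = chunk.rindex(b'\n')
                for line in chunk[: p + 1].splitlines():
                    yield line
                chunk = chunk[p + 1:]
            except ValueError:
                pass  # no newline yet; an entry may span several chunks
        if chunk:
            yield chunk  # data after the final newline (invalid fncache)

    print(list(iterlines(io.BytesIO(b'a\nb\nc\n'), chunksize=2)))
    # [b'a', b'b', b'c']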
 
     def _checkentries(self, fp, warn):
         """ make sure there is no empty string in entries """
-        if '' in self.entries:
+        if b'' in self.entries:
             fp.seek(0)
             for n, line in enumerate(util.iterfile(fp)):
-                if not line.rstrip('\n'):
-                    t = _('invalid entry in fncache, line %d') % (n + 1)
+                if not line.rstrip(b'\n'):
+                    t = _(b'invalid entry in fncache, line %d') % (n + 1)
                     if warn:
-                        warn(t + '\n')
+                        warn(t + b'\n')
                     else:
                         raise error.Abort(t)
 
@@ -517,18 +572,18 @@
             assert self.entries is not None
             self.entries = self.entries | self.addls
             self.addls = set()
-            tr.addbackup('fncache')
-            fp = self.vfs('fncache', mode='wb', atomictemp=True)
+            tr.addbackup(b'fncache')
+            fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
             if self.entries:
-                fp.write(encodedir('\n'.join(self.entries) + '\n'))
+                fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
             fp.close()
             self._dirty = False
         if self.addls:
             # if we have just new entries, let's append them to the fncache
-            tr.addbackup('fncache')
-            fp = self.vfs('fncache', mode='ab', atomictemp=True)
+            tr.addbackup(b'fncache')
+            fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
             if self.addls:
-                fp.write(encodedir('\n'.join(self.addls) + '\n'))
+                fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
             fp.close()
             self.entries = None
             self.addls = set()
@@ -563,20 +618,22 @@
             self._load()
         return iter(self.entries | self.addls)
 
+
 class _fncachevfs(vfsmod.proxyvfs):
     def __init__(self, vfs, fnc, encode):
         vfsmod.proxyvfs.__init__(self, vfs)
         self.fncache = fnc
         self.encode = encode
 
-    def __call__(self, path, mode='r', *args, **kw):
+    def __call__(self, path, mode=b'r', *args, **kw):
         encoded = self.encode(path)
-        if mode not in ('r', 'rb') and (path.startswith('data/') or
-                                        path.startswith('meta/')):
+        if mode not in (b'r', b'rb') and (
+            path.startswith(b'data/') or path.startswith(b'meta/')
+        ):
             # do not trigger a fncache load when adding a file that already is
             # known to exist.
             notload = self.fncache.entries is None and self.vfs.exists(encoded)
-            if notload and 'a' in mode and not self.vfs.stat(encoded).st_size:
+            if notload and b'a' in mode and not self.vfs.stat(encoded).st_size:
                 # when appending to an existing file, if the file has size zero,
                 # it should be considered as missing. Such zero-size files are
                 # the result of truncation when a transaction is aborted.
@@ -591,6 +648,7 @@
         else:
             return self.vfs.join(path)
 
+
 class fncachestore(basicstore):
     def __init__(self, path, vfstype, dotencode):
         if dotencode:
@@ -598,9 +656,9 @@
         else:
             encode = _plainhybridencode
         self.encode = encode
-        vfs = vfstype(path + '/store')
+        vfs = vfstype(path + b'/store')
         self.path = vfs.base
-        self.pathsep = self.path + '/'
+        self.pathsep = self.path + b'/'
         self.createmode = _calcmode(vfs)
         vfs.createmode = self.createmode
         self.rawvfs = vfs
@@ -627,10 +685,13 @@
                     raise
 
     def copylist(self):
-        d = ('bookmarks narrowspec data meta dh fncache phaseroots obsstore'
-             ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
-        return (['requires', '00changelog.i'] +
-                ['store/' + f for f in d.split()])
+        d = (
+            b'bookmarks narrowspec data meta dh fncache phaseroots obsstore'
+            b' 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
+        )
+        return [b'requires', b'00changelog.i'] + [
+            b'store/' + f for f in d.split()
+        ]
 
     def write(self, tr):
         self.fncache.write(tr)
@@ -655,14 +716,14 @@
 
     def __contains__(self, path):
         '''Checks if the store contains path'''
-        path = "/".join(("data", path))
+        path = b"/".join((b"data", path))
         # check for files (exact match)
-        e = path + '.i'
+        e = path + b'.i'
         if e in self.fncache and self._exists(e):
             return True
         # now check for directories (prefix match)
-        if not path.endswith('/'):
-            path += '/'
+        if not path.endswith(b'/'):
+            path += b'/'
         for e in self.fncache:
             if e.startswith(path) and self._exists(e):
                 return True
--- a/mercurial/streamclone.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/streamclone.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,17 +12,19 @@
 import struct
 
 from .i18n import _
+from .pycompat import open
+from .interfaces import repository
 from . import (
     cacheutil,
     error,
     narrowspec,
     phases,
     pycompat,
-    repository,
     store,
     util,
 )
 
+
 def canperformstreamclone(pullop, bundle2=False):
     """Whether it is possible to perform a streaming clone as part of pull.
 
@@ -39,11 +41,11 @@
 
     bundle2supported = False
     if pullop.canusebundle2:
-        if 'v2' in pullop.remotebundle2caps.get('stream', []):
+        if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
             bundle2supported = True
         # else
-            # Server doesn't support bundle2 stream clone or doesn't support
-            # the versions we support. Fall back and possibly allow legacy.
+        # Server doesn't support bundle2 stream clone or doesn't support
+        # the versions we support. Fall back and possibly allow legacy.
 
     # Ensures legacy code path uses available bundle2.
     if bundle2supported and not bundle2:
@@ -66,7 +68,7 @@
     # likely only comes into play in LANs.
     if streamrequested is None:
         # The server can advertise whether to prefer streaming clone.
-        streamrequested = remote.capable('stream-preferred')
+        streamrequested = remote.capable(b'stream-preferred')
 
     if not streamrequested:
         return False, None
@@ -79,32 +81,43 @@
     # if the only requirement is "revlogv1." Else, the "streamreqs" capability
     # is advertised and contains a comma-delimited list of requirements.
     requirements = set()
-    if remote.capable('stream'):
-        requirements.add('revlogv1')
+    if remote.capable(b'stream'):
+        requirements.add(b'revlogv1')
     else:
-        streamreqs = remote.capable('streamreqs')
+        streamreqs = remote.capable(b'streamreqs')
         # This is weird and shouldn't happen with modern servers.
         if not streamreqs:
-            pullop.repo.ui.warn(_(
-                'warning: stream clone requested but server has them '
-                'disabled\n'))
+            pullop.repo.ui.warn(
+                _(
+                    b'warning: stream clone requested but server has them '
+                    b'disabled\n'
+                )
+            )
             return False, None
 
-        streamreqs = set(streamreqs.split(','))
+        streamreqs = set(streamreqs.split(b','))
         # Server requires something we don't support. Bail.
         missingreqs = streamreqs - repo.supportedformats
         if missingreqs:
-            pullop.repo.ui.warn(_(
-                'warning: stream clone requested but client is missing '
-                'requirements: %s\n') % ', '.join(sorted(missingreqs)))
             pullop.repo.ui.warn(
-                _('(see https://www.mercurial-scm.org/wiki/MissingRequirement '
-                  'for more information)\n'))
+                _(
+                    b'warning: stream clone requested but client is missing '
+                    b'requirements: %s\n'
+                )
+                % b', '.join(sorted(missingreqs))
+            )
+            pullop.repo.ui.warn(
+                _(
+                    b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
+                    b'for more information)\n'
+                )
+            )
             return False, None
         requirements = streamreqs
 
     return True, requirements
 
+
 def maybeperformlegacystreamclone(pullop):
     """Possibly perform a legacy stream clone operation.
 
@@ -127,14 +140,14 @@
     # Save remote branchmap. We will use it later to speed up branchcache
     # creation.
     rbranchmap = None
-    if remote.capable('branchmap'):
+    if remote.capable(b'branchmap'):
         with remote.commandexecutor() as e:
-            rbranchmap = e.callcommand('branchmap', {}).result()
+            rbranchmap = e.callcommand(b'branchmap', {}).result()
 
-    repo.ui.status(_('streaming all changes\n'))
+    repo.ui.status(_(b'streaming all changes\n'))
 
     with remote.commandexecutor() as e:
-        fp = e.callcommand('stream_out', {}).result()
+        fp = e.callcommand(b'stream_out', {}).result()
 
     # TODO strictly speaking, this code should all be inside the context
     # manager because the context manager is supposed to ensure all wire state
@@ -145,20 +158,22 @@
         resp = int(l)
     except ValueError:
         raise error.ResponseError(
-            _('unexpected response from remote server:'), l)
+            _(b'unexpected response from remote server:'), l
+        )
     if resp == 1:
-        raise error.Abort(_('operation forbidden by server'))
+        raise error.Abort(_(b'operation forbidden by server'))
     elif resp == 2:
-        raise error.Abort(_('locking the remote repository failed'))
+        raise error.Abort(_(b'locking the remote repository failed'))
     elif resp != 0:
-        raise error.Abort(_('the server sent an unknown error code'))
+        raise error.Abort(_(b'the server sent an unknown error code'))
 
     l = fp.readline()
     try:
-        filecount, bytecount = map(int, l.split(' ', 1))
+        filecount, bytecount = map(int, l.split(b' ', 1))
     except (ValueError, TypeError):
         raise error.ResponseError(
-            _('unexpected response from remote server:'), l)
+            _(b'unexpected response from remote server:'), l
+        )
 
     with repo.lock():
         consumev1(repo, fp, filecount, bytecount)
@@ -167,9 +182,11 @@
         #                    new format-related remote requirements
         # requirements from the streamed-in repository
         repo.requirements = requirements | (
-                repo.requirements - repo.supportedformats)
+            repo.requirements - repo.supportedformats
+        )
         repo.svfs.options = localrepo.resolvestorevfsoptions(
-            repo.ui, repo.requirements, repo.features)
+            repo.ui, repo.requirements, repo.features
+        )
         repo._writerequirements()
 
         if rbranchmap:
@@ -177,26 +194,29 @@
 
         repo.invalidate()
 
+
 def allowservergeneration(repo):
     """Whether streaming clones are allowed from the server."""
     if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
         return False
 
-    if not repo.ui.configbool('server', 'uncompressed', untrusted=True):
+    if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
         return False
 
     # The way stream clone works makes it impossible to hide secret changesets.
     # So don't allow this by default.
     secret = phases.hassecret(repo)
     if secret:
-        return repo.ui.configbool('server', 'uncompressedallowsecret')
+        return repo.ui.configbool(b'server', b'uncompressedallowsecret')
 
     return True
 
+
 # This is its own function so extensions can override it.
 def _walkstreamfiles(repo, matcher=None):
     return repo.store.walk(matcher)
 
+
 def generatev1(repo):
     """Emit content for version 1 of a streaming clone.
 
@@ -220,14 +240,15 @@
     total_bytes = 0
     # Get consistent snapshot of repo, lock during scan.
     with repo.lock():
-        repo.ui.debug('scanning\n')
+        repo.ui.debug(b'scanning\n')
         for name, ename, size in _walkstreamfiles(repo):
             if size:
                 entries.append((name, size))
                 total_bytes += size
 
-    repo.ui.debug('%d files, %d bytes to transfer\n' %
-                  (len(entries), total_bytes))
+    repo.ui.debug(
+        b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
+    )
 
     svfs = repo.svfs
     debugflag = repo.ui.debugflag
@@ -235,12 +256,12 @@
     def emitrevlogdata():
         for name, size in entries:
             if debugflag:
-                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
+                repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
             # partially encode name over the wire for backwards compat
-            yield '%s\0%d\n' % (store.encodedir(name), size)
+            yield b'%s\0%d\n' % (store.encodedir(name), size)
             # auditing at this stage is both pointless (paths are already
             # trusted by the local repo) and expensive
-            with svfs(name, 'rb', auditpath=False) as fp:
+            with svfs(name, b'rb', auditpath=False) as fp:
                 if size <= 65536:
                     yield fp.read(size)
                 else:
@@ -249,6 +270,7 @@
 
     return len(entries), total_bytes, emitrevlogdata()
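Each entry emitted above is framed as ``<encoded name>\0<size>\n`` followed
by exactly ``size`` bytes of revlog data. A sketch of the matching reader,
where ``fp`` is any binary file-like object positioned at the first entry::

    import io

    def read_entries(fp, filecount):
        for _ in range(filecount):
            header = fp.readline()
            name, size = header.rstrip(b'\n').split(b'\0', 1)
            yield name, fp.read(int(size))

    stream = io.BytesIO(b'data/foo.i\x004\nABCD')
    print(list(read_entries(stream, 1)))  # [(b'data/foo.i', b'ABCD')]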
 
+
 def generatev1wireproto(repo):
     """Emit content for version 1 of streaming clone suitable for the wire.
 
@@ -261,22 +283,23 @@
     a permissions error for the server process).
     """
     if not allowservergeneration(repo):
-        yield '1\n'
+        yield b'1\n'
         return
 
     try:
         filecount, bytecount, it = generatev1(repo)
     except error.LockError:
-        yield '2\n'
+        yield b'2\n'
         return
 
     # Indicates successful response.
-    yield '0\n'
-    yield '%d %d\n' % (filecount, bytecount)
+    yield b'0\n'
+    yield b'%d %d\n' % (filecount, bytecount)
     for chunk in it:
         yield chunk
 
-def generatebundlev1(repo, compression='UN'):
+
+def generatebundlev1(repo, compression=b'UN'):
     """Emit content for version 1 of a stream clone bundle.
 
     The first 4 bytes of the output ("HGS1") denote this as stream clone
@@ -298,29 +321,31 @@
 
     Returns a tuple of (requirements, data generator).
     """
-    if compression != 'UN':
-        raise ValueError('we do not support the compression argument yet')
+    if compression != b'UN':
+        raise ValueError(b'we do not support the compression argument yet')
 
     requirements = repo.requirements & repo.supportedformats
-    requires = ','.join(sorted(requirements))
+    requires = b','.join(sorted(requirements))
 
     def gen():
-        yield 'HGS1'
+        yield b'HGS1'
         yield compression
 
         filecount, bytecount, it = generatev1(repo)
-        repo.ui.status(_('writing %d bytes for %d files\n') %
-                         (bytecount, filecount))
+        repo.ui.status(
+            _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
+        )
 
-        yield struct.pack('>QQ', filecount, bytecount)
-        yield struct.pack('>H', len(requires) + 1)
-        yield requires + '\0'
+        yield struct.pack(b'>QQ', filecount, bytecount)
+        yield struct.pack(b'>H', len(requires) + 1)
+        yield requires + b'\0'
 
         # This is where we'll add compression in the future.
-        assert compression == 'UN'
+        assert compression == b'UN'
 
-        progress = repo.ui.makeprogress(_('bundle'), total=bytecount,
-                                        unit=_('bytes'))
+        progress = repo.ui.makeprogress(
+            _(b'bundle'), total=bytecount, unit=_(b'bytes')
+        )
         progress.update(0)
 
         for chunk in it:
@@ -331,6 +356,7 @@
 
     return requirements, gen()
 
+
 def consumev1(repo, fp, filecount, bytecount):
     """Apply the contents from version 1 of a streaming clone file handle.
 
@@ -341,10 +367,13 @@
     handled by this function.
     """
     with repo.lock():
-        repo.ui.status(_('%d files to transfer, %s of data\n') %
-                       (filecount, util.bytecount(bytecount)))
-        progress = repo.ui.makeprogress(_('clone'), total=bytecount,
-                                        unit=_('bytes'))
+        repo.ui.status(
+            _(b'%d files to transfer, %s of data\n')
+            % (filecount, util.bytecount(bytecount))
+        )
+        progress = repo.ui.makeprogress(
+            _(b'clone'), total=bytecount, unit=_(b'bytes')
+        )
         progress.update(0)
         start = util.timer()
 
@@ -362,23 +391,25 @@
         # nesting occurs also in ordinary case (e.g. enabling
         # clonebundles).
 
-        with repo.transaction('clone'):
+        with repo.transaction(b'clone'):
             with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                 for i in pycompat.xrange(filecount):
                     # XXX doesn't support '\n' or '\r' in filenames
                     l = fp.readline()
                     try:
-                        name, size = l.split('\0', 1)
+                        name, size = l.split(b'\0', 1)
                         size = int(size)
                     except (ValueError, TypeError):
                         raise error.ResponseError(
-                            _('unexpected response from remote server:'), l)
+                            _(b'unexpected response from remote server:'), l
+                        )
                     if repo.ui.debugflag:
-                        repo.ui.debug('adding %s (%s)\n' %
-                                      (name, util.bytecount(size)))
+                        repo.ui.debug(
+                            b'adding %s (%s)\n' % (name, util.bytecount(size))
+                        )
                     # for backwards compat, name was partially encoded
                     path = store.decodedir(name)
-                    with repo.svfs(path, 'w', backgroundclose=True) as ofp:
+                    with repo.svfs(path, b'w', backgroundclose=True) as ofp:
                         for chunk in util.filechunkiter(fp, limit=size):
                             progress.increment(step=len(chunk))
                             ofp.write(chunk)
@@ -391,28 +422,44 @@
         if elapsed <= 0:
             elapsed = 0.001
         progress.complete()
-        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
-                       (util.bytecount(bytecount), elapsed,
-                        util.bytecount(bytecount / elapsed)))
+        repo.ui.status(
+            _(b'transferred %s in %.1f seconds (%s/sec)\n')
+            % (
+                util.bytecount(bytecount),
+                elapsed,
+                util.bytecount(bytecount / elapsed),
+            )
+        )
+
 
 def readbundle1header(fp):
     compression = fp.read(2)
-    if compression != 'UN':
-        raise error.Abort(_('only uncompressed stream clone bundles are '
-            'supported; got %s') % compression)
+    if compression != b'UN':
+        raise error.Abort(
+            _(
+                b'only uncompressed stream clone bundles are '
+                b'supported; got %s'
+            )
+            % compression
+        )
 
-    filecount, bytecount = struct.unpack('>QQ', fp.read(16))
-    requireslen = struct.unpack('>H', fp.read(2))[0]
+    filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
+    requireslen = struct.unpack(b'>H', fp.read(2))[0]
     requires = fp.read(requireslen)
 
-    if not requires.endswith('\0'):
-        raise error.Abort(_('malformed stream clone bundle: '
-                            'requirements not properly encoded'))
+    if not requires.endswith(b'\0'):
+        raise error.Abort(
+            _(
+                b'malformed stream clone bundle: '
+                b'requirements not properly encoded'
+            )
+        )
 
-    requirements = set(requires.rstrip('\0').split(','))
+    requirements = set(requires.rstrip(b'\0').split(b','))
 
     return filecount, bytecount, requirements
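Putting the writer and reader together, the start of a v1 stream clone
bundle lays out as below (a sketch; the counts and requirements are made
up)::

    import struct

    requires = b','.join(sorted([b'generaldelta', b'revlogv1']))
    header = (
        b'HGS1'                        # magic
        + b'UN'                        # compression identifier
        + struct.pack('>QQ', 3, 4096)  # file count, byte count
        + struct.pack('>H', len(requires) + 1)
        + requires + b'\0'             # NUL-terminated requirements
    )
    print(len(header), header[:6])  # 46 b'HGS1UN'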
 
+
 def applybundlev1(repo, fp):
     """Apply the content from a stream clone bundle version 1.
 
@@ -420,46 +467,52 @@
     is at the 2 byte compression identifier.
     """
     if len(repo):
-        raise error.Abort(_('cannot apply stream clone bundle on non-empty '
-                            'repo'))
+        raise error.Abort(
+            _(b'cannot apply stream clone bundle on non-empty repo')
+        )
 
     filecount, bytecount, requirements = readbundle1header(fp)
     missingreqs = requirements - repo.supportedformats
     if missingreqs:
-        raise error.Abort(_('unable to apply stream clone: '
-                            'unsupported format: %s') %
-                            ', '.join(sorted(missingreqs)))
+        raise error.Abort(
+            _(b'unable to apply stream clone: unsupported format: %s')
+            % b', '.join(sorted(missingreqs))
+        )
 
     consumev1(repo, fp, filecount, bytecount)
 
+
 class streamcloneapplier(object):
     """Class to manage applying streaming clone bundles.
 
     We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
     readers to perform bundle type-specific functionality.
     """
+
     def __init__(self, fh):
         self._fh = fh
 
     def apply(self, repo):
         return applybundlev1(repo, self._fh)
 
+
 # type of file to stream
-_fileappend = 0 # append only file
-_filefull = 1   # full snapshot file
+_fileappend = 0  # append-only file
+_filefull = 1  # full snapshot file
 
 # Source of the file
-_srcstore = 's' # store (svfs)
-_srccache = 'c' # cache (cache)
+_srcstore = b's'  # store (svfs)
+_srccache = b'c'  # cache (cache)
 
 # This is its own function so extensions can override it.
 def _walkstreamfullstorefiles(repo):
     """list snapshot file from the store"""
     fnames = []
     if not repo.publishing():
-        fnames.append('phaseroots')
+        fnames.append(b'phaseroots')
     return fnames
 
+
 def _filterfull(entry, copy, vfsmap):
     """actually copy the snapshot files"""
     src, name, ftype, data = entry
@@ -467,22 +520,26 @@
         return entry
     return (src, name, ftype, copy(vfsmap[src].join(name)))
 
+
 @contextlib.contextmanager
 def maketempcopies():
     """return a function to temporary copy file"""
     files = []
     try:
+
         def copy(src):
             fd, dst = pycompat.mkstemp()
             os.close(fd)
             files.append(dst)
             util.copyfiles(src, dst, hardlink=True)
             return dst
+
         yield copy
     finally:
         for tmp in files:
             util.tryunlink(tmp)
 
+
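``maketempcopies`` above is an instance of a handy pattern: a context
manager that yields a factory and cleans up whatever the factory produced.
A generic, self-contained sketch::

    import contextlib
    import os
    import tempfile

    @contextlib.contextmanager
    def tempfiles():
        created = []
        try:
            def make():
                fd, path = tempfile.mkstemp()
                os.close(fd)
                created.append(path)
                return path
            yield make
        finally:
            for path in created:
                try:
                    os.unlink(path)
                except OSError:
                    pass

    with tempfiles() as make:
        p = make()
        assert os.path.exists(p)
    assert not os.path.exists(p)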
 def _makemap(repo):
     """make a (src -> vfs) map for the repo"""
     vfsmap = {
@@ -495,16 +552,18 @@
 
     return vfsmap
 
+
 def _emit2(repo, entries, totalfilesize):
     """actually emit the stream bundle"""
     vfsmap = _makemap(repo)
-    progress = repo.ui.makeprogress(_('bundle'), total=totalfilesize,
-                                    unit=_('bytes'))
+    progress = repo.ui.makeprogress(
+        _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
+    )
     progress.update(0)
     with maketempcopies() as copy, progress:
         # copy is delayed until we are in the try
         entries = [_filterfull(e, copy, vfsmap) for e in entries]
-        yield None # this release the lock on the repository
+        yield None  # this release the lock on the repository
         seen = 0
 
         for src, name, ftype, data in entries:
@@ -515,7 +574,7 @@
                 fp = vfs(name)
                 size = data
             elif ftype == _filefull:
-                fp = open(data, 'rb')
+                fp = open(data, b'rb')
                 size = util.fstat(fp).st_size
             try:
                 yield util.uvarintencode(size)
@@ -531,6 +590,7 @@
             finally:
                 fp.close()
 
+
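``util.uvarintencode`` frames the length fields of each v2 stream entry
above; a sketch assuming the usual LEB128 scheme (seven payload bits per
byte, high bit set on all but the last byte)::

    import io

    def uvarintencode(value):
        out = b''
        while value > 0x7f:
            out += bytes([(value & 0x7f) | 0x80])
            value >>= 7
        return out + bytes([value])

    def uvarintdecode(fp):
        result = shift = 0
        while True:
            byte = fp.read(1)[0]
            result |= (byte & 0x7f) << shift
            if not byte & 0x80:
                return result
            shift += 7

    assert uvarintdecode(io.BytesIO(uvarintencode(300))) == 300
    print(uvarintencode(300))  # b'\xac\x02'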
 def generatev2(repo, includes, excludes, includeobsmarkers):
     """Emit content for version 2 of a streaming clone.
 
@@ -553,7 +613,7 @@
         if includes or excludes:
             matcher = narrowspec.match(repo.root, includes, excludes)
 
-        repo.ui.debug('scanning\n')
+        repo.ui.debug(b'scanning\n')
         for name, ename, size in _walkstreamfiles(repo, matcher):
             if size:
                 entries.append((_srcstore, name, _fileappend, size))
@@ -562,9 +622,9 @@
             if repo.svfs.exists(name):
                 totalfilesize += repo.svfs.lstat(name).st_size
                 entries.append((_srcstore, name, _filefull, None))
-        if includeobsmarkers and repo.svfs.exists('obsstore'):
-            totalfilesize += repo.svfs.lstat('obsstore').st_size
-            entries.append((_srcstore, 'obsstore', _filefull, None))
+        if includeobsmarkers and repo.svfs.exists(b'obsstore'):
+            totalfilesize += repo.svfs.lstat(b'obsstore').st_size
+            entries.append((_srcstore, b'obsstore', _filefull, None))
         for name in cacheutil.cachetocopy(repo):
             if repo.cachevfs.exists(name):
                 totalfilesize += repo.cachevfs.lstat(name).st_size
@@ -576,6 +636,7 @@
 
     return len(entries), totalfilesize, chunks
 
+
 @contextlib.contextmanager
 def nested(*ctxs):
     this = ctxs[0]
@@ -587,6 +648,7 @@
         else:
             yield
 
+
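``nested`` enters an arbitrary number of context managers recursively and
releases them in reverse order. A self-contained sketch of the same
recursive idea, next to the stdlib equivalent, ``contextlib.ExitStack``::

    import contextlib
    import io

    @contextlib.contextmanager
    def nested_sketch(*ctxs):
        with ctxs[0]:
            if ctxs[1:]:
                with nested_sketch(*ctxs[1:]):
                    yield
            else:
                yield

    with nested_sketch(io.BytesIO(), io.BytesIO()):
        pass

    # the modern stdlib spelling of the same idea:
    with contextlib.ExitStack() as stack:
        for _ in range(2):
            stack.enter_context(io.BytesIO())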
 def consumev2(repo, fp, filecount, filesize):
     """Apply the contents from a version 2 streaming clone.
 
@@ -594,19 +656,21 @@
     method.
     """
     with repo.lock():
-        repo.ui.status(_('%d files to transfer, %s of data\n') %
-                       (filecount, util.bytecount(filesize)))
+        repo.ui.status(
+            _(b'%d files to transfer, %s of data\n')
+            % (filecount, util.bytecount(filesize))
+        )
 
         start = util.timer()
-        progress = repo.ui.makeprogress(_('clone'), total=filesize,
-                                        unit=_('bytes'))
+        progress = repo.ui.makeprogress(
+            _(b'clone'), total=filesize, unit=_(b'bytes')
+        )
         progress.update(0)
 
         vfsmap = _makemap(repo)
 
-        with repo.transaction('clone'):
-            ctxs = (vfs.backgroundclosing(repo.ui)
-                    for vfs in vfsmap.values())
+        with repo.transaction(b'clone'):
+            ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
             with nested(*ctxs):
                 for i in range(filecount):
                     src = util.readexactly(fp, 1)
@@ -617,10 +681,12 @@
                     name = util.readexactly(fp, namelen)
 
                     if repo.ui.debugflag:
-                        repo.ui.debug('adding [%s] %s (%s)\n' %
-                                      (src, name, util.bytecount(datalen)))
+                        repo.ui.debug(
+                            b'adding [%s] %s (%s)\n'
+                            % (src, name, util.bytecount(datalen))
+                        )
 
-                    with vfs(name, 'w') as ofp:
+                    with vfs(name, b'w') as ofp:
                         for chunk in util.filechunkiter(fp, limit=datalen):
                             progress.increment(step=len(chunk))
                             ofp.write(chunk)
@@ -632,19 +698,26 @@
         elapsed = util.timer() - start
         if elapsed <= 0:
             elapsed = 0.001
-        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
-                       (util.bytecount(progress.pos), elapsed,
-                        util.bytecount(progress.pos / elapsed)))
+        repo.ui.status(
+            _(b'transferred %s in %.1f seconds (%s/sec)\n')
+            % (
+                util.bytecount(progress.pos),
+                elapsed,
+                util.bytecount(progress.pos / elapsed),
+            )
+        )
         progress.complete()
 
+
 def applybundlev2(repo, fp, filecount, filesize, requirements):
     from . import localrepo
 
     missingreqs = [r for r in requirements if r not in repo.supported]
     if missingreqs:
-        raise error.Abort(_('unable to apply stream clone: '
-                            'unsupported format: %s') %
-                          ', '.join(sorted(missingreqs)))
+        raise error.Abort(
+            _(b'unable to apply stream clone: unsupported format: %s')
+            % b', '.join(sorted(missingreqs))
+        )
 
     consumev2(repo, fp, filecount, filesize)
 
@@ -652,7 +725,9 @@
     #                    new format-related remote requirements
     # requirements from the streamed-in repository
     repo.requirements = set(requirements) | (
-            repo.requirements - repo.supportedformats)
+        repo.requirements - repo.supportedformats
+    )
     repo.svfs.options = localrepo.resolvestorevfsoptions(
-        repo.ui, repo.requirements, repo.features)
+        repo.ui, repo.requirements, repo.features
+    )
     repo._writerequirements()
--- a/mercurial/subrepo.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/subrepo.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,6 +19,7 @@
 import xml.dom.minidom
 
 from .i18n import _
+from .pycompat import open
 from . import (
     cmdutil,
     encoding,
@@ -47,6 +48,7 @@
 _abssource = subrepoutil._abssource
 propertycache = util.propertycache
 
+
 def _expandedabspath(path):
     '''
     get a path or url and if it is a path expand it and return an absolute path
@@ -57,17 +59,21 @@
         path = util.normpath(os.path.abspath(u.path))
     return path
 
+
 def _getstorehashcachename(remotepath):
     '''get a unique filename for the store hash cache of a remote repository'''
     return node.hex(hashlib.sha1(_expandedabspath(remotepath)).digest())[0:12]
 
+
 class SubrepoAbort(error.Abort):
     """Exception class used to avoid handling a subrepo error more than once"""
+
     def __init__(self, *args, **kw):
         self.subrepo = kw.pop(r'subrepo', None)
         self.cause = kw.pop(r'cause', None)
         error.Abort.__init__(self, *args, **kw)
 
+
 def annotatesubrepoerror(func):
     def decoratedmethod(self, *args, **kargs):
         try:
@@ -77,74 +83,97 @@
             raise ex
         except error.Abort as ex:
             subrepo = subrelpath(self)
-            errormsg = (stringutil.forcebytestr(ex) + ' '
-                        + _('(in subrepository "%s")') % subrepo)
+            errormsg = (
+                stringutil.forcebytestr(ex)
+                + b' '
+                + _(b'(in subrepository "%s")') % subrepo
+            )
             # avoid handling this exception by raising a SubrepoAbort exception
-            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
-                               cause=sys.exc_info())
+            raise SubrepoAbort(
+                errormsg, hint=ex.hint, subrepo=subrepo, cause=sys.exc_info()
+            )
         return res
+
     return decoratedmethod
 
+
 def _updateprompt(ui, sub, dirty, local, remote):
     if dirty:
-        msg = (_(' subrepository sources for %s differ\n'
-                 'you can use (l)ocal source (%s) or (r)emote source (%s).\n'
-                 'what do you want to do?'
-                 '$$ &Local $$ &Remote')
-               % (subrelpath(sub), local, remote))
+        msg = _(
+            b' subrepository sources for %s differ\n'
+            b'you can use (l)ocal source (%s) or (r)emote source (%s).\n'
+            b'what do you want to do?'
+            b'$$ &Local $$ &Remote'
+        ) % (subrelpath(sub), local, remote)
     else:
-        msg = (_(' subrepository sources for %s differ (in checked out '
-                 'version)\n'
-                 'you can use (l)ocal source (%s) or (r)emote source (%s).\n'
-                 'what do you want to do?'
-                 '$$ &Local $$ &Remote')
-               % (subrelpath(sub), local, remote))
+        msg = _(
+            b' subrepository sources for %s differ (in checked out '
+            b'version)\n'
+            b'you can use (l)ocal source (%s) or (r)emote source (%s).\n'
+            b'what do you want to do?'
+            b'$$ &Local $$ &Remote'
+        ) % (subrelpath(sub), local, remote)
     return ui.promptchoice(msg, 0)
 
+
 def _sanitize(ui, vfs, ignore):
     for dirname, dirs, names in vfs.walk():
         for i, d in enumerate(dirs):
             if d.lower() == ignore:
                 del dirs[i]
                 break
-        if vfs.basename(dirname).lower() != '.hg':
+        if vfs.basename(dirname).lower() != b'.hg':
             continue
         for f in names:
-            if f.lower() == 'hgrc':
-                ui.warn(_("warning: removing potentially hostile 'hgrc' "
-                          "in '%s'\n") % vfs.join(dirname))
+            if f.lower() == b'hgrc':
+                ui.warn(
+                    _(
+                        b"warning: removing potentially hostile 'hgrc' "
+                        b"in '%s'\n"
+                    )
+                    % vfs.join(dirname)
+                )
                 vfs.unlink(vfs.reljoin(dirname, f))
 
+
 def _auditsubrepopath(repo, path):
     # sanity check for potentially unsafe paths such as '~' and '$FOO'
-    if path.startswith('~') or '$' in path or util.expandpath(path) != path:
-        raise error.Abort(_('subrepo path contains illegal component: %s')
-                          % path)
+    if path.startswith(b'~') or b'$' in path or util.expandpath(path) != path:
+        raise error.Abort(
+            _(b'subrepo path contains illegal component: %s') % path
+        )
     # auditor doesn't check if the path itself is a symlink
     pathutil.pathauditor(repo.root)(path)
     if repo.wvfs.islink(path):
-        raise error.Abort(_("subrepo '%s' traverses symbolic link") % path)
+        raise error.Abort(_(b"subrepo '%s' traverses symbolic link") % path)
+
 
 SUBREPO_ALLOWED_DEFAULTS = {
-    'hg': True,
-    'git': False,
-    'svn': False,
+    b'hg': True,
+    b'git': False,
+    b'svn': False,
 }
 
+
 def _checktype(ui, kind):
     # subrepos.allowed is a master kill switch. If disabled, subrepos are
     # disabled period.
-    if not ui.configbool('subrepos', 'allowed', True):
-        raise error.Abort(_('subrepos not enabled'),
-                          hint=_("see 'hg help config.subrepos' for details"))
+    if not ui.configbool(b'subrepos', b'allowed', True):
+        raise error.Abort(
+            _(b'subrepos not enabled'),
+            hint=_(b"see 'hg help config.subrepos' for details"),
+        )
 
     default = SUBREPO_ALLOWED_DEFAULTS.get(kind, False)
-    if not ui.configbool('subrepos', '%s:allowed' % kind, default):
-        raise error.Abort(_('%s subrepos not allowed') % kind,
-                          hint=_("see 'hg help config.subrepos' for details"))
+    if not ui.configbool(b'subrepos', b'%s:allowed' % kind, default):
+        raise error.Abort(
+            _(b'%s subrepos not allowed') % kind,
+            hint=_(b"see 'hg help config.subrepos' for details"),
+        )
 
     if kind not in types:
-        raise error.Abort(_('unknown subrepo type %s') % kind)
+        raise error.Abort(_(b'unknown subrepo type %s') % kind)
+
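Per the keys checked above, enabling a non-hg subrepo type takes both the
master switch and a per-kind opt-in; hg subrepos default to allowed, git
and svn to denied. An hgrc fragment permitting git subrepos would look
like::

   [subrepos]
   allowed = true
   git:allowed = true
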
 
 def subrepo(ctx, path, allowwdir=False, allowcreate=True):
     """return instance of the right subrepo class for subrepo in path"""
@@ -154,6 +183,7 @@
     # scripts that don't use our demand-loading
     global hg
     from . import hg as h
+
     hg = h
 
     repo = ctx.repo()
@@ -164,6 +194,7 @@
         state = (state[0], ctx.subrev(path), state[2])
     return types[state[2]](ctx, path, state[:2], allowcreate)
 
+
 def nullsubrepo(ctx, path, pctx):
     """return an empty subrepo in pctx for the extant subrepo in ctx"""
     # subrepo inherently violates our import layering rules
@@ -172,21 +203,23 @@
     # scripts that don't use our demand-loading
     global hg
     from . import hg as h
+
     hg = h
 
     repo = ctx.repo()
     _auditsubrepopath(repo, path)
     state = ctx.substate[path]
     _checktype(repo.ui, state[2])
-    subrev = ''
-    if state[2] == 'hg':
-        subrev = "0" * 40
+    subrev = b''
+    if state[2] == b'hg':
+        subrev = b"0" * 40
     return types[state[2]](pctx, path, (state[0], subrev), True)
 
+
 # subrepo classes need to implement the following abstract class:
 
+
 class abstractsubrepo(object):
-
     def __init__(self, ctx, path):
         """Initialize abstractsubrepo part
 
@@ -233,14 +266,14 @@
         This returns None, otherwise.
         """
         if self.dirty(ignoreupdate=ignoreupdate, missing=missing):
-            return _('uncommitted changes in subrepository "%s"'
-                     ) % subrelpath(self)
+            return _(b'uncommitted changes in subrepository "%s"') % subrelpath(
+                self
+            )
 
     def bailifchanged(self, ignoreupdate=False, hint=None):
         """raise Abort if subrepository is ``dirty()``
         """
-        dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate,
-                                       missing=True)
+        dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate, missing=True)
         if dirtyreason:
             raise error.Abort(dirtyreason, hint=hint)
 
@@ -293,7 +326,7 @@
         return []
 
     def addremove(self, matcher, prefix, uipathfn, opts):
-        self.ui.warn("%s: %s" % (prefix, _("addremove is not supported")))
+        self.ui.warn(b"%s: %s" % (prefix, _(b"addremove is not supported")))
         return 1
 
     def cat(self, match, fm, fntemplate, prefix, **opts):
@@ -321,7 +354,7 @@
 
     def fileflags(self, name):
         """return file flags"""
-        return ''
+        return b''
 
     def matchfileset(self, expr, badfn=None):
         """Resolve the fileset expression for this repo"""
@@ -338,15 +371,17 @@
             files = self.files()
         total = len(files)
         relpath = subrelpath(self)
-        progress = self.ui.makeprogress(_('archiving (%s)') % relpath,
-                                        unit=_('files'), total=total)
+        progress = self.ui.makeprogress(
+            _(b'archiving (%s)') % relpath, unit=_(b'files'), total=total
+        )
         progress.update(0)
         for name in files:
             flags = self.fileflags(name)
-            mode = 'x' in flags and 0o755 or 0o644
-            symlink = 'l' in flags
-            archiver.addfile(prefix + name, mode, symlink,
-                             self.filedata(name, decode))
+            mode = b'x' in flags and 0o755 or 0o644
+            symlink = b'l' in flags
+            archiver.addfile(
+                prefix + name, mode, symlink, self.filedata(name, decode)
+            )
             progress.increment()
         progress.complete()
         return total
@@ -360,19 +395,31 @@
     def forget(self, match, prefix, uipathfn, dryrun, interactive):
         return ([], [])
 
-    def removefiles(self, matcher, prefix, uipathfn, after, force, subrepos,
-                    dryrun, warnings):
+    def removefiles(
+        self,
+        matcher,
+        prefix,
+        uipathfn,
+        after,
+        force,
+        subrepos,
+        dryrun,
+        warnings,
+    ):
         """remove the matched files from the subrepository and the filesystem,
         possibly by force and/or after the file has been removed from the
         filesystem.  Return 0 on success, 1 on any warning.
         """
-        warnings.append(_("warning: removefiles not implemented (%s)")
-                        % self._path)
+        warnings.append(
+            _(b"warning: removefiles not implemented (%s)") % self._path
+        )
         return 1
 
     def revert(self, substate, *pats, **opts):
-        self.ui.warn(_('%s: reverting %s subrepos is unsupported\n')
-                     % (substate[0], substate[2]))
+        self.ui.warn(
+            _(b'%s: reverting %s subrepos is unsupported\n')
+            % (substate[0], substate[2])
+        )
         return []
 
     def shortid(self, revid):
@@ -401,35 +448,38 @@
         """
         return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path)
 
+
 class hgsubrepo(abstractsubrepo):
     def __init__(self, ctx, path, state, allowcreate):
         super(hgsubrepo, self).__init__(ctx, path)
         self._state = state
         r = ctx.repo()
         root = r.wjoin(util.localpath(path))
-        create = allowcreate and not r.wvfs.exists('%s/.hg' % path)
+        create = allowcreate and not r.wvfs.exists(b'%s/.hg' % path)
         # repository constructor does expand variables in path, which is
         # unsafe since subrepo path might come from untrusted source.
         if os.path.realpath(util.expandpath(root)) != root:
-            raise error.Abort(_('subrepo path contains illegal component: %s')
-                              % path)
+            raise error.Abort(
+                _(b'subrepo path contains illegal component: %s') % path
+            )
         self._repo = hg.repository(r.baseui, root, create=create)
         if self._repo.root != root:
-            raise error.ProgrammingError('failed to reject unsafe subrepo '
-                                         'path: %s (expanded to %s)'
-                                         % (root, self._repo.root))
+            raise error.ProgrammingError(
+                b'failed to reject unsafe subrepo '
+                b'path: %s (expanded to %s)' % (root, self._repo.root)
+            )
 
         # Propagate the parent's --hidden option
         if r is r.unfiltered():
             self._repo = self._repo.unfiltered()
 
         self.ui = self._repo.ui
-        for s, k in [('ui', 'commitsubrepos')]:
+        for s, k in [(b'ui', b'commitsubrepos')]:
             v = r.ui.config(s, k)
             if v:
-                self.ui.setconfig(s, k, v, 'subrepo')
+                self.ui.setconfig(s, k, v, b'subrepo')
         # internal config: ui._usedassubrepo
-        self.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
+        self.ui.setconfig(b'ui', b'_usedassubrepo', b'True', b'subrepo')
         self._initrepo(r, state[0], create)
 
     @annotatesubrepoerror
@@ -459,21 +509,21 @@
         This method is used to detect when there are changes that may
         require a push to a given remote path.'''
         # sort the files that will be hashed in increasing (likely) file size
-        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
-        yield '# %s\n' % _expandedabspath(remotepath)
+        filelist = (b'bookmarks', b'store/phaseroots', b'store/00changelog.i')
+        yield b'# %s\n' % _expandedabspath(remotepath)
         vfs = self._repo.vfs
         for relname in filelist:
             filehash = node.hex(hashlib.sha1(vfs.tryread(relname)).digest())
-            yield '%s = %s\n' % (relname, filehash)
+            yield b'%s = %s\n' % (relname, filehash)
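``_calcstorehash`` emits a tiny manifest, one ``name = sha1(contents)``
line per store file, headed by the expanded remote path. A standalone
sketch producing the same shape over ordinary files (``calc_store_hash``
is illustrative; missing files hash as empty, mirroring ``vfs.tryread``)::

   import hashlib
   import os

   def calc_store_hash(root, remotepath, filelist=(
           'bookmarks', 'store/phaseroots', 'store/00changelog.i')):
       yield '# %s\n' % remotepath
       for relname in filelist:
           try:
               with open(os.path.join(root, relname), 'rb') as fp:
                   data = fp.read()
           except OSError:
               data = b''  # tryread() semantics: missing reads as empty
           yield '%s = %s\n' % (relname, hashlib.sha1(data).hexdigest())
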
 
     @propertycache
     def _cachestorehashvfs(self):
-        return vfsmod.vfs(self._repo.vfs.join('cache/storehash'))
+        return vfsmod.vfs(self._repo.vfs.join(b'cache/storehash'))
 
     def _readstorehashcache(self, remotepath):
         '''read the store hash cache for a given remote repository'''
         cachefile = _getstorehashcachename(remotepath)
-        return self._cachestorehashvfs.tryreadlines(cachefile, 'r')
+        return self._cachestorehashvfs.tryreadlines(cachefile, b'r')
 
     def _cachestorehash(self, remotepath):
         '''cache the current store hash
@@ -485,7 +535,7 @@
         with self._repo.lock():
             storehash = list(self._calcstorehash(remotepath))
             vfs = self._cachestorehashvfs
-            vfs.writelines(cachefile, storehash, mode='wb', notindexed=True)
+            vfs.writelines(cachefile, storehash, mode=b'wb', notindexed=True)
 
     def _getctx(self):
         '''fetch the context for this subrepo revision, possibly a workingctx
@@ -502,25 +552,26 @@
         self._repo._subsource = source
 
         if create:
-            lines = ['[paths]\n']
+            lines = [b'[paths]\n']
 
             def addpathconfig(key, value):
                 if value:
-                    lines.append('%s = %s\n' % (key, value))
-                    self.ui.setconfig('paths', key, value, 'subrepo')
+                    lines.append(b'%s = %s\n' % (key, value))
+                    self.ui.setconfig(b'paths', key, value, b'subrepo')
 
             defpath = _abssource(self._repo, abort=False)
             defpushpath = _abssource(self._repo, True, abort=False)
-            addpathconfig('default', defpath)
+            addpathconfig(b'default', defpath)
             if defpath != defpushpath:
-                addpathconfig('default-push', defpushpath)
+                addpathconfig(b'default-push', defpushpath)
 
-            self._repo.vfs.write('hgrc', util.tonativeeol(''.join(lines)))
+            self._repo.vfs.write(b'hgrc', util.tonativeeol(b''.join(lines)))
 
     @annotatesubrepoerror
     def add(self, ui, match, prefix, uipathfn, explicitonly, **opts):
-        return cmdutil.add(ui, self._repo, match, prefix, uipathfn,
-                           explicitonly, **opts)
+        return cmdutil.add(
+            ui, self._repo, match, prefix, uipathfn, explicitonly, **opts
+        )
 
     @annotatesubrepoerror
     def addremove(self, m, prefix, uipathfn, opts):
@@ -528,15 +579,16 @@
         # always enter any of its subrepos.  Don't corrupt the options that will
         # be used to process sibling subrepos however.
         opts = copy.copy(opts)
-        opts['subrepos'] = True
+        opts[b'subrepos'] = True
         return scmutil.addremove(self._repo, m, prefix, uipathfn, opts)
 
     @annotatesubrepoerror
     def cat(self, match, fm, fntemplate, prefix, **opts):
         rev = self._state[1]
         ctx = self._repo[rev]
-        return cmdutil.cat(self.ui, self._repo, ctx, match, fm, fntemplate,
-                           prefix, **opts)
+        return cmdutil.cat(
+            self.ui, self._repo, ctx, match, fm, fntemplate, prefix, **opts
+        )
 
     @annotatesubrepoerror
     def status(self, rev2, **opts):
@@ -546,8 +598,10 @@
             ctx2 = self._repo[rev2]
             return self._repo.status(ctx1, ctx2, **opts)
         except error.RepoLookupError as inst:
-            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
-                         % (inst, subrelpath(self)))
+            self.ui.warn(
+                _(b'warning: error "%s" in subrepository "%s"\n')
+                % (inst, subrelpath(self))
+            )
             return scmutil.status([], [], [], [], [], [], [])
 
     @annotatesubrepoerror
@@ -558,45 +612,55 @@
             # in hex format
             if node2 is not None:
                 node2 = node.bin(node2)
-            logcmdutil.diffordiffstat(ui, self._repo, diffopts, node1, node2,
-                                      match, prefix=prefix, listsubrepos=True,
-                                      **opts)
+            logcmdutil.diffordiffstat(
+                ui,
+                self._repo,
+                diffopts,
+                node1,
+                node2,
+                match,
+                prefix=prefix,
+                listsubrepos=True,
+                **opts
+            )
         except error.RepoLookupError as inst:
-            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
-                          % (inst, subrelpath(self)))
+            self.ui.warn(
+                _(b'warning: error "%s" in subrepository "%s"\n')
+                % (inst, subrelpath(self))
+            )
 
     @annotatesubrepoerror
     def archive(self, archiver, prefix, match=None, decode=True):
-        self._get(self._state + ('hg',))
+        self._get(self._state + (b'hg',))
         files = self.files()
         if match:
             files = [f for f in files if match(f)]
         rev = self._state[1]
         ctx = self._repo[rev]
-        scmutil.prefetchfiles(self._repo, [ctx.rev()],
-                              scmutil.matchfiles(self._repo, files))
+        scmutil.prefetchfiles(
+            self._repo, [ctx.rev()], scmutil.matchfiles(self._repo, files)
+        )
         total = abstractsubrepo.archive(self, archiver, prefix, match)
         for subpath in ctx.substate:
             s = subrepo(ctx, subpath, True)
             submatch = matchmod.subdirmatcher(subpath, match)
-            subprefix = prefix + subpath + '/'
-            total += s.archive(archiver, subprefix, submatch,
-                               decode)
+            subprefix = prefix + subpath + b'/'
+            total += s.archive(archiver, subprefix, submatch, decode)
         return total
 
     @annotatesubrepoerror
     def dirty(self, ignoreupdate=False, missing=False):
         r = self._state[1]
-        if r == '' and not ignoreupdate: # no state recorded
+        if r == b'' and not ignoreupdate:  # no state recorded
             return True
         w = self._repo[None]
         if r != w.p1().hex() and not ignoreupdate:
             # different version checked out
             return True
-        return w.dirty(missing=missing) # working directory changed
+        return w.dirty(missing=missing)  # working directory changed
 
     def basestate(self):
-        return self._repo['.'].hex()
+        return self._repo[b'.'].hex()
 
     def checknested(self, path):
         return self._repo._checknested(self._repo.wjoin(path))
@@ -606,22 +670,22 @@
         # don't bother committing in the subrepo if it's only been
         # updated
         if not self.dirty(True):
-            return self._repo['.'].hex()
-        self.ui.debug("committing subrepo %s\n" % subrelpath(self))
+            return self._repo[b'.'].hex()
+        self.ui.debug(b"committing subrepo %s\n" % subrelpath(self))
         n = self._repo.commit(text, user, date)
         if not n:
-            return self._repo['.'].hex() # different version checked out
+            return self._repo[b'.'].hex()  # different version checked out
         return node.hex(n)
 
     @annotatesubrepoerror
     def phase(self, state):
-        return self._repo[state or '.'].phase()
+        return self._repo[state or b'.'].phase()
 
     @annotatesubrepoerror
     def remove(self):
         # we can't fully delete the repository as it may contain
         # local-only history
-        self.ui.note(_('removing subrepo %s\n') % subrelpath(self))
+        self.ui.note(_(b'removing subrepo %s\n') % subrelpath(self))
         hg.clean(self._repo, node.nullid, False)
 
     def _get(self, state):
@@ -649,38 +713,53 @@
             # A simpler option is for the user to configure clone pooling, and
             # work with that.
             if parentrepo.shared() and hg.islocal(srcurl):
-                self.ui.status(_('sharing subrepo %s from %s\n')
-                               % (subrelpath(self), srcurl))
-                shared = hg.share(self._repo._subparent.baseui,
-                                  getpeer(), self._repo.root,
-                                  update=False, bookmarks=False)
+                self.ui.status(
+                    _(b'sharing subrepo %s from %s\n')
+                    % (subrelpath(self), srcurl)
+                )
+                shared = hg.share(
+                    self._repo._subparent.baseui,
+                    getpeer(),
+                    self._repo.root,
+                    update=False,
+                    bookmarks=False,
+                )
                 self._repo = shared.local()
             else:
                 # TODO: find a common place for this and this code in the
                 # share.py wrap of the clone command.
                 if parentrepo.shared():
-                    pool = self.ui.config('share', 'pool')
+                    pool = self.ui.config(b'share', b'pool')
                     if pool:
                         pool = util.expandpath(pool)
 
                     shareopts = {
-                        'pool': pool,
-                        'mode': self.ui.config('share', 'poolnaming'),
+                        b'pool': pool,
+                        b'mode': self.ui.config(b'share', b'poolnaming'),
                     }
                 else:
                     shareopts = {}
 
-                self.ui.status(_('cloning subrepo %s from %s\n')
-                               % (subrelpath(self), util.hidepassword(srcurl)))
-                other, cloned = hg.clone(self._repo._subparent.baseui, {},
-                                         getpeer(), self._repo.root,
-                                         update=False, shareopts=shareopts)
+                self.ui.status(
+                    _(b'cloning subrepo %s from %s\n')
+                    % (subrelpath(self), util.hidepassword(srcurl))
+                )
+                other, cloned = hg.clone(
+                    self._repo._subparent.baseui,
+                    {},
+                    getpeer(),
+                    self._repo.root,
+                    update=False,
+                    shareopts=shareopts,
+                )
                 self._repo = cloned.local()
             self._initrepo(parentrepo, source, create=True)
             self._cachestorehash(srcurl)
         else:
-            self.ui.status(_('pulling subrepo %s from %s\n')
-                           % (subrelpath(self), util.hidepassword(srcurl)))
+            self.ui.status(
+                _(b'pulling subrepo %s from %s\n')
+                % (subrelpath(self), util.hidepassword(srcurl))
+            )
             cleansub = self.storeclean(srcurl)
             exchange.pull(self._repo, getpeer())
             if cleansub:
@@ -693,34 +772,39 @@
         inrepo = self._get(state)
         source, revision, kind = state
         repo = self._repo
-        repo.ui.debug("getting subrepo %s\n" % self._path)
+        repo.ui.debug(b"getting subrepo %s\n" % self._path)
         if inrepo:
             urepo = repo.unfiltered()
             ctx = urepo[revision]
             if ctx.hidden():
                 urepo.ui.warn(
-                    _('revision %s in subrepository "%s" is hidden\n')
-                    % (revision[0:12], self._path))
+                    _(b'revision %s in subrepository "%s" is hidden\n')
+                    % (revision[0:12], self._path)
+                )
                 repo = urepo
         hg.updaterepo(repo, revision, overwrite)
 
     @annotatesubrepoerror
     def merge(self, state):
         self._get(state)
-        cur = self._repo['.']
+        cur = self._repo[b'.']
         dst = self._repo[state[1]]
         anc = dst.ancestor(cur)
 
         def mergefunc():
             if anc == cur and dst.branch() == cur.branch():
-                self.ui.debug('updating subrepository "%s"\n'
-                              % subrelpath(self))
+                self.ui.debug(
+                    b'updating subrepository "%s"\n' % subrelpath(self)
+                )
                 hg.update(self._repo, state[1])
             elif anc == dst:
-                self.ui.debug('skipping subrepository "%s"\n'
-                              % subrelpath(self))
+                self.ui.debug(
+                    b'skipping subrepository "%s"\n' % subrelpath(self)
+                )
             else:
-                self.ui.debug('merging subrepository "%s"\n' % subrelpath(self))
+                self.ui.debug(
+                    b'merging subrepository "%s"\n' % subrelpath(self)
+                )
                 hg.merge(self._repo, state[1], remind=False)
 
         wctx = self._repo[None]
@@ -735,13 +819,13 @@
 
     @annotatesubrepoerror
     def push(self, opts):
-        force = opts.get('force')
-        newbranch = opts.get('new_branch')
-        ssh = opts.get('ssh')
+        force = opts.get(b'force')
+        newbranch = opts.get(b'new_branch')
+        ssh = opts.get(b'ssh')
 
         # push subrepos depth-first for coherent ordering
-        c = self._repo['.']
-        subs = c.substate # only repos that are committed
+        c = self._repo[b'.']
+        subs = c.substate  # only repos that are committed
         for s in sorted(subs):
             if c.sub(s).push(opts) == 0:
                 return False
@@ -750,12 +834,15 @@
         if not force:
             if self.storeclean(dsturl):
                 self.ui.status(
-                    _('no changes made to subrepo %s since last push to %s\n')
-                    % (subrelpath(self), util.hidepassword(dsturl)))
+                    _(b'no changes made to subrepo %s since last push to %s\n')
+                    % (subrelpath(self), util.hidepassword(dsturl))
+                )
                 return None
-        self.ui.status(_('pushing subrepo %s to %s\n') %
-            (subrelpath(self), util.hidepassword(dsturl)))
-        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
+        self.ui.status(
+            _(b'pushing subrepo %s to %s\n')
+            % (subrelpath(self), util.hidepassword(dsturl))
+        )
+        other = hg.peer(self._repo, {b'ssh': ssh}, dsturl)
         res = exchange.push(self._repo, other, force, newbranch=newbranch)
 
         # the repo is now clean
@@ -764,18 +851,18 @@
 
     @annotatesubrepoerror
     def outgoing(self, ui, dest, opts):
-        if 'rev' in opts or 'branch' in opts:
+        if b'rev' in opts or b'branch' in opts:
             opts = copy.copy(opts)
-            opts.pop('rev', None)
-            opts.pop('branch', None)
+            opts.pop(b'rev', None)
+            opts.pop(b'branch', None)
         return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
 
     @annotatesubrepoerror
     def incoming(self, ui, source, opts):
-        if 'rev' in opts or 'branch' in opts:
+        if b'rev' in opts or b'branch' in opts:
             opts = copy.copy(opts)
-            opts.pop('rev', None)
-            opts.pop('branch', None)
+            opts.pop(b'rev', None)
+            opts.pop(b'branch', None)
         return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
 
     @annotatesubrepoerror
@@ -825,8 +912,10 @@
                 pm = matchmod.prefixdirmatcher(subpath, sm, badfn=badfn)
                 matchers.append(pm)
             except error.LookupError:
-                self.ui.status(_("skipping missing subrepository: %s\n")
-                               % self.wvfs.reljoin(reporelpath(self), subpath))
+                self.ui.status(
+                    _(b"skipping missing subrepository: %s\n")
+                    % self.wvfs.reljoin(reporelpath(self), subpath)
+                )
         if len(matchers) == 1:
             return matchers[0]
         return matchmod.unionmatcher(matchers)
@@ -837,14 +926,40 @@
 
     @annotatesubrepoerror
     def forget(self, match, prefix, uipathfn, dryrun, interactive):
-        return cmdutil.forget(self.ui, self._repo, match, prefix, uipathfn,
-                              True, dryrun=dryrun, interactive=interactive)
+        return cmdutil.forget(
+            self.ui,
+            self._repo,
+            match,
+            prefix,
+            uipathfn,
+            True,
+            dryrun=dryrun,
+            interactive=interactive,
+        )
 
     @annotatesubrepoerror
-    def removefiles(self, matcher, prefix, uipathfn, after, force, subrepos,
-                    dryrun, warnings):
-        return cmdutil.remove(self.ui, self._repo, matcher, prefix, uipathfn,
-                              after, force, subrepos, dryrun)
+    def removefiles(
+        self,
+        matcher,
+        prefix,
+        uipathfn,
+        after,
+        force,
+        subrepos,
+        dryrun,
+        warnings,
+    ):
+        return cmdutil.remove(
+            self.ui,
+            self._repo,
+            matcher,
+            prefix,
+            uipathfn,
+            after,
+            force,
+            subrepos,
+            dryrun,
+        )
 
     @annotatesubrepoerror
     def revert(self, substate, *pats, **opts):
@@ -853,7 +968,7 @@
         #    files inside the subrepo
         # 2. update the subrepo to the revision specified in
         #    the corresponding substate dictionary
-        self.ui.status(_('reverting subrepo %s\n') % substate[0])
+        self.ui.status(_(b'reverting subrepo %s\n') % substate[0])
         if not opts.get(r'no_backup'):
             # Revert all files on the subrepo, creating backups
             # Note that this will not recursively revert subrepos
@@ -872,7 +987,7 @@
         ctx = self._repo[opts[r'rev']]
         parents = self._repo.dirstate.parents()
         if opts.get(r'all'):
-            pats = ['set:modified()']
+            pats = [b'set:modified()']
         else:
             pats = []
         cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)
@@ -888,13 +1003,14 @@
         # scripts that don't use our demand-loading
         global hg
         from . import hg as h
+
         hg = h
 
         # Nothing prevents a user from sharing in a repo, and then making that a
         # subrepo.  Alternately, the previous unshare attempt may have failed
         # part way through.  So recurse whether or not this layer is shared.
         if self._repo.shared():
-            self.ui.status(_("unsharing subrepo '%s'\n") % self._relpath)
+            self.ui.status(_(b"unsharing subrepo '%s'\n") % self._relpath)
 
         hg.unshare(self.ui, self._repo)
 
@@ -906,14 +1022,18 @@
                 # Since hidden revisions aren't pushed/pulled, it seems worth an
                 # explicit warning.
                 ui = self._repo.ui
-                ui.warn(_("subrepo '%s' is hidden in revision %s\n") %
-                        (self._relpath, node.short(self._ctx.node())))
+                ui.warn(
+                    _(b"subrepo '%s' is hidden in revision %s\n")
+                    % (self._relpath, node.short(self._ctx.node()))
+                )
             return 0
         except error.RepoLookupError:
             # A missing subrepo revision may be a case of needing to pull it, so
             # don't treat this as an error.
-            self._repo.ui.warn(_("subrepo '%s' not found in revision %s\n") %
-                               (self._relpath, node.short(self._ctx.node())))
+            self._repo.ui.warn(
+                _(b"subrepo '%s' not found in revision %s\n")
+                % (self._relpath, node.short(self._ctx.node()))
+            )
             return 0
 
     @propertycache
@@ -929,16 +1049,18 @@
         # Keep consistent dir separators by avoiding vfs.join(self._path)
         return reporelpath(self._repo)
 
+
 class svnsubrepo(abstractsubrepo):
     def __init__(self, ctx, path, state, allowcreate):
         super(svnsubrepo, self).__init__(ctx, path)
         self._state = state
-        self._exe = procutil.findexe('svn')
+        self._exe = procutil.findexe(b'svn')
         if not self._exe:
-            raise error.Abort(_("'svn' executable not found for subrepo '%s'")
-                             % self._path)
+            raise error.Abort(
+                _(b"'svn' executable not found for subrepo '%s'") % self._path
+            )
 
-    def _svncommand(self, commands, filename='', failok=False):
+    def _svncommand(self, commands, filename=b'', failok=False):
         cmd = [self._exe]
         extrakw = {}
         if not self.ui.interactive():
@@ -949,59 +1071,70 @@
             # instead of being per-command, but we need to support 1.4 so
             # we have to be intelligent about what commands take
             # --non-interactive.
-            if commands[0] in ('update', 'checkout', 'commit'):
-                cmd.append('--non-interactive')
+            if commands[0] in (b'update', b'checkout', b'commit'):
+                cmd.append(b'--non-interactive')
         cmd.extend(commands)
         if filename is not None:
-            path = self.wvfs.reljoin(self._ctx.repo().origroot,
-                                     self._path, filename)
+            path = self.wvfs.reljoin(
+                self._ctx.repo().origroot, self._path, filename
+            )
             cmd.append(path)
         env = dict(encoding.environ)
         # Avoid localized output, preserve current locale for everything else.
-        lc_all = env.get('LC_ALL')
+        lc_all = env.get(b'LC_ALL')
         if lc_all:
-            env['LANG'] = lc_all
-            del env['LC_ALL']
-        env['LC_MESSAGES'] = 'C'
-        p = subprocess.Popen(pycompat.rapply(procutil.tonativestr, cmd),
-                             bufsize=-1, close_fds=procutil.closefds,
-                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                             env=procutil.tonativeenv(env), **extrakw)
+            env[b'LANG'] = lc_all
+            del env[b'LC_ALL']
+        env[b'LC_MESSAGES'] = b'C'
+        p = subprocess.Popen(
+            pycompat.rapply(procutil.tonativestr, cmd),
+            bufsize=-1,
+            close_fds=procutil.closefds,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            env=procutil.tonativeenv(env),
+            **extrakw
+        )
         stdout, stderr = map(util.fromnativeeol, p.communicate())
         stderr = stderr.strip()
         if not failok:
             if p.returncode:
-                raise error.Abort(stderr or 'exited with code %d'
-                                  % p.returncode)
+                raise error.Abort(
+                    stderr or b'exited with code %d' % p.returncode
+                )
             if stderr:
-                self.ui.warn(stderr + '\n')
+                self.ui.warn(stderr + b'\n')
         return stdout, stderr
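
The locale handling above keeps the user's locale for everything except
messages, which are forced to ``C`` so svn's output stays parseable. Just
that environment setup, as a sketch (``svn_env`` is a hypothetical
helper)::

   import os

   def svn_env(base=None):
       env = dict(base if base is not None else os.environ)
       lc_all = env.pop('LC_ALL', None)
       if lc_all:
           env['LANG'] = lc_all  # preserve the rest of the locale
       env['LC_MESSAGES'] = 'C'  # untranslated, machine-readable output
       return env
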
 
     @propertycache
     def _svnversion(self):
-        output, err = self._svncommand(['--version', '--quiet'], filename=None)
+        output, err = self._svncommand(
+            [b'--version', b'--quiet'], filename=None
+        )
         m = re.search(br'^(\d+)\.(\d+)', output)
         if not m:
-            raise error.Abort(_('cannot retrieve svn tool version'))
+            raise error.Abort(_(b'cannot retrieve svn tool version'))
         return (int(m.group(1)), int(m.group(2)))
 
     def _svnmissing(self):
-        return not self.wvfs.exists('.svn')
+        return not self.wvfs.exists(b'.svn')
 
     def _wcrevs(self):
         # Get the working directory revision as well as the last
         # commit revision so we can compare the subrepo state with
         # both. We used to store the working directory one.
-        output, err = self._svncommand(['info', '--xml'])
+        output, err = self._svncommand([b'info', b'--xml'])
         doc = xml.dom.minidom.parseString(output)
         entries = doc.getElementsByTagName(r'entry')
-        lastrev, rev = '0', '0'
+        lastrev, rev = b'0', b'0'
         if entries:
-            rev = pycompat.bytestr(entries[0].getAttribute(r'revision')) or '0'
+            rev = pycompat.bytestr(entries[0].getAttribute(r'revision')) or b'0'
             commits = entries[0].getElementsByTagName(r'commit')
             if commits:
-                lastrev = pycompat.bytestr(
-                    commits[0].getAttribute(r'revision')) or '0'
+                lastrev = (
+                    pycompat.bytestr(commits[0].getAttribute(r'revision'))
+                    or b'0'
+                )
         return (lastrev, rev)
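
A self-contained sketch of the same ``svn info --xml`` extraction over
canned output, using the element and attribute names read above::

   import xml.dom.minidom

   SAMPLE = b'''<?xml version="1.0"?>
   <info><entry revision="42" kind="dir" path=".">
   <commit revision="40"/></entry></info>'''

   def wcrevs(xmlout):
       doc = xml.dom.minidom.parseString(xmlout)
       entries = doc.getElementsByTagName('entry')
       lastrev, rev = '0', '0'
       if entries:
           rev = entries[0].getAttribute('revision') or '0'
           commits = entries[0].getElementsByTagName('commit')
           if commits:
               lastrev = commits[0].getAttribute('revision') or '0'
       return lastrev, rev

   assert wcrevs(SAMPLE) == ('40', '42')
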
 
     def _wcrev(self):
@@ -1013,7 +1146,7 @@
         True if any of these changes concern an external entry and missing
         is True if any change is a missing entry.
         """
-        output, err = self._svncommand(['status', '--xml'])
+        output, err = self._svncommand([b'status', b'--xml'])
         externals, changes, missing = [], [], []
         doc = xml.dom.minidom.parseString(output)
         for e in doc.getElementsByTagName(r'entry'):
@@ -1027,8 +1160,12 @@
                 externals.append(path)
             elif item == r'missing':
                 missing.append(path)
-            if (item not in (r'', r'normal', r'unversioned', r'external')
-                or props not in (r'', r'none', r'normal')):
+            if item not in (
+                r'',
+                r'normal',
+                r'unversioned',
+                r'external',
+            ) or props not in (r'', r'none', r'normal'):
                 changes.append(path)
         for path in changes:
             for ext in externals:
@@ -1039,7 +1176,7 @@
     @annotatesubrepoerror
     def dirty(self, ignoreupdate=False, missing=False):
         if self._svnmissing():
-            return self._state[1] != ''
+            return self._state[1] != b''
         wcchanged = self._wcchanged()
         changed = wcchanged[0] or (missing and wcchanged[2])
         if not changed:
@@ -1055,7 +1192,9 @@
             # URL exists at lastrev.  Test it and fallback to rev it
             # is not there.
             try:
-                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
+                self._svncommand(
+                    [b'list', b'%s@%s' % (self._state[0], lastrev)]
+                )
                 return lastrev
             except error.Abort:
                 pass
@@ -1069,33 +1208,35 @@
             return self.basestate()
         if extchanged:
             # Do not try to commit externals
-            raise error.Abort(_('cannot commit svn externals'))
+            raise error.Abort(_(b'cannot commit svn externals'))
         if missing:
             # svn can commit with missing entries but aborting like hg
             # seems a better approach.
-            raise error.Abort(_('cannot commit missing svn entries'))
-        commitinfo, err = self._svncommand(['commit', '-m', text])
+            raise error.Abort(_(b'cannot commit missing svn entries'))
+        commitinfo, err = self._svncommand([b'commit', b'-m', text])
         self.ui.status(commitinfo)
-        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
+        newrev = re.search(b'Committed revision ([0-9]+).', commitinfo)
         if not newrev:
             if not commitinfo.strip():
                 # Sometimes, our definition of "changed" differs from
                 # svn one. For instance, svn ignores missing files
                 # when committing. If there are only missing files, no
                 # commit is made, no output and no error code.
-                raise error.Abort(_('failed to commit svn changes'))
+                raise error.Abort(_(b'failed to commit svn changes'))
             raise error.Abort(commitinfo.splitlines()[-1])
         newrev = newrev.groups()[0]
-        self.ui.status(self._svncommand(['update', '-r', newrev])[0])
+        self.ui.status(self._svncommand([b'update', b'-r', newrev])[0])
         return newrev
 
     @annotatesubrepoerror
     def remove(self):
         if self.dirty():
-            self.ui.warn(_('not removing repo %s because '
-                           'it has changes.\n') % self._path)
+            self.ui.warn(
+                _(b'not removing repo %s because it has changes.\n')
+                % self._path
+            )
             return
-        self.ui.note(_('removing subrepo %s\n') % self._path)
+        self.ui.note(_(b'removing subrepo %s\n') % self._path)
 
         self.wvfs.rmtree(forcibly=True)
         try:
@@ -1107,22 +1248,23 @@
     @annotatesubrepoerror
     def get(self, state, overwrite=False):
         if overwrite:
-            self._svncommand(['revert', '--recursive'])
-        args = ['checkout']
+            self._svncommand([b'revert', b'--recursive'])
+        args = [b'checkout']
         if self._svnversion >= (1, 5):
-            args.append('--force')
+            args.append(b'--force')
         # The revision must be specified at the end of the URL to properly
         # update to a directory which has since been deleted and recreated.
-        args.append('%s@%s' % (state[0], state[1]))
+        args.append(b'%s@%s' % (state[0], state[1]))
 
         # SEC: check that the ssh url is safe
         util.checksafessh(state[0])
 
         status, err = self._svncommand(args, failok=True)
-        _sanitize(self.ui, self.wvfs, '.svn')
-        if not re.search('Checked out revision [0-9]+.', status):
-            if ('is already a working copy for a different URL' in err
-                and (self._wcchanged()[:2] == (False, False))):
+        _sanitize(self.ui, self.wvfs, b'.svn')
+        if not re.search(b'Checked out revision [0-9]+.', status):
+            if b'is already a working copy for a different URL' in err and (
+                self._wcchanged()[:2] == (False, False)
+            ):
                 # obstructed but clean working copy, so just blow it away.
                 self.remove()
                 self.get(state, overwrite=False)
@@ -1146,21 +1288,23 @@
 
     @annotatesubrepoerror
     def files(self):
-        output = self._svncommand(['list', '--recursive', '--xml'])[0]
+        output = self._svncommand([b'list', b'--recursive', b'--xml'])[0]
         doc = xml.dom.minidom.parseString(output)
         paths = []
         for e in doc.getElementsByTagName(r'entry'):
             kind = pycompat.bytestr(e.getAttribute(r'kind'))
-            if kind != 'file':
+            if kind != b'file':
                 continue
-            name = r''.join(c.data for c
-                            in e.getElementsByTagName(r'name')[0].childNodes
-                            if c.nodeType == c.TEXT_NODE)
+            name = r''.join(
+                c.data
+                for c in e.getElementsByTagName(r'name')[0].childNodes
+                if c.nodeType == c.TEXT_NODE
+            )
             paths.append(name.encode('utf8'))
         return paths
 
     def filedata(self, name, decode):
-        return self._svncommand(['cat'], name)[0]
+        return self._svncommand([b'cat'], name)[0]
 
 
 class gitsubrepo(abstractsubrepo):
@@ -1173,36 +1317,48 @@
 
     def _ensuregit(self):
         try:
-            self._gitexecutable = 'git'
-            out, err = self._gitnodir(['--version'])
+            self._gitexecutable = b'git'
+            out, err = self._gitnodir([b'--version'])
         except OSError as e:
-            genericerror = _("error executing git for subrepo '%s': %s")
-            notfoundhint = _("check git is installed and in your PATH")
+            genericerror = _(b"error executing git for subrepo '%s': %s")
+            notfoundhint = _(b"check git is installed and in your PATH")
             if e.errno != errno.ENOENT:
-                raise error.Abort(genericerror % (
-                    self._path, encoding.strtolocal(e.strerror)))
+                raise error.Abort(
+                    genericerror % (self._path, encoding.strtolocal(e.strerror))
+                )
             elif pycompat.iswindows:
                 try:
-                    self._gitexecutable = 'git.cmd'
-                    out, err = self._gitnodir(['--version'])
+                    self._gitexecutable = b'git.cmd'
+                    out, err = self._gitnodir([b'--version'])
                 except OSError as e2:
                     if e2.errno == errno.ENOENT:
-                        raise error.Abort(_("couldn't find 'git' or 'git.cmd'"
-                            " for subrepo '%s'") % self._path,
-                            hint=notfoundhint)
+                        raise error.Abort(
+                            _(
+                                b"couldn't find 'git' or 'git.cmd'"
+                                b" for subrepo '%s'"
+                            )
+                            % self._path,
+                            hint=notfoundhint,
+                        )
                     else:
-                        raise error.Abort(genericerror % (self._path,
-                            encoding.strtolocal(e2.strerror)))
+                        raise error.Abort(
+                            genericerror
+                            % (self._path, encoding.strtolocal(e2.strerror))
+                        )
             else:
-                raise error.Abort(_("couldn't find git for subrepo '%s'")
-                    % self._path, hint=notfoundhint)
+                raise error.Abort(
+                    _(b"couldn't find git for subrepo '%s'") % self._path,
+                    hint=notfoundhint,
+                )
         versionstatus = self._checkversion(out)
-        if versionstatus == 'unknown':
-            self.ui.warn(_('cannot retrieve git version\n'))
-        elif versionstatus == 'abort':
-            raise error.Abort(_('git subrepo requires at least 1.6.0 or later'))
-        elif versionstatus == 'warning':
-            self.ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
+        if versionstatus == b'unknown':
+            self.ui.warn(_(b'cannot retrieve git version\n'))
+        elif versionstatus == b'abort':
+            raise error.Abort(
+                _(b'git subrepo requires at least 1.6.0 or later')
+            )
+        elif versionstatus == b'warning':
+            self.ui.warn(_(b'git subrepo requires at least 1.6.0 or later\n'))
 
     @staticmethod
     def _gitversion(out):
@@ -1245,19 +1401,20 @@
         # despite the docstring comment.  For now, error on 1.4.0, warn on
         # 1.5.0 but attempt to continue.
         if version == -1:
-            return 'unknown'
+            return b'unknown'
         if version < (1, 5, 0):
-            return 'abort'
+            return b'abort'
         elif version < (1, 6, 0):
-            return 'warning'
-        return 'ok'
+            return b'warning'
+        return b'ok'
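
``_checkversion`` maps a parsed version tuple onto the abort/warn policy.
A runnable sketch with a simplified parser (the real ``_gitversion``
tolerates more output formats; ``-1`` means "could not parse")::

   import re

   def gitversion(out):
       m = re.match(r'git version (\d+)\.(\d+)(?:\.(\d+))?', out)
       if not m:
           return -1
       return tuple(int(g or 0) for g in m.groups())

   def checkversion(out):
       version = gitversion(out)
       if version == -1:
           return 'unknown'
       if version < (1, 5, 0):
           return 'abort'
       elif version < (1, 6, 0):
           return 'warning'
       return 'ok'

   assert checkversion('git version 2.24.0') == 'ok'
   assert checkversion('git version 1.5.3') == 'warning'
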
 
     def _gitcommand(self, commands, env=None, stream=False):
         return self._gitdir(commands, env=env, stream=stream)[0]
 
     def _gitdir(self, commands, env=None, stream=False):
-        return self._gitnodir(commands, env=env, stream=stream,
-                              cwd=self._abspath)
+        return self._gitnodir(
+            commands, env=env, stream=stream, cwd=self._abspath
+        )
 
     def _gitnodir(self, commands, env=None, stream=False, cwd=None):
         """Calls the git command
@@ -1265,30 +1422,34 @@
         The method tries to call the git command. Versions prior to 1.6.0
         are not supported and will very probably fail.
         """
-        self.ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
+        self.ui.debug(b'%s: git %s\n' % (self._relpath, b' '.join(commands)))
         if env is None:
             env = encoding.environ.copy()
         # disable localization for Git output (issue5176)
-        env['LC_ALL'] = 'C'
+        env[b'LC_ALL'] = b'C'
         # fix for Git CVE-2015-7545
-        if 'GIT_ALLOW_PROTOCOL' not in env:
-            env['GIT_ALLOW_PROTOCOL'] = 'file:git:http:https:ssh'
+        if b'GIT_ALLOW_PROTOCOL' not in env:
+            env[b'GIT_ALLOW_PROTOCOL'] = b'file:git:http:https:ssh'
         # unless ui.quiet is set, print git's stderr,
         # which is mostly progress and useful info
         errpipe = None
         if self.ui.quiet:
-            errpipe = open(os.devnull, 'w')
-        if self.ui._colormode and len(commands) and commands[0] == "diff":
+            errpipe = open(os.devnull, b'w')
+        if self.ui._colormode and len(commands) and commands[0] == b"diff":
             # insert the argument in the front,
             # the end of git diff arguments is used for paths
-            commands.insert(1, '--color')
-        p = subprocess.Popen(pycompat.rapply(procutil.tonativestr,
-                                             [self._gitexecutable] + commands),
-                             bufsize=-1,
-                             cwd=pycompat.rapply(procutil.tonativestr, cwd),
-                             env=procutil.tonativeenv(env),
-                             close_fds=procutil.closefds,
-                             stdout=subprocess.PIPE, stderr=errpipe)
+            commands.insert(1, b'--color')
+        p = subprocess.Popen(
+            pycompat.rapply(
+                procutil.tonativestr, [self._gitexecutable] + commands
+            ),
+            bufsize=-1,
+            cwd=pycompat.rapply(procutil.tonativestr, cwd),
+            env=procutil.tonativeenv(env),
+            close_fds=procutil.closefds,
+            stdout=subprocess.PIPE,
+            stderr=errpipe,
+        )
         if stream:
             return p.stdout, None
 
@@ -1299,48 +1460,50 @@
         if p.returncode != 0 and p.returncode != 1:
             # there are certain error codes that are ok
             command = commands[0]
-            if command in ('cat-file', 'symbolic-ref'):
+            if command in (b'cat-file', b'symbolic-ref'):
                 return retdata, p.returncode
             # for all others, abort
-            raise error.Abort(_('git %s error %d in %s') %
-                             (command, p.returncode, self._relpath))
+            raise error.Abort(
+                _(b'git %s error %d in %s')
+                % (command, p.returncode, self._relpath)
+            )
 
         return retdata, p.returncode
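
Before spawning git, the environment is hardened: output localization is
disabled and, per the CVE-2015-7545 fix, transport protocols are
restricted unless the caller already chose a policy. Just that setup, as
a standalone sketch::

   import os

   def git_env(base=None):
       env = dict(base if base is not None else os.environ)
       env['LC_ALL'] = 'C'  # disable localized git output (issue5176)
       # fix for Git CVE-2015-7545: whitelist transport protocols
       env.setdefault('GIT_ALLOW_PROTOCOL', 'file:git:http:https:ssh')
       return env
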
 
     def _gitmissing(self):
-        return not self.wvfs.exists('.git')
+        return not self.wvfs.exists(b'.git')
 
     def _gitstate(self):
-        return self._gitcommand(['rev-parse', 'HEAD'])
+        return self._gitcommand([b'rev-parse', b'HEAD'])
 
     def _gitcurrentbranch(self):
-        current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
+        current, err = self._gitdir([b'symbolic-ref', b'HEAD', b'--quiet'])
         if err:
             current = None
         return current
 
     def _gitremote(self, remote):
-        out = self._gitcommand(['remote', 'show', '-n', remote])
-        line = out.split('\n')[1]
-        i = line.index('URL: ') + len('URL: ')
+        out = self._gitcommand([b'remote', b'show', b'-n', remote])
+        line = out.split(b'\n')[1]
+        i = line.index(b'URL: ') + len(b'URL: ')
         return line[i:]
 
     def _githavelocally(self, revision):
-        out, code = self._gitdir(['cat-file', '-e', revision])
+        out, code = self._gitdir([b'cat-file', b'-e', revision])
         return code == 0
 
     def _gitisancestor(self, r1, r2):
-        base = self._gitcommand(['merge-base', r1, r2])
+        base = self._gitcommand([b'merge-base', r1, r2])
         return base == r1
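
The ancestry test works because ``git merge-base r1 r2`` prints ``r1``
itself exactly when ``r1`` is an ancestor of ``r2``. The same check with
plain ``subprocess`` standing in for ``_gitcommand`` (sketch only,
assuming a checkout at ``cwd``)::

   import subprocess

   def git_is_ancestor(cwd, r1, r2):
       base = subprocess.check_output(
           ['git', 'merge-base', r1, r2], cwd=cwd
       ).strip().decode('ascii')
       return base == r1
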
 
     def _gitisbare(self):
-        return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
+        return self._gitcommand([b'config', b'--bool', b'core.bare']) == b'true'
 
     def _gitupdatestat(self):
         """This must be run before git diff-index.
         diff-index only looks at changes to file stat;
         this command looks at file contents and updates the stat."""
-        self._gitcommand(['update-index', '-q', '--refresh'])
+        self._gitcommand([b'update-index', b'-q', b'--refresh'])
 
     def _gitbranchmap(self):
         '''returns 2 things:
@@ -1349,39 +1512,42 @@
         branch2rev = {}
         rev2branch = {}
 
-        out = self._gitcommand(['for-each-ref', '--format',
-                                '%(objectname) %(refname)'])
-        for line in out.split('\n'):
-            revision, ref = line.split(' ')
-            if (not ref.startswith('refs/heads/') and
-                not ref.startswith('refs/remotes/')):
+        out = self._gitcommand(
+            [b'for-each-ref', b'--format', b'%(objectname) %(refname)']
+        )
+        for line in out.split(b'\n'):
+            revision, ref = line.split(b' ')
+            if not ref.startswith(b'refs/heads/') and not ref.startswith(
+                b'refs/remotes/'
+            ):
                 continue
-            if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
-                continue # ignore remote/HEAD redirects
+            if ref.startswith(b'refs/remotes/') and ref.endswith(b'/HEAD'):
+                continue  # ignore remote/HEAD redirects
             branch2rev[ref] = revision
             rev2branch.setdefault(revision, []).append(ref)
         return branch2rev, rev2branch
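
A sketch of the same ``for-each-ref`` parse over canned output (format
``%(objectname) %(refname)``, one ref per line), showing the two maps it
builds::

   SAMPLE = (b'1111 refs/heads/master\n'
             b'1111 refs/remotes/origin/master\n'
             b'2222 refs/remotes/origin/HEAD\n'
             b'3333 refs/tags/v1')

   def branchmaps(out):
       branch2rev, rev2branch = {}, {}
       for line in out.split(b'\n'):
           revision, ref = line.split(b' ')
           if not ref.startswith((b'refs/heads/', b'refs/remotes/')):
               continue
           if ref.startswith(b'refs/remotes/') and ref.endswith(b'/HEAD'):
               continue  # ignore remote/HEAD redirects
           branch2rev[ref] = revision
           rev2branch.setdefault(revision, []).append(ref)
       return branch2rev, rev2branch

   b2r, r2b = branchmaps(SAMPLE)
   assert r2b[b'1111'] == [b'refs/heads/master',
                           b'refs/remotes/origin/master']
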
 
     def _gittracking(self, branches):
-        'return map of remote branch to local tracking branch'
+        'return map of remote branch to local tracking branch'
         # assumes no more than one local tracking branch for each remote
         tracking = {}
         for b in branches:
-            if b.startswith('refs/remotes/'):
+            if b.startswith(b'refs/remotes/'):
                 continue
-            bname = b.split('/', 2)[2]
-            remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
+            bname = b.split(b'/', 2)[2]
+            remote = self._gitcommand([b'config', b'branch.%s.remote' % bname])
             if remote:
-                ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
-                tracking['refs/remotes/%s/%s' %
-                         (remote, ref.split('/', 2)[2])] = b
+                ref = self._gitcommand([b'config', b'branch.%s.merge' % bname])
+                tracking[
+                    b'refs/remotes/%s/%s' % (remote, ref.split(b'/', 2)[2])
+                ] = b
         return tracking
 
     def _abssource(self, source):
-        if '://' not in source:
+        if b'://' not in source:
             # recognize the scp syntax as an absolute source
-            colon = source.find(':')
-            if colon != -1 and '/' not in source[:colon]:
+            colon = source.find(b':')
+            if colon != -1 and b'/' not in source[:colon]:
                 return source
         self._subsource = source
         return _abssource(self)
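
The scp-shorthand test above: a source without ``://`` still counts as
absolute when a ``:`` appears before any ``/``, as in ``user@host:path``.
Isolated for illustration::

   def is_scp_url(source):
       if b'://' in source:
           return False
       colon = source.find(b':')
       return colon != -1 and b'/' not in source[:colon]

   assert is_scp_url(b'git@example.com:repo.git')
   assert not is_scp_url(b'https://example.com/repo.git')
   assert not is_scp_url(b'relative/path:odd')
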
@@ -1392,23 +1558,28 @@
             util.checksafessh(source)
 
             source = self._abssource(source)
-            self.ui.status(_('cloning subrepo %s from %s\n') %
-                            (self._relpath, source))
-            self._gitnodir(['clone', source, self._abspath])
+            self.ui.status(
+                _(b'cloning subrepo %s from %s\n') % (self._relpath, source)
+            )
+            self._gitnodir([b'clone', source, self._abspath])
         if self._githavelocally(revision):
             return
-        self.ui.status(_('pulling subrepo %s from %s\n') %
-                        (self._relpath, self._gitremote('origin')))
+        self.ui.status(
+            _(b'pulling subrepo %s from %s\n')
+            % (self._relpath, self._gitremote(b'origin'))
+        )
         # try only origin: the originally cloned repo
-        self._gitcommand(['fetch'])
+        self._gitcommand([b'fetch'])
         if not self._githavelocally(revision):
-            raise error.Abort(_('revision %s does not exist in subrepository '
-                                '"%s"\n') % (revision, self._relpath))
+            raise error.Abort(
+                _(b'revision %s does not exist in subrepository "%s"\n')
+                % (revision, self._relpath)
+            )
 
     @annotatesubrepoerror
     def dirty(self, ignoreupdate=False, missing=False):
         if self._gitmissing():
-            return self._state[1] != ''
+            return self._state[1] != b''
         if self._gitisbare():
             return True
         if not ignoreupdate and self._state[1] != self._gitstate():
@@ -1416,7 +1587,7 @@
             return True
         # check for staged changes or modified files; ignore untracked files
         self._gitupdatestat()
-        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
+        out, code = self._gitdir([b'diff-index', b'--quiet', b'HEAD'])
         return code == 1
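
The dirty check refreshes git's stat cache first, then lets
``diff-index --quiet HEAD`` report changes through its exit status
(0 clean, 1 dirty). The same sequence with plain ``subprocess`` (sketch,
assuming a checkout at ``cwd``)::

   import subprocess

   def git_workdir_dirty(cwd):
       # refresh the stat cache so diff-index sees content changes
       subprocess.call(['git', 'update-index', '-q', '--refresh'], cwd=cwd)
       rc = subprocess.call(['git', 'diff-index', '--quiet', 'HEAD'],
                            cwd=cwd)
       return rc == 1
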
 
     def basestate(self):
@@ -1431,38 +1602,41 @@
         self._fetch(source, revision)
         # if the repo was set to be bare, unbare it
         if self._gitisbare():
-            self._gitcommand(['config', 'core.bare', 'false'])
+            self._gitcommand([b'config', b'core.bare', b'false'])
             if self._gitstate() == revision:
-                self._gitcommand(['reset', '--hard', 'HEAD'])
+                self._gitcommand([b'reset', b'--hard', b'HEAD'])
                 return
         elif self._gitstate() == revision:
             if overwrite:
                 # first reset the index to unmark new files for commit, because
                 # reset --hard will otherwise throw away files added for commit,
                 # not just unmark them.
-                self._gitcommand(['reset', 'HEAD'])
-                self._gitcommand(['reset', '--hard', 'HEAD'])
+                self._gitcommand([b'reset', b'HEAD'])
+                self._gitcommand([b'reset', b'--hard', b'HEAD'])
             return
         branch2rev, rev2branch = self._gitbranchmap()
 
         def checkout(args):
-            cmd = ['checkout']
+            cmd = [b'checkout']
             if overwrite:
                 # first reset the index to unmark new files for commit, because
                 # the -f option will otherwise throw away files added for
                 # commit, not just unmark them.
-                self._gitcommand(['reset', 'HEAD'])
-                cmd.append('-f')
+                self._gitcommand([b'reset', b'HEAD'])
+                cmd.append(b'-f')
             self._gitcommand(cmd + args)
-            _sanitize(self.ui, self.wvfs, '.git')
+            _sanitize(self.ui, self.wvfs, b'.git')
 
         def rawcheckout():
             # no branch to checkout, check it out with no branch
-            self.ui.warn(_('checking out detached HEAD in '
-                           'subrepository "%s"\n') % self._relpath)
-            self.ui.warn(_('check out a git branch if you intend '
-                            'to make changes\n'))
-            checkout(['-q', revision])
+            self.ui.warn(
+                _(b'checking out detached HEAD in subrepository "%s"\n')
+                % self._relpath
+            )
+            self.ui.warn(
+                _(b'check out a git branch if you intend to make changes\n')
+            )
+            checkout([b'-q', revision])
 
         if revision not in rev2branch:
             rawcheckout()
@@ -1470,11 +1644,11 @@
         branches = rev2branch[revision]
         firstlocalbranch = None
         for b in branches:
-            if b == 'refs/heads/master':
+            if b == b'refs/heads/master':
                 # master trumps all other branches
-                checkout(['refs/heads/master'])
+                checkout([b'refs/heads/master'])
                 return
-            if not firstlocalbranch and not b.startswith('refs/remotes/'):
+            if not firstlocalbranch and not b.startswith(b'refs/remotes/'):
                 firstlocalbranch = b
         if firstlocalbranch:
             checkout([firstlocalbranch])
@@ -1491,8 +1665,8 @@
 
         if remote not in tracking:
             # create a new local tracking branch
-            local = remote.split('/', 3)[3]
-            checkout(['-b', local, remote])
+            local = remote.split(b'/', 3)[3]
+            checkout([b'-b', local, remote])
         elif self._gitisancestor(branch2rev[tracking[remote]], remote):
             # When updating to a tracked remote branch,
             # if the local tracking branch is downstream of it,
@@ -1502,8 +1676,8 @@
             # detect this situation and perform this action lazily.
             if tracking[remote] != self._gitcurrentbranch():
                 checkout([tracking[remote]])
-            self._gitcommand(['merge', '--ff', remote])
-            _sanitize(self.ui, self.wvfs, '.git')
+            self._gitcommand([b'merge', b'--ff', remote])
+            _sanitize(self.ui, self.wvfs, b'.git')
         else:
             # a real merge would be required, just checkout the revision
             rawcheckout()
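
A note on the hunk above: the new local tracking branch takes its name
from splitting the remote ref on b'/' at most three times, so everything
after "refs/remotes/<remote>/" is kept verbatim. A minimal sketch with
hypothetical ref names, not from this change:

    remote = b'refs/remotes/origin/topic'
    assert remote.split(b'/', 3)[3] == b'topic'

    # Slashes inside the branch name survive, because only the first
    # three separators are consumed by the split:
    nested = b'refs/remotes/origin/feature/x'
    assert nested.split(b'/', 3)[3] == b'feature/x'
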
@@ -1511,16 +1685,17 @@
     @annotatesubrepoerror
     def commit(self, text, user, date):
         if self._gitmissing():
-            raise error.Abort(_("subrepo %s is missing") % self._relpath)
-        cmd = ['commit', '-a', '-m', text]
+            raise error.Abort(_(b"subrepo %s is missing") % self._relpath)
+        cmd = [b'commit', b'-a', b'-m', text]
         env = encoding.environ.copy()
         if user:
-            cmd += ['--author', user]
+            cmd += [b'--author', user]
         if date:
             # git's date parser silently ignores dates when seconds < 1e9,
             # so convert to ISO8601
-            env['GIT_AUTHOR_DATE'] = dateutil.datestr(date,
-                                                  '%Y-%m-%dT%H:%M:%S %1%2')
+            env[b'GIT_AUTHOR_DATE'] = dateutil.datestr(
+                date, b'%Y-%m-%dT%H:%M:%S %1%2'
+            )
         self._gitcommand(cmd, env=env)
         # make sure commit works; otherwise HEAD might not exist under
         # certain circumstances
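
For context on the GIT_AUTHOR_DATE hunk above: Mercurial represents dates
as (unixtime, offset) pairs with the offset in seconds west of UTC, and
'%1' / '%2' are Mercurial extensions to the strftime syntax for the
offset's sign-plus-hours and minutes. A rough sketch of the rendering
under those assumptions (not the real dateutil.datestr implementation):

    import time

    def iso8601(date):
        when, offset = date  # offset: seconds west of UTC
        tm = time.gmtime(when - offset)  # shift to local wall-clock time
        sign = b'-' if offset > 0 else b'+'
        hours, minutes = divmod(abs(offset) // 60, 60)
        stamp = time.strftime('%Y-%m-%dT%H:%M:%S', tm).encode('ascii')
        return stamp + b' ' + sign + b'%02d%02d' % (hours, minutes)

    # 2019-10-21 11:09:48 in UTC-4:
    assert iso8601((1571670588, 14400)) == b'2019-10-21T11:09:48 -0400'
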
@@ -1530,64 +1705,76 @@
     def merge(self, state):
         source, revision, kind = state
         self._fetch(source, revision)
-        base = self._gitcommand(['merge-base', revision, self._state[1]])
+        base = self._gitcommand([b'merge-base', revision, self._state[1]])
         self._gitupdatestat()
-        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
+        out, code = self._gitdir([b'diff-index', b'--quiet', b'HEAD'])
 
         def mergefunc():
             if base == revision:
-                self.get(state) # fast forward merge
+                self.get(state)  # fast forward merge
             elif base != self._state[1]:
-                self._gitcommand(['merge', '--no-commit', revision])
-            _sanitize(self.ui, self.wvfs, '.git')
+                self._gitcommand([b'merge', b'--no-commit', revision])
+            _sanitize(self.ui, self.wvfs, b'.git')
 
         if self.dirty():
             if self._gitstate() != revision:
                 dirty = self._gitstate() == self._state[1] or code != 0
-                if _updateprompt(self.ui, self, dirty,
-                                 self._state[1][:7], revision[:7]):
+                if _updateprompt(
+                    self.ui, self, dirty, self._state[1][:7], revision[:7]
+                ):
                     mergefunc()
         else:
             mergefunc()
 
     @annotatesubrepoerror
     def push(self, opts):
-        force = opts.get('force')
+        force = opts.get(b'force')
 
         if not self._state[1]:
             return True
         if self._gitmissing():
-            raise error.Abort(_("subrepo %s is missing") % self._relpath)
+            raise error.Abort(_(b"subrepo %s is missing") % self._relpath)
         # if a branch in origin contains the revision, nothing to do
         branch2rev, rev2branch = self._gitbranchmap()
         if self._state[1] in rev2branch:
             for b in rev2branch[self._state[1]]:
-                if b.startswith('refs/remotes/origin/'):
+                if b.startswith(b'refs/remotes/origin/'):
                     return True
-        for b, revision in branch2rev.iteritems():
-            if b.startswith('refs/remotes/origin/'):
+        for b, revision in pycompat.iteritems(branch2rev):
+            if b.startswith(b'refs/remotes/origin/'):
                 if self._gitisancestor(self._state[1], revision):
                     return True
         # otherwise, try to push the currently checked out branch
-        cmd = ['push']
+        cmd = [b'push']
         if force:
-            cmd.append('--force')
+            cmd.append(b'--force')
 
         current = self._gitcurrentbranch()
         if current:
             # determine if the current branch is even useful
             if not self._gitisancestor(self._state[1], current):
-                self.ui.warn(_('unrelated git branch checked out '
-                               'in subrepository "%s"\n') % self._relpath)
+                self.ui.warn(
+                    _(
+                        b'unrelated git branch checked out '
+                        b'in subrepository "%s"\n'
+                    )
+                    % self._relpath
+                )
                 return False
-            self.ui.status(_('pushing branch %s of subrepository "%s"\n') %
-                           (current.split('/', 2)[2], self._relpath))
-            ret = self._gitdir(cmd + ['origin', current])
+            self.ui.status(
+                _(b'pushing branch %s of subrepository "%s"\n')
+                % (current.split(b'/', 2)[2], self._relpath)
+            )
+            ret = self._gitdir(cmd + [b'origin', current])
             return ret[1] == 0
         else:
-            self.ui.warn(_('no branch checked out in subrepository "%s"\n'
-                           'cannot push revision %s\n') %
-                          (self._relpath, self._state[1]))
+            self.ui.warn(
+                _(
+                    b'no branch checked out in subrepository "%s"\n'
+                    b'cannot push revision %s\n'
+                )
+                % (self._relpath, self._state[1])
+            )
             return False
 
     @annotatesubrepoerror
@@ -1611,11 +1798,11 @@
         files = [f for f in sorted(set(files)) if match(f)]
         for f in files:
             exact = match.exact(f)
-            command = ["add"]
+            command = [b"add"]
             if exact:
-                command.append("-f") #should be added, even if ignored
+                command.append(b"-f")  # should be added, even if ignored
             if ui.verbose or not exact:
-                ui.status(_('adding %s\n') % uipathfn(f))
+                ui.status(_(b'adding %s\n') % uipathfn(f))
 
             if f in tracked:  # hg prints 'adding' even if already tracked
                 if exact:
@@ -1625,7 +1812,7 @@
                 self._gitcommand(command + [f])
 
         for f in rejected:
-            ui.warn(_("%s already tracked!\n") % uipathfn(f))
+            ui.warn(_(b"%s already tracked!\n") % uipathfn(f))
 
         return rejected
 
@@ -1634,15 +1821,17 @@
         if self._gitmissing():
             return
         if self.dirty():
-            self.ui.warn(_('not removing repo %s because '
-                           'it has changes.\n') % self._relpath)
+            self.ui.warn(
+                _(b'not removing repo %s because it has changes.\n')
+                % self._relpath
+            )
             return
         # we can't fully delete the repository as it may contain
         # local-only history
-        self.ui.note(_('removing subrepo %s\n') % self._relpath)
-        self._gitcommand(['config', 'core.bare', 'true'])
+        self.ui.note(_(b'removing subrepo %s\n') % self._relpath)
+        self._gitcommand([b'config', b'core.bare', b'true'])
         for f, kind in self.wvfs.readdir():
-            if f == '.git':
+            if f == b'.git':
                 continue
             if kind == stat.S_IFDIR:
                 self.wvfs.rmtree(f)
@@ -1659,11 +1848,12 @@
         # Parse git's native archive command.
         # This should be much faster than manually traversing the trees
         # and objects with many subprocess calls.
-        tarstream = self._gitcommand(['archive', revision], stream=True)
+        tarstream = self._gitcommand([b'archive', revision], stream=True)
         tar = tarfile.open(fileobj=tarstream, mode=r'r|')
         relpath = subrelpath(self)
-        progress = self.ui.makeprogress(_('archiving (%s)') % relpath,
-                                        unit=_('files'))
+        progress = self.ui.makeprogress(
+            _(b'archiving (%s)') % relpath, unit=_(b'files')
+        )
         progress.update(0)
         for info in tar:
             if info.isdir():
@@ -1681,26 +1871,25 @@
         progress.complete()
         return total
 
-
     @annotatesubrepoerror
     def cat(self, match, fm, fntemplate, prefix, **opts):
         rev = self._state[1]
         if match.anypats():
-            return 1 #No support for include/exclude yet
+            return 1  # No support for include/exclude yet
 
         if not match.files():
             return 1
 
         # TODO: add support for non-plain formatter (see cmdutil.cat())
         for f in match.files():
-            output = self._gitcommand(["show", "%s:%s" % (rev, f)])
-            fp = cmdutil.makefileobj(self._ctx, fntemplate,
-                                     pathname=self.wvfs.reljoin(prefix, f))
+            output = self._gitcommand([b"show", b"%s:%s" % (rev, f)])
+            fp = cmdutil.makefileobj(
+                self._ctx, fntemplate, pathname=self.wvfs.reljoin(prefix, f)
+            )
             fp.write(output)
             fp.close()
         return 0
 
-
     @annotatesubrepoerror
     def status(self, rev2, **opts):
         rev1 = self._state[1]
@@ -1710,42 +1899,42 @@
         modified, added, removed = [], [], []
         self._gitupdatestat()
         if rev2:
-            command = ['diff-tree', '--no-renames', '-r', rev1, rev2]
+            command = [b'diff-tree', b'--no-renames', b'-r', rev1, rev2]
         else:
-            command = ['diff-index', '--no-renames', rev1]
+            command = [b'diff-index', b'--no-renames', rev1]
         out = self._gitcommand(command)
-        for line in out.split('\n'):
-            tab = line.find('\t')
+        for line in out.split(b'\n'):
+            tab = line.find(b'\t')
             if tab == -1:
                 continue
-            status, f = line[tab - 1:tab], line[tab + 1:]
-            if status == 'M':
+            status, f = line[tab - 1 : tab], line[tab + 1 :]
+            if status == b'M':
                 modified.append(f)
-            elif status == 'A':
+            elif status == b'A':
                 added.append(f)
-            elif status == 'D':
+            elif status == b'D':
                 removed.append(f)
 
         deleted, unknown, ignored, clean = [], [], [], []
 
-        command = ['status', '--porcelain', '-z']
+        command = [b'status', b'--porcelain', b'-z']
         if opts.get(r'unknown'):
-            command += ['--untracked-files=all']
+            command += [b'--untracked-files=all']
         if opts.get(r'ignored'):
-            command += ['--ignored']
+            command += [b'--ignored']
         out = self._gitcommand(command)
 
         changedfiles = set()
         changedfiles.update(modified)
         changedfiles.update(added)
         changedfiles.update(removed)
-        for line in out.split('\0'):
+        for line in out.split(b'\0'):
             if not line:
                 continue
             st = line[0:2]
-            #moves and copies show 2 files on one line
-            if line.find('\0') >= 0:
-                filename1, filename2 = line[3:].split('\0')
+            # moves and copies show 2 files on one line
+            if line.find(b'\0') >= 0:
+                filename1, filename2 = line[3:].split(b'\0')
             else:
                 filename1 = line[3:]
                 filename2 = None
@@ -1754,65 +1943,70 @@
             if filename2:
                 changedfiles.add(filename2)
 
-            if st == '??':
+            if st == b'??':
                 unknown.append(filename1)
-            elif st == '!!':
+            elif st == b'!!':
                 ignored.append(filename1)
 
         if opts.get(r'clean'):
-            out = self._gitcommand(['ls-files'])
-            for f in out.split('\n'):
+            out = self._gitcommand([b'ls-files'])
+            for f in out.split(b'\n'):
                 if not f in changedfiles:
                     clean.append(f)
 
-        return scmutil.status(modified, added, removed, deleted,
-                              unknown, ignored, clean)
+        return scmutil.status(
+            modified, added, removed, deleted, unknown, ignored, clean
+        )
 
     @annotatesubrepoerror
     def diff(self, ui, diffopts, node2, match, prefix, **opts):
         node1 = self._state[1]
-        cmd = ['diff', '--no-renames']
+        cmd = [b'diff', b'--no-renames']
         if opts[r'stat']:
-            cmd.append('--stat')
+            cmd.append(b'--stat')
         else:
             # for Git, this also implies '-p'
-            cmd.append('-U%d' % diffopts.context)
+            cmd.append(b'-U%d' % diffopts.context)
 
         if diffopts.noprefix:
-            cmd.extend(['--src-prefix=%s/' % prefix,
-                        '--dst-prefix=%s/' % prefix])
+            cmd.extend(
+                [b'--src-prefix=%s/' % prefix, b'--dst-prefix=%s/' % prefix]
+            )
         else:
-            cmd.extend(['--src-prefix=a/%s/' % prefix,
-                        '--dst-prefix=b/%s/' % prefix])
+            cmd.extend(
+                [b'--src-prefix=a/%s/' % prefix, b'--dst-prefix=b/%s/' % prefix]
+            )
 
         if diffopts.ignorews:
-            cmd.append('--ignore-all-space')
+            cmd.append(b'--ignore-all-space')
         if diffopts.ignorewsamount:
-            cmd.append('--ignore-space-change')
-        if (self._gitversion(self._gitcommand(['--version'])) >= (1, 8, 4)
-            and diffopts.ignoreblanklines):
-            cmd.append('--ignore-blank-lines')
+            cmd.append(b'--ignore-space-change')
+        if (
+            self._gitversion(self._gitcommand([b'--version'])) >= (1, 8, 4)
+            and diffopts.ignoreblanklines
+        ):
+            cmd.append(b'--ignore-blank-lines')
 
         cmd.append(node1)
         if node2:
             cmd.append(node2)
 
-        output = ""
+        output = b""
         if match.always():
-            output += self._gitcommand(cmd) + '\n'
+            output += self._gitcommand(cmd) + b'\n'
         else:
             st = self.status(node2)[:3]
             files = [f for sublist in st for f in sublist]
             for f in files:
                 if match(f):
-                    output += self._gitcommand(cmd + ['--', f]) + '\n'
+                    output += self._gitcommand(cmd + [b'--', f]) + b'\n'
 
         if output.strip():
             ui.write(output)
 
     @annotatesubrepoerror
     def revert(self, substate, *pats, **opts):
-        self.ui.status(_('reverting subrepo %s\n') % substate[0])
+        self.ui.status(_(b'reverting subrepo %s\n') % substate[0])
         if not opts.get(r'no_backup'):
             status = self.status(None)
             names = status.modified
@@ -1820,10 +2014,13 @@
                 # backuppath() expects a path relative to the parent repo (the
                 # repo that ui.origbackuppath is relative to)
                 parentname = os.path.join(self._path, name)
-                bakname = scmutil.backuppath(self.ui, self._subparent,
-                                             parentname)
-                self.ui.note(_('saving current version of %s as %s\n') %
-                        (name, os.path.relpath(bakname)))
+                bakname = scmutil.backuppath(
+                    self.ui, self._subparent, parentname
+                )
+                self.ui.note(
+                    _(b'saving current version of %s as %s\n')
+                    % (name, os.path.relpath(bakname))
+                )
                 util.rename(self.wvfs.join(name), bakname)
 
         if not opts.get(r'dry_run'):
@@ -1833,8 +2030,9 @@
     def shortid(self, revid):
         return revid[:7]
 
+
 types = {
-    'hg': hgsubrepo,
-    'svn': svnsubrepo,
-    'git': gitsubrepo,
-    }
+    b'hg': hgsubrepo,
+    b'svn': svnsubrepo,
+    b'git': gitsubrepo,
+}
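
The pycompat.iteritems() calls introduced throughout this file replace
dict.iteritems(), which no longer exists on Python 3. The real shim lives
in mercurial/pycompat.py; the sketch below only shows the general shape
of such a helper:

    import sys

    if sys.version_info[0] >= 3:
        def iteritems(d):
            # items() is already a lazy view on Python 3
            return iter(d.items())
    else:
        def iteritems(d):
            return d.iteritems()

    branch2rev = {b'refs/remotes/origin/master': b'1234abcd'}
    for branch, rev in iteritems(branch2rev):
        assert isinstance(branch, bytes) and isinstance(rev, bytes)
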
--- a/mercurial/subrepoutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/subrepoutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,19 +13,20 @@
 import re
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     config,
     error,
     filemerge,
     pathutil,
     phases,
+    pycompat,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
-nullstate = ('', '', 'empty')
+nullstate = (b'', b'', b'empty')
+
 
 def state(ctx, ui):
     """return a state dict, mapping subrepo paths configured in .hgsub
@@ -34,6 +35,7 @@
     """
     p = config.config()
     repo = ctx.repo()
+
     def read(f, sections=None, remap=None):
         if f in ctx:
             try:
@@ -42,39 +44,47 @@
                 if err.errno != errno.ENOENT:
                     raise
                 # handle missing subrepo spec files as removed
-                ui.warn(_("warning: subrepo spec file \'%s\' not found\n") %
-                        repo.pathto(f))
+                ui.warn(
+                    _(b"warning: subrepo spec file \'%s\' not found\n")
+                    % repo.pathto(f)
+                )
                 return
             p.parse(f, data, sections, remap, read)
         else:
-            raise error.Abort(_("subrepo spec file \'%s\' not found") %
-                             repo.pathto(f))
-    if '.hgsub' in ctx:
-        read('.hgsub')
+            raise error.Abort(
+                _(b"subrepo spec file \'%s\' not found") % repo.pathto(f)
+            )
 
-    for path, src in ui.configitems('subpaths'):
-        p.set('subpaths', path, src, ui.configsource('subpaths', path))
+    if b'.hgsub' in ctx:
+        read(b'.hgsub')
+
+    for path, src in ui.configitems(b'subpaths'):
+        p.set(b'subpaths', path, src, ui.configsource(b'subpaths', path))
 
     rev = {}
-    if '.hgsubstate' in ctx:
+    if b'.hgsubstate' in ctx:
         try:
-            for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
+            for i, l in enumerate(ctx[b'.hgsubstate'].data().splitlines()):
                 l = l.lstrip()
                 if not l:
                     continue
                 try:
-                    revision, path = l.split(" ", 1)
+                    revision, path = l.split(b" ", 1)
                 except ValueError:
-                    raise error.Abort(_("invalid subrepository revision "
-                                       "specifier in \'%s\' line %d")
-                                     % (repo.pathto('.hgsubstate'), (i + 1)))
+                    raise error.Abort(
+                        _(
+                            b"invalid subrepository revision "
+                            b"specifier in \'%s\' line %d"
+                        )
+                        % (repo.pathto(b'.hgsubstate'), (i + 1))
+                    )
                 rev[path] = revision
         except IOError as err:
             if err.errno != errno.ENOENT:
                 raise
 
     def remap(src):
-        for pattern, repl in p.items('subpaths'):
+        for pattern, repl in p.items(b'subpaths'):
             # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
             # does a string decode.
             repl = stringutil.escapestr(repl)
@@ -85,26 +95,30 @@
             try:
                 src = re.sub(pattern, repl, src, 1)
             except re.error as e:
-                raise error.Abort(_("bad subrepository pattern in %s: %s")
-                                 % (p.source('subpaths', pattern),
-                                    stringutil.forcebytestr(e)))
+                raise error.Abort(
+                    _(b"bad subrepository pattern in %s: %s")
+                    % (
+                        p.source(b'subpaths', pattern),
+                        stringutil.forcebytestr(e),
+                    )
+                )
         return src
 
     state = {}
-    for path, src in p[''].items():
-        kind = 'hg'
-        if src.startswith('['):
-            if ']' not in src:
-                raise error.Abort(_('missing ] in subrepository source'))
-            kind, src = src.split(']', 1)
+    for path, src in p[b''].items():
+        kind = b'hg'
+        if src.startswith(b'['):
+            if b']' not in src:
+                raise error.Abort(_(b'missing ] in subrepository source'))
+            kind, src = src.split(b']', 1)
             kind = kind[1:]
-            src = src.lstrip() # strip any extra whitespace after ']'
+            src = src.lstrip()  # strip any extra whitespace after ']'
 
         if not util.url(src).isabs():
             parent = _abssource(repo, abort=False)
             if parent:
                 parent = util.url(parent)
-                parent.path = posixpath.join(parent.path or '', src)
+                parent.path = posixpath.join(parent.path or b'', src)
                 parent.path = posixpath.normpath(parent.path)
                 joined = bytes(parent)
                 # Remap the full joined path and use it if it changes,
@@ -116,125 +130,152 @@
                     src = remapped
 
         src = remap(src)
-        state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
+        state[util.pconvert(path)] = (src.strip(), rev.get(path, b''), kind)
 
     return state
 
+
 def writestate(repo, state):
     """rewrite .hgsubstate in (outer) repo with these subrepo states"""
-    lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)
-                                                if state[s][1] != nullstate[1]]
-    repo.wwrite('.hgsubstate', ''.join(lines), '')
+    lines = [
+        b'%s %s\n' % (state[s][1], s)
+        for s in sorted(state)
+        if state[s][1] != nullstate[1]
+    ]
+    repo.wwrite(b'.hgsubstate', b''.join(lines), b'')
+
 
 def submerge(repo, wctx, mctx, actx, overwrite, labels=None):
     """delegated from merge.applyupdates: merging of .hgsubstate file
     in working context, merging context and ancestor context"""
-    if mctx == actx: # backwards?
+    if mctx == actx:  # backwards?
         actx = wctx.p1()
     s1 = wctx.substate
     s2 = mctx.substate
     sa = actx.substate
     sm = {}
 
-    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
+    repo.ui.debug(b"subrepo merge %s %s %s\n" % (wctx, mctx, actx))
 
-    def debug(s, msg, r=""):
+    def debug(s, msg, r=b""):
         if r:
-            r = "%s:%s:%s" % r
-        repo.ui.debug("  subrepo %s: %s %s\n" % (s, msg, r))
+            r = b"%s:%s:%s" % r
+        repo.ui.debug(b"  subrepo %s: %s %s\n" % (s, msg, r))
 
     promptssrc = filemerge.partextras(labels)
-    for s, l in sorted(s1.iteritems()):
+    for s, l in sorted(pycompat.iteritems(s1)):
         a = sa.get(s, nullstate)
-        ld = l # local state with possible dirty flag for compares
+        ld = l  # local state with possible dirty flag for compares
         if wctx.sub(s).dirty():
-            ld = (l[0], l[1] + "+")
-        if wctx == actx: # overwrite
+            ld = (l[0], l[1] + b"+")
+        if wctx == actx:  # overwrite
             a = ld
 
         prompts = promptssrc.copy()
-        prompts['s'] = s
+        prompts[b's'] = s
         if s in s2:
             r = s2[s]
-            if ld == r or r == a: # no change or local is newer
+            if ld == r or r == a:  # no change or local is newer
                 sm[s] = l
                 continue
-            elif ld == a: # other side changed
-                debug(s, "other changed, get", r)
+            elif ld == a:  # other side changed
+                debug(s, b"other changed, get", r)
                 wctx.sub(s).get(r, overwrite)
                 sm[s] = r
-            elif ld[0] != r[0]: # sources differ
-                prompts['lo'] = l[0]
-                prompts['ro'] = r[0]
+            elif ld[0] != r[0]:  # sources differ
+                prompts[b'lo'] = l[0]
+                prompts[b'ro'] = r[0]
                 if repo.ui.promptchoice(
-                    _(' subrepository sources for %(s)s differ\n'
-                      'you can use (l)ocal%(l)s source (%(lo)s)'
-                      ' or (r)emote%(o)s source (%(ro)s).\n'
-                      'what do you want to do?'
-                      '$$ &Local $$ &Remote') % prompts, 0):
-                    debug(s, "prompt changed, get", r)
+                    _(
+                        b' subrepository sources for %(s)s differ\n'
+                        b'you can use (l)ocal%(l)s source (%(lo)s)'
+                        b' or (r)emote%(o)s source (%(ro)s).\n'
+                        b'what do you want to do?'
+                        b'$$ &Local $$ &Remote'
+                    )
+                    % prompts,
+                    0,
+                ):
+                    debug(s, b"prompt changed, get", r)
                     wctx.sub(s).get(r, overwrite)
                     sm[s] = r
-            elif ld[1] == a[1]: # local side is unchanged
-                debug(s, "other side changed, get", r)
+            elif ld[1] == a[1]:  # local side is unchanged
+                debug(s, b"other side changed, get", r)
                 wctx.sub(s).get(r, overwrite)
                 sm[s] = r
             else:
-                debug(s, "both sides changed")
+                debug(s, b"both sides changed")
                 srepo = wctx.sub(s)
-                prompts['sl'] = srepo.shortid(l[1])
-                prompts['sr'] = srepo.shortid(r[1])
+                prompts[b'sl'] = srepo.shortid(l[1])
+                prompts[b'sr'] = srepo.shortid(r[1])
                 option = repo.ui.promptchoice(
-                    _(' subrepository %(s)s diverged (local revision: %(sl)s, '
-                      'remote revision: %(sr)s)\n'
-                      'you can (m)erge, keep (l)ocal%(l)s or keep '
-                      '(r)emote%(o)s.\n'
-                      'what do you want to do?'
-                      '$$ &Merge $$ &Local $$ &Remote')
-                    % prompts, 0)
+                    _(
+                        b' subrepository %(s)s diverged (local revision: %(sl)s, '
+                        b'remote revision: %(sr)s)\n'
+                        b'you can (m)erge, keep (l)ocal%(l)s or keep '
+                        b'(r)emote%(o)s.\n'
+                        b'what do you want to do?'
+                        b'$$ &Merge $$ &Local $$ &Remote'
+                    )
+                    % prompts,
+                    0,
+                )
                 if option == 0:
                     wctx.sub(s).merge(r)
                     sm[s] = l
-                    debug(s, "merge with", r)
+                    debug(s, b"merge with", r)
                 elif option == 1:
                     sm[s] = l
-                    debug(s, "keep local subrepo revision", l)
+                    debug(s, b"keep local subrepo revision", l)
                 else:
                     wctx.sub(s).get(r, overwrite)
                     sm[s] = r
-                    debug(s, "get remote subrepo revision", r)
-        elif ld == a: # remote removed, local unchanged
-            debug(s, "remote removed, remove")
+                    debug(s, b"get remote subrepo revision", r)
+        elif ld == a:  # remote removed, local unchanged
+            debug(s, b"remote removed, remove")
             wctx.sub(s).remove()
-        elif a == nullstate: # not present in remote or ancestor
-            debug(s, "local added, keep")
+        elif a == nullstate:  # not present in remote or ancestor
+            debug(s, b"local added, keep")
             sm[s] = l
             continue
         else:
             if repo.ui.promptchoice(
-                _(' local%(l)s changed subrepository %(s)s'
-                  ' which remote%(o)s removed\n'
-                  'use (c)hanged version or (d)elete?'
-                  '$$ &Changed $$ &Delete') % prompts, 0):
-                debug(s, "prompt remove")
+                _(
+                    b' local%(l)s changed subrepository %(s)s'
+                    b' which remote%(o)s removed\n'
+                    b'use (c)hanged version or (d)elete?'
+                    b'$$ &Changed $$ &Delete'
+                )
+                % prompts,
+                0,
+            ):
+                debug(s, b"prompt remove")
                 wctx.sub(s).remove()
 
     for s, r in sorted(s2.items()):
         if s in s1:
             continue
         elif s not in sa:
-            debug(s, "remote added, get", r)
+            debug(s, b"remote added, get", r)
             mctx.sub(s).get(r)
             sm[s] = r
         elif r != sa[s]:
             prompts = promptssrc.copy()
-            prompts['s'] = s
-            if repo.ui.promptchoice(
-                _(' remote%(o)s changed subrepository %(s)s'
-                  ' which local%(l)s removed\n'
-                  'use (c)hanged version or (d)elete?'
-                  '$$ &Changed $$ &Delete') % prompts, 0) == 0:
-                debug(s, "prompt recreate", r)
+            prompts[b's'] = s
+            if (
+                repo.ui.promptchoice(
+                    _(
+                        b' remote%(o)s changed subrepository %(s)s'
+                        b' which local%(l)s removed\n'
+                        b'use (c)hanged version or (d)elete?'
+                        b'$$ &Changed $$ &Delete'
+                    )
+                    % prompts,
+                    0,
+                )
+                == 0
+            ):
+                debug(s, b"prompt recreate", r)
                 mctx.sub(s).get(r)
                 sm[s] = r
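
The prompt strings reflowed in this hunk use ui.promptchoice()'s
convention: '$$' separates the message from the choices, and '&' marks
each choice's keyboard shortcut. A toy parser, illustrative only and not
the real ui code, makes the syntax concrete:

    def splitchoices(prompt):
        # message first; every '$$'-separated part after it is a choice
        parts = prompt.split(b'$$')
        message = parts[0].rstrip()
        choices = [p.strip() for p in parts[1:]]
        # the character after '&' is that choice's shortcut key
        keys = [c[c.index(b'&') + 1:c.index(b'&') + 2].lower()
                for c in choices]
        return message, choices, keys

    msg, choices, keys = splitchoices(
        b'use (c)hanged version or (d)elete?$$ &Changed $$ &Delete'
    )
    assert choices == [b'&Changed', b'&Delete']
    assert keys == [b'c', b'd']
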
 
@@ -242,6 +283,7 @@
     writestate(repo, sm)
     return sm
 
+
 def precommit(ui, wctx, status, match, force=False):
     """Calculate .hgsubstate changes that should be applied before committing
 
@@ -257,11 +299,11 @@
     newstate = wctx.substate.copy()
 
     # only manage subrepos and .hgsubstate if .hgsub is present
-    if '.hgsub' in wctx:
+    if b'.hgsub' in wctx:
         # we'll decide whether to track this ourselves, thanks
         for c in status.modified, status.added, status.removed:
-            if '.hgsubstate' in c:
-                c.remove('.hgsubstate')
+            if b'.hgsubstate' in c:
+                c.remove(b'.hgsubstate')
 
         # compare current state to last committed state
         # build new substate based on last committed state
@@ -274,12 +316,15 @@
                     continue
                 if not force:
                     raise error.Abort(
-                        _("commit with new subrepo %s excluded") % s)
+                        _(b"commit with new subrepo %s excluded") % s
+                    )
             dirtyreason = wctx.sub(s).dirtyreason(True)
             if dirtyreason:
-                if not ui.configbool('ui', 'commitsubrepos'):
-                    raise error.Abort(dirtyreason,
-                        hint=_("use --subrepos for recursive commit"))
+                if not ui.configbool(b'ui', b'commitsubrepos'):
+                    raise error.Abort(
+                        dirtyreason,
+                        hint=_(b"use --subrepos for recursive commit"),
+                    )
                 subs.append(s)
                 commitsubs.add(s)
             else:
@@ -293,35 +338,39 @@
             r = [s for s in p.substate if s not in newstate]
             subs += [s for s in r if match(s)]
         if subs:
-            if (not match('.hgsub') and
-                '.hgsub' in (wctx.modified() + wctx.added())):
-                raise error.Abort(_("can't commit subrepos without .hgsub"))
-            status.modified.insert(0, '.hgsubstate')
+            if not match(b'.hgsub') and b'.hgsub' in (
+                wctx.modified() + wctx.added()
+            ):
+                raise error.Abort(_(b"can't commit subrepos without .hgsub"))
+            status.modified.insert(0, b'.hgsubstate')
 
-    elif '.hgsub' in status.removed:
+    elif b'.hgsub' in status.removed:
         # clean up .hgsubstate when .hgsub is removed
-        if ('.hgsubstate' in wctx and
-            '.hgsubstate' not in (status.modified + status.added +
-                                  status.removed)):
-            status.removed.insert(0, '.hgsubstate')
+        if b'.hgsubstate' in wctx and b'.hgsubstate' not in (
+            status.modified + status.added + status.removed
+        ):
+            status.removed.insert(0, b'.hgsubstate')
 
     return subs, commitsubs, newstate
 
+
 def reporelpath(repo):
     """return path to this (sub)repo as seen from outermost repo"""
     parent = repo
-    while util.safehasattr(parent, '_subparent'):
+    while util.safehasattr(parent, b'_subparent'):
         parent = parent._subparent
-    return repo.root[len(pathutil.normasprefix(parent.root)):]
+    return repo.root[len(pathutil.normasprefix(parent.root)) :]
+
 
 def subrelpath(sub):
     """return path to this subrepo as seen from outermost repo"""
     return sub._relpath
 
+
 def _abssource(repo, push=False, abort=True):
     """return pull/push path of repo - either based on parent repo .hgsub info
     or on the top repo config. Abort or return None if no source found."""
-    if util.safehasattr(repo, '_subparent'):
+    if util.safehasattr(repo, b'_subparent'):
         source = util.url(repo._subsource)
         if source.isabs():
             return bytes(source)
@@ -329,17 +378,17 @@
         parent = _abssource(repo._subparent, push, abort=False)
         if parent:
             parent = util.url(util.pconvert(parent))
-            parent.path = posixpath.join(parent.path or '', source.path)
+            parent.path = posixpath.join(parent.path or b'', source.path)
             parent.path = posixpath.normpath(parent.path)
             return bytes(parent)
-    else: # recursion reached top repo
+    else:  # recursion reached top repo
         path = None
-        if util.safehasattr(repo, '_subtoppath'):
+        if util.safehasattr(repo, b'_subtoppath'):
             path = repo._subtoppath
-        elif push and repo.ui.config('paths', 'default-push'):
-            path = repo.ui.config('paths', 'default-push')
-        elif repo.ui.config('paths', 'default'):
-            path = repo.ui.config('paths', 'default')
+        elif push and repo.ui.config(b'paths', b'default-push'):
+            path = repo.ui.config(b'paths', b'default-push')
+        elif repo.ui.config(b'paths', b'default'):
+            path = repo.ui.config(b'paths', b'default')
         elif repo.shared():
             # chop off the .hg component to get the default path form.  This has
             # already run through vfsmod.vfs(..., realpath=True), so it doesn't
@@ -363,18 +412,20 @@
             return path
 
     if abort:
-        raise error.Abort(_("default path for subrepository not found"))
+        raise error.Abort(_(b"default path for subrepository not found"))
+
 
 def newcommitphase(ui, ctx):
     commitphase = phases.newcommitphase(ui)
     substate = getattr(ctx, "substate", None)
     if not substate:
         return commitphase
-    check = ui.config('phases', 'checksubrepos')
-    if check not in ('ignore', 'follow', 'abort'):
-        raise error.Abort(_('invalid phases.checksubrepos configuration: %s')
-                         % (check))
-    if check == 'ignore':
+    check = ui.config(b'phases', b'checksubrepos')
+    if check not in (b'ignore', b'follow', b'abort'):
+        raise error.Abort(
+            _(b'invalid phases.checksubrepos configuration: %s') % check
+        )
+    if check == b'ignore':
         return commitphase
     maxphase = phases.public
     maxsub = None
@@ -385,13 +436,24 @@
             maxphase = subphase
             maxsub = s
     if commitphase < maxphase:
-        if check == 'abort':
-            raise error.Abort(_("can't commit in %s phase"
-                               " conflicting %s from subrepository %s") %
-                             (phases.phasenames[commitphase],
-                              phases.phasenames[maxphase], maxsub))
-        ui.warn(_("warning: changes are committed in"
-                  " %s phase from subrepository %s\n") %
-                (phases.phasenames[maxphase], maxsub))
+        if check == b'abort':
+            raise error.Abort(
+                _(
+                    b"can't commit in %s phase"
+                    b" conflicting %s from subrepository %s"
+                )
+                % (
+                    phases.phasenames[commitphase],
+                    phases.phasenames[maxphase],
+                    maxsub,
+                )
+            )
+        ui.warn(
+            _(
+                b"warning: changes are committed in"
+                b" %s phase from subrepository %s\n"
+            )
+            % (phases.phasenames[maxphase], maxsub)
+        )
         return maxphase
     return commitphase
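
To make writestate()'s on-disk format concrete: .hgsubstate holds one
"<revision> <path>" line per subrepo, sorted by path, with unset
revisions omitted. A sketch using a plain dict in place of the real
state mapping (render_hgsubstate is a hypothetical helper):

    def render_hgsubstate(state):
        # state maps subrepo path -> (source, revision, kind)
        lines = [
            b'%s %s\n' % (revision, path)
            for path, (source, revision, kind) in sorted(state.items())
            if revision != b''
        ]
        return b''.join(lines)

    state = {
        b'libfoo': (b'https://example.com/libfoo', b'a' * 40, b'hg'),
        b'pending': (b'../elsewhere', b'', b'hg'),  # skipped: no revision
    }
    assert render_hgsubstate(state) == b'%s libfoo\n' % (b'a' * 40)
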
--- a/mercurial/tagmerge.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/tagmerge.py	Mon Oct 21 11:09:48 2019 -0400
@@ -78,28 +78,31 @@
     hex,
     nullid,
 )
-from .import (
+from . import (
     tags as tagsmod,
     util,
 )
 
 hexnullid = hex(nullid)
 
-def readtagsformerge(ui, repo, lines, fn='', keeplinenums=False):
+
+def readtagsformerge(ui, repo, lines, fn=b'', keeplinenums=False):
     '''read the .hgtags file into a structure that is suitable for merging
 
     Depending on the keeplinenums flag, clear the line numbers associated
     with each tag. This is done because only the line numbers of the first
     parent are useful for merging.
     '''
-    filetags = tagsmod._readtaghist(ui, repo, lines, fn=fn, recode=None,
-                                    calcnodelines=True)[1]
+    filetags = tagsmod._readtaghist(
+        ui, repo, lines, fn=fn, recode=None, calcnodelines=True
+    )[1]
     for tagname, taginfo in filetags.items():
         if not keeplinenums:
             for el in taginfo:
                 el[1] = None
     return filetags
 
+
 def grouptagnodesbyline(tagnodes):
     '''
     Group nearby nodes (i.e. those that must be written next to each other)
@@ -134,6 +137,7 @@
             prevlinenum = linenum
     return groupednodes
 
+
 def writemergedtags(fcd, mergedtags):
     '''
     write the merged tags while trying to minimize the diff to the first parent
@@ -150,7 +154,7 @@
     # convert the grouped merged tags dict into a format that resembles the
     # final .hgtags file (i.e. a list of blocks of 'node tag' pairs)
     def taglist2string(tlist, tname):
-        return '\n'.join(['%s %s' % (hexnode, tname) for hexnode in tlist])
+        return b'\n'.join([b'%s %s' % (hexnode, tname) for hexnode in tlist])
 
     finaltags = []
     for tname, tags in mergedtags.items():
@@ -166,8 +170,9 @@
 
     # finally we can join the sorted groups to get the final contents of the
     # merged .hgtags file, and then write it to disk
-    mergedtagstring = '\n'.join([tags for rank, tags in finaltags if tags])
-    fcd.write(mergedtagstring + '\n', fcd.flags())
+    mergedtagstring = b'\n'.join([tags for rank, tags in finaltags if tags])
+    fcd.write(mergedtagstring + b'\n', fcd.flags())
+
 
 def singletagmerge(p1nodes, p2nodes):
     '''
@@ -214,6 +219,7 @@
     # whole list of lr nodes
     return lrnodes + hrnodes[commonidx:]
 
+
 def merge(repo, fcd, fco, fca):
     '''
     Merge the tags of two revisions, taking into account the base tags
@@ -223,14 +229,14 @@
     # read the p1, p2 and base tags
     # only keep the line numbers for the p1 tags
     p1tags = readtagsformerge(
-        ui, repo, fcd.data().splitlines(), fn="p1 tags",
-        keeplinenums=True)
+        ui, repo, fcd.data().splitlines(), fn=b"p1 tags", keeplinenums=True
+    )
     p2tags = readtagsformerge(
-        ui, repo, fco.data().splitlines(), fn="p2 tags",
-        keeplinenums=False)
+        ui, repo, fco.data().splitlines(), fn=b"p2 tags", keeplinenums=False
+    )
     basetags = readtagsformerge(
-        ui, repo, fca.data().splitlines(), fn="base tags",
-        keeplinenums=False)
+        ui, repo, fca.data().splitlines(), fn=b"base tags", keeplinenums=False
+    )
 
     # recover the list of "lost tags" (i.e. those that were found on the base
     # revision but not on one of the revisions being merged)
@@ -259,11 +265,15 @@
 
     if conflictedtags:
         numconflicts = len(conflictedtags)
-        ui.warn(_('automatic .hgtags merge failed\n'
-            'the following %d tags are in conflict: %s\n')
-            % (numconflicts, ', '.join(sorted(conflictedtags))))
+        ui.warn(
+            _(
+                b'automatic .hgtags merge failed\n'
+                b'the following %d tags are in conflict: %s\n'
+            )
+            % (numconflicts, b', '.join(sorted(conflictedtags)))
+        )
         return True, 1
 
     writemergedtags(fcd, mergedtags)
-    ui.note(_('.hgtags merged successfully\n'))
+    ui.note(_(b'.hgtags merged successfully\n'))
     return False, 0
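
For orientation on the merge logic above: each .hgtags line is
"<40-hex changeset node> <tag name>", and repeated lines for the same
tag form its history, newest last. A toy reader, not the tagsmod API:

    def parse_hgtags(data):
        history = {}
        for line in data.splitlines():
            if not line:
                continue
            hexnode, name = line.split(b' ', 1)
            # later lines for the same tag are newer history entries
            history.setdefault(name.strip(), []).append(hexnode)
        return history

    sample = b'%s v1.0\n%s v1.0\n' % (b'0' * 40, b'1' * 40)
    assert parse_hgtags(sample)[b'v1.0'] == [b'0' * 40, b'1' * 40]
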
--- a/mercurial/tags.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/tags.py	Mon Oct 21 11:09:48 2019 -0400
@@ -27,12 +27,11 @@
     encoding,
     error,
     match as matchmod,
+    pycompat,
     scmutil,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
 # Tags computation can be expensive and caches exist to make it fast in
 # the common case.
@@ -83,6 +82,7 @@
 # The most recent changeset (in terms of revlog ordering for the head
 # setting it) for each tag is last.
 
+
 def fnoderevs(ui, repo, revs):
     """return the list of '.hgtags' fnodes used in a set revisions
 
@@ -95,6 +95,7 @@
     fnodes = _filterfnodes(fnodes, nodes)
     return fnodes
 
+
 def _nulltonone(value):
     """convert nullid to None
 
@@ -104,6 +105,7 @@
         return None
     return value
 
+
 def difftags(ui, repo, oldfnodes, newfnodes):
     """list differences between tags expressed in two set of file-nodes
 
@@ -134,6 +136,7 @@
     entries.sort()
     return entries
 
+
 def writediff(fp, difflist):
     """write tags diff information to a file.
 
@@ -153,10 +156,10 @@
 
     See documentation of difftags output for details about the input.
     """
-    add = '+A %s %s\n'
-    remove = '-R %s %s\n'
-    updateold = '-M %s %s\n'
-    updatenew = '+M %s %s\n'
+    add = b'+A %s %s\n'
+    remove = b'-R %s %s\n'
+    updateold = b'-M %s %s\n'
+    updatenew = b'+M %s %s\n'
     for tag, old, new in difflist:
         # translate to hex
         if old is not None:
@@ -172,6 +175,7 @@
             fp.write(updateold % (old, tag))
             fp.write(updatenew % (new, tag))
 
+
 def findglobaltags(ui, repo):
     '''Find global tags in a repo: return a tagsmap
 
@@ -190,8 +194,9 @@
         return alltags
 
     for head in reversed(heads):  # oldest to newest
-        assert head in repo.changelog.nodemap, (
-               "tag cache returned bogus head %s" % short(head))
+        assert (
+            head in repo.changelog.nodemap
+        ), b"tag cache returned bogus head %s" % short(head)
     fnodes = _filterfnodes(tagfnode, reversed(heads))
     alltags = _tagsfromfnodes(ui, repo, fnodes)
 
@@ -200,6 +205,7 @@
         _writetagcache(ui, repo, valid, alltags)
     return alltags
 
+
 def _filterfnodes(tagfnode, nodes):
     """return a list of unique fnodes
 
@@ -215,6 +221,7 @@
             fnodes.append(fnode)
     return fnodes
 
+
 def _tagsfromfnodes(ui, repo, fnodes):
     """return a tagsmap from a list of file-node
 
@@ -225,17 +232,18 @@
     fctx = None
     for fnode in fnodes:
         if fctx is None:
-            fctx = repo.filectx('.hgtags', fileid=fnode)
+            fctx = repo.filectx(b'.hgtags', fileid=fnode)
         else:
             fctx = fctx.filectx(fnode)
         filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
         _updatetags(filetags, alltags)
     return alltags
 
+
 def readlocaltags(ui, repo, alltags, tagtypes):
     '''Read local tags in repo. Update alltags and tagtypes.'''
     try:
-        data = repo.vfs.read("localtags")
+        data = repo.vfs.read(b"localtags")
     except IOError as inst:
         if inst.errno != errno.ENOENT:
             raise
@@ -244,8 +252,8 @@
     # localtags is in the local encoding; re-encode to UTF-8 on
     # input for consistency with the rest of this module.
     filetags = _readtags(
-        ui, repo, data.splitlines(), "localtags",
-        recode=encoding.fromlocal)
+        ui, repo, data.splitlines(), b"localtags", recode=encoding.fromlocal
+    )
 
     # remove tags pointing to invalid nodes
     cl = repo.changelog
@@ -255,7 +263,8 @@
         except (LookupError, ValueError):
             del filetags[t]
 
-    _updatetags(filetags, alltags, 'local', tagtypes)
+    _updatetags(filetags, alltags, b'local', tagtypes)
+
 
 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
     '''Read tag definitions from a file (or any source of lines).
@@ -281,16 +290,16 @@
     count = 0
 
     def dbg(msg):
-        ui.debug("%s, line %d: %s\n" % (fn, count, msg))
+        ui.debug(b"%s, line %d: %s\n" % (fn, count, msg))
 
     for nline, line in enumerate(lines):
         count += 1
         if not line:
             continue
         try:
-            (nodehex, name) = line.split(" ", 1)
+            (nodehex, name) = line.split(b" ", 1)
         except ValueError:
-            dbg("cannot parse entry")
+            dbg(b"cannot parse entry")
             continue
         name = name.strip()
         if recode:
@@ -298,7 +307,7 @@
         try:
             nodebin = bin(nodehex)
         except TypeError:
-            dbg("node '%s' is not well formed" % nodehex)
+            dbg(b"node '%s' is not well formed" % nodehex)
             continue
 
         # update filetags
@@ -314,6 +323,7 @@
         bintaghist[name].append(nodebin)
     return bintaghist, hextaglines
 
+
 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
     '''Read tag definitions from a file (or any source of lines).
 
@@ -323,8 +333,9 @@
     is the list of node ids previously associated with it (in file order).
     All node ids are binary, not hex.
     '''
-    filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
-                                       calcnodelines=calcnodelines)
+    filetags, nodelines = _readtaghist(
+        ui, repo, lines, fn, recode=recode, calcnodelines=calcnodelines
+    )
     # util.sortdict().__setitem__ is much slower at replacing than inserting
     # new entries. The difference can matter if there are thousands of tags.
     # Create a new sortdict to avoid the performance penalty.
@@ -333,6 +344,7 @@
         newtags[tag] = (taghist[-1], taghist[:-1])
     return newtags
 
+
 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
     """Incorporate the tag info read from one file into dictionnaries
 
@@ -344,7 +356,7 @@
     if tagtype is None:
         assert tagtypes is None
 
-    for name, nodehist in filetags.iteritems():
+    for name, nodehist in pycompat.iteritems(filetags):
         if name not in alltags:
             alltags[name] = nodehist
             if tagtype is not None:
@@ -357,21 +369,26 @@
         # otherwise we win because we're tip-most
         anode, ahist = nodehist
         bnode, bhist = alltags[name]
-        if (bnode != anode and anode in bhist and
-            (bnode not in ahist or len(bhist) > len(ahist))):
+        if (
+            bnode != anode
+            and anode in bhist
+            and (bnode not in ahist or len(bhist) > len(ahist))
+        ):
             anode = bnode
         elif tagtype is not None:
             tagtypes[name] = tagtype
         ahist.extend([n for n in bhist if n not in ahist])
         alltags[name] = anode, ahist
 
+
 def _filename(repo):
     """name of a tagcache file for a given repo or repoview"""
-    filename = 'tags2'
+    filename = b'tags2'
     if repo.filtername:
-        filename = '%s-%s' % (filename, repo.filtername)
+        filename = b'%s-%s' % (filename, repo.filtername)
     return filename
 
+
 def _readtagcache(ui, repo):
     '''Read the tag cache.
 
@@ -391,7 +408,7 @@
     info from each returned head. (See findglobaltags().)
     '''
     try:
-        cachefile = repo.cachevfs(_filename(repo), 'r')
+        cachefile = repo.cachevfs(_filename(repo), b'r')
         # force reading the file for static-http
         cachelines = iter(cachefile)
     except IOError:
@@ -419,14 +436,16 @@
     # (Unchanged tip trivially means no changesets have been added.
     # But, thanks to localrepository.destroyed(), it also means none
     # have been destroyed by strip or rollback.)
-    if (cacherev == tiprev
-            and cachenode == tipnode
-            and cachehash == scmutil.filteredhash(repo, tiprev)):
+    if (
+        cacherev == tiprev
+        and cachenode == tipnode
+        and cachehash == scmutil.filteredhash(repo, tiprev)
+    ):
         tags = _readtags(ui, repo, cachelines, cachefile.name)
         cachefile.close()
         return (None, None, None, tags, False)
     if cachefile:
-        cachefile.close()               # ignore rest of file
+        cachefile.close()  # ignore rest of file
 
     valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
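
As a reading aid for the validity check above (format inferred from
_readtagcache here and _writetagcache below, so treat this as a sketch):
the tags2 cache opens with a header line "<tiprev> <tiphex>[ <hash>]",
followed by "<hexnode> <tag>" entries:

    def parse_cache_header(line):
        parts = line.rstrip(b'\n').split(b' ')
        tiprev = int(parts[0])          # revision number of the cached tip
        tipnode = parts[1]              # hex node of the cached tip
        filteredhash = parts[2] if len(parts) > 2 else None
        return tiprev, tipnode, filteredhash

    assert parse_cache_header(b'42 ' + b'ab' * 20 + b'\n') == \
        (42, b'ab' * 20, None)
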
 
@@ -449,12 +468,11 @@
 
     # N.B. in case 4 (nodes destroyed), "new head" really means "newly
     # exposed".
-    if not len(repo.file('.hgtags')):
+    if not len(repo.file(b'.hgtags')):
         # No tags have ever been committed, so we can avoid a
         # potentially expensive search.
         return ([], {}, valid, None, True)
 
-
     # Now we have to look up the .hgtags filenode for every new head.
     # This is the most expensive part of finding tags, so performance
     # depends primarily on the size of newheads.  Worst case: no cache
@@ -466,6 +484,7 @@
     # cachefnode to get to each .hgtags revision quickly.
     return (repoheads, cachefnode, valid, None, True)
 
+
 def _getfnodes(ui, repo, nodes):
     """return .hgtags fnodes for a list of changeset nodes
 
@@ -483,40 +502,52 @@
     fnodescache.write()
 
     duration = util.timer() - starttime
-    ui.log('tagscache',
-           '%d/%d cache hits/lookups in %0.4f seconds\n',
-           fnodescache.hitcount, fnodescache.lookupcount, duration)
+    ui.log(
+        b'tagscache',
+        b'%d/%d cache hits/lookups in %0.4f seconds\n',
+        fnodescache.hitcount,
+        fnodescache.lookupcount,
+        duration,
+    )
     return cachefnode
 
+
 def _writetagcache(ui, repo, valid, cachetags):
     filename = _filename(repo)
     try:
-        cachefile = repo.cachevfs(filename, 'w', atomictemp=True)
+        cachefile = repo.cachevfs(filename, b'w', atomictemp=True)
     except (OSError, IOError):
         return
 
-    ui.log('tagscache', 'writing .hg/cache/%s with %d tags\n',
-           filename, len(cachetags))
+    ui.log(
+        b'tagscache',
+        b'writing .hg/cache/%s with %d tags\n',
+        filename,
+        len(cachetags),
+    )
 
     if valid[2]:
-        cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
+        cachefile.write(
+            b'%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2]))
+        )
     else:
-        cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))
+        cachefile.write(b'%d %s\n' % (valid[0], hex(valid[1])))
 
     # Tag names in the cache are in UTF-8 -- which is the whole reason
     # we keep them in UTF-8 throughout this module.  If we converted
     # them to local encoding on input, we would lose info writing them to
     # the cache.
-    for (name, (node, hist)) in sorted(cachetags.iteritems()):
+    for (name, (node, hist)) in sorted(pycompat.iteritems(cachetags)):
         for n in hist:
-            cachefile.write("%s %s\n" % (hex(n), name))
-        cachefile.write("%s %s\n" % (hex(node), name))
+            cachefile.write(b"%s %s\n" % (hex(n), name))
+        cachefile.write(b"%s %s\n" % (hex(node), name))
 
     try:
         cachefile.close()
     except (OSError, IOError):
         pass
 
+
 def tag(repo, names, node, message, local, user, date, editor=False):
     '''tag a revision with one or more symbolic names.
 
@@ -539,67 +570,70 @@
     date: date tuple to use if committing'''
 
     if not local:
-        m = matchmod.exact(['.hgtags'])
+        m = matchmod.exact([b'.hgtags'])
         if any(repo.status(match=m, unknown=True, ignored=True)):
-            raise error.Abort(_('working copy of .hgtags is changed'),
-                             hint=_('please commit .hgtags manually'))
+            raise error.Abort(
+                _(b'working copy of .hgtags is changed'),
+                hint=_(b'please commit .hgtags manually'),
+            )
 
     with repo.wlock():
-        repo.tags() # instantiate the cache
-        _tag(repo, names, node, message, local, user, date,
-             editor=editor)
+        repo.tags()  # instantiate the cache
+        _tag(repo, names, node, message, local, user, date, editor=editor)
+
 
-def _tag(repo, names, node, message, local, user, date, extra=None,
-         editor=False):
+def _tag(
+    repo, names, node, message, local, user, date, extra=None, editor=False
+):
     if isinstance(names, bytes):
         names = (names,)
 
     branches = repo.branchmap()
     for name in names:
-        repo.hook('pretag', throw=True, node=hex(node), tag=name,
-                  local=local)
+        repo.hook(b'pretag', throw=True, node=hex(node), tag=name, local=local)
         if name in branches:
-            repo.ui.warn(_("warning: tag %s conflicts with existing"
-            " branch name\n") % name)
+            repo.ui.warn(
+                _(b"warning: tag %s conflicts with existing branch name\n")
+                % name
+            )
 
     def writetags(fp, names, munge, prevtags):
         fp.seek(0, io.SEEK_END)
-        if prevtags and not prevtags.endswith('\n'):
-            fp.write('\n')
+        if prevtags and not prevtags.endswith(b'\n'):
+            fp.write(b'\n')
         for name in names:
             if munge:
                 m = munge(name)
             else:
                 m = name
 
-            if (repo._tagscache.tagtypes and
-                name in repo._tagscache.tagtypes):
+            if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
                 old = repo.tags().get(name, nullid)
-                fp.write('%s %s\n' % (hex(old), m))
-            fp.write('%s %s\n' % (hex(node), m))
+                fp.write(b'%s %s\n' % (hex(old), m))
+            fp.write(b'%s %s\n' % (hex(node), m))
         fp.close()
 
-    prevtags = ''
+    prevtags = b''
     if local:
         try:
-            fp = repo.vfs('localtags', 'r+')
+            fp = repo.vfs(b'localtags', b'r+')
         except IOError:
-            fp = repo.vfs('localtags', 'a')
+            fp = repo.vfs(b'localtags', b'a')
         else:
             prevtags = fp.read()
 
         # local tags are stored in the current charset
         writetags(fp, names, None, prevtags)
         for name in names:
-            repo.hook('tag', node=hex(node), tag=name, local=local)
+            repo.hook(b'tag', node=hex(node), tag=name, local=local)
         return
 
     try:
-        fp = repo.wvfs('.hgtags', 'rb+')
+        fp = repo.wvfs(b'.hgtags', b'rb+')
     except IOError as e:
         if e.errno != errno.ENOENT:
             raise
-        fp = repo.wvfs('.hgtags', 'ab')
+        fp = repo.wvfs(b'.hgtags', b'ab')
     else:
         prevtags = fp.read()
 
@@ -610,21 +644,24 @@
 
     repo.invalidatecaches()
 
-    if '.hgtags' not in repo.dirstate:
-        repo[None].add(['.hgtags'])
+    if b'.hgtags' not in repo.dirstate:
+        repo[None].add([b'.hgtags'])
 
-    m = matchmod.exact(['.hgtags'])
-    tagnode = repo.commit(message, user, date, extra=extra, match=m,
-                          editor=editor)
+    m = matchmod.exact([b'.hgtags'])
+    tagnode = repo.commit(
+        message, user, date, extra=extra, match=m, editor=editor
+    )
 
     for name in names:
-        repo.hook('tag', node=hex(node), tag=name, local=local)
+        repo.hook(b'tag', node=hex(node), tag=name, local=local)
 
     return tagnode
 
-_fnodescachefile = 'hgtagsfnodes1'
-_fnodesrecsize = 4 + 20 # changeset fragment + filenode
-_fnodesmissingrec = '\xff' * 24
+
+_fnodescachefile = b'hgtagsfnodes1'
+_fnodesrecsize = 4 + 20  # changeset fragment + filenode
+_fnodesmissingrec = b'\xff' * 24
+
 
 class hgtagsfnodescache(object):
     """Persistent cache mapping revisions to .hgtags filenodes.
@@ -645,6 +682,7 @@
     Instances behave like lists. ``c[i]`` works where i is a rev or
     changeset node. Missing indexes are populated automatically on access.
     """
+
     def __init__(self, repo):
         assert repo.filtername is None
 
@@ -654,11 +692,10 @@
         self.lookupcount = 0
         self.hitcount = 0
 
-
         try:
             data = repo.cachevfs.read(_fnodescachefile)
         except (OSError, IOError):
-            data = ""
+            data = b""
         self._raw = bytearray(data)
 
         # The end state of self._raw is an array that is of the exact length
@@ -675,7 +712,7 @@
 
         if rawlen < wantedlen:
             self._dirtyoffset = rawlen
-            self._raw.extend('\xff' * (wantedlen - rawlen))
+            self._raw.extend(b'\xff' * (wantedlen - rawlen))
         elif rawlen > wantedlen:
             # There's no easy way to truncate array instances. This seems
             # slightly less evil than copying a potentially large array slice.
@@ -703,7 +740,7 @@
         self.lookupcount += 1
 
         offset = rev * _fnodesrecsize
-        record = '%s' % self._raw[offset:offset + _fnodesrecsize]
+        record = b'%s' % self._raw[offset : offset + _fnodesrecsize]
         properprefix = node[0:4]
 
         # Validate and return existing entry.
@@ -738,13 +775,13 @@
                 p1fnode = None
         if p1fnode is not None:
             mctx = ctx.manifestctx()
-            fnode = mctx.readfast().get('.hgtags')
+            fnode = mctx.readfast().get(b'.hgtags')
             if fnode is None:
                 fnode = p1fnode
         if fnode is None:
             # Populate missing entry.
             try:
-                fnode = ctx.filenode('.hgtags')
+                fnode = ctx.filenode(b'.hgtags')
             except error.LookupError:
                 # No .hgtags file on this revision.
                 fnode = nullid
@@ -766,7 +803,7 @@
     def _writeentry(self, offset, prefix, fnode):
         # Slices on array instances only accept other array.
         entry = bytearray(prefix + fnode)
-        self._raw[offset:offset + _fnodesrecsize] = entry
+        self._raw[offset : offset + _fnodesrecsize] = entry
         # self._dirtyoffset could be None.
         self._dirtyoffset = min(self._dirtyoffset or 0, offset or 0)
 
@@ -779,7 +816,7 @@
         if self._dirtyoffset is None:
             return
 
-        data = self._raw[self._dirtyoffset:]
+        data = self._raw[self._dirtyoffset :]
         if not data:
             return
 
@@ -788,30 +825,37 @@
         try:
             lock = repo.wlock(wait=False)
         except error.LockError:
-            repo.ui.log('tagscache', 'not writing .hg/cache/%s because '
-                        'lock cannot be acquired\n' % (_fnodescachefile))
+            repo.ui.log(
+                b'tagscache',
+                b'not writing .hg/cache/%s because '
+                b'lock cannot be acquired\n' % _fnodescachefile,
+            )
             return
 
         try:
-            f = repo.cachevfs.open(_fnodescachefile, 'ab')
+            f = repo.cachevfs.open(_fnodescachefile, b'ab')
             try:
                 # if the file has been truncated
                 actualoffset = f.tell()
                 if actualoffset < self._dirtyoffset:
                     self._dirtyoffset = actualoffset
-                    data = self._raw[self._dirtyoffset:]
+                    data = self._raw[self._dirtyoffset :]
                 f.seek(self._dirtyoffset)
                 f.truncate()
-                repo.ui.log('tagscache',
-                            'writing %d bytes to cache/%s\n' % (
-                            len(data), _fnodescachefile))
+                repo.ui.log(
+                    b'tagscache',
+                    b'writing %d bytes to cache/%s\n'
+                    % (len(data), _fnodescachefile),
+                )
                 f.write(data)
                 self._dirtyoffset = None
             finally:
                 f.close()
         except (IOError, OSError) as inst:
-            repo.ui.log('tagscache',
-                        "couldn't write cache/%s: %s\n" % (
-                            _fnodescachefile, stringutil.forcebytestr(inst)))
+            repo.ui.log(
+                b'tagscache',
+                b"couldn't write cache/%s: %s\n"
+                % (_fnodescachefile, stringutil.forcebytestr(inst)),
+            )
         finally:
             lock.release()
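
The cache rewritten above is a flat array of fixed-width records, one
per revision, per the constants _fnodescachefile, _fnodesrecsize and
_fnodesmissingrec. A standalone sketch of that layout (helper names
invented; the real class also handles partial records, locking, and
lazy population):

    RECSIZE = 4 + 20              # changeset-hash prefix + .hgtags filenode
    MISSING = b'\xff' * RECSIZE

    raw = bytearray(MISSING * 3)  # cache for revisions 0..2, unpopulated

    def writeentry(raw, rev, nodeprefix, fnode):
        offset = rev * RECSIZE
        raw[offset : offset + RECSIZE] = bytearray(nodeprefix + fnode)

    def readentry(raw, rev, nodeprefix):
        offset = rev * RECSIZE
        record = bytes(raw[offset : offset + RECSIZE])
        if record == MISSING or record[0:4] != nodeprefix:
            return None           # absent, or stale after a strip
        return record[4:]

    writeentry(raw, 1, b'\xaa\xbb\xcc\xdd', b'\x01' * 20)
    assert readentry(raw, 1, b'\xaa\xbb\xcc\xdd') == b'\x01' * 20
    assert readentry(raw, 0, b'\xaa\xbb\xcc\xdd') is None
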
--- a/mercurial/templatefilters.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/templatefilters.py	Mon Oct 21 11:09:48 2019 -0400
@@ -42,22 +42,27 @@
 
 templatefilter = registrar.templatefilter(filters)
 
-@templatefilter('addbreaks', intype=bytes)
+
+@templatefilter(b'addbreaks', intype=bytes)
 def addbreaks(text):
     """Any text. Add an XHTML "<br />" tag before the end of
     every line except the last.
     """
-    return text.replace('\n', '<br/>\n')
+    return text.replace(b'\n', b'<br/>\n')
+
 
-agescales = [("year", 3600 * 24 * 365, 'Y'),
-             ("month", 3600 * 24 * 30, 'M'),
-             ("week", 3600 * 24 * 7, 'W'),
-             ("day", 3600 * 24, 'd'),
-             ("hour", 3600, 'h'),
-             ("minute", 60, 'm'),
-             ("second", 1, 's')]
+agescales = [
+    (b"year", 3600 * 24 * 365, b'Y'),
+    (b"month", 3600 * 24 * 30, b'M'),
+    (b"week", 3600 * 24 * 7, b'W'),
+    (b"day", 3600 * 24, b'd'),
+    (b"hour", 3600, b'h'),
+    (b"minute", 60, b'm'),
+    (b"second", 1, b's'),
+]
 
-@templatefilter('age', intype=templateutil.date)
+
+@templatefilter(b'age', intype=templateutil.date)
 def age(date, abbrev=False):
     """Date. Returns a human-readable date/time difference between the
     given date/time and the current date/time.
@@ -66,11 +71,12 @@
     def plural(t, c):
         if c == 1:
             return t
-        return t + "s"
+        return t + b"s"
+
     def fmt(t, c, a):
         if abbrev:
-            return "%d%s" % (c, a)
-        return "%d %s" % (c, plural(t, c))
+            return b"%d%s" % (c, a)
+        return b"%d %s" % (c, plural(t, c))
 
     now = time.time()
     then = date[0]
@@ -79,7 +85,7 @@
         future = True
         delta = max(1, int(then - now))
         if delta > agescales[0][1] * 30:
-            return 'in the distant future'
+            return b'in the distant future'
     else:
         delta = max(1, int(now - then))
         if delta > agescales[0][1] * 2:
@@ -89,10 +95,11 @@
         n = delta // s
         if n >= 2 or s == 1:
             if future:
-                return '%s from now' % fmt(t, n, a)
-            return '%s ago' % fmt(t, n, a)
+                return b'%s from now' % fmt(t, n, a)
+            return b'%s ago' % fmt(t, n, a)
 
-@templatefilter('basename', intype=bytes)
+
+@templatefilter(b'basename', intype=bytes)
 def basename(path):
     """Any text. Treats the text as a path, and returns the last
     component of the path after splitting by the path separator.
@@ -100,12 +107,14 @@
     """
     return os.path.basename(path)
 
-@templatefilter('cbor')
+
+@templatefilter(b'cbor')
 def cbor(obj):
     """Any object. Serializes the object to CBOR bytes."""
     return b''.join(cborutil.streamencode(obj))
 
-@templatefilter('commondir')
+
+@templatefilter(b'commondir')
 def commondir(filelist):
     """List of text. Treats each list item as file name with /
     as path separator and returns the longest common directory
@@ -118,63 +127,69 @@
     For example, ["foo/bar/baz", "foo/baz/bar"] becomes "foo" and
     ["foo/bar", "baz"] becomes "".
     """
+
     def common(a, b):
         if len(a) > len(b):
-            a = b[:len(a)]
+            a = b[: len(a)]
         elif len(b) > len(a):
-            b = b[:len(a)]
+            b = b[: len(a)]
         if a == b:
             return a
         for i in pycompat.xrange(len(a)):
             if a[i] != b[i]:
                 return a[:i]
         return a
+
     try:
         if not filelist:
-            return ""
-        dirlist = [f.lstrip('/').split('/')[:-1] for f in filelist]
+            return b""
+        dirlist = [f.lstrip(b'/').split(b'/')[:-1] for f in filelist]
         if len(dirlist) == 1:
-            return '/'.join(dirlist[0])
+            return b'/'.join(dirlist[0])
         a = min(dirlist)
         b = max(dirlist)
         # The common prefix of a and b is shared with all
         # elements of the list since Python sorts lexicographical
         # and [1, x] after [1].
-        return '/'.join(common(a, b))
+        return b'/'.join(common(a, b))
     except TypeError:
-        raise error.ParseError(_('argument is not a list of text'))
+        raise error.ParseError(_(b'argument is not a list of text'))
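
The min()/max() trick in commondir() deserves a note: after
lexicographic comparison, the common prefix of the smallest and largest
path lists is shared by every element in between. A standalone
illustration matching the docstring's examples:

    def commondir(filelist):
        dirlist = [f.lstrip(b'/').split(b'/')[:-1] for f in filelist]
        a, b = min(dirlist), max(dirlist)
        i = 0
        while i < min(len(a), len(b)) and a[i] == b[i]:
            i += 1
        return b'/'.join(a[:i])

    assert commondir([b'foo/bar/baz', b'foo/baz/bar']) == b'foo'
    assert commondir([b'foo/bar', b'baz']) == b''
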
 
-@templatefilter('count')
+
+@templatefilter(b'count')
 def count(i):
     """List or text. Returns the length as an integer."""
     try:
         return len(i)
     except TypeError:
-        raise error.ParseError(_('not countable'))
+        raise error.ParseError(_(b'not countable'))
 
-@templatefilter('dirname', intype=bytes)
+
+@templatefilter(b'dirname', intype=bytes)
 def dirname(path):
     """Any text. Treats the text as a path, and strips the last
     component of the path after splitting by the path separator.
     """
     return os.path.dirname(path)
 
-@templatefilter('domain', intype=bytes)
+
+@templatefilter(b'domain', intype=bytes)
 def domain(author):
     """Any text. Finds the first string that looks like an email
     address, and extracts just the domain component. Example: ``User
     <user@example.com>`` becomes ``example.com``.
     """
-    f = author.find('@')
+    f = author.find(b'@')
     if f == -1:
-        return ''
-    author = author[f + 1:]
-    f = author.find('>')
+        return b''
+    author = author[f + 1 :]
+    f = author.find(b'>')
     if f >= 0:
         author = author[:f]
     return author
 
-@templatefilter('email', intype=bytes)
+
+@templatefilter(b'email', intype=bytes)
 def email(text):
     """Any text. Extracts the first string that looks like an email
     address. Example: ``User <user@example.com>`` becomes
@@ -182,21 +197,24 @@
     """
     return stringutil.email(text)
 
-@templatefilter('escape', intype=bytes)
+
+@templatefilter(b'escape', intype=bytes)
 def escape(text):
     """Any text. Replaces the special XML/XHTML characters "&", "<"
     and ">" with XML entities, and filters out NUL characters.
     """
-    return url.escape(text.replace('\0', ''), True)
+    return url.escape(text.replace(b'\0', b''), True)
+
 
 para_re = None
 space_re = None
 
-def fill(text, width, initindent='', hangindent=''):
+
+def fill(text, width, initindent=b'', hangindent=b''):
     '''fill many paragraphs with optional indentation.'''
     global para_re, space_re
     if para_re is None:
-        para_re = re.compile('(\n\n|\n\\s*[-*]\\s*)', re.M)
+        para_re = re.compile(b'(\n\n|\n\\s*[-*]\\s*)', re.M)
         space_re = re.compile(br'  +')
 
     def findparas():
@@ -208,69 +226,88 @@
                 w = len(uctext)
                 while w > 0 and uctext[w - 1].isspace():
                     w -= 1
-                yield (encoding.unitolocal(uctext[:w]),
-                       encoding.unitolocal(uctext[w:]))
+                yield (
+                    encoding.unitolocal(uctext[:w]),
+                    encoding.unitolocal(uctext[w:]),
+                )
                 break
-            yield text[start:m.start(0)], m.group(1)
+            yield text[start : m.start(0)], m.group(1)
             start = m.end(1)
 
-    return "".join([stringutil.wrap(space_re.sub(' ',
-                                                 stringutil.wrap(para, width)),
-                                    width, initindent, hangindent) + rest
-                    for para, rest in findparas()])
+    return b"".join(
+        [
+            stringutil.wrap(
+                space_re.sub(b' ', stringutil.wrap(para, width)),
+                width,
+                initindent,
+                hangindent,
+            )
+            + rest
+            for para, rest in findparas()
+        ]
+    )
 
-@templatefilter('fill68', intype=bytes)
+
+@templatefilter(b'fill68', intype=bytes)
 def fill68(text):
     """Any text. Wraps the text to fit in 68 columns."""
     return fill(text, 68)
 
-@templatefilter('fill76', intype=bytes)
+
+@templatefilter(b'fill76', intype=bytes)
 def fill76(text):
     """Any text. Wraps the text to fit in 76 columns."""
     return fill(text, 76)
 
-@templatefilter('firstline', intype=bytes)
+
+@templatefilter(b'firstline', intype=bytes)
 def firstline(text):
     """Any text. Returns the first line of text."""
     try:
-        return text.splitlines(True)[0].rstrip('\r\n')
+        return text.splitlines(True)[0].rstrip(b'\r\n')
     except IndexError:
-        return ''
+        return b''
 
-@templatefilter('hex', intype=bytes)
+
+@templatefilter(b'hex', intype=bytes)
 def hexfilter(text):
     """Any text. Convert a binary Mercurial node identifier into
     its long hexadecimal representation.
     """
     return node.hex(text)
 
-@templatefilter('hgdate', intype=templateutil.date)
+
+@templatefilter(b'hgdate', intype=templateutil.date)
 def hgdate(text):
     """Date. Returns the date as a pair of numbers: "1157407993
     25200" (Unix timestamp, timezone offset).
     """
-    return "%d %d" % text
+    return b"%d %d" % text
 
-@templatefilter('isodate', intype=templateutil.date)
+
+@templatefilter(b'isodate', intype=templateutil.date)
 def isodate(text):
     """Date. Returns the date in ISO 8601 format: "2009-08-18 13:00
     +0200".
     """
-    return dateutil.datestr(text, '%Y-%m-%d %H:%M %1%2')
+    return dateutil.datestr(text, b'%Y-%m-%d %H:%M %1%2')
 
-@templatefilter('isodatesec', intype=templateutil.date)
+
+@templatefilter(b'isodatesec', intype=templateutil.date)
 def isodatesec(text):
     """Date. Returns the date in ISO 8601 format, including
     seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date
     filter.
     """
-    return dateutil.datestr(text, '%Y-%m-%d %H:%M:%S %1%2')
+    return dateutil.datestr(text, b'%Y-%m-%d %H:%M:%S %1%2')
+
 
 def indent(text, prefix):
     '''indent each non-empty line of text after first with prefix.'''
     lines = text.splitlines()
     num_lines = len(lines)
-    endswithnewline = text[-1:] == '\n'
+    endswithnewline = text[-1:] == b'\n'
+
     def indenter():
         for i in pycompat.xrange(num_lines):
             l = lines[i]
@@ -278,98 +315,115 @@
                 yield prefix
             yield l
             if i < num_lines - 1 or endswithnewline:
-                yield '\n'
-    return "".join(indenter())
+                yield b'\n'
 
-@templatefilter('json')
+    return b"".join(indenter())
+
+
+@templatefilter(b'json')
 def json(obj, paranoid=True):
     """Any object. Serializes the object to a JSON formatted text."""
     if obj is None:
-        return 'null'
+        return b'null'
     elif obj is False:
-        return 'false'
+        return b'false'
     elif obj is True:
-        return 'true'
+        return b'true'
     elif isinstance(obj, (int, long, float)):
         return pycompat.bytestr(obj)
     elif isinstance(obj, bytes):
-        return '"%s"' % encoding.jsonescape(obj, paranoid=paranoid)
+        return b'"%s"' % encoding.jsonescape(obj, paranoid=paranoid)
     elif isinstance(obj, type(u'')):
         raise error.ProgrammingError(
-            'Mercurial only does output with bytes: %r' % obj)
-    elif util.safehasattr(obj, 'keys'):
-        out = ['"%s": %s' % (encoding.jsonescape(k, paranoid=paranoid),
-                             json(v, paranoid))
-               for k, v in sorted(obj.iteritems())]
-        return '{' + ', '.join(out) + '}'
-    elif util.safehasattr(obj, '__iter__'):
+            b'Mercurial only does output with bytes: %r' % obj
+        )
+    elif util.safehasattr(obj, b'keys'):
+        out = [
+            b'"%s": %s'
+            % (encoding.jsonescape(k, paranoid=paranoid), json(v, paranoid))
+            for k, v in sorted(pycompat.iteritems(obj))
+        ]
+        return b'{' + b', '.join(out) + b'}'
+    elif util.safehasattr(obj, b'__iter__'):
         out = [json(i, paranoid) for i in obj]
-        return '[' + ', '.join(out) + ']'
-    raise error.ProgrammingError('cannot encode %r' % obj)
+        return b'[' + b', '.join(out) + b']'
+    raise error.ProgrammingError(b'cannot encode %r' % obj)
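
A toy version of the json() filter above, runnable on its own (the
trivial escape() stands in for encoding.jsonescape, which does far
more, and the float/unicode branches are dropped):

    def escape(b):
        return b.replace(b'\\', b'\\\\').replace(b'"', b'\\"')

    def tojson(obj):
        if obj is None:
            return b'null'
        elif obj is False:
            return b'false'
        elif obj is True:
            return b'true'
        elif isinstance(obj, int):
            return b'%d' % obj
        elif isinstance(obj, bytes):
            return b'"%s"' % escape(obj)
        elif hasattr(obj, 'keys'):
            out = [b'"%s": %s' % (escape(k), tojson(v))
                   for k, v in sorted(obj.items())]
            return b'{' + b', '.join(out) + b'}'
        elif hasattr(obj, '__iter__'):
            return b'[' + b', '.join(tojson(i) for i in obj) + b']'
        raise TypeError('cannot encode %r' % obj)

    print(tojson({b'rev': 0, b'tags': [b'tip']}))
    # b'{"rev": 0, "tags": ["tip"]}'
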
 
-@templatefilter('lower', intype=bytes)
+
+@templatefilter(b'lower', intype=bytes)
 def lower(text):
     """Any text. Converts the text to lowercase."""
     return encoding.lower(text)
 
-@templatefilter('nonempty', intype=bytes)
+
+@templatefilter(b'nonempty', intype=bytes)
 def nonempty(text):
     """Any text. Returns '(none)' if the string is empty."""
-    return text or "(none)"
+    return text or b"(none)"
 
-@templatefilter('obfuscate', intype=bytes)
+
+@templatefilter(b'obfuscate', intype=bytes)
 def obfuscate(text):
     """Any text. Returns the input text rendered as a sequence of
     XML entities.
     """
-    text = unicode(text, pycompat.sysstr(encoding.encoding), r'replace')
-    return ''.join(['&#%d;' % ord(c) for c in text])
+    text = pycompat.unicode(
+        text, pycompat.sysstr(encoding.encoding), r'replace'
+    )
+    return b''.join([b'&#%d;' % ord(c) for c in text])
 
-@templatefilter('permissions', intype=bytes)
+
+@templatefilter(b'permissions', intype=bytes)
 def permissions(flags):
-    if "l" in flags:
-        return "lrwxrwxrwx"
-    if "x" in flags:
-        return "-rwxr-xr-x"
-    return "-rw-r--r--"
+    if b"l" in flags:
+        return b"lrwxrwxrwx"
+    if b"x" in flags:
+        return b"-rwxr-xr-x"
+    return b"-rw-r--r--"
 
-@templatefilter('person', intype=bytes)
+
+@templatefilter(b'person', intype=bytes)
 def person(author):
     """Any text. Returns the name before an email address,
     interpreting it as per RFC 5322.
     """
     return stringutil.person(author)
 
-@templatefilter('revescape', intype=bytes)
+
+@templatefilter(b'revescape', intype=bytes)
 def revescape(text):
     """Any text. Escapes all "special" characters, except @.
     Forward slashes are escaped twice to prevent web servers from prematurely
     unescaping them. For example, "@foo bar/baz" becomes "@foo%20bar%252Fbaz".
     """
-    return urlreq.quote(text, safe='/@').replace('/', '%252F')
+    return urlreq.quote(text, safe=b'/@').replace(b'/', b'%252F')
 
-@templatefilter('rfc3339date', intype=templateutil.date)
+
+@templatefilter(b'rfc3339date', intype=templateutil.date)
 def rfc3339date(text):
     """Date. Returns a date using the Internet date format
     specified in RFC 3339: "2009-08-18T13:00:13+02:00".
     """
-    return dateutil.datestr(text, "%Y-%m-%dT%H:%M:%S%1:%2")
+    return dateutil.datestr(text, b"%Y-%m-%dT%H:%M:%S%1:%2")
 
-@templatefilter('rfc822date', intype=templateutil.date)
+
+@templatefilter(b'rfc822date', intype=templateutil.date)
 def rfc822date(text):
     """Date. Returns a date using the same format used in email
     headers: "Tue, 18 Aug 2009 13:00:13 +0200".
     """
-    return dateutil.datestr(text, "%a, %d %b %Y %H:%M:%S %1%2")
+    return dateutil.datestr(text, b"%a, %d %b %Y %H:%M:%S %1%2")
 
-@templatefilter('short', intype=bytes)
+
+@templatefilter(b'short', intype=bytes)
 def short(text):
     """Changeset hash. Returns the short form of a changeset hash,
     i.e. a 12 hexadecimal digit string.
     """
     return text[:12]
 
-@templatefilter('shortbisect', intype=bytes)
+
+@templatefilter(b'shortbisect', intype=bytes)
 def shortbisect(label):
     """Any text. Treats `label` as a bisection status, and
     returns a single character representing the status (G: good, B: bad,
@@ -378,89 +432,104 @@
     """
     if label:
         return label[0:1].upper()
-    return ' '
+    return b' '
 
-@templatefilter('shortdate', intype=templateutil.date)
+
+@templatefilter(b'shortdate', intype=templateutil.date)
 def shortdate(text):
     """Date. Returns a date like "2006-09-18"."""
     return dateutil.shortdate(text)
 
-@templatefilter('slashpath', intype=bytes)
+
+@templatefilter(b'slashpath', intype=bytes)
 def slashpath(path):
     """Any text. Replaces the native path separator with slash."""
     return util.pconvert(path)
 
-@templatefilter('splitlines', intype=bytes)
+
+@templatefilter(b'splitlines', intype=bytes)
 def splitlines(text):
     """Any text. Split text into a list of lines."""
-    return templateutil.hybridlist(text.splitlines(), name='line')
+    return templateutil.hybridlist(text.splitlines(), name=b'line')
 
-@templatefilter('stringescape', intype=bytes)
+
+@templatefilter(b'stringescape', intype=bytes)
 def stringescape(text):
     return stringutil.escapestr(text)
 
-@templatefilter('stringify', intype=bytes)
+
+@templatefilter(b'stringify', intype=bytes)
 def stringify(thing):
     """Any type. Turns the value into text by converting values into
     text and concatenating them.
     """
     return thing  # coerced by the intype
 
-@templatefilter('stripdir', intype=bytes)
+
+@templatefilter(b'stripdir', intype=bytes)
 def stripdir(text):
     """Treat the text as a path and strip a directory level, if
     possible. For example, "foo" and "foo/bar" become "foo".
     """
     dir = os.path.dirname(text)
-    if dir == "":
+    if dir == b"":
         return os.path.basename(text)
     else:
         return dir
 
-@templatefilter('tabindent', intype=bytes)
+
+@templatefilter(b'tabindent', intype=bytes)
 def tabindent(text):
     """Any text. Returns the text, with every non-empty line
     except the first starting with a tab character.
     """
-    return indent(text, '\t')
+    return indent(text, b'\t')
 
-@templatefilter('upper', intype=bytes)
+
+@templatefilter(b'upper', intype=bytes)
 def upper(text):
     """Any text. Converts the text to uppercase."""
     return encoding.upper(text)
 
-@templatefilter('urlescape', intype=bytes)
+
+@templatefilter(b'urlescape', intype=bytes)
 def urlescape(text):
     """Any text. Escapes all "special" characters. For example,
     "foo bar" becomes "foo%20bar".
     """
     return urlreq.quote(text)
 
-@templatefilter('user', intype=bytes)
+
+@templatefilter(b'user', intype=bytes)
 def userfilter(text):
     """Any text. Returns a short representation of a user name or email
     address."""
     return stringutil.shortuser(text)
 
-@templatefilter('emailuser', intype=bytes)
+
+@templatefilter(b'emailuser', intype=bytes)
 def emailuser(text):
     """Any text. Returns the user portion of an email address."""
     return stringutil.emailuser(text)
 
-@templatefilter('utf8', intype=bytes)
+
+@templatefilter(b'utf8', intype=bytes)
 def utf8(text):
     """Any text. Converts from the local character encoding to UTF-8."""
     return encoding.fromlocal(text)
 
-@templatefilter('xmlescape', intype=bytes)
+
+@templatefilter(b'xmlescape', intype=bytes)
 def xmlescape(text):
-    text = (text
-            .replace('&', '&amp;')
-            .replace('<', '&lt;')
-            .replace('>', '&gt;')
-            .replace('"', '&quot;')
-            .replace("'", '&#39;')) # &apos; invalid in HTML
-    return re.sub('[\x00-\x08\x0B\x0C\x0E-\x1F]', ' ', text)
+    text = (
+        text.replace(b'&', b'&amp;')
+        .replace(b'<', b'&lt;')
+        .replace(b'>', b'&gt;')
+        .replace(b'"', b'&quot;')
+        .replace(b"'", b'&#39;')
+    )  # &apos; invalid in HTML
+    return re.sub(b'[\x00-\x08\x0B\x0C\x0E-\x1F]', b' ', text)
+
 
 def websub(text, websubtable):
     """:websub: Any text. Only applies to hgweb. Applies the regular
@@ -471,11 +540,13 @@
             text = regexp.sub(format, text)
     return text
 
+
 def loadfilter(ui, extname, registrarobj):
     """Load template filter from specified registrarobj
     """
-    for name, func in registrarobj._table.iteritems():
+    for name, func in pycompat.iteritems(registrarobj._table):
         filters[name] = func
 
+
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = filters.values()
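
The @templatefilter(b'name') decorators throughout this file are the
registrar pattern that loadfilter() unpacks: each decorator records its
function in a table keyed by the byte-string name. In invented
miniature:

    filters = {}

    def templatefilter(name, intype=None):
        def decorator(func):
            func._intype = intype  # remembered so input can be coerced
            filters[name] = func
            return func
        return decorator

    @templatefilter(b'upper', intype=bytes)
    def upper(text):
        return text.upper()

    print(filters[b'upper'](b'tip'))  # b'TIP'
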
--- a/mercurial/templatefuncs.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/templatefuncs.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,6 +16,7 @@
 )
 from . import (
     color,
+    diffutil,
     encoding,
     error,
     minirst,
@@ -48,18 +49,23 @@
 funcs = {}
 templatefunc = registrar.templatefunc(funcs)
 
-@templatefunc('date(date[, fmt])')
+
+@templatefunc(b'date(date[, fmt])')
 def date(context, mapping, args):
     """Format a date. See :hg:`help dates` for formatting
     strings. The default is a Unix date format, including the timezone:
     "Mon Sep 04 15:13:13 2006 0700"."""
     if not (1 <= len(args) <= 2):
         # i18n: "date" is a keyword
-        raise error.ParseError(_("date expects one or two arguments"))
+        raise error.ParseError(_(b"date expects one or two arguments"))
 
-    date = evaldate(context, mapping, args[0],
-                    # i18n: "date" is a keyword
-                    _("date expects a date information"))
+    date = evaldate(
+        context,
+        mapping,
+        args[0],
+        # i18n: "date" is a keyword
+        _(b"date expects a date information"),
+    )
     fmt = None
     if len(args) == 2:
         fmt = evalstring(context, mapping, args[1])
@@ -68,31 +74,37 @@
     else:
         return dateutil.datestr(date, fmt)
 
-@templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
+
+@templatefunc(b'dict([[key=]value...])', argspec=b'*args **kwargs')
 def dict_(context, mapping, args):
     """Construct a dict from key-value pairs. A key may be omitted if
     a value expression can provide an unambiguous name."""
     data = util.sortdict()
 
-    for v in args['args']:
+    for v in args[b'args']:
         k = templateutil.findsymbolicname(v)
         if not k:
-            raise error.ParseError(_('dict key cannot be inferred'))
-        if k in data or k in args['kwargs']:
-            raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
+            raise error.ParseError(_(b'dict key cannot be inferred'))
+        if k in data or k in args[b'kwargs']:
+            raise error.ParseError(_(b"duplicated dict key '%s' inferred") % k)
         data[k] = evalfuncarg(context, mapping, v)
 
-    data.update((k, evalfuncarg(context, mapping, v))
-                for k, v in args['kwargs'].iteritems())
+    data.update(
+        (k, evalfuncarg(context, mapping, v))
+        for k, v in pycompat.iteritems(args[b'kwargs'])
+    )
     return templateutil.hybriddict(data)
 
-@templatefunc('diff([includepattern [, excludepattern]])', requires={'ctx'})
+
+@templatefunc(
+    b'diff([includepattern [, excludepattern]])', requires={b'ctx', b'ui'}
+)
 def diff(context, mapping, args):
     """Show a diff, optionally
     specifying files to include or exclude."""
     if len(args) > 2:
         # i18n: "diff" is a keyword
-        raise error.ParseError(_("diff expects zero, one, or two arguments"))
+        raise error.ParseError(_(b"diff expects zero, one, or two arguments"))
 
     def getpatterns(i):
         if i < len(args):
@@ -101,64 +113,79 @@
                 return [s]
         return []
 
-    ctx = context.resource(mapping, 'ctx')
-    chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
+    ctx = context.resource(mapping, b'ctx')
+    ui = context.resource(mapping, b'ui')
+    diffopts = diffutil.diffallopts(ui)
+    chunks = ctx.diff(
+        match=ctx.match([], getpatterns(0), getpatterns(1)), opts=diffopts
+    )
 
-    return ''.join(chunks)
+    return b''.join(chunks)
+
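
diff() above grows requires={b'ctx', b'ui'} because it now reads diff
options from the ui. The contract, in invented miniature (the real
engine validates the declaration; resource() here only looks names up):

    class Context(object):
        def __init__(self, resources):
            self._resources = resources

        def resource(self, mapping, name):
            # the real engine also checks name against declared requires
            return self._resources[name]

    def templatefunc(sig, requires=frozenset()):
        def decorator(func):
            func._requires = requires  # inspected by the template engine
            return func
        return decorator

    @templatefunc(b'username()', requires={b'ui'})
    def username(context, mapping, args):
        return context.resource(mapping, b'ui')[b'username']

    ctx = Context({b'ui': {b'username': b'alice'}})
    print(username(ctx, {}, []))  # b'alice'
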
 
-@templatefunc('extdata(source)', argspec='source', requires={'ctx', 'cache'})
+@templatefunc(
+    b'extdata(source)', argspec=b'source', requires={b'ctx', b'cache'}
+)
 def extdata(context, mapping, args):
     """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
-    if 'source' not in args:
+    if b'source' not in args:
         # i18n: "extdata" is a keyword
-        raise error.ParseError(_('extdata expects one argument'))
+        raise error.ParseError(_(b'extdata expects one argument'))
 
-    source = evalstring(context, mapping, args['source'])
+    source = evalstring(context, mapping, args[b'source'])
     if not source:
-        sym = templateutil.findsymbolicname(args['source'])
+        sym = templateutil.findsymbolicname(args[b'source'])
         if sym:
-            raise error.ParseError(_('empty data source specified'),
-                                   hint=_("did you mean extdata('%s')?") % sym)
+            raise error.ParseError(
+                _(b'empty data source specified'),
+                hint=_(b"did you mean extdata('%s')?") % sym,
+            )
         else:
-            raise error.ParseError(_('empty data source specified'))
-    cache = context.resource(mapping, 'cache').setdefault('extdata', {})
-    ctx = context.resource(mapping, 'ctx')
+            raise error.ParseError(_(b'empty data source specified'))
+    cache = context.resource(mapping, b'cache').setdefault(b'extdata', {})
+    ctx = context.resource(mapping, b'ctx')
     if source in cache:
         data = cache[source]
     else:
         data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
-    return data.get(ctx.rev(), '')
+    return data.get(ctx.rev(), b'')
 
-@templatefunc('files(pattern)', requires={'ctx'})
+
+@templatefunc(b'files(pattern)', requires={b'ctx'})
 def files(context, mapping, args):
     """All files of the current changeset matching the pattern. See
     :hg:`help patterns`."""
     if not len(args) == 1:
         # i18n: "files" is a keyword
-        raise error.ParseError(_("files expects one argument"))
+        raise error.ParseError(_(b"files expects one argument"))
 
     raw = evalstring(context, mapping, args[0])
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     m = ctx.match([raw])
     files = list(ctx.matches(m))
-    return templateutil.compatfileslist(context, mapping, "file", files)
+    return templateutil.compatfileslist(context, mapping, b"file", files)
 
-@templatefunc('fill(text[, width[, initialident[, hangindent]]])')
+
+@templatefunc(b'fill(text[, width[, initialindent[, hangindent]]])')
 def fill(context, mapping, args):
     """Fill many
     paragraphs with optional indentation. See the "fill" filter."""
     if not (1 <= len(args) <= 4):
         # i18n: "fill" is a keyword
-        raise error.ParseError(_("fill expects one to four arguments"))
+        raise error.ParseError(_(b"fill expects one to four arguments"))
 
     text = evalstring(context, mapping, args[0])
     width = 76
-    initindent = ''
-    hangindent = ''
+    initindent = b''
+    hangindent = b''
     if 2 <= len(args) <= 4:
-        width = evalinteger(context, mapping, args[1],
-                            # i18n: "fill" is a keyword
-                            _("fill expects an integer width"))
+        width = evalinteger(
+            context,
+            mapping,
+            args[1],
+            # i18n: "fill" is a keyword
+            _(b"fill expects an integer width"),
+        )
         try:
             initindent = evalstring(context, mapping, args[2])
             hangindent = evalstring(context, mapping, args[3])
@@ -167,84 +194,97 @@
 
     return templatefilters.fill(text, width, initindent, hangindent)
 
-@templatefunc('filter(iterable[, expr])')
+
+@templatefunc(b'filter(iterable[, expr])')
 def filter_(context, mapping, args):
     """Remove empty elements from a list or a dict. If expr specified, it's
     applied to each element to test emptiness."""
     if not (1 <= len(args) <= 2):
         # i18n: "filter" is a keyword
-        raise error.ParseError(_("filter expects one or two arguments"))
+        raise error.ParseError(_(b"filter expects one or two arguments"))
     iterable = evalwrapped(context, mapping, args[0])
     if len(args) == 1:
+
         def select(w):
             return w.tobool(context, mapping)
+
     else:
+
         def select(w):
             if not isinstance(w, templateutil.mappable):
-                raise error.ParseError(_("not filterable by expression"))
+                raise error.ParseError(_(b"not filterable by expression"))
             lm = context.overlaymap(mapping, w.tomap(context))
             return evalboolean(context, lm, args[1])
+
     return iterable.filter(context, mapping, select)
 
-@templatefunc('formatnode(node)', requires={'ui'})
+
+@templatefunc(b'formatnode(node)', requires={b'ui'})
 def formatnode(context, mapping, args):
     """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
     if len(args) != 1:
         # i18n: "formatnode" is a keyword
-        raise error.ParseError(_("formatnode expects one argument"))
+        raise error.ParseError(_(b"formatnode expects one argument"))
 
-    ui = context.resource(mapping, 'ui')
+    ui = context.resource(mapping, b'ui')
     node = evalstring(context, mapping, args[0])
     if ui.debugflag:
         return node
     return templatefilters.short(node)
 
-@templatefunc('mailmap(author)', requires={'repo', 'cache'})
+
+@templatefunc(b'mailmap(author)', requires={b'repo', b'cache'})
 def mailmap(context, mapping, args):
     """Return the author, updated according to the value
     set in the .mailmap file"""
     if len(args) != 1:
-        raise error.ParseError(_("mailmap expects one argument"))
+        raise error.ParseError(_(b"mailmap expects one argument"))
 
     author = evalstring(context, mapping, args[0])
 
-    cache = context.resource(mapping, 'cache')
-    repo = context.resource(mapping, 'repo')
+    cache = context.resource(mapping, b'cache')
+    repo = context.resource(mapping, b'repo')
 
-    if 'mailmap' not in cache:
-        data = repo.wvfs.tryread('.mailmap')
-        cache['mailmap'] = stringutil.parsemailmap(data)
+    if b'mailmap' not in cache:
+        data = repo.wvfs.tryread(b'.mailmap')
+        cache[b'mailmap'] = stringutil.parsemailmap(data)
 
-    return stringutil.mapname(cache['mailmap'], author)
+    return stringutil.mapname(cache[b'mailmap'], author)
+
 
 @templatefunc(
-    'pad(text, width[, fillchar=\' \'[, left=False[, truncate=False]]])',
-    argspec='text width fillchar left truncate')
+    b'pad(text, width[, fillchar=\' \'[, left=False[, truncate=False]]])',
+    argspec=b'text width fillchar left truncate',
+)
 def pad(context, mapping, args):
     """Pad text with a
     fill character."""
-    if 'text' not in args or 'width' not in args:
+    if b'text' not in args or b'width' not in args:
         # i18n: "pad" is a keyword
-        raise error.ParseError(_("pad() expects two to four arguments"))
+        raise error.ParseError(_(b"pad() expects two to four arguments"))
 
-    width = evalinteger(context, mapping, args['width'],
-                        # i18n: "pad" is a keyword
-                        _("pad() expects an integer width"))
+    width = evalinteger(
+        context,
+        mapping,
+        args[b'width'],
+        # i18n: "pad" is a keyword
+        _(b"pad() expects an integer width"),
+    )
 
-    text = evalstring(context, mapping, args['text'])
+    text = evalstring(context, mapping, args[b'text'])
 
     truncate = False
     left = False
-    fillchar = ' '
-    if 'fillchar' in args:
-        fillchar = evalstring(context, mapping, args['fillchar'])
+    fillchar = b' '
+    if b'fillchar' in args:
+        fillchar = evalstring(context, mapping, args[b'fillchar'])
         if len(color.stripeffects(fillchar)) != 1:
             # i18n: "pad" is a keyword
-            raise error.ParseError(_("pad() expects a single fill character"))
-    if 'left' in args:
-        left = evalboolean(context, mapping, args['left'])
-    if 'truncate' in args:
-        truncate = evalboolean(context, mapping, args['truncate'])
+            raise error.ParseError(_(b"pad() expects a single fill character"))
+    if b'left' in args:
+        left = evalboolean(context, mapping, args[b'left'])
+    if b'truncate' in args:
+        truncate = evalboolean(context, mapping, args[b'truncate'])
 
     fillwidth = width - encoding.colwidth(color.stripeffects(text))
     if fillwidth < 0 and truncate:
@@ -256,7 +296,8 @@
     else:
         return text + fillchar * fillwidth
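
pad(), split across the two hunks above, derives the fill count from
the rendered column width. A simplified standalone version (len()
stands in for encoding.colwidth plus color.stripeffects, and the
truncation branch elided between the hunks is guessed as a plain
slice):

    def pad(text, width, fillchar=b' ', left=False, truncate=False):
        fillwidth = width - len(text)
        if fillwidth < 0 and truncate:
            return text[:width]
        if fillwidth <= 0:
            return text
        if left:
            return fillchar * fillwidth + text
        return text + fillchar * fillwidth

    assert pad(b'abc', 5) == b'abc  '
    assert pad(b'abc', 5, left=True) == b'  abc'
    assert pad(b'abcdef', 4, truncate=True) == b'abcd'
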
 
-@templatefunc('indent(text, indentchars[, firstline])')
+
+@templatefunc(b'indent(text, indentchars[, firstline])')
 def indent(context, mapping, args):
     """Indents all non-empty lines
     with the characters given in the indentchars string. An optional
@@ -264,7 +305,7 @@
     if present."""
     if not (2 <= len(args) <= 3):
         # i18n: "indent" is a keyword
-        raise error.ParseError(_("indent() expects two or three arguments"))
+        raise error.ParseError(_(b"indent() expects two or three arguments"))
 
     text = evalstring(context, mapping, args[0])
     indent = evalstring(context, mapping, args[1])
@@ -277,14 +318,15 @@
     # the indent function doesn't indent the first line, so we do it here
     return templatefilters.indent(firstline + text, indent)
 
-@templatefunc('get(dict, key)')
+
+@templatefunc(b'get(dict, key)')
 def get(context, mapping, args):
     """Get an attribute/key from an object. Some keywords
     are complex types. This function allows you to obtain the value of an
     attribute on these types."""
     if len(args) != 2:
         # i18n: "get" is a keyword
-        raise error.ParseError(_("get() expects two arguments"))
+        raise error.ParseError(_(b"get() expects two arguments"))
 
     dictarg = evalwrapped(context, mapping, args[0])
     key = evalrawexp(context, mapping, args[1])
@@ -292,30 +334,34 @@
         return dictarg.getmember(context, mapping, key)
     except error.ParseError as err:
         # i18n: "get" is a keyword
-        hint = _("get() expects a dict as first argument")
+        hint = _(b"get() expects a dict as first argument")
         raise error.ParseError(bytes(err), hint=hint)
 
-@templatefunc('config(section, name[, default])', requires={'ui'})
+
+@templatefunc(b'config(section, name[, default])', requires={b'ui'})
 def config(context, mapping, args):
     """Returns the requested hgrc config option as a string."""
-    fn = context.resource(mapping, 'ui').config
+    fn = context.resource(mapping, b'ui').config
     return _config(context, mapping, args, fn, evalstring)
 
-@templatefunc('configbool(section, name[, default])', requires={'ui'})
+
+@templatefunc(b'configbool(section, name[, default])', requires={b'ui'})
 def configbool(context, mapping, args):
     """Returns the requested hgrc config option as a boolean."""
-    fn = context.resource(mapping, 'ui').configbool
+    fn = context.resource(mapping, b'ui').configbool
     return _config(context, mapping, args, fn, evalboolean)
 
-@templatefunc('configint(section, name[, default])', requires={'ui'})
+
+@templatefunc(b'configint(section, name[, default])', requires={b'ui'})
 def configint(context, mapping, args):
     """Returns the requested hgrc config option as an integer."""
-    fn = context.resource(mapping, 'ui').configint
+    fn = context.resource(mapping, b'ui').configint
     return _config(context, mapping, args, fn, evalinteger)
 
+
 def _config(context, mapping, args, configfn, defaultfn):
     if not (2 <= len(args) <= 3):
-        raise error.ParseError(_("config expects two or three arguments"))
+        raise error.ParseError(_(b"config expects two or three arguments"))
 
     # The config option can come from any section, though we specifically
     # reserve the [templateconfig] section for dynamically defining options
@@ -328,13 +374,14 @@
     else:
         return configfn(section, name)
 
-@templatefunc('if(expr, then[, else])')
+
+@templatefunc(b'if(expr, then[, else])')
 def if_(context, mapping, args):
     """Conditionally execute based on the result of
     an expression."""
     if not (2 <= len(args) <= 3):
         # i18n: "if" is a keyword
-        raise error.ParseError(_("if expects two or three arguments"))
+        raise error.ParseError(_(b"if expects two or three arguments"))
 
     test = evalboolean(context, mapping, args[0])
     if test:
@@ -342,13 +389,14 @@
     elif len(args) == 3:
         return evalrawexp(context, mapping, args[2])
 
-@templatefunc('ifcontains(needle, haystack, then[, else])')
+
+@templatefunc(b'ifcontains(needle, haystack, then[, else])')
 def ifcontains(context, mapping, args):
     """Conditionally execute based
     on whether the item "needle" is in "haystack"."""
     if not (3 <= len(args) <= 4):
         # i18n: "ifcontains" is a keyword
-        raise error.ParseError(_("ifcontains expects three or four arguments"))
+        raise error.ParseError(_(b"ifcontains expects three or four arguments"))
 
     haystack = evalwrapped(context, mapping, args[1])
     try:
@@ -362,13 +410,14 @@
     elif len(args) == 4:
         return evalrawexp(context, mapping, args[3])
 
-@templatefunc('ifeq(expr1, expr2, then[, else])')
+
+@templatefunc(b'ifeq(expr1, expr2, then[, else])')
 def ifeq(context, mapping, args):
     """Conditionally execute based on
     whether 2 items are equivalent."""
     if not (3 <= len(args) <= 4):
         # i18n: "ifeq" is a keyword
-        raise error.ParseError(_("ifeq expects three or four arguments"))
+        raise error.ParseError(_(b"ifeq expects three or four arguments"))
 
     test = evalstring(context, mapping, args[0])
     match = evalstring(context, mapping, args[1])
@@ -377,29 +426,31 @@
     elif len(args) == 4:
         return evalrawexp(context, mapping, args[3])
 
-@templatefunc('join(list, sep)')
+
+@templatefunc(b'join(list, sep)')
 def join(context, mapping, args):
     """Join items in a list with a delimiter."""
     if not (1 <= len(args) <= 2):
         # i18n: "join" is a keyword
-        raise error.ParseError(_("join expects one or two arguments"))
+        raise error.ParseError(_(b"join expects one or two arguments"))
 
     joinset = evalwrapped(context, mapping, args[0])
-    joiner = " "
+    joiner = b" "
     if len(args) > 1:
         joiner = evalstring(context, mapping, args[1])
     return joinset.join(context, mapping, joiner)
 
-@templatefunc('label(label, expr)', requires={'ui'})
+
+@templatefunc(b'label(label, expr)', requires={b'ui'})
 def label(context, mapping, args):
     """Apply a label to generated content. Content with
     a label applied can result in additional post-processing, such as
     automatic colorization."""
     if len(args) != 2:
         # i18n: "label" is a keyword
-        raise error.ParseError(_("label expects two arguments"))
+        raise error.ParseError(_(b"label expects two arguments"))
 
-    ui = context.resource(mapping, 'ui')
+    ui = context.resource(mapping, b'ui')
     thing = evalstring(context, mapping, args[1])
     # preserve unknown symbol as literal so effects like 'red', 'bold',
     # etc. don't need to be quoted
@@ -407,7 +458,8 @@
 
     return ui.label(thing, label)
 
-@templatefunc('latesttag([pattern])')
+
+@templatefunc(b'latesttag([pattern])')
 def latesttag(context, mapping, args):
     """The global tags matching the given pattern on the
     most recent globally tagged ancestor of this changeset.
@@ -417,24 +469,29 @@
     """
     if len(args) > 1:
         # i18n: "latesttag" is a keyword
-        raise error.ParseError(_("latesttag expects at most one argument"))
+        raise error.ParseError(_(b"latesttag expects at most one argument"))
 
     pattern = None
     if len(args) == 1:
         pattern = evalstring(context, mapping, args[0])
     return templatekw.showlatesttags(context, mapping, pattern)
 
-@templatefunc('localdate(date[, tz])')
+
+@templatefunc(b'localdate(date[, tz])')
 def localdate(context, mapping, args):
     """Converts a date to the specified timezone.
     The default is local date."""
     if not (1 <= len(args) <= 2):
         # i18n: "localdate" is a keyword
-        raise error.ParseError(_("localdate expects one or two arguments"))
+        raise error.ParseError(_(b"localdate expects one or two arguments"))
 
-    date = evaldate(context, mapping, args[0],
-                    # i18n: "localdate" is a keyword
-                    _("localdate expects a date information"))
+    date = evaldate(
+        context,
+        mapping,
+        args[0],
+        # i18n: "localdate" is a keyword
+        _(b"localdate expects a date information"),
+    )
     if len(args) >= 2:
         tzoffset = None
         tz = evalfuncarg(context, mapping, args[1])
@@ -447,111 +504,121 @@
                 tzoffset = int(tz)
             except (TypeError, ValueError):
                 # i18n: "localdate" is a keyword
-                raise error.ParseError(_("localdate expects a timezone"))
+                raise error.ParseError(_(b"localdate expects a timezone"))
     else:
         tzoffset = dateutil.makedate()[1]
     return templateutil.date((date[0], tzoffset))
 
-@templatefunc('max(iterable)')
+
+@templatefunc(b'max(iterable)')
 def max_(context, mapping, args, **kwargs):
     """Return the max of an iterable"""
     if len(args) != 1:
         # i18n: "max" is a keyword
-        raise error.ParseError(_("max expects one argument"))
+        raise error.ParseError(_(b"max expects one argument"))
 
     iterable = evalwrapped(context, mapping, args[0])
     try:
         return iterable.getmax(context, mapping)
     except error.ParseError as err:
         # i18n: "max" is a keyword
-        hint = _("max first argument should be an iterable")
+        hint = _(b"max first argument should be an iterable")
         raise error.ParseError(bytes(err), hint=hint)
 
-@templatefunc('min(iterable)')
+
+@templatefunc(b'min(iterable)')
 def min_(context, mapping, args, **kwargs):
     """Return the min of an iterable"""
     if len(args) != 1:
         # i18n: "min" is a keyword
-        raise error.ParseError(_("min expects one argument"))
+        raise error.ParseError(_(b"min expects one argument"))
 
     iterable = evalwrapped(context, mapping, args[0])
     try:
         return iterable.getmin(context, mapping)
     except error.ParseError as err:
         # i18n: "min" is a keyword
-        hint = _("min first argument should be an iterable")
+        hint = _(b"min first argument should be an iterable")
         raise error.ParseError(bytes(err), hint=hint)
 
-@templatefunc('mod(a, b)')
+
+@templatefunc(b'mod(a, b)')
 def mod(context, mapping, args):
     """Calculate a mod b such that a / b * b + a mod b == a"""
     if not len(args) == 2:
         # i18n: "mod" is a keyword
-        raise error.ParseError(_("mod expects two arguments"))
+        raise error.ParseError(_(b"mod expects two arguments"))
 
     func = lambda a, b: a % b
-    return templateutil.runarithmetic(context, mapping,
-                                      (func, args[0], args[1]))
+    return templateutil.runarithmetic(
+        context, mapping, (func, args[0], args[1])
+    )
 
-@templatefunc('obsfateoperations(markers)')
+
+@templatefunc(b'obsfateoperations(markers)')
 def obsfateoperations(context, mapping, args):
     """Compute obsfate related information based on markers (EXPERIMENTAL)"""
     if len(args) != 1:
         # i18n: "obsfateoperations" is a keyword
-        raise error.ParseError(_("obsfateoperations expects one argument"))
+        raise error.ParseError(_(b"obsfateoperations expects one argument"))
 
     markers = evalfuncarg(context, mapping, args[0])
 
     try:
         data = obsutil.markersoperations(markers)
-        return templateutil.hybridlist(data, name='operation')
+        return templateutil.hybridlist(data, name=b'operation')
     except (TypeError, KeyError):
         # i18n: "obsfateoperations" is a keyword
-        errmsg = _("obsfateoperations first argument should be an iterable")
+        errmsg = _(b"obsfateoperations first argument should be an iterable")
         raise error.ParseError(errmsg)
 
-@templatefunc('obsfatedate(markers)')
+
+@templatefunc(b'obsfatedate(markers)')
 def obsfatedate(context, mapping, args):
     """Compute obsfate related information based on markers (EXPERIMENTAL)"""
     if len(args) != 1:
         # i18n: "obsfatedate" is a keyword
-        raise error.ParseError(_("obsfatedate expects one argument"))
+        raise error.ParseError(_(b"obsfatedate expects one argument"))
 
     markers = evalfuncarg(context, mapping, args[0])
 
     try:
         # TODO: maybe this has to be a wrapped list of date wrappers?
         data = obsutil.markersdates(markers)
-        return templateutil.hybridlist(data, name='date', fmt='%d %d')
+        return templateutil.hybridlist(data, name=b'date', fmt=b'%d %d')
     except (TypeError, KeyError):
         # i18n: "obsfatedate" is a keyword
-        errmsg = _("obsfatedate first argument should be an iterable")
+        errmsg = _(b"obsfatedate first argument should be an iterable")
         raise error.ParseError(errmsg)
 
-@templatefunc('obsfateusers(markers)')
+
+@templatefunc(b'obsfateusers(markers)')
 def obsfateusers(context, mapping, args):
     """Compute obsfate related information based on markers (EXPERIMENTAL)"""
     if len(args) != 1:
         # i18n: "obsfateusers" is a keyword
-        raise error.ParseError(_("obsfateusers expects one argument"))
+        raise error.ParseError(_(b"obsfateusers expects one argument"))
 
     markers = evalfuncarg(context, mapping, args[0])
 
     try:
         data = obsutil.markersusers(markers)
-        return templateutil.hybridlist(data, name='user')
+        return templateutil.hybridlist(data, name=b'user')
     except (TypeError, KeyError, ValueError):
         # i18n: "obsfateusers" is a keyword
-        msg = _("obsfateusers first argument should be an iterable of "
-                "obsmakers")
+        msg = _(
+            b"obsfateusers first argument should be an iterable of "
+            b"obsmarkers"
+        )
         raise error.ParseError(msg)
 
-@templatefunc('obsfateverb(successors, markers)')
+
+@templatefunc(b'obsfateverb(successors, markers)')
 def obsfateverb(context, mapping, args):
     """Compute obsfate related information based on successors (EXPERIMENTAL)"""
     if len(args) != 2:
         # i18n: "obsfateverb" is a keyword
-        raise error.ParseError(_("obsfateverb expects two arguments"))
+        raise error.ParseError(_(b"obsfateverb expects two arguments"))
 
     successors = evalfuncarg(context, mapping, args[0])
     markers = evalfuncarg(context, mapping, args[1])
@@ -560,31 +627,33 @@
         return obsutil.obsfateverb(successors, markers)
     except TypeError:
         # i18n: "obsfateverb" is a keyword
-        errmsg = _("obsfateverb first argument should be countable")
+        errmsg = _(b"obsfateverb first argument should be countable")
         raise error.ParseError(errmsg)
 
-@templatefunc('relpath(path)', requires={'repo'})
+
+@templatefunc(b'relpath(path)', requires={b'repo'})
 def relpath(context, mapping, args):
     """Convert a repository-absolute path into a filesystem path relative to
     the current working directory."""
     if len(args) != 1:
         # i18n: "relpath" is a keyword
-        raise error.ParseError(_("relpath expects one argument"))
+        raise error.ParseError(_(b"relpath expects one argument"))
 
-    repo = context.resource(mapping, 'repo')
+    repo = context.resource(mapping, b'repo')
     path = evalstring(context, mapping, args[0])
     return repo.pathto(path)
 
-@templatefunc('revset(query[, formatargs...])', requires={'repo', 'cache'})
+
+@templatefunc(b'revset(query[, formatargs...])', requires={b'repo', b'cache'})
 def revset(context, mapping, args):
     """Execute a revision set query. See
     :hg:`help revset`."""
     if not len(args) > 0:
         # i18n: "revset" is a keyword
-        raise error.ParseError(_("revset expects one or more arguments"))
+        raise error.ParseError(_(b"revset expects one or more arguments"))
 
     raw = evalstring(context, mapping, args[0])
-    repo = context.resource(mapping, 'repo')
+    repo = context.resource(mapping, b'repo')
 
     def query(expr):
         m = revsetmod.match(repo.ui, expr, lookup=revsetmod.lookupfn(repo))
@@ -594,28 +663,30 @@
         formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
         revs = query(revsetlang.formatspec(raw, *formatargs))
     else:
-        cache = context.resource(mapping, 'cache')
-        revsetcache = cache.setdefault("revsetcache", {})
+        cache = context.resource(mapping, b'cache')
+        revsetcache = cache.setdefault(b"revsetcache", {})
         if raw in revsetcache:
             revs = revsetcache[raw]
         else:
             revs = query(raw)
             revsetcache[raw] = revs
-    return templatekw.showrevslist(context, mapping, "revision", revs)
+    return templatekw.showrevslist(context, mapping, b"revision", revs)
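
revset() memoizes un-parameterized queries in the shared cache
resource, so a template evaluating the same revset for every changeset
pays only once; %-formatted queries bypass the cache. The pattern in
isolation:

    cache = {}  # stands in for context.resource(mapping, b'cache')

    def cachedquery(raw, query):
        revsetcache = cache.setdefault(b'revsetcache', {})
        if raw in revsetcache:
            return revsetcache[raw]
        revs = revsetcache[raw] = query(raw)
        return revs

    calls = []
    def fakequery(expr):
        calls.append(expr)
        return [0, 1, 2]

    assert cachedquery(b'all()', fakequery) == [0, 1, 2]
    assert cachedquery(b'all()', fakequery) == [0, 1, 2]
    assert calls == [b'all()']  # second call served from the cache
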
 
-@templatefunc('rstdoc(text, style)')
+
+@templatefunc(b'rstdoc(text, style)')
 def rstdoc(context, mapping, args):
     """Format reStructuredText."""
     if len(args) != 2:
         # i18n: "rstdoc" is a keyword
-        raise error.ParseError(_("rstdoc expects two arguments"))
+        raise error.ParseError(_(b"rstdoc expects two arguments"))
 
     text = evalstring(context, mapping, args[0])
     style = evalstring(context, mapping, args[1])
 
-    return minirst.format(text, style=style, keep=['verbose'])
+    return minirst.format(text, style=style, keep=[b'verbose'])
 
-@templatefunc('search(pattern, text)')
+
+@templatefunc(b'search(pattern, text)')
 def search(context, mapping, args):
     """Look for the first text matching the regular expression pattern.
     Groups are accessible as ``{1}``, ``{2}``, ... in %-mapped template."""
@@ -631,14 +702,18 @@
         # i18n: "search" is a keyword
         raise error.ParseError(_(b'search got an invalid pattern: %s') % pat)
     # named groups shouldn't shadow *reserved* resource keywords
-    badgroups = (context.knownresourcekeys()
-                 & set(pycompat.byteskwargs(patre.groupindex)))
+    badgroups = context.knownresourcekeys() & set(
+        pycompat.byteskwargs(patre.groupindex)
+    )
     if badgroups:
         raise error.ParseError(
             # i18n: "search" is a keyword
             _(b'invalid group %(group)s in search pattern: %(pat)s')
-            % {b'group': b', '.join("'%s'" % g for g in sorted(badgroups)),
-               b'pat': pat})
+            % {
+                b'group': b', '.join(b"'%s'" % g for g in sorted(badgroups)),
+                b'pat': pat,
+            }
+        )
 
     match = patre.search(src)
     if not match:
@@ -649,16 +724,17 @@
     lm.update(pycompat.byteskwargs(match.groupdict()))
     return templateutil.mappingdict(lm, tmpl=b'{0}')
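
search() exposes the pattern's named groups as template keywords, after
rejecting groups that would shadow reserved resource names. The core
translation with plain re (encoding the str group names mirrors what
pycompat.byteskwargs does):

    import re

    pat = re.compile(br'(?P<user>\w+)@(?P<domain>[\w.]+)')
    m = pat.search(b'author: alice@example.com')
    lm = {b'0': m.group(0)}
    lm.update({k.encode(): v for k, v in m.groupdict().items()})
    print(lm)
    # {b'0': b'alice@example.com', b'user': b'alice', b'domain': b'example.com'}
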
 
-@templatefunc('separate(sep, args...)', argspec='sep *args')
+
+@templatefunc(b'separate(sep, args...)', argspec=b'sep *args')
 def separate(context, mapping, args):
     """Add a separator between non-empty arguments."""
-    if 'sep' not in args:
+    if b'sep' not in args:
         # i18n: "separate" is a keyword
-        raise error.ParseError(_("separate expects at least one argument"))
+        raise error.ParseError(_(b"separate expects at least one argument"))
 
-    sep = evalstring(context, mapping, args['sep'])
+    sep = evalstring(context, mapping, args[b'sep'])
     first = True
-    for arg in args['args']:
+    for arg in args[b'args']:
         argstr = evalstring(context, mapping, arg)
         if not argstr:
             continue
@@ -668,23 +744,28 @@
             yield sep
         yield argstr
 
-@templatefunc('shortest(node, minlength=4)', requires={'repo', 'cache'})
+
+@templatefunc(b'shortest(node, minlength=4)', requires={b'repo', b'cache'})
 def shortest(context, mapping, args):
     """Obtain the shortest representation of
     a node."""
     if not (1 <= len(args) <= 2):
         # i18n: "shortest" is a keyword
-        raise error.ParseError(_("shortest() expects one or two arguments"))
+        raise error.ParseError(_(b"shortest() expects one or two arguments"))
 
     hexnode = evalstring(context, mapping, args[0])
 
     minlength = 4
     if len(args) > 1:
-        minlength = evalinteger(context, mapping, args[1],
-                                # i18n: "shortest" is a keyword
-                                _("shortest() expects an integer minlength"))
+        minlength = evalinteger(
+            context,
+            mapping,
+            args[1],
+            # i18n: "shortest" is a keyword
+            _(b"shortest() expects an integer minlength"),
+        )
 
-    repo = context.resource(mapping, 'repo')
+    repo = context.resource(mapping, b'repo')
     if len(hexnode) > 40:
         return hexnode
     elif len(hexnode) == 40:
@@ -701,19 +782,20 @@
             return hexnode
         if not node:
             return hexnode
-    cache = context.resource(mapping, 'cache')
+    cache = context.resource(mapping, b'cache')
     try:
         return scmutil.shortesthexnodeidprefix(repo, node, minlength, cache)
     except error.RepoLookupError:
         return hexnode
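
A usage sketch for ``shortest()``; the minlength argument is optional
and defaults to 4::

   $ hg log -r . -T "{shortest(node, 6)}\n"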
 
-@templatefunc('strip(text[, chars])')
+
+@templatefunc(b'strip(text[, chars])')
 def strip(context, mapping, args):
     """Strip characters from a string. By default,
     strips all leading and trailing whitespace."""
     if not (1 <= len(args) <= 2):
         # i18n: "strip" is a keyword
-        raise error.ParseError(_("strip expects one or two arguments"))
+        raise error.ParseError(_(b"strip expects one or two arguments"))
 
     text = evalstring(context, mapping, args[0])
     if len(args) == 2:
@@ -721,13 +803,14 @@
         return text.strip(chars)
     return text.strip()
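
A usage sketch for ``strip()`` without the optional chars argument::

   $ hg log -r . -T "{strip(desc)}\n"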
 
-@templatefunc('sub(pattern, replacement, expression)')
+
+@templatefunc(b'sub(pattern, replacement, expression)')
 def sub(context, mapping, args):
     """Perform text substitution
     using regular expressions."""
     if len(args) != 3:
         # i18n: "sub" is a keyword
-        raise error.ParseError(_("sub expects three arguments"))
+        raise error.ParseError(_(b"sub expects three arguments"))
 
     pat = evalstring(context, mapping, args[0])
     rpl = evalstring(context, mapping, args[1])
@@ -736,38 +819,45 @@
         patre = re.compile(pat)
     except re.error:
         # i18n: "sub" is a keyword
-        raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
+        raise error.ParseError(_(b"sub got an invalid pattern: %s") % pat)
     try:
         yield patre.sub(rpl, src)
     except re.error:
         # i18n: "sub" is a keyword
-        raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
+        raise error.ParseError(_(b"sub got an invalid replacement: %s") % rpl)
 
-@templatefunc('startswith(pattern, text)')
+
+@templatefunc(b'startswith(pattern, text)')
 def startswith(context, mapping, args):
     """Returns the value from the "text" argument
     if it begins with the content from the "pattern" argument."""
     if len(args) != 2:
         # i18n: "startswith" is a keyword
-        raise error.ParseError(_("startswith expects two arguments"))
+        raise error.ParseError(_(b"startswith expects two arguments"))
 
     patn = evalstring(context, mapping, args[0])
     text = evalstring(context, mapping, args[1])
     if text.startswith(patn):
         return text
-    return ''
+    return b''
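
A usage sketch for ``startswith()``; the description is echoed only when
it begins with the given prefix::

   $ hg log -r . -T "{startswith('fix', desc)}"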
 
-@templatefunc('word(number, text[, separator])')
+
+@templatefunc(b'word(number, text[, separator])')
 def word(context, mapping, args):
     """Return the nth word from a string."""
     if not (2 <= len(args) <= 3):
         # i18n: "word" is a keyword
-        raise error.ParseError(_("word expects two or three arguments, got %d")
-                               % len(args))
+        raise error.ParseError(
+            _(b"word expects two or three arguments, got %d") % len(args)
+        )
 
-    num = evalinteger(context, mapping, args[0],
-                      # i18n: "word" is a keyword
-                      _("word expects an integer index"))
+    num = evalinteger(
+        context,
+        mapping,
+        args[0],
+        # i18n: "word" is a keyword
+        _(b"word expects an integer index"),
+    )
     text = evalstring(context, mapping, args[1])
     if len(args) == 3:
         splitter = evalstring(context, mapping, args[2])
@@ -776,15 +866,17 @@
 
     tokens = text.split(splitter)
     if num >= len(tokens) or num < -len(tokens):
-        return ''
+        return b''
     else:
         return tokens[num]
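
A usage sketch for ``word()``; indexes are zero-based and may be
negative::

   $ hg log -r . -T "{word(0, desc)}\n"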
 
+
 def loadfunction(ui, extname, registrarobj):
     """Load template function from specified registrarobj
     """
-    for name, func in registrarobj._table.iteritems():
+    for name, func in pycompat.iteritems(registrarobj._table):
         funcs[name] = func
 
+
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = funcs.values()
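
To show the registration convention these hunks converge on, here is a
minimal, hypothetical extension-side sketch. The function name
``upperfirst`` and its behavior are invented, but the byte-string names,
argument checking, and i18n comment mirror the in-tree functions::

   from mercurial import error, registrar, templateutil
   from mercurial.i18n import _

   templatefunc = registrar.templatefunc()

   @templatefunc(b'upperfirst(text)')
   def upperfirst(context, mapping, args):
       """Uppercase the first byte of a string. (hypothetical example)"""
       if len(args) != 1:
           # i18n: "upperfirst" is a keyword
           raise error.ParseError(_(b"upperfirst expects one argument"))
       text = templateutil.evalstring(context, mapping, args[0])
       return text[:1].upper() + text[1:]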
--- a/mercurial/templatekw.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/templatekw.py	Mon Oct 21 11:09:48 2019 -0400
@@ -29,9 +29,7 @@
     templateutil,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
 _hybrid = templateutil.hybrid
 hybriddict = templateutil.hybriddict
@@ -40,15 +38,16 @@
 compatlist = templateutil.compatlist
 _showcompatlist = templateutil._showcompatlist
 
+
 def getlatesttags(context, mapping, pattern=None):
     '''return date, distance and name for the latest tag of rev'''
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
-    cache = context.resource(mapping, 'cache')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
+    cache = context.resource(mapping, b'cache')
 
-    cachename = 'latesttags'
+    cachename = b'latesttags'
     if pattern is not None:
-        cachename += '-' + pattern
+        cachename += b'-' + pattern
         match = stringutil.stringmatcher(pattern)[2]
     else:
         match = util.always
@@ -56,7 +55,7 @@
     if cachename not in cache:
         # Cache mapping from rev to a tuple with tag date, tag
         # distance and tag name
-        cache[cachename] = {-1: (0, 0, ['null'])}
+        cache[cachename] = {-1: (0, 0, [b'null'])}
     latesttags = cache[cachename]
 
     rev = ctx.rev()
@@ -66,9 +65,11 @@
         if rev in latesttags:
             continue
         ctx = repo[rev]
-        tags = [t for t in ctx.tags()
-                if (repo.tagtype(t) and repo.tagtype(t) != 'local'
-                    and match(t))]
+        tags = [
+            t
+            for t in ctx.tags()
+            if (repo.tagtype(t) and repo.tagtype(t) != b'local' and match(t))
+        ]
         if tags:
             latesttags[rev] = ctx.date()[0], 0, [t for t in sorted(tags)]
             continue
@@ -80,19 +81,21 @@
                     # comparison in this case.
                     pdate, pdist, ptag = max(ptags)
                 else:
+
                     def key(x):
                         tag = x[2][0]
                         if ctx.rev() is None:
                             # only() doesn't support wdir
                             prevs = [c.rev() for c in ctx.parents()]
-                            changes = repo.revs('only(%ld, %s)', prevs, tag)
+                            changes = repo.revs(b'only(%ld, %s)', prevs, tag)
                             changessincetag = len(changes) + 1
                         else:
-                            changes = repo.revs('only(%d, %s)', ctx.rev(), tag)
+                            changes = repo.revs(b'only(%d, %s)', ctx.rev(), tag)
                             changessincetag = len(changes)
                         # Smallest number of changes since tag wins. Date is
                         # used as tiebreaker.
                         return [-changessincetag, x[0]]
+
                     pdate, pdist, ptag = max(ptags, key=key)
             else:
                 pdate, pdist, ptag = ptags[0]
@@ -104,130 +107,151 @@
         latesttags[rev] = pdate, pdist + 1, ptag
     return latesttags[rev]
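
The cache this helper fills has a simple shape; the non-null entry below
is invented for illustration::

   # {rev: (tag date, distance, [tag names])}, seeded with the null rev
   {-1: (0, 0, [b'null']),
    42: (1571666988, 3, [b'5.1'])}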
 
+
 def getlogcolumns():
     """Return a dict of log column labels"""
     _ = pycompat.identity  # temporarily disable gettext
     # i18n: column positioning for "hg log"
-    columns = _('bookmark:    %s\n'
-                'branch:      %s\n'
-                'changeset:   %s\n'
-                'copies:      %s\n'
-                'date:        %s\n'
-                'extra:       %s=%s\n'
-                'files+:      %s\n'
-                'files-:      %s\n'
-                'files:       %s\n'
-                'instability: %s\n'
-                'manifest:    %s\n'
-                'obsolete:    %s\n'
-                'parent:      %s\n'
-                'phase:       %s\n'
-                'summary:     %s\n'
-                'tag:         %s\n'
-                'user:        %s\n')
-    return dict(zip([s.split(':', 1)[0] for s in columns.splitlines()],
-                    i18n._(columns).splitlines(True)))
+    columns = _(
+        b'bookmark:    %s\n'
+        b'branch:      %s\n'
+        b'changeset:   %s\n'
+        b'copies:      %s\n'
+        b'date:        %s\n'
+        b'extra:       %s=%s\n'
+        b'files+:      %s\n'
+        b'files-:      %s\n'
+        b'files:       %s\n'
+        b'instability: %s\n'
+        b'manifest:    %s\n'
+        b'obsolete:    %s\n'
+        b'parent:      %s\n'
+        b'phase:       %s\n'
+        b'summary:     %s\n'
+        b'tag:         %s\n'
+        b'user:        %s\n'
+    )
+    return dict(
+        zip(
+            [s.split(b':', 1)[0] for s in columns.splitlines()],
+            i18n._(columns).splitlines(True),
+        )
+    )
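
The returned mapping keys each untranslated label prefix to its
translated, newline-terminated column; illustratively::

   {b'bookmark': b'bookmark:    %s\n',
    b'branch': b'branch:      %s\n',
    ...}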
+
 
 # basic internal templates
-_changeidtmpl = '{rev}:{node|formatnode}'
+_changeidtmpl = b'{rev}:{node|formatnode}'
 
 # default templates internally used for rendering of lists
 defaulttempl = {
-    'parent': _changeidtmpl + ' ',
-    'manifest': _changeidtmpl,
-    'file_copy': '{name} ({source})',
-    'envvar': '{key}={value}',
-    'extra': '{key}={value|stringescape}'
+    b'parent': _changeidtmpl + b' ',
+    b'manifest': _changeidtmpl,
+    b'file_copy': b'{name} ({source})',
+    b'envvar': b'{key}={value}',
+    b'extra': b'{key}={value|stringescape}',
 }
 # filecopy is preserved for compatibility reasons
-defaulttempl['filecopy'] = defaulttempl['file_copy']
+defaulttempl[b'filecopy'] = defaulttempl[b'file_copy']
 
 # keywords are callables (see registrar.templatekeyword for details)
 keywords = {}
 templatekeyword = registrar.templatekeyword(keywords)
 
-@templatekeyword('author', requires={'ctx'})
+
+@templatekeyword(b'author', requires={b'ctx'})
 def showauthor(context, mapping):
     """Alias for ``{user}``"""
     return showuser(context, mapping)
 
-@templatekeyword('bisect', requires={'repo', 'ctx'})
+
+@templatekeyword(b'bisect', requires={b'repo', b'ctx'})
 def showbisect(context, mapping):
     """String. The changeset bisection status."""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     return hbisect.label(repo, ctx.node())
 
-@templatekeyword('branch', requires={'ctx'})
+
+@templatekeyword(b'branch', requires={b'ctx'})
 def showbranch(context, mapping):
     """String. The name of the branch on which the changeset was
     committed.
     """
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     return ctx.branch()
 
-@templatekeyword('branches', requires={'ctx'})
+
+@templatekeyword(b'branches', requires={b'ctx'})
 def showbranches(context, mapping):
     """List of strings. The name of the branch on which the
     changeset was committed. Will be empty if the branch name was
     default. (DEPRECATED)
     """
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     branch = ctx.branch()
-    if branch != 'default':
-        return compatlist(context, mapping, 'branch', [branch],
-                          plural='branches')
-    return compatlist(context, mapping, 'branch', [], plural='branches')
+    if branch != b'default':
+        return compatlist(
+            context, mapping, b'branch', [branch], plural=b'branches'
+        )
+    return compatlist(context, mapping, b'branch', [], plural=b'branches')
 
-@templatekeyword('bookmarks', requires={'repo', 'ctx'})
+
+@templatekeyword(b'bookmarks', requires={b'repo', b'ctx'})
 def showbookmarks(context, mapping):
     """List of strings. Any bookmarks associated with the
     changeset. Also sets 'active', the name of the active bookmark.
     """
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     bookmarks = ctx.bookmarks()
     active = repo._activebookmark
-    makemap = lambda v: {'bookmark': v, 'active': active, 'current': active}
-    f = _showcompatlist(context, mapping, 'bookmark', bookmarks)
+    makemap = lambda v: {b'bookmark': v, b'active': active, b'current': active}
+    f = _showcompatlist(context, mapping, b'bookmark', bookmarks)
     return _hybrid(f, bookmarks, makemap, pycompat.identity)
 
-@templatekeyword('children', requires={'ctx'})
+
+@templatekeyword(b'children', requires={b'ctx'})
 def showchildren(context, mapping):
     """List of strings. The children of the changeset."""
-    ctx = context.resource(mapping, 'ctx')
-    childrevs = ['%d:%s' % (cctx.rev(), cctx) for cctx in ctx.children()]
-    return compatlist(context, mapping, 'children', childrevs, element='child')
+    ctx = context.resource(mapping, b'ctx')
+    childrevs = [b'%d:%s' % (cctx.rev(), cctx) for cctx in ctx.children()]
+    return compatlist(
+        context, mapping, b'children', childrevs, element=b'child'
+    )
+
 
 # Deprecated, but kept alive for help generation purposes.
-@templatekeyword('currentbookmark', requires={'repo', 'ctx'})
+@templatekeyword(b'currentbookmark', requires={b'repo', b'ctx'})
 def showcurrentbookmark(context, mapping):
     """String. The active bookmark, if it is associated with the changeset.
     (DEPRECATED)"""
     return showactivebookmark(context, mapping)
 
-@templatekeyword('activebookmark', requires={'repo', 'ctx'})
+
+@templatekeyword(b'activebookmark', requires={b'repo', b'ctx'})
 def showactivebookmark(context, mapping):
     """String. The active bookmark, if it is associated with the changeset."""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     active = repo._activebookmark
     if active and active in ctx.bookmarks():
         return active
-    return ''
+    return b''
 
-@templatekeyword('date', requires={'ctx'})
+
+@templatekeyword(b'date', requires={b'ctx'})
 def showdate(context, mapping):
     """Date information. The date when the changeset was committed."""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     # the default string format is '<float(unixtime)><tzoffset>' because
     # python-hglib splits date at decimal separator.
-    return templateutil.date(ctx.date(), showfmt='%d.0%d')
+    return templateutil.date(ctx.date(), showfmt=b'%d.0%d')
 
-@templatekeyword('desc', requires={'ctx'})
+
+@templatekeyword(b'desc', requires={b'ctx'})
 def showdescription(context, mapping):
     """String. The text of the changeset description."""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     s = ctx.description()
     if isinstance(s, encoding.localstr):
         # try hard to preserve utf-8 bytes
@@ -237,161 +261,190 @@
     else:
         return s.strip()
 
-@templatekeyword('diffstat', requires={'ui', 'ctx'})
+
+@templatekeyword(b'diffstat', requires={b'ui', b'ctx'})
 def showdiffstat(context, mapping):
     """String. Statistics of changes with the following format:
     "modified files: +added/-removed lines"
     """
-    ui = context.resource(mapping, 'ui')
-    ctx = context.resource(mapping, 'ctx')
-    diffopts = diffutil.diffallopts(ui, {'noprefix': False})
+    ui = context.resource(mapping, b'ui')
+    ctx = context.resource(mapping, b'ctx')
+    diffopts = diffutil.diffallopts(ui, {b'noprefix': False})
     diff = ctx.diff(opts=diffopts)
     stats = patch.diffstatdata(util.iterlines(diff))
     maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats)
-    return '%d: +%d/-%d' % (len(stats), adds, removes)
+    return b'%d: +%d/-%d' % (len(stats), adds, removes)
 
-@templatekeyword('envvars', requires={'ui'})
+
+@templatekeyword(b'envvars', requires={b'ui'})
 def showenvvars(context, mapping):
     """A dictionary of environment variables. (EXPERIMENTAL)"""
-    ui = context.resource(mapping, 'ui')
+    ui = context.resource(mapping, b'ui')
     env = ui.exportableenviron()
     env = util.sortdict((k, env[k]) for k in sorted(env))
-    return compatdict(context, mapping, 'envvar', env, plural='envvars')
+    return compatdict(context, mapping, b'envvar', env, plural=b'envvars')
 
-@templatekeyword('extras', requires={'ctx'})
+
+@templatekeyword(b'extras', requires={b'ctx'})
 def showextras(context, mapping):
     """List of dicts with key, value entries of the 'extras'
     field of this changeset."""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     extras = ctx.extra()
     extras = util.sortdict((k, extras[k]) for k in sorted(extras))
-    makemap = lambda k: {'key': k, 'value': extras[k]}
+    makemap = lambda k: {b'key': k, b'value': extras[k]}
     c = [makemap(k) for k in extras]
-    f = _showcompatlist(context, mapping, 'extra', c, plural='extras')
-    return _hybrid(f, extras, makemap,
-                   lambda k: '%s=%s' % (k, stringutil.escapestr(extras[k])))
+    f = _showcompatlist(context, mapping, b'extra', c, plural=b'extras')
+    return _hybrid(
+        f,
+        extras,
+        makemap,
+        lambda k: b'%s=%s' % (k, stringutil.escapestr(extras[k])),
+    )
+
 
 def _getfilestatus(context, mapping, listall=False):
-    ctx = context.resource(mapping, 'ctx')
-    revcache = context.resource(mapping, 'revcache')
-    if 'filestatus' not in revcache or revcache['filestatusall'] < listall:
-        stat = ctx.p1().status(ctx, listignored=listall, listclean=listall,
-                               listunknown=listall)
-        revcache['filestatus'] = stat
-        revcache['filestatusall'] = listall
-    return revcache['filestatus']
+    ctx = context.resource(mapping, b'ctx')
+    revcache = context.resource(mapping, b'revcache')
+    if b'filestatus' not in revcache or revcache[b'filestatusall'] < listall:
+        stat = ctx.p1().status(
+            ctx, listignored=listall, listclean=listall, listunknown=listall
+        )
+        revcache[b'filestatus'] = stat
+        revcache[b'filestatusall'] = listall
+    return revcache[b'filestatus']
+
 
 def _getfilestatusmap(context, mapping, listall=False):
-    revcache = context.resource(mapping, 'revcache')
-    if 'filestatusmap' not in revcache or revcache['filestatusall'] < listall:
+    revcache = context.resource(mapping, b'revcache')
+    if b'filestatusmap' not in revcache or revcache[b'filestatusall'] < listall:
         stat = _getfilestatus(context, mapping, listall=listall)
-        revcache['filestatusmap'] = statmap = {}
-        for char, files in zip(pycompat.iterbytestr('MAR!?IC'), stat):
+        revcache[b'filestatusmap'] = statmap = {}
+        for char, files in zip(pycompat.iterbytestr(b'MAR!?IC'), stat):
             statmap.update((f, char) for f in files)
-    return revcache['filestatusmap']  # {path: statchar}
+    return revcache[b'filestatusmap']  # {path: statchar}
+
 
-@templatekeyword('file_copies',
-                 requires={'repo', 'ctx', 'cache', 'revcache'})
+@templatekeyword(
+    b'file_copies', requires={b'repo', b'ctx', b'cache', b'revcache'}
+)
 def showfilecopies(context, mapping):
     """List of strings. Files copied in this changeset with
     their sources.
     """
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
-    cache = context.resource(mapping, 'cache')
-    copies = context.resource(mapping, 'revcache').get('copies')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
+    cache = context.resource(mapping, b'cache')
+    copies = context.resource(mapping, b'revcache').get(b'copies')
     if copies is None:
-        if 'getcopies' not in cache:
-            cache['getcopies'] = scmutil.getcopiesfn(repo)
-        getcopies = cache['getcopies']
+        if b'getcopies' not in cache:
+            cache[b'getcopies'] = scmutil.getcopiesfn(repo)
+        getcopies = cache[b'getcopies']
         copies = getcopies(ctx)
-    return templateutil.compatfilecopiesdict(context, mapping, 'file_copy',
-                                             copies)
+    return templateutil.compatfilecopiesdict(
+        context, mapping, b'file_copy', copies
+    )
+
 
 # showfilecopiesswitch() displays file copies only if copy records are
 # provided before calling the templater, usually with a --copies
 # command line switch.
-@templatekeyword('file_copies_switch', requires={'revcache'})
+@templatekeyword(b'file_copies_switch', requires={b'revcache'})
 def showfilecopiesswitch(context, mapping):
     """List of strings. Like "file_copies" but displayed
     only if the --copies switch is set.
     """
-    copies = context.resource(mapping, 'revcache').get('copies') or []
-    return templateutil.compatfilecopiesdict(context, mapping, 'file_copy',
-                                             copies)
+    copies = context.resource(mapping, b'revcache').get(b'copies') or []
+    return templateutil.compatfilecopiesdict(
+        context, mapping, b'file_copy', copies
+    )
 
-@templatekeyword('file_adds', requires={'ctx', 'revcache'})
+
+@templatekeyword(b'file_adds', requires={b'ctx', b'revcache'})
 def showfileadds(context, mapping):
     """List of strings. Files added by this changeset."""
-    ctx = context.resource(mapping, 'ctx')
-    return templateutil.compatfileslist(context, mapping, 'file_add',
-                                        ctx.filesadded())
+    ctx = context.resource(mapping, b'ctx')
+    return templateutil.compatfileslist(
+        context, mapping, b'file_add', ctx.filesadded()
+    )
 
-@templatekeyword('file_dels', requires={'ctx', 'revcache'})
+
+@templatekeyword(b'file_dels', requires={b'ctx', b'revcache'})
 def showfiledels(context, mapping):
     """List of strings. Files removed by this changeset."""
-    ctx = context.resource(mapping, 'ctx')
-    return templateutil.compatfileslist(context, mapping, 'file_del',
-                                        ctx.filesremoved())
+    ctx = context.resource(mapping, b'ctx')
+    return templateutil.compatfileslist(
+        context, mapping, b'file_del', ctx.filesremoved()
+    )
 
-@templatekeyword('file_mods', requires={'ctx', 'revcache'})
+
+@templatekeyword(b'file_mods', requires={b'ctx', b'revcache'})
 def showfilemods(context, mapping):
     """List of strings. Files modified by this changeset."""
-    ctx = context.resource(mapping, 'ctx')
-    return templateutil.compatfileslist(context, mapping, 'file_mod',
-                                        ctx.filesmodified())
+    ctx = context.resource(mapping, b'ctx')
+    return templateutil.compatfileslist(
+        context, mapping, b'file_mod', ctx.filesmodified()
+    )
 
-@templatekeyword('files', requires={'ctx'})
+
+@templatekeyword(b'files', requires={b'ctx'})
 def showfiles(context, mapping):
     """List of strings. All files modified, added, or removed by this
     changeset.
     """
-    ctx = context.resource(mapping, 'ctx')
-    return templateutil.compatfileslist(context, mapping, 'file', ctx.files())
+    ctx = context.resource(mapping, b'ctx')
+    return templateutil.compatfileslist(context, mapping, b'file', ctx.files())
 
-@templatekeyword('graphnode', requires={'repo', 'ctx'})
+
+@templatekeyword(b'graphnode', requires={b'repo', b'ctx'})
 def showgraphnode(context, mapping):
     """String. The character representing the changeset node in an ASCII
     revision graph."""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     return getgraphnode(repo, ctx)
 
+
 def getgraphnode(repo, ctx):
     return getgraphnodecurrent(repo, ctx) or getgraphnodesymbol(ctx)
 
+
 def getgraphnodecurrent(repo, ctx):
     wpnodes = repo.dirstate.parents()
     if wpnodes[1] == nullid:
         wpnodes = wpnodes[:1]
     if ctx.node() in wpnodes:
-        return '@'
+        return b'@'
     else:
-        return ''
+        return b''
+
 
 def getgraphnodesymbol(ctx):
     if ctx.obsolete():
-        return 'x'
+        return b'x'
     elif ctx.isunstable():
-        return '*'
+        return b'*'
     elif ctx.closesbranch():
-        return '_'
+        return b'_'
     else:
-        return 'o'
+        return b'o'
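
For reference, the glyphs these two helpers can produce in
'hg log --graph' output::

   @   working-directory parent
   x   obsolete
   *   unstable
   _   closes its branch
   o   any other changeset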
 
-@templatekeyword('graphwidth', requires=())
+
+@templatekeyword(b'graphwidth', requires=())
 def showgraphwidth(context, mapping):
     """Integer. The width of the graph drawn by 'log --graph' or zero."""
     # just hosts documentation; should be overridden by template mapping
     return 0
 
-@templatekeyword('index', requires=())
+
+@templatekeyword(b'index', requires=())
 def showindex(context, mapping):
     """Integer. The current iteration of the loop. (0 indexed)"""
     # just hosts documentation; should be overridden by template mapping
-    raise error.Abort(_("can't use index in this context"))
+    raise error.Abort(_(b"can't use index in this context"))
 
-@templatekeyword('latesttag', requires={'repo', 'ctx', 'cache'})
+
+@templatekeyword(b'latesttag', requires={b'repo', b'ctx', b'cache'})
 def showlatesttag(context, mapping):
     """List of strings. The global tags on the most recent globally
     tagged ancestor of this changeset.  If no such tags exist, the list
@@ -399,6 +452,7 @@
     """
     return showlatesttags(context, mapping, None)
 
+
 def showlatesttags(context, mapping, pattern):
     """helper method for the latesttag keyword and function"""
     latesttags = getlatesttags(context, mapping, pattern)
@@ -407,49 +461,54 @@
     # branches in a stable manner- it is the date the tagged cset was created,
     # not the date the tag was created.  Therefore it isn't made visible here.
     makemap = lambda v: {
-        'changes': _showchangessincetag,
-        'distance': latesttags[1],
-        'latesttag': v,   # BC with {latesttag % '{latesttag}'}
-        'tag': v
+        b'changes': _showchangessincetag,
+        b'distance': latesttags[1],
+        b'latesttag': v,  # BC with {latesttag % '{latesttag}'}
+        b'tag': v,
     }
 
     tags = latesttags[2]
-    f = _showcompatlist(context, mapping, 'latesttag', tags, separator=':')
+    f = _showcompatlist(context, mapping, b'latesttag', tags, separator=b':')
     return _hybrid(f, tags, makemap, pycompat.identity)
 
-@templatekeyword('latesttagdistance', requires={'repo', 'ctx', 'cache'})
+
+@templatekeyword(b'latesttagdistance', requires={b'repo', b'ctx', b'cache'})
 def showlatesttagdistance(context, mapping):
     """Integer. Longest path to the latest tag."""
     return getlatesttags(context, mapping)[1]
 
-@templatekeyword('changessincelatesttag', requires={'repo', 'ctx', 'cache'})
+
+@templatekeyword(b'changessincelatesttag', requires={b'repo', b'ctx', b'cache'})
 def showchangessincelatesttag(context, mapping):
     """Integer. All ancestors not in the latest tag."""
     tag = getlatesttags(context, mapping)[2][0]
-    mapping = context.overlaymap(mapping, {'tag': tag})
+    mapping = context.overlaymap(mapping, {b'tag': tag})
     return _showchangessincetag(context, mapping)
 
+
 def _showchangessincetag(context, mapping):
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     offset = 0
     revs = [ctx.rev()]
-    tag = context.symbol(mapping, 'tag')
+    tag = context.symbol(mapping, b'tag')
 
     # The only() revset doesn't currently support wdir()
     if ctx.rev() is None:
         offset = 1
         revs = [p.rev() for p in ctx.parents()]
 
-    return len(repo.revs('only(%ld, %s)', revs, tag)) + offset
+    return len(repo.revs(b'only(%ld, %s)', revs, tag)) + offset
+
 
 # tell the templater that latesttags.changes uses the (context, mapping) API
-_showchangessincetag._requires = {'repo', 'ctx'}
+_showchangessincetag._requires = {b'repo', b'ctx'}
+
 
-@templatekeyword('manifest', requires={'repo', 'ctx'})
+@templatekeyword(b'manifest', requires={b'repo', b'ctx'})
 def showmanifest(context, mapping):
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     mnode = ctx.manifestnode()
     if mnode is None:
         mnode = wdirid
@@ -457,12 +516,14 @@
     else:
         mrev = repo.manifestlog.rev(mnode)
     mhex = hex(mnode)
-    mapping = context.overlaymap(mapping, {'rev': mrev, 'node': mhex})
-    f = context.process('manifest', mapping)
-    return templateutil.hybriditem(f, None, f,
-                                   lambda x: {'rev': mrev, 'node': mhex})
+    mapping = context.overlaymap(mapping, {b'rev': mrev, b'node': mhex})
+    f = context.process(b'manifest', mapping)
+    return templateutil.hybriditem(
+        f, None, f, lambda x: {b'rev': mrev, b'node': mhex}
+    )
 
-@templatekeyword('obsfate', requires={'ui', 'repo', 'ctx'})
+
+@templatekeyword(b'obsfate', requires={b'ui', b'repo', b'ctx'})
 def showobsfate(context, mapping):
     # this function returns a list containing pre-formatted obsfate strings.
     #
@@ -470,132 +531,153 @@
     # the verbosity templatekw available.
     succsandmarkers = showsuccsandmarkers(context, mapping)
 
-    ui = context.resource(mapping, 'ui')
-    repo = context.resource(mapping, 'repo')
+    ui = context.resource(mapping, b'ui')
+    repo = context.resource(mapping, b'repo')
     values = []
 
     for x in succsandmarkers.tovalue(context, mapping):
-        v = obsutil.obsfateprinter(ui, repo, x['successors'], x['markers'],
-                                   scmutil.formatchangeid)
+        v = obsutil.obsfateprinter(
+            ui, repo, x[b'successors'], x[b'markers'], scmutil.formatchangeid
+        )
         values.append(v)
 
-    return compatlist(context, mapping, "fate", values)
+    return compatlist(context, mapping, b"fate", values)
+
 
 def shownames(context, mapping, namespace):
     """helper method to generate a template keyword for a namespace"""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     ns = repo.names[namespace]
     names = ns.names(repo, ctx.node())
-    return compatlist(context, mapping, ns.templatename, names,
-                      plural=namespace)
+    return compatlist(
+        context, mapping, ns.templatename, names, plural=namespace
+    )
 
-@templatekeyword('namespaces', requires={'repo', 'ctx'})
+
+@templatekeyword(b'namespaces', requires={b'repo', b'ctx'})
 def shownamespaces(context, mapping):
     """Dict of lists. Names attached to this changeset per
     namespace."""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
 
     namespaces = util.sortdict()
+
     def makensmapfn(ns):
         # 'name' for iterating over namespaces, templatename for local reference
-        return lambda v: {'name': v, ns.templatename: v}
+        return lambda v: {b'name': v, ns.templatename: v}
 
-    for k, ns in repo.names.iteritems():
+    for k, ns in pycompat.iteritems(repo.names):
         names = ns.names(repo, ctx.node())
-        f = _showcompatlist(context, mapping, 'name', names)
+        f = _showcompatlist(context, mapping, b'name', names)
         namespaces[k] = _hybrid(f, names, makensmapfn(ns), pycompat.identity)
 
-    f = _showcompatlist(context, mapping, 'namespace', list(namespaces))
+    f = _showcompatlist(context, mapping, b'namespace', list(namespaces))
 
     def makemap(ns):
         return {
-            'namespace': ns,
-            'names': namespaces[ns],
-            'builtin': repo.names[ns].builtin,
-            'colorname': repo.names[ns].colorname,
+            b'namespace': ns,
+            b'names': namespaces[ns],
+            b'builtin': repo.names[ns].builtin,
+            b'colorname': repo.names[ns].colorname,
         }
 
     return _hybrid(f, namespaces, makemap, pycompat.identity)
 
-@templatekeyword('negrev', requires={'repo', 'ctx'})
+
+@templatekeyword(b'negrev', requires={b'repo', b'ctx'})
 def shownegrev(context, mapping):
     """Integer. The repository-local changeset negative revision number,
     which counts in the opposite direction."""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     rev = ctx.rev()
     if rev is None or rev < 0:  # wdir() or nullrev?
         return None
-    repo = context.resource(mapping, 'repo')
+    repo = context.resource(mapping, b'repo')
     return rev - len(repo)
 
-@templatekeyword('node', requires={'ctx'})
+
+@templatekeyword(b'node', requires={b'ctx'})
 def shownode(context, mapping):
     """String. The changeset identification hash, as a 40 hexadecimal
     digit string.
     """
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     return ctx.hex()
 
-@templatekeyword('obsolete', requires={'ctx'})
+
+@templatekeyword(b'obsolete', requires={b'ctx'})
 def showobsolete(context, mapping):
     """String. Whether the changeset is obsolete. (EXPERIMENTAL)"""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     if ctx.obsolete():
-        return 'obsolete'
-    return ''
+        return b'obsolete'
+    return b''
 
-@templatekeyword('path', requires={'fctx'})
+
+@templatekeyword(b'path', requires={b'fctx'})
 def showpath(context, mapping):
     """String. Repository-absolute path of the current file. (EXPERIMENTAL)"""
-    fctx = context.resource(mapping, 'fctx')
+    fctx = context.resource(mapping, b'fctx')
     return fctx.path()
 
-@templatekeyword('peerurls', requires={'repo'})
+
+@templatekeyword(b'peerurls', requires={b'repo'})
 def showpeerurls(context, mapping):
     """A dictionary of repository locations defined in the [paths] section
     of your configuration file."""
-    repo = context.resource(mapping, 'repo')
+    repo = context.resource(mapping, b'repo')
     # see commands.paths() for naming of dictionary keys
     paths = repo.ui.paths
-    urls = util.sortdict((k, p.rawloc) for k, p in sorted(paths.iteritems()))
+    urls = util.sortdict(
+        (k, p.rawloc) for k, p in sorted(pycompat.iteritems(paths))
+    )
+
     def makemap(k):
         p = paths[k]
-        d = {'name': k, 'url': p.rawloc}
-        d.update((o, v) for o, v in sorted(p.suboptions.iteritems()))
+        d = {b'name': k, b'url': p.rawloc}
+        d.update((o, v) for o, v in sorted(pycompat.iteritems(p.suboptions)))
         return d
-    return _hybrid(None, urls, makemap, lambda k: '%s=%s' % (k, urls[k]))
+
+    return _hybrid(None, urls, makemap, lambda k: b'%s=%s' % (k, urls[k]))
 
-@templatekeyword("predecessors", requires={'repo', 'ctx'})
+
+@templatekeyword(b"predecessors", requires={b'repo', b'ctx'})
 def showpredecessors(context, mapping):
     """Returns the list of the closest visible predecessors. (EXPERIMENTAL)"""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     predecessors = sorted(obsutil.closestpredecessors(repo, ctx.node()))
     predecessors = pycompat.maplist(hex, predecessors)
 
-    return _hybrid(None, predecessors,
-                   lambda x: {'ctx': repo[x]},
-                   lambda x: scmutil.formatchangeid(repo[x]))
+    return _hybrid(
+        None,
+        predecessors,
+        lambda x: {b'ctx': repo[x]},
+        lambda x: scmutil.formatchangeid(repo[x]),
+    )
 
-@templatekeyword('reporoot', requires={'repo'})
+
+@templatekeyword(b'reporoot', requires={b'repo'})
 def showreporoot(context, mapping):
     """String. The root directory of the current repository."""
-    repo = context.resource(mapping, 'repo')
+    repo = context.resource(mapping, b'repo')
     return repo.root
 
-@templatekeyword('size', requires={'fctx'})
+
+@templatekeyword(b'size', requires={b'fctx'})
 def showsize(context, mapping):
     """Integer. Size of the current file in bytes. (EXPERIMENTAL)"""
-    fctx = context.resource(mapping, 'fctx')
+    fctx = context.resource(mapping, b'fctx')
     return fctx.size()
 
+
 # requires 'fctx' to denote that {status} depends on the (ctx, path) pair
-@templatekeyword('status', requires={'ctx', 'fctx', 'revcache'})
+@templatekeyword(b'status', requires={b'ctx', b'fctx', b'revcache'})
 def showstatus(context, mapping):
     """String. Status code of the current file. (EXPERIMENTAL)"""
-    path = templateutil.runsymbol(context, mapping, 'path')
+    path = templateutil.runsymbol(context, mapping, b'path')
     path = templateutil.stringify(context, mapping, path)
     if not path:
         return
@@ -604,23 +686,28 @@
         statmap = _getfilestatusmap(context, mapping, listall=True)
     return statmap.get(path)
 
-@templatekeyword("successorssets", requires={'repo', 'ctx'})
+
+@templatekeyword(b"successorssets", requires={b'repo', b'ctx'})
 def showsuccessorssets(context, mapping):
     """Returns a string of sets of successors for a changectx. Format used
     is: [ctx1, ctx2], [ctx3] if ctx has been split into ctx1 and ctx2
     and has also diverged into ctx3. (EXPERIMENTAL)"""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     if not ctx.obsolete():
-        return ''
+        return b''
 
     ssets = obsutil.successorssets(repo, ctx.node(), closest=True)
     ssets = [[hex(n) for n in ss] for ss in ssets]
 
     data = []
     for ss in ssets:
-        h = _hybrid(None, ss, lambda x: {'ctx': repo[x]},
-                    lambda x: scmutil.formatchangeid(repo[x]))
+        h = _hybrid(
+            None,
+            ss,
+            lambda x: {b'ctx': repo[x]},
+            lambda x: scmutil.formatchangeid(repo[x]),
+        )
         data.append(h)
 
     # Format the successorssets
@@ -628,20 +715,22 @@
         return templateutil.stringify(context, mapping, d)
 
     def gen(data):
-        yield "; ".join(render(d) for d in data)
+        yield b"; ".join(render(d) for d in data)
 
-    return _hybrid(gen(data), data, lambda x: {'successorset': x},
-                   pycompat.identity)
+    return _hybrid(
+        gen(data), data, lambda x: {b'successorset': x}, pycompat.identity
+    )
 
-@templatekeyword("succsandmarkers", requires={'repo', 'ctx'})
+
+@templatekeyword(b"succsandmarkers", requires={b'repo', b'ctx'})
 def showsuccsandmarkers(context, mapping):
     """Returns a list of dict for each final successor of ctx. The dict
     contains successors node id in "successors" keys and the list of
     obs-markers from ctx to the set of successors in "markers".
     (EXPERIMENTAL)
     """
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
 
     values = obsutil.successorsandmarkers(repo, ctx)
 
@@ -652,16 +741,19 @@
     data = []
     for i in values:
         # Format successors
-        successors = i['successors']
+        successors = i[b'successors']
 
         successors = [hex(n) for n in successors]
-        successors = _hybrid(None, successors,
-                             lambda x: {'ctx': repo[x]},
-                             lambda x: scmutil.formatchangeid(repo[x]))
+        successors = _hybrid(
+            None,
+            successors,
+            lambda x: {b'ctx': repo[x]},
+            lambda x: scmutil.formatchangeid(repo[x]),
+        )
 
         # Format markers
         finalmarkers = []
-        for m in i['markers']:
+        for m in i[b'markers']:
             hexprec = hex(m[0])
             hexsucs = tuple(hex(n) for n in m[1])
             hexparents = None
@@ -670,191 +762,231 @@
             newmarker = (hexprec, hexsucs) + m[2:5] + (hexparents,) + m[6:]
             finalmarkers.append(newmarker)
 
-        data.append({'successors': successors, 'markers': finalmarkers})
+        data.append({b'successors': successors, b'markers': finalmarkers})
 
     return templateutil.mappinglist(data)
 
-@templatekeyword('p1', requires={'ctx'})
+
+@templatekeyword(b'p1', requires={b'ctx'})
 def showp1(context, mapping):
     """Changeset. The changeset's first parent. ``{p1.rev}`` for the revision
     number, and ``{p1.node}`` for the identification hash."""
-    ctx = context.resource(mapping, 'ctx')
-    return templateutil.mappingdict({'ctx': ctx.p1()}, tmpl=_changeidtmpl)
+    ctx = context.resource(mapping, b'ctx')
+    return templateutil.mappingdict({b'ctx': ctx.p1()}, tmpl=_changeidtmpl)
 
-@templatekeyword('p2', requires={'ctx'})
+
+@templatekeyword(b'p2', requires={b'ctx'})
 def showp2(context, mapping):
     """Changeset. The changeset's second parent. ``{p2.rev}`` for the revision
     number, and ``{p2.node}`` for the identification hash."""
-    ctx = context.resource(mapping, 'ctx')
-    return templateutil.mappingdict({'ctx': ctx.p2()}, tmpl=_changeidtmpl)
+    ctx = context.resource(mapping, b'ctx')
+    return templateutil.mappingdict({b'ctx': ctx.p2()}, tmpl=_changeidtmpl)
 
-@templatekeyword('p1rev', requires={'ctx'})
+
+@templatekeyword(b'p1rev', requires={b'ctx'})
 def showp1rev(context, mapping):
     """Integer. The repository-local revision number of the changeset's
     first parent, or -1 if the changeset has no parents. (DEPRECATED)"""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     return ctx.p1().rev()
 
-@templatekeyword('p2rev', requires={'ctx'})
+
+@templatekeyword(b'p2rev', requires={b'ctx'})
 def showp2rev(context, mapping):
     """Integer. The repository-local revision number of the changeset's
     second parent, or -1 if the changeset has no second parent. (DEPRECATED)"""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     return ctx.p2().rev()
 
-@templatekeyword('p1node', requires={'ctx'})
+
+@templatekeyword(b'p1node', requires={b'ctx'})
 def showp1node(context, mapping):
     """String. The identification hash of the changeset's first parent,
     as a 40 digit hexadecimal string. If the changeset has no parents, all
     digits are 0. (DEPRECATED)"""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     return ctx.p1().hex()
 
-@templatekeyword('p2node', requires={'ctx'})
+
+@templatekeyword(b'p2node', requires={b'ctx'})
 def showp2node(context, mapping):
     """String. The identification hash of the changeset's second
     parent, as a 40 digit hexadecimal string. If the changeset has no second
     parent, all digits are 0. (DEPRECATED)"""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     return ctx.p2().hex()
 
-@templatekeyword('parents', requires={'repo', 'ctx'})
+
+@templatekeyword(b'parents', requires={b'repo', b'ctx'})
 def showparents(context, mapping):
     """List of strings. The parents of the changeset in "rev:node"
     format. If the changeset has only one "natural" parent (the predecessor
     revision) nothing is shown."""
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
     pctxs = scmutil.meaningfulparents(repo, ctx)
     prevs = [p.rev() for p in pctxs]
-    parents = [[('rev', p.rev()),
-                ('node', p.hex()),
-                ('phase', p.phasestr())]
-               for p in pctxs]
-    f = _showcompatlist(context, mapping, 'parent', parents)
-    return _hybrid(f, prevs, lambda x: {'ctx': repo[x]},
-                   lambda x: scmutil.formatchangeid(repo[x]), keytype=int)
+    parents = [
+        [(b'rev', p.rev()), (b'node', p.hex()), (b'phase', p.phasestr())]
+        for p in pctxs
+    ]
+    f = _showcompatlist(context, mapping, b'parent', parents)
+    return _hybrid(
+        f,
+        prevs,
+        lambda x: {b'ctx': repo[x]},
+        lambda x: scmutil.formatchangeid(repo[x]),
+        keytype=int,
+    )
 
-@templatekeyword('phase', requires={'ctx'})
+
+@templatekeyword(b'phase', requires={b'ctx'})
 def showphase(context, mapping):
     """String. The changeset phase name."""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     return ctx.phasestr()
 
-@templatekeyword('phaseidx', requires={'ctx'})
+
+@templatekeyword(b'phaseidx', requires={b'ctx'})
 def showphaseidx(context, mapping):
     """Integer. The changeset phase index. (ADVANCED)"""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     return ctx.phase()
 
-@templatekeyword('rev', requires={'ctx'})
+
+@templatekeyword(b'rev', requires={b'ctx'})
 def showrev(context, mapping):
     """Integer. The repository-local changeset revision number."""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     return scmutil.intrev(ctx)
 
+
 def showrevslist(context, mapping, name, revs):
     """helper to generate a list of revisions in which a mapped template will
     be evaluated"""
-    repo = context.resource(mapping, 'repo')
+    repo = context.resource(mapping, b'repo')
     # revs may be a smartset; don't compute it until f() has to be evaluated
     def f():
-        srevs = ['%d' % r for r in revs]
+        srevs = [b'%d' % r for r in revs]
         return _showcompatlist(context, mapping, name, srevs)
-    return _hybrid(f, revs,
-                   lambda x: {name: x, 'ctx': repo[x]},
-                   pycompat.identity, keytype=int)
 
-@templatekeyword('subrepos', requires={'ctx'})
+    return _hybrid(
+        f,
+        revs,
+        lambda x: {name: x, b'ctx': repo[x]},
+        pycompat.identity,
+        keytype=int,
+    )
+
+
+@templatekeyword(b'subrepos', requires={b'ctx'})
 def showsubrepos(context, mapping):
     """List of strings. Updated subrepositories in the changeset."""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     substate = ctx.substate
     if not substate:
-        return compatlist(context, mapping, 'subrepo', [])
+        return compatlist(context, mapping, b'subrepo', [])
     psubstate = ctx.p1().substate or {}
     subrepos = []
     for sub in substate:
         if sub not in psubstate or substate[sub] != psubstate[sub]:
-            subrepos.append(sub) # modified or newly added in ctx
+            subrepos.append(sub)  # modified or newly added in ctx
     for sub in psubstate:
         if sub not in substate:
-            subrepos.append(sub) # removed in ctx
-    return compatlist(context, mapping, 'subrepo', sorted(subrepos))
+            subrepos.append(sub)  # removed in ctx
+    return compatlist(context, mapping, b'subrepo', sorted(subrepos))
+
 
 # don't remove "showtags" definition, even though namespaces will put
 # a helper function for "tags" keyword into "keywords" map automatically,
 # because online help text is built without namespaces initialization
-@templatekeyword('tags', requires={'repo', 'ctx'})
+@templatekeyword(b'tags', requires={b'repo', b'ctx'})
 def showtags(context, mapping):
     """List of strings. Any tags associated with the changeset."""
-    return shownames(context, mapping, 'tags')
+    return shownames(context, mapping, b'tags')
 
-@templatekeyword('termwidth', requires={'ui'})
+
+@templatekeyword(b'termwidth', requires={b'ui'})
 def showtermwidth(context, mapping):
     """Integer. The width of the current terminal."""
-    ui = context.resource(mapping, 'ui')
+    ui = context.resource(mapping, b'ui')
     return ui.termwidth()
 
-@templatekeyword('user', requires={'ctx'})
+
+@templatekeyword(b'user', requires={b'ctx'})
 def showuser(context, mapping):
     """String. The unmodified author of the changeset."""
-    ctx = context.resource(mapping, 'ctx')
+    ctx = context.resource(mapping, b'ctx')
     return ctx.user()
 
-@templatekeyword('instabilities', requires={'ctx'})
+
+@templatekeyword(b'instabilities', requires={b'ctx'})
 def showinstabilities(context, mapping):
     """List of strings. Evolution instabilities affecting the changeset.
     (EXPERIMENTAL)
     """
-    ctx = context.resource(mapping, 'ctx')
-    return compatlist(context, mapping, 'instability', ctx.instabilities(),
-                      plural='instabilities')
+    ctx = context.resource(mapping, b'ctx')
+    return compatlist(
+        context,
+        mapping,
+        b'instability',
+        ctx.instabilities(),
+        plural=b'instabilities',
+    )
 
-@templatekeyword('verbosity', requires={'ui'})
+
+@templatekeyword(b'verbosity', requires={b'ui'})
 def showverbosity(context, mapping):
     """String. The current output verbosity in 'debug', 'quiet', 'verbose',
     or ''."""
-    ui = context.resource(mapping, 'ui')
+    ui = context.resource(mapping, b'ui')
     # see logcmdutil.changesettemplater for priority of these flags
     if ui.debugflag:
-        return 'debug'
+        return b'debug'
     elif ui.quiet:
-        return 'quiet'
+        return b'quiet'
     elif ui.verbose:
-        return 'verbose'
-    return ''
+        return b'verbose'
+    return b''
 
-@templatekeyword('whyunstable', requires={'repo', 'ctx'})
+
+@templatekeyword(b'whyunstable', requires={b'repo', b'ctx'})
 def showwhyunstable(context, mapping):
     """List of dicts explaining all instabilities of a changeset.
     (EXPERIMENTAL)
     """
-    repo = context.resource(mapping, 'repo')
-    ctx = context.resource(mapping, 'ctx')
+    repo = context.resource(mapping, b'repo')
+    ctx = context.resource(mapping, b'ctx')
 
     def formatnode(ctx):
-        return '%s (%s)' % (scmutil.formatchangeid(ctx), ctx.phasestr())
+        return b'%s (%s)' % (scmutil.formatchangeid(ctx), ctx.phasestr())
 
     entries = obsutil.whyunstable(repo, ctx)
 
     for entry in entries:
-        if entry.get('divergentnodes'):
-            dnodes = entry['divergentnodes']
-            dnhybrid = _hybrid(None, [dnode.hex() for dnode in dnodes],
-                               lambda x: {'ctx': repo[x]},
-                               lambda x: formatnode(repo[x]))
-            entry['divergentnodes'] = dnhybrid
+        if entry.get(b'divergentnodes'):
+            dnodes = entry[b'divergentnodes']
+            dnhybrid = _hybrid(
+                None,
+                [dnode.hex() for dnode in dnodes],
+                lambda x: {b'ctx': repo[x]},
+                lambda x: formatnode(repo[x]),
+            )
+            entry[b'divergentnodes'] = dnhybrid
 
-    tmpl = ('{instability}:{if(divergentnodes, " ")}{divergentnodes} '
-            '{reason} {node|short}')
-    return templateutil.mappinglist(entries, tmpl=tmpl, sep='\n')
+    tmpl = (
+        b'{instability}:{if(divergentnodes, " ")}{divergentnodes} '
+        b'{reason} {node|short}'
+    )
+    return templateutil.mappinglist(entries, tmpl=tmpl, sep=b'\n')
+
 
 def loadkeyword(ui, extname, registrarobj):
     """Load template keyword from specified registrarobj
     """
-    for name, func in registrarobj._table.iteritems():
+    for name, func in pycompat.iteritems(registrarobj._table):
         keywords[name] = func
 
+
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = keywords.values()
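
As with the function registrar above, a hypothetical extension-side
keyword sketch mirroring the byte-string names and ``requires=``
convention adopted in this file; the keyword name and body are invented::

   from mercurial import registrar

   templatekeyword = registrar.templatekeyword()

   @templatekeyword(b'firstline', requires={b'ctx'})
   def showfirstline(context, mapping):
       """String. First line of the changeset description. (hypothetical)"""
       ctx = context.resource(mapping, b'ctx')
       lines = ctx.description().splitlines()
       return lines[0] if lines else b''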
--- a/mercurial/templater.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/templater.py	Mon Oct 21 11:09:48 2019 -0400
@@ -68,6 +68,7 @@
 import os
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     config,
     encoding,
@@ -79,32 +80,31 @@
     templateutil,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
 # template parsing
 
 elements = {
     # token-type: binding-strength, primary, prefix, infix, suffix
-    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
-    ".": (18, None, None, (".", 18), None),
-    "%": (15, None, None, ("%", 15), None),
-    "|": (15, None, None, ("|", 15), None),
-    "*": (5, None, None, ("*", 5), None),
-    "/": (5, None, None, ("/", 5), None),
-    "+": (4, None, None, ("+", 4), None),
-    "-": (4, None, ("negate", 19), ("-", 4), None),
-    "=": (3, None, None, ("keyvalue", 3), None),
-    ",": (2, None, None, ("list", 2), None),
-    ")": (0, None, None, None, None),
-    "integer": (0, "integer", None, None, None),
-    "symbol": (0, "symbol", None, None, None),
-    "string": (0, "string", None, None, None),
-    "template": (0, "template", None, None, None),
-    "end": (0, None, None, None, None),
+    b"(": (20, None, (b"group", 1, b")"), (b"func", 1, b")"), None),
+    b".": (18, None, None, (b".", 18), None),
+    b"%": (15, None, None, (b"%", 15), None),
+    b"|": (15, None, None, (b"|", 15), None),
+    b"*": (5, None, None, (b"*", 5), None),
+    b"/": (5, None, None, (b"/", 5), None),
+    b"+": (4, None, None, (b"+", 4), None),
+    b"-": (4, None, (b"negate", 19), (b"-", 4), None),
+    b"=": (3, None, None, (b"keyvalue", 3), None),
+    b",": (2, None, None, (b"list", 2), None),
+    b")": (0, None, None, None, None),
+    b"integer": (0, b"integer", None, None, None),
+    b"symbol": (0, b"symbol", None, None, None),
+    b"string": (0, b"string", None, None, None),
+    b"template": (0, b"template", None, None, None),
+    b"end": (0, None, None, None, None),
 }
 
+
 def tokenize(program, start, end, term=None):
     """Parse a template expression into a stream of tokens, which must end
     with term if specified"""
@@ -112,30 +112,30 @@
     program = pycompat.bytestr(program)
     while pos < end:
         c = program[pos]
-        if c.isspace(): # skip inter-token whitespace
+        if c.isspace():  # skip inter-token whitespace
             pass
-        elif c in "(=,).%|+-*/": # handle simple operators
+        elif c in b"(=,).%|+-*/":  # handle simple operators
             yield (c, None, pos)
-        elif c in '"\'': # handle quoted templates
+        elif c in b'"\'':  # handle quoted templates
             s = pos + 1
             data, pos = _parsetemplate(program, s, end, c)
-            yield ('template', data, s)
+            yield (b'template', data, s)
             pos -= 1
-        elif c == 'r' and program[pos:pos + 2] in ("r'", 'r"'):
+        elif c == b'r' and program[pos : pos + 2] in (b"r'", b'r"'):
             # handle quoted strings
             c = program[pos + 1]
             s = pos = pos + 2
-            while pos < end: # find closing quote
+            while pos < end:  # find closing quote
                 d = program[pos]
-                if d == '\\': # skip over escaped characters
+                if d == b'\\':  # skip over escaped characters
                     pos += 2
                     continue
                 if d == c:
-                    yield ('string', program[s:pos], s)
+                    yield (b'string', program[s:pos], s)
                     break
                 pos += 1
             else:
-                raise error.ParseError(_("unterminated string"), s)
+                raise error.ParseError(_(b"unterminated string"), s)
         elif c.isdigit():
             s = pos
             while pos < end:
@@ -143,10 +143,14 @@
                 if not d.isdigit():
                     break
                 pos += 1
-            yield ('integer', program[s:pos], s)
+            yield (b'integer', program[s:pos], s)
             pos -= 1
-        elif (c == '\\' and program[pos:pos + 2] in (br"\'", br'\"')
-              or c == 'r' and program[pos:pos + 3] in (br"r\'", br'r\"')):
+        elif (
+            c == b'\\'
+            and program[pos : pos + 2] in (br"\'", br'\"')
+            or c == b'r'
+            and program[pos : pos + 3] in (br"r\'", br'r\"')
+        ):
             # handle escaped quoted strings for compatibility with 2.9.2-3.4,
             # where some nested templates were preprocessed as strings and
             # then compiled. therefore, \"...\" was allowed. (issue4733)
@@ -157,50 +161,51 @@
             # {f("\\\\ {g(\"\\\"\")}"}    \\ {g("\"")}    [r'\\', {g("\"")}]
             #             ~~~~~~~~
             #             escaped quoted string
-            if c == 'r':
+            if c == b'r':
                 pos += 1
-                token = 'string'
+                token = b'string'
             else:
-                token = 'template'
-            quote = program[pos:pos + 2]
+                token = b'template'
+            quote = program[pos : pos + 2]
             s = pos = pos + 2
-            while pos < end: # find closing escaped quote
-                if program.startswith('\\\\\\', pos, end):
-                    pos += 4 # skip over double escaped characters
+            while pos < end:  # find closing escaped quote
+                if program.startswith(b'\\\\\\', pos, end):
+                    pos += 4  # skip over double escaped characters
                     continue
                 if program.startswith(quote, pos, end):
                     # interpret as if it were a part of an outer string
                     data = parser.unescapestr(program[s:pos])
-                    if token == 'template':
+                    if token == b'template':
                         data = _parsetemplate(data, 0, len(data))[0]
                     yield (token, data, s)
                     pos += 1
                     break
                 pos += 1
             else:
-                raise error.ParseError(_("unterminated string"), s)
-        elif c.isalnum() or c in '_':
+                raise error.ParseError(_(b"unterminated string"), s)
+        elif c.isalnum() or c in b'_':
             s = pos
             pos += 1
-            while pos < end: # find end of symbol
+            while pos < end:  # find end of symbol
                 d = program[pos]
-                if not (d.isalnum() or d == "_"):
+                if not (d.isalnum() or d == b"_"):
                     break
                 pos += 1
             sym = program[s:pos]
-            yield ('symbol', sym, s)
+            yield (b'symbol', sym, s)
             pos -= 1
         elif c == term:
-            yield ('end', None, pos)
+            yield (b'end', None, pos)
             return
         else:
-            raise error.ParseError(_("syntax error"), pos)
+            raise error.ParseError(_(b"syntax error"), pos)
         pos += 1
     if term:
-        raise error.ParseError(_("unterminated template expansion"), start)
-    yield ('end', None, pos)
+        raise error.ParseError(_(b"unterminated template expansion"), start)
+    yield (b'end', None, pos)
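
A sketch of the resulting token stream (illustration only, not part of the
patch; values shown bytes-unprefixed, as in this file's doctests):

    list(tokenize(b'foo(bar)', 0, 8))
    # -> [('symbol', 'foo', 0), ('(', None, 3),
    #     ('symbol', 'bar', 4), (')', None, 7), ('end', None, 8)]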
 
-def _parsetemplate(tmpl, start, stop, quote=''):
+
+def _parsetemplate(tmpl, start, stop, quote=b''):
     r"""
     >>> _parsetemplate(b'foo{bar}"baz', 0, 12)
     ([('string', 'foo'), ('symbol', 'bar'), ('string', '"baz')], 12)
@@ -215,15 +220,16 @@
     """
     parsed = []
     for typ, val, pos in _scantemplate(tmpl, start, stop, quote):
-        if typ == 'string':
+        if typ == b'string':
             parsed.append((typ, val))
-        elif typ == 'template':
+        elif typ == b'template':
             parsed.append(val)
-        elif typ == 'end':
+        elif typ == b'end':
             return parsed, pos
         else:
-            raise error.ProgrammingError('unexpected type: %s' % typ)
-    raise error.ProgrammingError('unterminated scanning of template')
+            raise error.ProgrammingError(b'unexpected type: %s' % typ)
+    raise error.ProgrammingError(b'unterminated scanning of template')
+
 
 def scantemplate(tmpl, raw=False):
     r"""Scan (type, start, end) positions of outermost elements in template
@@ -247,66 +253,73 @@
     for typ, val, pos in _scantemplate(tmpl, 0, len(tmpl), raw=raw):
         if last:
             yield last + (pos,)
-        if typ == 'end':
+        if typ == b'end':
             return
         else:
             last = (typ, pos)
-    raise error.ProgrammingError('unterminated scanning of template')
+    raise error.ProgrammingError(b'unterminated scanning of template')
+
 
-def _scantemplate(tmpl, start, stop, quote='', raw=False):
+def _scantemplate(tmpl, start, stop, quote=b'', raw=False):
     """Parse template string into chunks of strings and template expressions"""
-    sepchars = '{' + quote
+    sepchars = b'{' + quote
     unescape = [parser.unescapestr, pycompat.identity][raw]
     pos = start
     p = parser.parser(elements)
     try:
         while pos < stop:
-            n = min((tmpl.find(c, pos, stop)
-                     for c in pycompat.bytestr(sepchars)),
-                    key=lambda n: (n < 0, n))
+            n = min(
+                (tmpl.find(c, pos, stop) for c in pycompat.bytestr(sepchars)),
+                key=lambda n: (n < 0, n),
+            )
             if n < 0:
-                yield ('string', unescape(tmpl[pos:stop]), pos)
+                yield (b'string', unescape(tmpl[pos:stop]), pos)
                 pos = stop
                 break
-            c = tmpl[n:n + 1]
+            c = tmpl[n : n + 1]
             bs = 0  # count leading backslashes
             if not raw:
-                bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
+                bs = (n - pos) - len(tmpl[pos:n].rstrip(b'\\'))
             if bs % 2 == 1:
                 # escaped (e.g. '\{', '\\\{', but not '\\{')
-                yield ('string', unescape(tmpl[pos:n - 1]) + c, pos)
+                yield (b'string', unescape(tmpl[pos : n - 1]) + c, pos)
                 pos = n + 1
                 continue
             if n > pos:
-                yield ('string', unescape(tmpl[pos:n]), pos)
+                yield (b'string', unescape(tmpl[pos:n]), pos)
             if c == quote:
-                yield ('end', None, n + 1)
+                yield (b'end', None, n + 1)
                 return
 
-            parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}'))
-            if not tmpl.startswith('}', pos):
-                raise error.ParseError(_("invalid token"), pos)
-            yield ('template', parseres, n)
+            parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, b'}'))
+            if not tmpl.startswith(b'}', pos):
+                raise error.ParseError(_(b"invalid token"), pos)
+            yield (b'template', parseres, n)
             pos += 1
 
         if quote:
-            raise error.ParseError(_("unterminated string"), start)
+            raise error.ParseError(_(b"unterminated string"), start)
     except error.ParseError as inst:
-        if len(inst.args) > 1:  # has location
-            loc = inst.args[1]
-            # Offset the caret location by the number of newlines before the
-            # location of the error, since we will replace one-char newlines
-            # with the two-char literal r'\n'.
-            offset = tmpl[:loc].count('\n')
-            tmpl = tmpl.replace('\n', br'\n')
-            # We want the caret to point to the place in the template that
-            # failed to parse, but in a hint we get a open paren at the
-            # start. Therefore, we print "loc + 1" spaces (instead of "loc")
-            # to line up the caret with the location of the error.
-            inst.hint = (tmpl + '\n'
-                         + ' ' * (loc + 1 + offset) + '^ ' + _('here'))
+        _addparseerrorhint(inst, tmpl)
         raise
-    yield ('end', None, pos)
+    yield (b'end', None, pos)
+
+
+def _addparseerrorhint(inst, tmpl):
+    if len(inst.args) <= 1:
+        return  # no location
+    loc = inst.args[1]
+    # Offset the caret location by the number of newlines before the
+    # location of the error, since we will replace one-char newlines
+    # with the two-char literal r'\n'.
+    offset = tmpl[:loc].count(b'\n')
+    tmpl = tmpl.replace(b'\n', br'\n')
+    # We want the caret to point to the place in the template that
+    # failed to parse, but in a hint we get an open paren at the
+    # start. Therefore, we print "loc + 1" spaces (instead of "loc")
+    # to line up the caret with the location of the error.
+    inst.hint = tmpl + b'\n' + b' ' * (loc + 1 + offset) + b'^ ' + _(b'here')
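
A sketch of the hint this helper builds (the error text and position are
hypothetical; not part of the patch):

    inst = error.ParseError(b'unterminated template expansion', 1)
    _addparseerrorhint(inst, b'{foo')
    # inst.hint == b'{foo\n  ^ here' -- loc + 1 == 2 spaces, so the
    # caret aligns once the CLI prints the hint behind an open paren.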
+
 
 def _unnesttemplatelist(tree):
     """Expand list of templates to node tuple
@@ -330,89 +343,108 @@
     if not isinstance(tree, tuple):
         return tree
     op = tree[0]
-    if op != 'template':
+    if op != b'template':
         return (op,) + tuple(_unnesttemplatelist(x) for x in tree[1:])
 
     assert len(tree) == 2
     xs = tuple(_unnesttemplatelist(x) for x in tree[1])
     if not xs:
-        return ('string', '')  # empty template ""
-    elif len(xs) == 1 and xs[0][0] == 'string':
+        return (b'string', b'')  # empty template ""
+    elif len(xs) == 1 and xs[0][0] == b'string':
         return xs[0]  # fast path for string with no template fragment "x"
     else:
         return (op,) + xs
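
A sketch of the two fast paths above (not part of the patch):

    assert _unnesttemplatelist((b'template', [])) == (b'string', b'')
    assert _unnesttemplatelist(
        (b'template', [(b'string', b'x')])
    ) == (b'string', b'x')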
 
+
 def parse(tmpl):
     """Parse template string into tree"""
     parsed, pos = _parsetemplate(tmpl, 0, len(tmpl))
-    assert pos == len(tmpl), 'unquoted template should be consumed'
-    return _unnesttemplatelist(('template', parsed))
+    assert pos == len(tmpl), b'unquoted template should be consumed'
+    return _unnesttemplatelist((b'template', parsed))
 
-def _parseexpr(expr):
+
+def parseexpr(expr):
     """Parse a template expression into tree
 
-    >>> _parseexpr(b'"foo"')
+    >>> parseexpr(b'"foo"')
     ('string', 'foo')
-    >>> _parseexpr(b'foo(bar)')
+    >>> parseexpr(b'foo(bar)')
     ('func', ('symbol', 'foo'), ('symbol', 'bar'))
-    >>> _parseexpr(b'foo(')
+    >>> parseexpr(b'foo(')
     Traceback (most recent call last):
       ...
     ParseError: ('not a prefix: end', 4)
-    >>> _parseexpr(b'"foo" "bar"')
+    >>> parseexpr(b'"foo" "bar"')
     Traceback (most recent call last):
       ...
     ParseError: ('invalid token', 7)
     """
+    try:
+        return _parseexpr(expr)
+    except error.ParseError as inst:
+        _addparseerrorhint(inst, expr)
+        raise
+
+
+def _parseexpr(expr):
     p = parser.parser(elements)
     tree, pos = p.parse(tokenize(expr, 0, len(expr)))
     if pos != len(expr):
-        raise error.ParseError(_('invalid token'), pos)
+        raise error.ParseError(_(b'invalid token'), pos)
     return _unnesttemplatelist(tree)
 
+
 def prettyformat(tree):
-    return parser.prettyformat(tree, ('integer', 'string', 'symbol'))
+    return parser.prettyformat(tree, (b'integer', b'string', b'symbol'))
+
 
 def compileexp(exp, context, curmethods):
     """Compile parsed template tree to (func, data) pair"""
     if not exp:
-        raise error.ParseError(_("missing argument"))
+        raise error.ParseError(_(b"missing argument"))
     t = exp[0]
     return curmethods[t](exp, context)
 
+
 # template evaluation
 
+
 def getsymbol(exp):
-    if exp[0] == 'symbol':
+    if exp[0] == b'symbol':
         return exp[1]
-    raise error.ParseError(_("expected a symbol, got '%s'") % exp[0])
+    raise error.ParseError(_(b"expected a symbol, got '%s'") % exp[0])
+
 
 def getlist(x):
     if not x:
         return []
-    if x[0] == 'list':
+    if x[0] == b'list':
         return getlist(x[1]) + [x[2]]
     return [x]
 
+
 def gettemplate(exp, context):
     """Compile given template tree or load named template from map file;
     returns (func, data) pair"""
-    if exp[0] in ('template', 'string'):
+    if exp[0] in (b'template', b'string'):
         return compileexp(exp, context, methods)
-    if exp[0] == 'symbol':
+    if exp[0] == b'symbol':
         # unlike runsymbol(), here 'symbol' is always taken as template name
         # even if it exists in mapping. this allows us to override mapping
         # by web templates, e.g. 'changelogtag' is redefined in map file.
         return context._load(exp[1])
-    raise error.ParseError(_("expected template specifier"))
+    raise error.ParseError(_(b"expected template specifier"))
+
 
 def _runrecursivesymbol(context, mapping, key):
-    raise error.Abort(_("recursive reference '%s' in template") % key)
+    raise error.Abort(_(b"recursive reference '%s' in template") % key)
+
 
 def buildtemplate(exp, context):
     ctmpl = [compileexp(e, context, methods) for e in exp[1:]]
     return (templateutil.runtemplate, ctmpl)
 
+
 def buildfilter(exp, context):
     n = getsymbol(exp[2])
     if n in context._filters:
@@ -423,27 +455,32 @@
         f = context._funcs[n]
         args = _buildfuncargs(exp[1], context, methods, n, f._argspec)
         return (f, args)
-    raise error.ParseError(_("unknown function '%s'") % n)
+    raise error.ParseError(_(b"unknown function '%s'") % n)
+
 
 def buildmap(exp, context):
     darg = compileexp(exp[1], context, methods)
     targ = gettemplate(exp[2], context)
     return (templateutil.runmap, (darg, targ))
 
+
 def buildmember(exp, context):
     darg = compileexp(exp[1], context, methods)
     memb = getsymbol(exp[2])
     return (templateutil.runmember, (darg, memb))
 
+
 def buildnegate(exp, context):
     arg = compileexp(exp[1], context, exprmethods)
     return (templateutil.runnegate, arg)
 
+
 def buildarithmetic(exp, context, func):
     left = compileexp(exp[1], context, exprmethods)
     right = compileexp(exp[2], context, exprmethods)
     return (templateutil.runarithmetic, (func, left, right))
 
+
 def buildfunc(exp, context):
     n = getsymbol(exp[1])
     if n in context._funcs:
@@ -453,10 +490,11 @@
     if n in context._filters:
         args = _buildfuncargs(exp[2], context, exprmethods, n, argspec=None)
         if len(args) != 1:
-            raise error.ParseError(_("filter %s expects one argument") % n)
+            raise error.ParseError(_(b"filter %s expects one argument") % n)
         f = context._filters[n]
         return (templateutil.runfilter, (args[0], f))
-    raise error.ParseError(_("unknown function '%s'") % n)
+    raise error.ParseError(_(b"unknown function '%s'") % n)
+
 
 def _buildfuncargs(exp, context, curmethods, funcname, argspec):
     """Compile parsed tree of function arguments into list or dict of
@@ -473,9 +511,13 @@
     >>> list(args.keys()), list(args[b'opts'].keys())
     (['opts'], ['opts', 'k'])
     """
+
     def compiledict(xs):
-        return util.sortdict((k, compileexp(x, context, curmethods))
-                             for k, x in xs.iteritems())
+        return util.sortdict(
+            (k, compileexp(x, context, curmethods))
+            for k, x in pycompat.iteritems(xs)
+        )
+
     def compilelist(xs):
         return [compileexp(x, context, curmethods) for x in xs]
 
@@ -485,8 +527,13 @@
 
     # function with argspec: return dict of named args
     _poskeys, varkey, _keys, optkey = argspec = parser.splitargspec(argspec)
-    treeargs = parser.buildargsdict(getlist(exp), funcname, argspec,
-                                    keyvaluenode='keyvalue', keynode='symbol')
+    treeargs = parser.buildargsdict(
+        getlist(exp),
+        funcname,
+        argspec,
+        keyvaluenode=b'keyvalue',
+        keynode=b'symbol',
+    )
     compargs = util.sortdict()
     if varkey:
         compargs[varkey] = compilelist(treeargs.pop(varkey))
@@ -495,64 +542,75 @@
     compargs.update(compiledict(treeargs))
     return compargs
 
+
 def buildkeyvaluepair(exp, content):
-    raise error.ParseError(_("can't use a key-value pair in this context"))
+    raise error.ParseError(_(b"can't use a key-value pair in this context"))
+
 
 def buildlist(exp, context):
-    raise error.ParseError(_("can't use a list in this context"),
-                           hint=_('check place of comma and parens'))
+    raise error.ParseError(
+        _(b"can't use a list in this context"),
+        hint=_(b'check place of comma and parens'),
+    )
+
 
 # methods to interpret function arguments or inner expressions (e.g. {_(x)})
 exprmethods = {
-    "integer": lambda e, c: (templateutil.runinteger, e[1]),
-    "string": lambda e, c: (templateutil.runstring, e[1]),
-    "symbol": lambda e, c: (templateutil.runsymbol, e[1]),
-    "template": buildtemplate,
-    "group": lambda e, c: compileexp(e[1], c, exprmethods),
-    ".": buildmember,
-    "|": buildfilter,
-    "%": buildmap,
-    "func": buildfunc,
-    "keyvalue": buildkeyvaluepair,
-    "list": buildlist,
-    "+": lambda e, c: buildarithmetic(e, c, lambda a, b: a + b),
-    "-": lambda e, c: buildarithmetic(e, c, lambda a, b: a - b),
-    "negate": buildnegate,
-    "*": lambda e, c: buildarithmetic(e, c, lambda a, b: a * b),
-    "/": lambda e, c: buildarithmetic(e, c, lambda a, b: a // b),
-    }
+    b"integer": lambda e, c: (templateutil.runinteger, e[1]),
+    b"string": lambda e, c: (templateutil.runstring, e[1]),
+    b"symbol": lambda e, c: (templateutil.runsymbol, e[1]),
+    b"template": buildtemplate,
+    b"group": lambda e, c: compileexp(e[1], c, exprmethods),
+    b".": buildmember,
+    b"|": buildfilter,
+    b"%": buildmap,
+    b"func": buildfunc,
+    b"keyvalue": buildkeyvaluepair,
+    b"list": buildlist,
+    b"+": lambda e, c: buildarithmetic(e, c, lambda a, b: a + b),
+    b"-": lambda e, c: buildarithmetic(e, c, lambda a, b: a - b),
+    b"negate": buildnegate,
+    b"*": lambda e, c: buildarithmetic(e, c, lambda a, b: a * b),
+    b"/": lambda e, c: buildarithmetic(e, c, lambda a, b: a // b),
+}
 
 # methods to interpret top-level template (e.g. {x}, {x|_}, {x % "y"})
 methods = exprmethods.copy()
-methods["integer"] = exprmethods["symbol"]  # '{1}' as variable
+methods[b"integer"] = exprmethods[b"symbol"]  # '{1}' as variable
+
 
 class _aliasrules(parser.basealiasrules):
     """Parsing and expansion rule set of template aliases"""
-    _section = _('template alias')
+
+    _section = _(b'template alias')
     _parse = staticmethod(_parseexpr)
 
     @staticmethod
     def _trygetfunc(tree):
         """Return (name, args) if tree is func(...) or ...|filter; otherwise
         None"""
-        if tree[0] == 'func' and tree[1][0] == 'symbol':
+        if tree[0] == b'func' and tree[1][0] == b'symbol':
             return tree[1][1], getlist(tree[2])
-        if tree[0] == '|' and tree[2][0] == 'symbol':
+        if tree[0] == b'|' and tree[2][0] == b'symbol':
             return tree[2][1], [tree[1]]
 
+
 def expandaliases(tree, aliases):
     """Return new tree of aliases are expanded"""
     aliasmap = _aliasrules.buildmap(aliases)
     return _aliasrules.expand(aliasmap, tree)
 
+
 # template engine
 
+
 def unquotestring(s):
     '''unwrap quotes if any; otherwise return the string unmodified'''
-    if len(s) < 2 or s[0] not in "'\"" or s[0] != s[-1]:
+    if len(s) < 2 or s[0] not in b"'\"" or s[0] != s[-1]:
         return s
     return s[1:-1]
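
Two illustrative cases (not part of the patch):

    unquotestring(b'"foo"')  # -> b'foo'
    unquotestring(b"'a")     # unbalanced quotes: returned unmodified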
 
+
 class resourcemapper(object):
     """Mapper of internal template resources"""
 
@@ -575,6 +633,7 @@
         """Return a dict of additional mapping items which should be paired
         with the given new mapping"""
 
+
 class nullresourcemapper(resourcemapper):
     def availablekeys(self, mapping):
         return set()
@@ -588,6 +647,7 @@
     def populatemap(self, context, origmapping, newmapping):
         return {}
 
+
 class engine(object):
     '''template expansion engine.
 
@@ -630,12 +690,18 @@
         # new resources, so the defaults will be re-evaluated (issue5612)
         knownres = self._resources.knownkeys()
         newres = self._resources.availablekeys(newmapping)
-        mapping = {k: v for k, v in origmapping.iteritems()
-                   if (k in knownres  # not a symbol per self.symbol()
-                       or newres.isdisjoint(self._defaultrequires(k)))}
+        mapping = {
+            k: v
+            for k, v in pycompat.iteritems(origmapping)
+            if (
+                k in knownres  # not a symbol per self.symbol()
+                or newres.isdisjoint(self._defaultrequires(k))
+            )
+        }
         mapping.update(newmapping)
         mapping.update(
-            self._resources.populatemap(self, origmapping, newmapping))
+            self._resources.populatemap(self, origmapping, newmapping)
+        )
         return mapping
 
     def _defaultrequires(self, key):
@@ -668,7 +734,8 @@
         v = self._resources.lookup(mapping, key)
         if v is None:
             raise templateutil.ResourceUnavailable(
-                _('template resource not available: %s') % key)
+                _(b'template resource not available: %s') % key
+            )
         return v
 
     def _load(self, t):
@@ -679,7 +746,7 @@
             self._cache[t] = (_runrecursivesymbol, t)
             try:
                 self._cache[t] = compileexp(x, self, methods)
-            except: # re-raises
+            except:  # re-raises
                 del self._cache[t]
                 raise
         return self._cache[t]
@@ -725,36 +792,40 @@
             mapping = extramapping
         return templateutil.flatten(self, mapping, func(self, mapping, data))
 
+
 def stylelist():
     paths = templatepaths()
     if not paths:
-        return _('no templates found, try `hg debuginstall` for more info')
+        return _(b'no templates found, try `hg debuginstall` for more info')
     dirlist = os.listdir(paths[0])
     stylelist = []
     for file in dirlist:
-        split = file.split(".")
-        if split[-1] in ('orig', 'rej'):
+        split = file.split(b".")
+        if split[-1] in (b'orig', b'rej'):
             continue
-        if split[0] == "map-cmdline":
+        if split[0] == b"map-cmdline":
             stylelist.append(split[1])
-    return ", ".join(sorted(stylelist))
+    return b", ".join(sorted(stylelist))
+
 
 def _readmapfile(mapfile):
     """Load template elements from the given map file"""
     if not os.path.exists(mapfile):
-        raise error.Abort(_("style '%s' not found") % mapfile,
-                          hint=_("available styles: %s") % stylelist())
+        raise error.Abort(
+            _(b"style '%s' not found") % mapfile,
+            hint=_(b"available styles: %s") % stylelist(),
+        )
 
     base = os.path.dirname(mapfile)
     conf = config.config(includepaths=templatepaths())
-    conf.read(mapfile, remap={'': 'templates'})
+    conf.read(mapfile, remap={b'': b'templates'})
 
     cache = {}
     tmap = {}
     aliases = []
 
-    val = conf.get('templates', '__base__')
-    if val and val[0] not in "'\"":
+    val = conf.get(b'templates', b'__base__')
+    if val and val[0] not in b"'\"":
         # treat as a pointer to a base class for this style
         path = util.normpath(os.path.join(base, val))
 
@@ -765,27 +836,30 @@
                 if os.path.isfile(p2):
                     path = p2
                     break
-                p3 = util.normpath(os.path.join(p2, "map"))
+                p3 = util.normpath(os.path.join(p2, b"map"))
                 if os.path.isfile(p3):
                     path = p3
                     break
 
         cache, tmap, aliases = _readmapfile(path)
 
-    for key, val in conf['templates'].items():
+    for key, val in conf[b'templates'].items():
         if not val:
-            raise error.ParseError(_('missing value'),
-                                   conf.source('templates', key))
-        if val[0] in "'\"":
+            raise error.ParseError(
+                _(b'missing value'), conf.source(b'templates', key)
+            )
+        if val[0] in b"'\"":
             if val[0] != val[-1]:
-                raise error.ParseError(_('unmatched quotes'),
-                                       conf.source('templates', key))
+                raise error.ParseError(
+                    _(b'unmatched quotes'), conf.source(b'templates', key)
+                )
             cache[key] = unquotestring(val)
-        elif key != '__base__':
+        elif key != b'__base__':
             tmap[key] = os.path.join(base, val)
-    aliases.extend(conf['templatealias'].items())
+    aliases.extend(conf[b'templatealias'].items())
     return cache, tmap, aliases
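
A hypothetical map file showing the three kinds of entries the loop above
distinguishes (all names except __base__ are made up): an unquoted
__base__ recurses into a base style, another unquoted value becomes a
tmap file path, and a quoted value is cached as an inline fragment:

    __base__ = ../coal/map
    changeset = changeset.tmpl
    header = '<html>'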
 
+
 class loader(object):
     """Load template fragments optionally from a map file"""
 
@@ -806,11 +880,13 @@
                 self.cache[t] = util.readfile(self._map[t])
             except KeyError as inst:
                 raise templateutil.TemplateNotFound(
-                    _('"%s" not in template map') % inst.args[0])
+                    _(b'"%s" not in template map') % inst.args[0]
+                )
             except IOError as inst:
-                reason = (_('template file %s: %s')
-                          % (self._map[t],
-                             stringutil.forcebytestr(inst.args[1])))
+                reason = _(b'template file %s: %s') % (
+                    self._map[t],
+                    stringutil.forcebytestr(inst.args[1]),
+                )
                 raise IOError(inst.args[0], encoding.strfromlocal(reason))
         return self._parse(self.cache[t])
 
@@ -824,23 +900,23 @@
         if not tree:
             return
         op = tree[0]
-        if op == 'symbol':
+        if op == b'symbol':
             s = tree[1]
             if s in syms[0]:
-                return # avoid recursion: s -> cache[s] -> s
+                return  # avoid recursion: s -> cache[s] -> s
             syms[0].add(s)
             if s in self.cache or s in self._map:
                 # s may be a reference for named template
                 self._findsymbolsused(self.load(s), syms)
             return
-        if op in {'integer', 'string'}:
+        if op in {b'integer', b'string'}:
             return
         # '{arg|func}' == '{func(arg)}'
-        if op == '|':
+        if op == b'|':
             syms[1].add(getsymbol(tree[2]))
             self._findsymbolsused(tree[1], syms)
             return
-        if op == 'func':
+        if op == b'func':
             syms[1].add(getsymbol(tree[1]))
             self._findsymbolsused(tree[2], syms)
             return
@@ -857,10 +933,18 @@
         self._findsymbolsused(self.load(t), syms)
         return syms
 
+
 class templater(object):
-
-    def __init__(self, filters=None, defaults=None, resources=None,
-                 cache=None, aliases=(), minchunk=1024, maxchunk=65536):
+    def __init__(
+        self,
+        filters=None,
+        defaults=None,
+        resources=None,
+        cache=None,
+        aliases=(),
+        minchunk=1024,
+        maxchunk=65536,
+    ):
         """Create template engine optionally with preloaded template fragments
 
         - ``filters``: a dict of functions to transform a value into another.
@@ -882,8 +966,16 @@
         self._minchunk, self._maxchunk = minchunk, maxchunk
 
     @classmethod
-    def frommapfile(cls, mapfile, filters=None, defaults=None, resources=None,
-                    cache=None, minchunk=1024, maxchunk=65536):
+    def frommapfile(
+        cls,
+        mapfile,
+        filters=None,
+        defaults=None,
+        resources=None,
+        cache=None,
+        minchunk=1024,
+        maxchunk=65536,
+    ):
         """Create templater from the specified map file"""
         t = cls(filters, defaults, resources, cache, [], minchunk, maxchunk)
         cache, tmap, aliases = _readmapfile(mapfile)
@@ -918,7 +1010,7 @@
 
         This may load additional templates from the map file.
         """
-        return self.symbolsused('')
+        return self.symbolsused(b'')
 
     def symbolsused(self, t):
         """Look up (keywords, filters/functions) referenced from the name
@@ -930,7 +1022,7 @@
 
     def renderdefault(self, mapping):
         """Render the default unnamed template and return result as string"""
-        return self.render('', mapping)
+        return self.render(b'', mapping)
 
     def render(self, t, mapping):
         """Render the specified named template and return result as string"""
@@ -941,17 +1033,19 @@
         yields chunks"""
         stream = self._proc.process(t, mapping)
         if self._minchunk:
-            stream = util.increasingchunks(stream, min=self._minchunk,
-                                           max=self._maxchunk)
+            stream = util.increasingchunks(
+                stream, min=self._minchunk, max=self._maxchunk
+            )
         return stream
 
+
 def templatepaths():
     '''return locations used for template files.'''
-    pathsrel = ['templates']
-    paths = [os.path.normpath(os.path.join(util.datapath, f))
-             for f in pathsrel]
+    pathsrel = [b'templates']
+    paths = [os.path.normpath(os.path.join(util.datapath, f)) for f in pathsrel]
     return [p for p in paths if os.path.isdir(p)]
 
+
 def templatepath(name):
     '''return location of template file. returns None if not found.'''
     for p in templatepaths():
@@ -960,6 +1054,7 @@
             return f
     return None
 
+
 def stylemap(styles, paths=None):
     """Return path to mapfile for a given style.
 
@@ -979,13 +1074,16 @@
 
     for style in styles:
         # only plain name is allowed to honor template paths
-        if (not style
+        if (
+            not style
             or style in (pycompat.oscurdir, pycompat.ospardir)
             or pycompat.ossep in style
-            or pycompat.osaltsep and pycompat.osaltsep in style):
+            or pycompat.osaltsep
+            and pycompat.osaltsep in style
+        ):
             continue
-        locations = [os.path.join(style, 'map'), 'map-' + style]
-        locations.append('map')
+        locations = [os.path.join(style, b'map'), b'map-' + style]
+        locations.append(b'map')
 
         for path in paths:
             for location in locations:
@@ -993,4 +1091,4 @@
                 if os.path.isfile(mapfile):
                     return style, mapfile
 
-    raise RuntimeError("No hgweb templates found in %r" % paths)
+    raise RuntimeError(b"No hgweb templates found in %r" % paths)
--- a/mercurial/templateutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/templateutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,6 +11,7 @@
 import types
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     error,
     pycompat,
@@ -21,12 +22,15 @@
     stringutil,
 )
 
+
 class ResourceUnavailable(error.Abort):
     pass
 
+
 class TemplateNotFound(error.Abort):
     pass
 
+
 class wrapped(object):
     """Object requiring extra conversion prior to displaying or processing
     as value
@@ -103,6 +107,7 @@
         A returned value must be serializable by templaterfilters.json().
         """
 
+
 class mappable(object):
     """Object which can be converted to a single template mapping"""
 
@@ -113,6 +118,7 @@
     def tomap(self, context):
         """Create a single template mapping representing this"""
 
+
 class wrappedbytes(wrapped):
     """Wrapper for byte string"""
 
@@ -124,8 +130,9 @@
         return item in self._value
 
     def getmember(self, context, mapping, key):
-        raise error.ParseError(_('%r is not a dictionary')
-                               % pycompat.bytestr(self._value))
+        raise error.ParseError(
+            _(b'%r is not a dictionary') % pycompat.bytestr(self._value)
+        )
 
     def getmin(self, context, mapping):
         return self._getby(context, mapping, min)
@@ -135,16 +142,18 @@
 
     def _getby(self, context, mapping, func):
         if not self._value:
-            raise error.ParseError(_('empty string'))
+            raise error.ParseError(_(b'empty string'))
         return func(pycompat.iterbytestr(self._value))
 
     def filter(self, context, mapping, select):
-        raise error.ParseError(_('%r is not filterable')
-                               % pycompat.bytestr(self._value))
+        raise error.ParseError(
+            _(b'%r is not filterable') % pycompat.bytestr(self._value)
+        )
 
     def itermaps(self, context):
-        raise error.ParseError(_('%r is not iterable of mappings')
-                               % pycompat.bytestr(self._value))
+        raise error.ParseError(
+            _(b'%r is not iterable of mappings') % pycompat.bytestr(self._value)
+        )
 
     def join(self, context, mapping, sep):
         return joinitems(pycompat.iterbytestr(self._value), sep)
@@ -158,6 +167,7 @@
     def tovalue(self, context, mapping):
         return self._value
 
+
 class wrappedvalue(wrapped):
     """Generic wrapper for pure non-list/dict/bytes value"""
 
@@ -165,26 +175,27 @@
         self._value = value
 
     def contains(self, context, mapping, item):
-        raise error.ParseError(_("%r is not iterable") % self._value)
+        raise error.ParseError(_(b"%r is not iterable") % self._value)
 
     def getmember(self, context, mapping, key):
-        raise error.ParseError(_('%r is not a dictionary') % self._value)
+        raise error.ParseError(_(b'%r is not a dictionary') % self._value)
 
     def getmin(self, context, mapping):
-        raise error.ParseError(_("%r is not iterable") % self._value)
+        raise error.ParseError(_(b"%r is not iterable") % self._value)
 
     def getmax(self, context, mapping):
-        raise error.ParseError(_("%r is not iterable") % self._value)
+        raise error.ParseError(_(b"%r is not iterable") % self._value)
 
     def filter(self, context, mapping, select):
-        raise error.ParseError(_("%r is not iterable") % self._value)
+        raise error.ParseError(_(b"%r is not iterable") % self._value)
 
     def itermaps(self, context):
-        raise error.ParseError(_('%r is not iterable of mappings')
-                               % self._value)
+        raise error.ParseError(
+            _(b'%r is not iterable of mappings') % self._value
+        )
 
     def join(self, context, mapping, sep):
-        raise error.ParseError(_('%r is not iterable') % self._value)
+        raise error.ParseError(_(b'%r is not iterable') % self._value)
 
     def show(self, context, mapping):
         if self._value is None:
@@ -202,38 +213,39 @@
     def tovalue(self, context, mapping):
         return self._value
 
+
 class date(mappable, wrapped):
     """Wrapper for date tuple"""
 
-    def __init__(self, value, showfmt='%d %d'):
+    def __init__(self, value, showfmt=b'%d %d'):
         # value may be (float, int), but public interface shouldn't support
         # floating-point timestamp
         self._unixtime, self._tzoffset = map(int, value)
         self._showfmt = showfmt
 
     def contains(self, context, mapping, item):
-        raise error.ParseError(_('date is not iterable'))
+        raise error.ParseError(_(b'date is not iterable'))
 
     def getmember(self, context, mapping, key):
-        raise error.ParseError(_('date is not a dictionary'))
+        raise error.ParseError(_(b'date is not a dictionary'))
 
     def getmin(self, context, mapping):
-        raise error.ParseError(_('date is not iterable'))
+        raise error.ParseError(_(b'date is not iterable'))
 
     def getmax(self, context, mapping):
-        raise error.ParseError(_('date is not iterable'))
+        raise error.ParseError(_(b'date is not iterable'))
 
     def filter(self, context, mapping, select):
-        raise error.ParseError(_('date is not iterable'))
+        raise error.ParseError(_(b'date is not iterable'))
 
     def join(self, context, mapping, sep):
-        raise error.ParseError(_("date is not iterable"))
+        raise error.ParseError(_(b"date is not iterable"))
 
     def show(self, context, mapping):
         return self._showfmt % (self._unixtime, self._tzoffset)
 
     def tomap(self, context):
-        return {'unixtime': self._unixtime, 'tzoffset': self._tzoffset}
+        return {b'unixtime': self._unixtime, b'tzoffset': self._tzoffset}
 
     def tobool(self, context, mapping):
         return True
@@ -241,6 +253,7 @@
     def tovalue(self, context, mapping):
         return (self._unixtime, self._tzoffset)
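
A minimal sketch of the wrapper (values are made up; context and mapping
are unused by these methods, so None and {} suffice):

    d = date((1571667000.0, -7200))  # float timestamp is truncated
    assert d.show(None, {}) == b'1571667000 -7200'
    assert d.tomap(None) == {b'unixtime': 1571667000, b'tzoffset': -7200}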
 
+
 class hybrid(wrapped):
     """Wrapper for list or dict to support legacy template
 
@@ -266,8 +279,8 @@
 
     def getmember(self, context, mapping, key):
         # TODO: maybe split hybrid list/dict types?
-        if not util.safehasattr(self._values, 'get'):
-            raise error.ParseError(_('not a dictionary'))
+        if not util.safehasattr(self._values, b'get'):
+            raise error.ParseError(_(b'not a dictionary'))
         key = unwrapastype(context, mapping, key, self._keytype)
         return self._wrapvalue(key, self._values.get(key))
 
@@ -279,22 +292,25 @@
 
     def _getby(self, context, mapping, func):
         if not self._values:
-            raise error.ParseError(_('empty sequence'))
+            raise error.ParseError(_(b'empty sequence'))
         val = func(self._values)
         return self._wrapvalue(val, val)
 
     def _wrapvalue(self, key, val):
         if val is None:
             return
-        if util.safehasattr(val, '_makemap'):
+        if util.safehasattr(val, b'_makemap'):
             # a nested hybrid list/dict, which has its own way of map operation
             return val
         return hybriditem(None, key, val, self._makemap)
 
     def filter(self, context, mapping, select):
-        if util.safehasattr(self._values, 'get'):
-            values = {k: v for k, v in self._values.iteritems()
-                      if select(self._wrapvalue(k, v))}
+        if util.safehasattr(self._values, b'get'):
+            values = {
+                k: v
+                for k, v in pycompat.iteritems(self._values)
+                if select(self._wrapvalue(k, v))
+            }
         else:
             values = [v for v in self._values if select(self._wrapvalue(v, v))]
         return hybrid(None, values, self._makemap, self._joinfmt, self._keytype)
@@ -312,7 +328,7 @@
         # TODO: switch gen to (context, mapping) API?
         gen = self._gen
         if gen is None:
-            return self.join(context, mapping, ' ')
+            return self.join(context, mapping, b' ')
         if callable(gen):
             return gen()
         return gen
@@ -323,11 +339,14 @@
     def tovalue(self, context, mapping):
         # TODO: make it non-recursive for trivial lists/dicts
         xs = self._values
-        if util.safehasattr(xs, 'get'):
-            return {k: unwrapvalue(context, mapping, v)
-                    for k, v in xs.iteritems()}
+        if util.safehasattr(xs, b'get'):
+            return {
+                k: unwrapvalue(context, mapping, v)
+                for k, v in pycompat.iteritems(xs)
+            }
         return [unwrapvalue(context, mapping, x) for x in xs]
 
+
 class hybriditem(mappable, wrapped):
     """Wrapper for non-list/dict object to support map operation
 
@@ -386,6 +405,7 @@
     def tovalue(self, context, mapping):
         return _unthunk(context, mapping, self._value)
 
+
 class _mappingsequence(wrapped):
     """Wrapper for sequence of template mappings
 
@@ -395,28 +415,30 @@
     Template mappings may be nested.
     """
 
-    def __init__(self, name=None, tmpl=None, sep=''):
+    def __init__(self, name=None, tmpl=None, sep=b''):
         if name is not None and tmpl is not None:
-            raise error.ProgrammingError('name and tmpl are mutually exclusive')
+            raise error.ProgrammingError(
+                b'name and tmpl are mutually exclusive'
+            )
         self._name = name
         self._tmpl = tmpl
         self._defaultsep = sep
 
     def contains(self, context, mapping, item):
-        raise error.ParseError(_('not comparable'))
+        raise error.ParseError(_(b'not comparable'))
 
     def getmember(self, context, mapping, key):
-        raise error.ParseError(_('not a dictionary'))
+        raise error.ParseError(_(b'not a dictionary'))
 
     def getmin(self, context, mapping):
-        raise error.ParseError(_('not comparable'))
+        raise error.ParseError(_(b'not comparable'))
 
     def getmax(self, context, mapping):
-        raise error.ParseError(_('not comparable'))
+        raise error.ParseError(_(b'not comparable'))
 
     def filter(self, context, mapping, select):
         # implement if necessary; we'll need a wrapped type for a mapping dict
-        raise error.ParseError(_('not filterable without template'))
+        raise error.ParseError(_(b'not filterable without template'))
 
     def join(self, context, mapping, sep):
         mapsiter = _iteroverlaymaps(context, mapping, self.itermaps(context))
@@ -425,7 +447,7 @@
         elif self._tmpl:
             itemiter = (context.expand(self._tmpl, m) for m in mapsiter)
         else:
-            raise error.ParseError(_('not displayable without template'))
+            raise error.ParseError(_(b'not displayable without template'))
         return joinitems(itemiter, sep)
 
     def show(self, context, mapping):
@@ -437,10 +459,16 @@
         for nm in self.itermaps(context):
             # drop internal resources (recursively) which shouldn't be displayed
             lm = context.overlaymap(mapping, nm)
-            items.append({k: unwrapvalue(context, lm, v)
-                          for k, v in nm.iteritems() if k not in knownres})
+            items.append(
+                {
+                    k: unwrapvalue(context, lm, v)
+                    for k, v in pycompat.iteritems(nm)
+                    if k not in knownres
+                }
+            )
         return items
 
+
 class mappinggenerator(_mappingsequence):
     """Wrapper for generator of template mappings
 
@@ -448,7 +476,7 @@
     mapping dicts.
     """
 
-    def __init__(self, make, args=(), name=None, tmpl=None, sep=''):
+    def __init__(self, make, args=(), name=None, tmpl=None, sep=b''):
         super(mappinggenerator, self).__init__(name, tmpl, sep)
         self._make = make
         self._args = args
@@ -459,10 +487,11 @@
     def tobool(self, context, mapping):
         return _nonempty(self.itermaps(context))
 
+
 class mappinglist(_mappingsequence):
     """Wrapper for list of template mappings"""
 
-    def __init__(self, mappings, name=None, tmpl=None, sep=''):
+    def __init__(self, mappings, name=None, tmpl=None, sep=b''):
         super(mappinglist, self).__init__(name, tmpl, sep)
         self._mappings = mappings
 
@@ -472,6 +501,7 @@
     def tobool(self, context, mapping):
         return bool(self._mappings)
 
+
 class mappingdict(mappable, _mappingsequence):
     """Wrapper for a single template mapping
 
@@ -495,6 +525,7 @@
     def tovalue(self, context, mapping):
         return super(mappingdict, self).tovalue(context, mapping)[0]
 
+
 class mappingnone(wrappedvalue):
     """Wrapper for None, but supports map operation
 
@@ -508,6 +539,7 @@
     def itermaps(self, context):
         return iter([])
 
+
 class mappedgenerator(wrapped):
     """Wrapper for generator of strings which acts as a list
 
@@ -528,7 +560,7 @@
         return self._make(context, *self._args)
 
     def getmember(self, context, mapping, key):
-        raise error.ParseError(_('not a dictionary'))
+        raise error.ParseError(_(b'not a dictionary'))
 
     def getmin(self, context, mapping):
         return self._getby(context, mapping, min)
@@ -539,7 +571,7 @@
     def _getby(self, context, mapping, func):
         xs = self.tovalue(context, mapping)
         if not xs:
-            raise error.ParseError(_('empty sequence'))
+            raise error.ParseError(_(b'empty sequence'))
         return func(xs)
 
     @staticmethod
@@ -554,13 +586,13 @@
         return mappedgenerator(self._filteredgen, args)
 
     def itermaps(self, context):
-        raise error.ParseError(_('list of strings is not mappable'))
+        raise error.ParseError(_(b'list of strings is not mappable'))
 
     def join(self, context, mapping, sep):
         return joinitems(self._gen(context), sep)
 
     def show(self, context, mapping):
-        return self.join(context, mapping, '')
+        return self.join(context, mapping, b'')
 
     def tobool(self, context, mapping):
         return _nonempty(self._gen(context))
@@ -568,36 +600,61 @@
     def tovalue(self, context, mapping):
         return [stringify(context, mapping, x) for x in self._gen(context)]
 
-def hybriddict(data, key='key', value='value', fmt=None, gen=None):
+
+def hybriddict(data, key=b'key', value=b'value', fmt=None, gen=None):
     """Wrap data to support both dict-like and string-like operations"""
     prefmt = pycompat.identity
     if fmt is None:
-        fmt = '%s=%s'
+        fmt = b'%s=%s'
         prefmt = pycompat.bytestr
-    return hybrid(gen, data, lambda k: {key: k, value: data[k]},
-                  lambda k: fmt % (prefmt(k), prefmt(data[k])))
+    return hybrid(
+        gen,
+        data,
+        lambda k: {key: k, value: data[k]},
+        lambda k: fmt % (prefmt(k), prefmt(data[k])),
+    )
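
A sketch of the dual behavior (not part of the patch):

    d = hybriddict({b'k': b'v'})                # gen=None
    assert d.tovalue(None, {}) == {b'k': b'v'}  # dict-like access
    # string-like rendering goes through the default b'%s=%s' joinfmt,
    # so the value displays as "k=v"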
+
 
 def hybridlist(data, name, fmt=None, gen=None):
     """Wrap data to support both list-like and string-like operations"""
     prefmt = pycompat.identity
     if fmt is None:
-        fmt = '%s'
+        fmt = b'%s'
         prefmt = pycompat.bytestr
     return hybrid(gen, data, lambda x: {name: x}, lambda x: fmt % prefmt(x))
 
-def compatdict(context, mapping, name, data, key='key', value='value',
-               fmt=None, plural=None, separator=' '):
+
+def compatdict(
+    context,
+    mapping,
+    name,
+    data,
+    key=b'key',
+    value=b'value',
+    fmt=None,
+    plural=None,
+    separator=b' ',
+):
     """Wrap data like hybriddict(), but also supports old-style list template
 
     This exists for backward compatibility with the old-style template. Use
     hybriddict() for new template keywords.
     """
-    c = [{key: k, value: v} for k, v in data.iteritems()]
+    c = [{key: k, value: v} for k, v in pycompat.iteritems(data)]
     f = _showcompatlist(context, mapping, name, c, plural, separator)
     return hybriddict(data, key=key, value=value, fmt=fmt, gen=f)
 
-def compatlist(context, mapping, name, data, element=None, fmt=None,
-               plural=None, separator=' '):
+
+def compatlist(
+    context,
+    mapping,
+    name,
+    data,
+    element=None,
+    fmt=None,
+    plural=None,
+    separator=b' ',
+):
     """Wrap data like hybridlist(), but also supports old-style list template
 
     This exists for backward compatibility with the old-style template. Use
@@ -606,6 +663,7 @@
     f = _showcompatlist(context, mapping, name, data, plural, separator)
     return hybridlist(data, name=element or name, fmt=fmt, gen=f)
 
+
 def compatfilecopiesdict(context, mapping, name, copies):
     """Wrap list of (dest, source) file names to support old-style list
     template and field names
@@ -614,12 +672,16 @@
     keywords.
     """
     # no need to provide {path} to old-style list template
-    c = [{'name': k, 'source': v} for k, v in copies]
-    f = _showcompatlist(context, mapping, name, c, plural='file_copies')
+    c = [{b'name': k, b'source': v} for k, v in copies]
+    f = _showcompatlist(context, mapping, name, c, plural=b'file_copies')
     copies = util.sortdict(copies)
-    return hybrid(f, copies,
-                  lambda k: {'name': k, 'path': k, 'source': copies[k]},
-                  lambda k: '%s (%s)' % (k, copies[k]))
+    return hybrid(
+        f,
+        copies,
+        lambda k: {b'name': k, b'path': k, b'source': copies[k]},
+        lambda k: b'%s (%s)' % (k, copies[k]),
+    )
+
 
 def compatfileslist(context, mapping, name, files):
     """Wrap list of file names to support old-style list template and field
@@ -629,10 +691,14 @@
     keywords.
     """
     f = _showcompatlist(context, mapping, name, files)
-    return hybrid(f, files, lambda x: {'file': x, 'path': x},
-                  pycompat.identity)
+    return hybrid(
+        f, files, lambda x: {b'file': x, b'path': x}, pycompat.identity
+    )
 
-def _showcompatlist(context, mapping, name, values, plural=None, separator=' '):
+
+def _showcompatlist(
+    context, mapping, name, values, plural=None, separator=b' '
+):
     """Return a generator that renders old-style list template
 
     name is name of key in template map.
@@ -655,9 +721,9 @@
     expand 'end_foos'.
     """
     if not plural:
-        plural = name + 's'
+        plural = name + b's'
     if not values:
-        noname = 'no_' + plural
+        noname = b'no_' + plural
         if context.preload(noname):
             yield context.process(noname, mapping)
         return
@@ -670,9 +736,10 @@
                 r.update(mapping)
                 yield r
         return
-    startname = 'start_' + plural
+    startname = b'start_' + plural
     if context.preload(startname):
         yield context.process(startname, mapping)
+
     def one(v, tag=name):
         vmapping = {}
         try:
@@ -689,7 +756,8 @@
                 vmapping[name] = v
         vmapping = context.overlaymap(mapping, vmapping)
         return context.process(tag, vmapping)
-    lastname = 'last_' + name
+
+    lastname = b'last_' + name
     if context.preload(lastname):
         last = values.pop()
     else:
@@ -698,10 +766,11 @@
         yield one(v)
     if last is not None:
         yield one(last, tag=lastname)
-    endname = 'end_' + plural
+    endname = b'end_' + plural
     if context.preload(endname):
         yield context.process(endname, mapping)
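
Hypothetical map entries driving the protocol above for name = b'file'
(sketch only; 'files' is the default plural, name + b's'):

    start_files = 'files: '
    file = '{file} '
    last_file = '{file}'
    end_files = '\n'
    no_files = '(none)\n'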
 
+
 def flatten(context, mapping, thing):
     """Yield a single stream from a possibly nested set of iterators"""
     if isinstance(thing, wrapped):
@@ -711,11 +780,13 @@
     elif isinstance(thing, str):
         # We can only hit this on Python 3, and it's here to guard
         # against infinite recursion.
-        raise error.ProgrammingError('Mercurial IO including templates is done'
-                                     ' with bytes, not strings, got %r' % thing)
+        raise error.ProgrammingError(
+            b'Mercurial IO including templates is done'
+            b' with bytes, not strings, got %r' % thing
+        )
     elif thing is None:
         pass
-    elif not util.safehasattr(thing, '__iter__'):
+    elif not util.safehasattr(thing, b'__iter__'):
         yield pycompat.bytestr(thing)
     else:
         for i in thing:
@@ -725,18 +796,20 @@
                 yield i
             elif i is None:
                 pass
-            elif not util.safehasattr(i, '__iter__'):
+            elif not util.safehasattr(i, b'__iter__'):
                 yield pycompat.bytestr(i)
             else:
                 for j in flatten(context, mapping, i):
                     yield j
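
A sketch of the recursion (not part of the patch; context and mapping are
only consulted for wrapped values, so None and {} suffice here):

    out = list(flatten(None, {}, [b'a', [1, None, (b'b',)]]))
    assert out == [b'a', b'1', b'b']  # ints stringified, None dropped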
 
+
 def stringify(context, mapping, thing):
     """Turn values into bytes by converting into text and concatenating them"""
     if isinstance(thing, bytes):
         return thing  # retain localstr to be round-tripped
     return b''.join(flatten(context, mapping, thing))
 
+
 def findsymbolicname(arg):
     """Find symbolic name for the given compiled expression; returns None
     if nothing found reliably"""
@@ -749,6 +822,7 @@
         else:
             return None
 
+
 def _nonempty(xiter):
     try:
         next(xiter)
@@ -756,23 +830,27 @@
     except StopIteration:
         return False
 
+
 def _unthunk(context, mapping, thing):
     """Evaluate a lazy byte string into value"""
     if not isinstance(thing, types.GeneratorType):
         return thing
     return stringify(context, mapping, thing)
 
+
 def evalrawexp(context, mapping, arg):
     """Evaluate given argument as a bare template object which may require
     further processing (such as folding generator of strings)"""
     func, data = arg
     return func(context, mapping, data)
 
+
 def evalwrapped(context, mapping, arg):
     """Evaluate given argument to wrapped object"""
     thing = evalrawexp(context, mapping, arg)
     return makewrapped(context, mapping, thing)
 
+
 def makewrapped(context, mapping, thing):
     """Lift object to a wrapped type"""
     if isinstance(thing, wrapped):
@@ -782,10 +860,12 @@
         return wrappedbytes(thing)
     return wrappedvalue(thing)
 
+
 def evalfuncarg(context, mapping, arg):
     """Evaluate given argument as value type"""
     return unwrapvalue(context, mapping, evalrawexp(context, mapping, arg))
 
+
 def unwrapvalue(context, mapping, thing):
     """Move the inner value object out of the wrapper"""
     if isinstance(thing, wrapped):
@@ -794,6 +874,7 @@
     # such as date tuple, but filter does not want generator.
     return _unthunk(context, mapping, thing)
 
+
 def evalboolean(context, mapping, arg):
     """Evaluate given argument as boolean, but also takes boolean literals"""
     func, data = arg
@@ -806,12 +887,14 @@
         thing = func(context, mapping, data)
     return makewrapped(context, mapping, thing).tobool(context, mapping)
 
+
 def evaldate(context, mapping, arg, err=None):
     """Evaluate given argument as a date tuple or a date string; returns
     a (unixtime, offset) tuple"""
     thing = evalrawexp(context, mapping, arg)
     return unwrapdate(context, mapping, thing, err)
 
+
 def unwrapdate(context, mapping, thing, err=None):
     if isinstance(thing, date):
         return thing.tovalue(context, mapping)
@@ -820,26 +903,30 @@
     try:
         return dateutil.parsedate(thing)
     except AttributeError:
-        raise error.ParseError(err or _('not a date tuple nor a string'))
+        raise error.ParseError(err or _(b'not a date tuple nor a string'))
     except error.ParseError:
         if not err:
             raise
         raise error.ParseError(err)
 
+
 def evalinteger(context, mapping, arg, err=None):
     thing = evalrawexp(context, mapping, arg)
     return unwrapinteger(context, mapping, thing, err)
 
+
 def unwrapinteger(context, mapping, thing, err=None):
     thing = unwrapvalue(context, mapping, thing)
     try:
         return int(thing)
     except (TypeError, ValueError):
-        raise error.ParseError(err or _('not an integer'))
+        raise error.ParseError(err or _(b'not an integer'))
+
 
 def evalstring(context, mapping, arg):
     return stringify(context, mapping, evalrawexp(context, mapping, arg))
 
+
 def evalstringliteral(context, mapping, arg):
     """Evaluate given argument as string template, but returns symbol name
     if it is unknown"""
@@ -850,6 +937,7 @@
         thing = func(context, mapping, data)
     return stringify(context, mapping, thing)
 
+
 _unwrapfuncbytype = {
     None: unwrapvalue,
     bytes: stringify,
@@ -857,26 +945,32 @@
     int: unwrapinteger,
 }
 
+
 def unwrapastype(context, mapping, thing, typ):
     """Move the inner value object out of the wrapper and coerce its type"""
     try:
         f = _unwrapfuncbytype[typ]
     except KeyError:
-        raise error.ProgrammingError('invalid type specified: %r' % typ)
+        raise error.ProgrammingError(b'invalid type specified: %r' % typ)
     return f(context, mapping, thing)
 
+
 def runinteger(context, mapping, data):
     return int(data)
 
+
 def runstring(context, mapping, data):
     return data
 
+
 def _recursivesymbolblocker(key):
     def showrecursion(context, mapping):
-        raise error.Abort(_("recursive reference '%s' in template") % key)
+        raise error.Abort(_(b"recursive reference '%s' in template") % key)
+
     return showrecursion
 
-def runsymbol(context, mapping, key, default=''):
+
+def runsymbol(context, mapping, key, default=b''):
     v = context.symbol(mapping, key)
     if v is None:
         # put poison to cut recursion. we can't move this to parsing phase
@@ -896,10 +990,12 @@
             return None
     return v
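
A sketch of the poisoning at work (hypothetical map entry; not part of
the patch):

    # changeset = '{changeset}'
    # Expanding it installs _recursivesymbolblocker(b'changeset') before
    # the body is evaluated; the inner {changeset} hits the blocker and
    # aborts with "recursive reference 'changeset' in template".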
 
+
 def runtemplate(context, mapping, template):
     for arg in template:
         yield evalrawexp(context, mapping, arg)
 
+
 def runfilter(context, mapping, data):
     arg, filt = data
     thing = evalrawexp(context, mapping, arg)
@@ -910,22 +1006,27 @@
     except error.ParseError as e:
         raise error.ParseError(bytes(e), hint=_formatfiltererror(arg, filt))
 
+
 def _formatfiltererror(arg, filt):
     fn = pycompat.sysbytes(filt.__name__)
     sym = findsymbolicname(arg)
     if not sym:
-        return _("incompatible use of template filter '%s'") % fn
-    return (_("template filter '%s' is not compatible with keyword '%s'")
-            % (fn, sym))
+        return _(b"incompatible use of template filter '%s'") % fn
+    return _(b"template filter '%s' is not compatible with keyword '%s'") % (
+        fn,
+        sym,
+    )
+
 
 def _iteroverlaymaps(context, origmapping, newmappings):
     """Generate combined mappings from the original mapping and an iterable
     of partial mappings to override the original"""
     for i, nm in enumerate(newmappings):
         lm = context.overlaymap(origmapping, nm)
-        lm['index'] = i
+        lm[b'index'] = i
         yield lm
 
+
 def _applymap(context, mapping, d, darg, targ):
     try:
         diter = d.itermaps(context)
@@ -933,16 +1034,18 @@
         sym = findsymbolicname(darg)
         if not sym:
             raise
-        hint = _("keyword '%s' does not support map operation") % sym
+        hint = _(b"keyword '%s' does not support map operation") % sym
         raise error.ParseError(bytes(err), hint=hint)
     for lm in _iteroverlaymaps(context, mapping, diter):
         yield evalrawexp(context, lm, targ)
 
+
 def runmap(context, mapping, data):
     darg, targ = data
     d = evalwrapped(context, mapping, darg)
     return mappedgenerator(_applymap, args=(mapping, d, darg, targ))
 
+
 def runmember(context, mapping, data):
     darg, memb = data
     d = evalwrapped(context, mapping, darg)
@@ -955,24 +1058,30 @@
         sym = findsymbolicname(darg)
         if not sym:
             raise
-        hint = _("keyword '%s' does not support member operation") % sym
+        hint = _(b"keyword '%s' does not support member operation") % sym
         raise error.ParseError(bytes(err), hint=hint)
 
+
 def runnegate(context, mapping, data):
-    data = evalinteger(context, mapping, data,
-                       _('negation needs an integer argument'))
+    data = evalinteger(
+        context, mapping, data, _(b'negation needs an integer argument')
+    )
     return -data
 
+
 def runarithmetic(context, mapping, data):
     func, left, right = data
-    left = evalinteger(context, mapping, left,
-                       _('arithmetic only defined on integers'))
-    right = evalinteger(context, mapping, right,
-                        _('arithmetic only defined on integers'))
+    left = evalinteger(
+        context, mapping, left, _(b'arithmetic only defined on integers')
+    )
+    right = evalinteger(
+        context, mapping, right, _(b'arithmetic only defined on integers')
+    )
     try:
         return func(left, right)
     except ZeroDivisionError:
-        raise error.Abort(_('division by zero is not defined'))
+        raise error.Abort(_(b'division by zero is not defined'))
+
 
 def joinitems(itemiter, sep):
     """Join items with the separator; Returns generator of bytes"""
--- a/mercurial/testing/storage.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/testing/storage.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,19 +14,21 @@
     nullid,
     nullrev,
 )
+from ..pycompat import getattr
 from .. import (
     error,
     mdiff,
-    repository,
 )
-from ..utils import (
-    storageutil,
-)
+from ..interfaces import repository
+from ..utils import storageutil
+
 
 class basetestcase(unittest.TestCase):
-    if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
-        assertRaisesRegex = (# camelcase-required
-            unittest.TestCase.assertRaisesRegexp)
+    if not getattr(unittest.TestCase, 'assertRaisesRegex', False):
+        assertRaisesRegex = (  # camelcase-required
+            unittest.TestCase.assertRaisesRegexp
+        )
+
 
 class ifileindextests(basetestcase):
     """Generic tests for the ifileindex interface.
@@ -36,10 +38,11 @@
 
     Use ``makeifileindextests()`` to create an instance of this type.
     """
+
     def testempty(self):
         f = self._makefilefn()
-        self.assertEqual(len(f), 0, 'new file store has 0 length by default')
-        self.assertEqual(list(f), [], 'iter yields nothing by default')
+        self.assertEqual(len(f), 0, b'new file store has 0 length by default')
+        self.assertEqual(list(f), [], b'iter yields nothing by default')
 
         gen = iter(f)
         with self.assertRaises(StopIteration):
@@ -396,6 +399,7 @@
         self.assertEqual(f.children(node4), [])
         self.assertEqual(f.children(node5), [])
 
+
 class ifiledatatests(basetestcase):
     """Generic tests for the ifiledata interface.
 
@@ -404,12 +408,15 @@
 
     Use ``makeifiledatatests()`` to create an instance of this type.
     """
+
     def testempty(self):
         f = self._makefilefn()
 
         self.assertEqual(f.storageinfo(), {})
-        self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
-                         {'revisionscount': 0, 'trackedsize': 0})
+        self.assertEqual(
+            f.storageinfo(revisionscount=True, trackedsize=True),
+            {b'revisionscount': 0, b'trackedsize': 0},
+        )
 
         self.assertEqual(f.size(nullrev), 0)
 
@@ -421,7 +428,7 @@
                 f.size(i)
 
         self.assertEqual(f.revision(nullid), b'')
-        self.assertEqual(f.revision(nullid, raw=True), b'')
+        self.assertEqual(f.rawdata(nullid), b'')
 
         with self.assertRaises(error.LookupError):
             f.revision(b'\x01' * 20)
@@ -464,8 +471,10 @@
             node = f.add(fulltext, None, tr, 0, nullid, nullid)
 
         self.assertEqual(f.storageinfo(), {})
-        self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
-                         {'revisionscount': 1, 'trackedsize': len(fulltext)})
+        self.assertEqual(
+            f.storageinfo(revisionscount=True, trackedsize=True),
+            {b'revisionscount': 1, b'trackedsize': len(fulltext)},
+        )
 
         self.assertEqual(f.size(0), len(fulltext))
 
@@ -473,7 +482,7 @@
             f.size(1)
 
         self.assertEqual(f.revision(node), fulltext)
-        self.assertEqual(f.revision(node, raw=True), fulltext)
+        self.assertEqual(f.rawdata(node), fulltext)
 
         self.assertEqual(f.read(node), fulltext)
 
@@ -533,9 +542,12 @@
         self.assertEqual(
             f.storageinfo(revisionscount=True, trackedsize=True),
             {
-                'revisionscount': 3,
-                'trackedsize': len(fulltext0) + len(fulltext1) + len(fulltext2),
-            })
+                b'revisionscount': 3,
+                b'trackedsize': len(fulltext0)
+                + len(fulltext1)
+                + len(fulltext2),
+            },
+        )
 
         self.assertEqual(f.size(0), len(fulltext0))
         self.assertEqual(f.size(1), len(fulltext1))
@@ -545,11 +557,11 @@
             f.size(3)
 
         self.assertEqual(f.revision(node0), fulltext0)
-        self.assertEqual(f.revision(node0, raw=True), fulltext0)
+        self.assertEqual(f.rawdata(node0), fulltext0)
         self.assertEqual(f.revision(node1), fulltext1)
-        self.assertEqual(f.revision(node1, raw=True), fulltext1)
+        self.assertEqual(f.rawdata(node1), fulltext1)
         self.assertEqual(f.revision(node2), fulltext2)
-        self.assertEqual(f.revision(node2, raw=True), fulltext2)
+        self.assertEqual(f.rawdata(node2), fulltext2)
 
         with self.assertRaises(error.LookupError):
             f.revision(b'\x01' * 20)
@@ -601,9 +613,10 @@
         self.assertEqual(rev.basenode, node0)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
-        self.assertEqual(rev.delta,
-                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
-                         fulltext1)
+        self.assertEqual(
+            rev.delta,
+            b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' + fulltext1,
+        )
 
         rev = next(gen)
 
@@ -614,9 +627,10 @@
         self.assertEqual(rev.basenode, node1)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
-        self.assertEqual(rev.delta,
-                         b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
-                         fulltext2)
+        self.assertEqual(
+            rev.delta,
+            b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' + fulltext2,
+        )
 
         with self.assertRaises(StopIteration):
             next(gen)
@@ -644,9 +658,10 @@
         self.assertEqual(rev.basenode, node0)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
-        self.assertEqual(rev.delta,
-                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
-                         fulltext1)
+        self.assertEqual(
+            rev.delta,
+            b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' + fulltext1,
+        )
 
         rev = next(gen)
 
@@ -657,27 +672,30 @@
         self.assertEqual(rev.basenode, node1)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
-        self.assertEqual(rev.delta,
-                         b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
-                         fulltext2)
+        self.assertEqual(
+            rev.delta,
+            b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' + fulltext2,
+        )
 
         with self.assertRaises(StopIteration):
             next(gen)
 
         # Unrecognized nodesorder value raises ProgrammingError.
         with self.assertRaises(error.ProgrammingError):
-            list(f.emitrevisions([], nodesorder='bad'))
+            list(f.emitrevisions([], nodesorder=b'bad'))
 
         # nodesorder=storage is recognized. But we can't test it thoroughly
         # because behavior is storage-dependent.
-        res = list(f.emitrevisions([node2, node1, node0],
-                                         nodesorder='storage'))
+        res = list(
+            f.emitrevisions([node2, node1, node0], nodesorder=b'storage')
+        )
         self.assertEqual(len(res), 3)
         self.assertEqual({o.node for o in res}, {node0, node1, node2})
 
         # nodesorder=nodes forces the order.
-        gen = f.emitrevisions([node2, node0], nodesorder='nodes',
-                              revisiondata=True)
+        gen = f.emitrevisions(
+            [node2, node0], nodesorder=b'nodes', revisiondata=True
+        )
 
         rev = next(gen)
         self.assertEqual(rev.node, node2)
@@ -717,16 +735,18 @@
         self.assertEqual(rev.basenode, node1)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
-        self.assertEqual(rev.delta,
-                         b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
-                         fulltext2)
+        self.assertEqual(
+            rev.delta,
+            b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' + fulltext2,
+        )
 
         with self.assertRaises(StopIteration):
             next(gen)
 
         # assumehaveparentrevisions=True allows delta against initial revision.
-        gen = f.emitrevisions([node2, node1],
-                              revisiondata=True, assumehaveparentrevisions=True)
+        gen = f.emitrevisions(
+            [node2, node1], revisiondata=True, assumehaveparentrevisions=True
+        )
 
         rev = next(gen)
         self.assertEqual(rev.node, node1)
@@ -735,14 +755,16 @@
         self.assertEqual(rev.basenode, node0)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
-        self.assertEqual(rev.delta,
-                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
-                         fulltext1)
+        self.assertEqual(
+            rev.delta,
+            b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' + fulltext1,
+        )
 
         # forceprevious=True forces a delta against the previous revision.
         # Special case for initial revision.
-        gen = f.emitrevisions([node0], revisiondata=True,
-                              deltamode=repository.CG_DELTAMODE_PREV)
+        gen = f.emitrevisions(
+            [node0], revisiondata=True, deltamode=repository.CG_DELTAMODE_PREV
+        )
 
         rev = next(gen)
         self.assertEqual(rev.node, node0)
@@ -751,15 +773,19 @@
         self.assertEqual(rev.basenode, nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
-        self.assertEqual(rev.delta,
-                         b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
-                         fulltext0)
+        self.assertEqual(
+            rev.delta,
+            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' + fulltext0,
+        )
 
         with self.assertRaises(StopIteration):
             next(gen)
 
-        gen = f.emitrevisions([node0, node2], revisiondata=True,
-                              deltamode=repository.CG_DELTAMODE_PREV)
+        gen = f.emitrevisions(
+            [node0, node2],
+            revisiondata=True,
+            deltamode=repository.CG_DELTAMODE_PREV,
+        )
 
         rev = next(gen)
         self.assertEqual(rev.node, node0)
@@ -768,9 +794,10 @@
         self.assertEqual(rev.basenode, nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
-        self.assertEqual(rev.delta,
-                         b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
-                         fulltext0)
+        self.assertEqual(
+            rev.delta,
+            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' + fulltext0,
+        )
 
         rev = next(gen)
         self.assertEqual(rev.node, node2)
@@ -796,17 +823,21 @@
             b'copyrev': b'b' * 40,
         }
 
-        stored1 = b''.join([
-            b'\x01\ncopy: source0\n',
-            b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
-            fulltext1,
-        ])
+        stored1 = b''.join(
+            [
+                b'\x01\ncopy: source0\n',
+                b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
+                fulltext1,
+            ]
+        )
 
-        stored2 = b''.join([
-            b'\x01\ncopy: source1\n',
-            b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
-            fulltext2,
-        ])
+        stored2 = b''.join(
+            [
+                b'\x01\ncopy: source1\n',
+                b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
+                fulltext2,
+            ]
+        )
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
@@ -819,9 +850,9 @@
         self.assertEqual(f.size(2), len(fulltext2))
 
         self.assertEqual(f.revision(node1), stored1)
-        self.assertEqual(f.revision(node1, raw=True), stored1)
+        self.assertEqual(f.rawdata(node1), stored1)
         self.assertEqual(f.revision(node2), stored2)
-        self.assertEqual(f.revision(node2, raw=True), stored2)
+        self.assertEqual(f.rawdata(node2), stored2)
 
         self.assertEqual(f.read(node1), fulltext1)
         self.assertEqual(f.read(node2), fulltext2)
@@ -845,11 +876,13 @@
             b'copy': b'source0',
             b'copyrev': b'b' * 40,
         }
-        stored1 = b''.join([
-            b'\x01\ncopy: source0\n',
-            b'copyrev: %s\n' % (b'b' * 40),
-            b'\x01\n\x01\nbar',
-        ])
+        stored1 = b''.join(
+            [
+                b'\x01\ncopy: source0\n',
+                b'copyrev: %s\n' % (b'b' * 40),
+                b'\x01\n\x01\nbar',
+            ]
+        )
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
@@ -862,10 +895,10 @@
         self.assertEqual(f.size(1), len(fulltext1))
 
         self.assertEqual(f.revision(node0), stored0)
-        self.assertEqual(f.revision(node0, raw=True), stored0)
+        self.assertEqual(f.rawdata(node0), stored0)
 
         self.assertEqual(f.revision(node1), stored1)
-        self.assertEqual(f.revision(node1, raw=True), stored1)
+        self.assertEqual(f.rawdata(node1), stored1)
 
         self.assertEqual(f.read(node0), fulltext0)
         self.assertEqual(f.read(node1), fulltext1)
@@ -886,8 +919,9 @@
             node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
             node1 = b'\xaa' * 20
 
-            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
-                                   rawtext=fulltext1)
+            self._addrawrevisionfn(
+                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+            )
 
         self.assertEqual(len(f), 2)
         self.assertEqual(f.parents(node1), (node0, nullid))
@@ -896,10 +930,10 @@
         with self.assertRaises(error.StorageError):
             f.revision(node1)
 
-        # raw=True still verifies because there are no special storage
+        # rawdata() still verifies because there are no special storage
         # settings.
         with self.assertRaises(error.StorageError):
-            f.revision(node1, raw=True)
+            f.rawdata(node1)
 
         # read() behaves like revision().
         with self.assertRaises(error.StorageError):
@@ -909,7 +943,7 @@
         # reading/validating the fulltext to return rename metadata.
 
     def testbadnoderevisionraw(self):
-        # Like above except we test revision(raw=True) first to isolate
+        # Like above except we test rawdata() first to isolate
         # revision caching behavior.
         f = self._makefilefn()
 
@@ -920,14 +954,15 @@
             node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
             node1 = b'\xaa' * 20
 
-            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
-                                   rawtext=fulltext1)
+            self._addrawrevisionfn(
+                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+            )
 
         with self.assertRaises(error.StorageError):
-            f.revision(node1, raw=True)
+            f.rawdata(node1)
 
         with self.assertRaises(error.StorageError):
-            f.revision(node1, raw=True)
+            f.rawdata(node1)
 
     def testbadnoderead(self):
         # Like above except we test read() first to isolate revision caching
@@ -941,8 +976,9 @@
             node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
             node1 = b'\xaa' * 20
 
-            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
-                                   rawtext=fulltext1)
+            self._addrawrevisionfn(
+                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+            )
 
         with self.assertRaises(error.StorageError):
             f.read(node1)
@@ -961,8 +997,9 @@
             node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
             node1 = b'\xaa' * 20
 
-            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
-                                   rawtext=fulltext1)
+            self._addrawrevisionfn(
+                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+            )
 
         with self.assertRaises(error.StorageError):
             f.read(node1)
@@ -971,8 +1008,9 @@
 
         with self._maketransactionfn() as tr:
             delta = mdiff.textdiff(fulltext1, fulltext2)
-            self._addrawrevisionfn(f, tr, node2, node1, nullid,
-                                   2, delta=(1, delta))
+            self._addrawrevisionfn(
+                f, tr, node2, node1, nullid, 2, delta=(1, delta)
+            )
 
         self.assertEqual(len(f), 3)
 
@@ -983,9 +1021,7 @@
     def testcensored(self):
         f = self._makefilefn()
 
-        stored1 = storageutil.packmeta({
-            b'censored': b'tombstone',
-        }, b'')
+        stored1 = storageutil.packmeta({b'censored': b'tombstone'}, b'')
 
         with self._maketransactionfn() as tr:
             node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
@@ -993,8 +1029,9 @@
             # The node value doesn't matter since we can't verify it.
             node1 = b'\xbb' * 20
 
-            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
-                                   censored=True)
+            self._addrawrevisionfn(
+                f, tr, node1, node0, nullid, 1, stored1, censored=True
+            )
 
         self.assertTrue(f.iscensored(1))
 
@@ -1002,20 +1039,18 @@
             f.revision(1)
 
         with self.assertRaises(error.CensoredNodeError):
-            f.revision(1, raw=True)
+            f.rawdata(1)
 
         with self.assertRaises(error.CensoredNodeError):
             f.read(1)
 
     def testcensoredrawrevision(self):
-        # Like above, except we do the revision(raw=True) request first to
+        # Like above, except we do the rawdata() request first to
         # isolate revision caching behavior.
 
         f = self._makefilefn()
 
-        stored1 = storageutil.packmeta({
-            b'censored': b'tombstone',
-        }, b'')
+        stored1 = storageutil.packmeta({b'censored': b'tombstone'}, b'')
 
         with self._maketransactionfn() as tr:
             node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
@@ -1023,11 +1058,13 @@
             # The node value doesn't matter since we can't verify it.
             node1 = b'\xbb' * 20
 
-            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
-                                   censored=True)
+            self._addrawrevisionfn(
+                f, tr, node1, node0, nullid, 1, stored1, censored=True
+            )
 
         with self.assertRaises(error.CensoredNodeError):
-            f.revision(1, raw=True)
+            f.rawdata(1)
+
 
 class ifilemutationtests(basetestcase):
     """Generic tests for the ifilemutation interface.
@@ -1037,6 +1074,7 @@
 
     Use ``makeifilemutationtests()`` to create an instance of this type.
     """
+
     def testaddnoop(self):
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
@@ -1071,6 +1109,7 @@
         f = self._makefilefn()
 
         callbackargs = []
+
         def cb(*args, **kwargs):
             callbackargs.append((args, kwargs))
 
@@ -1099,9 +1138,13 @@
         with self._maketransactionfn() as tr:
             nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
 
-        self.assertEqual(nodes, [
-            b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
-            b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])
+        self.assertEqual(
+            nodes,
+            [
+                b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
+                b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'
+            ],
+        )
 
         self.assertEqual(len(callbackargs), 1)
         self.assertEqual(callbackargs[0][0][1], nodes[0])
@@ -1147,9 +1190,7 @@
         # Attempt to apply a delta made against a censored revision.
         f = self._makefilefn()
 
-        stored1 = storageutil.packmeta({
-            b'censored': b'tombstone',
-        }, b'')
+        stored1 = storageutil.packmeta({b'censored': b'tombstone'}, b'')
 
         with self._maketransactionfn() as tr:
             node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
@@ -1157,8 +1198,9 @@
             # The node value doesn't matter since we can't verify it.
             node1 = b'\xbb' * 20
 
-            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
-                                   censored=True)
+            self._addrawrevisionfn(
+                f, tr, node1, node0, nullid, 1, stored1, censored=True
+            )
 
         delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
         deltas = [(b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0)]
@@ -1304,6 +1346,7 @@
         with self.assertRaises(error.LookupError):
             f.rev(node1)
 
+
 def makeifileindextests(makefilefn, maketransactionfn, addrawrevisionfn):
     """Create a unittest.TestCase class suitable for testing file storage.
 
@@ -1329,6 +1372,7 @@
     }
     return type(r'ifileindextests', (ifileindextests,), d)
 
+
 def makeifiledatatests(makefilefn, maketransactionfn, addrawrevisionfn):
     d = {
         r'_makefilefn': makefilefn,
@@ -1337,6 +1381,7 @@
     }
     return type(r'ifiledatatests', (ifiledatatests,), d)
 
+
 def makeifilemutationtests(makefilefn, maketransactionfn, addrawrevisionfn):
     d = {
         r'_makefilefn': makefilefn,
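
The make*tests() factories above stamp out one concrete TestCase per storage backend by dropping the backend's factory callables into the class dict of a dynamically built subclass (in the real code the injected callables take self, since they become methods). A standalone sketch of the type()-based pattern, with illustrative names::

    import unittest

    class _genericstoretests(unittest.TestCase):
        """Backend-agnostic tests; concrete backends supply _makestorefn."""

        def testempty(self):
            store = self._makestorefn()
            self.assertEqual(len(store), 0)

    def makestoretests(makestorefn):
        # Build a TestCase subclass whose class dict carries the backend
        # factory; staticmethod() keeps a plain function from turning
        # into a bound method when looked up via self.
        d = {'_makestorefn': staticmethod(makestorefn)}
        return type('storetests', (_genericstoretests,), d)

    # usage: bind the generic suite to a trivial list-backed "store"
    listtests = makestoretests(list)
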
--- a/mercurial/transaction.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/transaction.py	Mon Oct 21 11:09:48 2019 -0400
@@ -21,43 +21,60 @@
     pycompat,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
 version = 2
 
 # These are the file generators that should only be executed after the
 # finalizers are done, since they rely on the output of the finalizers (like
 # the changelog having been written).
-postfinalizegenerators = {
-    'bookmarks',
-    'dirstate'
-}
+postfinalizegenerators = {b'bookmarks', b'dirstate'}
 
-gengroupall='all'
-gengroupprefinalize='prefinalize'
-gengrouppostfinalize='postfinalize'
+gengroupall = b'all'
+gengroupprefinalize = b'prefinalize'
+gengrouppostfinalize = b'postfinalize'
+
 
 def active(func):
     def _active(self, *args, **kwds):
         if self._count == 0:
-            raise error.Abort(_(
-                'cannot use transaction when it is already committed/aborted'))
+            raise error.Abort(
+                _(
+                    b'cannot use transaction when it is already committed/aborted'
+                )
+            )
         return func(self, *args, **kwds)
+
     return _active
 
-def _playback(journal, report, opener, vfsmap, entries, backupentries,
-              unlink=True, checkambigfiles=None):
+
+def _playback(
+    journal,
+    report,
+    opener,
+    vfsmap,
+    entries,
+    backupentries,
+    unlink=True,
+    checkambigfiles=None,
+):
     for f, o, _ignore in entries:
         if o or not unlink:
-            checkambig = checkambigfiles and (f, '') in checkambigfiles
+            checkambig = checkambigfiles and (f, b'') in checkambigfiles
             try:
-                fp = opener(f, 'a', checkambig=checkambig)
+                fp = opener(f, b'a', checkambig=checkambig)
+                if fp.tell() < o:
+                    raise error.Abort(
+                        _(
+                            b"attempted to truncate %s to %d bytes, but it was "
+                            b"already %d bytes\n"
+                        )
+                        % (f, o, fp.tell())
+                    )
                 fp.truncate(o)
                 fp.close()
             except IOError:
-                report(_("failed to truncate %s\n") % f)
+                report(_(b"failed to truncate %s\n") % f)
                 raise
         else:
             try:
@@ -69,8 +86,7 @@
     backupfiles = []
     for l, f, b, c in backupentries:
         if l not in vfsmap and c:
-            report("couldn't handle %s: unknown cache location %s\n"
-                        % (b, l))
+            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
         vfs = vfsmap[l]
         try:
             if f and b:
@@ -81,7 +97,7 @@
                     util.copyfile(backuppath, filepath, checkambig=checkambig)
                     backupfiles.append(b)
                 except IOError:
-                    report(_("failed to recover %s\n") % f)
+                    report(_(b"failed to recover %s\n") % f)
             else:
                 target = f or b
                 try:
@@ -93,7 +109,7 @@
             if not c:
                 raise
 
-    backuppath = "%s.backupfiles" % journal
+    backuppath = b"%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
     opener.unlink(journal)
@@ -105,10 +121,22 @@
         # only the pure backup file remains; it is safe to ignore any error
         pass
 
+
 class transaction(util.transactional):
-    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
-                 after=None, createmode=None, validator=None, releasefn=None,
-                 checkambigfiles=None, name=r'<unnamed>'):
+    def __init__(
+        self,
+        report,
+        opener,
+        vfsmap,
+        journalname,
+        undoname=None,
+        after=None,
+        createmode=None,
+        validator=None,
+        releasefn=None,
+        checkambigfiles=None,
+        name=r'<unnamed>',
+    ):
         """Begin a new transaction
 
         Begins a new transaction that allows rolling back writes in the event of
@@ -129,7 +157,7 @@
         self._opener = opener
         # a map to access file in various {location -> vfs}
         vfsmap = vfsmap.copy()
-        vfsmap[''] = opener  # set default value
+        vfsmap[b''] = opener  # set default value
         self._vfsmap = vfsmap
         self._after = after
         self._entries = []
@@ -160,7 +188,7 @@
 
         # a dict of arguments to be passed to hooks
         self.hookargs = {}
-        self._file = opener.open(self._journal, "w")
+        self._file = opener.open(self._journal, b"w")
 
         # a list of ('location', 'path', 'backuppath', cache) entries.
         # - if 'backuppath' is empty, no file existed at backup time
@@ -170,9 +198,9 @@
         # (cache is currently unused)
         self._backupentries = []
         self._backupmap = {}
-        self._backupjournal = "%s.backupfiles" % self._journal
-        self._backupsfile = opener.open(self._backupjournal, 'w')
-        self._backupsfile.write('%d\n' % version)
+        self._backupjournal = b"%s.backupfiles" % self._journal
+        self._backupsfile = opener.open(self._backupjournal, b'w')
+        self._backupsfile.write(b'%d\n' % version)
 
         if createmode is not None:
             opener.chmod(self._journal, createmode & 0o666)
@@ -193,8 +221,11 @@
 
     def __repr__(self):
         name = r'/'.join(self._names)
-        return (r'<transaction name=%s, count=%d, usages=%d>' %
-                (name, self._count, self._usages))
+        return r'<transaction name=%s, count=%d, usages=%d>' % (
+            name,
+            self._count,
+            self._usages,
+        )
 
     def __del__(self):
         if self._journal:
@@ -236,11 +267,11 @@
         self._entries.append((file, offset, data))
         self._map[file] = len(self._entries) - 1
         # add enough data to the journal to do the truncate
-        self._file.write("%s\0%d\n" % (file, offset))
+        self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()
 
     @active
-    def addbackup(self, file, hardlink=True, location=''):
+    def addbackup(self, file, hardlink=True, location=b''):
         """Adds a backup of the file to the transaction
 
         Calling addbackup() creates a hardlink backup of the specified file
@@ -251,21 +282,21 @@
         * `hardlink`: use a hardlink to quickly create the backup
         """
         if self._queue:
-            msg = 'cannot use transaction.addbackup inside "group"'
+            msg = b'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)
 
         if file in self._map or file in self._backupmap:
             return
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
-        backupfilename = "%s.backup.%s" % (self._journal, filename)
+        backupfilename = b"%s.backup.%s" % (self._journal, filename)
         backupfile = vfs.reljoin(dirname, backupfilename)
         if vfs.exists(file):
             filepath = vfs.join(file)
             backuppath = vfs.join(backupfile)
             util.copyfile(filepath, backuppath, hardlink=hardlink)
         else:
-            backupfile = ''
+            backupfile = b''
 
         self._addbackupentry((location, file, backupfile, False))
 
@@ -273,21 +304,22 @@
         """register a new backup entry and write it to disk"""
         self._backupentries.append(entry)
         self._backupmap[entry[1]] = len(self._backupentries) - 1
-        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
+        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
         self._backupsfile.flush()
 
     @active
-    def registertmp(self, tmpfile, location=''):
+    def registertmp(self, tmpfile, location=b''):
         """register a temporary transaction file
 
         Such files will be deleted when the transaction exits (on both
         failure and success).
         """
-        self._addbackupentry((location, '', tmpfile, False))
+        self._addbackupentry((location, b'', tmpfile, False))
 
     @active
-    def addfilegenerator(self, genid, filenames, genfunc, order=0,
-                         location=''):
+    def addfilegenerator(
+        self, genid, filenames, genfunc, order=0, location=b''
+    ):
         """add a function to generates some files at transaction commit
 
         The `genfunc` argument is a function capable of generating proper
@@ -320,17 +352,19 @@
         if genid in self._filegenerators:
             del self._filegenerators[genid]
 
-    def _generatefiles(self, suffix='', group=gengroupall):
+    def _generatefiles(self, suffix=b'', group=gengroupall):
         # write files registered for generation
         any = False
-        for id, entry in sorted(self._filegenerators.iteritems()):
+        for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
             any = True
             order, filenames, genfunc, location = entry
 
             # for generation at closing, check if it's before or after finalize
             postfinalize = group == gengrouppostfinalize
-            if (group != gengroupall and
-                (id in postfinalizegenerators) != (postfinalize)):
+            if (
+                group != gengroupall
+                and (id in postfinalizegenerators) != postfinalize
+            ):
                 continue
 
             vfs = self._vfsmap[location]
@@ -344,8 +378,9 @@
                     else:
                         self.addbackup(name, location=location)
                         checkambig = (name, location) in self._checkambigfiles
-                    files.append(vfs(name, 'w', atomictemp=True,
-                                     checkambig=checkambig))
+                    files.append(
+                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
+                    )
                 genfunc(*files)
                 for f in files:
                     f.close()
@@ -375,7 +410,7 @@
             raise KeyError(file)
         index = self._map[file]
         self._entries[index] = (file, offset, data)
-        self._file.write("%s\0%d\n" % (file, offset))
+        self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()
 
     @active
@@ -417,7 +452,7 @@
             # remove callback since the data will have been flushed
             any = self._pendingcallback.pop(cat)(self)
             self._anypending = self._anypending or any
-        self._anypending |= self._generatefiles(suffix='.pending')
+        self._anypending |= self._generatefiles(suffix=b'.pending')
         return self._anypending
 
     @active
@@ -463,7 +498,7 @@
         '''commit the transaction'''
         if self._count == 1:
             self._validator(self)  # will raise exception if needed
-            self._validator = None # Help prevent cycles.
+            self._validator = None  # Help prevent cycles.
             self._generatefiles(group=gengroupprefinalize)
             categories = sorted(self._finalizecallback)
             for cat in categories:
@@ -480,8 +515,9 @@
         # cleanup temporary files
         for l, f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
-                self._report("couldn't remove %s: unknown cache location %s\n"
-                             % (b, l))
+                self._report(
+                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
+                )
                 continue
             vfs = self._vfsmap[l]
             if not f and b and vfs.exists(b):
@@ -491,21 +527,24 @@
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
-                    self._report("couldn't remove %s: %s\n"
-                                 % (vfs.join(b), inst))
+                    self._report(
+                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
+                    )
         self._entries = []
         self._writeundo()
         if self._after:
             self._after()
-            self._after = None # Help prevent cycles.
+            self._after = None  # Help prevent cycles.
         if self._opener.isfile(self._backupjournal):
             self._opener.unlink(self._backupjournal)
         if self._opener.isfile(self._journal):
             self._opener.unlink(self._journal)
         for l, _f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
-                self._report("couldn't remove %s: unknown cache location"
-                             "%s\n" % (b, l))
+                self._report(
+                    b"couldn't remove %s: unknown cache location"
+                    b"%s\n" % (b, l)
+                )
                 continue
             vfs = self._vfsmap[l]
             if b and vfs.exists(b):
@@ -515,13 +554,14 @@
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
-                    self._report("couldn't remove %s: %s\n"
-                                 % (vfs.join(b), inst))
+                    self._report(
+                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
+                    )
         self._backupentries = []
         self._journal = None
 
-        self._releasefn(self, True) # notify success of closing transaction
-        self._releasefn = None # Help prevent cycles.
+        self._releasefn(self, True)  # notify success of closing transaction
+        self._releasefn = None  # Help prevent cycles.
 
         # run post close action
         categories = sorted(self._postclosecallback)
@@ -541,18 +581,21 @@
         """write transaction data for possible future undo call"""
         if self._undoname is None:
             return
-        undobackupfile = self._opener.open("%s.backupfiles" % self._undoname,
-                                           'w')
-        undobackupfile.write('%d\n' % version)
+        undobackupfile = self._opener.open(
+            b"%s.backupfiles" % self._undoname, b'w'
+        )
+        undobackupfile.write(b'%d\n' % version)
         for l, f, b, c in self._backupentries:
             if not f:  # temporary file
                 continue
             if not b:
-                u = ''
+                u = b''
             else:
                 if l not in self._vfsmap and c:
-                    self._report("couldn't remove %s: unknown cache location"
-                                 "%s\n" % (b, l))
+                    self._report(
+                        b"couldn't remove %s: unknown cache location"
+                        b"%s\n" % (b, l)
+                    )
                     continue
                 vfs = self._vfsmap[l]
                 base, name = vfs.split(b)
@@ -560,10 +603,9 @@
                 uname = name.replace(self._journal, self._undoname, 1)
                 u = vfs.reljoin(base, uname)
                 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
-            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
+            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
         undobackupfile.close()
 
-
     def _abort(self):
         self._count = 0
         self._usages = 0
@@ -578,25 +620,34 @@
                     self._opener.unlink(self._journal)
                 return
 
-            self._report(_("transaction abort!\n"))
+            self._report(_(b"transaction abort!\n"))
 
             try:
                 for cat in sorted(self._abortcallback):
                     self._abortcallback[cat](self)
                 # Prevent double usage and help clear cycles.
                 self._abortcallback = None
-                _playback(self._journal, self._report, self._opener,
-                          self._vfsmap, self._entries, self._backupentries,
-                          False, checkambigfiles=self._checkambigfiles)
-                self._report(_("rollback completed\n"))
+                _playback(
+                    self._journal,
+                    self._report,
+                    self._opener,
+                    self._vfsmap,
+                    self._entries,
+                    self._backupentries,
+                    False,
+                    checkambigfiles=self._checkambigfiles,
+                )
+                self._report(_(b"rollback completed\n"))
             except BaseException as exc:
-                self._report(_("rollback failed - please run hg recover\n"))
-                self._report(_("(failure reason: %s)\n")
-                             % stringutil.forcebytestr(exc))
+                self._report(_(b"rollback failed - please run hg recover\n"))
+                self._report(
+                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
+                )
         finally:
             self._journal = None
-            self._releasefn(self, False) # notify failure of transaction
-            self._releasefn = None # Help prevent cycles.
+            self._releasefn(self, False)  # notify failure of transaction
+            self._releasefn = None  # Help prevent cycles.
+
 
 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
     """Rolls back the transaction contained in the given file
@@ -622,13 +673,14 @@
     fp.close()
     for l in lines:
         try:
-            f, o = l.split('\0')
+            f, o = l.split(b'\0')
             entries.append((f, int(o), None))
         except ValueError:
             report(
-                _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))
+                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
+            )
 
-    backupjournal = "%s.backupfiles" % file
+    backupjournal = b"%s.backupfiles" % file
     if opener.exists(backupjournal):
         fp = opener.open(backupjournal)
         lines = fp.readlines()
@@ -639,11 +691,22 @@
                     if line:
                         # Shave off the trailing newline
                         line = line[:-1]
-                        l, f, b, c = line.split('\0')
+                        l, f, b, c = line.split(b'\0')
                         backupentries.append((l, f, b, bool(c)))
             else:
-                report(_("journal was created by a different version of "
-                         "Mercurial\n"))
+                report(
+                    _(
+                        b"journal was created by a different version of "
+                        b"Mercurial\n"
+                    )
+                )
 
-    _playback(file, report, opener, vfsmap, entries, backupentries,
-              checkambigfiles=checkambigfiles)
+    _playback(
+        file,
+        report,
+        opener,
+        vfsmap,
+        entries,
+        backupentries,
+        checkambigfiles=checkambigfiles,
+    )
--- a/mercurial/treediscovery.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/treediscovery.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,6 +19,7 @@
     pycompat,
 )
 
+
 def findcommonincoming(repo, remote, heads=None, force=False):
     """Return a tuple (common, fetch, heads) used to identify the common
     subset of nodes between repo and remote.
@@ -38,7 +39,7 @@
 
     if not heads:
         with remote.commandexecutor() as e:
-            heads = e.callcommand('heads', {}).result()
+            heads = e.callcommand(b'heads', {}).result()
 
     if repo.changelog.tip() == nullid:
         base.add(nullid)
@@ -48,7 +49,7 @@
 
     # assume we're closer to the tip than the root
     # and start by examining the heads
-    repo.ui.status(_("searching for changes\n"))
+    repo.ui.status(_(b"searching for changes\n"))
 
     unknown = []
     for h in heads:
@@ -62,14 +63,14 @@
 
     req = set(unknown)
     reqcnt = 0
-    progress = repo.ui.makeprogress(_('searching'), unit=_('queries'))
+    progress = repo.ui.makeprogress(_(b'searching'), unit=_(b'queries'))
 
     # search through remote branches
     # a 'branch' here is a linear segment of history, with four parts:
     # head, root, first parent, second parent
     # (a branch always has two parents (or none) by definition)
     with remote.commandexecutor() as e:
-        branches = e.callcommand('branches', {'nodes': unknown}).result()
+        branches = e.callcommand(b'branches', {b'nodes': unknown}).result()
 
     unknown = collections.deque(branches)
     while unknown:
@@ -79,27 +80,27 @@
             if n[0] in seen:
                 continue
 
-            repo.ui.debug("examining %s:%s\n"
-                          % (short(n[0]), short(n[1])))
-            if n[0] == nullid: # found the end of the branch
+            repo.ui.debug(b"examining %s:%s\n" % (short(n[0]), short(n[1])))
+            if n[0] == nullid:  # found the end of the branch
                 pass
             elif n in seenbranch:
-                repo.ui.debug("branch already found\n")
+                repo.ui.debug(b"branch already found\n")
                 continue
-            elif n[1] and knownnode(n[1]): # do we know the base?
-                repo.ui.debug("found incomplete branch %s:%s\n"
-                              % (short(n[0]), short(n[1])))
-                search.append(n[0:2]) # schedule branch range for scanning
+            elif n[1] and knownnode(n[1]):  # do we know the base?
+                repo.ui.debug(
+                    b"found incomplete branch %s:%s\n"
+                    % (short(n[0]), short(n[1]))
+                )
+                search.append(n[0:2])  # schedule branch range for scanning
                 seenbranch.add(n)
             else:
                 if n[1] not in seen and n[1] not in fetch:
                     if knownnode(n[2]) and knownnode(n[3]):
-                        repo.ui.debug("found new changeset %s\n" %
-                                      short(n[1]))
-                        fetch.add(n[1]) # earliest unknown
+                        repo.ui.debug(b"found new changeset %s\n" % short(n[1]))
+                        fetch.add(n[1])  # earliest unknown
                     for p in n[2:4]:
                         if knownnode(p):
-                            base.add(p) # latest known
+                            base.add(p)  # latest known
 
                 for p in n[2:4]:
                     if p not in req and not knownnode(p):
@@ -110,17 +111,19 @@
         if r:
             reqcnt += 1
             progress.increment()
-            repo.ui.debug("request %d: %s\n" %
-                        (reqcnt, " ".join(map(short, r))))
+            repo.ui.debug(
+                b"request %d: %s\n" % (reqcnt, b" ".join(map(short, r)))
+            )
             for p in pycompat.xrange(0, len(r), 10):
                 with remote.commandexecutor() as e:
-                    branches = e.callcommand('branches', {
-                        'nodes': r[p:p + 10],
-                    }).result()
+                    branches = e.callcommand(
+                        b'branches', {b'nodes': r[p : p + 10]}
+                    ).result()
 
                 for b in branches:
-                    repo.ui.debug("received %s:%s\n" %
-                                  (short(b[0]), short(b[1])))
+                    repo.ui.debug(
+                        b"received %s:%s\n" % (short(b[0]), short(b[1]))
+                    )
                     unknown.append(b)
 
     # do binary search on the branches we found
@@ -130,23 +133,26 @@
         progress.increment()
 
         with remote.commandexecutor() as e:
-            between = e.callcommand('between', {'pairs': search}).result()
+            between = e.callcommand(b'between', {b'pairs': search}).result()
 
         for n, l in zip(search, between):
             l.append(n[1])
             p = n[0]
             f = 1
             for i in l:
-                repo.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
+                repo.ui.debug(b"narrowing %d:%d %s\n" % (f, len(l), short(i)))
                 if knownnode(i):
                     if f <= 2:
-                        repo.ui.debug("found new branch changeset %s\n" %
-                                          short(p))
+                        repo.ui.debug(
+                            b"found new branch changeset %s\n" % short(p)
+                        )
                         fetch.add(p)
                         base.add(i)
                     else:
-                        repo.ui.debug("narrowed branch search to %s:%s\n"
-                                      % (short(p), short(i)))
+                        repo.ui.debug(
+                            b"narrowed branch search to %s:%s\n"
+                            % (short(p), short(i))
+                        )
                         newsearch.append((p, i))
                     break
                 p, f = i, f * 2
@@ -155,20 +161,22 @@
     # sanity check our fetch list
     for f in fetch:
         if knownnode(f):
-            raise error.RepoError(_("already have changeset ")
-                                  + short(f[:4]))
+            raise error.RepoError(_(b"already have changeset ") + short(f[:4]))
 
     base = list(base)
     if base == [nullid]:
         if force:
-            repo.ui.warn(_("warning: repository is unrelated\n"))
+            repo.ui.warn(_(b"warning: repository is unrelated\n"))
         else:
-            raise error.Abort(_("repository is unrelated"))
+            raise error.Abort(_(b"repository is unrelated"))
 
-    repo.ui.debug("found new changesets starting at " +
-                 " ".join([short(f) for f in fetch]) + "\n")
+    repo.ui.debug(
+        b"found new changesets starting at "
+        + b" ".join([short(f) for f in fetch])
+        + b"\n"
+    )
 
     progress.complete()
-    repo.ui.debug("%d total queries\n" % reqcnt)
+    repo.ui.debug(b"%d total queries\n" % reqcnt)
 
     return base, list(fetch), heads
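
The treediscovery loop narrows each linear branch segment with a doubling-then-bisect search, which only works because node knowledge is monotone along a segment: once one node is unknown, every node sampled after it is too. A simplified local model of that invariant (illustrative only; the real code samples nodes remotely via the 'between' command)::

    def firstunknown(segment, knownnode):
        # 'segment' is ordered oldest to newest and knownnode() is
        # monotone along it; that invariant is what lets discovery
        # bisect instead of scanning every node.
        lo, hi = 0, len(segment)
        while lo < hi:
            mid = (lo + hi) // 2
            if knownnode(segment[mid]):
                lo = mid + 1
            else:
                hi = mid
        return lo  # index of the earliest unknown node, len() if none

    assert firstunknown([1, 2, 3, 4], lambda n: n <= 2) == 2
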
--- a/mercurial/txnutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/txnutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,15 +9,15 @@
 
 import errno
 
-from . import (
-    encoding,
-)
+from . import encoding
+
 
 def mayhavepending(root):
     '''return whether 'root' may have pending changes, which are
     visible to this process.
     '''
-    return root == encoding.environ.get('HG_PENDING')
+    return root == encoding.environ.get(b'HG_PENDING')
+
 
 def trypending(root, vfs, filename, **kwargs):
     '''Open file to be read according to HG_PENDING environment variable
@@ -29,7 +29,7 @@
     '''
     if mayhavepending(root):
         try:
-            return (vfs('%s.pending' % filename, **kwargs), True)
+            return (vfs(b'%s.pending' % filename, **kwargs), True)
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
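
trypending() encodes a small reader-side protocol: a '.pending' variant of a file is consulted only when HG_PENDING names this repository root, and a missing pending file quietly falls back to the committed one. A standalone approximation, with an opener callable standing in for the vfs argument::

    import errno
    import os

    def trypending_sketch(root, openerfn, filename):
        # Consult '<filename>.pending' only when HG_PENDING marks this
        # root as having transaction data visible to this process; a
        # missing pending file is not an error, anything else is.
        if root == os.environ.get('HG_PENDING'):
            try:
                return openerfn('%s.pending' % filename), True
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise
        return openerfn(filename), False
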
--- a/mercurial/ui.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/ui.py	Mon Oct 21 11:09:48 2019 -0400
@@ -22,6 +22,11 @@
 
 from .i18n import _
 from .node import hex
+from .pycompat import (
+    getattr,
+    open,
+    setattr,
+)
 
 from . import (
     color,
@@ -46,8 +51,9 @@
 urlreq = util.urlreq
 
 # for use with str.translate(None, _keepalnum), to keep just alphanumerics
-_keepalnum = ''.join(c for c in map(pycompat.bytechr, range(256))
-                     if not c.isalnum())
+_keepalnum = b''.join(
+    c for c in map(pycompat.bytechr, range(256)) if not c.isalnum()
+)
 
 # The config knobs that will be altered (if unset) by ui.tweakdefaults.
 tweakrc = b"""
@@ -78,8 +84,7 @@
 """
 
 samplehgrcs = {
-    'user':
-b"""# example user config (see 'hg help config' for more info)
+    b'user': b"""# example user config (see 'hg help config' for more info)
 [ui]
 # name and email, e.g.
 # username = Jane Doe <jdoe@example.com>
@@ -106,9 +111,7 @@
 # rebase =
 # uncommit =
 """,
-
-    'cloned':
-b"""# example repository config (see 'hg help config' for more info)
+    b'cloned': b"""# example repository config (see 'hg help config' for more info)
 [paths]
 default = %s
 
@@ -123,9 +126,7 @@
 # name and email (local to this repository, optional), e.g.
 # username = Jane Doe <jdoe@example.com>
 """,
-
-    'local':
-b"""# example repository config (see 'hg help config' for more info)
+    b'local': b"""# example repository config (see 'hg help config' for more info)
 [paths]
 # path aliases to other clones of this repo in URLs or filesystem paths
 # (see 'hg help config.paths' for more info)
@@ -139,9 +140,7 @@
 # name and email (local to this repository, optional), e.g.
 # username = Jane Doe <jdoe@example.com>
 """,
-
-    'global':
-b"""# example system-wide hg config (see 'hg help config' for more info)
+    b'global': b"""# example system-wide hg config (see 'hg help config' for more info)
 
 [ui]
 # uncomment to disable color in command output
@@ -161,14 +160,18 @@
 """,
 }
 
+
 def _maybestrurl(maybebytes):
     return pycompat.rapply(pycompat.strurl, maybebytes)
 
+
 def _maybebytesurl(maybestr):
     return pycompat.rapply(pycompat.bytesurl, maybestr)
 
+
 class httppasswordmgrdbproxy(object):
     """Delays loading urllib2 until it's needed."""
+
     def __init__(self):
         self._mgr = None
 
@@ -179,17 +182,23 @@
 
     def add_password(self, realm, uris, user, passwd):
         return self._get_mgr().add_password(
-            _maybestrurl(realm), _maybestrurl(uris),
-            _maybestrurl(user), _maybestrurl(passwd))
+            _maybestrurl(realm),
+            _maybestrurl(uris),
+            _maybestrurl(user),
+            _maybestrurl(passwd),
+        )
 
     def find_user_password(self, realm, uri):
         mgr = self._get_mgr()
-        return _maybebytesurl(mgr.find_user_password(_maybestrurl(realm),
-                                                     _maybestrurl(uri)))
+        return _maybebytesurl(
+            mgr.find_user_password(_maybestrurl(realm), _maybestrurl(uri))
+        )
+
 
 def _catchterm(*args):
     raise error.SignalInterrupt
 
+
 # unique object used to detect no default value has been provided when
 # retrieving configuration value.
 _unset = object()
@@ -197,6 +206,7 @@
 # _reqexithandlers: callbacks run at the end of a request
 _reqexithandlers = []
 
+
 class ui(object):
     def __init__(self, src=None):
         """Create a fresh new ui object if no src given
@@ -216,9 +226,9 @@
         self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
         self._reportuntrusted = True
         self._knownconfig = configitems.coreitems
-        self._ocfg = config.config() # overlay
-        self._tcfg = config.config() # trusted
-        self._ucfg = config.config() # untrusted
+        self._ocfg = config.config()  # overlay
+        self._tcfg = config.config()  # trusted
+        self._ucfg = config.config()  # untrusted
         self._trustusers = set()
         self._trustgroups = set()
         self.callhooks = True
@@ -280,8 +290,8 @@
             self.httppasswordmgrdb = httppasswordmgrdbproxy()
             self._blockedtimes = collections.defaultdict(int)
 
-        allowed = self.configlist('experimental', 'exportableenviron')
-        if '*' in allowed:
+        allowed = self.configlist(b'experimental', b'exportableenviron')
+        if b'*' in allowed:
             self._exportableenviron = self.environ
         else:
             self._exportableenviron = {}
@@ -295,9 +305,9 @@
         u = cls()
         # we always trust global config files and environment variables
         for t, f in rcutil.rccomponents():
-            if t == 'path':
+            if t == b'path':
                 u.readconfig(f, trust=True)
-            elif t == 'items':
+            elif t == b'items':
                 sections = set()
                 for section, name, value, source in f:
                     # do not set u._ocfg
@@ -308,14 +318,14 @@
                 for section in sections:
                     u.fixconfig(section=section)
             else:
-                raise error.ProgrammingError('unknown rctype: %s' % t)
+                raise error.ProgrammingError(b'unknown rctype: %s' % t)
         u._maybetweakdefaults()
         return u
 
     def _maybetweakdefaults(self):
-        if not self.configbool('ui', 'tweakdefaults'):
+        if not self.configbool(b'ui', b'tweakdefaults'):
             return
-        if self._tweaked or self.plain('tweakdefaults'):
+        if self._tweaked or self.plain(b'tweakdefaults'):
             return
 
         # Note: it is SUPER IMPORTANT that you set self._tweaked to
@@ -326,11 +336,11 @@
         # avoid this weirdness.
         self._tweaked = True
         tmpcfg = config.config()
-        tmpcfg.parse('<tweakdefaults>', tweakrc)
+        tmpcfg.parse(b'<tweakdefaults>', tweakrc)
         for section in tmpcfg:
             for name, value in tmpcfg.items(section):
                 if not self.hasconfig(section, name):
-                    self.setconfig(section, name, value, "<tweakdefaults>")
+                    self.setconfig(section, name, value, b"<tweakdefaults>")
 
     def copy(self):
         return self.__class__(self)
@@ -348,8 +358,9 @@
         try:
             yield
         finally:
-            self._blockedtimes[key + '_blocked'] += (
-                (util.timer() - starttime) * 1000)
+            self._blockedtimes[key + b'_blocked'] += (
+                util.timer() - starttime
+            ) * 1000
 
     @contextlib.contextmanager
     def uninterruptible(self):
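
timeblockedsection above accumulates, per key, the milliseconds a
request spent blocked (on stdio, an editor, a pager, ...) so they can
be reported at the end of the request. Its shape, reduced to the
essentials (using time.perf_counter in place of util.timer)::

    import collections
    import contextlib
    import time

    _blockedtimes = collections.defaultdict(int)


    @contextlib.contextmanager
    def timeblockedsection(key):
        starttime = time.perf_counter()
        try:
            yield
        finally:
            # accumulated under e.g. b'stdio_blocked', in milliseconds
            _blockedtimes[key + b'_blocked'] += (
                time.perf_counter() - starttime
            ) * 1000
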
@@ -360,9 +371,10 @@
         lets you advise Mercurial that something risky is happening so
         that control-C etc can be blocked if desired.
         """
-        enabled = self.configbool('experimental', 'nointerrupt')
-        if (enabled and
-            self.configbool('experimental', 'nointerrupt-interactiveonly')):
+        enabled = self.configbool(b'experimental', b'nointerrupt')
+        if enabled and self.configbool(
+            b'experimental', b'nointerrupt-interactiveonly'
+        ):
             enabled = self.interactive()
         if self._uninterruptible or not enabled:
             # if nointerrupt support is turned off, the process isn't
@@ -370,11 +382,14 @@
             # block, do nothing.
             yield
             return
+
         def warn():
-            self.warn(_("shutting down cleanly\n"))
+            self.warn(_(b"shutting down cleanly\n"))
             self.warn(
-                _("press ^C again to terminate immediately (dangerous)\n"))
+                _(b"press ^C again to terminate immediately (dangerous)\n")
+            )
             return True
+
         with procutil.uninterruptible(warn):
             try:
                 self._uninterruptible = True
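
The handshake implemented here is "first ^C warns, second ^C kills":
while the risky section runs, SIGINT is intercepted once, the warning
above is printed, and the previous disposition is restored so a repeat
interrupt terminates the process. A simplified standalone model of
what procutil.uninterruptible provides (an assumption about its
behaviour, not its actual code)::

    import contextlib
    import signal


    @contextlib.contextmanager
    def uninterruptible(warn):
        oldhandler = signal.getsignal(signal.SIGINT)

        def firstinterrupt(signum, frame):
            # re-arm the previous handler: the next ^C is fatal again
            signal.signal(signal.SIGINT, oldhandler)
            if not warn():
                raise KeyboardInterrupt

        signal.signal(signal.SIGINT, firstinterrupt)
        try:
            yield
        finally:
            signal.signal(signal.SIGINT, oldhandler)
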
@@ -391,7 +406,7 @@
             return True
 
         tusers, tgroups = self._trustusers, self._trustgroups
-        if '*' in tusers or '*' in tgroups:
+        if b'*' in tusers or b'*' in tgroups:
             return True
 
         user = util.username(st.st_uid)
@@ -400,16 +415,22 @@
             return True
 
         if self._reportuntrusted:
-            self.warn(_('not trusting file %s from untrusted '
-                        'user %s, group %s\n') % (f, user, group))
+            self.warn(
+                _(
+                    b'not trusting file %s from untrusted '
+                    b'user %s, group %s\n'
+                )
+                % (f, user, group)
+            )
         return False
 
-    def readconfig(self, filename, root=None, trust=False,
-                   sections=None, remap=None):
+    def readconfig(
+        self, filename, root=None, trust=False, sections=None, remap=None
+    ):
         try:
             fp = open(filename, r'rb')
         except IOError:
-            if not sections: # ignore unless we were looking for something
+            if not sections:  # ignore unless we were looking for something
                 return
             raise
 
@@ -422,28 +443,37 @@
         except error.ConfigError as inst:
             if trusted:
                 raise
-            self.warn(_("ignored: %s\n") % stringutil.forcebytestr(inst))
+            self.warn(_(b"ignored: %s\n") % stringutil.forcebytestr(inst))
 
         if self.plain():
-            for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
-                      'logtemplate', 'message-output', 'statuscopies', 'style',
-                      'traceback', 'verbose'):
-                if k in cfg['ui']:
-                    del cfg['ui'][k]
-            for k, v in cfg.items('defaults'):
-                del cfg['defaults'][k]
-            for k, v in cfg.items('commands'):
-                del cfg['commands'][k]
+            for k in (
+                b'debug',
+                b'fallbackencoding',
+                b'quiet',
+                b'slash',
+                b'logtemplate',
+                b'message-output',
+                b'statuscopies',
+                b'style',
+                b'traceback',
+                b'verbose',
+            ):
+                if k in cfg[b'ui']:
+                    del cfg[b'ui'][k]
+            for k, v in cfg.items(b'defaults'):
+                del cfg[b'defaults'][k]
+            for k, v in cfg.items(b'commands'):
+                del cfg[b'commands'][k]
         # Don't remove aliases from the configuration if in the exceptionlist
-        if self.plain('alias'):
-            for k, v in cfg.items('alias'):
-                del cfg['alias'][k]
-        if self.plain('revsetalias'):
-            for k, v in cfg.items('revsetalias'):
-                del cfg['revsetalias'][k]
-        if self.plain('templatealias'):
-            for k, v in cfg.items('templatealias'):
-                del cfg['templatealias'][k]
+        if self.plain(b'alias'):
+            for k, v in cfg.items(b'alias'):
+                del cfg[b'alias'][k]
+        if self.plain(b'revsetalias'):
+            for k, v in cfg.items(b'revsetalias'):
+                del cfg[b'revsetalias'][k]
+        if self.plain(b'templatealias'):
+            for k, v in cfg.items(b'templatealias'):
+                del cfg[b'templatealias'][k]
 
         if trusted:
             self._tcfg.update(cfg)
@@ -452,48 +482,51 @@
         self._ucfg.update(self._ocfg)
 
         if root is None:
-            root = os.path.expanduser('~')
+            root = os.path.expanduser(b'~')
         self.fixconfig(root=root)
 
     def fixconfig(self, root=None, section=None):
-        if section in (None, 'paths'):
+        if section in (None, b'paths'):
             # expand vars and ~
             # translate paths relative to root (or home) into absolute paths
             root = root or encoding.getcwd()
             for c in self._tcfg, self._ucfg, self._ocfg:
-                for n, p in c.items('paths'):
+                for n, p in c.items(b'paths'):
                     # Ignore sub-options.
-                    if ':' in n:
+                    if b':' in n:
                         continue
                     if not p:
                         continue
-                    if '%%' in p:
-                        s = self.configsource('paths', n) or 'none'
-                        self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
-                                  % (n, p, s))
-                        p = p.replace('%%', '%')
+                    if b'%%' in p:
+                        s = self.configsource(b'paths', n) or b'none'
+                        self.warn(
+                            _(b"(deprecated '%%' in path %s=%s from %s)\n")
+                            % (n, p, s)
+                        )
+                        p = p.replace(b'%%', b'%')
                     p = util.expandpath(p)
                     if not util.hasscheme(p) and not os.path.isabs(p):
                         p = os.path.normpath(os.path.join(root, p))
-                    c.set("paths", n, p)
+                    c.set(b"paths", n, p)
 
-        if section in (None, 'ui'):
+        if section in (None, b'ui'):
             # update ui options
             self._fmsgout, self._fmsgerr = _selectmsgdests(self)
-            self.debugflag = self.configbool('ui', 'debug')
-            self.verbose = self.debugflag or self.configbool('ui', 'verbose')
-            self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
+            self.debugflag = self.configbool(b'ui', b'debug')
+            self.verbose = self.debugflag or self.configbool(b'ui', b'verbose')
+            self.quiet = not self.debugflag and self.configbool(b'ui', b'quiet')
             if self.verbose and self.quiet:
                 self.quiet = self.verbose = False
-            self._reportuntrusted = self.debugflag or self.configbool("ui",
-                "report_untrusted")
-            self.tracebackflag = self.configbool('ui', 'traceback')
-            self.logblockedtimes = self.configbool('ui', 'logblockedtimes')
+            self._reportuntrusted = self.debugflag or self.configbool(
+                b"ui", b"report_untrusted"
+            )
+            self.tracebackflag = self.configbool(b'ui', b'traceback')
+            self.logblockedtimes = self.configbool(b'ui', b'logblockedtimes')
 
-        if section in (None, 'trusted'):
+        if section in (None, b'trusted'):
             # update trust information
-            self._trustusers.update(self.configlist('trusted', 'users'))
-            self._trustgroups.update(self.configlist('trusted', 'groups'))
+            self._trustusers.update(self.configlist(b'trusted', b'users'))
+            self._trustgroups.update(self.configlist(b'trusted', b'groups'))
 
         if section in (None, b'devel', b'ui') and self.debugflag:
             tracked = set()
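
For the [paths] section, fixconfig above rewrites every value: the
deprecated '%%' escape collapses to '%', '~' and environment variables
are expanded, and anything that is neither absolute nor a URL is
anchored at the repository root (or home directory). Approximately
(hypothetical helper; util.hasscheme is the real URL test)::

    import os


    def fixpath(p, root):
        """Normalize one [paths] value (sketch of the logic above)."""
        if b'%%' in p:
            p = p.replace(b'%%', b'%')  # deprecated escape
        p = os.path.expanduser(os.path.expandvars(p))
        # approximates util.hasscheme(p)
        if b'://' not in p and not os.path.isabs(p):
            p = os.path.normpath(os.path.join(root, p))
        return p
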
@@ -504,15 +537,18 @@
                 self.setlogger(b'debug', logger)
 
     def backupconfig(self, section, item):
-        return (self._ocfg.backup(section, item),
-                self._tcfg.backup(section, item),
-                self._ucfg.backup(section, item),)
+        return (
+            self._ocfg.backup(section, item),
+            self._tcfg.backup(section, item),
+            self._ucfg.backup(section, item),
+        )
+
     def restoreconfig(self, data):
         self._ocfg.restore(data[0])
         self._tcfg.restore(data[1])
         self._ucfg.restore(data[2])
 
-    def setconfig(self, section, name, value, source=''):
+    def setconfig(self, section, name, value, source=b''):
         for cfg in (self._ocfg, self._tcfg, self._ucfg):
             cfg.set(section, name, value, source)
         self.fixconfig(section=section)
@@ -526,8 +562,9 @@
 
     def config(self, section, name, default=_unset, untrusted=False):
         """return the plain string version of a config"""
-        value = self._config(section, name, default=default,
-                             untrusted=untrusted)
+        value = self._config(
+            section, name, default=default, untrusted=untrusted
+        )
         if value is _unset:
             return None
         return value
@@ -544,27 +581,31 @@
             else:
                 itemdefault = item.default
         else:
-            msg = ("accessing unregistered config item: '%s.%s'")
+            msg = b"accessing unregistered config item: '%s.%s'"
             msg %= (section, name)
-            self.develwarn(msg, 2, 'warn-config-unknown')
+            self.develwarn(msg, 2, b'warn-config-unknown')
 
         if default is _unset:
             if item is None:
                 value = default
             elif item.default is configitems.dynamicdefault:
                 value = None
-                msg = "config item requires an explicit default value: '%s.%s'"
+                msg = b"config item requires an explicit default value: '%s.%s'"
                 msg %= (section, name)
-                self.develwarn(msg, 2, 'warn-config-default')
+                self.develwarn(msg, 2, b'warn-config-default')
             else:
                 value = itemdefault
-        elif (item is not None
-              and item.default is not configitems.dynamicdefault
-              and default != itemdefault):
-            msg = ("specifying a mismatched default value for a registered "
-                   "config item: '%s.%s' '%s'")
+        elif (
+            item is not None
+            and item.default is not configitems.dynamicdefault
+            and default != itemdefault
+        ):
+            msg = (
+                b"specifying a mismatched default value for a registered "
+                b"config item: '%s.%s' '%s'"
+            )
             msg %= (section, name, pycompat.bytestr(default))
-            self.develwarn(msg, 2, 'warn-config-default')
+            self.develwarn(msg, 2, b'warn-config-default')
 
         for s, n in alternates:
             candidate = self._data(untrusted).get(s, n, None)
@@ -576,8 +617,10 @@
             for s, n in alternates:
                 uvalue = self._ucfg.get(s, n)
                 if uvalue is not None and uvalue != value:
-                    self.debug("ignoring untrusted configuration option "
-                               "%s.%s = %s\n" % (s, n, uvalue))
+                    self.debug(
+                        b"ignoring untrusted configuration option "
+                        b"%s.%s = %s\n" % (s, n, uvalue)
+                    )
         return value
 
     def configsuboptions(self, section, name, default=_unset, untrusted=False):
@@ -593,29 +636,31 @@
         main = self.config(section, name, default, untrusted=untrusted)
         data = self._data(untrusted)
         sub = {}
-        prefix = '%s:' % name
+        prefix = b'%s:' % name
         for k, v in data.items(section):
             if k.startswith(prefix):
-                sub[k[len(prefix):]] = v
+                sub[k[len(prefix) :]] = v
 
         if self.debugflag and not untrusted and self._reportuntrusted:
             for k, v in sub.items():
-                uvalue = self._ucfg.get(section, '%s:%s' % (name, k))
+                uvalue = self._ucfg.get(section, b'%s:%s' % (name, k))
                 if uvalue is not None and uvalue != v:
-                    self.debug('ignoring untrusted configuration option '
-                               '%s:%s.%s = %s\n' % (section, name, k, uvalue))
+                    self.debug(
+                        b'ignoring untrusted configuration option '
+                        b'%s:%s.%s = %s\n' % (section, name, k, uvalue)
+                    )
 
         return main, sub
 
     def configpath(self, section, name, default=_unset, untrusted=False):
-        'get a path config item, expanded relative to repo root or config file'
+        'get a path config item, expanded relative to repo root or config file'
         v = self.config(section, name, default, untrusted)
         if v is None:
             return None
-        if not os.path.isabs(v) or "://" not in v:
+        if not os.path.isabs(v) or b"://" not in v:
             src = self.configsource(section, name, untrusted)
-            if ':' in src:
-                base = os.path.dirname(src.rsplit(':')[0])
+            if b':' in src:
+                base = os.path.dirname(src.rsplit(b':')[0])
                 v = os.path.join(base, os.path.expanduser(v))
         return v
 
@@ -651,12 +696,14 @@
             return v
         b = stringutil.parsebool(v)
         if b is None:
-            raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
-                                    % (section, name, v))
+            raise error.ConfigError(
+                _(b"%s.%s is not a boolean ('%s')") % (section, name, v)
+            )
         return b
 
-    def configwith(self, convert, section, name, default=_unset,
-                   desc=None, untrusted=False):
+    def configwith(
+        self, convert, section, name, default=_unset, desc=None, untrusted=False
+    ):
         """parse a configuration element with a conversion function
 
         >>> u = ui(); s = b'foo'
@@ -681,14 +728,15 @@
 
         v = self.config(section, name, default, untrusted)
         if v is None:
-            return v # do not attempt to convert None
+            return v  # do not attempt to convert None
         try:
             return convert(v)
         except (ValueError, error.ParseError):
             if desc is None:
                 desc = pycompat.sysbytes(convert.__name__)
-            raise error.ConfigError(_("%s.%s is not a valid %s ('%s')")
-                                    % (section, name, desc, v))
+            raise error.ConfigError(
+                _(b"%s.%s is not a valid %s ('%s')") % (section, name, desc, v)
+            )
 
     def configint(self, section, name, default=_unset, untrusted=False):
         """parse a configuration element as an integer
@@ -709,8 +757,9 @@
         ConfigError: foo.invalid is not a valid integer ('somevalue')
         """
 
-        return self.configwith(int, section, name, default, 'integer',
-                               untrusted)
+        return self.configwith(
+            int, section, name, default, b'integer', untrusted
+        )
 
     def configbytes(self, section, name, default=_unset, untrusted=False):
         """parse a configuration element as a quantity in bytes
@@ -744,8 +793,10 @@
         try:
             return util.sizetoint(value)
         except error.ParseError:
-            raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
-                                    % (section, name, value))
+            raise error.ConfigError(
+                _(b"%s.%s is not a byte quantity ('%s')")
+                % (section, name, value)
+            )
 
     def configlist(self, section, name, default=_unset, untrusted=False):
         """parse a configuration element as a list of comma/space separated
@@ -760,8 +811,9 @@
         ['this', 'is', 'a small', 'test']
         """
         # default is not always a list
-        v = self.configwith(config.parselist, section, name, default,
-                               'list', untrusted)
+        v = self.configwith(
+            config.parselist, section, name, default, b'list', untrusted
+        )
         if isinstance(v, bytes):
             return config.parselist(v)
         elif v is None:
@@ -777,12 +829,24 @@
         (0, 0)
         """
         if self.config(section, name, default, untrusted):
-            return self.configwith(dateutil.parsedate, section, name, default,
-                                   'date', untrusted)
+            return self.configwith(
+                dateutil.parsedate, section, name, default, b'date', untrusted
+            )
         if default is _unset:
             return None
         return default
 
+    def configdefault(self, section, name):
+        """returns the default value of the config item"""
+        item = self._knownconfig.get(section, {}).get(name)
+        itemdefault = None
+        if item is not None:
+            if callable(item.default):
+                itemdefault = item.default()
+            else:
+                itemdefault = item.default
+        return itemdefault
+
     def hasconfig(self, section, name, untrusted=False):
         return self._data(untrusted).hasitem(section, name)
 
@@ -793,12 +857,14 @@
     def configitems(self, section, untrusted=False, ignoresub=False):
         items = self._data(untrusted).items(section)
         if ignoresub:
-            items = [i for i in items if ':' not in i[0]]
+            items = [i for i in items if b':' not in i[0]]
         if self.debugflag and not untrusted and self._reportuntrusted:
             for k, v in self._ucfg.items(section):
                 if self._tcfg.get(section, k) != v:
-                    self.debug("ignoring untrusted configuration option "
-                               "%s.%s = %s\n" % (section, k, v))
+                    self.debug(
+                        b"ignoring untrusted configuration option "
+                        b"%s.%s = %s\n" % (section, k, v)
+                    )
         return items
 
     def walkconfig(self, untrusted=False):
@@ -823,14 +889,19 @@
         - False if feature is disabled by default and not included in HGPLAIN
         - True otherwise
         '''
-        if ('HGPLAIN' not in encoding.environ and
-                'HGPLAINEXCEPT' not in encoding.environ):
+        if (
+            b'HGPLAIN' not in encoding.environ
+            and b'HGPLAINEXCEPT' not in encoding.environ
+        ):
             return False
-        exceptions = encoding.environ.get('HGPLAINEXCEPT',
-                '').strip().split(',')
+        exceptions = (
+            encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
+        )
         # TODO: add support for HGPLAIN=+feature,-feature syntax
-        if '+strictflags' not in encoding.environ.get('HGPLAIN', '').split(','):
-            exceptions.append('strictflags')
+        if b'+strictflags' not in encoding.environ.get(b'HGPLAIN', b'').split(
+            b','
+        ):
+            exceptions.append(b'strictflags')
         if feature and exceptions:
             return feature not in exceptions
         return True
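
Spelled out, the plain() rules are: nothing is plain unless HGPLAIN or
HGPLAINEXCEPT is set; a feature listed in HGPLAINEXCEPT keeps its
normal behaviour; and 'strictflags' is implicitly excepted unless
HGPLAIN opts in with '+strictflags'. A condensed model with the
environment passed in explicitly (the real method reads
encoding.environ)::

    def plain(environ, feature=None):
        if (
            b'HGPLAIN' not in environ
            and b'HGPLAINEXCEPT' not in environ
        ):
            return False
        exceptions = environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
        if b'+strictflags' not in environ.get(b'HGPLAIN', b'').split(b','):
            exceptions.append(b'strictflags')
        if feature and exceptions:
            return feature not in exceptions
        return True

    assert plain({}, b'alias') is False
    assert plain({b'HGPLAIN': b''}, b'alias') is True
    assert plain({b'HGPLAINEXCEPT': b'alias'}, b'alias') is False
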
@@ -845,31 +916,35 @@
         ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
         If no username could be found, raise an Abort error.
         """
-        user = encoding.environ.get("HGUSER")
+        user = encoding.environ.get(b"HGUSER")
         if user is None:
-            user = self.config("ui", "username")
+            user = self.config(b"ui", b"username")
             if user is not None:
                 user = os.path.expandvars(user)
         if user is None:
-            user = encoding.environ.get("EMAIL")
+            user = encoding.environ.get(b"EMAIL")
         if user is None and acceptempty:
             return user
-        if user is None and self.configbool("ui", "askusername"):
-            user = self.prompt(_("enter a commit username:"), default=None)
+        if user is None and self.configbool(b"ui", b"askusername"):
+            user = self.prompt(_(b"enter a commit username:"), default=None)
         if user is None and not self.interactive():
             try:
-                user = '%s@%s' % (procutil.getuser(),
-                                  encoding.strtolocal(socket.getfqdn()))
-                self.warn(_("no username found, using '%s' instead\n") % user)
+                user = b'%s@%s' % (
+                    procutil.getuser(),
+                    encoding.strtolocal(socket.getfqdn()),
+                )
+                self.warn(_(b"no username found, using '%s' instead\n") % user)
             except KeyError:
                 pass
         if not user:
-            raise error.Abort(_('no username supplied'),
-                             hint=_("use 'hg config --edit' "
-                                    'to set your username'))
-        if "\n" in user:
-            raise error.Abort(_("username %r contains a newline\n")
-                              % pycompat.bytestr(user))
+            raise error.Abort(
+                _(b'no username supplied'),
+                hint=_(b"use 'hg config --edit' " b'to set your username'),
+            )
+        if b"\n" in user:
+            raise error.Abort(
+                _(b"username %r contains a newline\n") % pycompat.bytestr(user)
+            )
         return user
 
     def shortuser(self, user):
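
The resolution order in username() above is fixed: $HGUSER, then the
ui.username config, then $EMAIL, then (interactively, when
ui.askusername is set) a prompt, and as a last resort a synthesized
user@fqdn. The non-interactive part of that chain, in isolation
(hypothetical standalone function)::

    import getpass
    import os
    import socket


    def username(config_username=None):
        """First non-None source wins (sketch of the chain above)."""
        user = os.environ.get('HGUSER')
        if user is None:
            user = config_username  # what ui.username would supply
        if user is None:
            user = os.environ.get('EMAIL')
        if user is None:
            # last resort: synthesize something identifying
            user = '%s@%s' % (getpass.getuser(), socket.getfqdn())
        return user
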
@@ -965,7 +1040,7 @@
         else:
             self._bufferapplylabels = None
 
-        return "".join(self._buffers.pop())
+        return b"".join(self._buffers.pop())
 
     def _isbuffered(self, dest):
         if dest is self._fout:
@@ -983,7 +1058,7 @@
     def canbatchlabeledwrites(self):
         '''check if write calls with labels are batchable'''
         # Windows color printing is special, see ``write``.
-        return self._colormode != 'win32'
+        return self._colormode != b'win32'
 
     def write(self, *args, **opts):
         '''write args to output
@@ -997,6 +1072,13 @@
         Label names take the form of "topic.type". For example, ui.debug()
         issues a label of "ui.debug".
 
+        Progress reports via stderr are normally cleared before writing,
+        since stdout and stderr usually go to the same terminal. This
+        can be skipped with the optional keyword argument
+        "keepprogressbar", in which case the progress bar keeps
+        occupying a partial line on stderr. This is intended for when
+        Mercurial acts as the data source in a pipe.
+
         When labeling output for a specific command, a label of
         "cmdname.type" is recommended. For example, status issues
         a label of "status.modified" for modified files.
@@ -1005,34 +1087,36 @@
 
         # inlined _write() for speed
         if self._buffers:
-            label = opts.get(r'label', '')
+            label = opts.get(r'label', b'')
             if label and self._bufferapplylabels:
                 self._buffers[-1].extend(self.label(a, label) for a in args)
             else:
                 self._buffers[-1].extend(args)
             return
 
-        # inliend _writenobuf() for speed
-        self._progclear()
+        # inlined _writenobuf() for speed
+        if not opts.get(r'keepprogressbar', False):
+            self._progclear()
         msg = b''.join(args)
 
         # opencode timeblockedsection because this is a critical path
         starttime = util.timer()
         try:
-            if self._colormode == 'win32':
+            if self._colormode == b'win32':
                 # windows color printing is its own can of worms, defer to
                 # the color module and leave it at that.
                 color.win32print(self, dest.write, msg, **opts)
             else:
                 if self._colormode is not None:
-                    label = opts.get(r'label', '')
+                    label = opts.get(r'label', b'')
                     msg = self.label(msg, label)
                 dest.write(msg)
         except IOError as err:
             raise error.StdioError(err)
         finally:
-            self._blockedtimes['stdio_blocked'] += (
-                (util.timer() - starttime) * 1000)
+            self._blockedtimes[b'stdio_blocked'] += (
+                util.timer() - starttime
+            ) * 1000
 
     def write_err(self, *args, **opts):
         self._write(self._ferr, *args, **opts)
@@ -1040,7 +1124,7 @@
     def _write(self, dest, *args, **opts):
         # update write() as well if you touch this code
         if self._isbuffered(dest):
-            label = opts.get(r'label', '')
+            label = opts.get(r'label', b'')
             if label and self._bufferapplylabels:
                 self._buffers[-1].extend(self.label(a, label) for a in args)
             else:
@@ -1050,7 +1134,8 @@
 
     def _writenobuf(self, dest, *args, **opts):
         # update write() as well if you touch this code
-        self._progclear()
+        if not opts.get(r'keepprogressbar', False):
+            self._progclear()
         msg = b''.join(args)
 
         # opencode timeblockedsection because this is a critical path
@@ -1062,13 +1147,13 @@
                 # channel for machine-readable output with metadata, where
                 # no extra colorization is necessary.
                 dest.write(msg, **opts)
-            elif self._colormode == 'win32':
+            elif self._colormode == b'win32':
                 # windows color printing is its own can of worms, defer to
                 # the color module and leave it at that.
                 color.win32print(self, dest.write, msg, **opts)
             else:
                 if self._colormode is not None:
-                    label = opts.get(r'label', '')
+                    label = opts.get(r'label', b'')
                     msg = self.label(msg, label)
                 dest.write(msg)
             # stderr may be buffered under win32 when redirected to files,
@@ -1076,14 +1161,18 @@
             if dest is self._ferr and not getattr(self._ferr, 'closed', False):
                 dest.flush()
         except IOError as err:
-            if (dest is self._ferr
-                and err.errno in (errno.EPIPE, errno.EIO, errno.EBADF)):
+            if dest is self._ferr and err.errno in (
+                errno.EPIPE,
+                errno.EIO,
+                errno.EBADF,
+            ):
                 # no way to report the error, so ignore it
                 return
             raise error.StdioError(err)
         finally:
-            self._blockedtimes['stdio_blocked'] += (
-                (util.timer() - starttime) * 1000)
+            self._blockedtimes[b'stdio_blocked'] += (
+                util.timer() - starttime
+            ) * 1000
 
     def _writemsg(self, dest, *args, **opts):
         _writemsgwith(self._write, dest, *args, **opts)
@@ -1107,11 +1196,12 @@
                     if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                         raise error.StdioError(err)
         finally:
-            self._blockedtimes['stdio_blocked'] += (
-                (util.timer() - starttime) * 1000)
+            self._blockedtimes[b'stdio_blocked'] += (
+                util.timer() - starttime
+            ) * 1000
 
     def _isatty(self, fh):
-        if self.configbool('ui', 'nontty'):
+        if self.configbool(b'ui', b'nontty'):
             return False
         return procutil.isatty(fh)
 
@@ -1164,31 +1254,31 @@
           command: The full, non-aliased name of the command. That is, "log"
                    not "history, "summary" not "summ", etc.
         """
-        if (self._disablepager
-            or self.pageractive):
+        if self._disablepager or self.pageractive:
             # how the pager should behave has already been determined
             return
 
-        if not command.startswith('internal-always-') and (
+        if not command.startswith(b'internal-always-') and (
             # explicit --pager=on (= 'internal-always-' prefix) should
             # take precedence over disabling factors below
-            command in self.configlist('pager', 'ignore')
-            or not self.configbool('ui', 'paginate')
-            or not self.configbool('pager', 'attend-' + command, True)
-            or encoding.environ.get('TERM') == 'dumb'
+            command in self.configlist(b'pager', b'ignore')
+            or not self.configbool(b'ui', b'paginate')
+            or not self.configbool(b'pager', b'attend-' + command, True)
+            or encoding.environ.get(b'TERM') == b'dumb'
             # TODO: if we want to allow HGPLAINEXCEPT=pager,
             # formatted() will need some adjustment.
             or not self.formatted()
             or self.plain()
             or self._buffers
             # TODO: expose debugger-enabled on the UI object
-            or '--debugger' in pycompat.sysargv):
+            or b'--debugger' in pycompat.sysargv
+        ):
             # We only want to paginate if the ui appears to be
             # interactive, the user didn't say HGPLAIN or
             # HGPLAINEXCEPT=pager, and the user didn't specify --debug.
             return
 
-        pagercmd = self.config('pager', 'pager', rcutil.fallbackpager)
+        pagercmd = self.config(b'pager', b'pager', rcutil.fallbackpager)
         if not pagercmd:
             return
 
@@ -1197,25 +1287,26 @@
             if name not in encoding.environ:
                 pagerenv[name] = value
 
-        self.debug('starting pager for command %s\n' %
-                   stringutil.pprint(command))
+        self.debug(
+            b'starting pager for command %s\n' % stringutil.pprint(command)
+        )
         self.flush()
 
         wasformatted = self.formatted()
-        if util.safehasattr(signal, "SIGPIPE"):
+        if util.safehasattr(signal, b"SIGPIPE"):
             signal.signal(signal.SIGPIPE, _catchterm)
         if self._runpager(pagercmd, pagerenv):
             self.pageractive = True
             # Preserve the formatted-ness of the UI. This is important
             # because we mess with stdout, which might confuse
             # auto-detection of things being formatted.
-            self.setconfig('ui', 'formatted', wasformatted, 'pager')
-            self.setconfig('ui', 'interactive', False, 'pager')
+            self.setconfig(b'ui', b'formatted', wasformatted, b'pager')
+            self.setconfig(b'ui', b'interactive', False, b'pager')
 
             # If pagermode differs from color.mode, reconfigure color now that
             # pageractive is set.
             cm = self._colormode
-            if cm != self.config('color', 'pagermode', cm):
+            if cm != self.config(b'color', b'pagermode', cm):
                 color.setup(self)
         else:
             # If the pager can't be spawned in dispatch when --pager=on is
@@ -1229,14 +1320,14 @@
         This is separate in part so that extensions (like chg) can
         override how a pager is invoked.
         """
-        if command == 'cat':
+        if command == b'cat':
             # Save ourselves some work.
             return False
         # If the command doesn't contain any of these characters, we
         # assume it's a binary and exec it directly. This means for
         # simple pager command configurations, we can degrade
         # gracefully and tell the user about their broken pager.
-        shell = any(c in command for c in "|&;<>()$`\\\"' \t\n*?[#~=%")
+        shell = any(c in command for c in b"|&;<>()$`\\\"' \t\n*?[#~=%")
 
         if pycompat.iswindows and not shell:
             # Windows' built-in `more` cannot be invoked with shell=False, but
@@ -1246,22 +1337,29 @@
             # determine which one to use.
             fullcmd = procutil.findexe(command)
             if not fullcmd:
-                self.warn(_("missing pager command '%s', skipping pager\n")
-                          % command)
+                self.warn(
+                    _(b"missing pager command '%s', skipping pager\n") % command
+                )
                 return False
 
             command = fullcmd
 
         try:
             pager = subprocess.Popen(
-                procutil.tonativestr(command), shell=shell, bufsize=-1,
-                close_fds=procutil.closefds, stdin=subprocess.PIPE,
-                stdout=procutil.stdout, stderr=procutil.stderr,
-                env=procutil.tonativeenv(procutil.shellenviron(env)))
+                procutil.tonativestr(command),
+                shell=shell,
+                bufsize=-1,
+                close_fds=procutil.closefds,
+                stdin=subprocess.PIPE,
+                stdout=procutil.stdout,
+                stderr=procutil.stderr,
+                env=procutil.tonativeenv(procutil.shellenviron(env)),
+            )
         except OSError as e:
             if e.errno == errno.ENOENT and not shell:
-                self.warn(_("missing pager command '%s', skipping pager\n")
-                          % command)
+                self.warn(
+                    _(b"missing pager command '%s', skipping pager\n") % command
+                )
                 return False
             raise
 
@@ -1275,7 +1373,7 @@
 
         @self.atexit
         def killpager():
-            if util.safehasattr(signal, "SIGINT"):
+            if util.safehasattr(signal, b"SIGINT"):
                 signal.signal(signal.SIGINT, signal.SIG_IGN)
             # restore original fds, closing pager.stdin copies in the process
             os.dup2(stdoutfd, procutil.stdout.fileno())
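
killpager undoes the earlier redirection by dup2()-ing the saved
descriptor back over fd 1 (and likewise for stderr), which closes the
pager's copies of its stdin and lets it exit. The underlying fd dance,
shown on stdout alone::

    import os
    import sys

    sys.stdout.flush()
    saved = os.dup(sys.stdout.fileno())    # keep a handle on real stdout
    devnull = os.open(os.devnull, os.O_WRONLY)
    os.dup2(devnull, sys.stdout.fileno())  # fd 1 now points at /dev/null
    os.close(devnull)

    print("swallowed", flush=True)         # not shown on the terminal

    os.dup2(saved, sys.stdout.fileno())    # restore the original fd 1
    os.close(saved)
    print("visible again")
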
@@ -1318,56 +1416,53 @@
         Then histedit will use the text interface and chunkselector will use
         the default curses interface (crecord at the moment).
         """
-        alldefaults = frozenset(["text", "curses"])
+        alldefaults = frozenset([b"text", b"curses"])
 
         featureinterfaces = {
-            "chunkselector": [
-                "text",
-                "curses",
-            ],
-            "histedit": [
-                "text",
-                "curses",
-            ],
+            b"chunkselector": [b"text", b"curses",],
+            b"histedit": [b"text", b"curses",],
         }
 
         # Feature-specific interface
         if feature not in featureinterfaces.keys():
             # Programming error, not user error
-            raise ValueError("Unknown feature requested %s" % feature)
+            raise ValueError(b"Unknown feature requested %s" % feature)
 
         availableinterfaces = frozenset(featureinterfaces[feature])
         if alldefaults > availableinterfaces:
             # Programming error, not user error. We need a use case to
             # define the right thing to do here.
             raise ValueError(
-                "Feature %s does not handle all default interfaces" %
-                feature)
+                b"Feature %s does not handle all default interfaces" % feature
+            )
 
-        if self.plain() or encoding.environ.get('TERM') == 'dumb':
-            return "text"
+        if self.plain() or encoding.environ.get(b'TERM') == b'dumb':
+            return b"text"
 
         # Default interface for all the features
-        defaultinterface = "text"
-        i = self.config("ui", "interface")
+        defaultinterface = b"text"
+        i = self.config(b"ui", b"interface")
         if i in alldefaults:
             defaultinterface = i
 
         choseninterface = defaultinterface
-        f = self.config("ui", "interface.%s" % feature)
+        f = self.config(b"ui", b"interface.%s" % feature)
         if f in availableinterfaces:
             choseninterface = f
 
         if i is not None and defaultinterface != i:
             if f is not None:
-                self.warn(_("invalid value for ui.interface: %s\n") %
-                          (i,))
+                self.warn(_(b"invalid value for ui.interface: %s\n") % (i,))
             else:
-                self.warn(_("invalid value for ui.interface: %s (using %s)\n") %
-                         (i, choseninterface))
+                self.warn(
+                    _(b"invalid value for ui.interface: %s (using %s)\n")
+                    % (i, choseninterface)
+                )
         if f is not None and choseninterface != f:
-            self.warn(_("invalid value for ui.interface.%s: %s (using %s)\n") %
-                      (feature, f, choseninterface))
+            self.warn(
+                _(b"invalid value for ui.interface.%s: %s (using %s)\n")
+                % (feature, f, choseninterface)
+            )
 
         return choseninterface
 
@@ -1385,7 +1480,7 @@
 
         This function refers to input only; for output, see `ui.formatted()'.
         '''
-        i = self.configbool("ui", "interactive")
+        i = self.configbool(b"ui", b"interactive")
         if i is None:
             # some environments replace stdin without implementing isatty
             # usually those are non-interactive
@@ -1396,9 +1491,9 @@
     def termwidth(self):
         '''how wide is the terminal in columns?
         '''
-        if 'COLUMNS' in encoding.environ:
+        if b'COLUMNS' in encoding.environ:
             try:
-                return int(encoding.environ['COLUMNS'])
+                return int(encoding.environ[b'COLUMNS'])
             except ValueError:
                 pass
         return scmutil.termsize(self)[0]
@@ -1423,7 +1518,7 @@
         if self.plain():
             return False
 
-        i = self.configbool("ui", "formatted")
+        i = self.configbool(b"ui", b"formatted")
         if i is None:
             # some environments replace stdout without implementing isatty
             # usually those are non-interactive
@@ -1431,39 +1526,45 @@
 
         return i
 
-    def _readline(self, prompt=' ', promptopts=None):
+    def _readline(self, prompt=b' ', promptopts=None):
         # Replacing stdin/stdout temporarily is a hard problem on Python 3
         # because they have to be text streams with *no buffering*. Instead,
         # we use rawinput() only if call_readline() will be invoked by
         # PyOS_Readline(), so no I/O will be made at Python layer.
-        usereadline = (self._isatty(self._fin) and self._isatty(self._fout)
-                       and procutil.isstdin(self._fin)
-                       and procutil.isstdout(self._fout))
+        usereadline = (
+            self._isatty(self._fin)
+            and self._isatty(self._fout)
+            and procutil.isstdin(self._fin)
+            and procutil.isstdout(self._fout)
+        )
         if usereadline:
             try:
                 # magically add command line editing support, where
                 # available
                 import readline
+
                 # force demandimport to really load the module
                 readline.read_history_file
                 # windows sometimes raises something other than ImportError
             except Exception:
                 usereadline = False
 
-        if self._colormode == 'win32' or not usereadline:
+        if self._colormode == b'win32' or not usereadline:
             if not promptopts:
                 promptopts = {}
-            self._writemsgnobuf(self._fmsgout, prompt, type='prompt',
-                                **promptopts)
+            self._writemsgnobuf(
+                self._fmsgout, prompt, type=b'prompt', **promptopts
+            )
             self.flush()
-            prompt = ' '
+            prompt = b' '
         else:
-            prompt = self.label(prompt, 'ui.prompt') + ' '
+            prompt = self.label(prompt, b'ui.prompt') + b' '
 
         # prompt ' ' must exist; otherwise readline may delete the entire line
         # - http://bugs.python.org/issue12833
-        with self.timeblockedsection('stdio'):
+        with self.timeblockedsection(b'stdio'):
             if usereadline:
+                prompt = encoding.strfromlocal(prompt)
                 line = encoding.strtolocal(pycompat.rawinput(prompt))
                 # When stdin is in binary mode on Windows, it can cause
                 # raw_input() to emit an extra trailing carriage return
@@ -1479,7 +1580,7 @@
 
         return line
 
-    def prompt(self, msg, default="y"):
+    def prompt(self, msg, default=b"y"):
         """Prompt user with msg, read response.
         If ui is not interactive, the default is returned.
         """
@@ -1488,16 +1589,17 @@
     def _prompt(self, msg, **opts):
         default = opts[r'default']
         if not self.interactive():
-            self._writemsg(self._fmsgout, msg, ' ', type='prompt', **opts)
-            self._writemsg(self._fmsgout, default or '', "\n",
-                           type='promptecho')
+            self._writemsg(self._fmsgout, msg, b' ', type=b'prompt', **opts)
+            self._writemsg(
+                self._fmsgout, default or b'', b"\n", type=b'promptecho'
+            )
             return default
         try:
             r = self._readline(prompt=msg, promptopts=opts)
             if not r:
                 r = default
-            if self.configbool('ui', 'promptecho'):
-                self._writemsg(self._fmsgout, r, "\n", type='promptecho')
+            if self.configbool(b'ui', b'promptecho'):
+                self._writemsg(self._fmsgout, r, b"\n", type=b'promptecho')
             return r
         except EOFError:
             raise error.ResponseExpected()
@@ -1524,10 +1626,12 @@
         # except an ampersand followed by a character.
         m = re.match(br'(?s)(.+?)\$\$([^\$]*&[^ \$].*)', prompt)
         msg = m.group(1)
-        choices = [p.strip(' ') for p in m.group(2).split('$$')]
+        choices = [p.strip(b' ') for p in m.group(2).split(b'$$')]
+
         def choicetuple(s):
-            ampidx = s.index('&')
-            return s[ampidx + 1:ampidx + 2].lower(), s.replace('&', '', 1)
+            ampidx = s.index(b'&')
+            return s[ampidx + 1 : ampidx + 2].lower(), s.replace(b'&', b'', 1)
+
         return (msg, [choicetuple(s) for s in choices])
 
     def promptchoice(self, prompt, default=0):
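
Given the '$$'-separated grammar parsed above (each choice carrying an
'&' before its response key), extractchoices splits a prompt as
follows (illustrative input; `ui` is an instance of the class above)::

    msg, choices = ui.extractchoices(b"apply change? $$ &Yes $$ &No $$ &Quit")
    # msg     == b'apply change? '  (the trailing space is kept)
    # choices == [(b'y', b'Yes'), (b'n', b'No'), (b'q', b'Quit')]
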
@@ -1548,22 +1652,26 @@
             if r.lower() in resps:
                 return resps.index(r.lower())
             # TODO: shouldn't it be a warning?
-            self._writemsg(self._fmsgout, _("unrecognized response\n"))
+            self._writemsg(self._fmsgout, _(b"unrecognized response\n"))
 
     def getpass(self, prompt=None, default=None):
         if not self.interactive():
             return default
         try:
-            self._writemsg(self._fmsgerr, prompt or _('password: '),
-                           type='prompt', password=True)
+            self._writemsg(
+                self._fmsgerr,
+                prompt or _(b'password: '),
+                type=b'prompt',
+                password=True,
+            )
             # disable getpass() only if explicitly specified. it's still valid
             # to interact with tty even if fin is not a tty.
-            with self.timeblockedsection('stdio'):
-                if self.configbool('ui', 'nontty'):
+            with self.timeblockedsection(b'stdio'):
+                if self.configbool(b'ui', b'nontty'):
                     l = self._fin.readline()
                     if not l:
                         raise EOFError
-                    return l.rstrip('\n')
+                    return l.rstrip(b'\n')
                 else:
                     return getpass.getpass(r'')
         except EOFError:
@@ -1575,21 +1683,21 @@
         This adds an output label of "ui.status".
         '''
         if not self.quiet:
-            self._writemsg(self._fmsgout, type='status', *msg, **opts)
+            self._writemsg(self._fmsgout, type=b'status', *msg, **opts)
 
     def warn(self, *msg, **opts):
         '''write warning message to output (stderr)
 
         This adds an output label of "ui.warning".
         '''
-        self._writemsg(self._fmsgerr, type='warning', *msg, **opts)
+        self._writemsg(self._fmsgerr, type=b'warning', *msg, **opts)
 
     def error(self, *msg, **opts):
         '''write error message to output (stderr)
 
         This adds an output label of "ui.error".
         '''
-        self._writemsg(self._fmsgerr, type='error', *msg, **opts)
+        self._writemsg(self._fmsgerr, type=b'error', *msg, **opts)
 
     def note(self, *msg, **opts):
         '''write note to output (if ui.verbose is True)
@@ -1597,7 +1705,7 @@
         This adds an output label of "ui.note".
         '''
         if self.verbose:
-            self._writemsg(self._fmsgout, type='note', *msg, **opts)
+            self._writemsg(self._fmsgout, type=b'note', *msg, **opts)
 
     def debug(self, *msg, **opts):
         '''write debug message to output (if ui.debugflag is True)
@@ -1605,61 +1713,84 @@
         This adds an output label of "ui.debug".
         '''
         if self.debugflag:
-            self._writemsg(self._fmsgout, type='debug', *msg, **opts)
+            self._writemsg(self._fmsgout, type=b'debug', *msg, **opts)
             self.log(b'debug', b'%s', b''.join(msg))
 
-    def edit(self, text, user, extra=None, editform=None, pending=None,
-             repopath=None, action=None):
+    # Aliases to defeat check-code.
+    statusnoi18n = status
+    notenoi18n = note
+    warnnoi18n = warn
+    writenoi18n = write
+
+    def edit(
+        self,
+        text,
+        user,
+        extra=None,
+        editform=None,
+        pending=None,
+        repopath=None,
+        action=None,
+    ):
         if action is None:
-            self.develwarn('action is None but will soon be a required '
-                           'parameter to ui.edit()')
+            self.develwarn(
+                b'action is None but will soon be a required '
+                b'parameter to ui.edit()'
+            )
         extra_defaults = {
-            'prefix': 'editor',
-            'suffix': '.txt',
+            b'prefix': b'editor',
+            b'suffix': b'.txt',
         }
         if extra is not None:
-            if extra.get('suffix') is not None:
-                self.develwarn('extra.suffix is not None but will soon be '
-                               'ignored by ui.edit()')
+            if extra.get(b'suffix') is not None:
+                self.develwarn(
+                    b'extra.suffix is not None but will soon be '
+                    b'ignored by ui.edit()'
+                )
             extra_defaults.update(extra)
         extra = extra_defaults
 
-        if action == 'diff':
-            suffix = '.diff'
+        if action == b'diff':
+            suffix = b'.diff'
         elif action:
-            suffix = '.%s.hg.txt' % action
+            suffix = b'.%s.hg.txt' % action
         else:
-            suffix = extra['suffix']
+            suffix = extra[b'suffix']
 
         rdir = None
-        if self.configbool('experimental', 'editortmpinhg'):
+        if self.configbool(b'experimental', b'editortmpinhg'):
             rdir = repopath
-        (fd, name) = pycompat.mkstemp(prefix='hg-' + extra['prefix'] + '-',
-                                      suffix=suffix,
-                                      dir=rdir)
+        (fd, name) = pycompat.mkstemp(
+            prefix=b'hg-' + extra[b'prefix'] + b'-', suffix=suffix, dir=rdir
+        )
         try:
             f = os.fdopen(fd, r'wb')
             f.write(util.tonativeeol(text))
             f.close()
 
-            environ = {'HGUSER': user}
-            if 'transplant_source' in extra:
-                environ.update({'HGREVISION': hex(extra['transplant_source'])})
-            for label in ('intermediate-source', 'source', 'rebase_source'):
+            environ = {b'HGUSER': user}
+            if b'transplant_source' in extra:
+                environ.update(
+                    {b'HGREVISION': hex(extra[b'transplant_source'])}
+                )
+            for label in (b'intermediate-source', b'source', b'rebase_source'):
                 if label in extra:
-                    environ.update({'HGREVISION': extra[label]})
+                    environ.update({b'HGREVISION': extra[label]})
                     break
             if editform:
-                environ.update({'HGEDITFORM': editform})
+                environ.update({b'HGEDITFORM': editform})
             if pending:
-                environ.update({'HG_PENDING': pending})
+                environ.update({b'HG_PENDING': pending})
 
             editor = self.geteditor()
 
-            self.system("%s \"%s\"" % (editor, name),
-                        environ=environ,
-                        onerr=error.Abort, errprefix=_("edit failed"),
-                        blockedtag='editor')
+            self.system(
+                b"%s \"%s\"" % (editor, name),
+                environ=environ,
+                onerr=error.Abort,
+                errprefix=_(b"edit failed"),
+                blockedtag=b'editor',
+            )
 
             f = open(name, r'rb')
             t = util.fromnativeeol(f.read())
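
edit() is the classic editor round trip: write the text to a temp
file, run the configured editor on it (with HGUSER, HGEDITFORM,
HG_PENDING etc. exported), then read the possibly modified text back.
Stripped of Mercurial's plumbing, the pattern is::

    import os
    import subprocess
    import tempfile


    def edit(text, editor=None):
        """Let the user edit text in an external editor (sketch)."""
        # hg consults HGEDITOR/ui.editor instead of EDITOR
        editor = editor or os.environ.get('EDITOR', 'vi')
        fd, name = tempfile.mkstemp(prefix='hg-editor-', suffix='.txt')
        try:
            with os.fdopen(fd, 'wb') as f:
                f.write(text)
            subprocess.check_call('%s "%s"' % (editor, name), shell=True)
            with open(name, 'rb') as f:
                return f.read()
        finally:
            os.unlink(name)
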
@@ -1669,8 +1800,15 @@
 
         return t
 
-    def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None,
-               blockedtag=None):
+    def system(
+        self,
+        cmd,
+        environ=None,
+        cwd=None,
+        onerr=None,
+        errprefix=None,
+        blockedtag=None,
+    ):
         '''execute a shell command with the appropriate output stream. Command
         output will be redirected if fout is not stdout.
 
@@ -1681,17 +1819,19 @@
             # Long cmds tend to be because of an absolute path on cmd. Keep
             # the tail end instead
             cmdsuffix = cmd.translate(None, _keepalnum)[-85:]
-            blockedtag = 'unknown_system_' + cmdsuffix
+            blockedtag = b'unknown_system_' + cmdsuffix
         out = self._fout
         if any(s[1] for s in self._bufferstates):
             out = self
         with self.timeblockedsection(blockedtag):
             rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out)
         if rc and onerr:
-            errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
-                                procutil.explainexit(rc))
+            errmsg = b'%s %s' % (
+                os.path.basename(cmd.split(None, 1)[0]),
+                procutil.explainexit(rc),
+            )
             if errprefix:
-                errmsg = '%s: %s' % (errprefix, errmsg)
+                errmsg = b'%s: %s' % (errprefix, errmsg)
             raise onerr(errmsg)
         return rc
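
system() turns a nonzero exit status into whichever exception class is
passed as onerr, with errprefix prepended for context; the editor
invocation above is the canonical call site. Another illustrative use
(hypothetical command; `sigfile` and `user` assumed in scope)::

    ui.system(
        b'gpg --verify "%s"' % sigfile,  # hypothetical
        environ={b'HGUSER': user},
        onerr=error.Abort,
        errprefix=_(b'verification failed'),
        blockedtag=b'gpg',
    )
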
 
@@ -1715,10 +1855,12 @@
                 exconly = traceback.format_exception_only(cause[0], cause[1])
 
                 # exclude frame where 'exc' was chained and rethrown from exctb
-                self.write_err('Traceback (most recent call last):\n',
-                               ''.join(exctb[:-1]),
-                               ''.join(causetb),
-                               ''.join(exconly))
+                self.write_err(
+                    b'Traceback (most recent call last):\n',
+                    b''.join(exctb[:-1]),
+                    b''.join(causetb),
+                    b''.join(exconly),
+                )
             else:
                 output = traceback.format_exception(exc[0], exc[1], exc[2])
                 self.write_err(encoding.strtolocal(r''.join(output)))
@@ -1726,33 +1868,37 @@
 
     def geteditor(self):
         '''return editor to use'''
-        if pycompat.sysplatform == 'plan9':
+        if pycompat.sysplatform == b'plan9':
             # vi is the MIPS instruction simulator on Plan 9. We
             # instead default to E to plumb commit messages to
             # avoid confusion.
-            editor = 'E'
+            editor = b'E'
         else:
-            editor = 'vi'
-        return (encoding.environ.get("HGEDITOR") or
-                self.config("ui", "editor", editor))
+            editor = b'vi'
+        return encoding.environ.get(b"HGEDITOR") or self.config(
+            b"ui", b"editor", editor
+        )
 
     @util.propertycache
     def _progbar(self):
         """setup the progbar singleton to the ui object"""
-        if (self.quiet or self.debugflag
-                or self.configbool('progress', 'disable')
-                or not progress.shouldprint(self)):
+        if (
+            self.quiet
+            or self.debugflag
+            or self.configbool(b'progress', b'disable')
+            or not progress.shouldprint(self)
+        ):
             return None
         return getprogbar(self)
 
     def _progclear(self):
         """clear progress bar output if any. use it before any output"""
-        if not haveprogbar(): # nothing loaded yet
+        if not haveprogbar():  # nothing loaded yet
             return
         if self._progbar is not None and self._progbar.printed:
             self._progbar.clear()
 
-    def progress(self, topic, pos, item="", unit="", total=None):
+    def progress(self, topic, pos, item=b"", unit=b"", total=None):
         '''show a progress message
 
         By default a textual progress bar will be displayed if an operation
@@ -1767,15 +1913,16 @@
         All topics should be marked closed by setting pos to None at
         termination.
         '''
-        self.deprecwarn("use ui.makeprogress() instead of ui.progress()",
-                        "5.1")
+        self.deprecwarn(
+            b"use ui.makeprogress() instead of ui.progress()", b"5.1"
+        )
         progress = self.makeprogress(topic, unit, total)
         if pos is not None:
             progress.update(pos, item=item)
         else:
             progress.complete()
 
-    def makeprogress(self, topic, unit="", total=None):
+    def makeprogress(self, topic, unit=b"", total=None):
         """Create a progress helper for the specified topic"""
         if getattr(self._fmsgerr, 'structured', False):
             # channel for machine-readable output with metadata, just send
@@ -1784,13 +1931,23 @@
             # time) from progbar. we might want to support update delay to
             # reduce the cost of transferring progress messages.
             def updatebar(topic, pos, item, unit, total):
-                self._fmsgerr.write(None, type=b'progress', topic=topic,
-                                    pos=pos, item=item, unit=unit, total=total)
+                self._fmsgerr.write(
+                    None,
+                    type=b'progress',
+                    topic=topic,
+                    pos=pos,
+                    item=item,
+                    unit=unit,
+                    total=total,
+                )
+
         elif self._progbar is not None:
             updatebar = self._progbar.progress
         else:
+
             def updatebar(topic, pos, item, unit, total):
                 pass
+
         return scmutil.progress(self, updatebar, topic, unit, total)
 
     def getlogger(self, name):
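
The deprecation shim in progress() above doubles as the migration
recipe for makeprogress(): create one helper per topic, call update()
with a position, and complete() when done. A hypothetical caller
(`files` and `process` assumed in scope)::

    progress = ui.makeprogress(b'scanning', unit=b'files', total=len(files))
    for i, f in enumerate(files):
        progress.update(i, item=f)
        process(f)  # hypothetical per-file work
    progress.complete()
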
@@ -1818,8 +1975,9 @@
         '''
         if not self._loggers:
             return
-        activeloggers = [l for l in self._loggers.itervalues()
-                         if l.tracked(event)]
+        activeloggers = [
+            l for l in pycompat.itervalues(self._loggers) if l.tracked(event)
+        ]
         if not activeloggers:
             return
         msg = msgfmt % msgargs
@@ -1853,24 +2011,27 @@
         Use 'stacklevel' to report the offender some layers further up in the
         stack.
         """
-        if not self.configbool('devel', 'all-warnings'):
-            if config is None or not self.configbool('devel', config):
+        if not self.configbool(b'devel', b'all-warnings'):
+            if config is None or not self.configbool(b'devel', config):
                 return
-        msg = 'devel-warn: ' + msg
-        stacklevel += 1 # get in develwarn
+        msg = b'devel-warn: ' + msg
+        stacklevel += 1  # get in develwarn
         if self.tracebackflag:
             util.debugstacktrace(msg, stacklevel, self._ferr, self._fout)
-            self.log('develwarn', '%s at:\n%s' %
-                     (msg, ''.join(util.getstackframes(stacklevel))))
+            self.log(
+                b'develwarn',
+                b'%s at:\n%s'
+                % (msg, b''.join(util.getstackframes(stacklevel))),
+            )
         else:
             curframe = inspect.currentframe()
             calframe = inspect.getouterframes(curframe, 2)
             fname, lineno, fmsg = calframe[stacklevel][1:4]
             fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg)
-            self.write_err('%s at: %s:%d (%s)\n'
-                           % (msg, fname, lineno, fmsg))
-            self.log('develwarn', '%s at: %s:%d (%s)\n',
-                     msg, fname, lineno, fmsg)
+            self.write_err(b'%s at: %s:%d (%s)\n' % (msg, fname, lineno, fmsg))
+            self.log(
+                b'develwarn', b'%s at: %s:%d (%s)\n', msg, fname, lineno, fmsg
+            )
             curframe = calframe = None  # avoid cycles
 
     def deprecwarn(self, msg, version, stacklevel=2):
@@ -1879,12 +2040,16 @@
         - msg: message explaining what is deprecated and how to upgrade,
         - version: last version where the API will be supported,
         """
-        if not (self.configbool('devel', 'all-warnings')
-                or self.configbool('devel', 'deprec-warn')):
+        if not (
+            self.configbool(b'devel', b'all-warnings')
+            or self.configbool(b'devel', b'deprec-warn')
+        ):
             return
-        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
-                " update your code.)") % version
-        self.develwarn(msg, stacklevel=stacklevel, config='deprec-warn')
+        msg += (
+            b"\n(compatibility will be dropped after Mercurial-%s,"
+            b" update your code.)"
+        ) % version
+        self.develwarn(msg, stacklevel=stacklevel, config=b'deprec-warn')
 
     def exportableenviron(self):
         """The environment variables that are safe to export, e.g. through
@@ -1893,7 +2058,7 @@
         return self._exportableenviron
 
     @contextlib.contextmanager
-    def configoverride(self, overrides, source=""):
+    def configoverride(self, overrides, source=b""):
         """Context manager for temporary config overrides
         `overrides` must be a dict of the following structure:
         {(section, name) : value}"""
@@ -1908,8 +2073,9 @@
                 self.restoreconfig(backup)
             # just restoring ui.quiet config to the previous value is not enough
             # as it does not update ui.quiet class member
-            if ('ui', 'quiet') in overrides:
-                self.fixconfig(section='ui')
+            if (b'ui', b'quiet') in overrides:
+                self.fixconfig(section=b'ui')
+
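
A usage sketch of the context manager above (``do_noisy_operation`` is a
made-up placeholder)::

   with ui.configoverride({(b'ui', b'quiet'): True}, source=b'my-extension'):
       do_noisy_operation()  # runs with ui.quiet forced on
   # previous values, including the ui.quiet member, are restored here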
 
 class paths(dict):
     """Represents a collection of paths and their configs.
@@ -1917,14 +2083,15 @@
     Data is initially derived from ui instances and the config files they have
     loaded.
     """
+
     def __init__(self, ui):
         dict.__init__(self)
 
-        for name, loc in ui.configitems('paths', ignoresub=True):
+        for name, loc in ui.configitems(b'paths', ignoresub=True):
             # No location is the same as not existing.
             if not loc:
                 continue
-            loc, sub = ui.configsuboptions('paths', name)
+            loc, sub = ui.configsuboptions(b'paths', name)
             self[name] = path(ui, name, rawloc=loc, suboptions=sub)
 
     def getpath(self, name, default=None):
@@ -1962,11 +2129,12 @@
                 # We don't pass sub-options in, so no need to pass ui instance.
                 return path(None, None, rawloc=name)
             except ValueError:
-                raise error.RepoError(_('repository %s does not exist') %
-                                        name)
+                raise error.RepoError(_(b'repository %s does not exist') % name)
+
 
 _pathsuboptions = {}
 
+
 def pathsuboption(option, attr):
     """Decorator used to declare a path sub-option.
 
@@ -1981,32 +2149,42 @@
     This decorator can be used to perform additional verification of
     sub-options and to change the type of sub-options.
     """
+
     def register(func):
         _pathsuboptions[option] = (attr, func)
         return func
+
     return register
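
Mirroring the registrations below, a hypothetical sub-option could be
declared like this (``mykey`` and ``myattr`` are made-up names)::

   @pathsuboption(b'mykey', b'myattr')
   def mykeypathoption(ui, path, value):
       # validate or convert here; returning None marks the value invalid
       return value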
 
-@pathsuboption('pushurl', 'pushloc')
+
+@pathsuboption(b'pushurl', b'pushloc')
 def pushurlpathoption(ui, path, value):
     u = util.url(value)
     # Actually require a URL.
     if not u.scheme:
-        ui.warn(_('(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
+        ui.warn(_(b'(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
         return None
 
     # Don't support the #foo syntax in the push URL to declare the
     # branch to push.
     if u.fragment:
-        ui.warn(_('("#fragment" in paths.%s:pushurl not supported; '
-                  'ignoring)\n') % path.name)
+        ui.warn(
+            _(
+                b'("#fragment" in paths.%s:pushurl not supported; '
+                b'ignoring)\n'
+            )
+            % path.name
+        )
         u.fragment = None
 
     return bytes(u)
 
-@pathsuboption('pushrev', 'pushrev')
+
+@pathsuboption(b'pushrev', b'pushrev')
 def pushrevpathoption(ui, path, value):
     return value
 
+
 class path(object):
     """Represents an individual path and its configuration."""
 
@@ -2023,7 +2201,7 @@
         ``ValueError`` is raised.
         """
         if not rawloc:
-            raise ValueError('rawloc must be defined')
+            raise ValueError(b'rawloc must be defined')
 
         # Locations may define branches via syntax <base>#<branch>.
         u = util.url(rawloc)
@@ -2037,20 +2215,22 @@
 
         self.name = name
         self.rawloc = rawloc
-        self.loc = '%s' % u
+        self.loc = b'%s' % u
 
         # When given a raw location but not a symbolic name, validate the
         # location is valid.
         if not name and not u.scheme and not self._isvalidlocalpath(self.loc):
-            raise ValueError('location is not a URL or path to a local '
-                             'repo: %s' % rawloc)
+            raise ValueError(
+                b'location is not a URL or path to a local '
+                b'repo: %s' % rawloc
+            )
 
         suboptions = suboptions or {}
 
         # Now process the sub-options. If a sub-option is registered, its
         # attribute will always be present. The value will be None if there
         # was no valid sub-option.
-        for suboption, (attr, func) in _pathsuboptions.iteritems():
+        for suboption, (attr, func) in pycompat.iteritems(_pathsuboptions):
             if suboption not in suboptions:
                 setattr(self, attr, None)
                 continue
@@ -2064,7 +2244,7 @@
         'valid' in this case (like when pulling from a git repo into a hg
         one)."""
         try:
-            return os.path.isdir(os.path.join(path, '.hg'))
+            return os.path.isdir(os.path.join(path, b'.hg'))
         # Python 2 may return TypeError. Python 3, ValueError.
         except (TypeError, ValueError):
             return False
@@ -2076,16 +2256,18 @@
         This is intended to be used for presentation purposes.
         """
         d = {}
-        for subopt, (attr, _func) in _pathsuboptions.iteritems():
+        for subopt, (attr, _func) in pycompat.iteritems(_pathsuboptions):
             value = getattr(self, attr)
             if value is not None:
                 d[subopt] = value
         return d
 
+
 # we instantiate one globally shared progress bar to avoid
 # competing progress bars when multiple UI objects get created
 _progresssingleton = None
 
+
 def getprogbar(ui):
     global _progresssingleton
     if _progresssingleton is None:
@@ -2094,9 +2276,11 @@
         _progresssingleton = progress.progbar(ui)
     return _progresssingleton
 
+
 def haveprogbar():
     return _progresssingleton is not None
 
+
 def _selectmsgdests(ui):
     name = ui.config(b'ui', b'message-output')
     if name == b'channel':
@@ -2112,6 +2296,7 @@
         return ui.ferr, ui.ferr
     raise error.Abort(b'invalid ui.message-output destination: %s' % name)
 
+
 def _writemsgwith(write, dest, *args, **opts):
     """Write ui message with the given ui._write*() function
 
@@ -2120,5 +2305,5 @@
     """
     # TODO: maybe change 'type' to a mandatory option
     if r'type' in opts and not getattr(dest, 'structured', False):
-        opts[r'label'] = opts.get(r'label', '') + ' ui.%s' % opts.pop(r'type')
+        opts[r'label'] = opts.get(r'label', b'') + b' ui.%s' % opts.pop(r'type')
     write(dest, *args, **opts)
--- a/mercurial/unionrepo.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/unionrepo.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,7 +14,7 @@
 from __future__ import absolute_import
 
 from .i18n import _
-from .node import nullid
+from .pycompat import getattr
 
 from . import (
     changelog,
@@ -31,6 +31,7 @@
     vfs as vfsmod,
 )
 
+
 class unionrevlog(revlog.revlog):
     def __init__(self, opener, indexfile, revlog2, linkmapper):
         # How it works:
@@ -45,20 +46,20 @@
 
         n = len(self)
         self.repotiprev = n - 1
-        self.bundlerevs = set() # used by 'bundle()' revset expression
+        self.bundlerevs = set()  # used by 'bundle()' revset expression
         for rev2 in self.revlog2:
             rev = self.revlog2.index[rev2]
             # rev numbers - in revlog2, very different from self.rev
             _start, _csize, rsize, base, linkrev, p1rev, p2rev, node = rev
             flags = _start & 0xFFFF
 
-            if linkmapper is None: # link is to same revlog
-                assert linkrev == rev2 # we never link back
+            if linkmapper is None:  # link is to same revlog
+                assert linkrev == rev2  # we never link back
                 link = n
-            else: # rev must be mapped from repo2 cl to unified cl by linkmapper
+            else:  # rev must be mapped from repo2 cl to unified cl by linkmapper
                 link = linkmapper(linkrev)
 
-            if linkmapper is not None: # link is to same revlog
+            if linkmapper is not None:  # base must be mapped from repo2 too
                 base = linkmapper(base)
 
             if node in self.nodemap:
@@ -71,8 +72,16 @@
 
             # TODO: it's probably wrong to set compressed length to None, but
             # I have no idea if csize is valid in the base revlog context.
-            e = (flags, None, rsize, base,
-                 link, self.rev(p1node), self.rev(p2node), node)
+            e = (
+                flags,
+                None,
+                rsize,
+                base,
+                link,
+                self.rev(p1node),
+                self.rev(p2node),
+                node,
+            )
             self.index.append(e)
             self.nodemap[node] = n
             self.bundlerevs.add(n)
@@ -88,16 +97,14 @@
         if rev1 > self.repotiprev and rev2 > self.repotiprev:
             return self.revlog2.revdiff(
                 self.revlog2.rev(self.node(rev1)),
-                self.revlog2.rev(self.node(rev2)))
+                self.revlog2.rev(self.node(rev2)),
+            )
         elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
-            return self.baserevdiff(rev1, rev2)
+            return super(unionrevlog, self).revdiff(rev1, rev2)
 
-        return mdiff.textdiff(self.revision(rev1), self.revision(rev2))
+        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
 
-    def revision(self, nodeorrev, _df=None, raw=False):
-        """return an uncompressed revision of a given node or revision
-        number.
-        """
+    def _revisiondata(self, nodeorrev, _df=None, raw=False):
         if isinstance(nodeorrev, int):
             rev = nodeorrev
             node = self.node(rev)
@@ -105,83 +112,64 @@
             node = nodeorrev
             rev = self.rev(node)
 
-        if node == nullid:
-            return ""
-
         if rev > self.repotiprev:
-            text = self.revlog2.revision(node)
-            self._revisioncache = (node, rev, text)
+            # work around manifestrevlog NOT being a revlog
+            revlog2 = getattr(self.revlog2, '_revlog', self.revlog2)
+            func = revlog2._revisiondata
         else:
-            text = self.baserevision(rev)
-            # already cached
-        return text
-
-    def baserevision(self, nodeorrev):
-        # Revlog subclasses may override 'revision' method to modify format of
-        # content retrieved from revlog. To use unionrevlog with such class one
-        # needs to override 'baserevision' and make more specific call here.
-        return revlog.revlog.revision(self, nodeorrev)
-
-    def baserevdiff(self, rev1, rev2):
-        # Exists for the same purpose as baserevision.
-        return revlog.revlog.revdiff(self, rev1, rev2)
+            func = super(unionrevlog, self)._revisiondata
+        return func(node, _df=_df, raw=raw)
 
     def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
         raise NotImplementedError
-    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
-                 maybemissingparents=False):
+
+    def addgroup(
+        self,
+        deltas,
+        linkmapper,
+        transaction,
+        addrevisioncb=None,
+        maybemissingparents=False,
+    ):
         raise NotImplementedError
+
     def strip(self, minlink, transaction):
         raise NotImplementedError
+
     def checksize(self):
         raise NotImplementedError
 
+
 class unionchangelog(unionrevlog, changelog.changelog):
     def __init__(self, opener, opener2):
         changelog.changelog.__init__(self, opener)
         linkmapper = None
         changelog2 = changelog.changelog(opener2)
-        unionrevlog.__init__(self, opener, self.indexfile, changelog2,
-                             linkmapper)
+        unionrevlog.__init__(
+            self, opener, self.indexfile, changelog2, linkmapper
+        )
 
-    def baserevision(self, nodeorrev):
-        # Although changelog doesn't override 'revision' method, some extensions
-        # may replace this class with another that does. Same story with
-        # manifest and filelog classes.
-        return changelog.changelog.revision(self, nodeorrev)
-
-    def baserevdiff(self, rev1, rev2):
-        return changelog.changelog.revdiff(self, rev1, rev2)
 
 class unionmanifest(unionrevlog, manifest.manifestrevlog):
     def __init__(self, opener, opener2, linkmapper):
         manifest.manifestrevlog.__init__(self, opener)
         manifest2 = manifest.manifestrevlog(opener2)
-        unionrevlog.__init__(self, opener, self.indexfile, manifest2,
-                             linkmapper)
+        unionrevlog.__init__(
+            self, opener, self.indexfile, manifest2, linkmapper
+        )
 
-    def baserevision(self, nodeorrev):
-        return manifest.manifestrevlog.revision(self, nodeorrev)
-
-    def baserevdiff(self, rev1, rev2):
-        return manifest.manifestrevlog.revdiff(self, rev1, rev2)
 
 class unionfilelog(filelog.filelog):
     def __init__(self, opener, path, opener2, linkmapper, repo):
         filelog.filelog.__init__(self, opener, path)
         filelog2 = filelog.filelog(opener2, path)
-        self._revlog = unionrevlog(opener, self.indexfile,
-                                   filelog2._revlog, linkmapper)
+        self._revlog = unionrevlog(
+            opener, self.indexfile, filelog2._revlog, linkmapper
+        )
         self._repo = repo
         self.repotiprev = self._revlog.repotiprev
         self.revlog2 = self._revlog.revlog2
 
-    def baserevision(self, nodeorrev):
-        return filelog.filelog.revision(self, nodeorrev)
-
-    def baserevdiff(self, rev1, rev2):
-        return filelog.filelog.revdiff(self, rev1, rev2)
-
     def iscensored(self, rev):
         """Check if a revision is censored."""
         if rev <= self.repotiprev:
@@ -189,21 +177,24 @@
         node = self.node(rev)
         return self.revlog2.iscensored(self.revlog2.rev(node))
 
+
 class unionpeer(localrepo.localpeer):
     def canpush(self):
         return False
 
+
 class unionrepository(object):
     """Represents the union of data in 2 repositories.
 
     Instances are not usable if constructed directly. Use ``instance()``
     or ``makeunionrepository()`` to create a usable instance.
     """
+
     def __init__(self, repo2, url):
         self.repo2 = repo2
         self._url = url
 
-        self.ui.setconfig('phases', 'publish', False, 'unionrepo')
+        self.ui.setconfig(b'phases', b'publish', False, b'unionrepo')
 
     @localrepo.unfilteredpropertycache
     def changelog(self):
@@ -211,10 +202,12 @@
 
     @localrepo.unfilteredpropertycache
     def manifestlog(self):
-        rootstore = unionmanifest(self.svfs, self.repo2.svfs,
-                                  self.unfiltered()._clrev)
-        return manifest.manifestlog(self.svfs, self, rootstore,
-                                    self.narrowmatch())
+        rootstore = unionmanifest(
+            self.svfs, self.repo2.svfs, self.unfiltered()._clrev
+        )
+        return manifest.manifestlog(
+            self.svfs, self, rootstore, self.narrowmatch()
+        )
 
     def _clrev(self, rev2):
         """map from repo2 changelog rev to temporary rev in self.changelog"""
@@ -225,8 +218,9 @@
         return self._url
 
     def file(self, f):
-        return unionfilelog(self.svfs, f, self.repo2.svfs,
-                            self.unfiltered()._clrev, self)
+        return unionfilelog(
+            self.svfs, f, self.repo2.svfs, self.unfiltered()._clrev, self
+        )
 
     def close(self):
         self.repo2.close()
@@ -238,29 +232,30 @@
         return unionpeer(self)
 
     def getcwd(self):
-        return encoding.getcwd() # always outside the repo
+        return encoding.getcwd()  # always outside the repo
+
 
 def instance(ui, path, create, intents=None, createopts=None):
     if create:
-        raise error.Abort(_('cannot create new union repository'))
-    parentpath = ui.config("bundle", "mainreporoot")
+        raise error.Abort(_(b'cannot create new union repository'))
+    parentpath = ui.config(b"bundle", b"mainreporoot")
     if not parentpath:
         # try to find the correct path to the working directory repo
         parentpath = cmdutil.findrepo(encoding.getcwd())
         if parentpath is None:
-            parentpath = ''
+            parentpath = b''
     if parentpath:
         # Try to make the full path relative so we get a nice, short URL.
         # In particular, we don't want temp dir names in test outputs.
         cwd = encoding.getcwd()
         if parentpath == cwd:
-            parentpath = ''
+            parentpath = b''
         else:
             cwd = pathutil.normasprefix(cwd)
             if parentpath.startswith(cwd):
-                parentpath = parentpath[len(cwd):]
-    if path.startswith('union:'):
-        s = path.split(":", 1)[1].split("+", 1)
+                parentpath = parentpath[len(cwd) :]
+    if path.startswith(b'union:'):
+        s = path.split(b":", 1)[1].split(b"+", 1)
         if len(s) == 1:
             repopath, repopath2 = parentpath, s[0]
         else:
@@ -270,13 +265,16 @@
 
     return makeunionrepository(ui, repopath, repopath2)
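
The parsing above backs the ``union:`` URL scheme; for example (repository
paths are placeholders)::

   $ hg log -R union:../repo1+../repo2    # explicit pair of repositories
   $ hg log -R union:../other             # current repo is the first operand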
 
+
 def makeunionrepository(ui, repopath1, repopath2):
     """Make a union repository object from 2 local repo paths."""
     repo1 = localrepo.instance(ui, repopath1, create=False)
     repo2 = localrepo.instance(ui, repopath2, create=False)
 
-    url = 'union:%s+%s' % (util.expandpath(repopath1),
-                           util.expandpath(repopath2))
+    url = b'union:%s+%s' % (
+        util.expandpath(repopath1),
+        util.expandpath(repopath2),
+    )
 
     class derivedunionrepository(unionrepository, repo1.__class__):
         pass
--- a/mercurial/upgrade.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/upgrade.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,8 +10,10 @@
 import stat
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     changelog,
+    copies,
     error,
     filelog,
     hg,
@@ -24,9 +26,14 @@
     vfs as vfsmod,
 )
 
-from .utils import (
-    compression,
-)
+from .utils import compression
+
+# list of requirements that require a clone of all revlogs if added/removed
+RECLONES_REQUIREMENTS = {
+    b'generaldelta',
+    localrepo.SPARSEREVLOG_REQUIREMENT,
+}
+
 
 def requiredsourcerequirements(repo):
     """Obtain requirements required to be present to upgrade a repo.
@@ -36,11 +43,12 @@
     """
     return {
         # Introduced in Mercurial 0.9.2.
-        'revlogv1',
+        b'revlogv1',
         # Introduced in Mercurial 0.9.2.
-        'store',
+        b'store',
     }
 
+
 def blocksourcerequirements(repo):
     """Obtain requirements that will prevent an upgrade from occurring.
 
@@ -50,14 +58,15 @@
     return {
         # The upgrade code does not yet support these experimental features.
         # This is an artificial limitation.
-        'treemanifest',
+        b'treemanifest',
         # This was a precursor to generaldelta and was never enabled by default.
         # It should (hopefully) not exist in the wild.
-        'parentdelta',
+        b'parentdelta',
         # Upgrade should operate on the actual store, not the shared link.
-        'shared',
+        b'shared',
     }
 
+
 def supportremovedrequirements(repo):
     """Obtain requirements that can be removed during an upgrade.
 
@@ -67,15 +76,18 @@
     """
     supported = {
         localrepo.SPARSEREVLOG_REQUIREMENT,
+        localrepo.SIDEDATA_REQUIREMENT,
+        localrepo.COPIESSDC_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
         if engine.available() and engine.revlogheader():
             supported.add(b'exp-compression-%s' % name)
-            if engine.name() == 'zstd':
+            if engine.name() == b'zstd':
                 supported.add(b'revlog-compression-zstd')
     return supported
 
+
 def supporteddestrequirements(repo):
     """Obtain requirements that upgrade supports in the destination.
 
@@ -85,21 +97,24 @@
     Extensions should monkeypatch this to add their custom requirements.
     """
     supported = {
-        'dotencode',
-        'fncache',
-        'generaldelta',
-        'revlogv1',
-        'store',
+        b'dotencode',
+        b'fncache',
+        b'generaldelta',
+        b'revlogv1',
+        b'store',
         localrepo.SPARSEREVLOG_REQUIREMENT,
+        localrepo.SIDEDATA_REQUIREMENT,
+        localrepo.COPIESSDC_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
         if engine.available() and engine.revlogheader():
             supported.add(b'exp-compression-%s' % name)
-            if engine.name() == 'zstd':
+            if engine.name() == b'zstd':
                 supported.add(b'revlog-compression-zstd')
     return supported
 
+
 def allowednewrequirements(repo):
     """Obtain requirements that can be added to a repository during upgrade.
 
@@ -111,24 +126,29 @@
     future, unknown requirements from accidentally being added.
     """
     supported = {
-        'dotencode',
-        'fncache',
-        'generaldelta',
+        b'dotencode',
+        b'fncache',
+        b'generaldelta',
         localrepo.SPARSEREVLOG_REQUIREMENT,
+        localrepo.SIDEDATA_REQUIREMENT,
+        localrepo.COPIESSDC_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
         if engine.available() and engine.revlogheader():
             supported.add(b'exp-compression-%s' % name)
-            if engine.name() == 'zstd':
+            if engine.name() == b'zstd':
                 supported.add(b'revlog-compression-zstd')
     return supported
 
+
 def preservedrequirements(repo):
     return set()
 
-deficiency = 'deficiency'
-optimisation = 'optimization'
+
+deficiency = b'deficiency'
+optimisation = b'optimization'
+
 
 class improvement(object):
     """Represents an improvement that can be made as part of an upgrade.
@@ -154,6 +174,7 @@
        Message intended for humans explaining what an upgrade addressing this
        issue will do. Should be worded in the future tense.
     """
+
     def __init__(self, name, type, description, upgrademessage):
         self.name = name
         self.type = type
@@ -172,14 +193,18 @@
     def __hash__(self):
         return hash(self.name)
 
+
 allformatvariant = []
 
+
 def registerformatvariant(cls):
     allformatvariant.append(cls)
     return cls
 
+
 class formatvariant(improvement):
     """an improvement subclass dedicated to repository format"""
+
     type = deficiency
     ### The following attributes should be defined for each class:
 
@@ -212,6 +237,7 @@
         """current value of the variant in the configuration"""
         raise NotImplementedError()
 
+
 class requirementformatvariant(formatvariant):
     """formatvariant based on a 'requirement' name.
 
@@ -225,7 +251,8 @@
     @staticmethod
     def _newreporequirements(ui):
         return localrepo.newreporequirements(
-            ui, localrepo.defaultcreateopts(ui))
+            ui, localrepo.defaultcreateopts(ui)
+        )
 
     @classmethod
     def fromrepo(cls, repo):
@@ -237,93 +264,148 @@
         assert cls._requirement is not None
         return cls._requirement in cls._newreporequirements(repo.ui)
 
+
 @registerformatvariant
 class fncache(requirementformatvariant):
-    name = 'fncache'
+    name = b'fncache'
 
-    _requirement = 'fncache'
+    _requirement = b'fncache'
 
     default = True
 
-    description = _('long and reserved filenames may not work correctly; '
-                    'repository performance is sub-optimal')
+    description = _(
+        b'long and reserved filenames may not work correctly; '
+        b'repository performance is sub-optimal'
+    )
 
-    upgrademessage = _('repository will be more resilient to storing '
-                       'certain paths and performance of certain '
-                       'operations should be improved')
+    upgrademessage = _(
+        b'repository will be more resilient to storing '
+        b'certain paths and performance of certain '
+        b'operations should be improved'
+    )
+
 
 @registerformatvariant
 class dotencode(requirementformatvariant):
-    name = 'dotencode'
+    name = b'dotencode'
 
-    _requirement = 'dotencode'
+    _requirement = b'dotencode'
 
     default = True
 
-    description = _('storage of filenames beginning with a period or '
-                    'space may not work correctly')
+    description = _(
+        b'storage of filenames beginning with a period or '
+        b'space may not work correctly'
+    )
 
-    upgrademessage = _('repository will be better able to store files '
-                       'beginning with a space or period')
+    upgrademessage = _(
+        b'repository will be better able to store files '
+        b'beginning with a space or period'
+    )
+
 
 @registerformatvariant
 class generaldelta(requirementformatvariant):
-    name = 'generaldelta'
+    name = b'generaldelta'
 
-    _requirement = 'generaldelta'
+    _requirement = b'generaldelta'
 
     default = True
 
-    description = _('deltas within internal storage are unable to '
-                    'choose optimal revisions; repository is larger and '
-                    'slower than it could be; interaction with other '
-                    'repositories may require extra network and CPU '
-                    'resources, making "hg push" and "hg pull" slower')
+    description = _(
+        b'deltas within internal storage are unable to '
+        b'choose optimal revisions; repository is larger and '
+        b'slower than it could be; interaction with other '
+        b'repositories may require extra network and CPU '
+        b'resources, making "hg push" and "hg pull" slower'
+    )
 
-    upgrademessage = _('repository storage will be able to create '
-                       'optimal deltas; new repository data will be '
-                       'smaller and read times should decrease; '
-                       'interacting with other repositories using this '
-                       'storage model should require less network and '
-                       'CPU resources, making "hg push" and "hg pull" '
-                       'faster')
+    upgrademessage = _(
+        b'repository storage will be able to create '
+        b'optimal deltas; new repository data will be '
+        b'smaller and read times should decrease; '
+        b'interacting with other repositories using this '
+        b'storage model should require less network and '
+        b'CPU resources, making "hg push" and "hg pull" '
+        b'faster'
+    )
+
 
 @registerformatvariant
 class sparserevlog(requirementformatvariant):
-    name = 'sparserevlog'
+    name = b'sparserevlog'
 
     _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
 
     default = True
 
-    description = _('in order to limit disk reading and memory usage on older '
-                    'version, the span of a delta chain from its root to its '
-                    'end is limited, whatever the relevant data in this span. '
-                    'This can severly limit Mercurial ability to build good '
-                    'chain of delta resulting is much more storage space being '
-                    'taken and limit reusability of on disk delta during '
-                    'exchange.'
-                   )
+    description = _(
+        b'in order to limit disk reading and memory usage on older '
+        b'versions, the span of a delta chain from its root to its '
+        b'end is limited, whatever the relevant data in this span. '
+        b'This can severely limit the ability of Mercurial to build '
+        b'good chains of deltas, resulting in much more storage space '
+        b'being taken and limited reusability of on-disk deltas '
+        b'during exchange.'
+    )
+
+    upgrademessage = _(
+        b'Revlog supports delta chains with more unused data '
+        b'between payloads. These gaps will be skipped at read '
+        b'time. This allows for better delta chains, making '
+        b'compression better and exchange with servers faster.'
+    )
+
+
+@registerformatvariant
+class sidedata(requirementformatvariant):
+    name = b'sidedata'
+
+    _requirement = localrepo.SIDEDATA_REQUIREMENT
 
-    upgrademessage = _('Revlog supports delta chain with more unused data '
-                       'between payload. These gaps will be skipped at read '
-                       'time. This allows for better delta chains, making a '
-                       'better compression and faster exchange with server.')
+    default = False
+
+    description = _(
+        b'Allows storage of extra data alongside a revision, '
+        b'unlocking various caching options.'
+    )
+
+    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
+
+
+@registerformatvariant
+class copiessdc(requirementformatvariant):
+    name = b'copies-sdc'
+
+    _requirement = localrepo.COPIESSDC_REQUIREMENT
+
+    default = False
+
+    description = _(b'Stores copies information alongside changesets.')
+
+    upgrademessage = _(
+        b'Allows using a more efficient algorithm to deal with copy tracing.'
+    )
+
 
 @registerformatvariant
 class removecldeltachain(formatvariant):
-    name = 'plain-cl-delta'
+    name = b'plain-cl-delta'
 
     default = True
 
-    description = _('changelog storage is using deltas instead of '
-                    'raw entries; changelog reading and any '
-                    'operation relying on changelog data are slower '
-                    'than they could be')
+    description = _(
+        b'changelog storage is using deltas instead of '
+        b'raw entries; changelog reading and any '
+        b'operation relying on changelog data are slower '
+        b'than they could be'
+    )
 
-    upgrademessage = _('changelog storage will be reformated to '
-                       'store raw entries; changelog reading will be '
-                       'faster; changelog size may be reduced')
+    upgrademessage = _(
+        b'changelog storage will be reformatted to '
+        b'store raw entries; changelog reading will be '
+        b'faster; changelog size may be reduced'
+    )
 
     @staticmethod
     def fromrepo(repo):
@@ -337,16 +419,20 @@
     def fromconfig(repo):
         return True
 
+
 @registerformatvariant
 class compressionengine(formatvariant):
-    name = 'compression'
-    default = 'zlib'
+    name = b'compression'
+    default = b'zlib'
 
-    description = _('Compresion algorithm used to compress data. '
-                    'Some engine are faster than other')
+    description = _(
+        b'Compression algorithm used to compress data. '
+        b'Some engines are faster than others.'
+    )
 
-    upgrademessage = _('revlog content will be recompressed with the new '
-                       'algorithm.')
+    upgrademessage = _(
+        b'revlog content will be recompressed with the new algorithm.'
+    )
 
     @classmethod
     def fromrepo(cls, repo):
@@ -354,50 +440,52 @@
         # strictly speaking, revlog seems to support mixed compression styles.
         #
         # The compression used for new entries will be "the last one"
-        compression = 'zlib'
+        compression = b'zlib'
         for req in repo.requirements:
             prefix = req.startswith
-            if prefix('revlog-compression-') or prefix('exp-compression-'):
-                compression = req.split('-', 2)[2]
+            if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
+                compression = req.split(b'-', 2)[2]
         return compression
 
     @classmethod
     def fromconfig(cls, repo):
-        return repo.ui.config('format', 'revlog-compression')
+        return repo.ui.config(b'format', b'revlog-compression')
+
 
 @registerformatvariant
 class compressionlevel(formatvariant):
-    name = 'compression-level'
-    default = 'default'
+    name = b'compression-level'
+    default = b'default'
 
-    description = _('compression level')
+    description = _(b'compression level')
 
-    upgrademessage = _('revlog content will be recompressed')
+    upgrademessage = _(b'revlog content will be recompressed')
 
     @classmethod
     def fromrepo(cls, repo):
         comp = compressionengine.fromrepo(repo)
         level = None
-        if comp == 'zlib':
-            level = repo.ui.configint('storage', 'revlog.zlib.level')
-        elif comp == 'zstd':
-            level = repo.ui.configint('storage', 'revlog.zstd.level')
+        if comp == b'zlib':
+            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
+        elif comp == b'zstd':
+            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
         if level is None:
-            return 'default'
+            return b'default'
         return bytes(level)
 
     @classmethod
     def fromconfig(cls, repo):
         comp = compressionengine.fromconfig(repo)
         level = None
-        if comp == 'zlib':
-            level = repo.ui.configint('storage', 'revlog.zlib.level')
-        elif comp == 'zstd':
-            level = repo.ui.configint('storage', 'revlog.zstd.level')
+        if comp == b'zlib':
+            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
+        elif comp == b'zstd':
+            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
         if level is None:
-            return 'default'
+            return b'default'
         return bytes(level)
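
Both variants read the same configuration keys; an hgrc sketch matching the
lookups above (7 is an arbitrary example level)::

   [format]
   revlog-compression = zstd

   [storage]
   revlog.zstd.level = 7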
 
+
 def finddeficiencies(repo):
     """returns a list of deficiencies that the repo suffer from"""
     deficiencies = []
@@ -412,6 +500,7 @@
 
     return deficiencies
 
+
 # search without '-' to support older forms on newer clients.
 #
 # We don't enforce backward compatibility for debug commands, so this
 # might eventually be dropped. However, having to use two different
 # forms in scripts when comparing results is annoying enough to add
 # backward compatibility for a while.
 legacy_opts_map = {
-    'redeltaparent': 're-delta-parent',
-    'redeltamultibase': 're-delta-multibase',
-    'redeltaall': 're-delta-all',
-    'redeltafulladd': 're-delta-fulladd',
+    b'redeltaparent': b're-delta-parent',
+    b'redeltamultibase': b're-delta-multibase',
+    b'redeltaall': b're-delta-all',
+    b'redeltafulladd': b're-delta-fulladd',
 }
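
A sketch of how such a map is typically applied to user-supplied names
(``normalize`` is a hypothetical helper, not part of this change)::

   def normalize(optnames):
       # translate legacy dash-less spellings to their modern forms
       return [legacy_opts_map.get(n, n) for n in optnames]

   normalize([b'redeltaall'])  # -> [b're-delta-all']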
 
+
 def findoptimizations(repo):
     """Determine optimisation that could be used during upgrade"""
     # These are unconditionally added. There is logic later that figures out
     # which ones to apply.
     optimizations = []
 
-    optimizations.append(improvement(
-        name='re-delta-parent',
-        type=optimisation,
-        description=_('deltas within internal storage will be recalculated to '
-                      'choose an optimal base revision where this was not '
-                      'already done; the size of the repository may shrink and '
-                      'various operations may become faster; the first time '
-                      'this optimization is performed could slow down upgrade '
-                      'execution considerably; subsequent invocations should '
-                      'not run noticeably slower'),
-        upgrademessage=_('deltas within internal storage will choose a new '
-                         'base revision if needed')))
+    optimizations.append(
+        improvement(
+            name=b're-delta-parent',
+            type=optimisation,
+            description=_(
+                b'deltas within internal storage will be recalculated to '
+                b'choose an optimal base revision where this was not '
+                b'already done; the size of the repository may shrink and '
+                b'various operations may become faster; the first time '
+                b'this optimization is performed could slow down upgrade '
+                b'execution considerably; subsequent invocations should '
+                b'not run noticeably slower'
+            ),
+            upgrademessage=_(
+                b'deltas within internal storage will choose a new '
+                b'base revision if needed'
+            ),
+        )
+    )
 
-    optimizations.append(improvement(
-        name='re-delta-multibase',
-        type=optimisation,
-        description=_('deltas within internal storage will be recalculated '
-                      'against multiple base revision and the smallest '
-                      'difference will be used; the size of the repository may '
-                      'shrink significantly when there are many merges; this '
-                      'optimization will slow down execution in proportion to '
-                      'the number of merges in the repository and the amount '
-                      'of files in the repository; this slow down should not '
-                      'be significant unless there are tens of thousands of '
-                      'files and thousands of merges'),
-        upgrademessage=_('deltas within internal storage will choose an '
-                         'optimal delta by computing deltas against multiple '
-                         'parents; may slow down execution time '
-                         'significantly')))
+    optimizations.append(
+        improvement(
+            name=b're-delta-multibase',
+            type=optimisation,
+            description=_(
+                b'deltas within internal storage will be recalculated '
+                b'against multiple base revisions and the smallest '
+                b'difference will be used; the size of the repository may '
+                b'shrink significantly when there are many merges; this '
+                b'optimization will slow down execution in proportion to '
+                b'the number of merges in the repository and the number '
+                b'of files in the repository; this slowdown should not '
+                b'be significant unless there are tens of thousands of '
+                b'files and thousands of merges'
+            ),
+            upgrademessage=_(
+                b'deltas within internal storage will choose an '
+                b'optimal delta by computing deltas against multiple '
+                b'parents; may slow down execution time '
+                b'significantly'
+            ),
+        )
+    )
 
-    optimizations.append(improvement(
-        name='re-delta-all',
-        type=optimisation,
-        description=_('deltas within internal storage will always be '
-                      'recalculated without reusing prior deltas; this will '
-                      'likely make execution run several times slower; this '
-                      'optimization is typically not needed'),
-        upgrademessage=_('deltas within internal storage will be fully '
-                         'recomputed; this will likely drastically slow down '
-                         'execution time')))
+    optimizations.append(
+        improvement(
+            name=b're-delta-all',
+            type=optimisation,
+            description=_(
+                b'deltas within internal storage will always be '
+                b'recalculated without reusing prior deltas; this will '
+                b'likely make execution run several times slower; this '
+                b'optimization is typically not needed'
+            ),
+            upgrademessage=_(
+                b'deltas within internal storage will be fully '
+                b'recomputed; this will likely drastically slow down '
+                b'execution time'
+            ),
+        )
+    )
 
-    optimizations.append(improvement(
-        name='re-delta-fulladd',
-        type=optimisation,
-        description=_('every revision will be re-added as if it was new '
-                      'content. It will go through the full storage '
-                      'mechanism giving extensions a chance to process it '
-                      '(eg. lfs). This is similar to "re-delta-all" but even '
-                      'slower since more logic is involved.'),
-        upgrademessage=_('each revision will be added as new content to the '
-                         'internal storage; this will likely drastically slow '
-                         'down execution time, but some extensions might need '
-                         'it')))
+    optimizations.append(
+        improvement(
+            name=b're-delta-fulladd',
+            type=optimisation,
+            description=_(
+                b'every revision will be re-added as if it were new '
+                b'content. It will go through the full storage '
+                b'mechanism, giving extensions a chance to process it '
+                b'(e.g. lfs). This is similar to "re-delta-all" but even '
+                b'slower since more logic is involved.'
+            ),
+            upgrademessage=_(
+                b'each revision will be added as new content to the '
+                b'internal storage; this will likely drastically slow '
+                b'down execution time, but some extensions might need '
+                b'it'
+            ),
+        )
+    )
 
     return optimizations
 
+
 def determineactions(repo, deficiencies, sourcereqs, destreqs):
     """Determine upgrade actions that will be performed.
 
@@ -519,21 +638,105 @@
 
     return newactions
 
+
 def _revlogfrompath(repo, path):
     """Obtain a revlog from a repo path.
 
     An instance of the appropriate class is returned.
     """
-    if path == '00changelog.i':
+    if path == b'00changelog.i':
         return changelog.changelog(repo.svfs)
-    elif path.endswith('00manifest.i'):
-        mandir = path[:-len('00manifest.i')]
+    elif path.endswith(b'00manifest.i'):
+        mandir = path[: -len(b'00manifest.i')]
         return manifest.manifestrevlog(repo.svfs, tree=mandir)
     else:
-        #reverse of "/".join(("data", path + ".i"))
+        # reverse of "/".join(("data", path + ".i"))
         return filelog.filelog(repo.svfs, path[5:-2])
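
The filelog branch relies on store paths having the shape ``data/<file>.i``;
slicing off the 5-byte ``data/`` prefix and the 2-byte ``.i`` suffix recovers
the tracked name::

   path = b'data/foo.txt.i'
   path[5:-2]  # -> b'foo.txt', the file this revlog tracks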
 
-def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents):
+
+def _copyrevlog(tr, destrepo, oldrl, unencodedname):
+    """copy all relevant files for `oldrl` into `destrepo` store
+
+    Files are copied "as is" without any transformation. The copy is performed
+    without extra checks. Callers are responsible for making sure the copied
+    content is compatible with the format of the destination repository.
+    """
+    oldrl = getattr(oldrl, '_revlog', oldrl)
+    newrl = _revlogfrompath(destrepo, unencodedname)
+    newrl = getattr(newrl, '_revlog', newrl)
+
+    oldvfs = oldrl.opener
+    newvfs = newrl.opener
+    oldindex = oldvfs.join(oldrl.indexfile)
+    newindex = newvfs.join(newrl.indexfile)
+    olddata = oldvfs.join(oldrl.datafile)
+    newdata = newvfs.join(newrl.datafile)
+
+    with newvfs(newrl.indexfile, b'w'):
+        pass  # create all the directories
+
+    util.copyfile(oldindex, newindex)
+    copydata = oldrl.opener.exists(oldrl.datafile)
+    if copydata:
+        util.copyfile(olddata, newdata)
+
+    if not (
+        unencodedname.endswith(b'00changelog.i')
+        or unencodedname.endswith(b'00manifest.i')
+    ):
+        destrepo.svfs.fncache.add(unencodedname)
+        if copydata:
+            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
+
+
+UPGRADE_CHANGELOG = object()
+UPGRADE_MANIFEST = object()
+UPGRADE_FILELOG = object()
+
+UPGRADE_ALL_REVLOGS = frozenset(
+    [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG]
+)
+
+
+def getsidedatacompanion(srcrepo, dstrepo):
+    sidedatacompanion = None
+    removedreqs = srcrepo.requirements - dstrepo.requirements
+    addedreqs = dstrepo.requirements - srcrepo.requirements
+    if localrepo.SIDEDATA_REQUIREMENT in removedreqs:
+
+        def sidedatacompanion(rl, rev):
+            rl = getattr(rl, '_revlog', rl)
+            if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
+                return True, (), {}
+            return False, (), {}
+
+    elif localrepo.COPIESSDC_REQUIREMENT in addedreqs:
+        sidedatacompanion = copies.getsidedataadder(srcrepo, dstrepo)
+    elif localrepo.COPIESSDC_REQUIREMENT in removedreqs:
+        sidedatacompanion = copies.getsidedataremover(srcrepo, dstrepo)
+    return sidedatacompanion
+
+
+def matchrevlog(revlogfilter, entry):
+    """check is a revlog is selected for cloning
+
+    The store entry is checked against the passed filter"""
+    if entry.endswith(b'00changelog.i'):
+        return UPGRADE_CHANGELOG in revlogfilter
+    elif entry.endswith(b'00manifest.i'):
+        return UPGRADE_MANIFEST in revlogfilter
+    return UPGRADE_FILELOG in revlogfilter
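
For instance, a filter selecting only the changelog behaves as follows::

   only_cl = frozenset([UPGRADE_CHANGELOG])
   matchrevlog(only_cl, b'00changelog.i')   # True
   matchrevlog(only_cl, b'00manifest.i')    # False
   matchrevlog(only_cl, b'data/foo.txt.i')  # False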
+
+
+def _clonerevlogs(
+    ui,
+    srcrepo,
+    dstrepo,
+    tr,
+    deltareuse,
+    forcedeltabothparents,
+    revlogs=UPGRADE_ALL_REVLOGS,
+):
     """Copy revlogs between 2 repos."""
     revcount = 0
     srcsize = 0
@@ -554,20 +757,26 @@
     crawsize = 0
     cdstsize = 0
 
+    alldatafiles = list(srcrepo.store.walk())
+
     # Perform a pass to collect metadata. This validates we can open all
     # source files and allows a unified progress bar to be displayed.
-    for unencoded, encoded, size in srcrepo.store.walk():
-        if unencoded.endswith('.d'):
+    for unencoded, encoded, size in alldatafiles:
+        if unencoded.endswith(b'.d'):
             continue
 
         rl = _revlogfrompath(srcrepo, unencoded)
 
-        info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
-                              trackedsize=True, storedsize=True)
+        info = rl.storageinfo(
+            exclusivefiles=True,
+            revisionscount=True,
+            trackedsize=True,
+            storedsize=True,
+        )
 
-        revcount += info['revisionscount'] or 0
-        datasize = info['storedsize'] or 0
-        rawsize = info['trackedsize'] or 0
+        revcount += info[b'revisionscount'] or 0
+        datasize = info[b'storedsize'] or 0
+        rawsize = info[b'trackedsize'] or 0
 
         srcsize += datasize
         srcrawsize += rawsize
@@ -588,77 +797,133 @@
             fsrcsize += datasize
             frawsize += rawsize
         else:
-            error.ProgrammingError('unknown revlog type')
+            raise error.ProgrammingError(b'unknown revlog type')
 
     if not revcount:
         return
 
-    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
-               '%d in changelog)\n') %
-             (revcount, frevcount, mrevcount, crevcount))
-    ui.write(_('migrating %s in store; %s tracked data\n') % (
-             (util.bytecount(srcsize), util.bytecount(srcrawsize))))
+    ui.write(
+        _(
+            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
+            b'%d in changelog)\n'
+        )
+        % (revcount, frevcount, mrevcount, crevcount)
+    )
+    ui.write(
+        _(b'migrating %s in store; %s tracked data\n')
+        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
+    )
 
     # Used to keep track of progress.
     progress = None
+
     def oncopiedrevision(rl, rev, node):
         progress.increment()
 
+    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
+
     # Do the actual copying.
     # FUTURE this operation can be farmed off to worker processes.
     seen = set()
-    for unencoded, encoded, size in srcrepo.store.walk():
-        if unencoded.endswith('.d'):
+    for unencoded, encoded, size in alldatafiles:
+        if unencoded.endswith(b'.d'):
             continue
 
         oldrl = _revlogfrompath(srcrepo, unencoded)
-        newrl = _revlogfrompath(dstrepo, unencoded)
 
-        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
-            ui.write(_('finished migrating %d manifest revisions across %d '
-                       'manifests; change in size: %s\n') %
-                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
+        if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
+            ui.write(
+                _(
+                    b'finished migrating %d manifest revisions across %d '
+                    b'manifests; change in size: %s\n'
+                )
+                % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
+            )
 
-            ui.write(_('migrating changelog containing %d revisions '
-                       '(%s in store; %s tracked data)\n') %
-                     (crevcount, util.bytecount(csrcsize),
-                      util.bytecount(crawsize)))
-            seen.add('c')
-            progress = srcrepo.ui.makeprogress(_('changelog revisions'),
-                                               total=crevcount)
-        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
-            ui.write(_('finished migrating %d filelog revisions across %d '
-                       'filelogs; change in size: %s\n') %
-                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
+            ui.write(
+                _(
+                    b'migrating changelog containing %d revisions '
+                    b'(%s in store; %s tracked data)\n'
+                )
+                % (
+                    crevcount,
+                    util.bytecount(csrcsize),
+                    util.bytecount(crawsize),
+                )
+            )
+            seen.add(b'c')
+            progress = srcrepo.ui.makeprogress(
+                _(b'changelog revisions'), total=crevcount
+            )
+        elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
+            ui.write(
+                _(
+                    b'finished migrating %d filelog revisions across %d '
+                    b'filelogs; change in size: %s\n'
+                )
+                % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
+            )
 
-            ui.write(_('migrating %d manifests containing %d revisions '
-                       '(%s in store; %s tracked data)\n') %
-                     (mcount, mrevcount, util.bytecount(msrcsize),
-                      util.bytecount(mrawsize)))
-            seen.add('m')
+            ui.write(
+                _(
+                    b'migrating %d manifests containing %d revisions '
+                    b'(%s in store; %s tracked data)\n'
+                )
+                % (
+                    mcount,
+                    mrevcount,
+                    util.bytecount(msrcsize),
+                    util.bytecount(mrawsize),
+                )
+            )
+            seen.add(b'm')
             if progress:
                 progress.complete()
-            progress = srcrepo.ui.makeprogress(_('manifest revisions'),
-                                               total=mrevcount)
-        elif 'f' not in seen:
-            ui.write(_('migrating %d filelogs containing %d revisions '
-                       '(%s in store; %s tracked data)\n') %
-                     (fcount, frevcount, util.bytecount(fsrcsize),
-                      util.bytecount(frawsize)))
-            seen.add('f')
+            progress = srcrepo.ui.makeprogress(
+                _(b'manifest revisions'), total=mrevcount
+            )
+        elif b'f' not in seen:
+            ui.write(
+                _(
+                    b'migrating %d filelogs containing %d revisions '
+                    b'(%s in store; %s tracked data)\n'
+                )
+                % (
+                    fcount,
+                    frevcount,
+                    util.bytecount(fsrcsize),
+                    util.bytecount(frawsize),
+                )
+            )
+            seen.add(b'f')
             if progress:
                 progress.complete()
-            progress = srcrepo.ui.makeprogress(_('file revisions'),
-                                               total=frevcount)
-
+            progress = srcrepo.ui.makeprogress(
+                _(b'file revisions'), total=frevcount
+            )
 
-        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
-        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
-                    deltareuse=deltareuse,
-                    forcedeltabothparents=forcedeltabothparents)
+        if matchrevlog(revlogs, unencoded):
+            ui.note(
+                _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
+            )
+            newrl = _revlogfrompath(dstrepo, unencoded)
+            oldrl.clone(
+                tr,
+                newrl,
+                addrevisioncb=oncopiedrevision,
+                deltareuse=deltareuse,
+                forcedeltabothparents=forcedeltabothparents,
+                sidedatacompanion=sidedatacompanion,
+            )
+        else:
+            msg = _(b'blindly copying %s containing %i revisions\n')
+            ui.note(msg % (unencoded, len(oldrl)))
+            _copyrevlog(tr, dstrepo, oldrl, unencoded)
+
+            newrl = _revlogfrompath(dstrepo, unencoded)
 
         info = newrl.storageinfo(storedsize=True)
-        datasize = info['storedsize'] or 0
+        datasize = info[b'storedsize'] or 0
 
         dstsize += datasize
 
@@ -671,11 +936,22 @@
 
     progress.complete()
 
-    ui.write(_('finished migrating %d changelog revisions; change in size: '
-               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
+    ui.write(
+        _(
+            b'finished migrating %d changelog revisions; change in size: '
+            b'%s\n'
+        )
+        % (crevcount, util.bytecount(cdstsize - csrcsize))
+    )
 
-    ui.write(_('finished migrating %d total revisions; total change in store '
-               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
+    ui.write(
+        _(
+            b'finished migrating %d total revisions; total change in store '
+            b'size: %s\n'
+        )
+        % (revcount, util.bytecount(dstsize - srcsize))
+    )
+
 
 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
     """Determine whether to copy a store file during upgrade.
@@ -694,20 +970,21 @@
     Function should return ``True`` if the file is to be copied.
     """
     # Skip revlogs.
-    if path.endswith(('.i', '.d')):
+    if path.endswith((b'.i', b'.d')):
         return False
     # Skip transaction related files.
-    if path.startswith('undo'):
+    if path.startswith(b'undo'):
         return False
     # Only copy regular files.
     if mode != stat.S_IFREG:
         return False
     # Skip other skipped files.
-    if path in ('lock', 'fncache'):
+    if path in (b'lock', b'fncache'):
         return False
 
     return True
 
+
 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
     """Hook point for extensions to perform additional actions during upgrade.
 
@@ -715,7 +992,10 @@
     before the new store is swapped into the original location.
     """
 
-def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
+
+def _upgraderepo(
+    ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
+):
     """Do the low-level work of upgrading a repository.
 
     The upgrade is effectively performed as a copy between a source
@@ -728,127 +1008,200 @@
     assert srcrepo.currentwlock()
     assert dstrepo.currentwlock()
 
-    ui.write(_('(it is safe to interrupt this process any time before '
-               'data migration completes)\n'))
+    ui.write(
+        _(
+            b'(it is safe to interrupt this process any time before '
+            b'data migration completes)\n'
+        )
+    )
 
-    if 're-delta-all' in actions:
+    if b're-delta-all' in actions:
         deltareuse = revlog.revlog.DELTAREUSENEVER
-    elif 're-delta-parent' in actions:
+    elif b're-delta-parent' in actions:
         deltareuse = revlog.revlog.DELTAREUSESAMEREVS
-    elif 're-delta-multibase' in actions:
+    elif b're-delta-multibase' in actions:
         deltareuse = revlog.revlog.DELTAREUSESAMEREVS
-    elif 're-delta-fulladd' in actions:
+    elif b're-delta-fulladd' in actions:
         deltareuse = revlog.revlog.DELTAREUSEFULLADD
     else:
         deltareuse = revlog.revlog.DELTAREUSEALWAYS
 
-    with dstrepo.transaction('upgrade') as tr:
-        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
-                     're-delta-multibase' in actions)
+    with dstrepo.transaction(b'upgrade') as tr:
+        _clonerevlogs(
+            ui,
+            srcrepo,
+            dstrepo,
+            tr,
+            deltareuse,
+            b're-delta-multibase' in actions,
+            revlogs=revlogs,
+        )
 
     # Now copy other files in the store directory.
     # The sorted() makes execution deterministic.
-    for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
-        if not _filterstorefile(srcrepo, dstrepo, requirements,
-                                       p, kind, st):
+    for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
+        if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
             continue
 
-        srcrepo.ui.write(_('copying %s\n') % p)
+        srcrepo.ui.write(_(b'copying %s\n') % p)
         src = srcrepo.store.rawvfs.join(p)
         dst = dstrepo.store.rawvfs.join(p)
         util.copyfile(src, dst, copystat=True)
 
     _finishdatamigration(ui, srcrepo, dstrepo, requirements)
 
-    ui.write(_('data fully migrated to temporary repository\n'))
+    ui.write(_(b'data fully migrated to temporary repository\n'))
 
-    backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
+    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
     backupvfs = vfsmod.vfs(backuppath)
 
     # Make a backup of requires file first, as it is the first to be modified.
-    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
+    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
 
     # We install an arbitrary requirement that clients must not support
     # as a mechanism to lock out new clients during the data swap. This is
     # better than allowing a client to continue while the repository is in
     # an inconsistent state.
-    ui.write(_('marking source repository as being upgraded; clients will be '
-               'unable to read from repository\n'))
-    scmutil.writerequires(srcrepo.vfs,
-                          srcrepo.requirements | {'upgradeinprogress'})
+    ui.write(
+        _(
+            b'marking source repository as being upgraded; clients will be '
+            b'unable to read from repository\n'
+        )
+    )
+    scmutil.writerequires(
+        srcrepo.vfs, srcrepo.requirements | {b'upgradeinprogress'}
+    )
 
-    ui.write(_('starting in-place swap of repository data\n'))
-    ui.write(_('replaced files will be backed up at %s\n') %
-             backuppath)
+    ui.write(_(b'starting in-place swap of repository data\n'))
+    ui.write(_(b'replaced files will be backed up at %s\n') % backuppath)
 
     # Now swap in the new store directory. Doing it as a rename should make
     # the operation nearly instantaneous and atomic (at least in well-behaved
     # environments).
-    ui.write(_('replacing store...\n'))
+    ui.write(_(b'replacing store...\n'))
     tstart = util.timer()
-    util.rename(srcrepo.spath, backupvfs.join('store'))
+    util.rename(srcrepo.spath, backupvfs.join(b'store'))
     util.rename(dstrepo.spath, srcrepo.spath)
     elapsed = util.timer() - tstart
-    ui.write(_('store replacement complete; repository was inconsistent for '
-               '%0.1fs\n') % elapsed)
+    ui.write(
+        _(
+            b'store replacement complete; repository was inconsistent for '
+            b'%0.1fs\n'
+        )
+        % elapsed
+    )
 
     # We first write the requirements file. Any new requirements will lock
     # out legacy clients.
-    ui.write(_('finalizing requirements file and making repository readable '
-               'again\n'))
+    ui.write(
+        _(
+            b'finalizing requirements file and making repository readable '
+            b'again\n'
+        )
+    )
     scmutil.writerequires(srcrepo.vfs, requirements)
 
     # The lock file from the old store won't be removed because nothing has a
     # reference to its new location. So clean it up manually. Alternatively, we
     # could update srcrepo.svfs and other variables to point to the new
     # location. This is simpler.
-    backupvfs.unlink('store/lock')
+    backupvfs.unlink(b'store/lock')
 
     return backuppath
 
-def upgraderepo(ui, repo, run=False, optimize=None, backup=True):
+
+def upgraderepo(
+    ui,
+    repo,
+    run=False,
+    optimize=None,
+    backup=True,
+    manifest=None,
+    changelog=None,
+):
     """Upgrade a repository in place."""
     if optimize is None:
         optimize = []
     optimize = set(legacy_opts_map.get(o, o) for o in optimize)
     repo = repo.unfiltered()
 
+    revlogs = set(UPGRADE_ALL_REVLOGS)
+    specentries = ((b'c', changelog), (b'm', manifest))
+    specified = [(y, x) for (y, x) in specentries if x is not None]
+    if specified:
+            # only a subset of the revlogs was requested to be recloned
+        if any(x for y, x in specified):
+            revlogs = set()
+            for r, enabled in specified:
+                if enabled:
+                    if r == b'c':
+                        revlogs.add(UPGRADE_CHANGELOG)
+                    elif r == b'm':
+                        revlogs.add(UPGRADE_MANIFEST)
+        else:
+            # none are enabled
+            for r, __ in specified:
+                if r == b'c':
+                    revlogs.discard(UPGRADE_CHANGELOG)
+                elif r == b'm':
+                    revlogs.discard(UPGRADE_MANIFEST)
+
     # Ensure the repository can be upgraded.
     missingreqs = requiredsourcerequirements(repo) - repo.requirements
     if missingreqs:
-        raise error.Abort(_('cannot upgrade repository; requirement '
-                            'missing: %s') % _(', ').join(sorted(missingreqs)))
+        raise error.Abort(
+            _(b'cannot upgrade repository; requirement missing: %s')
+            % _(b', ').join(sorted(missingreqs))
+        )
 
     blockedreqs = blocksourcerequirements(repo) & repo.requirements
     if blockedreqs:
-        raise error.Abort(_('cannot upgrade repository; unsupported source '
-                            'requirement: %s') %
-                          _(', ').join(sorted(blockedreqs)))
+        raise error.Abort(
+            _(
+                b'cannot upgrade repository; unsupported source '
+                b'requirement: %s'
+            )
+            % _(b', ').join(sorted(blockedreqs))
+        )
 
     # FUTURE there is potentially a need to control the wanted requirements via
     # command arguments or via an extension hook point.
     newreqs = localrepo.newreporequirements(
-        repo.ui, localrepo.defaultcreateopts(repo.ui))
+        repo.ui, localrepo.defaultcreateopts(repo.ui)
+    )
     newreqs.update(preservedrequirements(repo))
 
-    noremovereqs = (repo.requirements - newreqs -
-                   supportremovedrequirements(repo))
+    noremovereqs = (
+        repo.requirements - newreqs - supportremovedrequirements(repo)
+    )
     if noremovereqs:
-        raise error.Abort(_('cannot upgrade repository; requirement would be '
-                            'removed: %s') % _(', ').join(sorted(noremovereqs)))
+        raise error.Abort(
+            _(
+                b'cannot upgrade repository; requirement would be '
+                b'removed: %s'
+            )
+            % _(b', ').join(sorted(noremovereqs))
+        )
 
-    noaddreqs = (newreqs - repo.requirements -
-                 allowednewrequirements(repo))
+    noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
     if noaddreqs:
-        raise error.Abort(_('cannot upgrade repository; do not support adding '
-                            'requirement: %s') %
-                          _(', ').join(sorted(noaddreqs)))
+        raise error.Abort(
+            _(
+                b'cannot upgrade repository; do not support adding '
+                b'requirement: %s'
+            )
+            % _(b', ').join(sorted(noaddreqs))
+        )
 
     unsupportedreqs = newreqs - supporteddestrequirements(repo)
     if unsupportedreqs:
-        raise error.Abort(_('cannot upgrade repository; do not support '
-                            'destination requirement: %s') %
-                          _(', ').join(sorted(unsupportedreqs)))
+        raise error.Abort(
+            _(
+                b'cannot upgrade repository; do not support '
+                b'destination requirement: %s'
+            )
+            % _(b', ').join(sorted(unsupportedreqs))
+        )
 
     # Find and validate all improvements that can be made.
     alloptimizations = findoptimizations(repo)
@@ -860,36 +1213,59 @@
             optimizations.append(o)
             optimize.discard(o.name)
 
-    if optimize: # anything left is unknown
-        raise error.Abort(_('unknown optimization action requested: %s') %
-                          ', '.join(sorted(optimize)),
-                          hint=_('run without arguments to see valid '
-                                 'optimizations'))
+    if optimize:  # anything left is unknown
+        raise error.Abort(
+            _(b'unknown optimization action requested: %s')
+            % b', '.join(sorted(optimize)),
+            hint=_(b'run without arguments to see valid optimizations'),
+        )
 
     deficiencies = finddeficiencies(repo)
     actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
-    actions.extend(o for o in sorted(optimizations)
-                   # determineactions could have added optimisation
-                   if o not in actions)
+    actions.extend(
+        o
+        for o in sorted(optimizations)
+        # determineactions could have added optimizations
+        if o not in actions
+    )
+
+    removedreqs = repo.requirements - newreqs
+    addedreqs = newreqs - repo.requirements
+
+    if revlogs != UPGRADE_ALL_REVLOGS:
+        incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
+        if incompatible:
+            msg = _(
+                b'ignoring revlogs selection flags, format requirements '
+                b'change: %s\n'
+            )
+            ui.warn(msg % b', '.join(sorted(incompatible)))
+            revlogs = UPGRADE_ALL_REVLOGS
 
     def printrequirements():
-        ui.write(_('requirements\n'))
-        ui.write(_('   preserved: %s\n') %
-                 _(', ').join(sorted(newreqs & repo.requirements)))
+        ui.write(_(b'requirements\n'))
+        ui.write(
+            _(b'   preserved: %s\n')
+            % _(b', ').join(sorted(newreqs & repo.requirements))
+        )
 
         if repo.requirements - newreqs:
-            ui.write(_('   removed: %s\n') %
-                     _(', ').join(sorted(repo.requirements - newreqs)))
+            ui.write(
+                _(b'   removed: %s\n')
+                % _(b', ').join(sorted(repo.requirements - newreqs))
+            )
 
         if newreqs - repo.requirements:
-            ui.write(_('   added: %s\n') %
-                     _(', ').join(sorted(newreqs - repo.requirements)))
+            ui.write(
+                _(b'   added: %s\n')
+                % _(b', ').join(sorted(newreqs - repo.requirements))
+            )
 
-        ui.write('\n')
+        ui.write(b'\n')
 
     def printupgradeactions():
         for a in actions:
-            ui.write('%s\n   %s\n\n' % (a.name, a.upgrademessage))
+            ui.write(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))
 
     if not run:
         fromconfig = []
@@ -904,24 +1280,40 @@
         if fromconfig or onlydefault:
 
             if fromconfig:
-                ui.write(_('repository lacks features recommended by '
-                           'current config options:\n\n'))
+                ui.write(
+                    _(
+                        b'repository lacks features recommended by '
+                        b'current config options:\n\n'
+                    )
+                )
                 for i in fromconfig:
-                    ui.write('%s\n   %s\n\n' % (i.name, i.description))
+                    ui.write(b'%s\n   %s\n\n' % (i.name, i.description))
 
             if onlydefault:
-                ui.write(_('repository lacks features used by the default '
-                           'config options:\n\n'))
+                ui.write(
+                    _(
+                        b'repository lacks features used by the default '
+                        b'config options:\n\n'
+                    )
+                )
                 for i in onlydefault:
-                    ui.write('%s\n   %s\n\n' % (i.name, i.description))
+                    ui.write(b'%s\n   %s\n\n' % (i.name, i.description))
 
-            ui.write('\n')
+            ui.write(b'\n')
         else:
-            ui.write(_('(no feature deficiencies found in existing '
-                       'repository)\n'))
+            ui.write(
+                _(
+                    b'(no feature deficiencies found in existing '
+                    b'repository)\n'
+                )
+            )
 
-        ui.write(_('performing an upgrade with "--run" will make the following '
-                   'changes:\n\n'))
+        ui.write(
+            _(
+                b'performing an upgrade with "--run" will make the following '
+                b'changes:\n\n'
+            )
+        )
 
         printrequirements()
         printupgradeactions()
@@ -929,52 +1321,67 @@
         unusedoptimize = [i for i in alloptimizations if i not in actions]
 
         if unusedoptimize:
-            ui.write(_('additional optimizations are available by specifying '
-                     '"--optimize <name>":\n\n'))
+            ui.write(
+                _(
+                    b'additional optimizations are available by specifying '
+                    b'"--optimize <name>":\n\n'
+                )
+            )
             for i in unusedoptimize:
-                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
+                ui.write(_(b'%s\n   %s\n\n') % (i.name, i.description))
         return
 
     # Else we're in the run=true case.
-    ui.write(_('upgrade will perform the following actions:\n\n'))
+    ui.write(_(b'upgrade will perform the following actions:\n\n'))
     printrequirements()
     printupgradeactions()
 
     upgradeactions = [a.name for a in actions]
 
-    ui.write(_('beginning upgrade...\n'))
+    ui.write(_(b'beginning upgrade...\n'))
     with repo.wlock(), repo.lock():
-        ui.write(_('repository locked and read-only\n'))
+        ui.write(_(b'repository locked and read-only\n'))
         # Our strategy for upgrading the repository is to create a new,
         # temporary repository, write data to it, then do a swap of the
         # data. There are less heavyweight ways to do this, but it is easier
         # to create a new repo object than to instantiate all the components
         # (like the store) separately.
-        tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
+        tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
         backuppath = None
         try:
-            ui.write(_('creating temporary repository to stage migrated '
-                       'data: %s\n') % tmppath)
+            ui.write(
+                _(
+                    b'creating temporary repository to stage migrated '
+                    b'data: %s\n'
+                )
+                % tmppath
+            )
 
             # clone ui without using ui.copy because repo.ui is protected
             repoui = repo.ui.__class__(repo.ui)
             dstrepo = hg.repository(repoui, path=tmppath, create=True)
 
             with dstrepo.wlock(), dstrepo.lock():
-                backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
-                                          upgradeactions)
+                backuppath = _upgraderepo(
+                    ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
+                )
             if not (backup or backuppath is None):
-                ui.write(_('removing old repository content%s\n') % backuppath)
+                ui.write(_(b'removing old repository content %s\n') % backuppath)
                 repo.vfs.rmtree(backuppath, forcibly=True)
                 backuppath = None
 
         finally:
-            ui.write(_('removing temporary repository %s\n') % tmppath)
+            ui.write(_(b'removing temporary repository %s\n') % tmppath)
             repo.vfs.rmtree(tmppath, forcibly=True)
 
             if backuppath:
-                ui.warn(_('copy of old repository backed up at %s\n') %
-                        backuppath)
-                ui.warn(_('the old repository will not be deleted; remove '
-                          'it to free up disk space once the upgraded '
-                          'repository is verified\n'))
+                ui.warn(
+                    _(b'copy of old repository backed up at %s\n') % backuppath
+                )
+                ui.warn(
+                    _(
+                        b'the old repository will not be deleted; remove '
+                        b'it to free up disk space once the upgraded '
+                        b'repository is verified\n'
+                    )
+                )
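
The new ``manifest`` and ``changelog`` arguments to upgraderepo() above
are tri-state: None means the flag was not given, True selects only the
named revlogs, and False removes them from the full set. A standalone
model of that selection logic, reusing the assumed token values from the
matchrevlog sketch earlier::

   UPGRADE_CHANGELOG = b'changelog'
   UPGRADE_MANIFEST = b'manifest'
   UPGRADE_FILELOGS = b'filelogs'
   UPGRADE_ALL_REVLOGS = frozenset(
       [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
   )

   def selectrevlogs(changelog=None, manifest=None):
       revlogs = set(UPGRADE_ALL_REVLOGS)
       entries = ((UPGRADE_CHANGELOG, changelog), (UPGRADE_MANIFEST, manifest))
       specified = [(r, v) for r, v in entries if v is not None]
       if not specified:
           return revlogs                 # no flags: reclone everything
       if any(v for _r, v in specified):
           # at least one revlog explicitly enabled: keep only those
           return {r for r, v in specified if v}
       # only "disable" flags were given: drop them from the full set
       for r, _v in specified:
           revlogs.discard(r)
       return revlogs

   # selectrevlogs(changelog=True) -> {b'changelog'}
   # selectrevlogs(manifest=False) -> {b'changelog', b'filelogs'}
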
--- a/mercurial/url.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/url.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,6 +15,7 @@
 import sys
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     encoding,
     error,
@@ -25,15 +26,14 @@
     urllibcompat,
     util,
 )
-from .utils import (
-    stringutil,
-)
+from .utils import stringutil
 
 httplib = util.httplib
 stringio = util.stringio
 urlerr = util.urlerr
 urlreq = util.urlreq
 
+
 def escape(s, quote=None):
     '''Replace special characters "&", "<" and ">" with HTML-safe sequences.
     If the optional flag quote is true, the quotation mark character (")
@@ -49,6 +49,7 @@
         s = s.replace(b'"', b"&quot;")
     return s
 
+
 class passwordmgr(object):
     def __init__(self, ui, passwddb):
         self.ui = ui
@@ -71,22 +72,26 @@
             res = httpconnectionmod.readauthforuri(self.ui, authuri, user)
             if res:
                 group, auth = res
-                user, passwd = auth.get('username'), auth.get('password')
-                self.ui.debug("using auth.%s.* for authentication\n" % group)
+                user, passwd = auth.get(b'username'), auth.get(b'password')
+                self.ui.debug(b"using auth.%s.* for authentication\n" % group)
         if not user or not passwd:
             u = util.url(pycompat.bytesurl(authuri))
             u.query = None
             if not self.ui.interactive():
-                raise error.Abort(_('http authorization required for %s') %
-                                  util.hidepassword(bytes(u)))
+                raise error.Abort(
+                    _(b'http authorization required for %s')
+                    % util.hidepassword(bytes(u))
+                )
 
-            self.ui.write(_("http authorization required for %s\n") %
-                          util.hidepassword(bytes(u)))
-            self.ui.write(_("realm: %s\n") % pycompat.bytesurl(realm))
+            self.ui.write(
+                _(b"http authorization required for %s\n")
+                % util.hidepassword(bytes(u))
+            )
+            self.ui.write(_(b"realm: %s\n") % pycompat.bytesurl(realm))
             if user:
-                self.ui.write(_("user: %s\n") % user)
+                self.ui.write(_(b"user: %s\n") % user)
             else:
-                user = self.ui.prompt(_("user:"), default=None)
+                user = self.ui.prompt(_(b"user:"), default=None)
 
             if not passwd:
                 passwd = self.ui.getpass()
@@ -96,37 +101,45 @@
         return (pycompat.strurl(user), pycompat.strurl(passwd))
 
     def _writedebug(self, user, passwd):
-        msg = _('http auth: user %s, password %s\n')
-        self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))
+        msg = _(b'http auth: user %s, password %s\n')
+        self.ui.debug(msg % (user, passwd and b'*' * len(passwd) or b'not set'))
 
     def find_stored_password(self, authuri):
         return self.passwddb.find_user_password(None, authuri)
 
+
 class proxyhandler(urlreq.proxyhandler):
     def __init__(self, ui):
-        proxyurl = (ui.config("http_proxy", "host") or
-                        encoding.environ.get('http_proxy'))
+        proxyurl = ui.config(b"http_proxy", b"host") or encoding.environ.get(
+            b'http_proxy'
+        )
         # XXX proxyauthinfo = None
 
         if proxyurl:
             # proxy can be proper url or host[:port]
-            if not (proxyurl.startswith('http:') or
-                    proxyurl.startswith('https:')):
-                proxyurl = 'http://' + proxyurl + '/'
+            if not (
+                proxyurl.startswith(b'http:') or proxyurl.startswith(b'https:')
+            ):
+                proxyurl = b'http://' + proxyurl + b'/'
             proxy = util.url(proxyurl)
             if not proxy.user:
-                proxy.user = ui.config("http_proxy", "user")
-                proxy.passwd = ui.config("http_proxy", "passwd")
+                proxy.user = ui.config(b"http_proxy", b"user")
+                proxy.passwd = ui.config(b"http_proxy", b"passwd")
 
             # see if we should use a proxy for this url
-            no_list = ["localhost", "127.0.0.1"]
-            no_list.extend([p.lower() for
-                            p in ui.configlist("http_proxy", "no")])
-            no_list.extend([p.strip().lower() for
-                            p in encoding.environ.get("no_proxy", '').split(',')
-                            if p.strip()])
+            no_list = [b"localhost", b"127.0.0.1"]
+            no_list.extend(
+                [p.lower() for p in ui.configlist(b"http_proxy", b"no")]
+            )
+            no_list.extend(
+                [
+                    p.strip().lower()
+                    for p in encoding.environ.get(b"no_proxy", b'').split(b',')
+                    if p.strip()
+                ]
+            )
             # "http_proxy.always" config is for running tests on localhost
-            if ui.configbool("http_proxy", "always"):
+            if ui.configbool(b"http_proxy", b"always"):
                 self.no_list = []
             else:
                 self.no_list = no_list
@@ -135,7 +148,7 @@
             # expects them to be.
             proxyurl = str(proxy)
             proxies = {r'http': proxyurl, r'https': proxyurl}
-            ui.debug('proxying through %s\n' % util.hidepassword(bytes(proxy)))
+            ui.debug(b'proxying through %s\n' % util.hidepassword(bytes(proxy)))
         else:
             proxies = {}
 
@@ -143,17 +156,18 @@
         self.ui = ui
 
     def proxy_open(self, req, proxy, type_):
-        host = pycompat.bytesurl(urllibcompat.gethost(req)).split(':')[0]
+        host = pycompat.bytesurl(urllibcompat.gethost(req)).split(b':')[0]
         for e in self.no_list:
             if host == e:
                 return None
-            if e.startswith('*.') and host.endswith(e[2:]):
+            if e.startswith(b'*.') and host.endswith(e[2:]):
                 return None
-            if e.startswith('.') and host.endswith(e[1:]):
+            if e.startswith(b'.') and host.endswith(e[1:]):
                 return None
 
         return urlreq.proxyhandler.proxy_open(self, req, proxy, type_)
 
+
 def _gen_sendfile(orgsend):
     def _sendfile(self, data):
         # send a file
@@ -164,9 +178,12 @@
                 orgsend(self, chunk)
         else:
             orgsend(self, data)
+
     return _sendfile
 
-has_https = util.safehasattr(urlreq, 'httpshandler')
+
+has_https = util.safehasattr(urlreq, b'httpshandler')
+
 
 class httpconnection(keepalive.HTTPConnection):
     # must be able to send big bundle as stream.
@@ -181,6 +198,7 @@
             return proxyres
         return keepalive.HTTPConnection.getresponse(self)
 
+
 # Large parts of this function date from before Python 2.6
 # and could potentially be removed.
 def _generic_start_transaction(handler, h, req):
@@ -193,10 +211,10 @@
         tunnel_host = urllibcompat.getselector(req)
         new_tunnel = False
 
-    if new_tunnel or tunnel_host == urllibcompat.getfullurl(req): # has proxy
+    if new_tunnel or tunnel_host == urllibcompat.getfullurl(req):  # has proxy
         u = util.url(pycompat.bytesurl(tunnel_host))
-        if new_tunnel or u.scheme == 'https': # only use CONNECT for HTTPS
-            h.realhostport = ':'.join([u.host, (u.port or '443')])
+        if new_tunnel or u.scheme == b'https':  # only use CONNECT for HTTPS
+            h.realhostport = b':'.join([u.host, (u.port or b'443')])
             h.headers = req.headers.copy()
             h.headers.update(handler.parent.addheaders)
             return
@@ -204,14 +222,19 @@
     h.realhostport = None
     h.headers = None
 
+
 def _generic_proxytunnel(self):
     proxyheaders = dict(
-            [(x, self.headers[x]) for x in self.headers
-             if x.lower().startswith(r'proxy-')])
-    self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
-    for header in proxyheaders.iteritems():
-        self.send('%s: %s\r\n' % header)
-    self.send('\r\n')
+        [
+            (x, self.headers[x])
+            for x in self.headers
+            if x.lower().startswith(r'proxy-')
+        ]
+    )
+    self.send(b'CONNECT %s HTTP/1.0\r\n' % self.realhostport)
+    for header in pycompat.iteritems(proxyheaders):
+        self.send(b'%s: %s\r\n' % header)
+    self.send(b'\r\n')
 
     # majority of the following code is duplicated from
     # httplib.HTTPConnection as there are no adequate places to
@@ -219,31 +242,29 @@
     # strict was removed in Python 3.4.
     kwargs = {}
     if not pycompat.ispy3:
-        kwargs['strict'] = self.strict
+        kwargs[b'strict'] = self.strict
 
-    res = self.response_class(self.sock,
-                              method=self._method,
-                              **kwargs)
+    res = self.response_class(self.sock, method=self._method, **kwargs)
 
     while True:
         version, status, reason = res._read_status()
         if status != httplib.CONTINUE:
             break
         # skip lines that are all whitespace
-        list(iter(lambda: res.fp.readline().strip(), ''))
+        list(iter(lambda: res.fp.readline().strip(), b''))
     res.status = status
     res.reason = reason.strip()
 
     if res.status == 200:
         # skip lines until we find a blank line
-        list(iter(res.fp.readline, '\r\n'))
+        list(iter(res.fp.readline, b'\r\n'))
         return True
 
-    if version == 'HTTP/1.0':
+    if version == b'HTTP/1.0':
         res.version = 10
-    elif version.startswith('HTTP/1.'):
+    elif version.startswith(b'HTTP/1.'):
         res.version = 11
-    elif version == 'HTTP/0.9':
+    elif version == b'HTTP/0.9':
         res.version = 9
     else:
         raise httplib.UnknownProtocol(version)
@@ -259,8 +280,8 @@
     res.msg.fp = None
 
     # are we using the chunked-style of transfer encoding?
-    trenc = res.msg.getheader('transfer-encoding')
-    if trenc and trenc.lower() == "chunked":
+    trenc = res.msg.getheader(b'transfer-encoding')
+    if trenc and trenc.lower() == b"chunked":
         res.chunked = 1
         res.chunk_left = None
     else:
@@ -272,7 +293,7 @@
     # do we have a Content-Length?
     # NOTE: RFC 2616, section 4.4, #3 says we ignore this if
     # transfer-encoding is "chunked"
-    length = res.msg.getheader('content-length')
+    length = res.msg.getheader(b'content-length')
     if length and not res.chunked:
         try:
             res.length = int(length)
@@ -285,23 +306,25 @@
         res.length = None
 
     # does the body have a fixed length? (of zero)
-    if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
-        100 <= status < 200 or # 1xx codes
-        res._method == 'HEAD'):
+    if (
+        status == httplib.NO_CONTENT
+        or status == httplib.NOT_MODIFIED
+        or 100 <= status < 200  # 1xx codes
+        or res._method == b'HEAD'
+    ):
         res.length = 0
 
     # if the connection remains open, and we aren't using chunked, and
     # a content-length was not provided, then assume that the connection
     # WILL close.
-    if (not res.will_close and
-       not res.chunked and
-       res.length is None):
+    if not res.will_close and not res.chunked and res.length is None:
         res.will_close = 1
 
     self.proxyres = res
 
     return False
 
+
 class httphandler(keepalive.HTTPHandler):
     def http_open(self, req):
         return self.do_open(httpconnection, req)
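
_generic_proxytunnel above performs the HTTP CONNECT handshake by hand
and parses the proxy's reply itself. A self-contained sketch of the same
handshake over a raw socket; names are placeholders and the error
handling is intentionally minimal::

   import socket

   def connect_via_proxy(proxyhost, proxyport, realhostport):
       # realhostport is b'host:port', as built by
       # _generic_start_transaction above
       sock = socket.create_connection((proxyhost, proxyport))
       sock.sendall(b'CONNECT %s HTTP/1.0\r\n\r\n' % realhostport)
       fp = sock.makefile('rb')
       # status line, e.g. b'HTTP/1.0 200 Connection established\r\n'
       parts = fp.readline().split(None, 2)
       status = parts[1] if len(parts) > 1 else b''
       # skip the proxy's response headers, up to the blank line
       while fp.readline() not in (b'\r\n', b'\n', b''):
           pass
       if status != b'200':
           sock.close()
           raise OSError('proxy CONNECT failed (status %r)' % status)
       return sock  # ready for TLS wrapping or plain HTTP traffic
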
@@ -310,6 +333,7 @@
         _generic_start_transaction(self, h, req)
         return keepalive.HTTPHandler._start_transaction(self, h, req)
 
+
 class logginghttpconnection(keepalive.HTTPConnection):
     def __init__(self, createconn, *args, **kwargs):
         keepalive.HTTPConnection.__init__(self, *args, **kwargs)
@@ -319,14 +343,16 @@
         # copied from 2.7.14, since old implementations directly call
         # socket.create_connection()
         def connect(self):
-            self.sock = self._create_connection((self.host, self.port),
-                                                self.timeout,
-                                                self.source_address)
+            self.sock = self._create_connection(
+                (self.host, self.port), self.timeout, self.source_address
+            )
             if self._tunnel_host:
                 self._tunnel()
 
+
 class logginghttphandler(httphandler):
     """HTTP handler that logs socket I/O."""
+
     def __init__(self, logfh, name, observeropts, timeout=None):
         super(logginghttphandler, self).__init__(timeout=timeout)
 
@@ -343,12 +369,15 @@
     def _makeconnection(self, *args, **kwargs):
         def createconnection(*args, **kwargs):
             sock = socket.create_connection(*args, **kwargs)
-            return util.makeloggingsocket(self._logfh, sock, self._logname,
-                                          **self._observeropts)
+            return util.makeloggingsocket(
+                self._logfh, sock, self._logname, **self._observeropts
+            )
 
         return logginghttpconnection(createconnection, *args, **kwargs)
 
+
 if has_https:
+
     class httpsconnection(keepalive.HTTPConnection):
         response_class = keepalive.HTTPResponse
         default_port = httplib.HTTPS_PORT
@@ -356,8 +385,15 @@
         send = _gen_sendfile(keepalive.safesend)
         getresponse = keepalive.wrapgetresponse(httplib.HTTPConnection)
 
-        def __init__(self, host, port=None, key_file=None, cert_file=None,
-                     *args, **kwargs):
+        def __init__(
+            self,
+            host,
+            port=None,
+            key_file=None,
+            cert_file=None,
+            *args,
+            **kwargs
+        ):
             keepalive.HTTPConnection.__init__(self, host, port, *args, **kwargs)
             self.key_file = key_file
             self.cert_file = cert_file
@@ -366,12 +402,16 @@
             self.sock = socket.create_connection((self.host, self.port))
 
             host = self.host
-            if self.realhostport: # use CONNECT proxy
+            if self.realhostport:  # use CONNECT proxy
                 _generic_proxytunnel(self)
-                host = self.realhostport.rsplit(':', 1)[0]
+                host = self.realhostport.rsplit(b':', 1)[0]
             self.sock = sslutil.wrapsocket(
-                self.sock, self.key_file, self.cert_file, ui=self.ui,
-                serverhostname=host)
+                self.sock,
+                self.key_file,
+                self.cert_file,
+                ui=self.ui,
+                serverhostname=host,
+            )
             sslutil.validatesocket(self.sock)
 
     class httpshandler(keepalive.KeepAliveHandler, urlreq.httpshandler):
@@ -379,8 +419,7 @@
             keepalive.KeepAliveHandler.__init__(self, timeout=timeout)
             urlreq.httpshandler.__init__(self)
             self.ui = ui
-            self.pwmgr = passwordmgr(self.ui,
-                                     self.ui.httppasswordmgrdb)
+            self.pwmgr = passwordmgr(self.ui, self.ui.httppasswordmgrdb)
 
         def _start_transaction(self, h, req):
             _generic_start_transaction(self, h, req)
@@ -395,7 +434,7 @@
             if res:
                 group, auth = res
                 self.auth = auth
-                self.ui.debug("using auth.%s.* for authentication\n" % group)
+                self.ui.debug(b"using auth.%s.* for authentication\n" % group)
             else:
                 self.auth = None
             return self.do_open(self._makeconnection, req)
@@ -404,23 +443,25 @@
             keyfile = None
             certfile = None
 
-            if len(args) >= 1: # key_file
+            if len(args) >= 1:  # key_file
                 keyfile = args[0]
-            if len(args) >= 2: # cert_file
+            if len(args) >= 2:  # cert_file
                 certfile = args[1]
             args = args[2:]
 
             # if the user has specified different key/cert files in
             # hgrc, we prefer these
-            if self.auth and 'key' in self.auth and 'cert' in self.auth:
-                keyfile = self.auth['key']
-                certfile = self.auth['cert']
+            if self.auth and b'key' in self.auth and b'cert' in self.auth:
+                keyfile = self.auth[b'key']
+                certfile = self.auth[b'cert']
 
-            conn = httpsconnection(host, port, keyfile, certfile, *args,
-                                   **kwargs)
+            conn = httpsconnection(
+                host, port, keyfile, certfile, *args, **kwargs
+            )
             conn.ui = self.ui
             return conn
 
+
 class httpdigestauthhandler(urlreq.httpdigestauthhandler):
     def __init__(self, *args, **kwargs):
         urlreq.httpdigestauthhandler.__init__(self, *args, **kwargs)
@@ -438,7 +479,9 @@
             self.retried_req = req
             self.retried = 0
         return urlreq.httpdigestauthhandler.http_error_auth_reqed(
-                    self, auth_header, host, req, headers)
+            self, auth_header, host, req, headers
+        )
+
 
 class httpbasicauthhandler(urlreq.httpbasicauthhandler):
     def __init__(self, *args, **kwargs):
@@ -470,13 +513,15 @@
             self.retried_req = req
             self.retried = 0
         return urlreq.httpbasicauthhandler.http_error_auth_reqed(
-                        self, auth_header, host, req, headers)
+            self, auth_header, host, req, headers
+        )
 
     def retry_http_basic_auth(self, host, req, realm):
         user, pw = self.passwd.find_user_password(
-            realm, urllibcompat.getfullurl(req))
+            realm, urllibcompat.getfullurl(req)
+        )
         if pw is not None:
-            raw = "%s:%s" % (pycompat.bytesurl(user), pycompat.bytesurl(pw))
+            raw = b"%s:%s" % (pycompat.bytesurl(user), pycompat.bytesurl(pw))
             auth = r'Basic %s' % pycompat.strurl(base64.b64encode(raw).strip())
             if req.get_header(self.auth_header, None) == auth:
                 return None
@@ -486,23 +531,30 @@
         else:
             return None
 
+
 class cookiehandler(urlreq.basehandler):
     def __init__(self, ui):
         self.cookiejar = None
 
-        cookiefile = ui.config('auth', 'cookiefile')
+        cookiefile = ui.config(b'auth', b'cookiefile')
         if not cookiefile:
             return
 
         cookiefile = util.expandpath(cookiefile)
         try:
             cookiejar = util.cookielib.MozillaCookieJar(
-                pycompat.fsdecode(cookiefile))
+                pycompat.fsdecode(cookiefile)
+            )
             cookiejar.load()
             self.cookiejar = cookiejar
         except util.cookielib.LoadError as e:
-            ui.warn(_('(error loading cookie file %s: %s; continuing without '
-                      'cookies)\n') % (cookiefile, stringutil.forcebytestr(e)))
+            ui.warn(
+                _(
+                    b'(error loading cookie file %s: %s; continuing without '
+                    b'cookies)\n'
+                )
+                % (cookiefile, stringutil.forcebytestr(e))
+            )
 
     def http_request(self, request):
         if self.cookiejar:
@@ -516,10 +568,19 @@
 
         return request
 
+
 handlerfuncs = []
 
-def opener(ui, authinfo=None, useragent=None, loggingfh=None,
-           loggingname=b's', loggingopts=None, sendaccept=True):
+
+def opener(
+    ui,
+    authinfo=None,
+    useragent=None,
+    loggingfh=None,
+    loggingname=b's',
+    loggingopts=None,
+    sendaccept=True,
+):
     '''
     construct an opener suitable for urllib2
     authinfo will be added to the password manager
@@ -535,12 +596,15 @@
     ``sendaccept`` allows controlling whether the ``Accept`` request header
     is sent. The header is sent by default.
     '''
-    timeout = ui.configwith(float, 'http', 'timeout')
+    timeout = ui.configwith(float, b'http', b'timeout')
     handlers = []
 
     if loggingfh:
-        handlers.append(logginghttphandler(loggingfh, loggingname,
-                                           loggingopts or {}, timeout=timeout))
+        handlers.append(
+            logginghttphandler(
+                loggingfh, loggingname, loggingopts or {}, timeout=timeout
+            )
+        )
         # We don't yet support HTTPS when logging I/O. If we attempt to open
         # an HTTPS URL, we'll likely fail due to unknown protocol.
 
@@ -557,11 +621,14 @@
         saveduser, savedpass = passmgr.find_stored_password(uris[0])
         if user != saveduser or passwd:
             passmgr.add_password(realm, uris, user, passwd)
-        ui.debug('http auth: user %s, password %s\n' %
-                 (user, passwd and '*' * len(passwd) or 'not set'))
+        ui.debug(
+            b'http auth: user %s, password %s\n'
+            % (user, passwd and b'*' * len(passwd) or b'not set')
+        )
 
-    handlers.extend((httpbasicauthhandler(passmgr),
-                     httpdigestauthhandler(passmgr)))
+    handlers.extend(
+        (httpbasicauthhandler(passmgr), httpdigestauthhandler(passmgr))
+    )
     handlers.extend([h(ui, passmgr) for h in handlerfuncs])
     handlers.append(cookiehandler(ui))
     opener = urlreq.buildopener(*handlers)
@@ -587,7 +654,7 @@
     # The custom user agent is for lfs, because unfortunately some servers
     # do look at this value.
     if not useragent:
-        agent = 'mercurial/proto-1.0 (Mercurial %s)' % util.version()
+        agent = b'mercurial/proto-1.0 (Mercurial %s)' % util.version()
         opener.addheaders = [(r'User-agent', pycompat.sysstr(agent))]
     else:
         opener.addheaders = [(r'User-agent', pycompat.sysstr(useragent))]
@@ -601,6 +668,7 @@
 
     return opener
 
+
 def open(ui, url_, data=None, sendaccept=True):
     u = util.url(url_)
     if u.scheme:
@@ -608,11 +676,12 @@
         url_, authinfo = u.authinfo()
     else:
         path = util.normpath(os.path.abspath(url_))
-        url_ = 'file://' + pycompat.bytesurl(urlreq.pathname2url(path))
+        url_ = b'file://' + pycompat.bytesurl(urlreq.pathname2url(path))
         authinfo = None
-    return opener(ui, authinfo,
-                  sendaccept=sendaccept).open(pycompat.strurl(url_),
-                                              data)
+    return opener(ui, authinfo, sendaccept=sendaccept).open(
+        pycompat.strurl(url_), data
+    )
+
 
 def wrapresponse(resp):
     """Wrap a response object with common error handlers.
@@ -631,21 +700,29 @@
                 if e.expected:
                     got = len(e.partial)
                     total = e.expected + got
-                    msg = _('HTTP request error (incomplete response; '
-                            'expected %d bytes got %d)') % (total, got)
+                    msg = _(
+                        b'HTTP request error (incomplete response; '
+                        b'expected %d bytes got %d)'
+                    ) % (total, got)
                 else:
-                    msg = _('HTTP request error (incomplete response)')
+                    msg = _(b'HTTP request error (incomplete response)')
 
                 raise error.PeerTransportError(
                     msg,
-                    hint=_('this may be an intermittent network failure; '
-                           'if the error persists, consider contacting the '
-                           'network or server operator'))
+                    hint=_(
+                        b'this may be an intermittent network failure; '
+                        b'if the error persists, consider contacting the '
+                        b'network or server operator'
+                    ),
+                )
             except httplib.HTTPException as e:
                 raise error.PeerTransportError(
-                    _('HTTP request error (%s)') % e,
-                    hint=_('this may be an intermittent network failure; '
-                           'if the error persists, consider contacting the '
-                           'network or server operator'))
+                    _(b'HTTP request error (%s)') % e,
+                    hint=_(
+                        b'this may be an intermittent network failure; '
+                        b'if the error persists, consider contacting the '
+                        b'network or server operator'
+                    ),
+                )
 
     resp.__class__ = readerproxy
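
The proxyhandler changes above byteify the bypass patterns without
changing their semantics: an entry in no_list can be an exact host name,
a b'*.suffix' wildcard, or a b'.suffix' domain suffix. A small
standalone restatement of proxy_open's matching loop (the function name
is illustrative)::

   def shouldbypassproxy(host, no_list):
       """Return True when ``host`` matches a bypass entry; both the
       host and the entries are lowercase bytes."""
       for e in no_list:
           if host == e:
               return True
           if e.startswith(b'*.') and host.endswith(e[2:]):
               return True
           if e.startswith(b'.') and host.endswith(e[1:]):
               return True
       return False

   # shouldbypassproxy(b'hg.example.com', [b'*.example.com']) -> True
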
--- a/mercurial/urllibcompat.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/urllibcompat.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,10 +6,12 @@
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 
+from .pycompat import getattr
 from . import pycompat
 
 _sysstr = pycompat.sysstr
 
+
 class _pycompatstub(object):
     def __init__(self):
         self._aliases = {}
@@ -18,8 +20,8 @@
         """Add items that will be populated at the first access"""
         items = map(_sysstr, items)
         self._aliases.update(
-            (item.replace(r'_', r'').lower(), (origin, item))
-            for item in items)
+            (item.replace(r'_', r'').lower(), (origin, item)) for item in items
+        )
 
     def _registeralias(self, origin, attr, name):
         """Alias ``origin``.``attr`` as ``name``"""
@@ -33,60 +35,69 @@
         self.__dict__[name] = obj = getattr(origin, item)
         return obj
 
+
 httpserver = _pycompatstub()
 urlreq = _pycompatstub()
 urlerr = _pycompatstub()
 
 if pycompat.ispy3:
     import urllib.parse
-    urlreq._registeraliases(urllib.parse, (
-        "splitattr",
-        "splitpasswd",
-        "splitport",
-        "splituser",
-        "urlparse",
-        "urlunparse",
-    ))
-    urlreq._registeralias(urllib.parse, "parse_qs", "parseqs")
-    urlreq._registeralias(urllib.parse, "parse_qsl", "parseqsl")
-    urlreq._registeralias(urllib.parse, "unquote_to_bytes", "unquote")
+
+    urlreq._registeraliases(
+        urllib.parse,
+        (
+            b"splitattr",
+            b"splitpasswd",
+            b"splitport",
+            b"splituser",
+            b"urlparse",
+            b"urlunparse",
+        ),
+    )
+    urlreq._registeralias(urllib.parse, b"parse_qs", b"parseqs")
+    urlreq._registeralias(urllib.parse, b"parse_qsl", b"parseqsl")
+    urlreq._registeralias(urllib.parse, b"unquote_to_bytes", b"unquote")
     import urllib.request
-    urlreq._registeraliases(urllib.request, (
-        "AbstractHTTPHandler",
-        "BaseHandler",
-        "build_opener",
-        "FileHandler",
-        "FTPHandler",
-        "ftpwrapper",
-        "HTTPHandler",
-        "HTTPSHandler",
-        "install_opener",
-        "pathname2url",
-        "HTTPBasicAuthHandler",
-        "HTTPDigestAuthHandler",
-        "HTTPPasswordMgrWithDefaultRealm",
-        "ProxyHandler",
-        "Request",
-        "url2pathname",
-        "urlopen",
-    ))
+
+    urlreq._registeraliases(
+        urllib.request,
+        (
+            b"AbstractHTTPHandler",
+            b"BaseHandler",
+            b"build_opener",
+            b"FileHandler",
+            b"FTPHandler",
+            b"ftpwrapper",
+            b"HTTPHandler",
+            b"HTTPSHandler",
+            b"install_opener",
+            b"pathname2url",
+            b"HTTPBasicAuthHandler",
+            b"HTTPDigestAuthHandler",
+            b"HTTPPasswordMgrWithDefaultRealm",
+            b"ProxyHandler",
+            b"Request",
+            b"url2pathname",
+            b"urlopen",
+        ),
+    )
     import urllib.response
-    urlreq._registeraliases(urllib.response, (
-        "addclosehook",
-        "addinfourl",
-    ))
+
+    urlreq._registeraliases(urllib.response, (b"addclosehook", b"addinfourl",))
     import urllib.error
-    urlerr._registeraliases(urllib.error, (
-        "HTTPError",
-        "URLError",
-    ))
+
+    urlerr._registeraliases(urllib.error, (b"HTTPError", b"URLError",))
     import http.server
-    httpserver._registeraliases(http.server, (
-        "HTTPServer",
-        "BaseHTTPRequestHandler",
-        "SimpleHTTPRequestHandler",
-        "CGIHTTPRequestHandler",
-    ))
+
+    httpserver._registeraliases(
+        http.server,
+        (
+            b"HTTPServer",
+            b"BaseHTTPRequestHandler",
+            b"SimpleHTTPRequestHandler",
+            b"CGIHTTPRequestHandler",
+        ),
+    )
 
     # urllib.parse.quote() accepts both str and bytes, decodes bytes
     # (if necessary), and returns str. This is wonky. We provide a custom
@@ -102,8 +113,8 @@
     # urllib.parse.urlencode() returns str. We use this function to make
     # sure we return bytes.
     def urlencode(query, doseq=False):
-            s = urllib.parse.urlencode(query, doseq=doseq)
-            return s.encode('ascii')
+        s = urllib.parse.urlencode(query, doseq=doseq)
+        return s.encode('ascii')
 
     urlreq.quote = quote
     urlreq.urlencode = urlencode
@@ -122,6 +133,8 @@
 
     def hasdata(req):
         return req.data is not None
+
+
 else:
     import BaseHTTPServer
     import CGIHTTPServer
@@ -129,56 +142,54 @@
     import urllib2
     import urllib
     import urlparse
-    urlreq._registeraliases(urllib, (
-        "addclosehook",
-        "addinfourl",
-        "ftpwrapper",
-        "pathname2url",
-        "quote",
-        "splitattr",
-        "splitpasswd",
-        "splitport",
-        "splituser",
-        "unquote",
-        "url2pathname",
-        "urlencode",
-    ))
-    urlreq._registeraliases(urllib2, (
-        "AbstractHTTPHandler",
-        "BaseHandler",
-        "build_opener",
-        "FileHandler",
-        "FTPHandler",
-        "HTTPBasicAuthHandler",
-        "HTTPDigestAuthHandler",
-        "HTTPHandler",
-        "HTTPPasswordMgrWithDefaultRealm",
-        "HTTPSHandler",
-        "install_opener",
-        "ProxyHandler",
-        "Request",
-        "urlopen",
-    ))
-    urlreq._registeraliases(urlparse, (
-        "urlparse",
-        "urlunparse",
-    ))
-    urlreq._registeralias(urlparse, "parse_qs", "parseqs")
-    urlreq._registeralias(urlparse, "parse_qsl", "parseqsl")
-    urlerr._registeraliases(urllib2, (
-        "HTTPError",
-        "URLError",
-    ))
-    httpserver._registeraliases(BaseHTTPServer, (
-        "HTTPServer",
-        "BaseHTTPRequestHandler",
-    ))
-    httpserver._registeraliases(SimpleHTTPServer, (
-        "SimpleHTTPRequestHandler",
-    ))
-    httpserver._registeraliases(CGIHTTPServer, (
-        "CGIHTTPRequestHandler",
-    ))
+
+    urlreq._registeraliases(
+        urllib,
+        (
+            b"addclosehook",
+            b"addinfourl",
+            b"ftpwrapper",
+            b"pathname2url",
+            b"quote",
+            b"splitattr",
+            b"splitpasswd",
+            b"splitport",
+            b"splituser",
+            b"unquote",
+            b"url2pathname",
+            b"urlencode",
+        ),
+    )
+    urlreq._registeraliases(
+        urllib2,
+        (
+            b"AbstractHTTPHandler",
+            b"BaseHandler",
+            b"build_opener",
+            b"FileHandler",
+            b"FTPHandler",
+            b"HTTPBasicAuthHandler",
+            b"HTTPDigestAuthHandler",
+            b"HTTPHandler",
+            b"HTTPPasswordMgrWithDefaultRealm",
+            b"HTTPSHandler",
+            b"install_opener",
+            b"ProxyHandler",
+            b"Request",
+            b"urlopen",
+        ),
+    )
+    urlreq._registeraliases(urlparse, (b"urlparse", b"urlunparse",))
+    urlreq._registeralias(urlparse, b"parse_qs", b"parseqs")
+    urlreq._registeralias(urlparse, b"parse_qsl", b"parseqsl")
+    urlerr._registeraliases(urllib2, (b"HTTPError", b"URLError",))
+    httpserver._registeraliases(
+        BaseHTTPServer, (b"HTTPServer", b"BaseHTTPRequestHandler",)
+    )
+    httpserver._registeraliases(
+        SimpleHTTPServer, (b"SimpleHTTPRequestHandler",)
+    )
+    httpserver._registeraliases(CGIHTTPServer, (b"CGIHTTPRequestHandler",))
 
     def gethost(req):
         return req.get_host()
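
urllibcompat resolves these aliases lazily: _registeraliases only
records (origin, item) pairs, and the first attribute access imports and
caches the real object through __getattr__. A reduced sketch of that
pattern, independent of the Mercurial helpers::

   class lazystub(object):
       """Resolve registered aliases on first attribute access."""

       def __init__(self):
           self._aliases = {}

       def register(self, origin, *names):
           for name in names:
               # normalized key -> (module, attribute name) pair
               self._aliases[name.replace('_', '').lower()] = (origin, name)

       def __getattr__(self, name):
           try:
               origin, item = self._aliases[name]
           except KeyError:
               raise AttributeError(name)
           obj = getattr(origin, item)
           self.__dict__[name] = obj  # cache so __getattr__ won't refire
           return obj

   # import urllib.request
   # urlreq = lazystub()
   # urlreq.register(urllib.request, 'build_opener', 'Request')
   # opener = urlreq.buildopener  # resolved and cached on first use
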
--- a/mercurial/util.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/util.py	Mon Oct 21 11:09:48 2019 -0400
@@ -34,8 +34,12 @@
 import traceback
 import warnings
 
-from .thirdparty import (
-    attr,
+from .thirdparty import attr
+from .pycompat import (
+    delattr,
+    getattr,
+    open,
+    setattr,
 )
 from hgdemandimport import tracing
 from . import (
@@ -53,7 +57,7 @@
     stringutil,
 )
 
-rustdirs = policy.importrust('dirstate', 'Dirs')
+rustdirs = policy.importrust(r'dirstate', r'Dirs')
 
 base85 = policy.importmod(r'base85')
 osutil = policy.importmod(r'osutil')
@@ -142,16 +146,18 @@
 
 _notset = object()
 
+
 def bitsfrom(container):
     bits = 0
     for bit in container:
         bits |= bit
     return bits
 
+
 # Python 2.6 still has deprecation warnings enabled by default. We do not want
 # to display anything to standard users, so detect if we are running tests and
 # only use Python deprecation warnings in that case.
-_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
+_dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
 if _dowarn:
     # explicitly unfilter our warning for python 2.7
     #
@@ -164,13 +170,20 @@
     warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
 if _dowarn and pycompat.ispy3:
     # silence warning emitted by passing user string to re.sub()
-    warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
-                            r'mercurial')
-    warnings.filterwarnings(r'ignore', r'invalid escape sequence',
-                            DeprecationWarning, r'mercurial')
+    warnings.filterwarnings(
+        r'ignore', r'bad escape', DeprecationWarning, r'mercurial'
+    )
+    warnings.filterwarnings(
+        r'ignore', r'invalid escape sequence', DeprecationWarning, r'mercurial'
+    )
     # TODO: reinvent imp.is_frozen()
-    warnings.filterwarnings(r'ignore', r'the imp module is deprecated',
-                            DeprecationWarning, r'mercurial')
+    warnings.filterwarnings(
+        r'ignore',
+        r'the imp module is deprecated',
+        DeprecationWarning,
+        r'mercurial',
+    )
+
 
 def nouideprecwarn(msg, version, stacklevel=1):
     """Issue an python native deprecation warning
@@ -178,21 +191,25 @@
     This is a noop outside of tests, use 'ui.deprecwarn' when possible.
     """
     if _dowarn:
-        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
-                " update your code.)") % version
+        msg += (
+            b"\n(compatibility will be dropped after Mercurial-%s,"
+            b" update your code.)"
+        ) % version
         warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
 
+
 DIGESTS = {
-    'md5': hashlib.md5,
-    'sha1': hashlib.sha1,
-    'sha512': hashlib.sha512,
+    b'md5': hashlib.md5,
+    b'sha1': hashlib.sha1,
+    b'sha512': hashlib.sha512,
 }
 # List of digest types from strongest to weakest
-DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
+DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
 
 for k in DIGESTS_BY_STRENGTH:
     assert k in DIGESTS
 
+
 class digester(object):
     """helper to compute digests.
 
@@ -210,11 +227,11 @@
     'sha1'
     """
 
-    def __init__(self, digests, s=''):
+    def __init__(self, digests, s=b''):
         self._hashes = {}
         for k in digests:
             if k not in DIGESTS:
-                raise error.Abort(_('unknown digest type: %s') % k)
+                raise error.Abort(_(b'unknown digest type: %s') % k)
             self._hashes[k] = DIGESTS[k]()
         if s:
             self.update(s)
@@ -225,7 +242,7 @@
 
     def __getitem__(self, key):
         if key not in DIGESTS:
-            raise error.Abort(_('unknown digest type: %s') % k)
+            raise error.Abort(_(b'unknown digest type: %s') % key)
         return nodemod.hex(self._hashes[key].digest())
 
     def __iter__(self):
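
The digester changes above only byteify the algorithm names; the
underlying pattern is a dict of hashlib constructors that are all
updated in lockstep, plus a strongest-first preference list. A minimal
standalone version (hexdigest() stands in for nodemod.hex())::

   import hashlib

   DIGESTS = {
       b'md5': hashlib.md5,
       b'sha1': hashlib.sha1,
       b'sha512': hashlib.sha512,
   }
   DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']  # strongest first

   class digester(object):
       def __init__(self, digests, s=b''):
           self._hashes = {k: DIGESTS[k]() for k in digests}
           if s:
               self.update(s)

       def update(self, data):
           for h in self._hashes.values():
               h.update(data)

       def __getitem__(self, key):
           return self._hashes[key].hexdigest()

       @staticmethod
       def preferred(supported):
           """strongest digest name that is also in ``supported``"""
           for k in DIGESTS_BY_STRENGTH:
               if k in supported:
                   return k
           return None

   # digester([b'md5'], b'foo')[b'md5']
   #   -> 'acbd18db4cc2f85cedef654fccc4a4d8'
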
@@ -240,6 +257,7 @@
                 return k
         return None
 
+
 class digestchecker(object):
     """file handle wrapper that additionally checks content against a given
     size and digests.
@@ -264,24 +282,32 @@
 
     def validate(self):
         if self._size != self._got:
-            raise error.Abort(_('size mismatch: expected %d, got %d') %
-                              (self._size, self._got))
+            raise error.Abort(
+                _(b'size mismatch: expected %d, got %d')
+                % (self._size, self._got)
+            )
         for k, v in self._digests.items():
             if v != self._digester[k]:
                 # i18n: first parameter is a digest name
-                raise error.Abort(_('%s mismatch: expected %s, got %s') %
-                                  (k, v, self._digester[k]))
+                raise error.Abort(
+                    _(b'%s mismatch: expected %s, got %s')
+                    % (k, v, self._digester[k])
+                )
+
 
 try:
     buffer = buffer
 except NameError:
+
     def buffer(sliceable, offset=0, length=None):
         if length is not None:
-            return memoryview(sliceable)[offset:offset + length]
+            return memoryview(sliceable)[offset : offset + length]
         return memoryview(sliceable)[offset:]
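
The fallback emulates the Python 2 ``buffer()`` builtin with a ``memoryview`` slice (illustrative; not part of this changeset)::

    view = buffer(b'abcdef', 2, 3)
    bytes(view)    # b'cde'; the slice itself copies no data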
 
+
 _chunksize = 4096
 
+
 class bufferedinputpipe(object):
     """a manually buffered input pipe
 
@@ -296,6 +322,7 @@
     This class lives in the 'util' module because it makes use of the 'os'
     module from the python stdlib.
     """
+
     def __new__(cls, fh):
         # If we receive a fileobjectproxy, we need to use a variation of this
         # class that notifies observers about activity.
@@ -342,17 +369,17 @@
         if len(self._buffer) > 1:
             # this should not happen because both read and readline end with a
             # _frombuffer call that collapses it.
-            self._buffer = [''.join(self._buffer)]
+            self._buffer = [b''.join(self._buffer)]
             self._lenbuf = len(self._buffer[0])
         lfi = -1
         if self._buffer:
-            lfi = self._buffer[-1].find('\n')
+            lfi = self._buffer[-1].find(b'\n')
         while (not self._eof) and lfi < 0:
             self._fillbuffer()
             if self._buffer:
-                lfi = self._buffer[-1].find('\n')
+                lfi = self._buffer[-1].find(b'\n')
         size = lfi + 1
-        if lfi < 0: # end of file
+        if lfi < 0:  # end of file
             size = self._lenbuf
         elif len(self._buffer) > 1:
             # we need to take previous chunks into account
@@ -364,13 +391,13 @@
 
         The data are removed from the buffer."""
         if size == 0 or not self._buffer:
-            return ''
+            return b''
         buf = self._buffer[0]
         if len(self._buffer) > 1:
-            buf = ''.join(self._buffer)
+            buf = b''.join(self._buffer)
 
         data = buf[:size]
-        buf = buf[len(data):]
+        buf = buf[len(data) :]
         if buf:
             self._buffer = [buf]
             self._lenbuf = len(buf)
@@ -390,6 +417,7 @@
 
         return data
 
+
 def mmapread(fp):
     try:
         fd = getattr(fp, 'fileno', lambda: fp)()
@@ -398,15 +426,17 @@
         # Empty files cannot be mmapped, but mmapread should still work.  Check
         # if the file is empty, and if so, return an empty buffer.
         if os.fstat(fd).st_size == 0:
-            return ''
+            return b''
         raise
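
A sketch of the intended use, with a placeholder path (not part of this changeset)::

    with open(somepath, 'rb') as fp:    # somepath is hypothetical
        data = mmapread(fp)             # mmap-backed buffer, or b'' if empty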
 
+
 class fileobjectproxy(object):
     """A proxy around file objects that tells a watcher when events occur.
 
     This type is intended to only be used for testing purposes. Think hard
     before using it in important code.
     """
+
     __slots__ = (
         r'_orig',
         r'_observer',
@@ -419,7 +449,6 @@
     def __getattribute__(self, name):
         ours = {
             r'_observer',
-
             # IOBase
             r'close',
             # closed if a property
@@ -485,79 +514,99 @@
 
     def close(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'close', *args, **kwargs)
+            r'close', *args, **kwargs
+        )
 
     def fileno(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'fileno', *args, **kwargs)
+            r'fileno', *args, **kwargs
+        )
 
     def flush(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'flush', *args, **kwargs)
+            r'flush', *args, **kwargs
+        )
 
     def isatty(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'isatty', *args, **kwargs)
+            r'isatty', *args, **kwargs
+        )
 
     def readable(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'readable', *args, **kwargs)
+            r'readable', *args, **kwargs
+        )
 
     def readline(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'readline', *args, **kwargs)
+            r'readline', *args, **kwargs
+        )
 
     def readlines(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'readlines', *args, **kwargs)
+            r'readlines', *args, **kwargs
+        )
 
     def seek(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'seek', *args, **kwargs)
+            r'seek', *args, **kwargs
+        )
 
     def seekable(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'seekable', *args, **kwargs)
+            r'seekable', *args, **kwargs
+        )
 
     def tell(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'tell', *args, **kwargs)
+            r'tell', *args, **kwargs
+        )
 
     def truncate(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'truncate', *args, **kwargs)
+            r'truncate', *args, **kwargs
+        )
 
     def writable(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'writable', *args, **kwargs)
+            r'writable', *args, **kwargs
+        )
 
     def writelines(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'writelines', *args, **kwargs)
+            r'writelines', *args, **kwargs
+        )
 
     def read(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'read', *args, **kwargs)
+            r'read', *args, **kwargs
+        )
 
     def readall(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'readall', *args, **kwargs)
+            r'readall', *args, **kwargs
+        )
 
     def readinto(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'readinto', *args, **kwargs)
+            r'readinto', *args, **kwargs
+        )
 
     def write(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'write', *args, **kwargs)
+            r'write', *args, **kwargs
+        )
 
     def detach(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'detach', *args, **kwargs)
+            r'detach', *args, **kwargs
+        )
 
     def read1(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'read1', *args, **kwargs)
+            r'read1', *args, **kwargs
+        )
+
 
 class observedbufferedinputpipe(bufferedinputpipe):
     """A variation of bufferedinputpipe that is aware of fileobjectproxy.
@@ -570,10 +619,11 @@
     ``os.read()`` events. It also re-publishes other events, such as
     ``read()`` and ``readline()``.
     """
+
     def _fillbuffer(self):
         res = super(observedbufferedinputpipe, self)._fillbuffer()
 
-        fn = getattr(self._input._observer, r'osread', None)
+        fn = getattr(self._input._observer, 'osread', None)
         if fn:
             fn(res, _chunksize)
 
@@ -584,7 +634,7 @@
     def read(self, size):
         res = super(observedbufferedinputpipe, self).read(size)
 
-        fn = getattr(self._input._observer, r'bufferedread', None)
+        fn = getattr(self._input._observer, 'bufferedread', None)
         if fn:
             fn(res, size)
 
@@ -593,12 +643,13 @@
     def readline(self, *args, **kwargs):
         res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
 
-        fn = getattr(self._input._observer, r'bufferedreadline', None)
+        fn = getattr(self._input._observer, 'bufferedreadline', None)
         if fn:
             fn(res)
 
         return res
 
+
 PROXIED_SOCKET_METHODS = {
     r'makefile',
     r'recv',
@@ -614,6 +665,7 @@
     r'setsockopt',
 }
 
+
 class socketproxy(object):
     """A proxy around a socket that tells a watcher when events occur.
 
@@ -622,6 +674,7 @@
     This type is intended to only be used for testing purposes. Think hard
     before using it in important code.
     """
+
     __slots__ = (
         r'_orig',
         r'_observer',
@@ -664,93 +717,115 @@
 
     def makefile(self, *args, **kwargs):
         res = object.__getattribute__(self, r'_observedcall')(
-            r'makefile', *args, **kwargs)
+            r'makefile', *args, **kwargs
+        )
 
         # The file object may be used for I/O. So we turn it into a
         # proxy using our observer.
         observer = object.__getattribute__(self, r'_observer')
-        return makeloggingfileobject(observer.fh, res, observer.name,
-                                     reads=observer.reads,
-                                     writes=observer.writes,
-                                     logdata=observer.logdata,
-                                     logdataapis=observer.logdataapis)
+        return makeloggingfileobject(
+            observer.fh,
+            res,
+            observer.name,
+            reads=observer.reads,
+            writes=observer.writes,
+            logdata=observer.logdata,
+            logdataapis=observer.logdataapis,
+        )
 
     def recv(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'recv', *args, **kwargs)
+            r'recv', *args, **kwargs
+        )
 
     def recvfrom(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'recvfrom', *args, **kwargs)
+            r'recvfrom', *args, **kwargs
+        )
 
     def recvfrom_into(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'recvfrom_into', *args, **kwargs)
+            r'recvfrom_into', *args, **kwargs
+        )
 
     def recv_into(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'recv_info', *args, **kwargs)
+            r'recv_into', *args, **kwargs
+        )
 
     def send(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'send', *args, **kwargs)
+            r'send', *args, **kwargs
+        )
 
     def sendall(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'sendall', *args, **kwargs)
+            r'sendall', *args, **kwargs
+        )
 
     def sendto(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'sendto', *args, **kwargs)
+            r'sendto', *args, **kwargs
+        )
 
     def setblocking(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'setblocking', *args, **kwargs)
+            r'setblocking', *args, **kwargs
+        )
 
     def settimeout(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'settimeout', *args, **kwargs)
+            r'settimeout', *args, **kwargs
+        )
 
     def gettimeout(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'gettimeout', *args, **kwargs)
+            r'gettimeout', *args, **kwargs
+        )
 
     def setsockopt(self, *args, **kwargs):
         return object.__getattribute__(self, r'_observedcall')(
-            r'setsockopt', *args, **kwargs)
+            r'setsockopt', *args, **kwargs
+        )
+
 
 class baseproxyobserver(object):
     def _writedata(self, data):
         if not self.logdata:
             if self.logdataapis:
-                self.fh.write('\n')
+                self.fh.write(b'\n')
                 self.fh.flush()
             return
 
         # Simple case writes all data on a single line.
         if b'\n' not in data:
             if self.logdataapis:
-                self.fh.write(': %s\n' % stringutil.escapestr(data))
+                self.fh.write(b': %s\n' % stringutil.escapestr(data))
             else:
-                self.fh.write('%s>     %s\n'
-                              % (self.name, stringutil.escapestr(data)))
+                self.fh.write(
+                    b'%s>     %s\n' % (self.name, stringutil.escapestr(data))
+                )
             self.fh.flush()
             return
 
         # Data with newlines is written to multiple lines.
         if self.logdataapis:
-            self.fh.write(':\n')
+            self.fh.write(b':\n')
 
         lines = data.splitlines(True)
         for line in lines:
-            self.fh.write('%s>     %s\n'
-                          % (self.name, stringutil.escapestr(line)))
+            self.fh.write(
+                b'%s>     %s\n' % (self.name, stringutil.escapestr(line))
+            )
         self.fh.flush()
 
+
 class fileobjectobserver(baseproxyobserver):
     """Logs file object activity."""
-    def __init__(self, fh, name, reads=True, writes=True, logdata=False,
-                 logdataapis=True):
+
+    def __init__(
+        self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
+    ):
         self.fh = fh
         self.name = name
         self.logdata = logdata
@@ -763,9 +838,9 @@
             return
         # Python 3 can return None from reads at EOF instead of empty strings.
         if res is None:
-            res = ''
-
-        if size == -1 and res == '':
+            res = b''
+
+        if size == -1 and res == b'':
             # Suppress pointless read(-1) calls that return
             # nothing. These happen _a lot_ on Python 3, and there
             # doesn't seem to be a better workaround to have matching
@@ -773,7 +848,7 @@
             return
 
         if self.logdataapis:
-            self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))
+            self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
 
         self._writedata(res)
 
@@ -782,7 +857,7 @@
             return
 
         if self.logdataapis:
-            self.fh.write('%s> readline() -> %d' % (self.name, len(res)))
+            self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
 
         self._writedata(res)
 
@@ -791,8 +866,9 @@
             return
 
         if self.logdataapis:
-            self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
-                                                      res))
+            self.fh.write(
+                b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
+            )
 
         data = dest[0:res] if res is not None else b''
 
@@ -813,7 +889,7 @@
             res = len(data)
 
         if self.logdataapis:
-            self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))
+            self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
 
         self._writedata(data)
 
@@ -821,7 +897,7 @@
         if not self.writes:
             return
 
-        self.fh.write('%s> flush() -> %r\n' % (self.name, res))
+        self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
 
     # For observedbufferedinputpipe.
     def bufferedread(self, res, size):
@@ -829,8 +905,9 @@
             return
 
         if self.logdataapis:
-            self.fh.write('%s> bufferedread(%d) -> %d' % (
-                self.name, size, len(res)))
+            self.fh.write(
+                b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
+            )
 
         self._writedata(res)
 
@@ -839,23 +916,42 @@
             return
 
         if self.logdataapis:
-            self.fh.write('%s> bufferedreadline() -> %d' % (
-                self.name, len(res)))
+            self.fh.write(
+                b'%s> bufferedreadline() -> %d' % (self.name, len(res))
+            )
 
         self._writedata(res)
 
-def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
-                          logdata=False, logdataapis=True):
+
+def makeloggingfileobject(
+    logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
+):
     """Turn a file object into a logging file object."""
 
-    observer = fileobjectobserver(logh, name, reads=reads, writes=writes,
-                                  logdata=logdata, logdataapis=logdataapis)
+    observer = fileobjectobserver(
+        logh,
+        name,
+        reads=reads,
+        writes=writes,
+        logdata=logdata,
+        logdataapis=logdataapis,
+    )
     return fileobjectproxy(fh, observer)
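
A rough sketch using ``io.BytesIO`` stand-ins (not part of this changeset)::

    import io
    log = io.BytesIO()
    fh = makeloggingfileobject(log, io.BytesIO(b'payload'), b'test')
    fh.read(4)
    # with the defaults (logdataapis=True, logdata=False), log now
    # holds roughly: b'test> read(4) -> 4\n'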
 
+
 class socketobserver(baseproxyobserver):
     """Logs socket activity."""
-    def __init__(self, fh, name, reads=True, writes=True, states=True,
-                 logdata=False, logdataapis=True):
+
+    def __init__(
+        self,
+        fh,
+        name,
+        reads=True,
+        writes=True,
+        states=True,
+        logdata=False,
+        logdataapis=True,
+    ):
         self.fh = fh
         self.name = name
         self.reads = reads
@@ -868,16 +964,16 @@
         if not self.states:
             return
 
-        self.fh.write('%s> makefile(%r, %r)\n' % (
-            self.name, mode, bufsize))
+        self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
 
     def recv(self, res, size, flags=0):
         if not self.reads:
             return
 
         if self.logdataapis:
-            self.fh.write('%s> recv(%d, %d) -> %d' % (
-                self.name, size, flags, len(res)))
+            self.fh.write(
+                b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
+            )
         self._writedata(res)
 
     def recvfrom(self, res, size, flags=0):
@@ -885,8 +981,10 @@
             return
 
         if self.logdataapis:
-            self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
-                self.name, size, flags, len(res[0])))
+            self.fh.write(
+                b'%s> recvfrom(%d, %d) -> %d'
+                % (self.name, size, flags, len(res[0]))
+            )
 
         self._writedata(res[0])
 
@@ -895,18 +993,21 @@
             return
 
         if self.logdataapis:
-            self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
-                self.name, size, flags, res[0]))
-
-        self._writedata(buf[0:res[0]])
+            self.fh.write(
+                b'%s> recvfrom_into(%d, %d) -> %d'
+                % (self.name, size, flags, res[0])
+            )
+
+        self._writedata(buf[0 : res[0]])
 
     def recv_into(self, res, buf, size=0, flags=0):
         if not self.reads:
             return
 
         if self.logdataapis:
-            self.fh.write('%s> recv_into(%d, %d) -> %d' % (
-                self.name, size, flags, res))
+            self.fh.write(
+                b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
+            )
 
         self._writedata(buf[0:res])
 
@@ -914,8 +1015,9 @@
         if not self.writes:
             return
 
-        self.fh.write('%s> send(%d, %d) -> %d' % (
-            self.name, len(data), flags, len(res)))
+        self.fh.write(
+            b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
+        )
         self._writedata(data)
 
     def sendall(self, res, data, flags=0):
@@ -924,8 +1026,9 @@
 
         if self.logdataapis:
             # Returns None on success. So don't bother reporting return value.
-            self.fh.write('%s> sendall(%d, %d)' % (
-                self.name, len(data), flags))
+            self.fh.write(
+                b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
+            )
 
         self._writedata(data)
 
@@ -939,8 +1042,10 @@
             flags = 0
 
         if self.logdataapis:
-            self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
-                self.name, len(data), flags, address, res))
+            self.fh.write(
+                b'%s> sendto(%d, %d, %r) -> %d'
+                % (self.name, len(data), flags, address, res)
+            )
 
         self._writedata(data)
 
@@ -948,43 +1053,63 @@
         if not self.states:
             return
 
-        self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))
+        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
 
     def settimeout(self, res, value):
         if not self.states:
             return
 
-        self.fh.write('%s> settimeout(%r)\n' % (self.name, value))
+        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
 
     def gettimeout(self, res):
         if not self.states:
             return
 
-        self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
+        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
 
     def setsockopt(self, res, level, optname, value):
         if not self.states:
             return
 
-        self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
-            self.name, level, optname, value, res))
-
-def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
-                      logdata=False, logdataapis=True):
+        self.fh.write(
+            b'%s> setsockopt(%r, %r, %r) -> %r\n'
+            % (self.name, level, optname, value, res)
+        )
+
+
+def makeloggingsocket(
+    logh,
+    fh,
+    name,
+    reads=True,
+    writes=True,
+    states=True,
+    logdata=False,
+    logdataapis=True,
+):
     """Turn a socket into a logging socket."""
 
-    observer = socketobserver(logh, name, reads=reads, writes=writes,
-                              states=states, logdata=logdata,
-                              logdataapis=logdataapis)
+    observer = socketobserver(
+        logh,
+        name,
+        reads=reads,
+        writes=writes,
+        states=states,
+        logdata=logdata,
+        logdataapis=logdataapis,
+    )
     return socketproxy(fh, observer)
 
+
 def version():
     """Return version information if available."""
     try:
         from . import __version__
+
         return __version__.version
     except ImportError:
-        return 'unknown'
+        return b'unknown'
+
 
 def versiontuple(v=None, n=4):
     """Parses a Mercurial version string into an N-tuple.
@@ -1045,14 +1170,14 @@
         v = version()
     m = remod.match(br'(\d+(?:\.\d+){,2})[\+-]?(.*)', v)
     if not m:
-        vparts, extra = '', v
+        vparts, extra = b'', v
     elif m.group(2):
         vparts, extra = m.groups()
     else:
         vparts, extra = m.group(1), None
 
     vints = []
-    for i in vparts.split('.'):
+    for i in vparts.split(b'.'):
         try:
             vints.append(int(i))
         except ValueError:
@@ -1068,15 +1193,18 @@
     if n == 4:
         return (vints[0], vints[1], vints[2], extra)
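
With the byte-string regex above, parsing behaves like (illustrative; not part of this changeset)::

    versiontuple(b'5.2rc0')        # (5, 2, None, b'rc0')
    versiontuple(b'4.9.1', n=2)    # (4, 9)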
 
+
 def cachefunc(func):
     '''cache the result of function calls'''
     # XXX doesn't handle keywords args
     if func.__code__.co_argcount == 0:
         cache = []
+
         def f():
             if len(cache) == 0:
                 cache.append(func())
             return cache[0]
+
         return f
     cache = {}
     if func.__code__.co_argcount == 1:
@@ -1086,7 +1214,9 @@
             if arg not in cache:
                 cache[arg] = func(arg)
             return cache[arg]
+
     else:
+
         def f(*args):
             if args not in cache:
                 cache[args] = func(*args)
@@ -1094,6 +1224,7 @@
 
     return f
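
A behavioural sketch of the memoization (not part of this changeset)::

    calls = []
    def double(x):
        calls.append(x)
        return x * 2
    fast = cachefunc(double)
    fast(21); fast(21)    # the second call is served from the cache
    len(calls)            # 1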
 
+
 class cow(object):
     """helper class to make copy-on-write easier
 
@@ -1112,6 +1243,7 @@
         self._copied = getattr(self, '_copied', 0) + 1
         return self
 
+
 class sortdict(collections.OrderedDict):
     '''a simple sorted dictionary
 
@@ -1133,10 +1265,11 @@
         # __setitem__() isn't called as of PyPy 5.8.0
         def update(self, src):
             if isinstance(src, dict):
-                src = src.iteritems()
+                src = pycompat.iteritems(src)
             for k, v in src:
                 self[k] = v
 
+
 class cowdict(cow, dict):
     """copy-on-write dict
 
@@ -1163,14 +1296,17 @@
     True
     """
 
+
 class cowsortdict(cow, sortdict):
     """copy-on-write sortdict
 
     Be sure to call d = d.preparewrite() before writing to d.
     """
 
+
 class transactional(object):
     """Base class for making a transactional type into a context manager."""
+
     __metaclass__ = abc.ABCMeta
 
     @abc.abstractmethod
@@ -1194,6 +1330,7 @@
         finally:
             self.release()
 
+
 @contextlib.contextmanager
 def acceptintervention(tr=None):
     """A context manager that closes the transaction on InterventionRequired
@@ -1212,16 +1349,19 @@
     finally:
         tr.release()
 
+
 @contextlib.contextmanager
 def nullcontextmanager():
     yield
 
+
 class _lrucachenode(object):
     """A node in a doubly linked list.
 
     Holds a reference to nodes on either side as well as a key-value
     pair for the dictionary entry.
     """
+
     __slots__ = (r'next', r'prev', r'key', r'value', r'cost')
 
     def __init__(self):
@@ -1238,6 +1378,7 @@
         self.value = None
         self.cost = 0
 
+
 class lrucachedict(object):
     """Dict that caches most recent accesses and sets.
 
@@ -1260,6 +1401,7 @@
     to e.g. set a max memory limit and associate an estimated bytes size
     cost to each item in the cache. By default, no maximum cost is enforced.
     """
+
     def __init__(self, max, maxcost=0):
         self._cache = {}
 
@@ -1530,11 +1672,13 @@
             n.markempty()
             n = n.prev
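
Typical eviction behaviour, sketched (not part of this changeset)::

    d = lrucachedict(2)
    d[b'a'] = 1
    d[b'b'] = 2
    d[b'c'] = 3    # evicts b'a', the least recently used entry
    b'a' in d      # False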
 
+
 def lrucachefunc(func):
     '''cache most recent results of function calls'''
     cache = {}
     order = collections.deque()
     if func.__code__.co_argcount == 1:
+
         def f(arg):
             if arg not in cache:
                 if len(cache) > 20:
@@ -1544,7 +1688,9 @@
                 order.remove(arg)
             order.append(arg)
             return cache[arg]
+
     else:
+
         def f(*args):
             if args not in cache:
                 if len(cache) > 20:
@@ -1557,10 +1703,12 @@
 
     return f
 
+
 class propertycache(object):
     def __init__(self, func):
         self.func = func
         self.name = func.__name__
+
     def __get__(self, obj, type=None):
         result = self.func(obj)
         self.cachevalue(obj, result)
@@ -1570,15 +1718,18 @@
         # __dict__ assignment required to bypass __setattr__ (eg: repoview)
         obj.__dict__[self.name] = value
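
The descriptor computes once and then serves reads from the instance ``__dict__`` (illustrative; not part of this changeset)::

    class example(object):
        @propertycache
        def expensive(self):
            return computevalue()    # hypothetical helper; runs only once

Because ``propertycache`` defines no ``__set__``, the cached instance attribute shadows the descriptor on later lookups.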
 
+
 def clearcachedproperty(obj, prop):
     '''clear a cached property value, if one has been set'''
     prop = pycompat.sysstr(prop)
     if prop in obj.__dict__:
         del obj.__dict__[prop]
 
+
 def increasingchunks(source, min=1024, max=65536):
     '''return no less than min bytes per chunk while data remains,
     doubling min after each chunk until it reaches max'''
+
     def log2(x):
         if not x:
             return 0
@@ -1601,18 +1752,21 @@
                     min = nmin
                 if min > max:
                     min = max
-            yield ''.join(buf)
+            yield b''.join(buf)
             blen = 0
             buf = []
     if buf:
-        yield ''.join(buf)
+        yield b''.join(buf)
+
 
 def always(fn):
     return True
 
+
 def never(fn):
     return False
 
+
 def nogc(func):
     """disable garbage collector
 
@@ -1626,6 +1780,7 @@
     This garbage collector issue has been fixed in 2.7, but it still affects
     CPython's performance.
     """
+
     def wrapper(*args, **kwargs):
         gcenabled = gc.isenabled()
         gc.disable()
@@ -1634,12 +1789,15 @@
         finally:
             if gcenabled:
                 gc.enable()
+
     return wrapper
 
+
 if pycompat.ispypy:
     # PyPy runs slower with gc disabled
     nogc = lambda x: x
 
+
 def pathto(root, n1, n2):
     '''return the relative path from one place to another.
     root should use os.sep to separate directories
@@ -1656,18 +1814,19 @@
     if os.path.isabs(n1):
         if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
             return os.path.join(root, localpath(n2))
-        n2 = '/'.join((pconvert(root), n2))
-    a, b = splitpath(n1), n2.split('/')
+        n2 = b'/'.join((pconvert(root), n2))
+    a, b = splitpath(n1), n2.split(b'/')
     a.reverse()
     b.reverse()
     while a and b and a[-1] == b[-1]:
         a.pop()
         b.pop()
     b.reverse()
-    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
+    return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
+
 
 # the location of data files matching the source code
-if procutil.mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
+if procutil.mainfrozen() and getattr(sys, 'frozen', None) != b'macosx_app':
     # executable version (py2exe) doesn't support __file__
     datapath = os.path.dirname(pycompat.sysexecutable)
 else:
@@ -1675,8 +1834,10 @@
 
 i18n.setdatapath(datapath)
 
+
 def checksignature(func):
     '''wrap a function with code to check for calling errors'''
+
     def check(*args, **kwargs):
         try:
             return func(*args, **kwargs)
@@ -1687,23 +1848,25 @@
 
     return check
 
+
 # a whitelist of known filesystems where hardlinks work reliably
 _hardlinkfswhitelist = {
-    'apfs',
-    'btrfs',
-    'ext2',
-    'ext3',
-    'ext4',
-    'hfs',
-    'jfs',
-    'NTFS',
-    'reiserfs',
-    'tmpfs',
-    'ufs',
-    'xfs',
-    'zfs',
+    b'apfs',
+    b'btrfs',
+    b'ext2',
+    b'ext3',
+    b'ext4',
+    b'hfs',
+    b'jfs',
+    b'NTFS',
+    b'reiserfs',
+    b'tmpfs',
+    b'ufs',
+    b'xfs',
+    b'zfs',
 }
 
+
 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
     '''copy a file, preserving mode and optionally other stat info like
     atime/mtime
@@ -1734,7 +1897,7 @@
             oslink(src, dest)
             return
         except (IOError, OSError):
-            pass # fall back to normal copy
+            pass  # fall back to normal copy
     if os.path.islink(src):
         os.symlink(os.readlink(src), dest)
         # copytime is ignored for symlinks, but in general copytime isn't needed
@@ -1752,23 +1915,26 @@
                     if newstat.isambig(oldstat):
                         # stat of copied file is ambiguous to original one
                         advanced = (
-                            oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
+                            oldstat.stat[stat.ST_MTIME] + 1
+                        ) & 0x7FFFFFFF
                         os.utime(dest, (advanced, advanced))
         except shutil.Error as inst:
             raise error.Abort(str(inst))
 
+
 def copyfiles(src, dst, hardlink=None, progress=None):
     """Copy a directory tree using hardlinks if possible."""
     num = 0
 
     def settopic():
         if progress:
-            progress.topic = _('linking') if hardlink else _('copying')
+            progress.topic = _(b'linking') if hardlink else _(b'copying')
 
     if os.path.isdir(src):
         if hardlink is None:
-            hardlink = (os.stat(src).st_dev ==
-                        os.stat(os.path.dirname(dst)).st_dev)
+            hardlink = (
+                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
+            )
         settopic()
         os.mkdir(dst)
         for name, kind in listdir(src):
@@ -1778,8 +1944,10 @@
             num += n
     else:
         if hardlink is None:
-            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
-                        os.stat(os.path.dirname(dst)).st_dev)
+            hardlink = (
+                os.stat(os.path.dirname(src)).st_dev
+                == os.stat(os.path.dirname(dst)).st_dev
+            )
         settopic()
 
         if hardlink:
@@ -1796,12 +1964,34 @@
 
     return hardlink, num
 
+
 _winreservednames = {
-    'con', 'prn', 'aux', 'nul',
-    'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
-    'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
+    b'con',
+    b'prn',
+    b'aux',
+    b'nul',
+    b'com1',
+    b'com2',
+    b'com3',
+    b'com4',
+    b'com5',
+    b'com6',
+    b'com7',
+    b'com8',
+    b'com9',
+    b'lpt1',
+    b'lpt2',
+    b'lpt3',
+    b'lpt4',
+    b'lpt5',
+    b'lpt6',
+    b'lpt7',
+    b'lpt8',
+    b'lpt9',
 }
-_winreservedchars = ':*?"<>|'
+_winreservedchars = b':*?"<>|'
+
+
 def checkwinfilename(path):
     r'''Check that the base-relative path is a valid filename on Windows.
     Returns None if the path is ok, or a UI string describing the problem.
@@ -1826,28 +2016,42 @@
     >>> checkwinfilename(b"foo\\/bar")
     "directory name ends with '\\', which is invalid on Windows"
     '''
-    if path.endswith('\\'):
-        return _("filename ends with '\\', which is invalid on Windows")
-    if '\\/' in path:
-        return _("directory name ends with '\\', which is invalid on Windows")
-    for n in path.replace('\\', '/').split('/'):
+    if path.endswith(b'\\'):
+        return _(b"filename ends with '\\', which is invalid on Windows")
+    if b'\\/' in path:
+        return _(b"directory name ends with '\\', which is invalid on Windows")
+    for n in path.replace(b'\\', b'/').split(b'/'):
         if not n:
             continue
         for c in _filenamebytestr(n):
             if c in _winreservedchars:
-                return _("filename contains '%s', which is reserved "
-                         "on Windows") % c
+                return (
+                    _(
+                        b"filename contains '%s', which is reserved "
+                        b"on Windows"
+                    )
+                    % c
+                )
             if ord(c) <= 31:
-                return _("filename contains '%s', which is invalid "
-                         "on Windows") % stringutil.escapestr(c)
-        base = n.split('.')[0]
+                return _(
+                    b"filename contains '%s', which is invalid on Windows"
+                ) % stringutil.escapestr(c)
+        base = n.split(b'.')[0]
         if base and base.lower() in _winreservednames:
-            return _("filename contains '%s', which is reserved "
-                     "on Windows") % base
+            return (
+                _(b"filename contains '%s', which is reserved on Windows")
+                % base
+            )
         t = n[-1:]
-        if t in '. ' and n not in '..':
-            return _("filename ends with '%s', which is not allowed "
-                     "on Windows") % t
+        if t in b'. ' and n not in b'..':
+            return (
+                _(
+                    b"filename ends with '%s', which is not allowed "
+                    b"on Windows"
+                )
+                % t
+            )
+
 
 if pycompat.iswindows:
     checkosfilename = checkwinfilename
@@ -1859,6 +2063,7 @@
 if safehasattr(time, "perf_counter"):
     timer = time.perf_counter
 
+
 def makelock(info, pathname):
     """Create a lock file atomically if possible
 
@@ -1870,7 +2075,7 @@
     except OSError as why:
         if why.errno == errno.EEXIST:
             raise
-    except AttributeError: # no symlink in os
+    except AttributeError:  # no symlink in os
         pass
 
     flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
@@ -1878,17 +2083,19 @@
     os.write(ld, info)
     os.close(ld)
 
+
 def readlock(pathname):
     try:
         return readlink(pathname)
     except OSError as why:
         if why.errno not in (errno.EINVAL, errno.ENOSYS):
             raise
-    except AttributeError: # no symlink in os
+    except AttributeError:  # no symlink in os
         pass
-    with posixfile(pathname, 'rb') as fp:
+    with posixfile(pathname, b'rb') as fp:
         return fp.read()
 
+
 def fstat(fp):
     '''stat file object that may not have fileno method.'''
     try:
@@ -1896,8 +2103,10 @@
     except AttributeError:
         return os.stat(fp.name)
 
+
 # File system features
 
+
 def fscasesensitive(path):
     """
     Return true if the given path is on a case-sensitive filesystem
@@ -1911,7 +2120,7 @@
     if b == b2:
         b2 = b.lower()
         if b == b2:
-            return True # no evidence against case sensitivity
+            return True  # no evidence against case sensitivity
     p2 = os.path.join(d, b2)
     try:
         s2 = os.lstat(p2)
@@ -1921,18 +2130,21 @@
     except OSError:
         return True
 
+
 try:
     import re2
+
     _re2 = None
 except ImportError:
     _re2 = False
 
+
 class _re(object):
     def _checkre2(self):
         global _re2
         try:
             # check if match works, see issue3964
-            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
+            _re2 = bool(re2.match(r'\[([^\[]+)\]', b'[ui]'))
         except ImportError:
             _re2 = False
 
@@ -1946,9 +2158,9 @@
             self._checkre2()
         if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
             if flags & remod.IGNORECASE:
-                pat = '(?i)' + pat
+                pat = b'(?i)' + pat
             if flags & remod.MULTILINE:
-                pat = '(?m)' + pat
+                pat = b'(?m)' + pat
             try:
                 return re2.compile(pat)
             except re2.error:
@@ -1970,9 +2182,12 @@
         else:
             return remod.escape
 
+
 re = _re()
 
 _fspathcache = {}
+
+
 def fspath(name, root):
     '''Get name in the case stored in the filesystem
 
@@ -1983,6 +2198,7 @@
 
     The root should be normcase-ed, too.
     '''
+
     def _makefspathcacheentry(dir):
         return dict((normcase(n), n) for n in os.listdir(dir))
 
@@ -1990,7 +2206,7 @@
     if pycompat.osaltsep:
         seps = seps + pycompat.osaltsep
     # Protect backslashes. This gets silly very quickly.
-    seps.replace('\\','\\\\')
+    seps = seps.replace(b'\\', b'\\\\')
     pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
     dir = os.path.normpath(root)
     result = []
@@ -2013,7 +2229,8 @@
         result.append(found or part)
         dir = os.path.join(dir, part)
 
-    return ''.join(result)
+    return b''.join(result)
+
 
 def checknlink(testfile):
     '''check whether hardlink count reporting works properly'''
@@ -2022,10 +2239,13 @@
     # work around issue2543 (or testfile may get lost on Samba shares)
     f1, f2, fp = None, None, None
     try:
-        fd, f1 = pycompat.mkstemp(prefix='.%s-' % os.path.basename(testfile),
-                                  suffix='1~', dir=os.path.dirname(testfile))
+        fd, f1 = pycompat.mkstemp(
+            prefix=b'.%s-' % os.path.basename(testfile),
+            suffix=b'1~',
+            dir=os.path.dirname(testfile),
+        )
         os.close(fd)
-        f2 = '%s2~' % f1[:-2]
+        f2 = b'%s2~' % f1[:-2]
 
         oslink(f1, f2)
         # nlinks() may behave differently for files on Windows shares if
@@ -2044,10 +2264,15 @@
             except OSError:
                 pass
 
+
 def endswithsep(path):
     '''Check path ends with os.sep or os.altsep.'''
-    return (path.endswith(pycompat.ossep)
-            or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
+    return (
+        path.endswith(pycompat.ossep)
+        or pycompat.osaltsep
+        and path.endswith(pycompat.osaltsep)
+    )
+
 
 def splitpath(path):
     '''Split path by os.sep.
@@ -2057,6 +2282,7 @@
     function if needed.'''
     return path.split(pycompat.ossep)
 
+
 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
     """Create a temporary file with the same contents from name
 
@@ -2068,7 +2294,7 @@
     Returns the name of the temporary file.
     """
     d, fn = os.path.split(name)
-    fd, temp = pycompat.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
+    fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
     os.close(fd)
     # Temporary files are created with mode 0600, which is usually not
     # what we want.  If the original file already exists, just copy
@@ -2079,19 +2305,19 @@
         return temp
     try:
         try:
-            ifp = posixfile(name, "rb")
+            ifp = posixfile(name, b"rb")
         except IOError as inst:
             if inst.errno == errno.ENOENT:
                 return temp
             if not getattr(inst, 'filename', None):
                 inst.filename = name
             raise
-        ofp = posixfile(temp, "wb")
+        ofp = posixfile(temp, b"wb")
         for chunk in filechunkiter(ifp):
             ofp.write(chunk)
         ifp.close()
         ofp.close()
-    except: # re-raises
+    except:  # re-raises
         try:
             os.unlink(temp)
         except OSError:
@@ -2099,6 +2325,7 @@
         raise
     return temp
 
+
 class filestat(object):
     """help to exactly detect change of a file
 
@@ -2106,6 +2333,7 @@
     exists. Otherwise, it is None. This can avoid preparative
     'exists()' examination on client side of this class.
     """
+
     def __init__(self, stat):
         self.stat = stat
 
@@ -2131,9 +2359,11 @@
             # if ambiguity between stat of new and old file is
             # avoided, comparison of size, ctime and mtime is enough
             # to exactly detect change of a file regardless of platform
-            return (self.stat.st_size == old.stat.st_size and
-                    self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
-                    self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
+            return (
+                self.stat.st_size == old.stat.st_size
+                and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
+                and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
+            )
         except AttributeError:
             pass
         try:
@@ -2172,7 +2402,7 @@
         S[n].mtime", even if size of a file isn't changed.
         """
         try:
-            return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
+            return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
         except AttributeError:
             return False
 
@@ -2187,7 +2417,7 @@
 
         Otherwise, this returns True, as "ambiguity is avoided".
         """
-        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
+        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
         try:
             os.utime(path, (advanced, advanced))
         except OSError as inst:
@@ -2201,6 +2431,7 @@
     def __ne__(self, other):
         return not self == other
 
+
 class atomictempfile(object):
     '''writable file object that atomically updates a file
 
@@ -2214,11 +2445,15 @@
     useful only if target file is guarded by any lock (e.g. repo.lock
     or repo.wlock).
     '''
-    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
-        self.__name = name      # permanent name
-        self._tempname = mktempcopy(name, emptyok=('w' in mode),
-                                    createmode=createmode,
-                                    enforcewritable=('w' in mode))
+
+    def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
+        self.__name = name  # permanent name
+        self._tempname = mktempcopy(
+            name,
+            emptyok=(b'w' in mode),
+            createmode=createmode,
+            enforcewritable=(b'w' in mode),
+        )
 
         self._fp = posixfile(self._tempname, mode)
         self._checkambig = checkambig
@@ -2240,7 +2475,7 @@
                 newstat = filestat.frompath(filename)
                 if newstat.isambig(oldstat):
                     # stat of changed file is ambiguous to original one
-                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
+                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                     os.utime(filename, (advanced, advanced))
             else:
                 rename(self._tempname, filename)
@@ -2254,7 +2489,7 @@
             self._fp.close()
 
     def __del__(self):
-        if safehasattr(self, '_fp'): # constructor actually did something
+        if safehasattr(self, '_fp'):  # constructor actually did something
             self.discard()
 
     def __enter__(self):
@@ -2266,6 +2501,7 @@
         else:
             self.close()
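
Usage sketch: all-or-nothing replacement of a file (hypothetical name; not part of this changeset)::

    with atomictempfile(b'somefile', b'wb') as fp:
        fp.write(b'all or nothing')
    # a clean exit renames the temp copy over b'somefile';
    # an exception discards it and leaves the original untouched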
 
+
 def unlinkpath(f, ignoremissing=False, rmdir=True):
     """unlink and remove the directory if it is empty"""
     if ignoremissing:
@@ -2279,6 +2515,7 @@
         except OSError:
             pass
 
+
 def tryunlink(f):
     """Attempt to remove a file, ignoring ENOENT errors."""
     try:
@@ -2287,6 +2524,7 @@
         if e.errno != errno.ENOENT:
             raise
 
+
 def makedirs(name, mode=None, notindexed=False):
     """recursive directory creation with parent mode inheritance
 
@@ -2315,27 +2553,32 @@
     if mode is not None:
         os.chmod(name, mode)
 
+
 def readfile(path):
-    with open(path, 'rb') as fp:
+    with open(path, b'rb') as fp:
         return fp.read()
 
+
 def writefile(path, text):
-    with open(path, 'wb') as fp:
+    with open(path, b'wb') as fp:
         fp.write(text)
 
+
 def appendfile(path, text):
-    with open(path, 'ab') as fp:
+    with open(path, b'ab') as fp:
         fp.write(text)
 
+
 class chunkbuffer(object):
     """Allow arbitrary sized chunks of data to be efficiently read from an
     iterator over chunks of arbitrary size."""
 
     def __init__(self, in_iter):
         """in_iter is the iterator that's iterating over the input chunks."""
+
         def splitbig(chunks):
             for chunk in chunks:
-                if len(chunk) > 2**20:
+                if len(chunk) > 2 ** 20:
                     pos = 0
                     while pos < len(chunk):
                         end = pos + 2 ** 18
@@ -2343,6 +2586,7 @@
                         pos = end
                 else:
                     yield chunk
+
         self.iter = splitbig(in_iter)
         self._queue = collections.deque()
         self._chunkoffset = 0
@@ -2353,7 +2597,7 @@
 
         If size parameter is omitted, read everything"""
         if l is None:
-            return ''.join(self.iter)
+            return b''.join(self.iter)
 
         left = l
         buf = []
@@ -2361,7 +2605,7 @@
         while left > 0:
             # refill the queue
             if not queue:
-                target = 2**18
+                target = 2 ** 18
                 for chunk in self.iter:
                     queue.append(chunk)
                     target -= len(chunk)
@@ -2401,11 +2645,12 @@
 
             # Partial chunk needed.
             else:
-                buf.append(chunk[offset:offset + left])
+                buf.append(chunk[offset : offset + left])
                 self._chunkoffset += left
                 left -= chunkremaining
 
-        return ''.join(buf)
+        return b''.join(buf)
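
Arbitrary read sizes are served from the internal chunk queue (illustrative; not part of this changeset)::

    buf = chunkbuffer(iter([b'abc', b'defg']))
    buf.read(2)    # b'ab'
    buf.read(5)    # b'cdefg'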
+
 
 def filechunkiter(f, size=131072, limit=None):
     """Create a generator that produces the data in the file size
@@ -2428,6 +2673,7 @@
             limit -= len(s)
         yield s
 
+
 class cappedreader(object):
     """A file object proxy that allows reading up to N bytes.
 
@@ -2439,6 +2685,7 @@
     in addition to I/O that is performed by this instance. If there is,
     state tracking will get out of sync and unexpected results will ensue.
     """
+
     def __init__(self, fh, limit):
         """Allow reading up to <limit> bytes from <fh>."""
         self._fh = fh
@@ -2462,9 +2709,10 @@
         if res is None:
             return None
 
-        b[0:len(res)] = res
+        b[0 : len(res)] = res
         return len(res)
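
An ``io.BytesIO`` stand-in shows the capping (not part of this changeset)::

    import io
    capped = cappedreader(io.BytesIO(b'0123456789'), 4)
    capped.read(10)    # b'0123' -- never more than the 4-byte limit
    capped.read(10)    # b''     -- the limit is exhausted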
 
+
 def unitcountfn(*unittable):
     '''return a function that renders a readable count of some quantity'''
 
@@ -2476,6 +2724,7 @@
 
     return go
 
+
 def processlinerange(fromline, toline):
     """Check that linerange <fromline>:<toline> makes sense and return a
     0-based range.
@@ -2492,23 +2741,25 @@
     ParseError: fromline must be strictly positive
     """
     if toline - fromline < 0:
-        raise error.ParseError(_("line range must be positive"))
+        raise error.ParseError(_(b"line range must be positive"))
     if fromline < 1:
-        raise error.ParseError(_("fromline must be strictly positive"))
+        raise error.ParseError(_(b"fromline must be strictly positive"))
     return fromline - 1, toline
 
+
 bytecount = unitcountfn(
-    (100, 1 << 30, _('%.0f GB')),
-    (10, 1 << 30, _('%.1f GB')),
-    (1, 1 << 30, _('%.2f GB')),
-    (100, 1 << 20, _('%.0f MB')),
-    (10, 1 << 20, _('%.1f MB')),
-    (1, 1 << 20, _('%.2f MB')),
-    (100, 1 << 10, _('%.0f KB')),
-    (10, 1 << 10, _('%.1f KB')),
-    (1, 1 << 10, _('%.2f KB')),
-    (1, 1, _('%.0f bytes')),
-    )
+    (100, 1 << 30, _(b'%.0f GB')),
+    (10, 1 << 30, _(b'%.1f GB')),
+    (1, 1 << 30, _(b'%.2f GB')),
+    (100, 1 << 20, _(b'%.0f MB')),
+    (10, 1 << 20, _(b'%.1f MB')),
+    (1, 1 << 20, _(b'%.2f MB')),
+    (100, 1 << 10, _(b'%.0f KB')),
+    (10, 1 << 10, _(b'%.1f KB')),
+    (1, 1 << 10, _(b'%.2f KB')),
+    (1, 1, _(b'%.0f bytes')),
+)
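
The strongest matching row wins, so (illustrative; not part of this changeset)::

    bytecount(1 << 20)    # b'1.00 MB'
    bytecount(512)        # b'512 bytes'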
+
 
 class transformingwriter(object):
     """Writable file wrapper to transform data by function"""
@@ -2526,21 +2777,26 @@
     def write(self, data):
         return self._fp.write(self._encode(data))
 
+
 # Matches a single EOL, which can be either a CRLF (where repeated CRs
 # are removed) or a LF. We do not care about old Macintosh files, so a
 # stray CR is an error.
 _eolre = remod.compile(br'\r*\n')
 
+
 def tolf(s):
-    return _eolre.sub('\n', s)
+    return _eolre.sub(b'\n', s)
+
 
 def tocrlf(s):
-    return _eolre.sub('\r\n', s)
+    return _eolre.sub(b'\r\n', s)
+
 
 def _crlfwriter(fp):
     return transformingwriter(fp, tocrlf)
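
EOL conversion per the regex above (illustrative; not part of this changeset)::

    tolf(b'a\r\nb\n')      # b'a\nb\n'
    tocrlf(b'a\nb\r\n')    # b'a\r\nb\r\n'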
 
-if pycompat.oslinesep == '\r\n':
+
+if pycompat.oslinesep == b'\r\n':
     tonativeeol = tocrlf
     fromnativeeol = tolf
     nativeeolwriter = _crlfwriter
@@ -2549,8 +2805,10 @@
     fromnativeeol = pycompat.identity
     nativeeolwriter = pycompat.identity
 
-if (pyplatform.python_implementation() == 'CPython' and
-    sys.version_info < (3, 0)):
+if pyplatform.python_implementation() == b'CPython' and sys.version_info < (
+    3,
+    0,
+):
     # There is an issue in CPython that some IO methods do not handle EINTR
     # correctly. The following table shows what CPython version (and functions)
     # are affected (buggy: has the EINTR bug, okay: otherwise):
@@ -2578,13 +2836,14 @@
     if sys.version_info >= (2, 7, 4):
         # fp.readline deals with EINTR correctly, use it as a workaround.
         def _safeiterfile(fp):
-            return iter(fp.readline, '')
+            return iter(fp.readline, b'')
+
     else:
         # fp.read* are broken too, manually deal with EINTR in a stupid way.
         # note: this may block longer than necessary because of bufsize.
         def _safeiterfile(fp, bufsize=4096):
             fd = fp.fileno()
-            line = ''
+            line = b''
             while True:
                 try:
                     buf = os.read(fd, bufsize)
@@ -2595,11 +2854,11 @@
                     else:
                         raise
                 line += buf
-                if '\n' in buf:
+                if b'\n' in buf:
                     splitted = line.splitlines(True)
-                    line = ''
+                    line = b''
                     for l in splitted:
-                        if l[-1] == '\n':
+                        if l[-1] == b'\n':
                             yield l
                         else:
                             line = l
@@ -2616,19 +2875,24 @@
             return fp
         else:
             return _safeiterfile(fp)
+
+
 else:
     # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
     def iterfile(fp):
         return fp
 
+
 def iterlines(iterator):
     for chunk in iterator:
         for line in chunk.splitlines():
             yield line
 
+
 def expandpath(path):
     return os.path.expanduser(os.path.expandvars(path))
 
+
 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
     """Return the result of interpolating items in the mapping into string s.
 
@@ -2643,9 +2907,9 @@
     its escaping.
     """
     fn = fn or (lambda s: s)
-    patterns = '|'.join(mapping.keys())
+    patterns = b'|'.join(mapping.keys())
     if escape_prefix:
-        patterns += '|' + prefix
+        patterns += b'|' + prefix
         if len(prefix) > 1:
             prefix_char = prefix[1:]
         else:
@@ -2654,6 +2918,7 @@
     r = remod.compile(br'%s(%s)' % (prefix, patterns))
     return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
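
A minimal substitution sketch; note the prefix is spliced into the regex verbatim, so it must be regex-safe (illustrative; not part of this changeset)::

    interpolate(b'%', {b'foo': b'bar'}, b'say %foo')    # b'say bar'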
 
+
 def getport(port):
     """Return the port for a given network service.
 
@@ -2669,8 +2934,10 @@
     try:
         return socket.getservbyname(pycompat.sysstr(port))
     except socket.error:
-        raise error.Abort(_("no port number associated with service '%s'")
-                          % port)
+        raise error.Abort(
+            _(b"no port number associated with service '%s'") % port
+        )
+
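
Both numeric strings and service names resolve (illustrative; not part of this changeset)::

    getport(b'8080')     # 8080
    getport(b'https')    # 443, via socket.getservbyname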
 
 class url(object):
     r"""Reliable URL parser.
@@ -2746,38 +3013,38 @@
     <url scheme: 'http'>
     """
 
-    _safechars = "!~*'()+"
-    _safepchars = "/!~*'()+:\\"
-    _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
+    _safechars = b"!~*'()+"
+    _safepchars = b"/!~*'()+:\\"
+    _matchscheme = remod.compile(b'^[a-zA-Z0-9+.\\-]+:').match
 
     def __init__(self, path, parsequery=True, parsefragment=True):
         # We slowly chomp away at path until we have only the path left
         self.scheme = self.user = self.passwd = self.host = None
         self.port = self.path = self.query = self.fragment = None
         self._localpath = True
-        self._hostport = ''
+        self._hostport = b''
         self._origpath = path
 
-        if parsefragment and '#' in path:
-            path, self.fragment = path.split('#', 1)
+        if parsefragment and b'#' in path:
+            path, self.fragment = path.split(b'#', 1)
 
         # special case for Windows drive letters and UNC paths
-        if hasdriveletter(path) or path.startswith('\\\\'):
+        if hasdriveletter(path) or path.startswith(b'\\\\'):
             self.path = path
             return
 
         # For compatibility reasons, we can't handle bundle paths as
         # normal URLs
-        if path.startswith('bundle:'):
-            self.scheme = 'bundle'
+        if path.startswith(b'bundle:'):
+            self.scheme = b'bundle'
             path = path[7:]
-            if path.startswith('//'):
+            if path.startswith(b'//'):
                 path = path[2:]
             self.path = path
             return
 
         if self._matchscheme(path):
-            parts = path.split(':', 1)
+            parts = path.split(b':', 1)
             if parts[0]:
                 self.scheme, path = parts
                 self._localpath = False
@@ -2785,23 +3052,23 @@
         if not path:
             path = None
             if self._localpath:
-                self.path = ''
+                self.path = b''
                 return
         else:
             if self._localpath:
                 self.path = path
                 return
 
-            if parsequery and '?' in path:
-                path, self.query = path.split('?', 1)
+            if parsequery and b'?' in path:
+                path, self.query = path.split(b'?', 1)
                 if not path:
                     path = None
                 if not self.query:
                     self.query = None
 
             # // is required to specify a host/authority
-            if path and path.startswith('//'):
-                parts = path[2:].split('/', 1)
+            if path and path.startswith(b'//'):
+                parts = path[2:].split(b'/', 1)
                 if len(parts) > 1:
                     self.host, path = parts
                 else:
@@ -2812,32 +3079,41 @@
                     # path of file:///d is /d
                     # path of file:///d:/ is d:/, not /d:/
                     if path and not hasdriveletter(path):
-                        path = '/' + path
-
-            if self.host and '@' in self.host:
-                self.user, self.host = self.host.rsplit('@', 1)
-                if ':' in self.user:
-                    self.user, self.passwd = self.user.split(':', 1)
+                        path = b'/' + path
+
+            if self.host and b'@' in self.host:
+                self.user, self.host = self.host.rsplit(b'@', 1)
+                if b':' in self.user:
+                    self.user, self.passwd = self.user.split(b':', 1)
                 if not self.host:
                     self.host = None
 
             # Don't split on colons in IPv6 addresses without ports
-            if (self.host and ':' in self.host and
-                not (self.host.startswith('[') and self.host.endswith(']'))):
+            if (
+                self.host
+                and b':' in self.host
+                and not (
+                    self.host.startswith(b'[') and self.host.endswith(b']')
+                )
+            ):
                 self._hostport = self.host
-                self.host, self.port = self.host.rsplit(':', 1)
+                self.host, self.port = self.host.rsplit(b':', 1)
                 if not self.host:
                     self.host = None
 
-            if (self.host and self.scheme == 'file' and
-                self.host not in ('localhost', '127.0.0.1', '[::1]')):
-                raise error.Abort(_('file:// URLs can only refer to localhost'))
+            if (
+                self.host
+                and self.scheme == b'file'
+                and self.host not in (b'localhost', b'127.0.0.1', b'[::1]')
+            ):
+                raise error.Abort(
+                    _(b'file:// URLs can only refer to localhost')
+                )
 
         self.path = path
 
         # leave the query string escaped
-        for a in ('user', 'passwd', 'host', 'port',
-                  'path', 'fragment'):
+        for a in (b'user', b'passwd', b'host', b'port', b'path', b'fragment'):
             v = getattr(self, a)
             if v is not None:
                 setattr(self, a, urlreq.unquote(v))
@@ -2845,12 +3121,20 @@
     @encoding.strmethod
     def __repr__(self):
         attrs = []
-        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
-                  'query', 'fragment'):
+        for a in (
+            b'scheme',
+            b'user',
+            b'passwd',
+            b'host',
+            b'port',
+            b'path',
+            b'query',
+            b'fragment',
+        ):
             v = getattr(self, a)
             if v is not None:
-                attrs.append('%s: %r' % (a, pycompat.bytestr(v)))
-        return '<url %s>' % ', '.join(attrs)
+                attrs.append(b'%s: %r' % (a, pycompat.bytestr(v)))
+        return b'<url %s>' % b', '.join(attrs)
 
     def __bytes__(self):
         r"""Join the URL's components back into a URL string.
@@ -2888,35 +3172,38 @@
         """
         if self._localpath:
             s = self.path
-            if self.scheme == 'bundle':
-                s = 'bundle:' + s
+            if self.scheme == b'bundle':
+                s = b'bundle:' + s
             if self.fragment:
-                s += '#' + self.fragment
+                s += b'#' + self.fragment
             return s
 
-        s = self.scheme + ':'
+        s = self.scheme + b':'
         if self.user or self.passwd or self.host:
-            s += '//'
-        elif self.scheme and (not self.path or self.path.startswith('/')
-                              or hasdriveletter(self.path)):
-            s += '//'
+            s += b'//'
+        elif self.scheme and (
+            not self.path
+            or self.path.startswith(b'/')
+            or hasdriveletter(self.path)
+        ):
+            s += b'//'
             if hasdriveletter(self.path):
-                s += '/'
+                s += b'/'
         if self.user:
             s += urlreq.quote(self.user, safe=self._safechars)
         if self.passwd:
-            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
+            s += b':' + urlreq.quote(self.passwd, safe=self._safechars)
         if self.user or self.passwd:
-            s += '@'
+            s += b'@'
         if self.host:
-            if not (self.host.startswith('[') and self.host.endswith(']')):
+            if not (self.host.startswith(b'[') and self.host.endswith(b']')):
                 s += urlreq.quote(self.host)
             else:
                 s += self.host
         if self.port:
-            s += ':' + urlreq.quote(self.port)
+            s += b':' + urlreq.quote(self.port)
         if self.host:
-            s += '/'
+            s += b'/'
         if self.path:
             # TODO: similar to the query string, we should not unescape the
             # path when we store it, the path might contain '%2f' = '/',
@@ -2924,9 +3211,9 @@
             s += urlreq.quote(self.path, safe=self._safepchars)
         if self.query:
             # we store the query in escaped form.
-            s += '?' + self.query
+            s += b'?' + self.query
         if self.fragment is not None:
-            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
+            s += b'#' + urlreq.quote(self.fragment, safe=self._safepchars)
         return s
 
     __str__ = encoding.strmethod(__bytes__)
@@ -2944,47 +3231,54 @@
         # URIs must not contain credentials. The host is passed in the
         # URIs list because Python < 2.4.3 uses only that to search for
         # a password.
-        return (s, (None, (s, self.host),
-                    self.user, self.passwd or ''))
+        return (s, (None, (s, self.host), self.user, self.passwd or b''))
 
     def isabs(self):
-        if self.scheme and self.scheme != 'file':
-            return True # remote URL
+        if self.scheme and self.scheme != b'file':
+            return True  # remote URL
         if hasdriveletter(self.path):
-            return True # absolute for our purposes - can't be joined()
+            return True  # absolute for our purposes - can't be joined()
         if self.path.startswith(br'\\'):
-            return True # Windows UNC path
-        if self.path.startswith('/'):
-            return True # POSIX-style
+            return True  # Windows UNC path
+        if self.path.startswith(b'/'):
+            return True  # POSIX-style
         return False
 
     def localpath(self):
-        if self.scheme == 'file' or self.scheme == 'bundle':
-            path = self.path or '/'
+        if self.scheme == b'file' or self.scheme == b'bundle':
+            path = self.path or b'/'
             # For Windows, we need to promote hosts containing drive
             # letters to paths with drive letters.
             if hasdriveletter(self._hostport):
-                path = self._hostport + '/' + self.path
-            elif (self.host is not None and self.path
-                  and not hasdriveletter(path)):
-                path = '/' + path
+                path = self._hostport + b'/' + self.path
+            elif (
+                self.host is not None and self.path and not hasdriveletter(path)
+            ):
+                path = b'/' + path
             return path
         return self._origpath
 
     def islocal(self):
         '''whether localpath will return something that posixfile can open'''
-        return (not self.scheme or self.scheme == 'file'
-                or self.scheme == 'bundle')
+        return (
+            not self.scheme
+            or self.scheme == b'file'
+            or self.scheme == b'bundle'
+        )
+
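A minimal sketch of what the bytes-only parser yields, assuming the
behaviour shown in the doctests above::

    u = url(b'http://user:pass@example.com:8080/path?key=val#frag')
    # u.scheme == b'http', u.user == b'user', u.passwd == b'pass'
    # u.host == b'example.com', u.port == b'8080', u.path == b'path'
    # u.query == b'key=val', u.fragment == b'frag'
    bytes(u)  # reassembles the URL, quoting user/passwd/host/path as needed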
 
 def hasscheme(path):
     return bool(url(path).scheme)
 
+
 def hasdriveletter(path):
-    return path and path[1:2] == ':' and path[0:1].isalpha()
+    return path and path[1:2] == b':' and path[0:1].isalpha()
+
 
 def urllocalpath(path):
     return url(path, parsequery=False, parsefragment=False).localpath()
 
+
 def checksafessh(path):
     """check if a path / url is a potentially unsafe ssh exploit (SEC)
 
@@ -2996,38 +3290,43 @@
     Raises an error.Abort when the url is unsafe.
     """
     path = urlreq.unquote(path)
-    if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
-        raise error.Abort(_('potentially unsafe url: %r') %
-                          (pycompat.bytestr(path),))
+    if path.startswith(b'ssh://-') or path.startswith(b'svn+ssh://-'):
+        raise error.Abort(
+            _(b'potentially unsafe url: %r') % (pycompat.bytestr(path),)
+        )
+
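A sketch of the check in action, per the docstring above::

    checksafessh(b'ssh://example.com/repo')        # returns quietly
    checksafessh(b'ssh://-oProxyCommand=x/repo')   # raises error.Abort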
 
 def hidepassword(u):
     '''hide user credential in a url string'''
     u = url(u)
     if u.passwd:
-        u.passwd = '***'
+        u.passwd = b'***'
     return bytes(u)
 
+
 def removeauth(u):
     '''remove all authentication information from a url string'''
     u = url(u)
     u.user = u.passwd = None
     return bytes(u)
 
+
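Minimal sketches of the two helpers, assuming the url round-tripping
shown earlier::

    hidepassword(b'http://user:secret@example.com/repo')
    # -> b'http://user:***@example.com/repo'
    removeauth(b'http://user:secret@example.com/repo')
    # -> b'http://example.com/repo'
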
 timecount = unitcountfn(
-    (1, 1e3, _('%.0f s')),
-    (100, 1, _('%.1f s')),
-    (10, 1, _('%.2f s')),
-    (1, 1, _('%.3f s')),
-    (100, 0.001, _('%.1f ms')),
-    (10, 0.001, _('%.2f ms')),
-    (1, 0.001, _('%.3f ms')),
-    (100, 0.000001, _('%.1f us')),
-    (10, 0.000001, _('%.2f us')),
-    (1, 0.000001, _('%.3f us')),
-    (100, 0.000000001, _('%.1f ns')),
-    (10, 0.000000001, _('%.2f ns')),
-    (1, 0.000000001, _('%.3f ns')),
-    )
+    (1, 1e3, _(b'%.0f s')),
+    (100, 1, _(b'%.1f s')),
+    (10, 1, _(b'%.2f s')),
+    (1, 1, _(b'%.3f s')),
+    (100, 0.001, _(b'%.1f ms')),
+    (10, 0.001, _(b'%.2f ms')),
+    (1, 0.001, _(b'%.3f ms')),
+    (100, 0.000001, _(b'%.1f us')),
+    (10, 0.000001, _(b'%.2f us')),
+    (1, 0.000001, _(b'%.3f us')),
+    (100, 0.000000001, _(b'%.1f ns')),
+    (10, 0.000000001, _(b'%.2f ns')),
+    (1, 0.000000001, _(b'%.3f ns')),
+)
+
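timecount() returns the first row whose threshold the value meets,
assuming unitcountfn's usual first-match behaviour::

    timecount(12.5)     # -> b'12.50 s'  (the (10, 1) row)
    timecount(0.0042)   # -> b'4.200 ms' (the (1, 0.001) row)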
 
 @attr.s
 class timedcmstats(object):
@@ -3043,10 +3342,11 @@
     level = attr.ib(default=1)
 
     def __bytes__(self):
-        return timecount(self.elapsed) if self.elapsed else '<unknown>'
+        return timecount(self.elapsed) if self.elapsed else b'<unknown>'
 
     __str__ = encoding.strmethod(__bytes__)
 
+
 @contextlib.contextmanager
 def timedcm(whencefmt, *whenceargs):
     """A context manager that produces timing information for a given context.
@@ -3066,8 +3366,10 @@
         timing_stats.elapsed = timer() - timing_stats.start
         timedcm._nested -= 1
 
+
 timedcm._nested = 0
 
+
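A usage sketch for the context manager; filename and parse are
placeholders::

    with timedcm(b'parsing %s', filename) as stats:
        parse(filename)
    stats.elapsed   # elapsed seconds as a float
    bytes(stats)    # rendered through timecount(), e.g. b'4.200 ms'
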
 def timed(func):
     '''Report the execution time of a function call to stderr.
 
@@ -3083,14 +3385,29 @@
         with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
             result = func(*args, **kwargs)
         stderr = procutil.stderr
-        stderr.write('%s%s: %s\n' % (
-            ' ' * time_stats.level * 2, pycompat.bytestr(func.__name__),
-            time_stats))
+        stderr.write(
+            b'%s%s: %s\n'
+            % (
+                b' ' * time_stats.level * 2,
+                pycompat.bytestr(func.__name__),
+                time_stats,
+            )
+        )
         return result
+
     return wrapper
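
The decorator form reports through the same machinery; with a toy
function::

    @timed
    def reload():
        pass

    reload()   # writes e.g. '  reload: 0.001 ms' to stderr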
 
-_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
-              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
+
+_sizeunits = (
+    (b'm', 2 ** 20),
+    (b'k', 2 ** 10),
+    (b'g', 2 ** 30),
+    (b'kb', 2 ** 10),
+    (b'mb', 2 ** 20),
+    (b'gb', 2 ** 30),
+    (b'b', 1),
+)
+
 
 def sizetoint(s):
     '''Convert a space specifier to a byte count.
@@ -3106,10 +3423,11 @@
     try:
         for k, u in _sizeunits:
             if t.endswith(k):
-                return int(float(t[:-len(k)]) * u)
+                return int(float(t[: -len(k)]) * u)
         return int(t)
     except ValueError:
-        raise error.ParseError(_("couldn't parse size: %s") % s)
+        raise error.ParseError(_(b"couldn't parse size: %s") % s)
+
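The doctests in the full docstring illustrate the conversion::

    sizetoint(b'30')      # -> 30
    sizetoint(b'2.2kb')   # -> 2252
    sizetoint(b'6M')      # -> 6291456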
 
 class hooks(object):
     '''A collection of hook functions that can be used to extend a
@@ -3129,7 +3447,8 @@
             results.append(hook(*args))
         return results
 
-def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
+
+def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
     '''Yields lines for a nicely formatted stacktrace.
     Skips the 'skip' last entries, then returns the last 'depth' entries.
     Each file+linenumber is formatted according to fileline.
@@ -3141,9 +3460,10 @@
 
     Not to be used in production code, but very convenient while developing.
     '''
-    entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
-        for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
-        ][-depth:]
+    entries = [
+        (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
+        for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
+    ][-depth:]
     if entries:
         fnmax = max(len(entry[0]) for entry in entries)
         for fnln, func in entries:
@@ -3152,8 +3472,14 @@
             else:
                 yield line % (fnmax, fnln, func)
 
-def debugstacktrace(msg='stacktrace', skip=0,
-                    f=procutil.stderr, otherf=procutil.stdout, depth=0):
+
+def debugstacktrace(
+    msg=b'stacktrace',
+    skip=0,
+    f=procutil.stderr,
+    otherf=procutil.stdout,
+    depth=0,
+):
     '''Writes a message to f (stderr) with a nicely formatted stacktrace.
     Skips the 'skip' entries closest to the call, then shows 'depth' entries.
     By default it will flush stdout first.
@@ -3162,11 +3488,12 @@
     '''
     if otherf:
         otherf.flush()
-    f.write('%s at:\n' % msg.rstrip())
+    f.write(b'%s at:\n' % msg.rstrip())
     for line in getstackframes(skip + 1, depth=depth):
         f.write(line)
     f.flush()
 
+
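A typical call, per the docstring::

    debugstacktrace(b'cache miss', skip=1, depth=5)
    # writes 'cache miss at:' plus the five innermost frames to stderr
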
 class dirs(object):
     '''a multiset of directory names from a dirstate or manifest'''
 
@@ -3174,12 +3501,13 @@
         self._dirs = {}
         addpath = self.addpath
         if isinstance(map, dict) and skip is not None:
-            for f, s in map.iteritems():
+            for f, s in pycompat.iteritems(map):
                 if s[0] != skip:
                     addpath(f)
         elif skip is not None:
-            raise error.ProgrammingError("skip character is only supported "
-                                         "with a dict source")
+            raise error.ProgrammingError(
+                b"skip character is only supported with a dict source"
+            )
         else:
             for f in map:
                 addpath(f)
@@ -3206,23 +3534,26 @@
     def __contains__(self, d):
         return d in self._dirs
 
+
 if safehasattr(parsers, 'dirs'):
     dirs = parsers.dirs
 
 if rustdirs is not None:
     dirs = rustdirs
 
+
 def finddirs(path):
-    pos = path.rfind('/')
+    pos = path.rfind(b'/')
     while pos != -1:
         yield path[:pos]
-        pos = path.rfind('/', 0, pos)
-    yield ''
+        pos = path.rfind(b'/', 0, pos)
+    yield b''
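
A sketch of the two building blocks (the C or Rust dirs substituted
above exposes the same interface)::

    list(finddirs(b'a/b/c'))   # -> [b'a/b', b'a', b'']
    d = dirs([b'a/b/c', b'a/d'])
    b'a' in d                  # -> True
    b'a/b' in d                # -> True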
 
 
 # convenient shortcut
 dst = debugstacktrace
 
+
 def safename(f, tag, ctx, others=None):
     """
     Generate a name that is safe to rename f to in the given context.
@@ -3238,23 +3569,26 @@
     if others is None:
         others = set()
 
-    fn = '%s~%s' % (f, tag)
+    fn = b'%s~%s' % (f, tag)
     if fn not in ctx and fn not in others:
         return fn
     for n in itertools.count(1):
-        fn = '%s~%s~%s' % (f, tag, n)
+        fn = b'%s~%s~%s' % (f, tag, n)
         if fn not in ctx and fn not in others:
             return fn
 
+
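A sketch with a hypothetical changectx ``ctx``::

    safename(b'foo', b'istate', ctx)
    # -> b'foo~istate' when that name is free in ctx and others,
    #    otherwise b'foo~istate~1', b'foo~istate~2', ...
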
 def readexactly(stream, n):
     '''read n bytes from stream.read and abort if fewer bytes are available'''
     s = stream.read(n)
     if len(s) < n:
-        raise error.Abort(_("stream ended unexpectedly"
-                           " (got %d bytes, expected %d)")
-                          % (len(s), n))
+        raise error.Abort(
+            _(b"stream ended unexpectedly (got %d bytes, expected %d)")
+            % (len(s), n)
+        )
     return s
 
+
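For example, with an in-memory stream::

    import io
    readexactly(io.BytesIO(b'abcdef'), 4)   # -> b'abcd'
    readexactly(io.BytesIO(b'ab'), 4)       # raises error.Abort
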
 def uvarintencode(value):
     """Encode an unsigned integer value to a varint.
 
@@ -3279,18 +3613,18 @@
     ProgrammingError: negative value for uvarint: -1
     """
     if value < 0:
-        raise error.ProgrammingError('negative value for uvarint: %d'
-                                     % value)
-    bits = value & 0x7f
+        raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
+    bits = value & 0x7F
     value >>= 7
     bytes = []
     while value:
         bytes.append(pycompat.bytechr(0x80 | bits))
-        bits = value & 0x7f
+        bits = value & 0x7F
         value >>= 7
     bytes.append(pycompat.bytechr(bits))
 
-    return ''.join(bytes)
+    return b''.join(bytes)
+
 
 def uvarintdecodestream(fh):
     """Decode an unsigned variable length integer from a stream.
@@ -3320,7 +3654,7 @@
     shift = 0
     while True:
         byte = ord(readexactly(fh, 1))
-        result |= ((byte & 0x7f) << shift)
+        result |= (byte & 0x7F) << shift
         if not (byte & 0x80):
             return result
         shift += 7
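
A round trip through the pair, consistent with the doctests above::

    import io
    data = uvarintencode(1337)              # -> b'\xb9\n'
    uvarintdecodestream(io.BytesIO(data))   # -> 1337
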
--- a/mercurial/utils/cborutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/utils/cborutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -46,11 +46,14 @@
 
 # Indefinite types begin with their major type ORd with information value 31.
 BEGIN_INDEFINITE_BYTESTRING = struct.pack(
-    r'>B', MAJOR_TYPE_BYTESTRING << 5 | SUBTYPE_INDEFINITE)
+    r'>B', MAJOR_TYPE_BYTESTRING << 5 | SUBTYPE_INDEFINITE
+)
 BEGIN_INDEFINITE_ARRAY = struct.pack(
-    r'>B', MAJOR_TYPE_ARRAY << 5 | SUBTYPE_INDEFINITE)
+    r'>B', MAJOR_TYPE_ARRAY << 5 | SUBTYPE_INDEFINITE
+)
 BEGIN_INDEFINITE_MAP = struct.pack(
-    r'>B', MAJOR_TYPE_MAP << 5 | SUBTYPE_INDEFINITE)
+    r'>B', MAJOR_TYPE_MAP << 5 | SUBTYPE_INDEFINITE
+)
 
 ENCODED_LENGTH_1 = struct.Struct(r'>B')
 ENCODED_LENGTH_2 = struct.Struct(r'>BB')
@@ -62,6 +65,7 @@
 BREAK = b'\xff'
 BREAK_INT = 255
 
+
 def encodelength(majortype, length):
     """Obtain a value encoding the major type and its length."""
     if length < 24:
@@ -75,10 +79,12 @@
     else:
         return ENCODED_LENGTH_5.pack(majortype << 5 | 27, length)
 
+
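For instance, using the constants defined above::

    encodelength(MAJOR_TYPE_BYTESTRING, 5)  # -> b'\x45' (major 2, length 5)
    encodelength(MAJOR_TYPE_UINT, 500)      # -> b'\x19\x01\xf4' (2-byte form)
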
 def streamencodebytestring(v):
     yield encodelength(MAJOR_TYPE_BYTESTRING, len(v))
     yield v
 
+
 def streamencodebytestringfromiter(it):
     """Convert an iterator of chunks to an indefinite bytestring.
 
@@ -93,6 +99,7 @@
 
     yield BREAK
 
+
 def streamencodeindefinitebytestring(source, chunksize=65536):
     """Given a large source buffer, emit as an indefinite length bytestring.
 
@@ -104,7 +111,7 @@
     l = len(source)
 
     while True:
-        chunk = source[i:i + chunksize]
+        chunk = source[i : i + chunksize]
         i += len(chunk)
 
         yield encodelength(MAJOR_TYPE_BYTESTRING, len(chunk))
@@ -115,15 +122,17 @@
 
     yield BREAK
 
+
 def streamencodeint(v):
     if v >= 18446744073709551616 or v < -18446744073709551616:
-        raise ValueError('big integers not supported')
+        raise ValueError(b'big integers not supported')
 
     if v >= 0:
         yield encodelength(MAJOR_TYPE_UINT, v)
     else:
         yield encodelength(MAJOR_TYPE_NEGINT, abs(v) - 1)
 
+
 def streamencodearray(l):
     """Encode a known size iterable to an array."""
 
@@ -133,6 +142,7 @@
         for chunk in streamencode(i):
             yield chunk
 
+
 def streamencodearrayfromiter(it):
     """Encode an iterator of items to an indefinite length array."""
 
@@ -144,9 +154,11 @@
 
     yield BREAK
 
+
 def _mixedtypesortkey(v):
     return type(v).__name__, v
 
+
 def streamencodeset(s):
     # https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml defines
     # semantic tag 258 for finite sets.
@@ -155,6 +167,7 @@
     for chunk in streamencodearray(sorted(s, key=_mixedtypesortkey)):
         yield chunk
 
+
 def streamencodemap(d):
     """Encode dictionary to a generator.
 
@@ -162,13 +175,15 @@
     """
     yield encodelength(MAJOR_TYPE_MAP, len(d))
 
-    for key, value in sorted(d.iteritems(),
-                             key=lambda x: _mixedtypesortkey(x[0])):
+    for key, value in sorted(
+        pycompat.iteritems(d), key=lambda x: _mixedtypesortkey(x[0])
+    ):
         for chunk in streamencode(key):
             yield chunk
         for chunk in streamencode(value):
             yield chunk
 
+
 def streamencodemapfromiter(it):
     """Given an iterable of (key, value), encode to an indefinite length map."""
     yield BEGIN_INDEFINITE_MAP
@@ -181,14 +196,17 @@
 
     yield BREAK
 
+
 def streamencodebool(b):
     # major type 7, simple value 20 and 21.
     yield b'\xf5' if b else b'\xf4'
 
+
 def streamencodenone(v):
     # major type 7, simple value 22.
     yield b'\xf6'
 
+
 STREAM_ENCODERS = {
     bytes: streamencodebytestring,
     int: streamencodeint,
@@ -201,6 +219,7 @@
     type(None): streamencodenone,
 }
 
+
 def streamencode(v):
     """Encode a value in a streaming manner.
 
@@ -222,24 +241,31 @@
             break
 
     if not fn:
-        raise ValueError('do not know how to encode %s' % type(v))
+        raise ValueError(b'do not know how to encode %s' % type(v))
 
     return fn(v)
 
+
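Putting the encoders together, a one-entry map comes out as, e.g.::

    b''.join(streamencode({b'name': b'value'}))
    # -> b'\xa1DnameEvalue'
    # (0xa1 = map of one; 0x44/0x45 = bytestrings of length 4 and 5)
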
 class CBORDecodeError(Exception):
     """Represents an error decoding CBOR."""
 
+
 if sys.version_info.major >= 3:
+
     def _elementtointeger(b, i):
         return b[i]
+
+
 else:
+
     def _elementtointeger(b, i):
         return ord(b[i])
 
+
 STRUCT_BIG_UBYTE = struct.Struct(r'>B')
-STRUCT_BIG_USHORT = struct.Struct('>H')
-STRUCT_BIG_ULONG = struct.Struct('>L')
-STRUCT_BIG_ULONGLONG = struct.Struct('>Q')
+STRUCT_BIG_USHORT = struct.Struct(b'>H')
+STRUCT_BIG_ULONG = struct.Struct(b'>L')
+STRUCT_BIG_ULONGLONG = struct.Struct(b'>Q')
 
 SPECIAL_NONE = 0
 SPECIAL_START_INDEFINITE_BYTESTRING = 1
@@ -248,6 +274,7 @@
 SPECIAL_START_SET = 4
 SPECIAL_INDEFINITE_BREAK = 5
 
+
 def decodeitem(b, offset=0):
     """Decode a new CBOR value from a buffer at offset.
 
@@ -301,8 +328,9 @@
     elif majortype == MAJOR_TYPE_BYTESTRING:
         # Beginning of bytestrings are treated as uints in order to
         # decode their length, which may be indefinite.
-        complete, size, readcount = decodeuint(subtype, b, offset,
-                                               allowindefinite=True)
+        complete, size, readcount = decodeuint(
+            subtype, b, offset, allowindefinite=True
+        )
 
         # We don't know the size of the bytestring. It must be a definite
         # length since the indefinite subtype would be encoded in the initial
@@ -314,7 +342,7 @@
         if size is not None:
             # And the data is available in the buffer.
             if offset + readcount + size <= len(b):
-                value = b[offset + readcount:offset + readcount + size]
+                value = b[offset + readcount : offset + readcount + size]
                 return True, value, readcount + size + 1, SPECIAL_NONE
 
             # And we need more data in order to return the bytestring.
@@ -327,7 +355,7 @@
             return True, None, 1, SPECIAL_START_INDEFINITE_BYTESTRING
 
     elif majortype == MAJOR_TYPE_STRING:
-        raise CBORDecodeError('string major type not supported')
+        raise CBORDecodeError(b'string major type not supported')
 
     elif majortype == MAJOR_TYPE_ARRAY:
         # Beginning of arrays are treated as uints in order to decode their
@@ -367,20 +395,22 @@
             if offset + readcount >= len(b):
                 return False, None, -1, SPECIAL_NONE
 
-            complete, size, readcount2, special = decodeitem(b,
-                                                             offset + readcount)
+            complete, size, readcount2, special = decodeitem(
+                b, offset + readcount
+            )
 
             if not complete:
                 return False, None, readcount2, SPECIAL_NONE
 
             if special != SPECIAL_START_ARRAY:
-                raise CBORDecodeError('expected array after finite set '
-                                      'semantic tag')
+                raise CBORDecodeError(
+                    b'expected array after finite set semantic tag'
+                )
 
             return True, size, readcount + readcount2 + 1, SPECIAL_START_SET
 
         else:
-            raise CBORDecodeError('semantic tag %d not allowed' % tagvalue)
+            raise CBORDecodeError(b'semantic tag %d not allowed' % tagvalue)
 
     elif majortype == MAJOR_TYPE_SPECIAL:
         # Only specific values for the information field are allowed.
@@ -394,10 +424,11 @@
             return True, None, 1, SPECIAL_INDEFINITE_BREAK
         # If value is 24, subtype is in next byte.
         else:
-            raise CBORDecodeError('special type %d not allowed' % subtype)
+            raise CBORDecodeError(b'special type %d not allowed' % subtype)
     else:
         assert False
 
+
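Two illustrative calls, following the return convention described in
the docstring::

    decodeitem(b'\x18\x2a')  # -> (True, 42, 2, SPECIAL_NONE): complete uint
    decodeitem(b'\x18')      # -> (False, None, -1, SPECIAL_NONE): 1 byte short
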
 def decodeuint(subtype, b, offset=0, allowindefinite=False):
     """Decode an unsigned integer.
 
@@ -426,10 +457,11 @@
         if allowindefinite:
             return True, None, 0
         else:
-            raise CBORDecodeError('indefinite length uint not allowed here')
+            raise CBORDecodeError(b'indefinite length uint not allowed here')
     elif subtype >= 28:
-        raise CBORDecodeError('unsupported subtype on integer type: %d' %
-                              subtype)
+        raise CBORDecodeError(
+            b'unsupported subtype on integer type: %d' % subtype
+        )
 
     if subtype == 24:
         s = STRUCT_BIG_UBYTE
@@ -440,13 +472,14 @@
     elif subtype == 27:
         s = STRUCT_BIG_ULONGLONG
     else:
-        raise CBORDecodeError('bounds condition checking violation')
+        raise CBORDecodeError(b'bounds condition checking violation')
 
     if len(b) - offset >= s.size:
         return True, s.unpack_from(b, offset)[0], s.size
     else:
         return False, None, len(b) - offset - s.size
 
+
 class bytestringchunk(bytes):
     """Represents a chunk/segment in an indefinite length bytestring.
 
@@ -462,6 +495,7 @@
 
         return self
 
+
 class sansiodecoder(object):
     """A CBOR decoder that doesn't perform its own I/O.
 
@@ -606,40 +640,38 @@
                     self._decodedvalues.append(value)
 
                 elif special == SPECIAL_START_ARRAY:
-                    self._collectionstack.append({
-                        'remaining': value,
-                        'v': [],
-                    })
+                    self._collectionstack.append(
+                        {b'remaining': value, b'v': [],}
+                    )
                     self._state = self._STATE_WANT_ARRAY_VALUE
 
                 elif special == SPECIAL_START_MAP:
-                    self._collectionstack.append({
-                        'remaining': value,
-                        'v': {},
-                    })
+                    self._collectionstack.append(
+                        {b'remaining': value, b'v': {},}
+                    )
                     self._state = self._STATE_WANT_MAP_KEY
 
                 elif special == SPECIAL_START_SET:
-                    self._collectionstack.append({
-                        'remaining': value,
-                        'v': set(),
-                    })
+                    self._collectionstack.append(
+                        {b'remaining': value, b'v': set(),}
+                    )
                     self._state = self._STATE_WANT_SET_VALUE
 
                 elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
                     self._state = self._STATE_WANT_BYTESTRING_CHUNK_FIRST
 
                 else:
-                    raise CBORDecodeError('unhandled special state: %d' %
-                                          special)
+                    raise CBORDecodeError(
+                        b'unhandled special state: %d' % special
+                    )
 
             # This value becomes an element of the current array.
             elif self._state == self._STATE_WANT_ARRAY_VALUE:
                 # Simple values get appended.
                 if special == SPECIAL_NONE:
                     c = self._collectionstack[-1]
-                    c['v'].append(value)
-                    c['remaining'] -= 1
+                    c[b'v'].append(value)
+                    c[b'remaining'] -= 1
 
                     # self._state doesn't need changed.
 
@@ -648,13 +680,12 @@
                     lastc = self._collectionstack[-1]
                     newvalue = []
 
-                    lastc['v'].append(newvalue)
-                    lastc['remaining'] -= 1
+                    lastc[b'v'].append(newvalue)
+                    lastc[b'remaining'] -= 1
 
-                    self._collectionstack.append({
-                        'remaining': value,
-                        'v': newvalue,
-                    })
+                    self._collectionstack.append(
+                        {b'remaining': value, b'v': newvalue,}
+                    )
 
                     # self._state doesn't need changed.
 
@@ -663,13 +694,12 @@
                     lastc = self._collectionstack[-1]
                     newvalue = {}
 
-                    lastc['v'].append(newvalue)
-                    lastc['remaining'] -= 1
+                    lastc[b'v'].append(newvalue)
+                    lastc[b'remaining'] -= 1
 
-                    self._collectionstack.append({
-                        'remaining': value,
-                        'v': newvalue
-                    })
+                    self._collectionstack.append(
+                        {b'remaining': value, b'v': newvalue}
+                    )
 
                     self._state = self._STATE_WANT_MAP_KEY
 
@@ -677,23 +707,26 @@
                     lastc = self._collectionstack[-1]
                     newvalue = set()
 
-                    lastc['v'].append(newvalue)
-                    lastc['remaining'] -= 1
+                    lastc[b'v'].append(newvalue)
+                    lastc[b'remaining'] -= 1
 
-                    self._collectionstack.append({
-                        'remaining': value,
-                        'v': newvalue,
-                    })
+                    self._collectionstack.append(
+                        {b'remaining': value, b'v': newvalue,}
+                    )
 
                     self._state = self._STATE_WANT_SET_VALUE
 
                 elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
-                    raise CBORDecodeError('indefinite length bytestrings '
-                                          'not allowed as array values')
+                    raise CBORDecodeError(
+                        b'indefinite length bytestrings '
+                        b'not allowed as array values'
+                    )
 
                 else:
-                    raise CBORDecodeError('unhandled special item when '
-                                          'expecting array value: %d' % special)
+                    raise CBORDecodeError(
+                        b'unhandled special item when '
+                        b'expecting array value: %d' % special
+                    )
 
             # This value becomes the key of the current map instance.
             elif self._state == self._STATE_WANT_MAP_KEY:
@@ -702,26 +735,34 @@
                     self._state = self._STATE_WANT_MAP_VALUE
 
                 elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
-                    raise CBORDecodeError('indefinite length bytestrings '
-                                          'not allowed as map keys')
+                    raise CBORDecodeError(
+                        b'indefinite length bytestrings '
+                        b'not allowed as map keys'
+                    )
 
-                elif special in (SPECIAL_START_ARRAY, SPECIAL_START_MAP,
-                                 SPECIAL_START_SET):
-                    raise CBORDecodeError('collections not supported as map '
-                                          'keys')
+                elif special in (
+                    SPECIAL_START_ARRAY,
+                    SPECIAL_START_MAP,
+                    SPECIAL_START_SET,
+                ):
+                    raise CBORDecodeError(
+                        b'collections not supported as map keys'
+                    )
 
                 # We do not allow special values to be used as map keys.
                 else:
-                    raise CBORDecodeError('unhandled special item when '
-                                          'expecting map key: %d' % special)
+                    raise CBORDecodeError(
+                        b'unhandled special item when '
+                        b'expecting map key: %d' % special
+                    )
 
             # This value becomes the value of the current map key.
             elif self._state == self._STATE_WANT_MAP_VALUE:
                 # Simple values simply get inserted into the map.
                 if special == SPECIAL_NONE:
                     lastc = self._collectionstack[-1]
-                    lastc['v'][self._currentmapkey] = value
-                    lastc['remaining'] -= 1
+                    lastc[b'v'][self._currentmapkey] = value
+                    lastc[b'remaining'] -= 1
 
                     self._state = self._STATE_WANT_MAP_KEY
 
@@ -730,13 +771,12 @@
                     lastc = self._collectionstack[-1]
                     newvalue = []
 
-                    lastc['v'][self._currentmapkey] = newvalue
-                    lastc['remaining'] -= 1
+                    lastc[b'v'][self._currentmapkey] = newvalue
+                    lastc[b'remaining'] -= 1
 
-                    self._collectionstack.append({
-                        'remaining': value,
-                        'v': newvalue,
-                    })
+                    self._collectionstack.append(
+                        {b'remaining': value, b'v': newvalue,}
+                    )
 
                     self._state = self._STATE_WANT_ARRAY_VALUE
 
@@ -745,13 +785,12 @@
                     lastc = self._collectionstack[-1]
                     newvalue = {}
 
-                    lastc['v'][self._currentmapkey] = newvalue
-                    lastc['remaining'] -= 1
+                    lastc[b'v'][self._currentmapkey] = newvalue
+                    lastc[b'remaining'] -= 1
 
-                    self._collectionstack.append({
-                        'remaining': value,
-                        'v': newvalue,
-                    })
+                    self._collectionstack.append(
+                        {b'remaining': value, b'v': newvalue,}
+                    )
 
                     self._state = self._STATE_WANT_MAP_KEY
 
@@ -760,23 +799,26 @@
                     lastc = self._collectionstack[-1]
                     newvalue = set()
 
-                    lastc['v'][self._currentmapkey] = newvalue
-                    lastc['remaining'] -= 1
+                    lastc[b'v'][self._currentmapkey] = newvalue
+                    lastc[b'remaining'] -= 1
 
-                    self._collectionstack.append({
-                        'remaining': value,
-                        'v': newvalue,
-                    })
+                    self._collectionstack.append(
+                        {b'remaining': value, b'v': newvalue,}
+                    )
 
                     self._state = self._STATE_WANT_SET_VALUE
 
                 elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
-                    raise CBORDecodeError('indefinite length bytestrings not '
-                                          'allowed as map values')
+                    raise CBORDecodeError(
+                        b'indefinite length bytestrings not '
+                        b'allowed as map values'
+                    )
 
                 else:
-                    raise CBORDecodeError('unhandled special item when '
-                                          'expecting map value: %d' % special)
+                    raise CBORDecodeError(
+                        b'unhandled special item when '
+                        b'expecting map value: %d' % special
+                    )
 
                 self._currentmapkey = None
 
@@ -784,31 +826,39 @@
             elif self._state == self._STATE_WANT_SET_VALUE:
                 if special == SPECIAL_NONE:
                     lastc = self._collectionstack[-1]
-                    lastc['v'].add(value)
-                    lastc['remaining'] -= 1
+                    lastc[b'v'].add(value)
+                    lastc[b'remaining'] -= 1
 
                 elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
-                    raise CBORDecodeError('indefinite length bytestrings not '
-                                          'allowed as set values')
+                    raise CBORDecodeError(
+                        b'indefinite length bytestrings not '
+                        b'allowed as set values'
+                    )
 
-                elif special in (SPECIAL_START_ARRAY,
-                                 SPECIAL_START_MAP,
-                                 SPECIAL_START_SET):
-                    raise CBORDecodeError('collections not allowed as set '
-                                          'values')
+                elif special in (
+                    SPECIAL_START_ARRAY,
+                    SPECIAL_START_MAP,
+                    SPECIAL_START_SET,
+                ):
+                    raise CBORDecodeError(
+                        b'collections not allowed as set values'
+                    )
 
                 # We don't allow non-trivial types to exist as set values.
                 else:
-                    raise CBORDecodeError('unhandled special item when '
-                                          'expecting set value: %d' % special)
+                    raise CBORDecodeError(
+                        b'unhandled special item when '
+                        b'expecting set value: %d' % special
+                    )
 
             # This value represents the first chunk in an indefinite length
             # bytestring.
             elif self._state == self._STATE_WANT_BYTESTRING_CHUNK_FIRST:
                 # We received a full chunk.
                 if special == SPECIAL_NONE:
-                    self._decodedvalues.append(bytestringchunk(value,
-                                                               first=True))
+                    self._decodedvalues.append(
+                        bytestringchunk(value, first=True)
+                    )
 
                     self._state = self._STATE_WANT_BYTESTRING_CHUNK_SUBSEQUENT
 
@@ -818,9 +868,9 @@
                     # We /could/ convert this to a b''. But we want to preserve
                     # the nature of the underlying data so consumers expecting
                     # an indefinite length bytestring get one.
-                    self._decodedvalues.append(bytestringchunk(b'',
-                                                               first=True,
-                                                               last=True))
+                    self._decodedvalues.append(
+                        bytestringchunk(b'', first=True, last=True)
+                    )
 
                     # Since indefinite length bytestrings can't be used in
                     # collections, we must be at the root level.
@@ -828,9 +878,10 @@
                     self._state = self._STATE_NONE
 
                 else:
-                    raise CBORDecodeError('unexpected special value when '
-                                          'expecting bytestring chunk: %d' %
-                                          special)
+                    raise CBORDecodeError(
+                        b'unexpected special value when '
+                        b'expecting bytestring chunk: %d' % special
+                    )
 
             # This value represents the non-initial chunk in an indefinite
             # length bytestring.
@@ -849,27 +900,31 @@
                     self._state = self._STATE_NONE
 
                 else:
-                    raise CBORDecodeError('unexpected special value when '
-                                          'expecting bytestring chunk: %d' %
-                                          special)
+                    raise CBORDecodeError(
+                        b'unexpected special value when '
+                        b'expecting bytestring chunk: %d' % special
+                    )
 
             else:
-                raise CBORDecodeError('unhandled decoder state: %d' %
-                                      self._state)
+                raise CBORDecodeError(
+                    b'unhandled decoder state: %d' % self._state
+                )
 
             # We could have just added the final value in a collection. End
             # all complete collections at the top of the stack.
             while True:
                 # Bail if we're not waiting on a new collection item.
-                if self._state not in (self._STATE_WANT_ARRAY_VALUE,
-                                       self._STATE_WANT_MAP_KEY,
-                                       self._STATE_WANT_SET_VALUE):
+                if self._state not in (
+                    self._STATE_WANT_ARRAY_VALUE,
+                    self._STATE_WANT_MAP_KEY,
+                    self._STATE_WANT_SET_VALUE,
+                ):
                     break
 
                 # Or we are expecting more items for this collection.
                 lastc = self._collectionstack[-1]
 
-                if lastc['remaining']:
+                if lastc[b'remaining']:
                     break
 
                 # The collection at the top of the stack is complete.
@@ -886,11 +941,11 @@
                         list: self._STATE_WANT_ARRAY_VALUE,
                         dict: self._STATE_WANT_MAP_KEY,
                         set: self._STATE_WANT_SET_VALUE,
-                    }[type(self._collectionstack[-1]['v'])]
+                    }[type(self._collectionstack[-1][b'v'])]
 
                 # If this is the root collection, emit it.
                 else:
-                    self._decodedvalues.append(lastc['v'])
+                    self._decodedvalues.append(lastc[b'v'])
                     self._state = self._STATE_NONE
 
         return (
@@ -909,6 +964,7 @@
         self._decodedvalues = []
         return l
 
+
 class bufferingdecoder(object):
     """A CBOR decoder that buffers undecoded input.
 
@@ -919,6 +975,7 @@
     TODO consider adding limits as to the maximum amount of data that can
     be buffered.
     """
+
     def __init__(self):
         self._decoder = sansiodecoder()
         self._chunks = []
@@ -978,6 +1035,7 @@
     def getavailable(self):
         return self._decoder.getavailable()
 
+
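A sketch of incremental feeding::

    d = bufferingdecoder()
    data = b''.join(streamencode(b'hello'))   # b'\x45hello'
    d.decode(data[:3])                        # incomplete; nothing available yet
    havevalues, readcount, wantbytes = d.decode(data[3:])
    d.getavailable()                          # -> [b'hello']
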
 def decodeall(b):
     """Decode all CBOR items present in an iterable of bytes.
 
@@ -995,9 +1053,9 @@
     havevalues, readcount, wantbytes = decoder.decode(b)
 
     if readcount != len(b):
-        raise CBORDecodeError('input data not fully consumed')
+        raise CBORDecodeError(b'input data not fully consumed')
 
     if decoder.inprogress:
-        raise CBORDecodeError('input data not complete')
+        raise CBORDecodeError(b'input data not complete')
 
     return decoder.getavailable()
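
A round trip through the encoder above::

    decodeall(b''.join(streamencode([1, b'two'])))   # -> [[1, b'two']]
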
--- a/mercurial/utils/compression.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/utils/compression.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,14 +10,13 @@
 import collections
 import zlib
 
+from ..pycompat import getattr
 from .. import (
     error,
     i18n,
     pycompat,
 )
-from . import (
-    stringutil,
-)
+from . import stringutil
 
 safehasattr = pycompat.safehasattr
 
@@ -26,17 +25,20 @@
 
 # compression code
 
-SERVERROLE = 'server'
-CLIENTROLE = 'client'
+SERVERROLE = b'server'
+CLIENTROLE = b'client'
 
-compewireprotosupport = collections.namedtuple(r'compenginewireprotosupport',
-                                               (r'name', r'serverpriority',
-                                                r'clientpriority'))
+compewireprotosupport = collections.namedtuple(
+    r'compenginewireprotosupport',
+    (r'name', r'serverpriority', r'clientpriority'),
+)
+
 
 class propertycache(object):
     def __init__(self, func):
         self.func = func
         self.name = func.__name__
+
     def __get__(self, obj, type=None):
         result = self.func(obj)
         self.cachevalue(obj, result)
@@ -46,6 +48,7 @@
         # __dict__ assignment required to bypass __setattr__ (eg: repoview)
         obj.__dict__[self.name] = value
 
+
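propertycache is the usual lazy-attribute descriptor: the wrapped method
runs once and its result then shadows the attribute on the instance. A
sketch, with a hypothetical helper::

    class holder(object):
        @propertycache
        def table(self):
            return buildtable()   # hypothetical; runs on first access only
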
 class compressormanager(object):
     """Holds registrations of various compression engines.
 
@@ -56,6 +59,7 @@
     Compressors are registered against the global instance by calling its
     ``register()`` method.
     """
+
     def __init__(self):
         self._engines = {}
         # Bundle spec human name to engine name.
@@ -82,24 +86,28 @@
         The argument must be a ``compressionengine`` instance.
         """
         if not isinstance(engine, compressionengine):
-            raise ValueError(_('argument must be a compressionengine'))
+            raise ValueError(_(b'argument must be a compressionengine'))
 
         name = engine.name()
 
         if name in self._engines:
-            raise error.Abort(_('compression engine %s already registered') %
-                              name)
+            raise error.Abort(
+                _(b'compression engine %s already registered') % name
+            )
 
         bundleinfo = engine.bundletype()
         if bundleinfo:
             bundlename, bundletype = bundleinfo
 
             if bundlename in self._bundlenames:
-                raise error.Abort(_('bundle name %s already registered') %
-                                  bundlename)
+                raise error.Abort(
+                    _(b'bundle name %s already registered') % bundlename
+                )
             if bundletype in self._bundletypes:
-                raise error.Abort(_('bundle type %s already registered by %s') %
-                                  (bundletype, self._bundletypes[bundletype]))
+                raise error.Abort(
+                    _(b'bundle type %s already registered by %s')
+                    % (bundletype, self._bundletypes[bundletype])
+                )
 
             # No external facing name declared.
             if bundlename:
@@ -111,16 +119,22 @@
         if wiresupport:
             wiretype = wiresupport.name
             if wiretype in self._wiretypes:
-                raise error.Abort(_('wire protocol compression %s already '
-                                    'registered by %s') %
-                                  (wiretype, self._wiretypes[wiretype]))
+                raise error.Abort(
+                    _(
+                        b'wire protocol compression %s already '
+                        b'registered by %s'
+                    )
+                    % (wiretype, self._wiretypes[wiretype])
+                )
 
             self._wiretypes[wiretype] = name
 
         revlogheader = engine.revlogheader()
         if revlogheader and revlogheader in self._revlogheaders:
-            raise error.Abort(_('revlog header %s already registered by %s') %
-                              (revlogheader, self._revlogheaders[revlogheader]))
+            raise error.Abort(
+                _(b'revlog header %s already registered by %s')
+                % (revlogheader, self._revlogheaders[revlogheader])
+            )
 
         if revlogheader:
             self._revlogheaders[revlogheader] = name
@@ -144,8 +158,9 @@
         """
         engine = self._engines[self._bundlenames[bundlename]]
         if not engine.available():
-            raise error.Abort(_('compression engine %s could not be loaded') %
-                              engine.name())
+            raise error.Abort(
+                _(b'compression engine %s could not be loaded') % engine.name()
+            )
         return engine
 
     def forbundletype(self, bundletype):
@@ -157,8 +172,9 @@
         """
         engine = self._engines[self._bundletypes[bundletype]]
         if not engine.available():
-            raise error.Abort(_('compression engine %s could not be loaded') %
-                              engine.name())
+            raise error.Abort(
+                _(b'compression engine %s could not be loaded') % engine.name()
+            )
         return engine
 
     def supportedwireengines(self, role, onlyavailable=True):
@@ -171,7 +187,7 @@
         """
         assert role in (SERVERROLE, CLIENTROLE)
 
-        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
+        attr = b'serverpriority' if role == SERVERROLE else b'clientpriority'
 
         engines = [self._engines[e] for e in self._wiretypes.values()]
         if onlyavailable:
@@ -189,8 +205,9 @@
     def forwiretype(self, wiretype):
         engine = self._engines[self._wiretypes[wiretype]]
         if not engine.available():
-            raise error.Abort(_('compression engine %s could not be loaded') %
-                              engine.name())
+            raise error.Abort(
+                _(b'compression engine %s could not be loaded') % engine.name()
+            )
         return engine
 
     def forrevlogheader(self, header):
@@ -200,13 +217,16 @@
         """
         return self._engines[self._revlogheaders[header]]
 
+
 compengines = compressormanager()
 
+
 class compressionengine(object):
     """Base class for compression engines.
 
     Compression engines must implement the interface defined by this class.
     """
+
     def name(self):
         """Returns the name of the compression engine.
 
@@ -319,6 +339,7 @@
         """
         raise NotImplementedError()
 
+
 class _CompressedStreamReader(object):
     def __init__(self, fh):
         if safehasattr(fh, 'unbufferedread'):
@@ -338,13 +359,13 @@
             while self._pending:
                 if len(self._pending[0]) > l + self._pos:
                     newbuf = self._pending[0]
-                    buf.append(newbuf[self._pos:self._pos + l])
+                    buf.append(newbuf[self._pos : self._pos + l])
                     self._pos += l
-                    return ''.join(buf)
+                    return b''.join(buf)
 
                 newbuf = self._pending.pop(0)
                 if self._pos:
-                    buf.append(newbuf[self._pos:])
+                    buf.append(newbuf[self._pos :])
                     l -= len(newbuf) - self._pos
                 else:
                     buf.append(newbuf)
@@ -352,41 +373,45 @@
                 self._pos = 0
 
             if self._eof:
-                return ''.join(buf)
+                return b''.join(buf)
             chunk = self._reader(65536)
             self._decompress(chunk)
             if not chunk and not self._pending and not self._eof:
                 # No progress and no new data, bail out
-                return ''.join(buf)
+                return b''.join(buf)
+
 
 class _GzipCompressedStreamReader(_CompressedStreamReader):
     def __init__(self, fh):
         super(_GzipCompressedStreamReader, self).__init__(fh)
         self._decompobj = zlib.decompressobj()
+
     def _decompress(self, chunk):
         newbuf = self._decompobj.decompress(chunk)
         if newbuf:
             self._pending.append(newbuf)
         d = self._decompobj.copy()
         try:
-            d.decompress('x')
+            d.decompress(b'x')
             d.flush()
-            if d.unused_data == 'x':
+            if d.unused_data == b'x':
                 self._eof = True
         except zlib.error:
             pass
 
+
 class _BZ2CompressedStreamReader(_CompressedStreamReader):
     def __init__(self, fh):
         super(_BZ2CompressedStreamReader, self).__init__(fh)
         self._decompobj = bz2.BZ2Decompressor()
+
     def _decompress(self, chunk):
         newbuf = self._decompobj.decompress(chunk)
         if newbuf:
             self._pending.append(newbuf)
         try:
             while True:
-                newbuf = self._decompobj.decompress('')
+                newbuf = self._decompobj.decompress(b'')
                 if newbuf:
                     self._pending.append(newbuf)
                 else:
@@ -394,25 +419,28 @@
         except EOFError:
             self._eof = True
 
+
 class _TruncatedBZ2CompressedStreamReader(_BZ2CompressedStreamReader):
     def __init__(self, fh):
         super(_TruncatedBZ2CompressedStreamReader, self).__init__(fh)
-        newbuf = self._decompobj.decompress('BZ')
+        newbuf = self._decompobj.decompress(b'BZ')
         if newbuf:
             self._pending.append(newbuf)
 
+
 class _ZstdCompressedStreamReader(_CompressedStreamReader):
     def __init__(self, fh, zstd):
         super(_ZstdCompressedStreamReader, self).__init__(fh)
         self._zstd = zstd
         self._decompobj = zstd.ZstdDecompressor().decompressobj()
+
     def _decompress(self, chunk):
         newbuf = self._decompobj.decompress(chunk)
         if newbuf:
             self._pending.append(newbuf)
         try:
             while True:
-                newbuf = self._decompobj.decompress('')
+                newbuf = self._decompobj.decompress(b'')
                 if newbuf:
                     self._pending.append(newbuf)
                 else:
@@ -420,9 +448,10 @@
         except self._zstd.ZstdError:
             self._eof = True
 
+
 class _zlibengine(compressionengine):
     def name(self):
-        return 'zlib'
+        return b'zlib'
 
     def bundletype(self):
         """zlib compression using the DEFLATE algorithm.
@@ -431,18 +460,18 @@
         algorithm strikes a reasonable balance between compression ratio
         and size.
         """
-        return 'gzip', 'GZ'
+        return b'gzip', b'GZ'
 
     def wireprotosupport(self):
-        return compewireprotosupport('zlib', 20, 20)
+        return compewireprotosupport(b'zlib', 20, 20)
 
     def revlogheader(self):
-        return 'x'
+        return b'x'
 
     def compressstream(self, it, opts=None):
         opts = opts or {}
 
-        z = zlib.compressobj(opts.get('level', -1))
+        z = zlib.compressobj(opts.get(b'level', -1))
         for chunk in it:
             data = z.compress(chunk)
             # Not all calls to compress emit data. It is cheaper to inspect
@@ -456,7 +485,6 @@
         return _GzipCompressedStreamReader(fh)
 
     class zlibrevlogcompressor(object):
-
         def __init__(self, level=None):
             self._level = level
 
@@ -488,33 +516,37 @@
                 parts = []
                 pos = 0
                 while pos < insize:
-                    pos2 = pos + 2**20
+                    pos2 = pos + 2 ** 20
                     parts.append(z.compress(data[pos:pos2]))
                     pos = pos2
                 parts.append(z.flush())
 
                 if sum(map(len, parts)) < insize:
-                    return ''.join(parts)
+                    return b''.join(parts)
                 return None
 
         def decompress(self, data):
             try:
                 return zlib.decompress(data)
             except zlib.error as e:
-                raise error.StorageError(_('revlog decompress error: %s') %
-                                         stringutil.forcebytestr(e))
+                raise error.StorageError(
+                    _(b'revlog decompress error: %s')
+                    % stringutil.forcebytestr(e)
+                )
 
     def revlogcompressor(self, opts=None):
         level = None
         if opts is not None:
-            level = opts.get('zlib.level')
+            level = opts.get(b'zlib.level')
         return self.zlibrevlogcompressor(level)
 
+
 compengines.register(_zlibengine())
 
+
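A usage sketch for the engine just registered, assuming an in-memory
stream::

    import io
    engine = compengines.forbundletype(b'GZ')
    compressed = b''.join(engine.compressstream(iter([b'some', b'data'])))
    reader = engine.decompressorreader(io.BytesIO(compressed))
    reader.read(8)   # -> b'somedata'
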
 class _bz2engine(compressionengine):
     def name(self):
-        return 'bz2'
+        return b'bz2'
 
     def bundletype(self):
         """An algorithm that produces smaller bundles than ``gzip``.
@@ -528,16 +560,16 @@
         If available, the ``zstd`` engine can yield similar or better
         compression at much higher speeds.
         """
-        return 'bzip2', 'BZ'
+        return b'bzip2', b'BZ'
 
     # We declare a protocol name but don't advertise by default because
     # it is slow.
     def wireprotosupport(self):
-        return compewireprotosupport('bzip2', 0, 0)
+        return compewireprotosupport(b'bzip2', 0, 0)
 
     def compressstream(self, it, opts=None):
         opts = opts or {}
-        z = bz2.BZ2Compressor(opts.get('level', 9))
+        z = bz2.BZ2Compressor(opts.get(b'level', 9))
         for chunk in it:
             data = z.compress(chunk)
             if data:
@@ -548,38 +580,42 @@
     def decompressorreader(self, fh):
         return _BZ2CompressedStreamReader(fh)
 
+
 compengines.register(_bz2engine())
 
+
 class _truncatedbz2engine(compressionengine):
     def name(self):
-        return 'bz2truncated'
+        return b'bz2truncated'
 
     def bundletype(self):
-        return None, '_truncatedBZ'
+        return None, b'_truncatedBZ'
 
     # We don't implement compressstream because it is hackily handled elsewhere.
 
     def decompressorreader(self, fh):
         return _TruncatedBZ2CompressedStreamReader(fh)
 
+
 compengines.register(_truncatedbz2engine())
 
+
 class _noopengine(compressionengine):
     def name(self):
-        return 'none'
+        return b'none'
 
     def bundletype(self):
         """No compression is performed.
 
         Use this compression engine to explicitly disable compression.
         """
-        return 'none', 'UN'
+        return b'none', b'UN'
 
     # Clients always support uncompressed payloads. Servers don't because
     # unless you are on a fast network, uncompressed payloads can easily
     # saturate your network pipe.
     def wireprotosupport(self):
-        return compewireprotosupport('none', 0, 10)
+        return compewireprotosupport(b'none', 0, 10)
 
     # We don't implement revlogheader because it is handled specially
     # in the revlog class.
@@ -597,11 +633,13 @@
     def revlogcompressor(self, opts=None):
         return self.nooprevlogcompressor()
 
+
 compengines.register(_noopengine())
 
+
 class _zstdengine(compressionengine):
     def name(self):
-        return 'zstd'
+        return b'zstd'
 
     @propertycache
     def _module(self):
@@ -609,6 +647,7 @@
         # until first access.
         try:
             from .. import zstd
+
             # Force delayed import.
             zstd.__version__
             return zstd
@@ -630,20 +669,20 @@
         If this engine is available and backwards compatibility is not a
         concern, it is likely the best available engine.
         """
-        return 'zstd', 'ZS'
+        return b'zstd', b'ZS'
 
     def wireprotosupport(self):
-        return compewireprotosupport('zstd', 50, 50)
+        return compewireprotosupport(b'zstd', 50, 50)
 
     def revlogheader(self):
-        return '\x28'
+        return b'\x28'
 
     def compressstream(self, it, opts=None):
         opts = opts or {}
         # zstd level 3 is almost always significantly faster than zlib
         # while providing no worse compression. It strikes a good balance
         # between speed and compression.
-        level = opts.get('level', 3)
+        level = opts.get(b'level', 3)
 
         zstd = self._module
         z = zstd.ZstdCompressor(level=level).compressobj()
@@ -694,7 +733,7 @@
                 chunks.append(z.flush())
 
                 if sum(map(len, chunks)) < insize:
-                    return ''.join(chunks)
+                    return b''.join(chunks)
                 return None
 
         def decompress(self, data):
@@ -714,22 +753,26 @@
                     pos = pos2
                 # Frame should be exhausted, so no finish() API.
 
-                return ''.join(chunks)
+                return b''.join(chunks)
             except Exception as e:
-                raise error.StorageError(_('revlog decompress error: %s') %
-                                         stringutil.forcebytestr(e))
+                raise error.StorageError(
+                    _(b'revlog decompress error: %s')
+                    % stringutil.forcebytestr(e)
+                )
 
     def revlogcompressor(self, opts=None):
         opts = opts or {}
-        level = opts.get('zstd.level')
+        level = opts.get(b'zstd.level')
         if level is None:
-            level = opts.get('level')
+            level = opts.get(b'level')
         if level is None:
             level = 3
         return self.zstdrevlogcompressor(self._module, level=level)
 
+
 compengines.register(_zstdengine())
 
+
 def bundlecompressiontopics():
     """Obtains a list of available bundle compressions for use in help."""
     # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
@@ -761,4 +804,5 @@
 
     return items
 
+
 i18nfunctions = bundlecompressiontopics().values()
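
The engines above share one streaming pattern: feed an iterator of byte
chunks through a compressor object, yield output only when the compressor
actually emits data, and flush at the end. A minimal standalone sketch of
that pattern with stdlib zlib (illustrative only; this free function is not
part of the changeset):

    import zlib

    def compressstream(it, level=-1):
        # Lazily compress an iterator of byte chunks. Not every call to
        # compress() emits data, so skip empty results.
        z = zlib.compressobj(level)
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data
        yield z.flush()

    # b''.join(compressstream(iter([b'hello ', b'world']))) decompresses
    # back to b'hello world' with zlib.decompress().
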
--- a/mercurial/utils/dateutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/utils/dateutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -20,45 +20,41 @@
 
 # used by parsedate
 defaultdateformats = (
-    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
-    '%Y-%m-%dT%H:%M',    #   without seconds
-    '%Y-%m-%dT%H%M%S',   # another awful but legal variant without :
-    '%Y-%m-%dT%H%M',     #   without seconds
-    '%Y-%m-%d %H:%M:%S', # our common legal variant
-    '%Y-%m-%d %H:%M',    #   without seconds
-    '%Y-%m-%d %H%M%S',   # without :
-    '%Y-%m-%d %H%M',     #   without seconds
-    '%Y-%m-%d %I:%M:%S%p',
-    '%Y-%m-%d %H:%M',
-    '%Y-%m-%d %I:%M%p',
-    '%Y-%m-%d',
-    '%m-%d',
-    '%m/%d',
-    '%m/%d/%y',
-    '%m/%d/%Y',
-    '%a %b %d %H:%M:%S %Y',
-    '%a %b %d %I:%M:%S%p %Y',
-    '%a, %d %b %Y %H:%M:%S',        #  GNU coreutils "/bin/date --rfc-2822"
-    '%b %d %H:%M:%S %Y',
-    '%b %d %I:%M:%S%p %Y',
-    '%b %d %H:%M:%S',
-    '%b %d %I:%M:%S%p',
-    '%b %d %H:%M',
-    '%b %d %I:%M%p',
-    '%b %d %Y',
-    '%b %d',
-    '%H:%M:%S',
-    '%I:%M:%S%p',
-    '%H:%M',
-    '%I:%M%p',
+    b'%Y-%m-%dT%H:%M:%S',  # the 'real' ISO8601
+    b'%Y-%m-%dT%H:%M',  #   without seconds
+    b'%Y-%m-%dT%H%M%S',  # another awful but legal variant without :
+    b'%Y-%m-%dT%H%M',  #   without seconds
+    b'%Y-%m-%d %H:%M:%S',  # our common legal variant
+    b'%Y-%m-%d %H:%M',  #   without seconds
+    b'%Y-%m-%d %H%M%S',  # without :
+    b'%Y-%m-%d %H%M',  #   without seconds
+    b'%Y-%m-%d %I:%M:%S%p',
+    b'%Y-%m-%d %H:%M',
+    b'%Y-%m-%d %I:%M%p',
+    b'%Y-%m-%d',
+    b'%m-%d',
+    b'%m/%d',
+    b'%m/%d/%y',
+    b'%m/%d/%Y',
+    b'%a %b %d %H:%M:%S %Y',
+    b'%a %b %d %I:%M:%S%p %Y',
+    b'%a, %d %b %Y %H:%M:%S',  #  GNU coreutils "/bin/date --rfc-2822"
+    b'%b %d %H:%M:%S %Y',
+    b'%b %d %I:%M:%S%p %Y',
+    b'%b %d %H:%M:%S',
+    b'%b %d %I:%M:%S%p',
+    b'%b %d %H:%M',
+    b'%b %d %I:%M%p',
+    b'%b %d %Y',
+    b'%b %d',
+    b'%H:%M:%S',
+    b'%I:%M:%S%p',
+    b'%H:%M',
+    b'%I:%M%p',
 )
 
-extendeddateformats = defaultdateformats + (
-    "%Y",
-    "%Y-%m",
-    "%b",
-    "%b %Y",
-)
+extendeddateformats = defaultdateformats + (b"%Y", b"%Y-%m", b"%b", b"%b %Y",)
+
 
 def makedate(timestamp=None):
     '''Return a unix timestamp (or the current time) as a (unixtime,
@@ -66,14 +62,16 @@
     if timestamp is None:
         timestamp = time.time()
     if timestamp < 0:
-        hint = _("check your clock")
-        raise error.Abort(_("negative timestamp: %d") % timestamp, hint=hint)
-    delta = (datetime.datetime.utcfromtimestamp(timestamp) -
-             datetime.datetime.fromtimestamp(timestamp))
+        hint = _(b"check your clock")
+        raise error.Abort(_(b"negative timestamp: %d") % timestamp, hint=hint)
+    delta = datetime.datetime.utcfromtimestamp(
+        timestamp
+    ) - datetime.datetime.fromtimestamp(timestamp)
     tz = delta.days * 86400 + delta.seconds
     return timestamp, tz
 
-def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
+
+def datestr(date=None, format=b'%a %b %d %H:%M:%S %Y %1%2'):
     """represent a (unixtime, offset) tuple as a localized time.
     unixtime is seconds since the epoch, and offset is the time zone's
     number of seconds away from UTC.
@@ -90,16 +88,16 @@
     'Fri Dec 13 20:45:52 1901 +0000'
     """
     t, tz = date or makedate()
-    if "%1" in format or "%2" in format or "%z" in format:
-        sign = (tz > 0) and "-" or "+"
+    if b"%1" in format or b"%2" in format or b"%z" in format:
+        sign = (tz > 0) and b"-" or b"+"
         minutes = abs(tz) // 60
         q, r = divmod(minutes, 60)
-        format = format.replace("%z", "%1%2")
-        format = format.replace("%1", "%c%02d" % (sign, q))
-        format = format.replace("%2", "%02d" % r)
+        format = format.replace(b"%z", b"%1%2")
+        format = format.replace(b"%1", b"%c%02d" % (sign, q))
+        format = format.replace(b"%2", b"%02d" % r)
     d = t - tz
-    if d > 0x7fffffff:
-        d = 0x7fffffff
+    if d > 0x7FFFFFFF:
+        d = 0x7FFFFFFF
     elif d < -0x80000000:
         d = -0x80000000
     # Never use time.gmtime() and datetime.datetime.fromtimestamp()
@@ -109,39 +107,47 @@
     s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
     return s
 
+
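
The %1/%2 tokens consumed by datestr() expand to the timezone sign plus
hours and to the minutes, respectively; %z is rewritten to %1%2 first.
Worked through for tz = 14400, i.e. UTC-4 (illustrative values):

    # sign = '-' (tz > 0 means west of UTC); minutes = 14400 // 60 = 240
    # q, r = divmod(240, 60) -> (4, 0)
    # '%1' -> '-04', '%2' -> '00', so '%1%2' renders '-0400'
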
 def shortdate(date=None):
     """turn (timestamp, tzoff) tuple into iso 8631 date."""
-    return datestr(date, format='%Y-%m-%d')
+    return datestr(date, format=b'%Y-%m-%d')
+
 
 def parsetimezone(s):
     """find a trailing timezone, if any, in string, and return a
        (offset, remainder) pair"""
     s = pycompat.bytestr(s)
 
-    if s.endswith("GMT") or s.endswith("UTC"):
+    if s.endswith(b"GMT") or s.endswith(b"UTC"):
         return 0, s[:-3].rstrip()
 
     # Unix-style timezones [+-]hhmm
-    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
-        sign = (s[-5] == "+") and 1 or -1
+    if len(s) >= 5 and s[-5] in b"+-" and s[-4:].isdigit():
+        sign = (s[-5] == b"+") and 1 or -1
         hours = int(s[-4:-2])
         minutes = int(s[-2:])
         return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()
 
     # ISO8601 trailing Z
-    if s.endswith("Z") and s[-2:-1].isdigit():
+    if s.endswith(b"Z") and s[-2:-1].isdigit():
         return 0, s[:-1]
 
     # ISO8601-style [+-]hh:mm
-    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
-        s[-5:-3].isdigit() and s[-2:].isdigit()):
-        sign = (s[-6] == "+") and 1 or -1
+    if (
+        len(s) >= 6
+        and s[-6] in b"+-"
+        and s[-3] == b":"
+        and s[-5:-3].isdigit()
+        and s[-2:].isdigit()
+    ):
+        sign = (s[-6] == b"+") and 1 or -1
         hours = int(s[-5:-3])
         minutes = int(s[-2:])
         return -sign * (hours * 60 + minutes) * 60, s[:-6]
 
     return None, s
 
+
 def strdate(string, format, defaults=None):
     """parse a localized time string and return a (unixtime, offset) tuple.
     if the string cannot be parsed, ValueError is raised."""
@@ -152,20 +158,28 @@
     offset, date = parsetimezone(string)
 
     # add missing elements from defaults
-    usenow = False # default to using biased defaults
-    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
+    usenow = False  # default to using biased defaults
+    for part in (
+        b"S",
+        b"M",
+        b"HI",
+        b"d",
+        b"mb",
+        b"yY",
+    ):  # decreasing specificity
         part = pycompat.bytestr(part)
-        found = [True for p in part if ("%"+p) in format]
+        found = [True for p in part if (b"%" + p) in format]
         if not found:
-            date += "@" + defaults[part][usenow]
-            format += "@%" + part[0]
+            date += b"@" + defaults[part][usenow]
+            format += b"@%" + part[0]
         else:
             # We've found a specific time element, less specific time
             # elements are relative to today
             usenow = True
 
-    timetuple = time.strptime(encoding.strfromlocal(date),
-                              encoding.strfromlocal(format))
+    timetuple = time.strptime(
+        encoding.strfromlocal(date), encoding.strfromlocal(format)
+    )
     localunixtime = int(calendar.timegm(timetuple))
     if offset is None:
         # local timezone
@@ -175,6 +189,7 @@
         unixtime = localunixtime + offset
     return unixtime, offset
 
+
 def parsedate(date, formats=None, bias=None):
     """parse a localized date/time and return a (unixtime, offset) tuple.
 
@@ -205,33 +220,34 @@
         formats = defaultdateformats
     date = date.strip()
 
-    if date == 'now' or date == _('now'):
+    if date == b'now' or date == _(b'now'):
         return makedate()
-    if date == 'today' or date == _('today'):
+    if date == b'today' or date == _(b'today'):
         date = datetime.date.today().strftime(r'%b %d')
         date = encoding.strtolocal(date)
-    elif date == 'yesterday' or date == _('yesterday'):
-        date = (datetime.date.today() -
-                datetime.timedelta(days=1)).strftime(r'%b %d')
+    elif date == b'yesterday' or date == _(b'yesterday'):
+        date = (datetime.date.today() - datetime.timedelta(days=1)).strftime(
+            r'%b %d'
+        )
         date = encoding.strtolocal(date)
 
     try:
-        when, offset = map(int, date.split(' '))
+        when, offset = map(int, date.split(b' '))
     except ValueError:
         # fill out defaults
         now = makedate()
         defaults = {}
-        for part in ("d", "mb", "yY", "HI", "M", "S"):
+        for part in (b"d", b"mb", b"yY", b"HI", b"M", b"S"):
             # this piece is for rounding the specific end of unknowns
             b = bias.get(part)
             if b is None:
-                if part[0:1] in "HMS":
-                    b = "00"
+                if part[0:1] in b"HMS":
+                    b = b"00"
                 else:
-                    b = "0"
+                    b = b"0"
 
             # this piece is for matching the generic end to today's date
-            n = datestr(now, "%" + part[0:1])
+            n = datestr(now, b"%" + part[0:1])
 
             defaults[part] = (b, n)
 
@@ -244,17 +260,19 @@
                 break
         else:
             raise error.ParseError(
-                _('invalid date: %r') % pycompat.bytestr(date))
+                _(b'invalid date: %r') % pycompat.bytestr(date)
+            )
     # validate explicit (probably user-specified) date and
     # time zone offset. values must fit in signed 32 bits for
     # current 32-bit linux runtimes. timezones go from UTC-12
     # to UTC+14
-    if when < -0x80000000 or when > 0x7fffffff:
-        raise error.ParseError(_('date exceeds 32 bits: %d') % when)
+    if when < -0x80000000 or when > 0x7FFFFFFF:
+        raise error.ParseError(_(b'date exceeds 32 bits: %d') % when)
     if offset < -50400 or offset > 43200:
-        raise error.ParseError(_('impossible time zone offset: %d') % offset)
+        raise error.ParseError(_(b'impossible time zone offset: %d') % offset)
     return when, offset
 
+
 def matchdate(date):
     """Return a function that matches a given date match specifier
 
@@ -285,42 +303,43 @@
     """
 
     def lower(date):
-        d = {'mb': "1", 'd': "1"}
+        d = {b'mb': b"1", b'd': b"1"}
         return parsedate(date, extendeddateformats, d)[0]
 
     def upper(date):
-        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
-        for days in ("31", "30", "29"):
+        d = {b'mb': b"12", b'HI': b"23", b'M': b"59", b'S': b"59"}
+        for days in (b"31", b"30", b"29"):
             try:
-                d["d"] = days
+                d[b"d"] = days
                 return parsedate(date, extendeddateformats, d)[0]
             except error.ParseError:
                 pass
-        d["d"] = "28"
+        d[b"d"] = b"28"
         return parsedate(date, extendeddateformats, d)[0]
 
     date = date.strip()
 
     if not date:
-        raise error.Abort(_("dates cannot consist entirely of whitespace"))
+        raise error.Abort(_(b"dates cannot consist entirely of whitespace"))
     elif date[0:1] == b"<":
         if not date[1:]:
-            raise error.Abort(_("invalid day spec, use '<DATE'"))
+            raise error.Abort(_(b"invalid day spec, use '<DATE'"))
         when = upper(date[1:])
         return lambda x: x <= when
     elif date[0:1] == b">":
         if not date[1:]:
-            raise error.Abort(_("invalid day spec, use '>DATE'"))
+            raise error.Abort(_(b"invalid day spec, use '>DATE'"))
         when = lower(date[1:])
         return lambda x: x >= when
     elif date[0:1] == b"-":
         try:
             days = int(date[1:])
         except ValueError:
-            raise error.Abort(_("invalid day spec: %s") % date[1:])
+            raise error.Abort(_(b"invalid day spec: %s") % date[1:])
         if days < 0:
-            raise error.Abort(_("%s must be nonnegative (see 'hg help dates')")
-                % date[1:])
+            raise error.Abort(
+                _(b"%s must be nonnegative (see 'hg help dates')") % date[1:]
+            )
         when = makedate()[0] - days * 3600 * 24
         return lambda x: x >= when
     elif b" to " in date:
--- a/mercurial/utils/interfaceutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-# interfaceutil.py - Utilities for declaring interfaces.
-#
-# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-# zope.interface imposes a run-time cost due to module import overhead and
-# bookkeeping for declaring interfaces. So, we use stubs for various
-# zope.interface primitives unless instructed otherwise.
-
-from __future__ import absolute_import
-
-from .. import (
-    encoding,
-)
-
-if encoding.environ.get('HGREALINTERFACES'):
-    from ..thirdparty.zope import (
-        interface as zi,
-    )
-
-    Attribute = zi.Attribute
-    Interface = zi.Interface
-    implementer = zi.implementer
-else:
-    class Attribute(object):
-        def __init__(self, __name__, __doc__=''):
-            pass
-
-    class Interface(object):
-        def __init__(self, name, bases=(), attrs=None, __doc__=None,
-                 __module__=None):
-            pass
-
-    def implementer(*ifaces):
-        def wrapper(cls):
-            return cls
-
-        return wrapper
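
The stubs deleted here existed so call sites could declare interfaces
unconditionally while paying zope.interface's import cost only when
HGREALINTERFACES was set; with the stubs active, implementer() degrades to
an identity decorator. (The declarations now live under
mercurial/interfaces, as the storageutil import change below shows.) The
pattern in miniature, with hypothetical class names:

    class ifilestorage(Interface):
        """Declared for documentation and optional runtime verification."""

    @implementer(ifilestorage)
    class filestore(object):
        pass  # unchanged by implementer() when the stubs are active
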
--- a/mercurial/utils/procutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/utils/procutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -20,6 +20,10 @@
 import time
 
 from ..i18n import _
+from ..pycompat import (
+    getattr,
+    open,
+)
 
 from .. import (
     encoding,
@@ -34,12 +38,14 @@
 stdin = pycompat.stdin
 stdout = pycompat.stdout
 
+
 def isatty(fp):
     try:
         return fp.isatty()
     except AttributeError:
         return False
 
+
 # glibc determines buffering on first write to stdout - if we replace a TTY
 # destined stdout with a pipe destined stdout (e.g. pager), we want line
 # buffering (or unbuffered, on Windows)
@@ -47,11 +53,14 @@
     if pycompat.iswindows:
         # Windows doesn't support line buffering
         stdout = os.fdopen(stdout.fileno(), r'wb', 0)
-    else:
+    elif not pycompat.ispy3:
+        # on Python 3, stdout (sys.stdout.buffer) is already line buffered and
+        # buffering=1 is not handled in binary mode
         stdout = os.fdopen(stdout.fileno(), r'wb', 1)
 
 if pycompat.iswindows:
     from .. import windows as platform
+
     stdout = platform.winstdout(stdout)
 else:
     from .. import posix as platform
@@ -82,12 +91,14 @@
 
 closefds = pycompat.isposix
 
+
 def explainexit(code):
     """return a message describing a subprocess status
     (codes from kill are negative - not os.system/wait encoding)"""
     if code >= 0:
-        return _("exited with status %d") % code
-    return _("killed by signal %d") % -code
+        return _(b"exited with status %d") % code
+    return _(b"killed by signal %d") % -code
+
 
 class _pfile(object):
     """File-like wrapper for a stream opened by subprocess.Popen()"""
@@ -113,59 +124,85 @@
     def __exit__(self, exc_type, exc_value, exc_tb):
         self.close()
 
-def popen(cmd, mode='rb', bufsize=-1):
-    if mode == 'rb':
+
+def popen(cmd, mode=b'rb', bufsize=-1):
+    if mode == b'rb':
         return _popenreader(cmd, bufsize)
-    elif mode == 'wb':
+    elif mode == b'wb':
         return _popenwriter(cmd, bufsize)
-    raise error.ProgrammingError('unsupported mode: %r' % mode)
+    raise error.ProgrammingError(b'unsupported mode: %r' % mode)
+
 
 def _popenreader(cmd, bufsize):
-    p = subprocess.Popen(tonativestr(quotecommand(cmd)),
-                         shell=True, bufsize=bufsize,
-                         close_fds=closefds,
-                         stdout=subprocess.PIPE)
+    p = subprocess.Popen(
+        tonativestr(quotecommand(cmd)),
+        shell=True,
+        bufsize=bufsize,
+        close_fds=closefds,
+        stdout=subprocess.PIPE,
+    )
     return _pfile(p, p.stdout)
 
+
 def _popenwriter(cmd, bufsize):
-    p = subprocess.Popen(tonativestr(quotecommand(cmd)),
-                         shell=True, bufsize=bufsize,
-                         close_fds=closefds,
-                         stdin=subprocess.PIPE)
+    p = subprocess.Popen(
+        tonativestr(quotecommand(cmd)),
+        shell=True,
+        bufsize=bufsize,
+        close_fds=closefds,
+        stdin=subprocess.PIPE,
+    )
     return _pfile(p, p.stdin)
 
+
 def popen2(cmd, env=None):
     # Setting bufsize to -1 lets the system decide the buffer size.
     # The default for bufsize is 0, meaning unbuffered. This leads to
     # poor performance on Mac OS X: http://bugs.python.org/issue4194
-    p = subprocess.Popen(tonativestr(cmd),
-                         shell=True, bufsize=-1,
-                         close_fds=closefds,
-                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
-                         env=tonativeenv(env))
+    p = subprocess.Popen(
+        tonativestr(cmd),
+        shell=True,
+        bufsize=-1,
+        close_fds=closefds,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        env=tonativeenv(env),
+    )
     return p.stdin, p.stdout
 
+
 def popen3(cmd, env=None):
     stdin, stdout, stderr, p = popen4(cmd, env)
     return stdin, stdout, stderr
 
+
 def popen4(cmd, env=None, bufsize=-1):
-    p = subprocess.Popen(tonativestr(cmd),
-                         shell=True, bufsize=bufsize,
-                         close_fds=closefds,
-                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE,
-                         env=tonativeenv(env))
+    p = subprocess.Popen(
+        tonativestr(cmd),
+        shell=True,
+        bufsize=bufsize,
+        close_fds=closefds,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        env=tonativeenv(env),
+    )
     return p.stdin, p.stdout, p.stderr, p
 
+
 def pipefilter(s, cmd):
     '''filter string S through command CMD, returning its output'''
-    p = subprocess.Popen(tonativestr(cmd),
-                         shell=True, close_fds=closefds,
-                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+    p = subprocess.Popen(
+        tonativestr(cmd),
+        shell=True,
+        close_fds=closefds,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+    )
     pout, perr = p.communicate(s)
     return pout
 
+
 def tempfilter(s, cmd):
     '''filter string S through a pair of temporary files with CMD.
     CMD is used as a template to create the real command to be run,
@@ -173,21 +210,22 @@
     the temporary files generated.'''
     inname, outname = None, None
     try:
-        infd, inname = pycompat.mkstemp(prefix='hg-filter-in-')
+        infd, inname = pycompat.mkstemp(prefix=b'hg-filter-in-')
         fp = os.fdopen(infd, r'wb')
         fp.write(s)
         fp.close()
-        outfd, outname = pycompat.mkstemp(prefix='hg-filter-out-')
+        outfd, outname = pycompat.mkstemp(prefix=b'hg-filter-out-')
         os.close(outfd)
-        cmd = cmd.replace('INFILE', inname)
-        cmd = cmd.replace('OUTFILE', outname)
+        cmd = cmd.replace(b'INFILE', inname)
+        cmd = cmd.replace(b'OUTFILE', outname)
         code = system(cmd)
-        if pycompat.sysplatform == 'OpenVMS' and code & 1:
+        if pycompat.sysplatform == b'OpenVMS' and code & 1:
             code = 0
         if code:
-            raise error.Abort(_("command '%s' failed: %s") %
-                              (cmd, explainexit(code)))
-        with open(outname, 'rb') as fp:
+            raise error.Abort(
+                _(b"command '%s' failed: %s") % (cmd, explainexit(code))
+            )
+        with open(outname, b'rb') as fp:
             return fp.read()
     finally:
         try:
@@ -201,72 +239,90 @@
         except OSError:
             pass
 
+
 _filtertable = {
-    'tempfile:': tempfilter,
-    'pipe:': pipefilter,
+    b'tempfile:': tempfilter,
+    b'pipe:': pipefilter,
 }
 
+
 def filter(s, cmd):
-    "filter a string through a command that transforms its input to its output"
-    for name, fn in _filtertable.iteritems():
+    b"filter a string through a command that transforms its input to its output"
+    for name, fn in pycompat.iteritems(_filtertable):
         if cmd.startswith(name):
-            return fn(s, cmd[len(name):].lstrip())
+            return fn(s, cmd[len(name) :].lstrip())
     return pipefilter(s, cmd)
 
+
 def mainfrozen():
     """return True if we are a frozen executable.
 
     The code supports py2exe (most common, Windows only) and tools/freeze
     (portable, not much used).
     """
-    return (pycompat.safehasattr(sys, "frozen") or # new py2exe
-            pycompat.safehasattr(sys, "importers") or # old py2exe
-            imp.is_frozen(r"__main__")) # tools/freeze
+    return (
+        pycompat.safehasattr(sys, "frozen")
+        or pycompat.safehasattr(sys, "importers")  # new py2exe
+        or imp.is_frozen(r"__main__")  # old py2exe
+    )  # tools/freeze
+
 
 _hgexecutable = None
 
+
 def hgexecutable():
     """return location of the 'hg' executable.
 
     Defaults to $HG or 'hg' in the search path.
     """
     if _hgexecutable is None:
-        hg = encoding.environ.get('HG')
+        hg = encoding.environ.get(b'HG')
         mainmod = sys.modules[r'__main__']
         if hg:
             _sethgexecutable(hg)
         elif mainfrozen():
-            if getattr(sys, 'frozen', None) == 'macosx_app':
+            if getattr(sys, 'frozen', None) == b'macosx_app':
                 # Env variable set by py2app
-                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
+                _sethgexecutable(encoding.environ[b'EXECUTABLEPATH'])
             else:
                 _sethgexecutable(pycompat.sysexecutable)
-        elif (not pycompat.iswindows and os.path.basename(
-            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
+        elif (
+            not pycompat.iswindows
+            and os.path.basename(
+                pycompat.fsencode(getattr(mainmod, '__file__', b''))
+            )
+            == b'hg'
+        ):
             _sethgexecutable(pycompat.fsencode(mainmod.__file__))
         else:
-            exe = findexe('hg') or os.path.basename(sys.argv[0])
-            _sethgexecutable(exe)
+            _sethgexecutable(
+                findexe(b'hg') or os.path.basename(pycompat.sysargv[0])
+            )
     return _hgexecutable
 
+
 def _sethgexecutable(path):
     """set location of the 'hg' executable"""
     global _hgexecutable
     _hgexecutable = path
 
+
 def _testfileno(f, stdf):
     fileno = getattr(f, 'fileno', None)
     try:
         return fileno and fileno() == stdf.fileno()
     except io.UnsupportedOperation:
-        return False # fileno() raised UnsupportedOperation
+        return False  # fileno() raised UnsupportedOperation
+
 
 def isstdin(f):
     return _testfileno(f, sys.__stdin__)
 
+
 def isstdout(f):
     return _testfileno(f, sys.__stdout__)
 
+
 def protectstdio(uin, uout):
     """Duplicate streams and redirect original if (uin, uout) are stdio
 
@@ -291,6 +347,7 @@
         fout = os.fdopen(newfd, r'wb')
     return fin, fout
 
+
 def restorestdio(uin, uout, fin, fout):
     """Restore (uin, uout) streams from possibly duplicated (fin, fout)"""
     uout.flush()
@@ -299,37 +356,45 @@
             os.dup2(f.fileno(), uif.fileno())
             f.close()
 
+
 def shellenviron(environ=None):
     """return environ with optional override, useful for shelling out"""
+
     def py2shell(val):
         'convert python object into string that is useful to shell'
         if val is None or val is False:
-            return '0'
+            return b'0'
         if val is True:
-            return '1'
+            return b'1'
         return pycompat.bytestr(val)
+
     env = dict(encoding.environ)
     if environ:
-        env.update((k, py2shell(v)) for k, v in environ.iteritems())
-    env['HG'] = hgexecutable()
+        env.update((k, py2shell(v)) for k, v in pycompat.iteritems(environ))
+    env[b'HG'] = hgexecutable()
     return env
 
+
 if pycompat.iswindows:
+
     def shelltonative(cmd, env):
         return platform.shelltocmdexe(cmd, shellenviron(env))
 
     tonativestr = encoding.strfromlocal
 else:
+
     def shelltonative(cmd, env):
         return cmd
 
     tonativestr = pycompat.identity
 
+
 def tonativeenv(env):
     '''convert the environment from bytes to strings suitable for Popen(), etc.
     '''
     return pycompat.rapply(tonativestr, env)
 
+
 def system(cmd, environ=None, cwd=None, out=None):
     '''enhanced shell command execution.
     run with environment maybe modified, maybe in different dir.
@@ -343,29 +408,36 @@
     cmd = quotecommand(cmd)
     env = shellenviron(environ)
     if out is None or isstdout(out):
-        rc = subprocess.call(tonativestr(cmd),
-                             shell=True, close_fds=closefds,
-                             env=tonativeenv(env),
-                             cwd=pycompat.rapply(tonativestr, cwd))
+        rc = subprocess.call(
+            tonativestr(cmd),
+            shell=True,
+            close_fds=closefds,
+            env=tonativeenv(env),
+            cwd=pycompat.rapply(tonativestr, cwd),
+        )
     else:
-        proc = subprocess.Popen(tonativestr(cmd),
-                                shell=True, close_fds=closefds,
-                                env=tonativeenv(env),
-                                cwd=pycompat.rapply(tonativestr, cwd),
-                                stdout=subprocess.PIPE,
-                                stderr=subprocess.STDOUT)
-        for line in iter(proc.stdout.readline, ''):
+        proc = subprocess.Popen(
+            tonativestr(cmd),
+            shell=True,
+            close_fds=closefds,
+            env=tonativeenv(env),
+            cwd=pycompat.rapply(tonativestr, cwd),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+        )
+        for line in iter(proc.stdout.readline, b''):
             out.write(line)
         proc.wait()
         rc = proc.returncode
-    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
+    if pycompat.sysplatform == b'OpenVMS' and rc & 1:
         rc = 0
     return rc
 
+
 def gui():
     '''Are we running in a GUI?'''
     if pycompat.isdarwin:
-        if 'SSH_CONNECTION' in encoding.environ:
+        if b'SSH_CONNECTION' in encoding.environ:
             # handle SSH access to a box where the user is logged in
             return False
         elif getattr(osutil, 'isgui', None):
@@ -375,7 +447,8 @@
             # pure build; use a safe default
             return True
     else:
-        return pycompat.iswindows or encoding.environ.get("DISPLAY")
+        return pycompat.iswindows or encoding.environ.get(b"DISPLAY")
+
 
 def hgcmd():
     """Return the command used to execute current hg
@@ -385,13 +458,14 @@
     get either the python call or current executable.
     """
     if mainfrozen():
-        if getattr(sys, 'frozen', None) == 'macosx_app':
+        if getattr(sys, 'frozen', None) == b'macosx_app':
             # Env variable set by py2app
-            return [encoding.environ['EXECUTABLEPATH']]
+            return [encoding.environ[b'EXECUTABLEPATH']]
         else:
             return [pycompat.sysexecutable]
     return _gethgcmd()
 
+
 def rundetached(args, condfn):
     """Execute the argument list in a detached process.
 
@@ -409,8 +483,10 @@
     # running process on success. Instead we listen for SIGCHLD telling
     # us our child process terminated.
     terminated = set()
+
     def handler(signum, frame):
         terminated.add(os.wait())
+
     prevhandler = None
     SIGCHLD = getattr(signal, 'SIGCHLD', None)
     if SIGCHLD is not None:
@@ -418,8 +494,7 @@
     try:
         pid = spawndetached(args)
         while not condfn():
-            if ((pid in terminated or not testpid(pid))
-                and not condfn()):
+            if (pid in terminated or not testpid(pid)) and not condfn():
                 return -1
             time.sleep(0.1)
         return pid
@@ -427,6 +502,7 @@
         if prevhandler is not None:
             signal.signal(signal.SIGCHLD, prevhandler)
 
+
 @contextlib.contextmanager
 def uninterruptible(warn):
     """Inhibit SIGINT handling on a region of code.
@@ -460,6 +536,7 @@
         if shouldbail:
             raise KeyboardInterrupt
 
+
 if pycompat.iswindows:
     # no fork on Windows, but we can create a detached process
     # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx
@@ -471,18 +548,27 @@
     _creationflags = DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
 
     def runbgcommand(
-      script, env, shell=False, stdout=None, stderr=None, ensurestart=True):
+        script, env, shell=False, stdout=None, stderr=None, ensurestart=True
+    ):
         '''Spawn a command without waiting for it to finish.'''
         # we can't use close_fds *and* redirect stdin. I'm not sure that we
         # need to because the detached process has no console connection.
         subprocess.Popen(
             tonativestr(script),
-            shell=shell, env=tonativeenv(env), close_fds=True,
-            creationflags=_creationflags, stdout=stdout,
-            stderr=stderr)
+            shell=shell,
+            env=tonativeenv(env),
+            close_fds=True,
+            creationflags=_creationflags,
+            stdout=stdout,
+            stderr=stderr,
+        )
+
+
 else:
+
     def runbgcommand(
-      cmd, env, shell=False, stdout=None, stderr=None, ensurestart=True):
+        cmd, env, shell=False, stdout=None, stderr=None, ensurestart=True
+    ):
         '''Spawn a command without waiting for it to finish.'''
         # double-fork to completely detach from the parent process
         # based on http://code.activestate.com/recipes/278731
@@ -495,7 +581,7 @@
             if os.WIFEXITED(status):
                 returncode = os.WEXITSTATUS(status)
             else:
-                returncode = -os.WTERMSIG(status)
+                returncode = -(os.WTERMSIG(status))
             if returncode != 0:
                 # The child process's return code is 0 on success, an errno
                 # value on failure, or 255 if we don't have a valid errno
@@ -506,8 +592,10 @@
                 # doesn't seem worth adding that complexity here, though.)
                 if returncode == 255:
                     returncode = errno.EINVAL
-                raise OSError(returncode, 'error running %r: %s' %
-                              (cmd, os.strerror(returncode)))
+                raise OSError(
+                    returncode,
+                    b'error running %r: %s' % (cmd, os.strerror(returncode)),
+                )
             return
 
         returncode = 255
@@ -515,20 +603,26 @@
             # Start a new session
             os.setsid()
 
-            stdin = open(os.devnull, 'r')
+            stdin = open(os.devnull, b'r')
             if stdout is None:
-                stdout = open(os.devnull, 'w')
+                stdout = open(os.devnull, b'w')
             if stderr is None:
-                stderr = open(os.devnull, 'w')
+                stderr = open(os.devnull, b'w')
 
             # connect stdin to devnull to make sure the subprocess can't
             # muck up that stream for mercurial.
             subprocess.Popen(
-                cmd, shell=shell, env=env, close_fds=True,
-                stdin=stdin, stdout=stdout, stderr=stderr)
+                cmd,
+                shell=shell,
+                env=env,
+                close_fds=True,
+                stdin=stdin,
+                stdout=stdout,
+                stderr=stderr,
+            )
             returncode = 0
         except EnvironmentError as ex:
-            returncode = (ex.errno & 0xff)
+            returncode = ex.errno & 0xFF
             if returncode == 0:
                 # This shouldn't happen, but just in case make sure the
                 # return code is never 0 here.
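
The POSIX runbgcommand() above is a double-fork detach: the first fork lets
the caller continue (after reaping the short-lived child for an error
code), the child calls setsid() to drop the controlling terminal, and the
Popen() inside it is the second fork that leaves the real command running
with no tie back to the caller. The classic shape, reduced to its core
(hedged sketch, POSIX only):

    import os

    def spawn_detached(run):
        if os.fork() > 0:
            return  # parent continues immediately
        os.setsid()  # new session: no controlling terminal
        if os.fork() > 0:
            os._exit(0)  # first child exits promptly
        try:
            run()  # grandchild: fully detached
        finally:
            os._exit(0)
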
--- a/mercurial/utils/repoviewutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/utils/repoviewutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,9 +14,11 @@
 # * X - Y is as small as possible.
 # This creates an ordering used for branchmap purposes.
 # The ordering may be partial.
-subsettable = {None: 'visible',
-               'visible-hidden': 'visible',
-               'visible': 'served',
-               'served.hidden': 'served',
-               'served': 'immutable',
-               'immutable': 'base'}
+subsettable = {
+    None: b'visible',
+    b'visible-hidden': b'visible',
+    b'visible': b'served',
+    b'served.hidden': b'served',
+    b'served': b'immutable',
+    b'immutable': b'base',
+}
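
Each value in subsettable is a subset of its key (served drops secret
changesets from visible, and so on), which gives branchmap a partial order
for reusing a cached map from the nearest subset. Walking the chain
(hypothetical helper):

    def subsetchain(name):
        # subsetchain(b'visible') -> [b'served', b'immutable', b'base']
        chain = []
        while name in subsettable:
            name = subsettable[name]
            chain.append(name)
        return chain
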
--- a/mercurial/utils/storageutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/utils/storageutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -22,11 +22,12 @@
     error,
     mdiff,
     pycompat,
-    repository,
 )
+from ..interfaces import repository
 
 _nullhash = hashlib.sha1(nullid)
 
+
 def hashrevisionsha1(text, p1, p2):
     """Compute the SHA-1 for revision data and its parents.
 
@@ -52,8 +53,10 @@
     s.update(text)
     return s.digest()
 
+
 METADATA_RE = re.compile(b'\x01\n')
 
+
 def parsemeta(text):
     """Parse metadata header from revision data.
 
@@ -71,16 +74,19 @@
         meta[k] = v
     return meta, s + 2
 
+
 def packmeta(meta, text):
     """Add metadata to fulltext to produce revision text."""
     keys = sorted(meta)
     metatext = b''.join(b'%s: %s\n' % (k, meta[k]) for k in keys)
     return b'\x01\n%s\x01\n%s' % (metatext, text)
 
+
 def iscensoredtext(text):
     meta = parsemeta(text)[0]
     return meta and b'censored' in meta
 
+
 def filtermetadata(text):
     """Extract just the revision data from source text.
 
@@ -91,7 +97,8 @@
         return text
 
     offset = text.index(b'\x01\n', 2)
-    return text[offset + 2:]
+    return text[offset + 2 :]
+
 
 def filerevisioncopied(store, node):
     """Resolve file revision copy metadata.
@@ -112,6 +119,7 @@
 
     return False
 
+
 def filedataequivalent(store, node, filedata):
     """Determines whether file data is equivalent to a stored node.
 
@@ -148,6 +156,7 @@
 
     return False
 
+
 def iterrevs(storelen, start=0, stop=None):
     """Iterate over revision numbers in a store."""
     step = 1
@@ -163,6 +172,7 @@
 
     return pycompat.xrange(start, stop, step)
 
+
 def fileidlookup(store, fileid, identifier):
     """Resolve the file node for a value.
 
@@ -184,8 +194,9 @@
         try:
             return store.node(fileid)
         except IndexError:
-            raise error.LookupError('%d' % fileid, identifier,
-                                    _('no match found'))
+            raise error.LookupError(
+                b'%d' % fileid, identifier, _(b'no match found')
+            )
 
     if len(fileid) == 20:
         try:
@@ -215,7 +226,8 @@
     except (ValueError, OverflowError):
         pass
 
-    raise error.LookupError(fileid, identifier, _('no match found'))
+    raise error.LookupError(fileid, identifier, _(b'no match found'))
+
 
 def resolvestripinfo(minlinkrev, tiprev, headrevs, linkrevfn, parentrevsfn):
     """Resolve information needed to strip revisions.
@@ -268,10 +280,21 @@
 
     return strippoint, brokenrevs
 
-def emitrevisions(store, nodes, nodesorder, resultcls, deltaparentfn=None,
-                  candeltafn=None, rawsizefn=None, revdifffn=None, flagsfn=None,
-                  deltamode=repository.CG_DELTAMODE_STD,
-                  revisiondata=False, assumehaveparentrevisions=False):
+
+def emitrevisions(
+    store,
+    nodes,
+    nodesorder,
+    resultcls,
+    deltaparentfn=None,
+    candeltafn=None,
+    rawsizefn=None,
+    revdifffn=None,
+    flagsfn=None,
+    deltamode=repository.CG_DELTAMODE_STD,
+    revisiondata=False,
+    assumehaveparentrevisions=False,
+):
     """Generic implementation of ifiledata.emitrevisions().
 
     Emitting revision data is subtly complex. This function attempts to
@@ -304,9 +327,9 @@
 
     ``rawsizefn`` (optional)
        Callable receiving a revision number and returning the length of the
-       ``store.revision(rev, raw=True)``.
+       ``store.rawdata(rev)``.
 
-       If not defined, ``len(store.revision(rev, raw=True))`` will be called.
+       If not defined, ``len(store.rawdata(rev))`` will be called.
 
     ``revdifffn`` (optional)
        Callable receiving a pair of revision numbers that returns a delta
@@ -338,12 +361,12 @@
     fnode = store.node
     frev = store.rev
 
-    if nodesorder == 'nodes':
+    if nodesorder == b'nodes':
         revs = [frev(n) for n in nodes]
-    elif nodesorder == 'linear':
+    elif nodesorder == b'linear':
         revs = set(frev(n) for n in nodes)
         revs = dagop.linearize(revs, store.parentrevs)
-    else: # storage and default
+    else:  # storage and default
         revs = sorted(frev(n) for n in nodes)
 
     prevrev = None
@@ -388,8 +411,7 @@
 
             # Base revision is a parent that hasn't been emitted already.
             # Use it if we can assume the receiver has the parent revision.
-            elif (assumehaveparentrevisions
-                  and deltaparentrev in (p1rev, p2rev)):
+            elif assumehaveparentrevisions and deltaparentrev in (p1rev, p2rev):
                 baserev = deltaparentrev
 
             # No guarantee the receiver has the delta parent. Send delta
@@ -422,7 +444,7 @@
         if revisiondata:
             if store.iscensored(baserev) or store.iscensored(rev):
                 try:
-                    revision = store.revision(node, raw=True)
+                    revision = store.rawdata(node)
                 except error.CensoredNodeError as e:
                     revision = e.tombstone
 
@@ -430,19 +452,20 @@
                     if rawsizefn:
                         baserevisionsize = rawsizefn(baserev)
                     else:
-                        baserevisionsize = len(store.revision(baserev,
-                                                              raw=True))
+                        baserevisionsize = len(store.rawdata(baserev))
 
-            elif (baserev == nullrev
-                    and deltamode != repository.CG_DELTAMODE_PREV):
-                revision = store.revision(node, raw=True)
+            elif (
+                baserev == nullrev and deltamode != repository.CG_DELTAMODE_PREV
+            ):
+                revision = store.rawdata(node)
                 available.add(rev)
             else:
                 if revdifffn:
                     delta = revdifffn(baserev, rev)
                 else:
-                    delta = mdiff.textdiff(store.revision(baserev, raw=True),
-                                           store.revision(rev, raw=True))
+                    delta = mdiff.textdiff(
+                        store.rawdata(baserev), store.rawdata(rev)
+                    )
 
                 available.add(rev)
 
@@ -454,10 +477,12 @@
             flags=flagsfn(rev) if flagsfn else 0,
             baserevisionsize=baserevisionsize,
             revision=revision,
-            delta=delta)
+            delta=delta,
+        )
 
         prevrev = rev
 
+
 def deltaiscensored(delta, baserev, baselenfn):
     """Determine if a delta represents censored revision data.
 
@@ -473,7 +498,7 @@
     # "\1\ncensored:". A delta producing such a censored revision must be a
     # full-replacement delta, so we inspect the first and only patch in the
     # delta for this prefix.
-    hlen = struct.calcsize(">lll")
+    hlen = struct.calcsize(b">lll")
     if len(delta) <= hlen:
         return False
 
@@ -482,6 +507,6 @@
     if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
         return False
 
-    add = "\1\ncensored:"
+    add = b"\1\ncensored:"
     addlen = len(add)
-    return newlen >= addlen and delta[hlen:hlen + addlen] == add
+    return newlen >= addlen and delta[hlen : hlen + addlen] == add
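
For reference, the \x01\n metadata framing handled by parsemeta() and
packmeta() above round-trips like this (illustrative values):

    meta = {b'copy': b'src/old.py', b'copyrev': b'0' * 40}
    text = packmeta(meta, b'file contents\n')
    # text is b'\x01\ncopy: src/old.py\ncopyrev: 00...00\n\x01\nfile contents\n'
    parsed, offset = parsemeta(text)
    assert parsed == meta
    assert text[offset:] == b'file contents\n'
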
--- a/mercurial/utils/stringutil.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/utils/stringutil.py	Mon Oct 21 11:09:48 2019 -0400
@@ -30,6 +30,7 @@
 _regexescapemap = {ord(i): (b'\\' + i).decode('latin1') for i in _respecial}
 regexbytesescapemap = {i: (b'\\' + i) for i in _respecial}
 
+
 def reescape(pat):
     """Drop-in replacement for re.escape."""
     # NOTE: it is intentional that this works on unicodes and not
@@ -44,10 +45,12 @@
         return pat
     return pat.encode('latin1')
 
+
 def pprint(o, bprefix=False, indent=0, level=0):
     """Pretty print an object."""
     return b''.join(pprintgen(o, bprefix=bprefix, indent=indent, level=level))
 
+
 def pprintgen(o, bprefix=False, indent=0, level=0):
     """Pretty print an object to a generator of atoms.
 
@@ -63,153 +66,158 @@
 
     if isinstance(o, bytes):
         if bprefix:
-            yield "b'%s'" % escapestr(o)
+            yield b"b'%s'" % escapestr(o)
         else:
-            yield "'%s'" % escapestr(o)
+            yield b"'%s'" % escapestr(o)
     elif isinstance(o, bytearray):
         # codecs.escape_encode() can't handle bytearray, so escapestr fails
         # without coercion.
-        yield "bytearray['%s']" % escapestr(bytes(o))
+        yield b"bytearray['%s']" % escapestr(bytes(o))
     elif isinstance(o, list):
         if not o:
-            yield '[]'
+            yield b'[]'
             return
 
-        yield '['
+        yield b'['
 
         if indent:
             level += 1
-            yield '\n'
-            yield ' ' * (level * indent)
+            yield b'\n'
+            yield b' ' * (level * indent)
 
         for i, a in enumerate(o):
-            for chunk in pprintgen(a, bprefix=bprefix, indent=indent,
-                                   level=level):
+            for chunk in pprintgen(
+                a, bprefix=bprefix, indent=indent, level=level
+            ):
                 yield chunk
 
             if i + 1 < len(o):
                 if indent:
-                    yield ',\n'
-                    yield ' ' * (level * indent)
+                    yield b',\n'
+                    yield b' ' * (level * indent)
                 else:
-                    yield ', '
+                    yield b', '
 
         if indent:
             level -= 1
-            yield '\n'
-            yield ' ' * (level * indent)
+            yield b'\n'
+            yield b' ' * (level * indent)
 
-        yield ']'
+        yield b']'
     elif isinstance(o, dict):
         if not o:
-            yield '{}'
+            yield b'{}'
             return
 
-        yield '{'
+        yield b'{'
 
         if indent:
             level += 1
-            yield '\n'
-            yield ' ' * (level * indent)
+            yield b'\n'
+            yield b' ' * (level * indent)
 
         for i, (k, v) in enumerate(sorted(o.items())):
-            for chunk in pprintgen(k, bprefix=bprefix, indent=indent,
-                                   level=level):
+            for chunk in pprintgen(
+                k, bprefix=bprefix, indent=indent, level=level
+            ):
                 yield chunk
 
-            yield ': '
+            yield b': '
 
-            for chunk in pprintgen(v, bprefix=bprefix, indent=indent,
-                                   level=level):
+            for chunk in pprintgen(
+                v, bprefix=bprefix, indent=indent, level=level
+            ):
                 yield chunk
 
             if i + 1 < len(o):
                 if indent:
-                    yield ',\n'
-                    yield ' ' * (level * indent)
+                    yield b',\n'
+                    yield b' ' * (level * indent)
                 else:
-                    yield ', '
+                    yield b', '
 
         if indent:
             level -= 1
-            yield '\n'
-            yield ' ' * (level * indent)
+            yield b'\n'
+            yield b' ' * (level * indent)
 
-        yield '}'
+        yield b'}'
     elif isinstance(o, set):
         if not o:
-            yield 'set([])'
+            yield b'set([])'
             return
 
-        yield 'set(['
+        yield b'set(['
 
         if indent:
             level += 1
-            yield '\n'
-            yield ' ' * (level * indent)
+            yield b'\n'
+            yield b' ' * (level * indent)
 
         for i, k in enumerate(sorted(o)):
-            for chunk in pprintgen(k, bprefix=bprefix, indent=indent,
-                                   level=level):
+            for chunk in pprintgen(
+                k, bprefix=bprefix, indent=indent, level=level
+            ):
                 yield chunk
 
             if i + 1 < len(o):
                 if indent:
-                    yield ',\n'
-                    yield ' ' * (level * indent)
+                    yield b',\n'
+                    yield b' ' * (level * indent)
                 else:
-                    yield ', '
+                    yield b', '
 
         if indent:
             level -= 1
-            yield '\n'
-            yield ' ' * (level * indent)
+            yield b'\n'
+            yield b' ' * (level * indent)
 
-        yield '])'
+        yield b'])'
     elif isinstance(o, tuple):
         if not o:
-            yield '()'
+            yield b'()'
             return
 
-        yield '('
+        yield b'('
 
         if indent:
             level += 1
-            yield '\n'
-            yield ' ' * (level * indent)
+            yield b'\n'
+            yield b' ' * (level * indent)
 
         for i, a in enumerate(o):
-            for chunk in pprintgen(a, bprefix=bprefix, indent=indent,
-                                   level=level):
+            for chunk in pprintgen(
+                a, bprefix=bprefix, indent=indent, level=level
+            ):
                 yield chunk
 
             if i + 1 < len(o):
                 if indent:
-                    yield ',\n'
-                    yield ' ' * (level * indent)
+                    yield b',\n'
+                    yield b' ' * (level * indent)
                 else:
-                    yield ', '
+                    yield b', '
 
         if indent:
             level -= 1
-            yield '\n'
-            yield ' ' * (level * indent)
+            yield b'\n'
+            yield b' ' * (level * indent)
 
-        yield ')'
+        yield b')'
     elif isinstance(o, types.GeneratorType):
         # Special case of empty generator.
         try:
             nextitem = next(o)
         except StopIteration:
-            yield 'gen[]'
+            yield b'gen[]'
             return
 
-        yield 'gen['
+        yield b'gen['
 
         if indent:
             level += 1
-            yield '\n'
-            yield ' ' * (level * indent)
+            yield b'\n'
+            yield b' ' * (level * indent)
 
         last = False
 
@@ -221,26 +229,28 @@
             except StopIteration:
                 last = True
 
-            for chunk in pprintgen(current, bprefix=bprefix, indent=indent,
-                                   level=level):
+            for chunk in pprintgen(
+                current, bprefix=bprefix, indent=indent, level=level
+            ):
                 yield chunk
 
             if not last:
                 if indent:
-                    yield ',\n'
-                    yield ' ' * (level * indent)
+                    yield b',\n'
+                    yield b' ' * (level * indent)
                 else:
-                    yield ', '
+                    yield b', '
 
         if indent:
             level -= 1
-            yield '\n'
-            yield ' ' * (level * indent)
+            yield b'\n'
+            yield b' ' * (level * indent)
 
-        yield ']'
+        yield b']'
     else:
         yield pycompat.byterepr(o)
 
+
 def prettyrepr(o):
     """Pretty print a representation of a possibly-nested object"""
     lines = []
@@ -251,21 +261,22 @@
         #      ~~~~~~~~~~~~~~~~
         #      p0    p1        q0    q1
         q0 = -1
-        q1 = rs.find('<', p1 + 1)
+        q1 = rs.find(b'<', p1 + 1)
         if q1 < 0:
             q1 = len(rs)
-        elif q1 > p1 + 1 and rs.startswith('=', q1 - 1):
+        elif q1 > p1 + 1 and rs.startswith(b'=', q1 - 1):
             # backtrack for ' field=<'
-            q0 = rs.rfind(' ', p1 + 1, q1 - 1)
+            q0 = rs.rfind(b' ', p1 + 1, q1 - 1)
         if q0 < 0:
             q0 = q1
         else:
             q0 += 1  # skip ' '
-        l = rs.count('<', 0, p0) - rs.count('>', 0, p0)
+        l = rs.count(b'<', 0, p0) - rs.count(b'>', 0, p0)
         assert l >= 0
         lines.append((l, rs[p0:q0].rstrip()))
         p0, p1 = q0, q1
-    return '\n'.join('  ' * l + s for l, s in lines)
+    return b'\n'.join(b'  ' * l + s for l, s in lines)
+
 
 def buildrepr(r):
     """Format an optional printable representation from unexpanded bits
@@ -280,7 +291,7 @@
     ========  =================================
     """
     if r is None:
-        return ''
+        return b''
     elif isinstance(r, tuple):
         return r[0] % pycompat.rapply(pycompat.maybebytestr, r[1:])
     elif isinstance(r, bytes):
@@ -290,9 +301,11 @@
     else:
         return pprint(r)
 
+
 def binary(s):
     """return true if a string is binary data"""
-    return bool(s and '\0' in s)
+    return bool(s and b'\0' in s)
+
 
 def stringmatcher(pattern, casesensitive=True):
     """
@@ -332,7 +345,7 @@
     >>> itest(b'ABCDEFG', b'abc', b'def', b'abcdefg')
     ('literal', 'ABCDEFG', [False, False, True])
     """
-    if pattern.startswith('re:'):
+    if pattern.startswith(b're:'):
         pattern = pattern[3:]
         try:
             flags = 0
@@ -340,10 +353,9 @@
                 flags = remod.I
             regex = remod.compile(pattern, flags)
         except remod.error as e:
-            raise error.ParseError(_('invalid regular expression: %s')
-                                   % e)
-        return 're', pattern, regex.search
-    elif pattern.startswith('literal:'):
+            raise error.ParseError(_(b'invalid regular expression: %s') % e)
+        return b're', pattern, regex.search
+    elif pattern.startswith(b'literal:'):
         pattern = pattern[8:]
 
     match = pattern.__eq__
@@ -351,40 +363,44 @@
     if not casesensitive:
         ipat = encoding.lower(pattern)
         match = lambda s: ipat == encoding.lower(s)
-    return 'literal', pattern, match
+    return b'literal', pattern, match
+
 
 def shortuser(user):
     """Return a short representation of a user name or email address."""
-    f = user.find('@')
+    f = user.find(b'@')
     if f >= 0:
         user = user[:f]
-    f = user.find('<')
+    f = user.find(b'<')
     if f >= 0:
-        user = user[f + 1:]
-    f = user.find(' ')
+        user = user[f + 1 :]
+    f = user.find(b' ')
     if f >= 0:
         user = user[:f]
-    f = user.find('.')
+    f = user.find(b'.')
     if f >= 0:
         user = user[:f]
     return user
 
+
 def emailuser(user):
     """Return the user portion of an email address."""
-    f = user.find('@')
+    f = user.find(b'@')
     if f >= 0:
         user = user[:f]
-    f = user.find('<')
+    f = user.find(b'<')
     if f >= 0:
-        user = user[f + 1:]
+        user = user[f + 1 :]
     return user
 
+
 def email(author):
     '''get email of author.'''
-    r = author.find('>')
+    r = author.find(b'>')
     if r == -1:
         r = None
-    return author[author.find('<') + 1:r]
+    return author[author.find(b'<') + 1 : r]
+
 
 def person(author):
     """Returns the name before an email address,
@@ -405,21 +421,24 @@
     >>> person(b'"Foo Bar <foo@bar>')
     'Foo Bar'
     """
-    if '@' not in author:
+    if b'@' not in author:
         return author
-    f = author.find('<')
+    f = author.find(b'<')
     if f != -1:
-        return author[:f].strip(' "').replace('\\"', '"')
-    f = author.find('@')
-    return author[:f].replace('.', ' ')
+        return author[:f].strip(b' "').replace(b'\\"', b'"')
+    f = author.find(b'@')
+    return author[:f].replace(b'.', b' ')
+
 
 @attr.s(hash=True)
 class mailmapping(object):
     '''Represents a username/email key or value in
     a mailmap file'''
+
     email = attr.ib()
     name = attr.ib(default=None)
 
+
 def _ismailmaplineinvalid(names, emails):
     '''Returns True if the parsed names and emails
     in a mailmap entry are invalid.
@@ -444,6 +463,7 @@
     '''
     return not emails or not names and len(emails) < 2
 
+
 def parsemailmap(mailmapcontent):
     """Parses data in the .mailmap format
 
@@ -477,7 +497,7 @@
 
         # Don't bother checking the line if it is a comment or
         # is an improperly formed author field
-        if line.lstrip().startswith('#'):
+        if line.lstrip().startswith(b'#'):
             continue
 
         # names, emails hold the parsed emails and names for each line
@@ -486,17 +506,17 @@
         namebuilder = []
 
         for element in line.split():
-            if element.startswith('#'):
+            if element.startswith(b'#'):
                 # If we reach a comment in the mailmap file, move on
                 break
 
-            elif element.startswith('<') and element.endswith('>'):
+            elif element.startswith(b'<') and element.endswith(b'>'):
                 # We have found an email.
                 # Parse it, and finalize any names from earlier
                 emails.append(element[1:-1])  # Slice off the "<>"
 
                 if namebuilder:
-                    names.append(' '.join(namebuilder))
+                    names.append(b' '.join(namebuilder))
                     namebuilder = []
 
                 # Break if we have found a second email, any other
@@ -515,17 +535,16 @@
             continue
 
         mailmapkey = mailmapping(
-            email=emails[-1],
-            name=names[-1] if len(names) == 2 else None,
+            email=emails[-1], name=names[-1] if len(names) == 2 else None,
         )
 
         mailmap[mailmapkey] = mailmapping(
-            email=emails[0],
-            name=names[0] if names else None,
+            email=emails[0], name=names[0] if names else None,
         )
 
     return mailmap
 
+
 def mapname(mailmap, author):
     """Returns the author field according to the mailmap cache, or
     the original author field.
@@ -568,13 +587,15 @@
         proper = mailmap.get(commit2, mailmapping(None, None))
 
     # Return the author field with proper values filled in
-    return '%s <%s>' % (
+    return b'%s <%s>' % (
         proper.name if proper.name else commit.name,
         proper.email if proper.email else commit.email,
     )
 
+
 _correctauthorformat = remod.compile(br'^[^<]+\s\<[^<>]+@[^<>]+\>$')
 
+
 def isauthorwellformed(author):
     '''Return True if the author field is well formed
     (ie "Contributor Name <contrib@email.dom>")
@@ -596,9 +617,11 @@
     '''
     return _correctauthorformat.match(author) is not None
 
+
 def ellipsis(text, maxlength=400):
     """Trim string to at most maxlength (default: 400) columns in display."""
-    return encoding.trim(text, maxlength, ellipsis='...')
+    return encoding.trim(text, maxlength, ellipsis=b'...')
+
 
 def escapestr(s):
     if isinstance(s, memoryview):
@@ -607,9 +630,11 @@
     # Python 3 compatibility
     return codecs.escape_encode(s)[0]
 
+
 def unescapestr(s):
     return codecs.escape_decode(s)[0]
 
+
 def forcebytestr(obj):
     """Portably format an arbitrary object (e.g. exception) into a byte
     string."""
@@ -619,10 +644,12 @@
         # non-ascii string, may be lossy
         return pycompat.bytestr(encoding.strtolocal(str(obj)))
 
+
 def uirepr(s):
     # Avoid double backslash in Windows path repr()
     return pycompat.byterepr(pycompat.bytestr(s)).replace(b'\\\\', b'\\')
 
+
 # delay import of textwrap
 def _MBTextWrapper(**kwargs):
     class tw(textwrap.TextWrapper):
@@ -640,6 +667,7 @@
 
         This requires a decision about the display width of such characters.
         """
+
         def _cutdown(self, ucstr, space_left):
             l = 0
             colwidth = encoding.ucolwidth
@@ -647,7 +675,7 @@
                 l += colwidth(ucstr[i])
                 if space_left < l:
                     return (ucstr[:i], ucstr[i:])
-            return ucstr, ''
+            return ucstr, b''
 
         # overriding of base class
         def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
@@ -667,7 +695,7 @@
 
             lines = []
             if self.width <= 0:
-                raise ValueError("invalid width %r (must be > 0)" % self.width)
+                raise ValueError(b"invalid width %r (must be > 0)" % self.width)
 
             # Arrange in reverse order so items can be efficiently popped
             # from a stack of chunks.
@@ -712,8 +740,11 @@
                     self._handle_long_word(chunks, cur_line, cur_len, width)
 
                 # If the last chunk on this line is all whitespace, drop it.
-                if (self.drop_whitespace and
-                    cur_line and cur_line[-1].strip() == r''):
+                if (
+                    self.drop_whitespace
+                    and cur_line
+                    and cur_line[-1].strip() == r''
+                ):
                     del cur_line[-1]
 
                 # Convert current line back to a string and store it in list
@@ -727,25 +758,43 @@
     _MBTextWrapper = tw
     return tw(**kwargs)
 
-def wrap(line, width, initindent='', hangindent=''):
+
+def wrap(line, width, initindent=b'', hangindent=b''):
     maxindent = max(len(hangindent), len(initindent))
     if width <= maxindent:
         # adjust for weird terminal size
         width = max(78, maxindent + 1)
-    line = line.decode(pycompat.sysstr(encoding.encoding),
-                       pycompat.sysstr(encoding.encodingmode))
-    initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
-                                   pycompat.sysstr(encoding.encodingmode))
-    hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
-                                   pycompat.sysstr(encoding.encodingmode))
-    wrapper = _MBTextWrapper(width=width,
-                             initial_indent=initindent,
-                             subsequent_indent=hangindent)
+    line = line.decode(
+        pycompat.sysstr(encoding.encoding),
+        pycompat.sysstr(encoding.encodingmode),
+    )
+    initindent = initindent.decode(
+        pycompat.sysstr(encoding.encoding),
+        pycompat.sysstr(encoding.encodingmode),
+    )
+    hangindent = hangindent.decode(
+        pycompat.sysstr(encoding.encoding),
+        pycompat.sysstr(encoding.encodingmode),
+    )
+    wrapper = _MBTextWrapper(
+        width=width, initial_indent=initindent, subsequent_indent=hangindent
+    )
     return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
 
-_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
-             '0': False, 'no': False, 'false': False, 'off': False,
-             'never': False}
+
+_booleans = {
+    b'1': True,
+    b'yes': True,
+    b'true': True,
+    b'on': True,
+    b'always': True,
+    b'0': False,
+    b'no': False,
+    b'false': False,
+    b'off': False,
+    b'never': False,
+}
+
 
 def parsebool(s):
     """Parse s into a boolean.
@@ -754,6 +803,7 @@
     """
     return _booleans.get(s.lower(), None)
 
+
 def evalpythonliteral(s):
     """Evaluate a string containing a Python literal expression"""
     # We could backport our tokenizer hack to rewrite '' to u'' if we want
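
``parsebool`` and the ``_booleans`` table above close out the stringutil
changes. The contract is deliberately tri-state, which lets callers tell
"explicitly disabled" apart from "not a boolean at all"::

   assert parsebool(b'Yes') is True     # lookup is case-insensitive
   assert parsebool(b'off') is False
   assert parsebool(b'maybe') is None   # unrecognized: caller decides
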
--- a/mercurial/verify.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/verify.py	Mon Oct 21 11:09:48 2019 -0400
@@ -25,18 +25,21 @@
 VERIFY_DEFAULT = 0
 VERIFY_FULL = 1
 
+
 def verify(repo, level=None):
     with repo.lock():
         v = verifier(repo, level)
         return v.verify()
 
+
 def _normpath(f):
     # under hg < 2.4, convert didn't sanitize paths properly, so a
     # converted repo may contain repeated slashes
-    while '//' in f:
-        f = f.replace('//', '/')
+    while b'//' in f:
+        f = f.replace(b'//', b'/')
     return f
 
+
 class verifier(object):
     def __init__(self, repo, level=None):
         self.repo = repo.unfiltered()
@@ -55,25 +58,25 @@
         self.refersmf = False
         self.fncachewarned = False
         # developer config: verify.skipflags
-        self.skipflags = repo.ui.configint('verify', 'skipflags')
+        self.skipflags = repo.ui.configint(b'verify', b'skipflags')
         self.warnorphanstorefiles = True
 
     def _warn(self, msg):
         """record a "warning" level issue"""
-        self.ui.warn(msg + "\n")
+        self.ui.warn(msg + b"\n")
         self.warnings += 1
 
     def _err(self, linkrev, msg, filename=None):
         """record an "error" level issue"""
         if linkrev is not None:
             self.badrevs.add(linkrev)
-            linkrev = "%d" % linkrev
+            linkrev = b"%d" % linkrev
         else:
-            linkrev = '?'
-        msg = "%s: %s" % (linkrev, msg)
+            linkrev = b'?'
+        msg = b"%s: %s" % (linkrev, msg)
         if filename:
-            msg = "%s@%s" % (filename, msg)
-        self.ui.warn(" " + msg + "\n")
+            msg = b"%s@%s" % (filename, msg)
+        self.ui.warn(b" " + msg + b"\n")
         self.errors += 1
 
     def _exc(self, linkrev, msg, inst, filename=None):
@@ -81,7 +84,7 @@
         fmsg = pycompat.bytestr(inst)
         if not fmsg:
             fmsg = pycompat.byterepr(inst)
-        self._err(linkrev, "%s: %s" % (msg, fmsg), filename)
+        self._err(linkrev, b"%s: %s" % (msg, fmsg), filename)
 
     def _checkrevlog(self, obj, name, linkrev):
         """verify high level property of a revlog
@@ -92,20 +95,20 @@
         - revlog's format version is correct.
         """
         if not len(obj) and (self.havecl or self.havemf):
-            self._err(linkrev, _("empty or missing %s") % name)
+            self._err(linkrev, _(b"empty or missing %s") % name)
             return
 
         d = obj.checksize()
         if d[0]:
-            self._err(None, _("data length off by %d bytes") % d[0], name)
+            self._err(None, _(b"data length off by %d bytes") % d[0], name)
         if d[1]:
-            self._err(None, _("index contains %d extra bytes") % d[1], name)
+            self._err(None, _(b"index contains %d extra bytes") % d[1], name)
 
         if obj.version != revlog.REVLOGV0:
             if not self.revlogv1:
-                self._warn(_("warning: `%s' uses revlog format 1") % name)
+                self._warn(_(b"warning: `%s' uses revlog format 1") % name)
         elif self.revlogv1:
-            self._warn(_("warning: `%s' uses revlog format 0") % name)
+            self._warn(_(b"warning: `%s' uses revlog format 0") % name)
 
     def _checkentry(self, obj, i, node, seen, linkrevs, f):
         """verify a single revlog entry
@@ -130,35 +133,46 @@
         lr = obj.linkrev(obj.rev(node))
         if lr < 0 or (self.havecl and lr not in linkrevs):
             if lr < 0 or lr >= len(self.repo.changelog):
-                msg = _("rev %d points to nonexistent changeset %d")
+                msg = _(b"rev %d points to nonexistent changeset %d")
             else:
-                msg = _("rev %d points to unexpected changeset %d")
+                msg = _(b"rev %d points to unexpected changeset %d")
             self._err(None, msg % (i, lr), f)
             if linkrevs:
                 if f and len(linkrevs) > 1:
                     try:
                         # attempt to filter down to real linkrevs
-                        linkrevs = [l for l in linkrevs
-                                    if self.lrugetctx(l)[f].filenode() == node]
+                        linkrevs = [
+                            l
+                            for l in linkrevs
+                            if self.lrugetctx(l)[f].filenode() == node
+                        ]
                     except Exception:
                         pass
-                self._warn(_(" (expected %s)") % " ".join
-                           (map(pycompat.bytestr, linkrevs)))
-            lr = None # can't be trusted
+                self._warn(
+                    _(b" (expected %s)")
+                    % b" ".join(map(pycompat.bytestr, linkrevs))
+                )
+            lr = None  # can't be trusted
 
         try:
             p1, p2 = obj.parents(node)
             if p1 not in seen and p1 != nullid:
-                self._err(lr, _("unknown parent 1 %s of %s") %
-                    (short(p1), short(node)), f)
+                self._err(
+                    lr,
+                    _(b"unknown parent 1 %s of %s") % (short(p1), short(node)),
+                    f,
+                )
             if p2 not in seen and p2 != nullid:
-                self._err(lr, _("unknown parent 2 %s of %s") %
-                    (short(p2), short(node)), f)
+                self._err(
+                    lr,
+                    _(b"unknown parent 2 %s of %s") % (short(p2), short(node)),
+                    f,
+                )
         except Exception as inst:
-            self._exc(lr, _("checking parents of %s") % short(node), inst, f)
+            self._exc(lr, _(b"checking parents of %s") % short(node), inst, f)
 
         if node in seen:
-            self._err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
+            self._err(lr, _(b"duplicate revision %d (%d)") % (i, seen[node]), f)
         seen[node] = i
         return lr
 
@@ -171,15 +185,17 @@
         # initial validation and generic report
         repo = self.repo
         ui = repo.ui
-        if not repo.url().startswith('file:'):
-            raise error.Abort(_("cannot verify bundle or remote repos"))
+        if not repo.url().startswith(b'file:'):
+            raise error.Abort(_(b"cannot verify bundle or remote repos"))
 
-        if os.path.exists(repo.sjoin("journal")):
-            ui.warn(_("abandoned transaction found - run hg recover\n"))
+        if os.path.exists(repo.sjoin(b"journal")):
+            ui.warn(_(b"abandoned transaction found - run hg recover\n"))
 
         if ui.verbose or not self.revlogv1:
-            ui.status(_("repository uses revlog format %d\n") %
-                           (self.revlogv1 and 1 or 0))
+            ui.status(
+                _(b"repository uses revlog format %d\n")
+                % (self.revlogv1 and 1 or 0)
+            )
 
         # data verification
         mflinkrevs, filelinkrevs = self._verifychangelog()
@@ -189,18 +205,26 @@
         totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs)
 
         # final report
-        ui.status(_("checked %d changesets with %d changes to %d files\n") %
-                       (len(repo.changelog), filerevisions, totalfiles))
+        ui.status(
+            _(b"checked %d changesets with %d changes to %d files\n")
+            % (len(repo.changelog), filerevisions, totalfiles)
+        )
         if self.warnings:
-            ui.warn(_("%d warnings encountered!\n") % self.warnings)
+            ui.warn(_(b"%d warnings encountered!\n") % self.warnings)
         if self.fncachewarned:
-            ui.warn(_('hint: run "hg debugrebuildfncache" to recover from '
-                      'corrupt fncache\n'))
+            ui.warn(
+                _(
+                    b'hint: run "hg debugrebuildfncache" to recover from '
+                    b'corrupt fncache\n'
+                )
+            )
         if self.errors:
-            ui.warn(_("%d integrity errors encountered!\n") % self.errors)
+            ui.warn(_(b"%d integrity errors encountered!\n") % self.errors)
             if self.badrevs:
-                ui.warn(_("(first damaged changeset appears to be %d)\n")
-                        % min(self.badrevs))
+                ui.warn(
+                    _(b"(first damaged changeset appears to be %d)\n")
+                    % min(self.badrevs)
+                )
             return 1
         return 0
 
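
The final-report hunk above also makes the exit-code contract easy to see:
``verify()`` returns 1 only when integrity errors were recorded; warnings
(including the fncache hint) are printed but do not fail the run. Reduced to
a toy function (``_exitcode`` is a name invented here for illustration)::

   def _exitcode(errors, warnings):
       # warnings are reported but never make verification fail
       return 1 if errors else 0

   assert _exitcode(errors=0, warnings=3) == 0
   assert _exitcode(errors=1, warnings=0) == 1
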
@@ -225,17 +249,18 @@
         match = self.match
         cl = repo.changelog
 
-        ui.status(_("checking changesets\n"))
+        ui.status(_(b"checking changesets\n"))
         mflinkrevs = {}
         filelinkrevs = {}
         seen = {}
-        self._checkrevlog(cl, "changelog", 0)
-        progress = ui.makeprogress(_('checking'), unit=_('changesets'),
-                                   total=len(repo))
+        self._checkrevlog(cl, b"changelog", 0)
+        progress = ui.makeprogress(
+            _(b'checking'), unit=_(b'changesets'), total=len(repo)
+        )
         for i in repo:
             progress.update(i)
             n = cl.node(i)
-            self._checkentry(cl, i, n, seen, [i], "changelog")
+            self._checkentry(cl, i, n, seen, [i], b"changelog")
 
             try:
                 changes = cl.read(n)
@@ -247,12 +272,13 @@
                         filelinkrevs.setdefault(_normpath(f), []).append(i)
             except Exception as inst:
                 self.refersmf = True
-                self._exc(i, _("unpacking changeset %s") % short(n), inst)
+                self._exc(i, _(b"unpacking changeset %s") % short(n), inst)
         progress.complete()
         return mflinkrevs, filelinkrevs
 
-    def _verifymanifest(self, mflinkrevs, dir="", storefiles=None,
-                        subdirprogress=None):
+    def _verifymanifest(
+        self, mflinkrevs, dir=b"", storefiles=None, subdirprogress=None
+    ):
         """verify the manifestlog content
 
         Inputs:
@@ -287,24 +313,25 @@
         mf = mfl.getstorage(dir)
 
         if not dir:
-            self.ui.status(_("checking manifests\n"))
+            self.ui.status(_(b"checking manifests\n"))
 
         filenodes = {}
         subdirnodes = {}
         seen = {}
-        label = "manifest"
+        label = b"manifest"
         if dir:
             label = dir
             revlogfiles = mf.files()
             storefiles.difference_update(revlogfiles)
-            if subdirprogress: # should be true since we're in a subdirectory
+            if subdirprogress:  # should be true since we're in a subdirectory
                 subdirprogress.increment()
         if self.refersmf:
             # Do not check manifest if there are only changelog entries with
             # null manifests.
             self._checkrevlog(mf, label, 0)
-        progress = ui.makeprogress(_('checking'), unit=_('manifests'),
-                                   total=len(mf))
+        progress = ui.makeprogress(
+            _(b'checking'), unit=_(b'manifests'), total=len(mf)
+        )
         for i in mf:
             if not dir:
                 progress.update(i)
@@ -313,30 +340,34 @@
             if n in mflinkrevs:
                 del mflinkrevs[n]
             elif dir:
-                self._err(lr, _("%s not in parent-directory manifest") %
-                         short(n), label)
+                self._err(
+                    lr,
+                    _(b"%s not in parent-directory manifest") % short(n),
+                    label,
+                )
             else:
-                self._err(lr, _("%s not in changesets") % short(n), label)
+                self._err(lr, _(b"%s not in changesets") % short(n), label)
 
             try:
                 mfdelta = mfl.get(dir, n).readdelta(shallow=True)
                 for f, fn, fl in mfdelta.iterentries():
                     if not f:
-                        self._err(lr, _("entry without name in manifest"))
-                    elif f == "/dev/null":  # ignore this in very old repos
+                        self._err(lr, _(b"entry without name in manifest"))
+                    elif f == b"/dev/null":  # ignore this in very old repos
                         continue
                     fullpath = dir + _normpath(f)
-                    if fl == 't':
+                    if fl == b't':
                         if not match.visitdir(fullpath):
                             continue
-                        subdirnodes.setdefault(fullpath + '/', {}).setdefault(
-                            fn, []).append(lr)
+                        subdirnodes.setdefault(fullpath + b'/', {}).setdefault(
+                            fn, []
+                        ).append(lr)
                     else:
                         if not match(fullpath):
                             continue
                         filenodes.setdefault(fullpath, {}).setdefault(fn, lr)
             except Exception as inst:
-                self._exc(lr, _("reading delta %s") % short(n), inst, label)
+                self._exc(lr, _(b"reading delta %s") % short(n), inst, label)
             if self._level >= VERIFY_FULL:
                 try:
                     # Various issues can affect manifest. So we read each full
@@ -344,8 +375,12 @@
                     # code (eg: hash verification, filename are ordered, etc.)
                     mfdelta = mfl.get(dir, n).read()
                 except Exception as inst:
-                    self._exc(lr, _("reading full manifest %s") % short(n),
-                              inst, label)
+                    self._exc(
+                        lr,
+                        _(b"reading full manifest %s") % short(n),
+                        inst,
+                        label,
+                    )
 
         if not dir:
             progress.complete()
@@ -356,54 +391,68 @@
             changesetpairs = [(c, m) for m in mflinkrevs for c in mflinkrevs[m]]
             for c, m in sorted(changesetpairs):
                 if dir:
-                    self._err(c, _("parent-directory manifest refers to unknown"
-                                   " revision %s") % short(m), label)
+                    self._err(
+                        c,
+                        _(
+                            b"parent-directory manifest refers to unknown"
+                            b" revision %s"
+                        )
+                        % short(m),
+                        label,
+                    )
                 else:
-                    self._err(c, _("changeset refers to unknown revision %s") %
-                              short(m), label)
+                    self._err(
+                        c,
+                        _(b"changeset refers to unknown revision %s")
+                        % short(m),
+                        label,
+                    )
 
         if not dir and subdirnodes:
-            self.ui.status(_("checking directory manifests\n"))
+            self.ui.status(_(b"checking directory manifests\n"))
             storefiles = set()
             subdirs = set()
             revlogv1 = self.revlogv1
             for f, f2, size in repo.store.datafiles():
                 if not f:
-                    self._err(None, _("cannot decode filename '%s'") % f2)
-                elif (size > 0 or not revlogv1) and f.startswith('meta/'):
+                    self._err(None, _(b"cannot decode filename '%s'") % f2)
+                elif (size > 0 or not revlogv1) and f.startswith(b'meta/'):
                     storefiles.add(_normpath(f))
                     subdirs.add(os.path.dirname(f))
-            subdirprogress = ui.makeprogress(_('checking'), unit=_('manifests'),
-                                             total=len(subdirs))
+            subdirprogress = ui.makeprogress(
+                _(b'checking'), unit=_(b'manifests'), total=len(subdirs)
+            )
 
-        for subdir, linkrevs in subdirnodes.iteritems():
-            subdirfilenodes = self._verifymanifest(linkrevs, subdir, storefiles,
-                                                   subdirprogress)
-            for f, onefilenodes in subdirfilenodes.iteritems():
+        for subdir, linkrevs in pycompat.iteritems(subdirnodes):
+            subdirfilenodes = self._verifymanifest(
+                linkrevs, subdir, storefiles, subdirprogress
+            )
+            for f, onefilenodes in pycompat.iteritems(subdirfilenodes):
                 filenodes.setdefault(f, {}).update(onefilenodes)
 
         if not dir and subdirnodes:
             subdirprogress.complete()
             if self.warnorphanstorefiles:
                 for f in sorted(storefiles):
-                    self._warn(_("warning: orphan data file '%s'") % f)
+                    self._warn(_(b"warning: orphan data file '%s'") % f)
 
         return filenodes
 
     def _crosscheckfiles(self, filelinkrevs, filenodes):
         repo = self.repo
         ui = self.ui
-        ui.status(_("crosschecking files in changesets and manifests\n"))
+        ui.status(_(b"crosschecking files in changesets and manifests\n"))
 
         total = len(filelinkrevs) + len(filenodes)
-        progress = ui.makeprogress(_('crosschecking'), unit=_('files'),
-                                   total=total)
+        progress = ui.makeprogress(
+            _(b'crosschecking'), unit=_(b'files'), total=total
+        )
         if self.havemf:
             for f in sorted(filelinkrevs):
                 progress.increment()
                 if f not in filenodes:
                     lr = filelinkrevs[f][0]
-                    self._err(lr, _("in changeset but not in manifest"), f)
+                    self._err(lr, _(b"in changeset but not in manifest"), f)
 
         if self.havecl:
             for f in sorted(filenodes):
@@ -414,7 +463,7 @@
                         lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
                     except Exception:
                         lr = None
-                    self._err(lr, _("in manifest but not in changeset"), f)
+                    self._err(lr, _(b"in manifest but not in changeset"), f)
 
         progress.complete()
 
@@ -424,27 +473,28 @@
         lrugetctx = self.lrugetctx
         revlogv1 = self.revlogv1
         havemf = self.havemf
-        ui.status(_("checking files\n"))
+        ui.status(_(b"checking files\n"))
 
         storefiles = set()
         for f, f2, size in repo.store.datafiles():
             if not f:
-                self._err(None, _("cannot decode filename '%s'") % f2)
-            elif (size > 0 or not revlogv1) and f.startswith('data/'):
+                self._err(None, _(b"cannot decode filename '%s'") % f2)
+            elif (size > 0 or not revlogv1) and f.startswith(b'data/'):
                 storefiles.add(_normpath(f))
 
         state = {
             # TODO this assumes revlog storage for changelog.
-            'expectedversion': self.repo.changelog.version & 0xFFFF,
-            'skipflags': self.skipflags,
+            b'expectedversion': self.repo.changelog.version & 0xFFFF,
+            b'skipflags': self.skipflags,
             # experimental config: censor.policy
-            'erroroncensored': ui.config('censor', 'policy') == 'abort',
+            b'erroroncensored': ui.config(b'censor', b'policy') == b'abort',
         }
 
         files = sorted(set(filenodes) | set(filelinkrevs))
         revisions = 0
-        progress = ui.makeprogress(_('checking'), unit=_('files'),
-                                   total=len(files))
+        progress = ui.makeprogress(
+            _(b'checking'), unit=_(b'files'), total=len(files)
+        )
         for i, f in enumerate(files):
             progress.update(i, item=f)
             try:
@@ -461,7 +511,7 @@
             try:
                 fl = repo.file(f)
             except error.StorageError as e:
-                self._err(lr, _("broken revlog! (%s)") % e, f)
+                self._err(lr, _(b"broken revlog! (%s)") % e, f)
                 continue
 
             for ff in fl.files():
@@ -469,15 +519,16 @@
                     storefiles.remove(ff)
                 except KeyError:
                     if self.warnorphanstorefiles:
-                        self._warn(_(" warning: revlog '%s' not in fncache!") %
-                                  ff)
+                        self._warn(
+                            _(b" warning: revlog '%s' not in fncache!") % ff
+                        )
                         self.fncachewarned = True
 
             if not len(fl) and (self.havecl or self.havemf):
-                self._err(lr, _("empty or missing %s") % f)
+                self._err(lr, _(b"empty or missing %s") % f)
             else:
                 # Guard against implementations not setting this.
-                state['skipread'] = set()
+                state[b'skipread'] = set()
                 for problem in fl.verifyintegrity(state):
                     if problem.node is not None:
                         linkrev = fl.linkrev(fl.rev(problem.node))
@@ -487,12 +538,16 @@
                     if problem.warning:
                         self._warn(problem.warning)
                     elif problem.error:
-                        self._err(linkrev if linkrev is not None else lr,
-                                  problem.error, f)
+                        self._err(
+                            linkrev if linkrev is not None else lr,
+                            problem.error,
+                            f,
+                        )
                     else:
                         raise error.ProgrammingError(
-                            'problem instance does not set warning or error '
-                            'attribute: %s' % problem.msg)
+                            b'problem instance does not set warning or error '
+                            b'attribute: %s' % problem.msg
+                        )
 
             seen = {}
             for i in fl:
@@ -501,11 +556,11 @@
                 lr = self._checkentry(fl, i, n, seen, linkrevs, f)
                 if f in filenodes:
                     if havemf and n not in filenodes[f]:
-                        self._err(lr, _("%s not in manifests") % (short(n)), f)
+                        self._err(lr, _(b"%s not in manifests") % (short(n)), f)
                     else:
                         del filenodes[f][n]
 
-                if n in state['skipread']:
+                if n in state[b'skipread']:
                     continue
 
                 # check renames
@@ -518,35 +573,53 @@
                         if lr is not None and ui.verbose:
                             ctx = lrugetctx(lr)
                             if not any(rp[0] in pctx for pctx in ctx.parents()):
-                                self._warn(_("warning: copy source of '%s' not"
-                                            " in parents of %s") % (f, ctx))
+                                self._warn(
+                                    _(
+                                        b"warning: copy source of '%s' not"
+                                        b" in parents of %s"
+                                    )
+                                    % (f, ctx)
+                                )
                         fl2 = repo.file(rp[0])
                         if not len(fl2):
-                            self._err(lr,
-                                      _("empty or missing copy source revlog "
-                                        "%s:%s") % (rp[0],
-                                      short(rp[1])),
-                                      f)
+                            self._err(
+                                lr,
+                                _(
+                                    b"empty or missing copy source revlog "
+                                    b"%s:%s"
+                                )
+                                % (rp[0], short(rp[1])),
+                                f,
+                            )
                         elif rp[1] == nullid:
-                            ui.note(_("warning: %s@%s: copy source"
-                                      " revision is nullid %s:%s\n")
-                                % (f, lr, rp[0], short(rp[1])))
+                            ui.note(
+                                _(
+                                    b"warning: %s@%s: copy source"
+                                    b" revision is nullid %s:%s\n"
+                                )
+                                % (f, lr, rp[0], short(rp[1]))
+                            )
                         else:
                             fl2.rev(rp[1])
                 except Exception as inst:
-                    self._exc(lr, _("checking rename of %s") % short(n),
-                              inst, f)
+                    self._exc(
+                        lr, _(b"checking rename of %s") % short(n), inst, f
+                    )
 
             # cross-check
             if f in filenodes:
-                fns = [(v, k) for k, v in filenodes[f].iteritems()]
+                fns = [(v, k) for k, v in pycompat.iteritems(filenodes[f])]
                 for lr, node in sorted(fns):
-                    self._err(lr, _("manifest refers to unknown revision %s") %
-                              short(node), f)
+                    self._err(
+                        lr,
+                        _(b"manifest refers to unknown revision %s")
+                        % short(node),
+                        f,
+                    )
         progress.complete()
 
         if self.warnorphanstorefiles:
             for f in sorted(storefiles):
-                self._warn(_("warning: orphan data file '%s'") % f)
+                self._warn(_(b"warning: orphan data file '%s'") % f)
 
         return len(files), revisions
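
For orientation, the module's entry point is the ``verify(repo, level)``
function at the top of this file's diff; ``VERIFY_FULL`` additionally reads
every full manifest text in ``_verifymanifest``. A hypothetical driver,
assuming ``repo`` is an already-open local repository object::

   from mercurial import verify

   # level=None means VERIFY_DEFAULT; VERIFY_FULL is the slow, thorough mode
   rc = verify.verify(repo, level=verify.VERIFY_FULL)
   if rc:
       print('integrity errors found; see messages above')
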
--- a/mercurial/vfs.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/vfs.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,6 +14,11 @@
 import threading
 
 from .i18n import _
+from .pycompat import (
+    delattr,
+    getattr,
+    setattr,
+)
 from . import (
     encoding,
     error,
@@ -22,29 +27,32 @@
     util,
 )
 
+
 def _avoidambig(path, oldstat):
     """Avoid file stat ambiguity forcibly
 
     This function causes copying ``path`` file, if it is owned by
     another (see issue5418 and issue5584 for detail).
     """
+
     def checkandavoid():
         newstat = util.filestat.frompath(path)
         # return whether file stat ambiguity is (already) avoided
-        return (not newstat.isambig(oldstat) or
-                newstat.avoidambig(path, oldstat))
+        return not newstat.isambig(oldstat) or newstat.avoidambig(path, oldstat)
+
     if not checkandavoid():
         # simply copy to change owner of path to get privilege to
         # advance mtime (see issue5418)
         util.rename(util.mktempcopy(path), path)
         checkandavoid()
 
+
 class abstractvfs(object):
     """Abstract base class; cannot be instantiated"""
 
     def __init__(self, *args, **kwargs):
         '''Prevent instantiation; don't call this from subclasses.'''
-        raise NotImplementedError('attempted instantiating ' + str(type(self)))
+        raise NotImplementedError(b'attempted instantiating ' + str(type(self)))
 
     def _auditpath(self, path, mode):
         raise NotImplementedError
@@ -56,9 +64,9 @@
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
-        return ""
+        return b""
 
-    def tryreadlines(self, path, mode='rb'):
+    def tryreadlines(self, path, mode=b'rb'):
         '''gracefully return an empty array for missing files'''
         try:
             return self.readlines(path, mode=mode)
@@ -78,23 +86,23 @@
         return self.__call__
 
     def read(self, path):
-        with self(path, 'rb') as fp:
+        with self(path, b'rb') as fp:
             return fp.read()
 
-    def readlines(self, path, mode='rb'):
+    def readlines(self, path, mode=b'rb'):
         with self(path, mode=mode) as fp:
             return fp.readlines()
 
     def write(self, path, data, backgroundclose=False, **kwargs):
-        with self(path, 'wb', backgroundclose=backgroundclose, **kwargs) as fp:
+        with self(path, b'wb', backgroundclose=backgroundclose, **kwargs) as fp:
             return fp.write(data)
 
-    def writelines(self, path, data, mode='wb', notindexed=False):
+    def writelines(self, path, data, mode=b'wb', notindexed=False):
         with self(path, mode=mode, notindexed=notindexed) as fp:
             return fp.writelines(data)
 
     def append(self, path, data):
-        with self(path, 'ab') as fp:
+        with self(path, b'ab') as fp:
             return fp.write(data)
 
     def basename(self, path):
@@ -172,9 +180,10 @@
     def mkdir(self, path=None):
         return os.mkdir(self.join(path))
 
-    def mkstemp(self, suffix='', prefix='tmp', dir=None):
-        fd, name = pycompat.mkstemp(suffix=suffix, prefix=prefix,
-                                    dir=self.join(dir))
+    def mkstemp(self, suffix=b'', prefix=b'tmp', dir=None):
+        fd, name = pycompat.mkstemp(
+            suffix=suffix, prefix=prefix, dir=self.join(dir)
+        )
         dname, fname = util.split(name)
         if dir:
             return fd, os.path.join(dir, fname)
@@ -199,7 +208,7 @@
         checkambig=True only in limited cases (see also issue5418 and
         issue5584 for detail).
         """
-        self._auditpath(dst, 'w')
+        self._auditpath(dst, b'w')
         srcpath = self.join(src)
         dstpath = self.join(dst)
         oldstat = checkambig and util.filestat.frompath(dstpath)
@@ -227,6 +236,7 @@
         If ``forcibly``, this tries to remove READ-ONLY files, too.
         """
         if forcibly:
+
             def onerror(function, path, excinfo):
                 if function is not os.remove:
                     raise
@@ -236,10 +246,12 @@
                     raise
                 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                 os.remove(path)
+
         else:
             onerror = None
-        return shutil.rmtree(self.join(path),
-                             ignore_errors=ignore_errors, onerror=onerror)
+        return shutil.rmtree(
+            self.join(path), ignore_errors=ignore_errors, onerror=onerror
+        )
 
     def setflags(self, path, l, x):
         return util.setflags(self.join(path), l, x)
@@ -255,8 +267,9 @@
         util.tryunlink(self.join(path))
 
     def unlinkpath(self, path=None, ignoremissing=False, rmdir=True):
-        return util.unlinkpath(self.join(path), ignoremissing=ignoremissing,
-                               rmdir=rmdir)
+        return util.unlinkpath(
+            self.join(path), ignoremissing=ignoremissing, rmdir=rmdir
+        )
 
     def utime(self, path=None, t=None):
         return os.utime(self.join(path), t)
@@ -294,7 +307,8 @@
         vfs = getattr(self, 'vfs', self)
         if getattr(vfs, '_backgroundfilecloser', None):
             raise error.Abort(
-                _('can only have 1 active background file closer'))
+                _(b'can only have 1 active background file closer')
+            )
 
         with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
             try:
@@ -303,6 +317,7 @@
             finally:
                 vfs._backgroundfilecloser = None
 
+
 class vfs(abstractvfs):
     '''Operate files relative to a base directory
 
@@ -313,8 +328,15 @@
     (b) the base directory is managed by hg and considered sort-of append-only.
     See pathutil.pathauditor() for details.
     '''
-    def __init__(self, base, audit=True, cacheaudited=False, expandpath=False,
-                 realpath=False):
+
+    def __init__(
+        self,
+        base,
+        audit=True,
+        cacheaudited=False,
+        expandpath=False,
+        realpath=False,
+    ):
         if expandpath:
             base = util.expandpath(base)
         if realpath:
@@ -324,9 +346,10 @@
         if audit:
             self.audit = pathutil.pathauditor(self.base, cached=cacheaudited)
         else:
-            self.audit = (lambda path, mode=None: True)
+            self.audit = lambda path, mode=None: True
         self.createmode = None
         self._trustnlink = None
+        self.options = {}
 
     @util.propertycache
     def _cansymlink(self):
@@ -347,12 +370,20 @@
                 path = os.path.relpath(path, self.base)
             r = util.checkosfilename(path)
             if r:
-                raise error.Abort("%s: %r" % (r, path))
+                raise error.Abort(b"%s: %r" % (r, path))
             self.audit(path, mode=mode)
 
-    def __call__(self, path, mode="r", atomictemp=False, notindexed=False,
-                 backgroundclose=False, checkambig=False, auditpath=True,
-                 makeparentdirs=True):
+    def __call__(
+        self,
+        path,
+        mode=b"r",
+        atomictemp=False,
+        notindexed=False,
+        backgroundclose=False,
+        checkambig=False,
+        auditpath=True,
+        makeparentdirs=True,
+    ):
         '''Open ``path`` file, which is relative to vfs root.
 
         By default, parent directories are created as needed. Newly created
@@ -387,11 +418,11 @@
             self._auditpath(path, mode)
         f = self.join(path)
 
-        if "b" not in mode:
-            mode += "b" # for that other OS
+        if b"b" not in mode:
+            mode += b"b"  # for that other OS
 
         nlink = -1
-        if mode not in ('r', 'rb'):
+        if mode not in (b'r', b'rb'):
             dirname, basename = util.split(f)
             # If basename is empty, then the path is malformed because it points
             # to a directory. Let the posixfile() call below raise IOError.
@@ -399,10 +430,11 @@
                 if atomictemp:
                     if makeparentdirs:
                         util.makedirs(dirname, self.createmode, notindexed)
-                    return util.atomictempfile(f, mode, self.createmode,
-                                               checkambig=checkambig)
+                    return util.atomictempfile(
+                        f, mode, self.createmode, checkambig=checkambig
+                    )
                 try:
-                    if 'w' in mode:
+                    if b'w' in mode:
                         util.unlink(f)
                         nlink = 0
                     else:
@@ -411,7 +443,7 @@
                         with util.posixfile(f):
                             nlink = util.nlinks(f)
                             if nlink < 1:
-                                nlink = 2 # force mktempcopy (issue1922)
+                                nlink = 2  # force mktempcopy (issue1922)
                 except (OSError, IOError) as e:
                     if e.errno != errno.ENOENT:
                         raise
@@ -428,17 +460,26 @@
             self._fixfilemode(f)
 
         if checkambig:
-            if mode in ('r', 'rb'):
-                raise error.Abort(_('implementation error: mode %s is not'
-                                    ' valid for checkambig=True') % mode)
+            if mode in (b'r', b'rb'):
+                raise error.Abort(
+                    _(
+                        b'implementation error: mode %s is not'
+                        b' valid for checkambig=True'
+                    )
+                    % mode
+                )
             fp = checkambigatclosing(fp)
 
-        if (backgroundclose and
-                isinstance(threading.currentThread(), threading._MainThread)):
+        if backgroundclose and isinstance(
+            threading.currentThread(), threading._MainThread
+        ):
             if not self._backgroundfilecloser:
-                raise error.Abort(_('backgroundclose can only be used when a '
-                                  'backgroundclosing context manager is active')
-                                  )
+                raise error.Abort(
+                    _(
+                        b'backgroundclose can only be used when a '
+                        b'backgroundclosing context manager is active'
+                    )
+                )
 
             fp = delayclosedfile(fp, self._backgroundfilecloser)
 
@@ -455,9 +496,12 @@
             try:
                 os.symlink(src, linkname)
             except OSError as err:
-                raise OSError(err.errno, _('could not symlink to %r: %s') %
-                              (src, encoding.strtolocal(err.strerror)),
-                              linkname)
+                raise OSError(
+                    err.errno,
+                    _(b'could not symlink to %r: %s')
+                    % (src, encoding.strtolocal(err.strerror)),
+                    linkname,
+                )
         else:
             self.write(dst, src)
 
@@ -467,8 +511,10 @@
         else:
             return self.base
 
+
 opener = vfs
 
+
 class proxyvfs(abstractvfs):
     def __init__(self, vfs):
         self.vfs = vfs
@@ -484,6 +530,7 @@
     def options(self, value):
         self.vfs.options = value
 
+
 class filtervfs(proxyvfs, abstractvfs):
     '''Wrapper vfs for filtering filenames with a function.'''
 
@@ -500,27 +547,31 @@
         else:
             return self.vfs.join(path)
 
+
 filteropener = filtervfs
 
+
 class readonlyvfs(proxyvfs):
     '''Wrapper vfs preventing any writing.'''
 
     def __init__(self, vfs):
         proxyvfs.__init__(self, vfs)
 
-    def __call__(self, path, mode='r', *args, **kw):
-        if mode not in ('r', 'rb'):
-            raise error.Abort(_('this vfs is read only'))
+    def __call__(self, path, mode=b'r', *args, **kw):
+        if mode not in (b'r', b'rb'):
+            raise error.Abort(_(b'this vfs is read only'))
         return self.vfs(path, mode, *args, **kw)
 
     def join(self, path, *insidef):
         return self.vfs.join(path, *insidef)
 
+
 class closewrapbase(object):
     """Base class of wrapper, which hooks closing
 
     Do not instantiate outside of the vfs layer.
     """
+
     def __init__(self, fh):
         object.__setattr__(self, r'_origfh', fh)
 
@@ -538,16 +589,18 @@
         return self
 
     def __exit__(self, exc_type, exc_value, exc_tb):
-        raise NotImplementedError('attempted instantiating ' + str(type(self)))
+        raise NotImplementedError(b'attempted instantiating ' + str(type(self)))
 
     def close(self):
-        raise NotImplementedError('attempted instantiating ' + str(type(self)))
+        raise NotImplementedError(b'attempted instantiating ' + str(type(self)))
+
 
 class delayclosedfile(closewrapbase):
     """Proxy for a file object whose close is delayed.
 
     Do not instantiate outside of the vfs layer.
     """
+
     def __init__(self, fh, closer):
         super(delayclosedfile, self).__init__(fh)
         object.__setattr__(self, r'_closer', closer)
@@ -558,8 +611,10 @@
     def close(self):
         self._closer.close(self._origfh)
 
+
 class backgroundfilecloser(object):
     """Coordinates background closing of file handles on multiple threads."""
+
     def __init__(self, ui, expectedcount=-1):
         self._running = False
         self._entered = False
@@ -569,7 +624,7 @@
         # Only Windows/NTFS has slow file closing. So only enable by default
         # on that platform. But allow to be enabled elsewhere for testing.
         defaultenabled = pycompat.iswindows
-        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
+        enabled = ui.configbool(b'worker', b'backgroundclose', defaultenabled)
 
         if not enabled:
             return
@@ -577,23 +632,24 @@
         # There is overhead to starting and stopping the background threads.
         # Don't do background processing unless the file count is large enough
         # to justify it.
-        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount')
+        minfilecount = ui.configint(b'worker', b'backgroundcloseminfilecount')
         # FUTURE dynamically start background threads after minfilecount closes.
         # (We don't currently have any callers that don't know their file count)
         if expectedcount > 0 and expectedcount < minfilecount:
             return
 
-        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue')
-        threadcount = ui.configint('worker', 'backgroundclosethreadcount')
+        maxqueue = ui.configint(b'worker', b'backgroundclosemaxqueue')
+        threadcount = ui.configint(b'worker', b'backgroundclosethreadcount')
 
-        ui.debug('starting %d threads for background file closing\n' %
-                 threadcount)
+        ui.debug(
+            b'starting %d threads for background file closing\n' % threadcount
+        )
 
         self._queue = pycompat.queue.Queue(maxsize=maxqueue)
         self._running = True
 
         for i in range(threadcount):
-            t = threading.Thread(target=self._worker, name='backgroundcloser')
+            t = threading.Thread(target=self._worker, name=b'backgroundcloser')
             self._threads.append(t)
             t.start()
 
@@ -628,8 +684,9 @@
     def close(self, fh):
         """Schedule a file for closing."""
         if not self._entered:
-            raise error.Abort(_('can only call close() when context manager '
-                              'active'))
+            raise error.Abort(
+                _(b'can only call close() when context manager active')
+            )
 
         # If a background thread encountered an exception, raise now so we fail
         # fast. Otherwise we may potentially go on for minutes until the error
@@ -646,6 +703,7 @@
 
         self._queue.put(fh, block=True, timeout=None)
 
+
 class checkambigatclosing(closewrapbase):
     """Proxy for a file object, to avoid ambiguity of file stat
 
@@ -656,6 +714,7 @@
 
     Do not instantiate outside of the vfs layer.
     """
+
     def __init__(self, fh):
         super(checkambigatclosing, self).__init__(fh)
         object.__setattr__(self, r'_oldstat', util.filestat.frompath(fh.name))
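
The vfs layer exists so callers never touch filesystem paths directly:
``vfs.__call__`` opens files relative to the base, and helpers like ``read``
and ``write`` sit on top. A minimal sketch against a scratch directory
(``audit=False`` bypasses the path auditor, acceptable only outside a
repository; the path is an arbitrary example)::

   from mercurial import vfs as vfsmod

   v = vfsmod.vfs(b'/tmp/vfs-demo', audit=False)
   v.write(b'notes.txt', b'hello\n')           # wraps open(..., 'wb')
   assert v.read(b'notes.txt') == b'hello\n'
   with v(b'notes.txt', b'ab') as fp:          # vfs instances are callable
       fp.write(b'more\n')
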
--- a/mercurial/win32.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/win32.py	Mon Oct 21 11:09:48 2019 -0400
@@ -55,21 +55,25 @@
     _WPARAM = ctypes.c_ulonglong
     _LPARAM = ctypes.c_longlong
 
+
 class _FILETIME(ctypes.Structure):
-    _fields_ = [(r'dwLowDateTime', _DWORD),
-                (r'dwHighDateTime', _DWORD)]
+    _fields_ = [(r'dwLowDateTime', _DWORD), (r'dwHighDateTime', _DWORD)]
+
 
 class _BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
-    _fields_ = [(r'dwFileAttributes', _DWORD),
-                (r'ftCreationTime', _FILETIME),
-                (r'ftLastAccessTime', _FILETIME),
-                (r'ftLastWriteTime', _FILETIME),
-                (r'dwVolumeSerialNumber', _DWORD),
-                (r'nFileSizeHigh', _DWORD),
-                (r'nFileSizeLow', _DWORD),
-                (r'nNumberOfLinks', _DWORD),
-                (r'nFileIndexHigh', _DWORD),
-                (r'nFileIndexLow', _DWORD)]
+    _fields_ = [
+        (r'dwFileAttributes', _DWORD),
+        (r'ftCreationTime', _FILETIME),
+        (r'ftLastAccessTime', _FILETIME),
+        (r'ftLastWriteTime', _FILETIME),
+        (r'dwVolumeSerialNumber', _DWORD),
+        (r'nFileSizeHigh', _DWORD),
+        (r'nFileSizeLow', _DWORD),
+        (r'nNumberOfLinks', _DWORD),
+        (r'nFileIndexHigh', _DWORD),
+        (r'nFileIndexLow', _DWORD),
+    ]
+
 
 # CreateFile
 _FILE_SHARE_READ = 0x00000001
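
The structure rewrites in this file are purely cosmetic: for a
``ctypes.Structure`` only the order and types in ``_fields_`` define the
memory layout, not how the list happens to be wrapped. A standalone sketch::

   import ctypes

   class FILETIME(ctypes.Structure):
       # identical ABI whether fields are written inline or one per line
       _fields_ = [(r'dwLowDateTime', ctypes.c_uint32),
                   (r'dwHighDateTime', ctypes.c_uint32)]

   assert ctypes.sizeof(FILETIME) == 8
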
@@ -90,51 +94,65 @@
 # GetExitCodeProcess
 _STILL_ACTIVE = 259
 
+
 class _STARTUPINFO(ctypes.Structure):
-    _fields_ = [(r'cb', _DWORD),
-                (r'lpReserved', _LPSTR),
-                (r'lpDesktop', _LPSTR),
-                (r'lpTitle', _LPSTR),
-                (r'dwX', _DWORD),
-                (r'dwY', _DWORD),
-                (r'dwXSize', _DWORD),
-                (r'dwYSize', _DWORD),
-                (r'dwXCountChars', _DWORD),
-                (r'dwYCountChars', _DWORD),
-                (r'dwFillAttribute', _DWORD),
-                (r'dwFlags', _DWORD),
-                (r'wShowWindow', _WORD),
-                (r'cbReserved2', _WORD),
-                (r'lpReserved2', ctypes.c_char_p),
-                (r'hStdInput', _HANDLE),
-                (r'hStdOutput', _HANDLE),
-                (r'hStdError', _HANDLE)]
+    _fields_ = [
+        (r'cb', _DWORD),
+        (r'lpReserved', _LPSTR),
+        (r'lpDesktop', _LPSTR),
+        (r'lpTitle', _LPSTR),
+        (r'dwX', _DWORD),
+        (r'dwY', _DWORD),
+        (r'dwXSize', _DWORD),
+        (r'dwYSize', _DWORD),
+        (r'dwXCountChars', _DWORD),
+        (r'dwYCountChars', _DWORD),
+        (r'dwFillAttribute', _DWORD),
+        (r'dwFlags', _DWORD),
+        (r'wShowWindow', _WORD),
+        (r'cbReserved2', _WORD),
+        (r'lpReserved2', ctypes.c_char_p),
+        (r'hStdInput', _HANDLE),
+        (r'hStdOutput', _HANDLE),
+        (r'hStdError', _HANDLE),
+    ]
+
 
 class _PROCESS_INFORMATION(ctypes.Structure):
-    _fields_ = [(r'hProcess', _HANDLE),
-                (r'hThread', _HANDLE),
-                (r'dwProcessId', _DWORD),
-                (r'dwThreadId', _DWORD)]
+    _fields_ = [
+        (r'hProcess', _HANDLE),
+        (r'hThread', _HANDLE),
+        (r'dwProcessId', _DWORD),
+        (r'dwThreadId', _DWORD),
+    ]
+
 
 _CREATE_NO_WINDOW = 0x08000000
 _SW_HIDE = 0
 
+
 class _COORD(ctypes.Structure):
-    _fields_ = [(r'X', ctypes.c_short),
-                (r'Y', ctypes.c_short)]
+    _fields_ = [(r'X', ctypes.c_short), (r'Y', ctypes.c_short)]
+
 
 class _SMALL_RECT(ctypes.Structure):
-    _fields_ = [(r'Left', ctypes.c_short),
-                (r'Top', ctypes.c_short),
-                (r'Right', ctypes.c_short),
-                (r'Bottom', ctypes.c_short)]
+    _fields_ = [
+        (r'Left', ctypes.c_short),
+        (r'Top', ctypes.c_short),
+        (r'Right', ctypes.c_short),
+        (r'Bottom', ctypes.c_short),
+    ]
+
 
 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
-    _fields_ = [(r'dwSize', _COORD),
-                (r'dwCursorPosition', _COORD),
-                (r'wAttributes', _WORD),
-                (r'srWindow', _SMALL_RECT),
-                (r'dwMaximumWindowSize', _COORD)]
+    _fields_ = [
+        (r'dwSize', _COORD),
+        (r'dwCursorPosition', _COORD),
+        (r'wAttributes', _WORD),
+        (r'srWindow', _SMALL_RECT),
+        (r'dwMaximumWindowSize', _COORD),
+    ]
+
 
 _STD_OUTPUT_HANDLE = _DWORD(-11).value
 _STD_ERROR_HANDLE = _DWORD(-12).value
@@ -150,11 +168,9 @@
 class CERT_CHAIN_CONTEXT(ctypes.Structure):
     _fields_ = (
         (r"cbSize", _DWORD),
-
         # CERT_TRUST_STATUS struct
         (r"dwErrorStatus", _DWORD),
         (r"dwInfoStatus", _DWORD),
-
         (r"cChain", _DWORD),
         (r"rgpChain", ctypes.c_void_p),
         (r"cLowerQualityChainContext", _DWORD),
@@ -163,15 +179,16 @@
         (r"dwRevocationFreshnessTime", _DWORD),
     )
 
+
 class CERT_USAGE_MATCH(ctypes.Structure):
     _fields_ = (
         (r"dwType", _DWORD),
-
-         # CERT_ENHKEY_USAGE struct
+        # CERT_ENHKEY_USAGE struct
         (r"cUsageIdentifier", _DWORD),
-        (r"rgpszUsageIdentifier", ctypes.c_void_p), # LPSTR *
+        (r"rgpszUsageIdentifier", ctypes.c_void_p),  # LPSTR *
     )
 
+
 class CERT_CHAIN_PARA(ctypes.Structure):
     _fields_ = (
         (r"cbSize", _DWORD),
@@ -180,35 +197,45 @@
         (r"dwUrlRetrievalTimeout", _DWORD),
         (r"fCheckRevocationFreshnessTime", _BOOL),
         (r"dwRevocationFreshnessTime", _DWORD),
-        (r"pftCacheResync", ctypes.c_void_p), # LPFILETIME
-        (r"pStrongSignPara", ctypes.c_void_p), # PCCERT_STRONG_SIGN_PARA
+        (r"pftCacheResync", ctypes.c_void_p),  # LPFILETIME
+        (r"pStrongSignPara", ctypes.c_void_p),  # PCCERT_STRONG_SIGN_PARA
         (r"dwStrongSignFlags", _DWORD),
     )
 
+
 # types of parameters of C functions used (required by pypy)
 
-_crypt32.CertCreateCertificateContext.argtypes = [_DWORD, # cert encoding
-                                                  ctypes.c_char_p, # cert
-                                                  _DWORD] # cert size
+_crypt32.CertCreateCertificateContext.argtypes = [
+    _DWORD,  # cert encoding
+    ctypes.c_char_p,  # cert
+    _DWORD,  # cert size
+]
 _crypt32.CertCreateCertificateContext.restype = _PCCERT_CONTEXT
 
 _crypt32.CertGetCertificateChain.argtypes = [
-        ctypes.c_void_p, # HCERTCHAINENGINE
-        _PCCERT_CONTEXT,
-        ctypes.c_void_p, # LPFILETIME
-        ctypes.c_void_p, # HCERTSTORE
-        ctypes.c_void_p, # PCERT_CHAIN_PARA
-        _DWORD,
-        ctypes.c_void_p, # LPVOID
-        ctypes.c_void_p  # PCCERT_CHAIN_CONTEXT *
-    ]
+    ctypes.c_void_p,  # HCERTCHAINENGINE
+    _PCCERT_CONTEXT,
+    ctypes.c_void_p,  # LPFILETIME
+    ctypes.c_void_p,  # HCERTSTORE
+    ctypes.c_void_p,  # PCERT_CHAIN_PARA
+    _DWORD,
+    ctypes.c_void_p,  # LPVOID
+    ctypes.c_void_p,  # PCCERT_CHAIN_CONTEXT *
+]
 _crypt32.CertGetCertificateChain.restype = _BOOL
 
 _crypt32.CertFreeCertificateContext.argtypes = [_PCCERT_CONTEXT]
 _crypt32.CertFreeCertificateContext.restype = _BOOL
 
-_kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
-    _DWORD, _DWORD, _HANDLE]
+_kernel32.CreateFileA.argtypes = [
+    _LPCSTR,
+    _DWORD,
+    _DWORD,
+    ctypes.c_void_p,
+    _DWORD,
+    _DWORD,
+    _HANDLE,
+]
 _kernel32.CreateFileA.restype = _HANDLE
 
 _kernel32.GetFileInformationByHandle.argtypes = [_HANDLE, ctypes.c_void_p]
@@ -237,8 +264,16 @@
 _kernel32.GetDriveTypeA.argtypes = [_LPCSTR]
 _kernel32.GetDriveTypeA.restype = _UINT
 
-_kernel32.GetVolumeInformationA.argtypes = [_LPCSTR, ctypes.c_void_p, _DWORD,
-    ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, _DWORD]
+_kernel32.GetVolumeInformationA.argtypes = [
+    _LPCSTR,
+    ctypes.c_void_p,
+    _DWORD,
+    ctypes.c_void_p,
+    ctypes.c_void_p,
+    ctypes.c_void_p,
+    ctypes.c_void_p,
+    _DWORD,
+]
 _kernel32.GetVolumeInformationA.restype = _BOOL
 
 _kernel32.GetVolumePathNameA.argtypes = [_LPCSTR, ctypes.c_void_p, _DWORD]
@@ -256,9 +291,18 @@
 _kernel32.GetModuleFileNameA.argtypes = [_HANDLE, ctypes.c_void_p, _DWORD]
 _kernel32.GetModuleFileNameA.restype = _DWORD
 
-_kernel32.CreateProcessA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p,
-    ctypes.c_void_p, _BOOL, _DWORD, ctypes.c_void_p, _LPCSTR, ctypes.c_void_p,
-    ctypes.c_void_p]
+_kernel32.CreateProcessA.argtypes = [
+    _LPCSTR,
+    _LPCSTR,
+    ctypes.c_void_p,
+    ctypes.c_void_p,
+    _BOOL,
+    _DWORD,
+    ctypes.c_void_p,
+    _LPCSTR,
+    ctypes.c_void_p,
+    ctypes.c_void_p,
+]
 _kernel32.CreateProcessA.restype = _BOOL
 
 _kernel32.ExitProcess.argtypes = [_UINT]
@@ -296,24 +340,39 @@
 _user32.EnumWindows.argtypes = [_WNDENUMPROC, _LPARAM]
 _user32.EnumWindows.restype = _BOOL
 
-_kernel32.PeekNamedPipe.argtypes = [_HANDLE, ctypes.c_void_p, _DWORD,
-    ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
+_kernel32.PeekNamedPipe.argtypes = [
+    _HANDLE,
+    ctypes.c_void_p,
+    _DWORD,
+    ctypes.c_void_p,
+    ctypes.c_void_p,
+    ctypes.c_void_p,
+]
 _kernel32.PeekNamedPipe.restype = _BOOL
 
+
 def _raiseoserror(name):
     # Force the code to a signed int to avoid an 'int too large' error.
     # See https://bugs.python.org/issue28474
     code = _kernel32.GetLastError()
-    if code > 0x7fffffff:
-        code -= 2**32
+    if code > 0x7FFFFFFF:
+        code -= 2 ** 32
     err = ctypes.WinError(code=code)
-    raise OSError(err.errno, r'%s: %s' % (encoding.strfromlocal(name),
-                                          err.strerror))
+    raise OSError(
+        err.errno, r'%s: %s' % (encoding.strfromlocal(name), err.strerror)
+    )
+
 
 def _getfileinfo(name):
-    fh = _kernel32.CreateFileA(name, 0,
-            _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
-            None, _OPEN_EXISTING, _FILE_FLAG_BACKUP_SEMANTICS, None)
+    fh = _kernel32.CreateFileA(
+        name,
+        0,
+        _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
+        None,
+        _OPEN_EXISTING,
+        _FILE_FLAG_BACKUP_SEMANTICS,
+        None,
+    )
     if fh == _INVALID_HANDLE_VALUE:
         _raiseoserror(name)
     try:
@@ -324,6 +383,7 @@
     finally:
         _kernel32.CloseHandle(fh)
 
+
 def checkcertificatechain(cert, build=True):
     '''Tests the given certificate to see if there is a complete chain to a
        trusted root certificate.  As a side effect, missing certificates are
@@ -336,13 +396,15 @@
     chainctxptr = ctypes.POINTER(CERT_CHAIN_CONTEXT)
 
     pchainctx = chainctxptr()
-    chainpara = CERT_CHAIN_PARA(cbSize=ctypes.sizeof(CERT_CHAIN_PARA),
-                                RequestedUsage=CERT_USAGE_MATCH())
+    chainpara = CERT_CHAIN_PARA(
+        cbSize=ctypes.sizeof(CERT_CHAIN_PARA), RequestedUsage=CERT_USAGE_MATCH()
+    )
 
-    certctx = _crypt32.CertCreateCertificateContext(X509_ASN_ENCODING, cert,
-                                                    len(cert))
+    certctx = _crypt32.CertCreateCertificateContext(
+        X509_ASN_ENCODING, cert, len(cert)
+    )
     if certctx is None:
-        _raiseoserror('CertCreateCertificateContext')
+        _raiseoserror(b'CertCreateCertificateContext')
 
     flags = 0
 
@@ -351,15 +413,17 @@
 
     try:
         # Building the certificate chain will update root certs as necessary.
-        if not _crypt32.CertGetCertificateChain(None,      # hChainEngine
-                                                certctx,   # pCertContext
-                                                None,      # pTime
-                                                None,      # hAdditionalStore
-                                                ctypes.byref(chainpara),
-                                                flags,
-                                                None,      # pvReserved
-                                                ctypes.byref(pchainctx)):
-            _raiseoserror('CertGetCertificateChain')
+        if not _crypt32.CertGetCertificateChain(
+            None,  # hChainEngine
+            certctx,  # pCertContext
+            None,  # pTime
+            None,  # hAdditionalStore
+            ctypes.byref(chainpara),
+            flags,
+            None,  # pvReserved
+            ctypes.byref(pchainctx),
+        ):
+            _raiseoserror(b'CertGetCertificateChain')
 
         chainctx = pchainctx.contents
 
@@ -369,24 +433,30 @@
             _crypt32.CertFreeCertificateChain(pchainctx)
         _crypt32.CertFreeCertificateContext(certctx)
 
+
 def oslink(src, dst):
     try:
         if not _kernel32.CreateHardLinkA(dst, src, None):
             _raiseoserror(src)
-    except AttributeError: # Wine doesn't support this function
+    except AttributeError:  # Wine doesn't support this function
         _raiseoserror(src)
 
+
 def nlinks(name):
     '''return number of hardlinks for the given file'''
     return _getfileinfo(name).nNumberOfLinks
 
+
 def samefile(path1, path2):
     '''Returns whether path1 and path2 refer to the same file or directory.'''
     res1 = _getfileinfo(path1)
     res2 = _getfileinfo(path2)
-    return (res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
+    return (
+        res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
         and res1.nFileIndexHigh == res2.nFileIndexHigh
-        and res1.nFileIndexLow == res2.nFileIndexLow)
+        and res1.nFileIndexLow == res2.nFileIndexLow
+    )
+
 
 def samedevice(path1, path2):
     '''Returns whether path1 and path2 are on the same device.'''
@@ -394,12 +464,14 @@
     res2 = _getfileinfo(path2)
     return res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
 
+
 def peekpipe(pipe):
     handle = msvcrt.get_osfhandle(pipe.fileno())
     avail = _DWORD()
 
-    if not _kernel32.PeekNamedPipe(handle, None, 0, None, ctypes.byref(avail),
-                                   None):
+    if not _kernel32.PeekNamedPipe(
+        handle, None, 0, None, ctypes.byref(avail), None
+    ):
         err = _kernel32.GetLastError()
         if err == _ERROR_BROKEN_PIPE:
             return 0
@@ -407,12 +479,14 @@
 
     return avail.value
 
+
 def lasterrorwaspipeerror(err):
     if err.errno != errno.EINVAL:
         return False
     err = _kernel32.GetLastError()
     return err == _ERROR_BROKEN_PIPE or err == _ERROR_NO_DATA
 
+
 def testpid(pid):
     '''return True if pid is still running or unable to
     determine, False otherwise'''
@@ -426,17 +500,19 @@
             _kernel32.CloseHandle(h)
     return _kernel32.GetLastError() != _ERROR_INVALID_PARAMETER
 
+
 def executablepath():
     '''return full path of hg.exe'''
     size = 600
     buf = ctypes.create_string_buffer(size + 1)
     len = _kernel32.GetModuleFileNameA(None, ctypes.byref(buf), size)
     if len == 0:
-        raise ctypes.WinError() # Note: WinError is a function
+        raise ctypes.WinError()  # Note: WinError is a function
     elif len == size:
         raise ctypes.WinError(_ERROR_INSUFFICIENT_BUFFER)
     return buf.value
 
+
 def getvolumename(path):
     """Get the mount point of the filesystem from a directory or file
     (best-effort)
@@ -452,10 +528,11 @@
     buf = ctypes.create_string_buffer(size)
 
     if not _kernel32.GetVolumePathNameA(realpath, ctypes.byref(buf), size):
-        raise ctypes.WinError() # Note: WinError is a function
+        raise ctypes.WinError()  # Note: WinError is a function
 
     return buf.value
 
+
 def getfstype(path):
     """Get the filesystem type name from a directory or file (best-effort)
 
@@ -466,20 +543,26 @@
     t = _kernel32.GetDriveTypeA(volume)
 
     if t == _DRIVE_REMOTE:
-        return 'cifs'
-    elif t not in (_DRIVE_REMOVABLE, _DRIVE_FIXED, _DRIVE_CDROM,
-                   _DRIVE_RAMDISK):
+        return b'cifs'
+    elif t not in (
+        _DRIVE_REMOVABLE,
+        _DRIVE_FIXED,
+        _DRIVE_CDROM,
+        _DRIVE_RAMDISK,
+    ):
         return None
 
     size = _MAX_PATH + 1
     name = ctypes.create_string_buffer(size)
 
-    if not _kernel32.GetVolumeInformationA(volume, None, 0, None, None, None,
-                                           ctypes.byref(name), size):
-        raise ctypes.WinError() # Note: WinError is a function
+    if not _kernel32.GetVolumeInformationA(
+        volume, None, 0, None, None, None, ctypes.byref(name), size
+    ):
+        raise ctypes.WinError()  # Note: WinError is a function
 
     return name.value
 
+
 def getuser():
     '''return name of current user'''
     size = _DWORD(300)
@@ -488,36 +571,40 @@
         raise ctypes.WinError()
     return buf.value
 
+
 _signalhandler = []
 
+
 def setsignalhandler():
     '''Register a termination handler for console events including
     CTRL+C. python signal handlers do not work well with socket
     operations.
     '''
+
     def handler(event):
         _kernel32.ExitProcess(1)
 
     if _signalhandler:
-        return # already registered
+        return  # already registered
     h = _SIGNAL_HANDLER(handler)
-    _signalhandler.append(h) # needed to prevent garbage collection
+    _signalhandler.append(h)  # needed to prevent garbage collection
     if not _kernel32.SetConsoleCtrlHandler(h, True):
         raise ctypes.WinError()
 
+
 def hidewindow():
-
     def callback(hwnd, pid):
         wpid = _DWORD()
         _user32.GetWindowThreadProcessId(hwnd, ctypes.byref(wpid))
         if pid == wpid.value:
             _user32.ShowWindow(hwnd, _SW_HIDE)
-            return False # stop enumerating windows
+            return False  # stop enumerating windows
         return True
 
     pid = _kernel32.GetCurrentProcessId()
     _user32.EnumWindows(_WNDENUMPROC(callback), pid)
 
+
 def termsize():
     # cmd.exe does not handle CR like a unix console, the CR is
     # counted in the line length. On 80 columns consoles, if 80
@@ -527,24 +614,27 @@
     height = 25
     # Query stderr to avoid problems with redirections
     screenbuf = _kernel32.GetStdHandle(
-                  _STD_ERROR_HANDLE) # don't close the handle returned
+        _STD_ERROR_HANDLE
+    )  # don't close the handle returned
     if screenbuf is None or screenbuf == _INVALID_HANDLE_VALUE:
         return width, height
     csbi = _CONSOLE_SCREEN_BUFFER_INFO()
-    if not _kernel32.GetConsoleScreenBufferInfo(
-                        screenbuf, ctypes.byref(csbi)):
+    if not _kernel32.GetConsoleScreenBufferInfo(screenbuf, ctypes.byref(csbi)):
         return width, height
     width = csbi.srWindow.Right - csbi.srWindow.Left  # don't '+ 1'
     height = csbi.srWindow.Bottom - csbi.srWindow.Top + 1
     return width, height
 
+
 def enablevtmode():
     '''Enable virtual terminal mode for the associated console.  Return True if
     enabled, else False.'''
 
     ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
 
-    handle = _kernel32.GetStdHandle(_STD_OUTPUT_HANDLE) # don't close the handle
+    handle = _kernel32.GetStdHandle(
+        _STD_OUTPUT_HANDLE
+    )  # don't close the handle
     if handle == _INVALID_HANDLE_VALUE:
         return False
 
@@ -561,6 +651,7 @@
 
     return True
 
+
 def spawndetached(args):
     # No standard library function really spawns a fully detached
     # process under win32 because they allocate pipes or other objects
@@ -572,19 +663,28 @@
 
     pi = _PROCESS_INFORMATION()
 
-    env = ''
+    env = b''
     for k in encoding.environ:
-        env += "%s=%s\0" % (k, encoding.environ[k])
+        env += b"%s=%s\0" % (k, encoding.environ[k])
     if not env:
-        env = '\0'
-    env += '\0'
+        env = b'\0'
+    env += b'\0'
 
     args = subprocess.list2cmdline(pycompat.rapply(encoding.strfromlocal, args))
 
     # TODO: CreateProcessW on py3?
     res = _kernel32.CreateProcessA(
-        None, encoding.strtolocal(args), None, None, False, _CREATE_NO_WINDOW,
-        env, encoding.getcwd(), ctypes.byref(si), ctypes.byref(pi))
+        None,
+        encoding.strtolocal(args),
+        None,
+        None,
+        False,
+        _CREATE_NO_WINDOW,
+        env,
+        encoding.getcwd(),
+        ctypes.byref(si),
+        ctypes.byref(pi),
+    )
     if not res:
         raise ctypes.WinError()
 
@@ -593,15 +693,18 @@
 
     return pi.dwProcessId
 
+
 def unlink(f):
     '''try to implement POSIX' unlink semantics on Windows'''
 
     if os.path.isdir(f):
         # use EPERM because it is POSIX prescribed value, even though
         # unlink(2) on directories returns EISDIR on Linux
-        raise IOError(errno.EPERM,
-                      r"Unlinking directory not permitted: '%s'"
-                      % encoding.strfromlocal(f))
+        raise IOError(
+            errno.EPERM,
+            r"Unlinking directory not permitted: '%s'"
+            % encoding.strfromlocal(f),
+        )
 
     # POSIX allows to unlink and rename open files. Windows has serious
     # problems with doing that:
@@ -621,7 +724,7 @@
     # implicit zombie filename blocking on a temporary name.
 
     for tries in pycompat.xrange(10):
-        temp = '%s-%08x' % (f, random.randint(0, 0xffffffff))
+        temp = b'%s-%08x' % (f, random.randint(0, 0xFFFFFFFF))
         try:
             os.rename(f, temp)  # raises OSError EEXIST if temp exists
             break
@@ -646,6 +749,7 @@
             # leaving some potentially serious inconsistencies.
             pass
 
+
 def makedir(path, notindexed):
     os.mkdir(path)
     if notindexed:
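
A standalone sketch (not part of this changeset) of the rename-before-delete
approach used by unlink() above, which frees the original filename even while
another process still holds the file open; the helper name unlink_replaced is
hypothetical::

    import errno
    import os
    import random

    def unlink_replaced(f):
        # Move the file aside under a random name; EEXIST means that
        # name is taken, so retry with a different suffix.
        for _ in range(10):
            temp = '%s-%08x' % (f, random.randint(0, 0xFFFFFFFF))
            try:
                os.rename(f, temp)
                break
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        else:
            raise IOError(errno.EEXIST, 'no temporary filename found')
        try:
            os.unlink(temp)
        except OSError:
            # The file is likely still open; the renamed entry will
            # disappear once the last handle to it is closed.
            pass
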
--- a/mercurial/windows.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/windows.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,6 +16,7 @@
 import sys
 
 from .i18n import _
+from .pycompat import getattr
 from . import (
     encoding,
     error,
@@ -26,6 +27,7 @@
 
 try:
     import _winreg as winreg
+
     winreg.CloseKey
 except ImportError:
     import winreg
@@ -49,6 +51,7 @@
 
 umask = 0o022
 
+
 class mixedfilemodewrapper(object):
     """Wraps a file handle when it is opened in read/write mode.
 
@@ -61,6 +64,7 @@
     mode and automatically adds checks or inserts appropriate file positioning
     calls when necessary.
     """
+
     OPNONE = 0
     OPREAD = 1
     OPWRITE = 2
@@ -124,10 +128,12 @@
         object.__setattr__(self, r'_lastop', self.OPREAD)
         return self._fp.readlines(*args, **kwargs)
 
+
 class fdproxy(object):
     """Wraps osutil.posixfile() to override the name attribute to reflect the
     underlying file name.
     """
+
     def __init__(self, name, fp):
         self.name = name
         self._fp = fp
@@ -147,10 +153,11 @@
     def __getattr__(self, name):
         return getattr(self._fp, name)
 
-def posixfile(name, mode='r', buffering=-1):
+
+def posixfile(name, mode=b'r', buffering=-1):
     '''Open a file with even more POSIX-like semantics'''
     try:
-        fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError
+        fp = osutil.posixfile(name, mode, buffering)  # may raise WindowsError
 
         # PyFile_FromFd() ignores the name, and seems to report fp.name as the
         # underlying file descriptor.
@@ -159,21 +166,24 @@
 
         # The position when opening in append mode is implementation defined, so
         # make it consistent with other platforms, which position at EOF.
-        if 'a' in mode:
+        if b'a' in mode:
             fp.seek(0, os.SEEK_END)
 
-        if '+' in mode:
+        if b'+' in mode:
             return mixedfilemodewrapper(fp)
 
         return fp
     except WindowsError as err:
         # convert to a friendlier exception
-        raise IOError(err.errno, r'%s: %s' % (
-            encoding.strfromlocal(name), err.strerror))
+        raise IOError(
+            err.errno, r'%s: %s' % (encoding.strfromlocal(name), err.strerror)
+        )
+
 
 # may be wrapped by win32mbcs extension
 listdir = osutil.listdir
 
+
 class winstdout(object):
     '''stdout on windows misbehaves if sent through a pipe'''
 
@@ -215,48 +225,58 @@
                 raise
             raise IOError(errno.EPIPE, r'Broken pipe')
 
+
 def _is_win_9x():
     '''return true if run on windows 95, 98 or me.'''
     try:
         return sys.getwindowsversion()[3] == 1
     except AttributeError:
-        return 'command' in encoding.environ.get('comspec', '')
+        return b'command' in encoding.environ.get(b'comspec', b'')
+
 
 def openhardlinks():
     return not _is_win_9x()
 
+
 def parsepatchoutput(output_line):
     """parses the output produced by patch and returns the filename"""
     pf = output_line[14:]
-    if pf[0] == '`':
-        pf = pf[1:-1] # Remove the quotes
+    if pf[0] == b'`':
+        pf = pf[1:-1]  # Remove the quotes
     return pf
 
+
 def sshargs(sshcmd, host, user, port):
     '''Build argument list for ssh or Plink'''
-    pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
-    args = user and ("%s@%s" % (user, host)) or host
-    if args.startswith('-') or args.startswith('/'):
+    pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p'
+    args = user and (b"%s@%s" % (user, host)) or host
+    if args.startswith(b'-') or args.startswith(b'/'):
         raise error.Abort(
-            _('illegal ssh hostname or username starting with - or /: %s') %
-            args)
+            _(b'illegal ssh hostname or username starting with - or /: %s')
+            % args
+        )
     args = shellquote(args)
     if port:
-        args = '%s %s %s' % (pflag, shellquote(port), args)
+        args = b'%s %s %s' % (pflag, shellquote(port), args)
     return args
 
+
 def setflags(f, l, x):
     pass
 
+
 def copymode(src, dst, mode=None, enforcewritable=False):
     pass
 
+
 def checkexec(path):
     return False
 
+
 def checklink(path):
     return False
 
+
 def setbinary(fd):
     # When run without console, pipes may expose invalid
     # fileno(), usually set to -1.
@@ -264,25 +284,32 @@
     if fno is not None and fno() >= 0:
         msvcrt.setmode(fno(), os.O_BINARY)
 
+
 def pconvert(path):
-    return path.replace(pycompat.ossep, '/')
+    return path.replace(pycompat.ossep, b'/')
+
 
 def localpath(path):
-    return path.replace('/', '\\')
+    return path.replace(b'/', b'\\')
+
 
 def normpath(path):
     return pconvert(os.path.normpath(path))
 
+
 def normcase(path):
-    return encoding.upper(path) # NTFS compares via upper()
+    return encoding.upper(path)  # NTFS compares via upper()
+
 
 # see posix.py for definitions
 normcasespec = encoding.normcasespecs.upper
 normcasefallback = encoding.upperfallback
 
+
 def samestat(s1, s2):
     return False
 
+
 def shelltocmdexe(path, env):
     r"""Convert shell variables in the form $var and ${var} inside ``path``
     to %var% form.  Existing Windows style variables are left unchanged.
@@ -318,9 +345,9 @@
     index = 0
     pathlen = len(path)
     while index < pathlen:
-        c = path[index:index + 1]
-        if c == b'\'':   # no expansion within single quotes
-            path = path[index + 1:]
+        c = path[index : index + 1]
+        if c == b'\'':  # no expansion within single quotes
+            path = path[index + 1 :]
             pathlen = len(path)
             try:
                 index = path.index(b'\'')
@@ -329,7 +356,7 @@
                 res += c + path
                 index = pathlen - 1
         elif c == b'%':  # variable
-            path = path[index + 1:]
+            path = path[index + 1 :]
             pathlen = len(path)
             try:
                 index = path.index(b'%')
@@ -340,8 +367,8 @@
                 var = path[:index]
                 res += b'%' + var + b'%'
         elif c == b'$':  # variable
-            if path[index + 1:index + 2] == b'{':
-                path = path[index + 2:]
+            if path[index + 1 : index + 2] == b'{':
+                path = path[index + 2 :]
                 pathlen = len(path)
                 try:
                     index = path.index(b'}')
@@ -358,11 +385,11 @@
             else:
                 var = b''
                 index += 1
-                c = path[index:index + 1]
+                c = path[index : index + 1]
                 while c != b'' and c in varchars:
                     var += c
                     index += 1
-                    c = path[index:index + 1]
+                    c = path[index : index + 1]
                 # Some variables (like HG_OLDNODE) may be defined, but have an
                 # empty value.  Those need to be skipped because when spawning
                 # cmd.exe to run the hook, it doesn't replace %VAR% for an empty
@@ -376,13 +403,19 @@
 
                 if c != b'':
                     index -= 1
-        elif (c == b'~' and index + 1 < pathlen
-              and path[index + 1:index + 2] in (b'\\', b'/')):
-            res += "%USERPROFILE%"
-        elif (c == b'\\' and index + 1 < pathlen
-              and path[index + 1:index + 2] in (b'$', b'~')):
+        elif (
+            c == b'~'
+            and index + 1 < pathlen
+            and path[index + 1 : index + 2] in (b'\\', b'/')
+        ):
+            res += b"%USERPROFILE%"
+        elif (
+            c == b'\\'
+            and index + 1 < pathlen
+            and path[index + 1 : index + 2] in (b'$', b'~')
+        ):
             # Skip '\', but only if it is escaping $ or ~
-            res += path[index + 1:index + 2]
+            res += path[index + 1 : index + 2]
             index += 1
         else:
             res += c
@@ -390,6 +423,7 @@
         index += 1
     return res
 
+
 # A sequence of backslashes is special iff it precedes a double quote:
 # - if there's an even number of backslashes, the double quote is not
 #   quoted (i.e. it ends the quoted region)
@@ -403,6 +437,8 @@
 # quote we've appended to the end)
 _quotere = None
 _needsshellquote = None
+
+
 def shellquote(s):
     r"""
     >>> shellquote(br'C:\Users\xyz')
@@ -432,40 +468,45 @@
         return s
     return b'"%s"' % _quotere.sub(br'\1\1\\\2', s)
 
+
 def _unquote(s):
     if s.startswith(b'"') and s.endswith(b'"'):
         return s[1:-1]
     return s
 
+
 def shellsplit(s):
     """Parse a command string in cmd.exe way (best-effort)"""
     return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False))
 
+
 def quotecommand(cmd):
     """Build a command string suitable for os.popen* calls."""
     if sys.version_info < (2, 7, 1):
         # Python versions since 2.7.1 do this extra quoting themselves
-        return '"' + cmd + '"'
+        return b'"' + cmd + b'"'
     return cmd
 
+
 # if you change this stub into a real check, please try to implement the
 # username and groupname functions above, too.
 def isowner(st):
     return True
 
+
 def findexe(command):
     '''Find executable for command searching like cmd.exe does.
     If command is a basename then PATH is searched for command.
     PATH isn't searched if command is an absolute or relative path.
     An extension from PATHEXT is found and added if not present.
     If command isn't found None is returned.'''
-    pathext = encoding.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
+    pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD')
     pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)]
     if os.path.splitext(command)[1].lower() in pathexts:
-        pathexts = ['']
+        pathexts = [b'']
 
     def findexisting(pathcommand):
-        'Will append extension (if needed) and return existing file'
+        """Will append extension (if needed) and return existing file"""
         for ext in pathexts:
             executable = pathcommand + ext
             if os.path.exists(executable):
@@ -475,57 +516,66 @@
     if pycompat.ossep in command:
         return findexisting(command)
 
-    for path in encoding.environ.get('PATH', '').split(pycompat.ospathsep):
+    for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep):
         executable = findexisting(os.path.join(path, command))
         if executable is not None:
             return executable
     return findexisting(os.path.expanduser(os.path.expandvars(command)))
 
+
 _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}
 
+
 def statfiles(files):
     '''Stat each file in files. Yield each stat, or None if a file
     does not exist or has a type we don't care about.
 
     Cluster and cache stat per directory to minimize number of OS stat calls.'''
-    dircache = {} # dirname -> filename -> status | None if file does not exist
+    dircache = {}  # dirname -> filename -> status | None if file does not exist
     getkind = stat.S_IFMT
     for nf in files:
-        nf  = normcase(nf)
+        nf = normcase(nf)
         dir, base = os.path.split(nf)
         if not dir:
-            dir = '.'
+            dir = b'.'
         cache = dircache.get(dir, None)
         if cache is None:
             try:
-                dmap = dict([(normcase(n), s)
-                             for n, k, s in listdir(dir, True)
-                             if getkind(s.st_mode) in _wantedkinds])
+                dmap = dict(
+                    [
+                        (normcase(n), s)
+                        for n, k, s in listdir(dir, True)
+                        if getkind(s.st_mode) in _wantedkinds
+                    ]
+                )
             except OSError as err:
                 # Python >= 2.5 returns ENOENT and adds winerror field
                 # EINVAL is raised if dir is not a directory.
-                if err.errno not in (errno.ENOENT, errno.EINVAL,
-                                     errno.ENOTDIR):
+                if err.errno not in (errno.ENOENT, errno.EINVAL, errno.ENOTDIR):
                     raise
                 dmap = {}
             cache = dircache.setdefault(dir, dmap)
         yield cache.get(base, None)
 
+
 def username(uid=None):
     """Return the name of the user with the given uid.
 
     If uid is None, return the name of the current user."""
     return None
 
+
 def groupname(gid=None):
     """Return the name of the group with the given gid.
 
     If gid is None, return the name of the current group."""
     return None
 
+
 def readlink(pathname):
     return pycompat.fsencode(os.readlink(pycompat.fsdecode(pathname)))
 
+
 def removedirs(name):
     """special version of os.removedirs that does not remove symlinked
     directories or junction points if they actually contain files"""
@@ -544,6 +594,7 @@
             break
         head, tail = os.path.split(head)
 
+
 def rename(src, dst):
     '''atomically rename file src to dst, replacing dst if it exists'''
     try:
@@ -554,16 +605,20 @@
         unlink(dst)
         os.rename(src, dst)
 
+
 def gethgcmd():
     return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]]
 
+
 def groupmembers(name):
     # Don't support groups on Windows for now
     raise KeyError
 
+
 def isexec(f):
     return False
 
+
 class cachestat(object):
     def __init__(self, path):
         pass
@@ -571,6 +626,7 @@
     def cacheable(self):
         return False
 
+
 def lookupreg(key, valname=None, scope=None):
     ''' Look up a key/value name in the Windows registry.
 
@@ -594,20 +650,25 @@
         except EnvironmentError:
             pass
 
+
 expandglobs = True
 
+
 def statislink(st):
     '''check whether a stat result is a symlink'''
     return False
 
+
 def statisexec(st):
     '''check whether a stat result is an executable file'''
     return False
 
+
 def poll(fds):
     # see posix.py for description
     raise NotImplementedError()
 
+
 def readpipe(pipe):
     """Read all available data from a pipe."""
     chunks = []
@@ -621,7 +682,8 @@
             break
         chunks.append(s)
 
-    return ''.join(chunks)
+    return b''.join(chunks)
+
 
 def bindunixsocket(sock, path):
     raise NotImplementedError(r'unsupported platform')
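
A self-contained sketch (not part of this changeset) of the
backslash-doubling rule described in the comment block above shellquote();
the name shellquote_sketch is hypothetical, and the real function also skips
quoting entirely when _needsshellquote says it is unnecessary::

    import re

    _quotere = re.compile(br'(\\*)("|\\$)')

    def shellquote_sketch(s):
        # Double any backslash run that precedes a double quote or the
        # end of the string, escape the quote itself, then wrap the
        # whole argument in double quotes.
        return b'"%s"' % _quotere.sub(br'\1\1\\\2', s)

    assert shellquote_sketch(br'say "hi"') == b'"say \\"hi\\""'
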
--- a/mercurial/wireprotoframing.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/wireprotoframing.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,9 +15,8 @@
 import struct
 
 from .i18n import _
-from .thirdparty import (
-    attr,
-)
+from .pycompat import getattr
+from .thirdparty import attr
 from . import (
     encoding,
     error,
@@ -121,18 +120,20 @@
 
 ARGUMENT_RECORD_HEADER = struct.Struct(r'<HH')
 
+
 def humanflags(mapping, value):
     """Convert a numeric flags value to a human value, using a mapping table."""
-    namemap = {v: k for k, v in mapping.iteritems()}
+    namemap = {v: k for k, v in pycompat.iteritems(mapping)}
     flags = []
     val = 1
     while value >= val:
         if value & val:
-            flags.append(namemap.get(val, '<unknown 0x%02x>' % val))
+            flags.append(namemap.get(val, b'<unknown 0x%02x>' % val))
         val <<= 1
 
     return b'|'.join(flags)
 
+
 @attr.s(slots=True)
 class frameheader(object):
     """Represents the data in a frame header."""
@@ -144,6 +145,7 @@
     typeid = attr.ib()
     flags = attr.ib()
 
+
 @attr.s(slots=True, repr=False)
 class frame(object):
     """Represents a parsed frame."""
@@ -157,17 +159,25 @@
 
     @encoding.strmethod
     def __repr__(self):
-        typename = '<unknown 0x%02x>' % self.typeid
-        for name, value in FRAME_TYPES.iteritems():
+        typename = b'<unknown 0x%02x>' % self.typeid
+        for name, value in pycompat.iteritems(FRAME_TYPES):
             if value == self.typeid:
                 typename = name
                 break
 
-        return ('frame(size=%d; request=%d; stream=%d; streamflags=%s; '
-                'type=%s; flags=%s)' % (
-            len(self.payload), self.requestid, self.streamid,
-            humanflags(STREAM_FLAGS, self.streamflags), typename,
-            humanflags(FRAME_TYPE_FLAGS.get(self.typeid, {}), self.flags)))
+        return (
+            b'frame(size=%d; request=%d; stream=%d; streamflags=%s; '
+            b'type=%s; flags=%s)'
+            % (
+                len(self.payload),
+                self.requestid,
+                self.streamid,
+                humanflags(STREAM_FLAGS, self.streamflags),
+                typename,
+                humanflags(FRAME_TYPE_FLAGS.get(self.typeid, {}), self.flags),
+            )
+        )
+
 
 def makeframe(requestid, streamid, streamflags, typeid, flags, payload):
     """Assemble a frame into a byte array."""
@@ -189,6 +199,7 @@
 
     return frame
 
+
 def makeframefromhumanstring(s):
     """Create a frame from a human readable string
 
@@ -238,15 +249,22 @@
             finalflags |= int(flag)
 
     if payload.startswith(b'cbor:'):
-        payload = b''.join(cborutil.streamencode(
-            stringutil.evalpythonliteral(payload[5:])))
+        payload = b''.join(
+            cborutil.streamencode(stringutil.evalpythonliteral(payload[5:]))
+        )
 
     else:
         payload = stringutil.unescapestr(payload)
 
-    return makeframe(requestid=requestid, streamid=streamid,
-                     streamflags=finalstreamflags, typeid=frametype,
-                     flags=finalflags, payload=payload)
+    return makeframe(
+        requestid=requestid,
+        streamid=streamid,
+        streamflags=finalstreamflags,
+        typeid=frametype,
+        flags=finalflags,
+        payload=payload,
+    )
+
 
 def parseheader(data):
     """Parse a unified framing protocol frame header from a buffer.
@@ -265,11 +283,13 @@
     requestid, streamid, streamflags = struct.unpack_from(r'<HBB', data, 3)
     typeflags = data[7]
 
-    frametype = (typeflags & 0xf0) >> 4
-    frameflags = typeflags & 0x0f
+    frametype = (typeflags & 0xF0) >> 4
+    frameflags = typeflags & 0x0F
 
-    return frameheader(framelength, requestid, streamid, streamflags,
-                       frametype, frameflags)
+    return frameheader(
+        framelength, requestid, streamid, streamflags, frametype, frameflags
+    )
+
 
 def readframe(fh):
     """Read a unified framing protocol frame from a file object.
@@ -286,22 +306,34 @@
         return None
 
     if readcount != FRAME_HEADER_SIZE:
-        raise error.Abort(_('received incomplete frame: got %d bytes: %s') %
-                          (readcount, header))
+        raise error.Abort(
+            _(b'received incomplete frame: got %d bytes: %s')
+            % (readcount, header)
+        )
 
     h = parseheader(header)
 
     payload = fh.read(h.length)
     if len(payload) != h.length:
-        raise error.Abort(_('frame length error: expected %d; got %d') %
-                          (h.length, len(payload)))
+        raise error.Abort(
+            _(b'frame length error: expected %d; got %d')
+            % (h.length, len(payload))
+        )
+
+    return frame(
+        h.requestid, h.streamid, h.streamflags, h.typeid, h.flags, payload
+    )
+
 
-    return frame(h.requestid, h.streamid, h.streamflags, h.typeid, h.flags,
-                 payload)
-
-def createcommandframes(stream, requestid, cmd, args, datafh=None,
-                        maxframesize=DEFAULT_MAX_FRAME_SIZE,
-                        redirect=None):
+def createcommandframes(
+    stream,
+    requestid,
+    cmd,
+    args,
+    datafh=None,
+    maxframesize=DEFAULT_MAX_FRAME_SIZE,
+    redirect=None,
+):
     """Create frames necessary to transmit a request to run a command.
 
     This is a generator of bytearrays. Each item represents a frame
@@ -331,16 +363,18 @@
         if datafh:
             flags |= FLAG_COMMAND_REQUEST_EXPECT_DATA
 
-        payload = data[offset:offset + maxframesize]
+        payload = data[offset : offset + maxframesize]
         offset += len(payload)
 
         if len(payload) == maxframesize and offset < len(data):
             flags |= FLAG_COMMAND_REQUEST_MORE_FRAMES
 
-        yield stream.makeframe(requestid=requestid,
-                               typeid=FRAME_TYPE_COMMAND_REQUEST,
-                               flags=flags,
-                               payload=payload)
+        yield stream.makeframe(
+            requestid=requestid,
+            typeid=FRAME_TYPE_COMMAND_REQUEST,
+            flags=flags,
+            payload=payload,
+        )
 
         if not (flags & FLAG_COMMAND_REQUEST_MORE_FRAMES):
             break
@@ -357,14 +391,17 @@
                 assert datafh.read(1) == b''
                 done = True
 
-            yield stream.makeframe(requestid=requestid,
-                                   typeid=FRAME_TYPE_COMMAND_DATA,
-                                   flags=flags,
-                                   payload=data)
+            yield stream.makeframe(
+                requestid=requestid,
+                typeid=FRAME_TYPE_COMMAND_DATA,
+                flags=flags,
+                payload=data,
+            )
 
             if done:
                 break
 
+
 def createcommandresponseokframe(stream, requestid):
     overall = b''.join(cborutil.streamencode({b'status': b'ok'}))
 
@@ -377,20 +414,24 @@
     else:
         encoded = False
 
-    return stream.makeframe(requestid=requestid,
-                            typeid=FRAME_TYPE_COMMAND_RESPONSE,
-                            flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
-                            payload=overall,
-                            encoded=encoded)
+    return stream.makeframe(
+        requestid=requestid,
+        typeid=FRAME_TYPE_COMMAND_RESPONSE,
+        flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
+        payload=overall,
+        encoded=encoded,
+    )
 
-def createcommandresponseeosframes(stream, requestid,
-                                   maxframesize=DEFAULT_MAX_FRAME_SIZE):
+
+def createcommandresponseeosframes(
+    stream, requestid, maxframesize=DEFAULT_MAX_FRAME_SIZE
+):
     """Create an empty payload frame representing command end-of-stream."""
     payload = stream.flush()
 
     offset = 0
     while True:
-        chunk = payload[offset:offset + maxframesize]
+        chunk = payload[offset : offset + maxframesize]
         offset += len(chunk)
 
         done = offset == len(payload)
@@ -400,26 +441,31 @@
         else:
             flags = FLAG_COMMAND_RESPONSE_CONTINUATION
 
-        yield stream.makeframe(requestid=requestid,
-                               typeid=FRAME_TYPE_COMMAND_RESPONSE,
-                               flags=flags,
-                               payload=chunk,
-                               encoded=payload != b'')
+        yield stream.makeframe(
+            requestid=requestid,
+            typeid=FRAME_TYPE_COMMAND_RESPONSE,
+            flags=flags,
+            payload=chunk,
+            encoded=payload != b'',
+        )
 
         if done:
             break
 
+
 def createalternatelocationresponseframe(stream, requestid, location):
     data = {
         b'status': b'redirect',
-        b'location': {
-            b'url': location.url,
-            b'mediatype': location.mediatype,
-        }
+        b'location': {b'url': location.url, b'mediatype': location.mediatype},
     }
 
-    for a in (r'size', r'fullhashes', r'fullhashseed', r'serverdercerts',
-              r'servercadercerts'):
+    for a in (
+        r'size',
+        r'fullhashes',
+        r'fullhashseed',
+        r'serverdercerts',
+        r'servercadercerts',
+    ):
         value = getattr(location, a)
         if value is not None:
             data[b'location'][pycompat.bytestr(a)] = value
@@ -432,48 +478,52 @@
     else:
         encoded = False
 
-    return stream.makeframe(requestid=requestid,
-                            typeid=FRAME_TYPE_COMMAND_RESPONSE,
-                            flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
-                            payload=payload,
-                            encoded=encoded)
+    return stream.makeframe(
+        requestid=requestid,
+        typeid=FRAME_TYPE_COMMAND_RESPONSE,
+        flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
+        payload=payload,
+        encoded=encoded,
+    )
+
 
 def createcommanderrorresponse(stream, requestid, message, args=None):
     # TODO should this be using a list of {'msg': ..., 'args': {}} so atom
     # formatting works consistently?
-    m = {
-        b'status': b'error',
-        b'error': {
-            b'message': message,
-        }
-    }
+    m = {b'status': b'error', b'error': {b'message': message}}
 
     if args:
         m[b'error'][b'args'] = args
 
     overall = b''.join(cborutil.streamencode(m))
 
-    yield stream.makeframe(requestid=requestid,
-                           typeid=FRAME_TYPE_COMMAND_RESPONSE,
-                           flags=FLAG_COMMAND_RESPONSE_EOS,
-                           payload=overall)
+    yield stream.makeframe(
+        requestid=requestid,
+        typeid=FRAME_TYPE_COMMAND_RESPONSE,
+        flags=FLAG_COMMAND_RESPONSE_EOS,
+        payload=overall,
+    )
+
 
 def createerrorframe(stream, requestid, msg, errtype):
     # TODO properly handle frame size limits.
     assert len(msg) <= DEFAULT_MAX_FRAME_SIZE
 
-    payload = b''.join(cborutil.streamencode({
-        b'type': errtype,
-        b'message': [{b'msg': msg}],
-    }))
+    payload = b''.join(
+        cborutil.streamencode({b'type': errtype, b'message': [{b'msg': msg}]})
+    )
 
-    yield stream.makeframe(requestid=requestid,
-                           typeid=FRAME_TYPE_ERROR_RESPONSE,
-                           flags=0,
-                           payload=payload)
+    yield stream.makeframe(
+        requestid=requestid,
+        typeid=FRAME_TYPE_ERROR_RESPONSE,
+        flags=0,
+        payload=payload,
+    )
 
-def createtextoutputframe(stream, requestid, atoms,
-                          maxframesize=DEFAULT_MAX_FRAME_SIZE):
+
+def createtextoutputframe(
+    stream, requestid, atoms, maxframesize=DEFAULT_MAX_FRAME_SIZE
+):
     """Create a text output frame to render text to people.
 
     ``atoms`` is a 3-tuple of (formatting string, args, labels).
@@ -489,13 +539,13 @@
         # TODO look for localstr, other types here?
 
         if not isinstance(formatting, bytes):
-            raise ValueError('must use bytes formatting strings')
+            raise ValueError(b'must use bytes formatting strings')
         for arg in args:
             if not isinstance(arg, bytes):
-                raise ValueError('must use bytes for arguments')
+                raise ValueError(b'must use bytes for arguments')
         for label in labels:
             if not isinstance(label, bytes):
-                raise ValueError('must use bytes for labels')
+                raise ValueError(b'must use bytes for labels')
 
         # Formatting string must be ASCII.
         formatting = formatting.decode(r'ascii', r'replace').encode(r'ascii')
@@ -504,8 +554,9 @@
         args = [a.decode(r'utf-8', r'replace').encode(r'utf-8') for a in args]
 
         # Labels must be ASCII.
-        labels = [l.decode(r'ascii', r'strict').encode(r'ascii')
-                  for l in labels]
+        labels = [
+            l.decode(r'ascii', r'strict').encode(r'ascii') for l in labels
+        ]
 
         atom = {b'msg': formatting}
         if args:
@@ -518,12 +569,15 @@
     payload = b''.join(cborutil.streamencode(atomdicts))
 
     if len(payload) > maxframesize:
-        raise ValueError('cannot encode data in a single frame')
+        raise ValueError(b'cannot encode data in a single frame')
 
-    yield stream.makeframe(requestid=requestid,
-                           typeid=FRAME_TYPE_TEXT_OUTPUT,
-                           flags=0,
-                           payload=payload)
+    yield stream.makeframe(
+        requestid=requestid,
+        typeid=FRAME_TYPE_TEXT_OUTPUT,
+        flags=0,
+        payload=payload,
+    )
+
 
 class bufferingcommandresponseemitter(object):
     """Helper object to emit command response frames intelligently.
@@ -536,6 +590,7 @@
     So it might make sense to implement this functionality at the stream
     level.
     """
+
     def __init__(self, stream, requestid, maxframesize=DEFAULT_MAX_FRAME_SIZE):
         self._stream = stream
         self._requestid = requestid
@@ -581,7 +636,7 @@
             # Now emit frames for the big chunk.
             offset = 0
             while True:
-                chunk = data[offset:offset + self._maxsize]
+                chunk = data[offset : offset + self._maxsize]
                 offset += len(chunk)
 
                 yield self._stream.makeframe(
@@ -589,7 +644,8 @@
                     typeid=FRAME_TYPE_COMMAND_RESPONSE,
                     flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
                     payload=chunk,
-                    encoded=True)
+                    encoded=True,
+                )
 
                 if offset == len(data):
                     return
@@ -625,13 +681,17 @@
             typeid=FRAME_TYPE_COMMAND_RESPONSE,
             flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
             payload=payload,
-            encoded=True)
+            encoded=True,
+        )
+
 
 # TODO consider defining encoders/decoders using the util.compressionengine
 # mechanism.
 
+
 class identityencoder(object):
     """Encoder for the "identity" stream encoding profile."""
+
     def __init__(self, ui):
         pass
 
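
For reference, a sketch (not part of this changeset) of the 8-byte frame
header that makeframe() writes and parseheader() reads earlier in this file:
a 24-bit little-endian payload length, a <HBB> triple of request id, stream
id and stream flags, and a final byte holding the frame type in its high
nibble and the frame flags in its low nibble. The helper names are
hypothetical::

    import struct

    def packheader(length, requestid, streamid, streamflags, typeid, flags):
        header = bytearray(8)
        header[0:3] = struct.pack('<I', length)[:3]  # 24-bit length
        struct.pack_into('<HBB', header, 3, requestid, streamid, streamflags)
        header[7] = (typeid << 4) | flags
        return bytes(header)

    def unpackheader(header):
        length = struct.unpack('<I', header[:3] + b'\x00')[0]
        requestid, streamid, streamflags = struct.unpack_from(
            '<HBB', header, 3
        )
        typeflags = header[7]
        return (length, requestid, streamid, streamflags,
                (typeflags & 0xF0) >> 4, typeflags & 0x0F)

    assert unpackheader(packheader(11, 1, 2, 0, 1, 0)) == (11, 1, 2, 0, 1, 0)
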
@@ -644,20 +704,24 @@
     def finish(self):
         return b''
 
+
 class identitydecoder(object):
     """Decoder for the "identity" stream encoding profile."""
 
     def __init__(self, ui, extraobjs):
         if extraobjs:
-            raise error.Abort(_('identity decoder received unexpected '
-                                'additional values'))
+            raise error.Abort(
+                _(b'identity decoder received unexpected additional values')
+            )
 
     def decode(self, data):
         return data
 
+
 class zlibencoder(object):
     def __init__(self, ui):
         import zlib
+
         self._zlib = zlib
         self._compressor = zlib.compressobj()
 
@@ -674,13 +738,15 @@
         self._compressor = None
         return res
 
+
 class zlibdecoder(object):
     def __init__(self, ui, extraobjs):
         import zlib
 
         if extraobjs:
-            raise error.Abort(_('zlib decoder received unexpected '
-                                'additional values'))
+            raise error.Abort(
+                _(b'zlib decoder received unexpected additional values')
+            )
 
         self._decompressor = zlib.decompressobj()
 
@@ -692,6 +758,7 @@
 
         return self._decompressor.decompress(data)
 
+
 class zstdbaseencoder(object):
     def __init__(self, level):
         from . import zstd
@@ -714,38 +781,46 @@
         self._compressor = None
         return res
 
+
 class zstd8mbencoder(zstdbaseencoder):
     def __init__(self, ui):
         super(zstd8mbencoder, self).__init__(3)
 
+
 class zstdbasedecoder(object):
     def __init__(self, maxwindowsize):
         from . import zstd
+
         dctx = zstd.ZstdDecompressor(max_window_size=maxwindowsize)
         self._decompressor = dctx.decompressobj()
 
     def decode(self, data):
         return self._decompressor.decompress(data)
 
+
 class zstd8mbdecoder(zstdbasedecoder):
     def __init__(self, ui, extraobjs):
         if extraobjs:
-            raise error.Abort(_('zstd8mb decoder received unexpected '
-                                'additional values'))
+            raise error.Abort(
+                _(b'zstd8mb decoder received unexpected additional values')
+            )
 
         super(zstd8mbdecoder, self).__init__(maxwindowsize=8 * 1048576)
 
+
 # We lazily populate this to avoid excessive module imports when importing
 # this module.
 STREAM_ENCODERS = {}
 STREAM_ENCODERS_ORDER = []
 
+
 def populatestreamencoders():
     if STREAM_ENCODERS:
         return
 
     try:
         from . import zstd
+
         zstd.__version__
     except ImportError:
         zstd = None
@@ -761,6 +836,7 @@
     STREAM_ENCODERS[b'identity'] = (identityencoder, identitydecoder)
     STREAM_ENCODERS_ORDER.append(b'identity')
 
+
 class stream(object):
     """Represents a logical unidirectional series of frames."""
 
@@ -778,8 +854,10 @@
             streamflags |= STREAM_FLAG_BEGIN_STREAM
             self._active = True
 
-        return makeframe(requestid, self.streamid, streamflags, typeid, flags,
-                         payload)
+        return makeframe(
+            requestid, self.streamid, streamflags, typeid, flags, payload
+        )
+
 
 class inputstream(stream):
     """Represents a stream used for receiving data."""
@@ -795,7 +873,7 @@
         decoded from the stream encoding settings frame payloads.
         """
         if name not in STREAM_ENCODERS:
-            raise error.Abort(_('unknown stream decoder: %s') % name)
+            raise error.Abort(_(b'unknown stream decoder: %s') % name)
 
         self._decoder = STREAM_ENCODERS[name][1](ui, extraobjs)
 
@@ -813,6 +891,7 @@
 
         return self._decoder.flush()
 
+
 class outputstream(stream):
     """Represents a stream used for sending data."""
 
@@ -828,7 +907,7 @@
         Receives the stream profile name.
         """
         if name not in STREAM_ENCODERS:
-            raise error.Abort(_('unknown stream encoder: %s') % name)
+            raise error.Abort(_(b'unknown stream encoder: %s') % name)
 
         self._encoder = STREAM_ENCODERS[name][0](ui)
         self._encodername = name
@@ -851,8 +930,7 @@
 
         self._encoder.finish()
 
-    def makeframe(self, requestid, typeid, flags, payload,
-                  encoded=False):
+    def makeframe(self, requestid, typeid, flags, payload, encoded=False):
         """Create a frame to be sent out over this stream.
 
         Only returns the frame instance. Does not actually send it.
@@ -866,16 +944,20 @@
             if not self.streamsettingssent:
                 raise error.ProgrammingError(
                     b'attempting to send encoded frame without sending stream '
-                    b'settings')
+                    b'settings'
+                )
 
             streamflags |= STREAM_FLAG_ENCODING_APPLIED
 
-        if (typeid == FRAME_TYPE_STREAM_SETTINGS
-            and flags & FLAG_STREAM_ENCODING_SETTINGS_EOS):
+        if (
+            typeid == FRAME_TYPE_STREAM_SETTINGS
+            and flags & FLAG_STREAM_ENCODING_SETTINGS_EOS
+        ):
             self.streamsettingssent = True
 
-        return makeframe(requestid, self.streamid, streamflags, typeid, flags,
-                         payload)
+        return makeframe(
+            requestid, self.streamid, streamflags, typeid, flags, payload
+        )
 
     def makestreamsettingsframe(self, requestid):
         """Create a stream settings frame for this stream.
@@ -887,19 +969,27 @@
             return None
 
         payload = b''.join(cborutil.streamencode(self._encodername))
-        return self.makeframe(requestid, FRAME_TYPE_STREAM_SETTINGS,
-                              FLAG_STREAM_ENCODING_SETTINGS_EOS, payload)
+        return self.makeframe(
+            requestid,
+            FRAME_TYPE_STREAM_SETTINGS,
+            FLAG_STREAM_ENCODING_SETTINGS_EOS,
+            payload,
+        )
+
 
 def ensureserverstream(stream):
     if stream.streamid % 2:
-        raise error.ProgrammingError('server should only write to even '
-                                     'numbered streams; %d is not even' %
-                                     stream.streamid)
+        raise error.ProgrammingError(
+            b'server should only write to even '
+            b'numbered streams; %d is not even' % stream.streamid
+        )
+
 
 DEFAULT_PROTOCOL_SETTINGS = {
-    'contentencodings': [b'identity'],
+    b'contentencodings': [b'identity'],
 }
 
+
 class serverreactor(object):
     """Holds state of a server handling frame-based protocol requests.
 
@@ -977,7 +1067,7 @@
         """
         self._ui = ui
         self._deferoutput = deferoutput
-        self._state = 'initial'
+        self._state = b'initial'
         self._nextoutgoingstreamid = 2
         self._bufferedframegens = []
         # stream id -> stream instance for all active streams from the client.
@@ -1004,40 +1094,45 @@
         if any, the consumer should take next.
         """
         if not frame.streamid % 2:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('received frame with even numbered stream ID: %d') %
-                  frame.streamid)
+                _(b'received frame with even numbered stream ID: %d')
+                % frame.streamid
+            )
 
         if frame.streamid not in self._incomingstreams:
             if not frame.streamflags & STREAM_FLAG_BEGIN_STREAM:
-                self._state = 'errored'
+                self._state = b'errored'
                 return self._makeerrorresult(
-                    _('received frame on unknown inactive stream without '
-                      'beginning of stream flag set'))
+                    _(
+                        b'received frame on unknown inactive stream without '
+                        b'beginning of stream flag set'
+                    )
+                )
 
             self._incomingstreams[frame.streamid] = inputstream(frame.streamid)
 
         if frame.streamflags & STREAM_FLAG_ENCODING_APPLIED:
             # TODO handle decoding frames
-            self._state = 'errored'
-            raise error.ProgrammingError('support for decoding stream payloads '
-                                         'not yet implemented')
+            self._state = b'errored'
+            raise error.ProgrammingError(
+                b'support for decoding stream payloads not yet implemented'
+            )
 
         if frame.streamflags & STREAM_FLAG_END_STREAM:
             del self._incomingstreams[frame.streamid]
 
         handlers = {
-            'initial': self._onframeinitial,
-            'protocol-settings-receiving': self._onframeprotocolsettings,
-            'idle': self._onframeidle,
-            'command-receiving': self._onframecommandreceiving,
-            'errored': self._onframeerrored,
+            b'initial': self._onframeinitial,
+            b'protocol-settings-receiving': self._onframeprotocolsettings,
+            b'idle': self._onframeidle,
+            b'command-receiving': self._onframecommandreceiving,
+            b'errored': self._onframeerrored,
         }
 
         meth = handlers.get(self._state)
         if not meth:
-            raise error.ProgrammingError('unhandled state: %s' % self._state)
+            raise error.ProgrammingError(b'unhandled state: %s' % self._state)
 
         return meth(frame)
 
@@ -1080,20 +1175,25 @@
 
                     if emitted:
                         for frame in createcommandresponseeosframes(
-                            stream, requestid):
+                            stream, requestid
+                        ):
                             yield frame
                     break
 
                 except error.WireprotoCommandError as e:
                     for frame in createcommanderrorresponse(
-                        stream, requestid, e.message, e.messageargs):
+                        stream, requestid, e.message, e.messageargs
+                    ):
                         yield frame
                     break
 
                 except Exception as e:
                     for frame in createerrorframe(
-                        stream, requestid, '%s' % stringutil.forcebytestr(e),
-                        errtype='server'):
+                        stream,
+                        requestid,
+                        b'%s' % stringutil.forcebytestr(e),
+                        errtype=b'server',
+                    ):
 
                         yield frame
 
@@ -1105,15 +1205,17 @@
                     if isinstance(o, wireprototypes.alternatelocationresponse):
                         if emitted:
                             raise error.ProgrammingError(
-                                'alternatelocationresponse seen after initial '
-                                'output object')
+                                b'alternatelocationresponse seen after initial '
+                                b'output object'
+                            )
 
                         frame = stream.makestreamsettingsframe(requestid)
                         if frame:
                             yield frame
 
                         yield createalternatelocationresponseframe(
-                            stream, requestid, o)
+                            stream, requestid, o
+                        )
 
                         alternatelocationsent = True
                         emitted = True
@@ -1121,7 +1223,8 @@
 
                     if alternatelocationsent:
                         raise error.ProgrammingError(
-                            'object follows alternatelocationresponse')
+                            b'object follows alternatelocationresponse'
+                        )
 
                     if not emitted:
                         # Frame is optional.
@@ -1147,9 +1250,11 @@
                             yield frame
 
                     elif isinstance(
-                        o, wireprototypes.indefinitebytestringresponse):
+                        o, wireprototypes.indefinitebytestringresponse
+                    ):
                         for chunk in cborutil.streamencodebytestringfromiter(
-                            o.chunks):
+                            o.chunks
+                        ):
 
                             for frame in emitter.send(chunk):
                                 yield frame
@@ -1161,9 +1266,9 @@
                                 yield frame
 
                 except Exception as e:
-                    for frame in createerrorframe(stream, requestid,
-                                                  '%s' % e,
-                                                  errtype='server'):
+                    for frame in createerrorframe(
+                        stream, requestid, b'%s' % e, errtype=b'server'
+                    ):
                         yield frame
 
                     break
@@ -1181,7 +1286,7 @@
         # TODO should we do anything about in-flight commands?
 
         if not self._deferoutput or not self._bufferedframegens:
-            return 'noop', {}
+            return b'noop', {}
 
         # If we buffered all our responses, emit those.
         def makegen():
@@ -1189,25 +1294,22 @@
                 for frame in gen:
                     yield frame
 
-        return 'sendframes', {
-            'framegen': makegen(),
-        }
+        return b'sendframes', {b'framegen': makegen(),}
 
     def _handlesendframes(self, framegen):
         if self._deferoutput:
             self._bufferedframegens.append(framegen)
-            return 'noop', {}
+            return b'noop', {}
         else:
-            return 'sendframes', {
-                'framegen': framegen,
-            }
+            return b'sendframes', {b'framegen': framegen,}
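# Example sketch, not part of this change: how a transport might consume
# the (action, meta) pairs these reactor methods return. The action names
# b'noop' and b'sendframes' and the b'framegen' key come from the code
# above; ``sendtowire`` is a hypothetical write callable.
def drainreactoraction(action, meta, sendtowire):
    if action == b'sendframes':
        for frame in meta[b'framegen']:
            sendtowire(frame)
    elif action != b'noop':
        raise error.ProgrammingError(b'unhandled action: %s' % action)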
 
     def onservererror(self, stream, requestid, msg):
         ensureserverstream(stream)
 
         def sendframes():
-            for frame in createerrorframe(stream, requestid, msg,
-                                          errtype='server'):
+            for frame in createerrorframe(
+                stream, requestid, msg, errtype=b'server'
+            ):
                 yield frame
 
             self._activecommands.remove(requestid)
@@ -1219,8 +1321,9 @@
         ensureserverstream(stream)
 
         def sendframes():
-            for frame in createcommanderrorresponse(stream, requestid, message,
-                                                    args):
+            for frame in createcommanderrorresponse(
+                stream, requestid, message, args
+            ):
                 yield frame
 
             self._activecommands.remove(requestid)
@@ -1243,40 +1346,40 @@
         # Always use the *server's* preferred encoder over the client's,
         # as servers have more to lose from sub-optimal encoders being used.
         for name in STREAM_ENCODERS_ORDER:
-            if name in self._sendersettings['contentencodings']:
+            if name in self._sendersettings[b'contentencodings']:
                 s.setencoder(self._ui, name)
                 break
 
         return s
 
     def _makeerrorresult(self, msg):
-        return 'error', {
-            'message': msg,
-        }
+        return b'error', {b'message': msg,}
 
     def _makeruncommandresult(self, requestid):
         entry = self._receivingcommands[requestid]
 
-        if not entry['requestdone']:
-            self._state = 'errored'
-            raise error.ProgrammingError('should not be called without '
-                                         'requestdone set')
+        if not entry[b'requestdone']:
+            self._state = b'errored'
+            raise error.ProgrammingError(
+                b'should not be called without requestdone set'
+            )
 
         del self._receivingcommands[requestid]
 
         if self._receivingcommands:
-            self._state = 'command-receiving'
+            self._state = b'command-receiving'
         else:
-            self._state = 'idle'
+            self._state = b'idle'
 
         # Decode the payloads as CBOR.
-        entry['payload'].seek(0)
-        request = cborutil.decodeall(entry['payload'].getvalue())[0]
+        entry[b'payload'].seek(0)
+        request = cborutil.decodeall(entry[b'payload'].getvalue())[0]
 
         if b'name' not in request:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('command request missing "name" field'))
+                _(b'command request missing "name" field')
+            )
 
         if b'args' not in request:
             request[b'args'] = {}
@@ -1284,86 +1387,107 @@
         assert requestid not in self._activecommands
         self._activecommands.add(requestid)
 
-        return 'runcommand', {
-            'requestid': requestid,
-            'command': request[b'name'],
-            'args': request[b'args'],
-            'redirect': request.get(b'redirect'),
-            'data': entry['data'].getvalue() if entry['data'] else None,
-        }
+        return (
+            b'runcommand',
+            {
+                b'requestid': requestid,
+                b'command': request[b'name'],
+                b'args': request[b'args'],
+                b'redirect': request.get(b'redirect'),
+                b'data': entry[b'data'].getvalue() if entry[b'data'] else None,
+            },
+        )
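# Example sketch, not part of this change: the meta dict built above is
# what a server transport receives alongside the b'runcommand' action.
# ``dispatchcommand`` is a hypothetical stand-in for the transport's
# command table.
def handleruncommand(meta, dispatchcommand):
    return dispatchcommand(
        meta[b'command'],            # request[b'name'] from the CBOR map
        meta[b'args'],               # request[b'args'], defaulted to {}
        data=meta[b'data'],          # buffered command data, or None
        redirect=meta[b'redirect'],  # optional redirect request
    )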
 
     def _makewantframeresult(self):
-        return 'wantframe', {
-            'state': self._state,
-        }
+        return b'wantframe', {b'state': self._state,}
 
     def _validatecommandrequestframe(self, frame):
         new = frame.flags & FLAG_COMMAND_REQUEST_NEW
         continuation = frame.flags & FLAG_COMMAND_REQUEST_CONTINUATION
 
         if new and continuation:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('received command request frame with both new and '
-                  'continuation flags set'))
+                _(
+                    b'received command request frame with both new and '
+                    b'continuation flags set'
+                )
+            )
 
         if not new and not continuation:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('received command request frame with neither new nor '
-                  'continuation flags set'))
+                _(
+                    b'received command request frame with neither new nor '
+                    b'continuation flags set'
+                )
+            )
 
     def _onframeinitial(self, frame):
         # Called when we receive a frame when in the "initial" state.
         if frame.typeid == FRAME_TYPE_SENDER_PROTOCOL_SETTINGS:
-            self._state = 'protocol-settings-receiving'
+            self._state = b'protocol-settings-receiving'
             self._protocolsettingsdecoder = cborutil.bufferingdecoder()
             return self._onframeprotocolsettings(frame)
 
         elif frame.typeid == FRAME_TYPE_COMMAND_REQUEST:
-            self._state = 'idle'
+            self._state = b'idle'
             return self._onframeidle(frame)
 
         else:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('expected sender protocol settings or command request '
-                  'frame; got %d') % frame.typeid)
+                _(
+                    b'expected sender protocol settings or command request '
+                    b'frame; got %d'
+                )
+                % frame.typeid
+            )
 
     def _onframeprotocolsettings(self, frame):
-        assert self._state == 'protocol-settings-receiving'
+        assert self._state == b'protocol-settings-receiving'
         assert self._protocolsettingsdecoder is not None
 
         if frame.typeid != FRAME_TYPE_SENDER_PROTOCOL_SETTINGS:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('expected sender protocol settings frame; got %d') %
-                frame.typeid)
+                _(b'expected sender protocol settings frame; got %d')
+                % frame.typeid
+            )
 
         more = frame.flags & FLAG_SENDER_PROTOCOL_SETTINGS_CONTINUATION
         eos = frame.flags & FLAG_SENDER_PROTOCOL_SETTINGS_EOS
 
         if more and eos:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('sender protocol settings frame cannot have both '
-                  'continuation and end of stream flags set'))
+                _(
+                    b'sender protocol settings frame cannot have both '
+                    b'continuation and end of stream flags set'
+                )
+            )
 
         if not more and not eos:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('sender protocol settings frame must have continuation or '
-                  'end of stream flag set'))
+                _(
+                    b'sender protocol settings frame must have continuation or '
+                    b'end of stream flag set'
+                )
+            )
 
         # TODO establish limits for maximum amount of data that can be
         # buffered.
         try:
             self._protocolsettingsdecoder.decode(frame.payload)
         except Exception as e:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('error decoding CBOR from sender protocol settings frame: %s')
-                % stringutil.forcebytestr(e))
+                _(
+                    b'error decoding CBOR from sender protocol settings frame: %s'
+                )
+                % stringutil.forcebytestr(e)
+            )
 
         if more:
             return self._makewantframeresult()
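# Illustration, not part of this change: a sender protocol settings
# payload is a plain CBOR map, and b'contentencodings' is the only key
# this code reads. The encoder names below are illustrative.
_exampleprotocolsettings = b''.join(
    cborutil.streamencode({b'contentencodings': [b'zstd-8mb', b'zlib']})
)
# cborutil.decodeall(_exampleprotocolsettings) yields that single map.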
@@ -1374,21 +1498,25 @@
         self._protocolsettingsdecoder = None
 
         if not decoded:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('sender protocol settings frame did not contain CBOR data'))
+                _(b'sender protocol settings frame did not contain CBOR data')
+            )
         elif len(decoded) > 1:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('sender protocol settings frame contained multiple CBOR '
-                  'values'))
+                _(
+                    b'sender protocol settings frame contained multiple CBOR '
+                    b'values'
+                )
+            )
 
         d = decoded[0]
 
         if b'contentencodings' in d:
-            self._sendersettings['contentencodings'] = d[b'contentencodings']
+            self._sendersettings[b'contentencodings'] = d[b'contentencodings']
 
-        self._state = 'idle'
+        self._state = b'idle'
 
         return self._makewantframeresult()
 
@@ -1396,41 +1524,45 @@
         # The only frame type that should be received in this state is a
         # command request.
         if frame.typeid != FRAME_TYPE_COMMAND_REQUEST:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('expected command request frame; got %d') % frame.typeid)
+                _(b'expected command request frame; got %d') % frame.typeid
+            )
 
         res = self._validatecommandrequestframe(frame)
         if res:
             return res
 
         if frame.requestid in self._receivingcommands:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('request with ID %d already received') % frame.requestid)
+                _(b'request with ID %d already received') % frame.requestid
+            )
 
         if frame.requestid in self._activecommands:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('request with ID %d is already active') % frame.requestid)
+                _(b'request with ID %d is already active') % frame.requestid
+            )
 
         new = frame.flags & FLAG_COMMAND_REQUEST_NEW
         moreframes = frame.flags & FLAG_COMMAND_REQUEST_MORE_FRAMES
         expectingdata = frame.flags & FLAG_COMMAND_REQUEST_EXPECT_DATA
 
         if not new:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('received command request frame without new flag set'))
+                _(b'received command request frame without new flag set')
+            )
 
         payload = util.bytesio()
         payload.write(frame.payload)
 
         self._receivingcommands[frame.requestid] = {
-            'payload': payload,
-            'data': None,
-            'requestdone': not moreframes,
-            'expectingdata': bool(expectingdata),
+            b'payload': payload,
+            b'data': None,
+            b'requestdone': not moreframes,
+            b'expectingdata': bool(expectingdata),
         }
 
         # This is the final frame for this request. Dispatch it.
@@ -1438,7 +1570,7 @@
             return self._makeruncommandresult(frame.requestid)
 
         assert moreframes or expectingdata
-        self._state = 'command-receiving'
+        self._state = b'command-receiving'
         return self._makewantframeresult()
 
     def _onframecommandreceiving(self, frame):
@@ -1454,16 +1586,18 @@
         # All other frames should be related to a command that is currently
         # receiving but is not active.
         if frame.requestid in self._activecommands:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('received frame for request that is still active: %d') %
-                frame.requestid)
+                _(b'received frame for request that is still active: %d')
+                % frame.requestid
+            )
 
         if frame.requestid not in self._receivingcommands:
-            self._state = 'errored'
+            self._state = b'errored'
             return self._makeerrorresult(
-                _('received frame for request that is not receiving: %d') %
-                  frame.requestid)
+                _(b'received frame for request that is not receiving: %d')
+                % frame.requestid
+            )
 
         entry = self._receivingcommands[frame.requestid]
 
@@ -1471,21 +1605,25 @@
             moreframes = frame.flags & FLAG_COMMAND_REQUEST_MORE_FRAMES
             expectingdata = bool(frame.flags & FLAG_COMMAND_REQUEST_EXPECT_DATA)
 
-            if entry['requestdone']:
-                self._state = 'errored'
+            if entry[b'requestdone']:
+                self._state = b'errored'
                 return self._makeerrorresult(
-                    _('received command request frame when request frames '
-                      'were supposedly done'))
+                    _(
+                        b'received command request frame when request frames '
+                        b'were supposedly done'
+                    )
+                )
 
-            if expectingdata != entry['expectingdata']:
-                self._state = 'errored'
+            if expectingdata != entry[b'expectingdata']:
+                self._state = b'errored'
                 return self._makeerrorresult(
-                    _('mismatch between expect data flag and previous frame'))
+                    _(b'mismatch between expect data flag and previous frame')
+                )
 
-            entry['payload'].write(frame.payload)
+            entry[b'payload'].write(frame.payload)
 
             if not moreframes:
-                entry['requestdone'] = True
+                entry[b'requestdone'] = True
 
             if not moreframes and not expectingdata:
                 return self._makeruncommandresult(frame.requestid)
@@ -1493,39 +1631,44 @@
             return self._makewantframeresult()
 
         elif frame.typeid == FRAME_TYPE_COMMAND_DATA:
-            if not entry['expectingdata']:
-                self._state = 'errored'
-                return self._makeerrorresult(_(
-                    'received command data frame for request that is not '
-                    'expecting data: %d') % frame.requestid)
+            if not entry[b'expectingdata']:
+                self._state = b'errored'
+                return self._makeerrorresult(
+                    _(
+                        b'received command data frame for request that is not '
+                        b'expecting data: %d'
+                    )
+                    % frame.requestid
+                )
 
-            if entry['data'] is None:
-                entry['data'] = util.bytesio()
+            if entry[b'data'] is None:
+                entry[b'data'] = util.bytesio()
 
             return self._handlecommanddataframe(frame, entry)
         else:
-            self._state = 'errored'
-            return self._makeerrorresult(_(
-                'received unexpected frame type: %d') % frame.typeid)
+            self._state = b'errored'
+            return self._makeerrorresult(
+                _(b'received unexpected frame type: %d') % frame.typeid
+            )
 
     def _handlecommanddataframe(self, frame, entry):
         assert frame.typeid == FRAME_TYPE_COMMAND_DATA
 
         # TODO support streaming data instead of buffering it.
-        entry['data'].write(frame.payload)
+        entry[b'data'].write(frame.payload)
 
         if frame.flags & FLAG_COMMAND_DATA_CONTINUATION:
             return self._makewantframeresult()
         elif frame.flags & FLAG_COMMAND_DATA_EOS:
-            entry['data'].seek(0)
+            entry[b'data'].seek(0)
             return self._makeruncommandresult(frame.requestid)
         else:
-            self._state = 'errored'
-            return self._makeerrorresult(_('command data frame without '
-                                           'flags'))
+            self._state = b'errored'
+            return self._makeerrorresult(_(b'command data frame without flags'))
 
     def _onframeerrored(self, frame):
-        return self._makeerrorresult(_('server already errored'))
+        return self._makeerrorresult(_(b'server already errored'))
+
 
 class commandrequest(object):
     """Represents a request to run a command."""
@@ -1536,7 +1679,8 @@
         self.args = args
         self.datafh = datafh
         self.redirect = redirect
-        self.state = 'pending'
+        self.state = b'pending'
+
 
 class clientreactor(object):
     """Holds state of a client issuing frame-based protocol requests.
@@ -1584,8 +1728,14 @@
        is expected to follow or we're at the end of the response stream,
        respectively.
     """
-    def __init__(self, ui, hasmultiplesend=False, buffersends=True,
-                 clientcontentencoders=None):
+
+    def __init__(
+        self,
+        ui,
+        hasmultiplesend=False,
+        buffersends=True,
+        clientcontentencoders=None,
+    ):
         """Create a new instance.
 
         ``hasmultiplesend`` indicates whether multiple sends are supported
@@ -1629,29 +1779,33 @@
         Returns a 3-tuple of (request, action, action data).
         """
         if not self._canissuecommands:
-            raise error.ProgrammingError('cannot issue new commands')
+            raise error.ProgrammingError(b'cannot issue new commands')
 
         requestid = self._nextrequestid
         self._nextrequestid += 2
 
-        request = commandrequest(requestid, name, args, datafh=datafh,
-                                 redirect=redirect)
+        request = commandrequest(
+            requestid, name, args, datafh=datafh, redirect=redirect
+        )
 
         if self._buffersends:
             self._pendingrequests.append(request)
-            return request, 'noop', {}
+            return request, b'noop', {}
         else:
             if not self._cansend:
-                raise error.ProgrammingError('sends cannot be performed on '
-                                             'this instance')
+                raise error.ProgrammingError(
+                    b'sends cannot be performed on this instance'
+                )
 
             if not self._hasmultiplesend:
                 self._cansend = False
                 self._canissuecommands = False
 
-            return request, 'sendframes', {
-                'framegen': self._makecommandframes(request),
-            }
+            return (
+                request,
+                b'sendframes',
+                {b'framegen': self._makecommandframes(request),},
+            )
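# Example sketch, not part of this change, of driving a buffering client
# reactor end to end using only behavior shown here (``ui`` and
# ``sendframe`` are assumed; the issuing method is named callcommand in
# the full file):
#
#   reactor = clientreactor(ui, buffersends=True)
#   request, action, meta = reactor.callcommand(b'heads', {})
#   assert action == b'noop'                # buffered until flush
#   action, meta = reactor.flushcommands()
#   assert action == b'sendframes'
#   for frame in meta[b'framegen']:
#       sendframe(frame)                    # hypothetical transport write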
 
     def flushcommands(self):
         """Request that all queued commands be sent.
@@ -1664,11 +1818,12 @@
         requests are allowed after this is called.
         """
         if not self._pendingrequests:
-            return 'noop', {}
+            return b'noop', {}
 
         if not self._cansend:
-            raise error.ProgrammingError('sends cannot be performed on this '
-                                         'instance')
+            raise error.ProgrammingError(
+                b'sends cannot be performed on this instance'
+            )
 
         # If the instance only allows sending once, mark that we have fired
         # our one shot.
@@ -1682,9 +1837,7 @@
                 for frame in self._makecommandframes(request):
                     yield frame
 
-        return 'sendframes', {
-            'framegen': makeframes(),
-        }
+        return b'sendframes', {b'framegen': makeframes(),}
 
     def _makecommandframes(self, request):
         """Emit frames to issue a command request.
@@ -1693,32 +1846,37 @@
         state.
         """
         self._activerequests[request.requestid] = request
-        request.state = 'sending'
+        request.state = b'sending'
 
         if not self._protocolsettingssent and self._clientcontentencoders:
             self._protocolsettingssent = True
 
-            payload = b''.join(cborutil.streamencode({
-                b'contentencodings': self._clientcontentencoders,
-            }))
+            payload = b''.join(
+                cborutil.streamencode(
+                    {b'contentencodings': self._clientcontentencoders,}
+                )
+            )
 
             yield self._outgoingstream.makeframe(
                 requestid=request.requestid,
                 typeid=FRAME_TYPE_SENDER_PROTOCOL_SETTINGS,
                 flags=FLAG_SENDER_PROTOCOL_SETTINGS_EOS,
-                payload=payload)
+                payload=payload,
+            )
 
-        res = createcommandframes(self._outgoingstream,
-                                  request.requestid,
-                                  request.name,
-                                  request.args,
-                                  datafh=request.datafh,
-                                  redirect=request.redirect)
+        res = createcommandframes(
+            self._outgoingstream,
+            request.requestid,
+            request.name,
+            request.args,
+            datafh=request.datafh,
+            redirect=request.redirect,
+        )
 
         for frame in res:
             yield frame
 
-        request.state = 'sent'
+        request.state = b'sent'
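# Reference summary, not part of this change: the values assigned to
# commandrequest.state in this class, in lifecycle order.
_CLIENTREQUESTSTATES = (
    b'pending',    # commandrequest() constructed
    b'sending',    # _makecommandframes() is emitting frames
    b'sent',       # all frames for the request written
    b'receiving',  # first response frame routed to the request
    b'received',   # FLAG_COMMAND_RESPONSE_EOS seen
    b'errored',    # error response frame seen
)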
 
     def onframerecv(self, frame):
         """Process a frame that has been received off the wire.
@@ -1727,21 +1885,29 @@
         caller needs to take as a result of receiving this frame.
         """
         if frame.streamid % 2:
-            return 'error', {
-                'message': (
-                    _('received frame with odd numbered stream ID: %d') %
-                    frame.streamid),
-            }
+            return (
+                b'error',
+                {
+                    b'message': (
+                        _(b'received frame with odd numbered stream ID: %d')
+                        % frame.streamid
+                    ),
+                },
+            )
 
         if frame.streamid not in self._incomingstreams:
             if not frame.streamflags & STREAM_FLAG_BEGIN_STREAM:
-                return 'error', {
-                    'message': _('received frame on unknown stream '
-                                 'without beginning of stream flag set'),
-                }
+                return (
+                    b'error',
+                    {
+                        b'message': _(
+                            b'received frame on unknown stream '
+                            b'without beginning of stream flag set'
+                        ),
+                    },
+                )
 
-            self._incomingstreams[frame.streamid] = inputstream(
-                frame.streamid)
+            self._incomingstreams[frame.streamid] = inputstream(frame.streamid)
 
         stream = self._incomingstreams[frame.streamid]
 
@@ -1758,13 +1924,18 @@
             return self._onstreamsettingsframe(frame)
 
         if frame.requestid not in self._activerequests:
-            return 'error', {
-                'message': (_('received frame for inactive request ID: %d') %
-                            frame.requestid),
-            }
+            return (
+                b'error',
+                {
+                    b'message': (
+                        _(b'received frame for inactive request ID: %d')
+                        % frame.requestid
+                    ),
+                },
+            )
 
         request = self._activerequests[frame.requestid]
-        request.state = 'receiving'
+        request.state = b'receiving'
 
         handlers = {
             FRAME_TYPE_COMMAND_RESPONSE: self._oncommandresponseframe,
@@ -1773,8 +1944,9 @@
 
         meth = handlers.get(frame.typeid)
         if not meth:
-            raise error.ProgrammingError('unhandled frame type: %d' %
-                                         frame.typeid)
+            raise error.ProgrammingError(
+                b'unhandled frame type: %d' % frame.typeid
+            )
 
         return meth(request, frame)
 
@@ -1785,16 +1957,28 @@
         eos = frame.flags & FLAG_STREAM_ENCODING_SETTINGS_EOS
 
         if more and eos:
-            return 'error', {
-                'message': (_('stream encoding settings frame cannot have both '
-                              'continuation and end of stream flags set')),
-            }
+            return (
+                b'error',
+                {
+                    b'message': (
+                        _(
+                            b'stream encoding settings frame cannot have both '
+                            b'continuation and end of stream flags set'
+                        )
+                    ),
+                },
+            )
 
         if not more and not eos:
-            return 'error', {
-                'message': _('stream encoding settings frame must have '
-                             'continuation or end of stream flag set'),
-            }
+            return (
+                b'error',
+                {
+                    b'message': _(
+                        b'stream encoding settings frame must have '
+                        b'continuation or end of stream flag set'
+                    ),
+                },
+            )
 
         if frame.streamid not in self._streamsettingsdecoders:
             decoder = cborutil.bufferingdecoder()
@@ -1805,14 +1989,21 @@
         try:
             decoder.decode(frame.payload)
         except Exception as e:
-            return 'error', {
-                'message': (_('error decoding CBOR from stream encoding '
-                             'settings frame: %s') %
-                           stringutil.forcebytestr(e)),
-            }
+            return (
+                b'error',
+                {
+                    b'message': (
+                        _(
+                            b'error decoding CBOR from stream encoding '
+                            b'settings frame: %s'
+                        )
+                        % stringutil.forcebytestr(e)
+                    ),
+                },
+            )
 
         if more:
-            return 'noop', {}
+            return b'noop', {}
 
         assert eos
 
@@ -1820,44 +2011,60 @@
         del self._streamsettingsdecoders[frame.streamid]
 
         if not decoded:
-            return 'error', {
-                'message': _('stream encoding settings frame did not contain '
-                             'CBOR data'),
-            }
+            return (
+                b'error',
+                {
+                    b'message': _(
+                        b'stream encoding settings frame did not contain '
+                        b'CBOR data'
+                    ),
+                },
+            )
 
         try:
-            self._incomingstreams[frame.streamid].setdecoder(self._ui,
-                                                             decoded[0],
-                                                             decoded[1:])
+            self._incomingstreams[frame.streamid].setdecoder(
+                self._ui, decoded[0], decoded[1:]
+            )
         except Exception as e:
-            return 'error', {
-                'message': (_('error setting stream decoder: %s') %
-                            stringutil.forcebytestr(e)),
-            }
+            return (
+                b'error',
+                {
+                    b'message': (
+                        _(b'error setting stream decoder: %s')
+                        % stringutil.forcebytestr(e)
+                    ),
+                },
+            )
 
-        return 'noop', {}
+        return b'noop', {}
 
     def _oncommandresponseframe(self, request, frame):
         if frame.flags & FLAG_COMMAND_RESPONSE_EOS:
-            request.state = 'received'
+            request.state = b'received'
             del self._activerequests[request.requestid]
 
-        return 'responsedata', {
-            'request': request,
-            'expectmore': frame.flags & FLAG_COMMAND_RESPONSE_CONTINUATION,
-            'eos': frame.flags & FLAG_COMMAND_RESPONSE_EOS,
-            'data': frame.payload,
-        }
+        return (
+            b'responsedata',
+            {
+                b'request': request,
+                b'expectmore': frame.flags & FLAG_COMMAND_RESPONSE_CONTINUATION,
+                b'eos': frame.flags & FLAG_COMMAND_RESPONSE_EOS,
+                b'data': frame.payload,
+            },
+        )
 
     def _onerrorresponseframe(self, request, frame):
-        request.state = 'errored'
+        request.state = b'errored'
         del self._activerequests[request.requestid]
 
         # The payload should be a CBOR map.
         m = cborutil.decodeall(frame.payload)[0]
 
-        return 'error', {
-            'request': request,
-            'type': m['type'],
-            'message': m['message'],
-        }
+        return (
+            b'error',
+            {
+                b'request': request,
+                b'type': m[b'type'],
+                b'message': m[b'message'],
+            },
+        )
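# Illustration, not part of this change: an error response payload is a
# CBOR map with b'type' and b'message' keys, mirroring the decode above
# (the value shapes here are illustrative, not normative):
#
#   payload = b''.join(
#       cborutil.streamencode({b'type': b'server', b'message': b'boom'})
#   )
#   cborutil.decodeall(payload)[0][b'type']  # -> b'server'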
--- a/mercurial/wireprotoserver.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/wireprotoserver.py	Mon Oct 21 11:09:48 2019 -0400
@@ -21,10 +21,10 @@
     wireprotov1server,
     wireprotov2server,
 )
+from .interfaces import util as interfaceutil
 from .utils import (
     cborutil,
     compression,
-    interfaceutil,
 )
 
 stringio = util.stringio
@@ -34,13 +34,14 @@
 
 HTTP_OK = 200
 
-HGTYPE = 'application/mercurial-0.1'
-HGTYPE2 = 'application/mercurial-0.2'
-HGERRTYPE = 'application/hg-error'
+HGTYPE = b'application/mercurial-0.1'
+HGTYPE2 = b'application/mercurial-0.2'
+HGERRTYPE = b'application/hg-error'
 
 SSHV1 = wireprototypes.SSHV1
 SSHV2 = wireprototypes.SSHV2
 
+
 def decodevaluefromheaders(req, headerprefix):
     """Decode a long value from multiple HTTP request headers.
 
@@ -55,7 +56,8 @@
         chunks.append(pycompat.bytesurl(v))
         i += 1
 
-    return ''.join(chunks)
+    return b''.join(chunks)
+
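# Illustration, not part of this change, of the scheme
# decodevaluefromheaders() reverses: a long value is split across
# numbered headers (suffixes starting at 1 in the full function), e.g.
#
#   X-HgArg-1: cmds=heads+%3B
#   X-HgArg-2: known+nodes%3D
#
# decodevaluefromheaders(req, b'X-HgArg') concatenates the values in
# order until a numbered header is missing, returning
# b'cmds=heads+%3Bknown+nodes%3D'.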
 
 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
 class httpv1protocolhandler(object):
@@ -67,19 +69,19 @@
 
     @property
     def name(self):
-        return 'http-v1'
+        return b'http-v1'
 
     def getargs(self, args):
         knownargs = self._args()
         data = {}
         keys = args.split()
         for k in keys:
-            if k == '*':
+            if k == b'*':
                 star = {}
                 for key in knownargs.keys():
-                    if key != 'cmd' and key not in keys:
+                    if key != b'cmd' and key not in keys:
                         star[key] = knownargs[key][0]
-                data['*'] = star
+                data[b'*'] = star
             else:
                 data[k] = knownargs[k][0]
         return [data[k] for k in keys]
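# Standalone sketch, not part of this change, mirroring getargs() above
# to show the b'*' convention: unlisted, non-b'cmd' arguments are folded
# into a star dict.
def getargs_sketch(knownargs, spec):
    data = {}
    keys = spec.split()
    for k in keys:
        if k == b'*':
            star = {}
            for key in knownargs.keys():
                if key != b'cmd' and key not in keys:
                    star[key] = knownargs[key][0]
            data[b'*'] = star
        else:
            data[k] = knownargs[k][0]
    return [data[k] for k in keys]

# getargs_sketch({b'cmd': [b'known'], b'nodes': [b'abc'], b'x': [b'1']},
#                b'nodes *') == [b'abc', {b'x': b'1'}]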
@@ -88,8 +90,11 @@
         args = self._req.qsparams.asdictoflists()
         postlen = int(self._req.headers.get(b'X-HgArgs-Post', 0))
         if postlen:
-            args.update(urlreq.parseqs(
-                self._req.bodyfh.read(postlen), keep_blank_values=True))
+            args.update(
+                urlreq.parseqs(
+                    self._req.bodyfh.read(postlen), keep_blank_values=True
+                )
+            )
             return args
 
         argvalue = decodevaluefromheaders(self._req, b'X-HgArg')
@@ -99,7 +104,7 @@
     def getprotocaps(self):
         if self._protocaps is None:
             value = decodevaluefromheaders(self._req, b'X-HgProto')
-            self._protocaps = set(value.split(' '))
+            self._protocaps = set(value.split(b' '))
         return self._protocaps
 
     def getpayload(self):
@@ -127,35 +132,40 @@
             self._ui.ferr = olderr
 
     def client(self):
-        return 'remote:%s:%s:%s' % (
+        return b'remote:%s:%s:%s' % (
             self._req.urlscheme,
-            urlreq.quote(self._req.remotehost or ''),
-            urlreq.quote(self._req.remoteuser or ''))
+            urlreq.quote(self._req.remotehost or b''),
+            urlreq.quote(self._req.remoteuser or b''),
+        )
 
     def addcapabilities(self, repo, caps):
         caps.append(b'batch')
 
-        caps.append('httpheader=%d' %
-                    repo.ui.configint('server', 'maxhttpheaderlen'))
-        if repo.ui.configbool('experimental', 'httppostargs'):
-            caps.append('httppostargs')
+        caps.append(
+            b'httpheader=%d' % repo.ui.configint(b'server', b'maxhttpheaderlen')
+        )
+        if repo.ui.configbool(b'experimental', b'httppostargs'):
+            caps.append(b'httppostargs')
 
         # FUTURE advertise 0.2rx once support is implemented
         # FUTURE advertise minrx and mintx after consulting config option
-        caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
+        caps.append(b'httpmediatype=0.1rx,0.1tx,0.2tx')
 
-        compengines = wireprototypes.supportedcompengines(repo.ui,
-            compression.SERVERROLE)
+        compengines = wireprototypes.supportedcompengines(
+            repo.ui, compression.SERVERROLE
+        )
         if compengines:
-            comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
-                                 for e in compengines)
-            caps.append('compression=%s' % comptypes)
+            comptypes = b','.join(
+                urlreq.quote(e.wireprotosupport().name) for e in compengines
+            )
+            caps.append(b'compression=%s' % comptypes)
 
         return caps
 
     def checkperm(self, perm):
         return self._checkperm(perm)
 
+
 # This method exists mostly so that extensions like remotefilelog can
 # disable a kludgey legacy method only over http. As of early 2018,
 # there are no other known users, so with any luck we can discard this
@@ -163,6 +173,7 @@
 def iscmd(cmd):
     return cmd in wireprotov1server.commands
 
+
 def handlewsgirequest(rctx, req, res, checkperm):
     """Possibly process a wire protocol request.
 
@@ -183,10 +194,10 @@
     # HTTP version 1 wire protocol requests are denoted by a "cmd" query
     # string parameter. If it isn't present, this isn't a wire protocol
     # request.
-    if 'cmd' not in req.qsparams:
+    if b'cmd' not in req.qsparams:
         return False
 
-    cmd = req.qsparams['cmd']
+    cmd = req.qsparams[b'cmd']
 
     # The "cmd" request parameter is used by both the wire protocol and hgweb.
     # While not all wire protocol commands are available for all transports,
@@ -204,14 +215,15 @@
     # in this case. We send an HTTP 404 for backwards compatibility reasons.
     if req.dispatchpath:
         res.status = hgwebcommon.statusmessage(404)
-        res.headers['Content-Type'] = HGTYPE
+        res.headers[b'Content-Type'] = HGTYPE
         # TODO This is not a good response to issue for this request. This
         # is mostly for BC for now.
-        res.setbodybytes('0\n%s\n' % b'Not Found')
+        res.setbodybytes(b'0\n%s\n' % b'Not Found')
         return True
 
-    proto = httpv1protocolhandler(req, repo.ui,
-                                  lambda perm: checkperm(rctx, req, perm))
+    proto = httpv1protocolhandler(
+        req, repo.ui, lambda perm: checkperm(rctx, req, perm)
+    )
 
     # The permissions checker should be the only thing that can raise an
     # ErrorResponse. It is kind of a layer violation to catch an hgweb
@@ -225,22 +237,24 @@
         res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
         # TODO This response body assumes the failed command was
         # "unbundle." That assumption is not always valid.
-        res.setbodybytes('0\n%s\n' % pycompat.bytestr(e))
+        res.setbodybytes(b'0\n%s\n' % pycompat.bytestr(e))
 
     return True
 
+
 def _availableapis(repo):
     apis = set()
 
     # Registered APIs are made available via config options of the name of
     # the protocol.
     for k, v in API_HANDLERS.items():
-        section, option = v['config']
+        section, option = v[b'config']
         if repo.ui.configbool(section, option):
             apis.add(k)
 
     return apis
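# Example, not part of this change, assuming a ``repo`` object: flipping
# the config knob named in the registration below makes the API
# available.
#
#   repo.ui.setconfig(b'experimental', b'web.api.http-v2', b'true', b'x')
#   wireprotov2server.HTTP_WIREPROTO_V2 in _availableapis(repo)  # True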
 
+
 def handlewsgiapirequest(rctx, req, res, checkperm):
     """Handle requests to /api/*."""
     assert req.dispatchparts[0] == b'api'
@@ -249,10 +263,10 @@
 
     # This whole URL space is experimental for now. But we want to
     # reserve the URL space. So, 404 all URLs if the feature isn't enabled.
-    if not repo.ui.configbool('experimental', 'web.apiserver'):
+    if not repo.ui.configbool(b'experimental', b'web.apiserver'):
         res.status = b'404 Not Found'
         res.headers[b'Content-Type'] = b'text/plain'
-        res.setbodybytes(_('Experimental API server endpoint not enabled'))
+        res.setbodybytes(_(b'Experimental API server endpoint not enabled'))
         return
 
     # The URL space is /api/<protocol>/*. The structure of URLs under varies
@@ -264,12 +278,16 @@
     if req.dispatchparts == [b'api']:
         res.status = b'200 OK'
         res.headers[b'Content-Type'] = b'text/plain'
-        lines = [_('APIs can be accessed at /api/<name>, where <name> can be '
-                   'one of the following:\n')]
+        lines = [
+            _(
+                b'APIs can be accessed at /api/<name>, where <name> can be '
+                b'one of the following:\n'
+            )
+        ]
         if availableapis:
             lines.extend(sorted(availableapis))
         else:
-            lines.append(_('(no available APIs)\n'))
+            lines.append(_(b'(no available APIs)\n'))
         res.setbodybytes(b'\n'.join(lines))
         return
 
@@ -278,18 +296,22 @@
     if proto not in API_HANDLERS:
         res.status = b'404 Not Found'
         res.headers[b'Content-Type'] = b'text/plain'
-        res.setbodybytes(_('Unknown API: %s\nKnown APIs: %s') % (
-            proto, b', '.join(sorted(availableapis))))
+        res.setbodybytes(
+            _(b'Unknown API: %s\nKnown APIs: %s')
+            % (proto, b', '.join(sorted(availableapis)))
+        )
         return
 
     if proto not in availableapis:
         res.status = b'404 Not Found'
         res.headers[b'Content-Type'] = b'text/plain'
-        res.setbodybytes(_('API %s not enabled\n') % proto)
+        res.setbodybytes(_(b'API %s not enabled\n') % proto)
         return
 
-    API_HANDLERS[proto]['handler'](rctx, req, res, checkperm,
-                                   req.dispatchparts[2:])
+    API_HANDLERS[proto][b'handler'](
+        rctx, req, res, checkperm, req.dispatchparts[2:]
+    )
+
 
 # Maps API name to metadata so custom API can be registered.
 # Keys are:
@@ -304,12 +326,13 @@
 #    descriptor for this service. The response must be serializable to CBOR.
 API_HANDLERS = {
     wireprotov2server.HTTP_WIREPROTO_V2: {
-        'config': ('experimental', 'web.api.http-v2'),
-        'handler': wireprotov2server.handlehttpv2request,
-        'apidescriptor': wireprotov2server.httpv2apidescriptor,
+        b'config': (b'experimental', b'web.api.http-v2'),
+        b'handler': wireprotov2server.handlehttpv2request,
+        b'apidescriptor': wireprotov2server.httpv2apidescriptor,
     },
 }
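# Sketch, not part of this change, of how an extension might register a
# custom API using the key names documented above; ``myhandler`` and
# ``mydescriptor`` are hypothetical. The handler is invoked as shown in
# handlewsgiapirequest():
#
#   API_HANDLERS[b'exp-my-api-0001'] = {
#       b'config': (b'experimental', b'web.api.my-api'),
#       b'handler': myhandler,           # (rctx, req, res, checkperm, urlparts)
#       b'apidescriptor': mydescriptor,  # optional; called as (req, repo)
#   }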
 
+
 def _httpresponsetype(ui, proto, prefer_uncompressed):
     """Determine the appropriate response type and compression settings.
 
@@ -318,20 +341,21 @@
     # Determine the response media type and compression engine based
     # on the request parameters.
 
-    if '0.2' in proto.getprotocaps():
+    if b'0.2' in proto.getprotocaps():
         # All clients are expected to support uncompressed data.
         if prefer_uncompressed:
             return HGTYPE2, compression._noopengine(), {}
 
         # Now find an agreed upon compression format.
         compformats = wireprotov1server.clientcompressionsupport(proto)
-        for engine in wireprototypes.supportedcompengines(ui,
-                compression.SERVERROLE):
+        for engine in wireprototypes.supportedcompengines(
+            ui, compression.SERVERROLE
+        ):
             if engine.wireprotosupport().name in compformats:
                 opts = {}
-                level = ui.configint('server', '%slevel' % engine.name())
+                level = ui.configint(b'server', b'%slevel' % engine.name())
                 if level is not None:
-                    opts['level'] = level
+                    opts[b'level'] = level
 
                 return HGTYPE2, engine, opts
 
@@ -341,8 +365,9 @@
     # Don't allow untrusted settings because disabling compression or
     # setting a very high compression level could lead to flooding
     # the server's network or CPU.
-    opts = {'level': ui.configint('server', 'zliblevel')}
-    return HGTYPE, util.compengines['zlib'], opts
+    opts = {b'level': ui.configint(b'server', b'zliblevel')}
+    return HGTYPE, util.compengines[b'zlib'], opts
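# Summary of the negotiation above, not part of this change: a client
# advertising b'0.2' via X-HgProto may receive application/mercurial-0.2
# with the first mutually supported engine in the server's preference
# order (or the no-op engine when uncompressed output is preferred); all
# other clients get application/mercurial-0.1 with trusted zlib settings:
#
#   (HGTYPE, util.compengines[b'zlib'], {b'level': <server.zliblevel>})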
+
 
 def processcapabilitieshandshake(repo, req, res, proto):
     """Called during a ?cmd=capabilities request.
@@ -352,7 +377,7 @@
     advertised services are available, we don't handle the request.
     """
     # Fall back to old behavior unless the API server is enabled.
-    if not repo.ui.configbool('experimental', 'web.apiserver'):
+    if not repo.ui.configbool(b'experimental', b'web.apiserver'):
         return False
 
     clientapis = decodevaluefromheaders(req, b'X-HgUpgrade')
@@ -361,7 +386,7 @@
         return False
 
     # We currently only support CBOR responses.
-    protocaps = set(protocaps.split(' '))
+    protocaps = set(protocaps.split(b' '))
     if b'cbor' not in protocaps:
         return False
 
@@ -370,20 +395,20 @@
     for api in sorted(set(clientapis.split()) & _availableapis(repo)):
         handler = API_HANDLERS[api]
 
-        descriptorfn = handler.get('apidescriptor')
+        descriptorfn = handler.get(b'apidescriptor')
         if not descriptorfn:
             continue
 
         descriptors[api] = descriptorfn(req, repo)
 
-    v1caps = wireprotov1server.dispatch(repo, proto, 'capabilities')
+    v1caps = wireprotov1server.dispatch(repo, proto, b'capabilities')
     assert isinstance(v1caps, wireprototypes.bytesresponse)
 
     m = {
         # TODO allow this to be configurable.
-        'apibase': 'api/',
-        'apis': descriptors,
-        'v1capabilities': v1caps.data,
+        b'apibase': b'api/',
+        b'apis': descriptors,
+        b'v1capabilities': v1caps.data,
     }
 
     res.status = b'200 OK'
@@ -392,6 +417,7 @@
 
     return True
 
+
 def _callhttp(repo, req, res, proto, cmd):
     # Avoid cycle involving hg module.
     from .hgweb import common as hgwebcommon
@@ -401,7 +427,7 @@
         # identifying the compression engine.
         name = engine.wireprotosupport().name
         assert 0 < len(name) < 256
-        yield struct.pack('B', len(name))
+        yield struct.pack(b'B', len(name))
         yield name
 
         for chunk in gen:
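# Net framing note, not part of this change: an application/mercurial-0.2
# body is
#   <1 byte: len(engine name)><engine name><compressed chunks...>
# so, for example, a zlib-compressed response begins with b'\x04zlib'.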
@@ -409,11 +435,11 @@
 
     def setresponse(code, contenttype, bodybytes=None, bodygen=None):
         if code == HTTP_OK:
-            res.status = '200 Script output follows'
+            res.status = b'200 Script output follows'
         else:
             res.status = hgwebcommon.statusmessage(code)
 
-        res.headers['Content-Type'] = contenttype
+        res.headers[b'Content-Type'] = contenttype
 
         if bodybytes is not None:
             res.setbodybytes(bodybytes)
@@ -421,16 +447,22 @@
             res.setbodygen(bodygen)
 
     if not wireprotov1server.commands.commandavailable(cmd, proto):
-        setresponse(HTTP_OK, HGERRTYPE,
-                    _('requested wire protocol command is not available over '
-                      'HTTP'))
+        setresponse(
+            HTTP_OK,
+            HGERRTYPE,
+            _(
+                b'requested wire protocol command is not available over '
+                b'HTTP'
+            ),
+        )
         return
 
     proto.checkperm(wireprotov1server.commands[cmd].permission)
 
     # Possibly handle a modern client wanting to switch protocols.
-    if (cmd == 'capabilities' and
-        processcapabilitieshandshake(repo, req, res, proto)):
+    if cmd == b'capabilities' and processcapabilitieshandshake(
+        repo, req, res, proto
+    ):
 
         return
 
@@ -448,7 +480,8 @@
         # This code for compression should not be streamres specific. It
         # is here because we only compress streamres at the moment.
         mediatype, engine, engineopts = _httpresponsetype(
-            repo.ui, proto, rsp.prefer_uncompressed)
+            repo.ui, proto, rsp.prefer_uncompressed
+        )
         gen = engine.compressstream(gen, engineopts)
 
         if mediatype == HGTYPE2:
@@ -456,38 +489,43 @@
 
         setresponse(HTTP_OK, mediatype, bodygen=gen)
     elif isinstance(rsp, wireprototypes.pushres):
-        rsp = '%d\n%s' % (rsp.res, rsp.output)
+        rsp = b'%d\n%s' % (rsp.res, rsp.output)
         setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
     elif isinstance(rsp, wireprototypes.pusherr):
-        rsp = '0\n%s\n' % rsp.res
+        rsp = b'0\n%s\n' % rsp.res
         res.drain = True
         setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
     elif isinstance(rsp, wireprototypes.ooberror):
         setresponse(HTTP_OK, HGERRTYPE, bodybytes=rsp.message)
     else:
-        raise error.ProgrammingError('hgweb.protocol internal failure', rsp)
+        raise error.ProgrammingError(b'hgweb.protocol internal failure', rsp)
+
 
 def _sshv1respondbytes(fout, value):
     """Send a bytes response for protocol version 1."""
-    fout.write('%d\n' % len(value))
+    fout.write(b'%d\n' % len(value))
     fout.write(value)
     fout.flush()
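# Example of the version 1 framing this produces, not part of this
# change: a decimal length line followed by exactly that many bytes.
#
#   _sshv1respondbytes(fout, b'capabilities')  # writes b'12\ncapabilities'
#   _sshv1respondbytes(fout, b'')              # writes b'0\n'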
 
+
 def _sshv1respondstream(fout, source):
     write = fout.write
     for chunk in source.gen:
         write(chunk)
     fout.flush()
 
+
 def _sshv1respondooberror(fout, ferr, rsp):
     ferr.write(b'%s\n-\n' % rsp)
     ferr.flush()
     fout.write(b'\n')
     fout.flush()
 
+
 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
 class sshv1protocolhandler(object):
     """Handler for requests services via version 1 of SSH protocol."""
+
     def __init__(self, ui, fin, fout):
         self._ui = ui
         self._fin = fin
@@ -505,15 +543,15 @@
             argline = self._fin.readline()[:-1]
             arg, l = argline.split()
             if arg not in keys:
-                raise error.Abort(_("unexpected parameter %r") % arg)
-            if arg == '*':
+                raise error.Abort(_(b"unexpected parameter %r") % arg)
+            if arg == b'*':
                 star = {}
                 for k in pycompat.xrange(int(l)):
                     argline = self._fin.readline()[:-1]
                     arg, l = argline.split()
                     val = self._fin.read(int(l))
                     star[arg] = val
-                data['*'] = star
+                data[b'*'] = star
             else:
                 val = self._fin.read(int(l))
                 data[arg] = val
@@ -543,8 +581,8 @@
         yield None
 
     def client(self):
-        client = encoding.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
-        return 'remote:ssh:' + client
+        client = encoding.environ.get(b'SSH_CLIENT', b'').split(b' ', 1)[0]
+        return b'remote:ssh:' + client
 
     def addcapabilities(self, repo, caps):
         if self.name == wireprototypes.SSHV1:
@@ -555,6 +593,7 @@
     def checkperm(self, perm):
         pass
 
+
 class sshv2protocolhandler(sshv1protocolhandler):
     """Protocol handler for version 2 of the SSH protocol."""
 
@@ -565,6 +604,7 @@
     def addcapabilities(self, repo, caps):
         return caps
 
+
 def _runsshserver(ui, repo, fin, fout, ev):
     # This function operates like a state machine of sorts. The following
     # states are defined:
@@ -618,35 +658,38 @@
     #    This happens by default since protocol version 2 is the same as
     #    version 1 except for the handshake.
 
-    state = 'protov1-serving'
+    state = b'protov1-serving'
     proto = sshv1protocolhandler(ui, fin, fout)
     protoswitched = False
 
     while not ev.is_set():
-        if state == 'protov1-serving':
+        if state == b'protov1-serving':
             # Commands are issued on new lines.
             request = fin.readline()[:-1]
 
             # Empty lines signal to terminate the connection.
             if not request:
-                state = 'shutdown'
+                state = b'shutdown'
                 continue
 
             # It looks like a protocol upgrade request. Transition state to
             # handle it.
             if request.startswith(b'upgrade '):
                 if protoswitched:
-                    _sshv1respondooberror(fout, ui.ferr,
-                                          b'cannot upgrade protocols multiple '
-                                          b'times')
-                    state = 'shutdown'
+                    _sshv1respondooberror(
+                        fout,
+                        ui.ferr,
+                        b'cannot upgrade protocols multiple times',
+                    )
+                    state = b'shutdown'
                     continue
 
-                state = 'upgrade-initial'
+                state = b'upgrade-initial'
                 continue
 
             available = wireprotov1server.commands.commandavailable(
-                request, proto)
+                request, proto
+            )
 
             # This command isn't available. Send an empty response and go
             # back to waiting for a new command.
@@ -655,6 +698,8 @@
                 continue
 
             rsp = wireprotov1server.dispatch(repo, proto, request)
+            repo.ui.fout.flush()
+            repo.ui.ferr.flush()
 
             if isinstance(rsp, bytes):
                 _sshv1respondbytes(fout, rsp)
@@ -672,15 +717,17 @@
             elif isinstance(rsp, wireprototypes.ooberror):
                 _sshv1respondooberror(fout, ui.ferr, rsp.message)
             else:
-                raise error.ProgrammingError('unhandled response type from '
-                                             'wire protocol command: %s' % rsp)
+                raise error.ProgrammingError(
+                    b'unhandled response type from '
+                    b'wire protocol command: %s' % rsp
+                )
 
         # For now, protocol version 2 serving just goes back to version 1.
-        elif state == 'protov2-serving':
-            state = 'protov1-serving'
+        elif state == b'protov2-serving':
+            state = b'protov1-serving'
             continue
 
-        elif state == 'upgrade-initial':
+        elif state == b'upgrade-initial':
             # We should never transition into this state if we've switched
             # protocols.
             assert not protoswitched
@@ -694,20 +741,20 @@
                 token, caps = request.split(b' ')[1:]
             except ValueError:
                 _sshv1respondbytes(fout, b'')
-                state = 'protov1-serving'
+                state = b'protov1-serving'
                 continue
 
             # Send empty response if we don't support upgrading protocols.
-            if not ui.configbool('experimental', 'sshserver.support-v2'):
+            if not ui.configbool(b'experimental', b'sshserver.support-v2'):
                 _sshv1respondbytes(fout, b'')
-                state = 'protov1-serving'
+                state = b'protov1-serving'
                 continue
 
             try:
                 caps = urlreq.parseqs(caps)
             except ValueError:
                 _sshv1respondbytes(fout, b'')
-                state = 'protov1-serving'
+                state = b'protov1-serving'
                 continue
 
             # We don't see an upgrade request to protocol version 2. Ignore
             # the upgrade request.
@@ -715,15 +762,15 @@
             wantedprotos = caps.get(b'proto', [b''])[0]
             if SSHV2 not in wantedprotos:
                 _sshv1respondbytes(fout, b'')
-                state = 'protov1-serving'
+                state = b'protov1-serving'
                 continue
 
             # It looks like we can honor this upgrade request to protocol 2.
             # Filter the rest of the handshake protocol request lines.
-            state = 'upgrade-v2-filter-legacy-handshake'
+            state = b'upgrade-v2-filter-legacy-handshake'
             continue
 
-        elif state == 'upgrade-v2-filter-legacy-handshake':
+        elif state == b'upgrade-v2-filter-legacy-handshake':
             # Client should have sent legacy handshake after an ``upgrade``
             # request. Expected lines:
             #
@@ -737,11 +784,13 @@
                 request = fin.readline()[:-1]
 
                 if request != line:
-                    _sshv1respondooberror(fout, ui.ferr,
-                                          b'malformed handshake protocol: '
-                                          b'missing %s' % line)
+                    _sshv1respondooberror(
+                        fout,
+                        ui.ferr,
+                        b'malformed handshake protocol: missing %s' % line,
+                    )
                     ok = False
-                    state = 'shutdown'
+                    state = b'shutdown'
                     break
 
             if not ok:
@@ -749,16 +798,19 @@
 
             request = fin.read(81)
             if request != b'%s-%s' % (b'0' * 40, b'0' * 40):
-                _sshv1respondooberror(fout, ui.ferr,
-                                      b'malformed handshake protocol: '
-                                      b'missing between argument value')
-                state = 'shutdown'
+                _sshv1respondooberror(
+                    fout,
+                    ui.ferr,
+                    b'malformed handshake protocol: '
+                    b'missing between argument value',
+                )
+                state = b'shutdown'
                 continue
 
-            state = 'upgrade-v2-finish'
+            state = b'upgrade-v2-finish'
             continue
 
-        elif state == 'upgrade-v2-finish':
+        elif state == b'upgrade-v2-finish':
             # Send the upgrade response.
             fout.write(b'upgraded %s %s\n' % (token, SSHV2))
             servercaps = wireprotov1server.capabilities(repo, proto)
@@ -769,15 +821,17 @@
             proto = sshv2protocolhandler(ui, fin, fout)
             protoswitched = True
 
-            state = 'protov2-serving'
+            state = b'protov2-serving'
             continue
 
-        elif state == 'shutdown':
+        elif state == b'shutdown':
             break
 
         else:
-            raise error.ProgrammingError('unhandled ssh server state: %s' %
-                                         state)
+            raise error.ProgrammingError(
+                b'unhandled ssh server state: %s' % state
+            )
+
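# Transcript sketch, not part of this change, of the upgrade path
# implemented above (C: client, S: server; the legacy handshake lines
# checked between the upgrade request and the ``between`` argument are
# elided in this hunk):
#
#   C: upgrade <token> proto=exp-ssh-v2-0003
#   C: ...legacy hello/between handshake lines...
#   C: 000...000-000...000   (40 zeros, '-', 40 zeros: the 81 bytes read)
#   S: upgraded <token> exp-ssh-v2-0003
#   S: ...capabilities served over the version 1 framing...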
 
 class sshserver(object):
     def __init__(self, ui, repo, logfh=None):
@@ -788,9 +842,11 @@
         # Log write I/O to stdout and stderr if configured.
         if logfh:
             self._fout = util.makeloggingfileobject(
-                logfh, self._fout, 'o', logdata=True)
+                logfh, self._fout, b'o', logdata=True
+            )
             ui.ferr = util.makeloggingfileobject(
-                logfh, ui.ferr, 'e', logdata=True)
+                logfh, ui.ferr, b'e', logdata=True
+            )
 
     def serve_forever(self):
         self.serveuntil(threading.Event())
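
For orientation, the upgrade handshake the state machine above implements
looks roughly like the following exchange (``c:`` client, ``s:`` server; the
token is chosen by the client, and the final ``between`` argument is the
81-byte all-zeros pair validated above). This is a sketch reconstructed from
the states in this file, not a verbatim transcript::

   c: upgrade <token> proto=exp-ssh-v2-0003
   c: hello
   c: between
   c: pairs 81
   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   s: upgraded <token> exp-ssh-v2-0003
   s: capabilities: <server capabilities>

On the wire the server's capabilities line is length-prefixed.
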
--- a/mercurial/wireprototypes.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/wireprototypes.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,83 +10,80 @@
     hex,
 )
 from .i18n import _
-from .thirdparty import (
-    attr,
-)
+from .pycompat import getattr
+from .thirdparty import attr
 from . import (
     error,
     util,
 )
-from .utils import (
-    compression,
-    interfaceutil,
-)
+from .interfaces import util as interfaceutil
+from .utils import compression
 
 # Names of the SSH protocol implementations.
-SSHV1 = 'ssh-v1'
+SSHV1 = b'ssh-v1'
 # These are advertised over the wire. Increment the counters at the end
 # to reflect BC breakages.
-SSHV2 = 'exp-ssh-v2-0003'
-HTTP_WIREPROTO_V2 = 'exp-http-v2-0003'
+SSHV2 = b'exp-ssh-v2-0003'
+HTTP_WIREPROTO_V2 = b'exp-http-v2-0003'
 
-NARROWCAP = 'exp-narrow-1'
-ELLIPSESCAP1 = 'exp-ellipses-1'
-ELLIPSESCAP = 'exp-ellipses-2'
+NARROWCAP = b'exp-narrow-1'
+ELLIPSESCAP1 = b'exp-ellipses-1'
+ELLIPSESCAP = b'exp-ellipses-2'
 SUPPORTED_ELLIPSESCAP = (ELLIPSESCAP1, ELLIPSESCAP)
 
 # All available wire protocol transports.
 TRANSPORTS = {
-    SSHV1: {
-        'transport': 'ssh',
-        'version': 1,
-    },
+    SSHV1: {b'transport': b'ssh', b'version': 1,},
     SSHV2: {
-        'transport': 'ssh',
+        b'transport': b'ssh',
         # TODO mark as version 2 once all commands are implemented.
-        'version': 1,
+        b'version': 1,
     },
-    'http-v1': {
-        'transport': 'http',
-        'version': 1,
-    },
-    HTTP_WIREPROTO_V2: {
-        'transport': 'http',
-        'version': 2,
-    }
+    b'http-v1': {b'transport': b'http', b'version': 1,},
+    HTTP_WIREPROTO_V2: {b'transport': b'http', b'version': 2,},
 }
 
+
 class bytesresponse(object):
     """A wire protocol response consisting of raw bytes."""
+
     def __init__(self, data):
         self.data = data
 
+
 class ooberror(object):
     """wireproto reply: failure of a batch of operation
 
     Something failed during a batch call. The error message is stored in
     `self.message`.
     """
+
     def __init__(self, message):
         self.message = message
 
+
 class pushres(object):
     """wireproto reply: success with simple integer return
 
     The call was successful and returned an integer contained in `self.res`.
     """
+
     def __init__(self, res, output):
         self.res = res
         self.output = output
 
+
 class pusherr(object):
     """wireproto reply: failure
 
     The call failed. The `self.res` attribute contains the error message.
     """
+
     def __init__(self, res, output):
         self.res = res
         self.output = output
 
+
 class streamres(object):
     """wireproto reply: binary stream
 
@@ -98,10 +95,12 @@
     uncompressable and that the stream should therefore use the ``none``
     engine.
     """
+
     def __init__(self, gen=None, prefer_uncompressed=False):
         self.gen = gen
         self.prefer_uncompressed = prefer_uncompressed
 
+
 class streamreslegacy(object):
     """wireproto reply: uncompressed binary stream
 
@@ -112,63 +111,73 @@
     Like ``streamres``, but sends uncompressed data for "version 1" clients
     using the application/mercurial-0.1 media type.
     """
+
     def __init__(self, gen=None):
         self.gen = gen
 
+
 # list of nodes encoding / decoding
-def decodelist(l, sep=' '):
+def decodelist(l, sep=b' '):
     if l:
-        return [bin(v) for v in  l.split(sep)]
+        return [bin(v) for v in l.split(sep)]
     return []
 
-def encodelist(l, sep=' '):
+
+def encodelist(l, sep=b' '):
     try:
         return sep.join(map(hex, l))
     except TypeError:
         raise
 
+
 # batched call argument encoding
 
+
 def escapebatcharg(plain):
-    return (plain
-            .replace(':', ':c')
-            .replace(',', ':o')
-            .replace(';', ':s')
-            .replace('=', ':e'))
+    return (
+        plain.replace(b':', b':c')
+        .replace(b',', b':o')
+        .replace(b';', b':s')
+        .replace(b'=', b':e')
+    )
+
 
 def unescapebatcharg(escaped):
-    return (escaped
-            .replace(':e', '=')
-            .replace(':s', ';')
-            .replace(':o', ',')
-            .replace(':c', ':'))
+    return (
+        escaped.replace(b':e', b'=')
+        .replace(b':s', b';')
+        .replace(b':o', b',')
+        .replace(b':c', b':')
+    )
+
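
Because ``:`` is rewritten first (and restored last), the two helpers above
round-trip cleanly; an illustrative session::

   >>> escapebatcharg(b'key=a,b;c:d')
   b'key:ea:ob:sc:cd'
   >>> unescapebatcharg(b'key:ea:ob:sc:cd')
   b'key=a,b;c:d'
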
 
 # mapping of options accepted by getbundle and their types
 #
-# Meant to be extended by extensions. It is extensions responsibility to ensure
-# such options are properly processed in exchange.getbundle.
+# Meant to be extended by extensions. It is the extension's responsibility to
+# ensure such options are properly processed in exchange.getbundle.
 #
 # supported types are:
 #
-# :nodes: list of binary nodes
-# :csv:   list of comma-separated values
-# :scsv:  list of comma-separated values return as set
+# :nodes: list of binary nodes, transmitted as space-separated hex nodes
+# :csv:   list of values, transmitted as comma-separated values
+# :scsv:  set of values, transmitted as comma-separated values
 # :plain: string with no transformation needed.
 GETBUNDLE_ARGUMENTS = {
-    'heads':  'nodes',
-    'bookmarks': 'boolean',
-    'common': 'nodes',
-    'obsmarkers': 'boolean',
-    'phases': 'boolean',
-    'bundlecaps': 'scsv',
-    'listkeys': 'csv',
-    'cg': 'boolean',
-    'cbattempted': 'boolean',
-    'stream': 'boolean',
-    'includepats': 'csv',
-    'excludepats': 'csv',
+    b'heads': b'nodes',
+    b'bookmarks': b'boolean',
+    b'common': b'nodes',
+    b'obsmarkers': b'boolean',
+    b'phases': b'boolean',
+    b'bundlecaps': b'scsv',
+    b'listkeys': b'csv',
+    b'cg': b'boolean',
+    b'cbattempted': b'boolean',
+    b'stream': b'boolean',
+    b'includepats': b'csv',
+    b'excludepats': b'csv',
 }
 
+
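
Illustrating the types above, a client encodes each argument before
transmission; the values here are hypothetical and the hex is shortened for
display::

   heads=24d1... f8a3...       # nodes: space-separated hex
   listkeys=bookmarks,phases   # csv: comma-separated values
   cg=1                        # boolean: '0' or '1'
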
 class baseprotocolhandler(interfaceutil.Interface):
     """Abstract base class for wire protocol handlers.
 
@@ -182,7 +191,8 @@
         """The name of the protocol implementation.
 
         Used for uniquely identifying the transport type.
-        """)
+        """
+    )
 
     def getargs(args):
         """return the value for arguments in <args>
@@ -237,10 +247,19 @@
         in a protocol specific manner.
         """
 
+
 class commandentry(object):
     """Represents a declared wire protocol command."""
-    def __init__(self, func, args='', transports=None,
-                 permission='push', cachekeyfn=None, extracapabilitiesfn=None):
+
+    def __init__(
+        self,
+        func,
+        args=b'',
+        transports=None,
+        permission=b'push',
+        cachekeyfn=None,
+        extracapabilitiesfn=None,
+    ):
         self.func = func
         self.args = args
         self.transports = transports or set()
@@ -256,8 +275,12 @@
         data not captured by the 2-tuple and a new instance containing
         the union of the two objects is returned.
         """
-        return commandentry(func, args=args, transports=set(self.transports),
-                            permission=self.permission)
+        return commandentry(
+            func,
+            args=args,
+            transports=set(self.transports),
+            permission=self.permission,
+        )
 
     # Old code treats instances as 2-tuples. So expose that interface.
     def __iter__(self):
@@ -270,7 +293,8 @@
         elif i == 1:
             return self.args
         else:
-            raise IndexError('can only access elements 0 and 1')
+            raise IndexError(b'can only access elements 0 and 1')
+
 
 class commanddict(dict):
     """Container for registered wire protocol commands.
@@ -278,13 +302,14 @@
     It behaves like a dict. But __setitem__ is overwritten to allow silent
     coercion of values from 2-tuples for API compatibility.
     """
+
     def __setitem__(self, k, v):
         if isinstance(v, commandentry):
             pass
         # Cast 2-tuples to commandentry instances.
         elif isinstance(v, tuple):
             if len(v) != 2:
-                raise ValueError('command tuples must have exactly 2 elements')
+                raise ValueError(b'command tuples must have exactly 2 elements')
 
             # It is common for extensions to wrap wire protocol commands via
             # e.g. ``wireproto.commands[x] = (newfn, args)``. Because callers
@@ -294,12 +319,17 @@
                 v = self[k]._merge(v[0], v[1])
             else:
                 # Use default values from @wireprotocommand.
-                v = commandentry(v[0], args=v[1],
-                                 transports=set(TRANSPORTS),
-                                 permission='push')
+                v = commandentry(
+                    v[0],
+                    args=v[1],
+                    transports=set(TRANSPORTS),
+                    permission=b'push',
+                )
         else:
-            raise ValueError('command entries must be commandentry instances '
-                             'or 2-tuples')
+            raise ValueError(
+                b'command entries must be commandentry instances '
+                b'or 2-tuples'
+            )
 
         return super(commanddict, self).__setitem__(k, v)
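
A hedged sketch of the extension idiom this coercion supports (``origfn``
and ``wrapped`` are hypothetical names)::

   origfn, spec = commands[b'heads']   # commandentry unpacks like a 2-tuple

   def wrapped(repo, proto, *args):
       # extension logic here, then delegate
       return origfn(repo, proto, *args)

   # The tuple is merged back into the existing commandentry, keeping its
   # transports and permission.
   commands[b'heads'] = (wrapped, spec)
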
 
@@ -317,6 +347,7 @@
 
         return True
 
+
 def supportedcompengines(ui, role):
     """Obtain the list of supported compression engines for a request."""
     assert role in (compression.CLIENTROLE, compression.SERVERROLE)
@@ -325,24 +356,28 @@
 
     # Allow config to override default list and ordering.
     if role == compression.SERVERROLE:
-        configengines = ui.configlist('server', 'compressionengines')
-        config = 'server.compressionengines'
+        configengines = ui.configlist(b'server', b'compressionengines')
+        config = b'server.compressionengines'
     else:
         # This is currently implemented mainly to facilitate testing. In most
         # cases, the server should be in charge of choosing a compression engine
         # because a server has the most to lose from a sub-optimal choice. (e.g.
         # CPU DoS due to an expensive engine or a network DoS due to poor
         # compression ratio).
-        configengines = ui.configlist('experimental',
-                                      'clientcompressionengines')
-        config = 'experimental.clientcompressionengines'
+        configengines = ui.configlist(
+            b'experimental', b'clientcompressionengines'
+        )
+        config = b'experimental.clientcompressionengines'
 
     # No explicit config. Filter out the ones that aren't supposed to be
     # advertised and return default ordering.
     if not configengines:
-        attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
-        return [e for e in compengines
-                if getattr(e.wireprotosupport(), attr) > 0]
+        attr = (
+            b'serverpriority' if role == util.SERVERROLE else b'clientpriority'
+        )
+        return [
+            e for e in compengines if getattr(e.wireprotosupport(), attr) > 0
+        ]
 
     # If compression engines are listed in the config, assume there is a good
     # reason for it (like server operators wanting to achieve specific
@@ -351,21 +386,30 @@
     validnames = set(e.name() for e in compengines)
     invalidnames = set(e for e in configengines if e not in validnames)
     if invalidnames:
-        raise error.Abort(_('invalid compression engine defined in %s: %s') %
-                          (config, ', '.join(sorted(invalidnames))))
+        raise error.Abort(
+            _(b'invalid compression engine defined in %s: %s')
+            % (config, b', '.join(sorted(invalidnames)))
+        )
 
     compengines = [e for e in compengines if e.name() in configengines]
-    compengines = sorted(compengines,
-                         key=lambda e: configengines.index(e.name()))
+    compengines = sorted(
+        compengines, key=lambda e: configengines.index(e.name())
+    )
 
     if not compengines:
-        raise error.Abort(_('%s config option does not specify any known '
-                            'compression engines') % config,
-                          hint=_('usable compression engines: %s') %
-                          ', '.sorted(validnames))
+        raise error.Abort(
+            _(
+                b'%s config option does not specify any known '
+                b'compression engines'
+            )
+            % config,
+            hint=_(b'usable compression engines: %s')
+            % b', '.join(sorted(validnames)),
+        )
 
     return compengines
 
+
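
In configuration terms, a server operator could pin the advertised engines
and their order like this (names must match registered engines, otherwise
the ``Abort`` above fires)::

   [server]
   compressionengines = zstd,zlib
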
 @attr.s
 class encodedresponse(object):
     """Represents response data that is already content encoded.
@@ -376,8 +420,10 @@
     wire. If commands emit an object of this type, the encoding step is bypassed
     and the content from this object is used instead.
     """
+
     data = attr.ib()
 
+
 @attr.s
 class alternatelocationresponse(object):
     """Represents a response available at an alternate location.
@@ -387,6 +433,7 @@
 
     Only compatible with wire protocol version 2.
     """
+
     url = attr.ib()
     mediatype = attr.ib()
     size = attr.ib(default=None)
@@ -395,6 +442,7 @@
     serverdercerts = attr.ib(default=None)
     servercadercerts = attr.ib(default=None)
 
+
 @attr.s
 class indefinitebytestringresponse(object):
     """Represents an object to be encoded to an indefinite length bytestring.
@@ -402,4 +450,5 @@
     Instances are initialized from an iterable of chunks, with each chunk being
     a bytes instance.
     """
+
     chunks = attr.ib()
--- a/mercurial/wireprotov1peer.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/wireprotov1peer.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,8 +12,10 @@
 import weakref
 
 from .i18n import _
-from .node import (
-    bin,
+from .node import bin
+from .pycompat import (
+    getattr,
+    setattr,
 )
 from . import (
     bundle2,
@@ -22,16 +24,17 @@
     error,
     pushkey as pushkeymod,
     pycompat,
-    repository,
     util,
     wireprototypes,
 )
-from .utils import (
-    interfaceutil,
+from .interfaces import (
+    repository,
+    util as interfaceutil,
 )
 
 urlreq = util.urlreq
 
+
 def batchable(f):
     '''annotation for batchable methods
 
@@ -54,26 +57,31 @@
     which is used by remotebatch to split the call into separate encoding and
     decoding phases.
     '''
+
     def plain(*args, **opts):
         batchable = f(*args, **opts)
         encargsorres, encresref = next(batchable)
         if not encresref:
-            return encargsorres # a local result in this case
+            return encargsorres  # a local result in this case
         self = args[0]
         cmd = pycompat.bytesurl(f.__name__)  # ensure cmd is ascii bytestr
         encresref.set(self._submitone(cmd, encargsorres))
         return next(batchable)
+
     setattr(plain, 'batchable', f)
     setattr(plain, '__name__', f.__name__)
     return plain
 
+
 class future(object):
     '''placeholder for a value to be set later'''
+
     def set(self, value):
-        if util.safehasattr(self, 'value'):
-            raise error.RepoError("future is already set")
+        if util.safehasattr(self, b'value'):
+            raise error.RepoError(b"future is already set")
         self.value = value
 
+
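
A minimal sketch of the coroutine shape ``@batchable`` expects, with
hypothetical ``encode``/``decode`` helpers standing in for real argument
handling::

   @batchable
   def sample(self, key):
       f = future()
       # First yield: encoded argument dict plus the future to fill.
       yield {b'key': encode(key)}, f
       # Second yield: decode the raw response injected into the future.
       yield decode(f.value)
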
 def encodebatchcmds(req):
     """Return a ``cmds`` argument value for the ``batch`` command."""
     escapearg = wireprototypes.escapebatcharg
@@ -85,11 +93,14 @@
         # servers.
         assert all(escapearg(k) == k for k in argsdict)
 
-        args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
-                        for k, v in argsdict.iteritems())
-        cmds.append('%s %s' % (op, args))
+        args = b','.join(
+            b'%s=%s' % (escapearg(k), escapearg(v))
+            for k, v in pycompat.iteritems(argsdict)
+        )
+        cmds.append(b'%s %s' % (op, args))
 
-    return ';'.join(cmds)
+    return b';'.join(cmds)
+
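
For instance, batching an argument-less ``heads`` with a ``known`` call
yields a ``cmds`` string of the form (hex shortened)::

   heads ;known nodes=24d1... f8a3...

Values may contain spaces; only ``:``, ``,``, ``;`` and ``=`` need the
escaping above.
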
 
 class unsentfuture(pycompat.futures.Future):
     """A Future variation to represent an unsent command.
@@ -111,6 +122,7 @@
         # on that.
         return self.result(timeout)
 
+
 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class peerexecutor(object):
     def __init__(self, peer):
@@ -130,20 +142,23 @@
 
     def callcommand(self, command, args):
         if self._sent:
-            raise error.ProgrammingError('callcommand() cannot be used '
-                                         'after commands are sent')
+            raise error.ProgrammingError(
+                b'callcommand() cannot be used after commands are sent'
+            )
 
         if self._closed:
-            raise error.ProgrammingError('callcommand() cannot be used '
-                                         'after close()')
+            raise error.ProgrammingError(
+                b'callcommand() cannot be used after close()'
+            )
 
         # Commands are dispatched through methods on the peer.
         fn = getattr(self._peer, pycompat.sysstr(command), None)
 
         if not fn:
             raise error.ProgrammingError(
-                'cannot call command %s: method of same name not available '
-                'on peer' % command)
+                b'cannot call command %s: method of same name not available '
+                b'on peer' % command
+            )
 
         # Commands are either batchable or they aren't. If a command
         # isn't batchable, we send it immediately because the executor
@@ -168,8 +183,9 @@
         else:
             if self._calls:
                 raise error.ProgrammingError(
-                    '%s is not batchable and cannot be called on a command '
-                    'executor along with other commands' % command)
+                    b'%s is not batchable and cannot be called on a command '
+                    b'executor along with other commands' % command
+                )
 
             f = addcall()
 
@@ -232,8 +248,9 @@
                 continue
 
             try:
-                batchable = fn.batchable(fn.__self__,
-                                         **pycompat.strkwargs(args))
+                batchable = fn.batchable(
+                    fn.__self__, **pycompat.strkwargs(args)
+                )
             except Exception:
                 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
                 return
@@ -263,8 +280,9 @@
         # concurrent.futures already solves these problems and its thread pool
         # executor has minimal overhead. So we use it.
         self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
-        self._responsef = self._responseexecutor.submit(self._readbatchresponse,
-                                                        states, wireresults)
+        self._responsef = self._responseexecutor.submit(
+            self._readbatchresponse, states, wireresults
+        )
 
     def close(self):
         self.sendcommands()
@@ -290,8 +308,11 @@
             # errored. Otherwise a result() could wait indefinitely.
             for f in self._futures:
                 if not f.done():
-                    f.set_exception(error.ResponseError(
-                        _('unfulfilled batch command response')))
+                    f.set_exception(
+                        error.ResponseError(
+                            _(b'unfulfilled batch command response')
+                        )
+                    )
 
             self._futures = None
 
@@ -312,8 +333,10 @@
             else:
                 f.set_result(result)
 
-@interfaceutil.implementer(repository.ipeercommands,
-                           repository.ipeerlegacycommands)
+
+@interfaceutil.implementer(
+    repository.ipeercommands, repository.ipeerlegacycommands
+)
 class wirepeer(repository.peer):
     """Client-side interface for communicating with a peer repository.
 
@@ -322,22 +345,23 @@
     See also httppeer.py and sshpeer.py for protocol-specific
     implementations of this interface.
     """
+
     def commandexecutor(self):
         return peerexecutor(self)
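
Typical usage goes through the context-manager protocol of the executor; a
small sketch against a hypothetical connected ``peer``::

   with peer.commandexecutor() as e:
       f = e.callcommand(b'lookup', {b'key': b'tip'})
   # Exiting the block sends pending commands; results come from futures.
   node = f.result()
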
 
     # Begin of ipeercommands interface.
 
     def clonebundles(self):
-        self.requirecap('clonebundles', _('clone bundles'))
-        return self._call('clonebundles')
+        self.requirecap(b'clonebundles', _(b'clone bundles'))
+        return self._call(b'clonebundles')
 
     @batchable
     def lookup(self, key):
-        self.requirecap('lookup', _('look up remote revision'))
+        self.requirecap(b'lookup', _(b'look up remote revision'))
         f = future()
-        yield {'key': encoding.fromlocal(key)}, f
+        yield {b'key': encoding.fromlocal(key)}, f
         d = f.value
-        success, data = d[:-1].split(" ", 1)
+        success, data = d[:-1].split(b" ", 1)
         if int(success):
             yield bin(data)
         else:
@@ -351,17 +375,17 @@
         try:
             yield wireprototypes.decodelist(d[:-1])
         except ValueError:
-            self._abort(error.ResponseError(_("unexpected response:"), d))
+            self._abort(error.ResponseError(_(b"unexpected response:"), d))
 
     @batchable
     def known(self, nodes):
         f = future()
-        yield {'nodes': wireprototypes.encodelist(nodes)}, f
+        yield {b'nodes': wireprototypes.encodelist(nodes)}, f
         d = f.value
         try:
             yield [bool(int(b)) for b in pycompat.iterbytestr(d)]
         except ValueError:
-            self._abort(error.ResponseError(_("unexpected response:"), d))
+            self._abort(error.ResponseError(_(b"unexpected response:"), d))
 
     @batchable
     def branchmap(self):
@@ -371,79 +395,83 @@
         try:
             branchmap = {}
             for branchpart in d.splitlines():
-                branchname, branchheads = branchpart.split(' ', 1)
+                branchname, branchheads = branchpart.split(b' ', 1)
                 branchname = encoding.tolocal(urlreq.unquote(branchname))
                 branchheads = wireprototypes.decodelist(branchheads)
                 branchmap[branchname] = branchheads
             yield branchmap
         except TypeError:
-            self._abort(error.ResponseError(_("unexpected response:"), d))
+            self._abort(error.ResponseError(_(b"unexpected response:"), d))
 
     @batchable
     def listkeys(self, namespace):
-        if not self.capable('pushkey'):
+        if not self.capable(b'pushkey'):
             yield {}, None
         f = future()
-        self.ui.debug('preparing listkeys for "%s"\n' % namespace)
-        yield {'namespace': encoding.fromlocal(namespace)}, f
+        self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
+        yield {b'namespace': encoding.fromlocal(namespace)}, f
         d = f.value
-        self.ui.debug('received listkey for "%s": %i bytes\n'
-                      % (namespace, len(d)))
+        self.ui.debug(
+            b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
+        )
         yield pushkeymod.decodekeys(d)
 
     @batchable
     def pushkey(self, namespace, key, old, new):
-        if not self.capable('pushkey'):
+        if not self.capable(b'pushkey'):
             yield False, None
         f = future()
-        self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
-        yield {'namespace': encoding.fromlocal(namespace),
-               'key': encoding.fromlocal(key),
-               'old': encoding.fromlocal(old),
-               'new': encoding.fromlocal(new)}, f
+        self.ui.debug(b'preparing pushkey for "%s:%s"\n' % (namespace, key))
+        yield {
+            b'namespace': encoding.fromlocal(namespace),
+            b'key': encoding.fromlocal(key),
+            b'old': encoding.fromlocal(old),
+            b'new': encoding.fromlocal(new),
+        }, f
         d = f.value
-        d, output = d.split('\n', 1)
+        d, output = d.split(b'\n', 1)
         try:
             d = bool(int(d))
         except ValueError:
             raise error.ResponseError(
-                _('push failed (unexpected response):'), d)
+                _(b'push failed (unexpected response):'), d
+            )
         for l in output.splitlines(True):
-            self.ui.status(_('remote: '), l)
+            self.ui.status(_(b'remote: '), l)
         yield d
 
     def stream_out(self):
-        return self._callstream('stream_out')
+        return self._callstream(b'stream_out')
 
     def getbundle(self, source, **kwargs):
         kwargs = pycompat.byteskwargs(kwargs)
-        self.requirecap('getbundle', _('look up remote changes'))
+        self.requirecap(b'getbundle', _(b'look up remote changes'))
         opts = {}
-        bundlecaps = kwargs.get('bundlecaps') or set()
-        for key, value in kwargs.iteritems():
+        bundlecaps = kwargs.get(b'bundlecaps') or set()
+        for key, value in pycompat.iteritems(kwargs):
             if value is None:
                 continue
             keytype = wireprototypes.GETBUNDLE_ARGUMENTS.get(key)
             if keytype is None:
                 raise error.ProgrammingError(
-                    'Unexpectedly None keytype for key %s' % key)
-            elif keytype == 'nodes':
+                    b'Unexpectedly None keytype for key %s' % key
+                )
+            elif keytype == b'nodes':
                 value = wireprototypes.encodelist(value)
-            elif keytype == 'csv':
-                value = ','.join(value)
-            elif keytype == 'scsv':
-                value = ','.join(sorted(value))
-            elif keytype == 'boolean':
-                value = '%i' % bool(value)
-            elif keytype != 'plain':
-                raise KeyError('unknown getbundle option type %s'
-                               % keytype)
+            elif keytype == b'csv':
+                value = b','.join(value)
+            elif keytype == b'scsv':
+                value = b','.join(sorted(value))
+            elif keytype == b'boolean':
+                value = b'%i' % bool(value)
+            elif keytype != b'plain':
+                raise KeyError(b'unknown getbundle option type %s' % keytype)
             opts[key] = value
-        f = self._callcompressable("getbundle", **pycompat.strkwargs(opts))
-        if any((cap.startswith('HG2') for cap in bundlecaps)):
+        f = self._callcompressable(b"getbundle", **pycompat.strkwargs(opts))
+        if any((cap.startswith(b'HG2') for cap in bundlecaps)):
             return bundle2.getunbundler(self.ui, f)
         else:
-            return changegroupmod.cg1unpacker(f, 'UN')
+            return changegroupmod.cg1unpacker(f, b'UN')
 
     def unbundle(self, bundle, heads, url):
         '''Send cg (a readable file-like object representing the
@@ -459,29 +487,30 @@
         visible to hooks.
         '''
 
-        if heads != ['force'] and self.capable('unbundlehash'):
+        if heads != [b'force'] and self.capable(b'unbundlehash'):
             heads = wireprototypes.encodelist(
-                ['hashed', hashlib.sha1(''.join(sorted(heads))).digest()])
+                [b'hashed', hashlib.sha1(b''.join(sorted(heads))).digest()]
+            )
         else:
             heads = wireprototypes.encodelist(heads)
 
-        if util.safehasattr(bundle, 'deltaheader'):
+        if util.safehasattr(bundle, b'deltaheader'):
             # this a bundle10, do the old style call sequence
-            ret, output = self._callpush("unbundle", bundle, heads=heads)
-            if ret == "":
-                raise error.ResponseError(
-                    _('push failed:'), output)
+            ret, output = self._callpush(b"unbundle", bundle, heads=heads)
+            if ret == b"":
+                raise error.ResponseError(_(b'push failed:'), output)
             try:
                 ret = int(ret)
             except ValueError:
                 raise error.ResponseError(
-                    _('push failed (unexpected response):'), ret)
+                    _(b'push failed (unexpected response):'), ret
+                )
 
             for l in output.splitlines(True):
-                self.ui.status(_('remote: '), l)
+                self.ui.status(_(b'remote: '), l)
         else:
             # bundle2 push. Send a stream, fetch a stream.
-            stream = self._calltwowaystream('unbundle', bundle, heads=heads)
+            stream = self._calltwowaystream(b'unbundle', bundle, heads=heads)
             ret = bundle2.getunbundler(self.ui, stream)
         return ret
 
@@ -491,39 +520,46 @@
 
     def branches(self, nodes):
         n = wireprototypes.encodelist(nodes)
-        d = self._call("branches", nodes=n)
+        d = self._call(b"branches", nodes=n)
         try:
             br = [tuple(wireprototypes.decodelist(b)) for b in d.splitlines()]
             return br
         except ValueError:
-            self._abort(error.ResponseError(_("unexpected response:"), d))
+            self._abort(error.ResponseError(_(b"unexpected response:"), d))
 
     def between(self, pairs):
-        batch = 8 # avoid giant requests
+        batch = 8  # avoid giant requests
         r = []
         for i in pycompat.xrange(0, len(pairs), batch):
-            n = " ".join([wireprototypes.encodelist(p, '-')
-                          for p in pairs[i:i + batch]])
-            d = self._call("between", pairs=n)
+            n = b" ".join(
+                [
+                    wireprototypes.encodelist(p, b'-')
+                    for p in pairs[i : i + batch]
+                ]
+            )
+            d = self._call(b"between", pairs=n)
             try:
-                r.extend(l and wireprototypes.decodelist(l) or []
-                         for l in d.splitlines())
+                r.extend(
+                    l and wireprototypes.decodelist(l) or []
+                    for l in d.splitlines()
+                )
             except ValueError:
-                self._abort(error.ResponseError(_("unexpected response:"), d))
+                self._abort(error.ResponseError(_(b"unexpected response:"), d))
         return r
 
     def changegroup(self, nodes, source):
         n = wireprototypes.encodelist(nodes)
-        f = self._callcompressable("changegroup", roots=n)
-        return changegroupmod.cg1unpacker(f, 'UN')
+        f = self._callcompressable(b"changegroup", roots=n)
+        return changegroupmod.cg1unpacker(f, b'UN')
 
     def changegroupsubset(self, bases, heads, source):
-        self.requirecap('changegroupsubset', _('look up remote changes'))
+        self.requirecap(b'changegroupsubset', _(b'look up remote changes'))
         bases = wireprototypes.encodelist(bases)
         heads = wireprototypes.encodelist(heads)
-        f = self._callcompressable("changegroupsubset",
-                                   bases=bases, heads=heads)
-        return changegroupmod.cg1unpacker(f, 'UN')
+        f = self._callcompressable(
+            b"changegroupsubset", bases=bases, heads=heads
+        )
+        return changegroupmod.cg1unpacker(f, b'UN')
 
     # End of ipeerlegacycommands interface.
 
@@ -533,28 +569,28 @@
         Returns an iterator of the raw responses from the server.
         """
         ui = self.ui
-        if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
-            ui.debug('devel-peer-request: batched-content\n')
+        if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
+            ui.debug(b'devel-peer-request: batched-content\n')
             for op, args in req:
-                msg = 'devel-peer-request:    - %s (%d arguments)\n'
+                msg = b'devel-peer-request:    - %s (%d arguments)\n'
                 ui.debug(msg % (op, len(args)))
 
         unescapearg = wireprototypes.unescapebatcharg
 
-        rsp = self._callstream("batch", cmds=encodebatchcmds(req))
+        rsp = self._callstream(b"batch", cmds=encodebatchcmds(req))
         chunk = rsp.read(1024)
         work = [chunk]
         while chunk:
-            while ';' not in chunk and chunk:
+            while b';' not in chunk and chunk:
                 chunk = rsp.read(1024)
                 work.append(chunk)
-            merged = ''.join(work)
-            while ';' in merged:
-                one, merged = merged.split(';', 1)
+            merged = b''.join(work)
+            while b';' in merged:
+                one, merged = merged.split(b';', 1)
                 yield unescapearg(one)
             chunk = rsp.read(1024)
             work = [merged, chunk]
-        yield unescapearg(''.join(work))
+        yield unescapearg(b''.join(work))
 
     def _submitone(self, op, args):
         return self._call(op, **pycompat.strkwargs(args))
@@ -566,7 +602,7 @@
             opts[r'three'] = three
         if four is not None:
             opts[r'four'] = four
-        return self._call('debugwireargs', one=one, two=two, **opts)
+        return self._call(b'debugwireargs', one=one, two=two, **opts)
 
     def _call(self, cmd, **args):
         """execute <cmd> on the server
--- a/mercurial/wireprotov1server.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/wireprotov1server.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,6 +15,7 @@
     hex,
     nullid,
 )
+from .pycompat import getattr
 
 from . import (
     bundle2,
@@ -38,10 +39,12 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
-bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
-bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
-                        'IncompatibleClient')
-bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
+bundle2requiredmain = _(b'incompatible Mercurial client; bundle2 required')
+bundle2requiredhint = _(
+    b'see https://www.mercurial-scm.org/wiki/IncompatibleClient'
+)
+bundle2required = b'%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
+
 
 def clientcompressionsupport(proto):
     """Returns a list of compression methods supported by the client.
@@ -51,12 +54,14 @@
     been announced, fallback to the default of zlib and uncompressed.
     """
     for cap in proto.getprotocaps():
-        if cap.startswith('comp='):
-            return cap[5:].split(',')
-    return ['zlib', 'none']
+        if cap.startswith(b'comp='):
+            return cap[5:].split(b',')
+    return [b'zlib', b'none']
+
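
The capability parsed here arrives via the ``protocaps`` command (defined
further below); a typical token looks like::

   comp=zstd,zlib,none
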
 
 # wire protocol command can either return a string or one of these classes.
 
+
 def getdispatchrepo(repo, proto, command):
     """Obtain the repo used for processing wire protocol commands.
 
@@ -64,9 +69,10 @@
     extensions that need commands to operate on different repo views under
     specialized circumstances.
     """
-    viewconfig = repo.ui.config('server', 'view')
+    viewconfig = repo.ui.config(b'server', b'view')
     return repo.filtered(viewconfig)
 
+
 def dispatch(repo, proto, command):
     repo = getdispatchrepo(repo, proto, command)
 
@@ -75,6 +81,7 @@
 
     return func(repo, proto, *args)
 
+
 def options(cmd, keys, others):
     opts = {}
     for k in keys:
@@ -82,10 +89,13 @@
             opts[k] = others[k]
             del others[k]
     if others:
-        procutil.stderr.write("warning: %s ignored unexpected arguments %s\n"
-                              % (cmd, ",".join(others)))
+        procutil.stderr.write(
+            b"warning: %s ignored unexpected arguments %s\n"
+            % (cmd, b",".join(others))
+        )
     return opts
 
+
 def bundle1allowed(repo, action):
     """Whether a bundle1 operation is allowed from the server.
 
@@ -97,27 +107,29 @@
     4. server.bundle1
     """
     ui = repo.ui
-    gd = 'generaldelta' in repo.requirements
+    gd = b'generaldelta' in repo.requirements
 
     if gd:
-        v = ui.configbool('server', 'bundle1gd.%s' % action)
+        v = ui.configbool(b'server', b'bundle1gd.%s' % action)
         if v is not None:
             return v
 
-    v = ui.configbool('server', 'bundle1.%s' % action)
+    v = ui.configbool(b'server', b'bundle1.%s' % action)
     if v is not None:
         return v
 
     if gd:
-        v = ui.configbool('server', 'bundle1gd')
+        v = ui.configbool(b'server', b'bundle1gd')
         if v is not None:
             return v
 
-    return ui.configbool('server', 'bundle1')
+    return ui.configbool(b'server', b'bundle1')
+
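
Expressed as configuration, the precedence above lets an operator, for
example, refuse bundle1 pushes while still serving bundle1 pulls::

   [server]
   bundle1.push = false
   bundle1.pull = true
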
 
 commands = wireprototypes.commanddict()
 
-def wireprotocommand(name, args=None, permission='push'):
+
+def wireprotocommand(name, args=None, permission=b'push'):
     """Decorator to declare a wire protocol command.
 
     ``name`` is the name of the wire protocol command being provided.
@@ -132,8 +144,9 @@
     because otherwise commands not declaring their permissions could modify
     a repository that is supposed to be read-only.
     """
-    transports = {k for k, v in wireprototypes.TRANSPORTS.items()
-                  if v['version'] == 1}
+    transports = {
+        k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 1
+    }
 
     # Because SSHv2 is a mirror of SSHv1, we allow "batch" commands through to
     # SSHv2.
@@ -141,57 +154,63 @@
     if name == b'batch':
         transports.add(wireprototypes.SSHV2)
 
-    if permission not in ('push', 'pull'):
-        raise error.ProgrammingError('invalid wire protocol permission; '
-                                     'got %s; expected "push" or "pull"' %
-                                     permission)
+    if permission not in (b'push', b'pull'):
+        raise error.ProgrammingError(
+            b'invalid wire protocol permission; '
+            b'got %s; expected "push" or "pull"' % permission
+        )
 
     if args is None:
-        args = ''
+        args = b''
 
     if not isinstance(args, bytes):
-        raise error.ProgrammingError('arguments for version 1 commands '
-                                     'must be declared as bytes')
+        raise error.ProgrammingError(
+            b'arguments for version 1 commands must be declared as bytes'
+        )
 
     def register(func):
         if name in commands:
-            raise error.ProgrammingError('%s command already registered '
-                                         'for version 1' % name)
+            raise error.ProgrammingError(
+                b'%s command already registered for version 1' % name
+            )
         commands[name] = wireprototypes.commandentry(
-            func, args=args, transports=transports, permission=permission)
+            func, args=args, transports=transports, permission=permission
+        )
 
         return func
+
     return register
 
+
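
A hedged sketch of declaring a new version-1 command with this decorator
(``mycmd`` is hypothetical)::

   @wireprotocommand(b'mycmd', b'key', permission=b'pull')
   def mycmd(repo, proto, key):
       return wireprototypes.bytesresponse(b'value for %s\n' % key)
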
 # TODO define a more appropriate permissions type to use for this.
-@wireprotocommand('batch', 'cmds *', permission='pull')
+@wireprotocommand(b'batch', b'cmds *', permission=b'pull')
 def batch(repo, proto, cmds, others):
     unescapearg = wireprototypes.unescapebatcharg
     res = []
-    for pair in cmds.split(';'):
-        op, args = pair.split(' ', 1)
+    for pair in cmds.split(b';'):
+        op, args = pair.split(b' ', 1)
         vals = {}
-        for a in args.split(','):
+        for a in args.split(b','):
             if a:
-                n, v = a.split('=')
+                n, v = a.split(b'=')
                 vals[unescapearg(n)] = unescapearg(v)
         func, spec = commands[op]
 
         # Validate that client has permissions to perform this command.
         perm = commands[op].permission
-        assert perm in ('push', 'pull')
+        assert perm in (b'push', b'pull')
         proto.checkperm(perm)
 
         if spec:
             keys = spec.split()
             data = {}
             for k in keys:
-                if k == '*':
+                if k == b'*':
                     star = {}
                     for key in vals.keys():
                         if key not in keys:
                             star[key] = vals[key]
-                    data['*'] = star
+                    data[b'*'] = star
                 else:
                     data[k] = vals[k]
             result = func(repo, proto, *[data[k] for k in keys])
@@ -207,38 +226,42 @@
             result = result.data
         res.append(wireprototypes.escapebatcharg(result))
 
-    return wireprototypes.bytesresponse(';'.join(res))
+    return wireprototypes.bytesresponse(b';'.join(res))
+
 
-@wireprotocommand('between', 'pairs', permission='pull')
+@wireprotocommand(b'between', b'pairs', permission=b'pull')
 def between(repo, proto, pairs):
-    pairs = [wireprototypes.decodelist(p, '-') for p in pairs.split(" ")]
+    pairs = [wireprototypes.decodelist(p, b'-') for p in pairs.split(b" ")]
     r = []
     for b in repo.between(pairs):
-        r.append(wireprototypes.encodelist(b) + "\n")
+        r.append(wireprototypes.encodelist(b) + b"\n")
 
-    return wireprototypes.bytesresponse(''.join(r))
+    return wireprototypes.bytesresponse(b''.join(r))
 
-@wireprotocommand('branchmap', permission='pull')
+
+@wireprotocommand(b'branchmap', permission=b'pull')
 def branchmap(repo, proto):
     branchmap = repo.branchmap()
     heads = []
-    for branch, nodes in branchmap.iteritems():
+    for branch, nodes in pycompat.iteritems(branchmap):
         branchname = urlreq.quote(encoding.fromlocal(branch))
         branchnodes = wireprototypes.encodelist(nodes)
-        heads.append('%s %s' % (branchname, branchnodes))
+        heads.append(b'%s %s' % (branchname, branchnodes))
 
-    return wireprototypes.bytesresponse('\n'.join(heads))
+    return wireprototypes.bytesresponse(b'\n'.join(heads))
 
-@wireprotocommand('branches', 'nodes', permission='pull')
+
+@wireprotocommand(b'branches', b'nodes', permission=b'pull')
 def branches(repo, proto, nodes):
     nodes = wireprototypes.decodelist(nodes)
     r = []
     for b in repo.branches(nodes):
-        r.append(wireprototypes.encodelist(b) + "\n")
+        r.append(wireprototypes.encodelist(b) + b"\n")
 
-    return wireprototypes.bytesresponse(''.join(r))
+    return wireprototypes.bytesresponse(b''.join(r))
 
-@wireprotocommand('clonebundles', '', permission='pull')
+
+@wireprotocommand(b'clonebundles', b'', permission=b'pull')
 def clonebundles(repo, proto):
     """Server command for returning info for available bundles to seed clones.
 
@@ -249,10 +272,19 @@
     data center given the client's IP address.
     """
     return wireprototypes.bytesresponse(
-        repo.vfs.tryread('clonebundles.manifest'))
+        repo.vfs.tryread(b'clonebundles.manifest')
+    )
+
 
-wireprotocaps = ['lookup', 'branchmap', 'pushkey',
-                 'known', 'getbundle', 'unbundlehash']
+wireprotocaps = [
+    b'lookup',
+    b'branchmap',
+    b'pushkey',
+    b'known',
+    b'getbundle',
+    b'unbundlehash',
+]
+
 
 def _capabilities(repo, proto):
     """return a list of capabilities for a repo
@@ -269,65 +301,69 @@
 
     # Command of same name as capability isn't exposed to version 1 of
     # transports. So conditionally add it.
-    if commands.commandavailable('changegroupsubset', proto):
-        caps.append('changegroupsubset')
+    if commands.commandavailable(b'changegroupsubset', proto):
+        caps.append(b'changegroupsubset')
 
     if streamclone.allowservergeneration(repo):
-        if repo.ui.configbool('server', 'preferuncompressed'):
-            caps.append('stream-preferred')
+        if repo.ui.configbool(b'server', b'preferuncompressed'):
+            caps.append(b'stream-preferred')
         requiredformats = repo.requirements & repo.supportedformats
         # if our local revlogs are just revlogv1, add 'stream' cap
-        if not requiredformats - {'revlogv1'}:
-            caps.append('stream')
+        if not requiredformats - {b'revlogv1'}:
+            caps.append(b'stream')
         # otherwise, add 'streamreqs' detailing our local revlog format
         else:
-            caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
-    if repo.ui.configbool('experimental', 'bundle2-advertise'):
-        capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role='server'))
-        caps.append('bundle2=' + urlreq.quote(capsblob))
-    caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
+            caps.append(b'streamreqs=%s' % b','.join(sorted(requiredformats)))
+    if repo.ui.configbool(b'experimental', b'bundle2-advertise'):
+        capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=b'server'))
+        caps.append(b'bundle2=' + urlreq.quote(capsblob))
+    caps.append(b'unbundle=%s' % b','.join(bundle2.bundlepriority))
 
-    if repo.ui.configbool('experimental', 'narrow'):
+    if repo.ui.configbool(b'experimental', b'narrow'):
         caps.append(wireprototypes.NARROWCAP)
-        if repo.ui.configbool('experimental', 'narrowservebrokenellipses'):
+        if repo.ui.configbool(b'experimental', b'narrowservebrokenellipses'):
             caps.append(wireprototypes.ELLIPSESCAP)
 
     return proto.addcapabilities(repo, caps)
 
+
 # If you are writing an extension and consider wrapping this function. Wrap
 # `_capabilities` instead.
-@wireprotocommand('capabilities', permission='pull')
+@wireprotocommand(b'capabilities', permission=b'pull')
 def capabilities(repo, proto):
     caps = _capabilities(repo, proto)
-    return wireprototypes.bytesresponse(' '.join(sorted(caps)))
+    return wireprototypes.bytesresponse(b' '.join(sorted(caps)))
 
-@wireprotocommand('changegroup', 'roots', permission='pull')
+
+@wireprotocommand(b'changegroup', b'roots', permission=b'pull')
 def changegroup(repo, proto, roots):
     nodes = wireprototypes.decodelist(roots)
-    outgoing = discovery.outgoing(repo, missingroots=nodes,
-                                  missingheads=repo.heads())
-    cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
-    gen = iter(lambda: cg.read(32768), '')
+    outgoing = discovery.outgoing(
+        repo, missingroots=nodes, missingheads=repo.heads()
+    )
+    cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
+    gen = iter(lambda: cg.read(32768), b'')
     return wireprototypes.streamres(gen=gen)
 
-@wireprotocommand('changegroupsubset', 'bases heads',
-                  permission='pull')
+
+@wireprotocommand(b'changegroupsubset', b'bases heads', permission=b'pull')
 def changegroupsubset(repo, proto, bases, heads):
     bases = wireprototypes.decodelist(bases)
     heads = wireprototypes.decodelist(heads)
-    outgoing = discovery.outgoing(repo, missingroots=bases,
-                                  missingheads=heads)
-    cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
-    gen = iter(lambda: cg.read(32768), '')
+    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
+    cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
+    gen = iter(lambda: cg.read(32768), b'')
     return wireprototypes.streamres(gen=gen)
 
-@wireprotocommand('debugwireargs', 'one two *',
-                  permission='pull')
+
+@wireprotocommand(b'debugwireargs', b'one two *', permission=b'pull')
 def debugwireargs(repo, proto, one, two, others):
     # only accept optional args from the known set
-    opts = options('debugwireargs', ['three', 'four'], others)
-    return wireprototypes.bytesresponse(repo.debugwireargs(
-        one, two, **pycompat.strkwargs(opts)))
+    opts = options(b'debugwireargs', [b'three', b'four'], others)
+    return wireprototypes.bytesresponse(
+        repo.debugwireargs(one, two, **pycompat.strkwargs(opts))
+    )
+
 
 def find_pullbundle(repo, proto, opts, clheads, heads, common):
     """Return a file object for the first matching pullbundle.
@@ -344,10 +380,11 @@
       E.g. do not send a bundle of all changes if the client wants only
       one specific branch of many.
     """
+
     def decodehexstring(s):
-        return {binascii.unhexlify(h) for h in s.split(';')}
+        return {binascii.unhexlify(h) for h in s.split(b';')}
 
-    manifest = repo.vfs.tryread('pullbundles.manifest')
+    manifest = repo.vfs.tryread(b'pullbundles.manifest')
     if not manifest:
         return None
     res = exchange.parseclonebundlesmanifest(repo, manifest)
@@ -359,122 +396,131 @@
     common_anc = cl.ancestors([cl.rev(rev) for rev in common], inclusive=True)
     compformats = clientcompressionsupport(proto)
     for entry in res:
-        comp = entry.get('COMPRESSION')
+        comp = entry.get(b'COMPRESSION')
         altcomp = util.compengines._bundlenames.get(comp)
         if comp and comp not in compformats and altcomp not in compformats:
             continue
         # No test yet for VERSION, since V2 is supported by any client
         # that advertises partial pulls
-        if 'heads' in entry:
+        if b'heads' in entry:
             try:
-                bundle_heads = decodehexstring(entry['heads'])
+                bundle_heads = decodehexstring(entry[b'heads'])
             except TypeError:
                 # Bad heads entry
                 continue
             if bundle_heads.issubset(common):
-                continue # Nothing new
+                continue  # Nothing new
             if all(cl.rev(rev) in common_anc for rev in bundle_heads):
-                continue # Still nothing new
-            if any(cl.rev(rev) not in heads_anc and
-                   cl.rev(rev) not in common_anc for rev in bundle_heads):
+                continue  # Still nothing new
+            if any(
+                cl.rev(rev) not in heads_anc and cl.rev(rev) not in common_anc
+                for rev in bundle_heads
+            ):
                 continue
-        if 'bases' in entry:
+        if b'bases' in entry:
             try:
-                bundle_bases = decodehexstring(entry['bases'])
+                bundle_bases = decodehexstring(entry[b'bases'])
             except TypeError:
                 # Bad bases entry
                 continue
             if not all(cl.rev(rev) in common_anc for rev in bundle_bases):
                 continue
-        path = entry['URL']
-        repo.ui.debug('sending pullbundle "%s"\n' % path)
+        path = entry[b'URL']
+        repo.ui.debug(b'sending pullbundle "%s"\n' % path)
         try:
             return repo.vfs.open(path)
         except IOError:
-            repo.ui.debug('pullbundle "%s" not accessible\n' % path)
+            repo.ui.debug(b'pullbundle "%s" not accessible\n' % path)
             continue
     return None
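
For reference, ``pullbundles.manifest`` entries use the same ``URL
KEY=VALUE`` shape, with ``heads`` and ``bases`` as ``;``-separated hex nodes
and paths resolved relative to ``.hg``; a sketched entry (hashes
shortened)::

   partial-default.hg BUNDLESPEC=gzip-v2 heads=24d1... bases=f8a3...
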
 
-@wireprotocommand('getbundle', '*', permission='pull')
+
+@wireprotocommand(b'getbundle', b'*', permission=b'pull')
 def getbundle(repo, proto, others):
-    opts = options('getbundle', wireprototypes.GETBUNDLE_ARGUMENTS.keys(),
-                   others)
-    for k, v in opts.iteritems():
+    opts = options(
+        b'getbundle', wireprototypes.GETBUNDLE_ARGUMENTS.keys(), others
+    )
+    for k, v in pycompat.iteritems(opts):
         keytype = wireprototypes.GETBUNDLE_ARGUMENTS[k]
-        if keytype == 'nodes':
+        if keytype == b'nodes':
             opts[k] = wireprototypes.decodelist(v)
-        elif keytype == 'csv':
-            opts[k] = list(v.split(','))
-        elif keytype == 'scsv':
-            opts[k] = set(v.split(','))
-        elif keytype == 'boolean':
+        elif keytype == b'csv':
+            opts[k] = list(v.split(b','))
+        elif keytype == b'scsv':
+            opts[k] = set(v.split(b','))
+        elif keytype == b'boolean':
             # Client should serialize False as '0', which is a non-empty string
             # so it evaluates as a True bool.
-            if v == '0':
+            if v == b'0':
                 opts[k] = False
             else:
                 opts[k] = bool(v)
-        elif keytype != 'plain':
-            raise KeyError('unknown getbundle option type %s'
-                           % keytype)
+        elif keytype != b'plain':
+            raise KeyError(b'unknown getbundle option type %s' % keytype)
 
-    if not bundle1allowed(repo, 'pull'):
-        if not exchange.bundle2requested(opts.get('bundlecaps')):
-            if proto.name == 'http-v1':
+    if not bundle1allowed(repo, b'pull'):
+        if not exchange.bundle2requested(opts.get(b'bundlecaps')):
+            if proto.name == b'http-v1':
                 return wireprototypes.ooberror(bundle2required)
-            raise error.Abort(bundle2requiredmain,
-                              hint=bundle2requiredhint)
+            raise error.Abort(bundle2requiredmain, hint=bundle2requiredhint)
 
     try:
         clheads = set(repo.changelog.heads())
-        heads = set(opts.get('heads', set()))
-        common = set(opts.get('common', set()))
+        heads = set(opts.get(b'heads', set()))
+        common = set(opts.get(b'common', set()))
         common.discard(nullid)
-        if (repo.ui.configbool('server', 'pullbundle') and
-            'partial-pull' in proto.getprotocaps()):
+        if (
+            repo.ui.configbool(b'server', b'pullbundle')
+            and b'partial-pull' in proto.getprotocaps()
+        ):
             # Check if a pre-built bundle covers this request.
             bundle = find_pullbundle(repo, proto, opts, clheads, heads, common)
             if bundle:
-                return wireprototypes.streamres(gen=util.filechunkiter(bundle),
-                                                prefer_uncompressed=True)
+                return wireprototypes.streamres(
+                    gen=util.filechunkiter(bundle), prefer_uncompressed=True
+                )
 
-        if repo.ui.configbool('server', 'disablefullbundle'):
+        if repo.ui.configbool(b'server', b'disablefullbundle'):
             # Check to see if this is a full clone.
-            changegroup = opts.get('cg', True)
+            changegroup = opts.get(b'cg', True)
             if changegroup and not common and clheads == heads:
                 raise error.Abort(
-                    _('server has pull-based clones disabled'),
-                    hint=_('remove --pull if specified or upgrade Mercurial'))
+                    _(b'server has pull-based clones disabled'),
+                    hint=_(b'remove --pull if specified or upgrade Mercurial'),
+                )
 
-        info, chunks = exchange.getbundlechunks(repo, 'serve',
-                                                **pycompat.strkwargs(opts))
-        prefercompressed = info.get('prefercompressed', True)
+        info, chunks = exchange.getbundlechunks(
+            repo, b'serve', **pycompat.strkwargs(opts)
+        )
+        prefercompressed = info.get(b'prefercompressed', True)
     except error.Abort as exc:
         # cleanly forward Abort error to the client
-        if not exchange.bundle2requested(opts.get('bundlecaps')):
-            if proto.name == 'http-v1':
-                return wireprototypes.ooberror(pycompat.bytestr(exc) + '\n')
-            raise # cannot do better for bundle1 + ssh
+        if not exchange.bundle2requested(opts.get(b'bundlecaps')):
+            if proto.name == b'http-v1':
+                return wireprototypes.ooberror(pycompat.bytestr(exc) + b'\n')
+            raise  # cannot do better for bundle1 + ssh
         # bundle2 request expect a bundle2 reply
         bundler = bundle2.bundle20(repo.ui)
-        manargs = [('message', pycompat.bytestr(exc))]
+        manargs = [(b'message', pycompat.bytestr(exc))]
         advargs = []
         if exc.hint is not None:
-            advargs.append(('hint', exc.hint))
-        bundler.addpart(bundle2.bundlepart('error:abort',
-                                           manargs, advargs))
+            advargs.append((b'hint', exc.hint))
+        bundler.addpart(bundle2.bundlepart(b'error:abort', manargs, advargs))
         chunks = bundler.getchunks()
         prefercompressed = False
 
     return wireprototypes.streamres(
-        gen=chunks, prefer_uncompressed=not prefercompressed)
+        gen=chunks, prefer_uncompressed=not prefercompressed
+    )
 
-@wireprotocommand('heads', permission='pull')
+
+@wireprotocommand(b'heads', permission=b'pull')
 def heads(repo, proto):
     h = repo.heads()
-    return wireprototypes.bytesresponse(wireprototypes.encodelist(h) + '\n')
+    return wireprototypes.bytesresponse(wireprototypes.encodelist(h) + b'\n')
 
-@wireprotocommand('hello', permission='pull')
+
+@wireprotocommand(b'hello', permission=b'pull')
 def hello(repo, proto):
     """Called as part of SSH handshake to obtain server info.
 
@@ -487,14 +533,16 @@
         capabilities: <token0> <token1> <token2>
     """
     caps = capabilities(repo, proto).data
-    return wireprototypes.bytesresponse('capabilities: %s\n' % caps)
+    return wireprototypes.bytesresponse(b'capabilities: %s\n' % caps)
 
-@wireprotocommand('listkeys', 'namespace', permission='pull')
+
+@wireprotocommand(b'listkeys', b'namespace', permission=b'pull')
 def listkeys(repo, proto, namespace):
     d = sorted(repo.listkeys(encoding.tolocal(namespace)).items())
     return wireprototypes.bytesresponse(pushkeymod.encodekeys(d))
 
-@wireprotocommand('lookup', 'key', permission='pull')
+
+@wireprotocommand(b'lookup', b'key', permission=b'pull')
 def lookup(repo, proto, key):
     try:
         k = encoding.tolocal(key)
@@ -504,21 +552,25 @@
     except Exception as inst:
         r = stringutil.forcebytestr(inst)
         success = 0
-    return wireprototypes.bytesresponse('%d %s\n' % (success, r))
+    return wireprototypes.bytesresponse(b'%d %s\n' % (success, r))
+
 
-@wireprotocommand('known', 'nodes *', permission='pull')
+@wireprotocommand(b'known', b'nodes *', permission=b'pull')
 def known(repo, proto, nodes, others):
-    v = ''.join(b and '1' or '0'
-                for b in repo.known(wireprototypes.decodelist(nodes)))
+    v = b''.join(
+        b and b'1' or b'0' for b in repo.known(wireprototypes.decodelist(nodes))
+    )
     return wireprototypes.bytesresponse(v)
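
The known reply is one ASCII byte per queried node; note the b'1'/b'0'
literals so the join stays bytes under Python 3. Round-tripping it with the
slicing idiom the v2 client uses later in this patch:

    flags = [True, False, True]
    wire = b''.join(b'1' if f else b'0' for f in flags)          # b'101'
    # wire[i] would be an int on Python 3; slice to keep a bytes object
    back = [wire[i:i + 1] == b'1' for i in range(len(wire))]
    assert back == flags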
 
-@wireprotocommand('protocaps', 'caps', permission='pull')
+
+@wireprotocommand(b'protocaps', b'caps', permission=b'pull')
 def protocaps(repo, proto, caps):
     if proto.name == wireprototypes.SSHV1:
-        proto._protocaps = set(caps.split(' '))
-    return wireprototypes.bytesresponse('OK')
+        proto._protocaps = set(caps.split(b' '))
+    return wireprototypes.bytesresponse(b'OK')
 
-@wireprotocommand('pushkey', 'namespace key old new', permission='push')
+
+@wireprotocommand(b'pushkey', b'namespace key old new', permission=b'push')
 def pushkey(repo, proto, namespace, key, old, new):
     # compatibility with pre-1.8 clients which were accidentally
     # sending raw binary nodes rather than utf-8-encoded hex
@@ -526,81 +578,98 @@
         # looks like it could be a binary node
         try:
             new.decode('utf-8')
-            new = encoding.tolocal(new) # but cleanly decodes as UTF-8
+            new = encoding.tolocal(new)  # but cleanly decodes as UTF-8
         except UnicodeDecodeError:
-            pass # binary, leave unmodified
+            pass  # binary, leave unmodified
     else:
-        new = encoding.tolocal(new) # normal path
+        new = encoding.tolocal(new)  # normal path
 
     with proto.mayberedirectstdio() as output:
-        r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
-                         encoding.tolocal(old), new) or False
+        r = (
+            repo.pushkey(
+                encoding.tolocal(namespace),
+                encoding.tolocal(key),
+                encoding.tolocal(old),
+                new,
+            )
+            or False
+        )
 
-    output = output.getvalue() if output else ''
-    return wireprototypes.bytesresponse('%d\n%s' % (int(r), output))
+    output = output.getvalue() if output else b''
+    return wireprototypes.bytesresponse(b'%d\n%s' % (int(r), output))
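
The pushkey reply packs the boolean result and any redirected stdio into a
single payload: an integer on the first line, server output after it. A
client-side parse sketch (parse_pushkey_reply is a hypothetical helper, not
part of Mercurial):

    def parse_pushkey_reply(body):
        ret, _, output = body.partition(b'\n')
        return int(ret), output

    assert parse_pushkey_reply(b'1\nhook output\n') == (1, b'hook output\n')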
 
-@wireprotocommand('stream_out', permission='pull')
+
+@wireprotocommand(b'stream_out', permission=b'pull')
 def stream(repo, proto):
     '''If the server supports streaming clone, it advertises the "stream"
     capability with a value representing the version and flags of the repo
     it is serving. The client checks whether it understands the format.
     '''
-    return wireprototypes.streamreslegacy(
-        streamclone.generatev1wireproto(repo))
+    return wireprototypes.streamreslegacy(streamclone.generatev1wireproto(repo))
 
-@wireprotocommand('unbundle', 'heads', permission='push')
+
+@wireprotocommand(b'unbundle', b'heads', permission=b'push')
 def unbundle(repo, proto, heads):
     their_heads = wireprototypes.decodelist(heads)
 
     with proto.mayberedirectstdio() as output:
         try:
-            exchange.check_heads(repo, their_heads, 'preparing changes')
+            exchange.check_heads(repo, their_heads, b'preparing changes')
             cleanup = lambda: None
             try:
                 payload = proto.getpayload()
-                if repo.ui.configbool('server', 'streamunbundle'):
+                if repo.ui.configbool(b'server', b'streamunbundle'):
+
                     def cleanup():
                         # Ensure that the full payload is consumed, so
                         # that the connection doesn't contain trailing garbage.
                         for p in payload:
                             pass
+
                     fp = util.chunkbuffer(payload)
                 else:
                     # write bundle data to temporary file as it can be big
                     fp, tempname = None, None
+
                     def cleanup():
                         if fp:
                             fp.close()
                         if tempname:
                             os.unlink(tempname)
-                    fd, tempname = pycompat.mkstemp(prefix='hg-unbundle-')
-                    repo.ui.debug('redirecting incoming bundle to %s\n' %
-                        tempname)
-                    fp = os.fdopen(fd, pycompat.sysstr('wb+'))
+
+                    fd, tempname = pycompat.mkstemp(prefix=b'hg-unbundle-')
+                    repo.ui.debug(
+                        b'redirecting incoming bundle to %s\n' % tempname
+                    )
+                    fp = os.fdopen(fd, pycompat.sysstr(b'wb+'))
                     for p in payload:
                         fp.write(p)
                     fp.seek(0)
 
                 gen = exchange.readbundle(repo.ui, fp, None)
-                if (isinstance(gen, changegroupmod.cg1unpacker)
-                    and not bundle1allowed(repo, 'push')):
-                    if proto.name == 'http-v1':
+                if isinstance(
+                    gen, changegroupmod.cg1unpacker
+                ) and not bundle1allowed(repo, b'push'):
+                    if proto.name == b'http-v1':
                         # need to special-case http because stderr does not
                         # get to the http client on a failed push, so we need
                         # to abuse some other error type to make sure the
                         # message gets to the user.
                         return wireprototypes.ooberror(bundle2required)
-                    raise error.Abort(bundle2requiredmain,
-                                      hint=bundle2requiredhint)
+                    raise error.Abort(
+                        bundle2requiredmain, hint=bundle2requiredhint
+                    )
 
-                r = exchange.unbundle(repo, gen, their_heads, 'serve',
-                                      proto.client())
-                if util.safehasattr(r, 'addpart'):
+                r = exchange.unbundle(
+                    repo, gen, their_heads, b'serve', proto.client()
+                )
+                if util.safehasattr(r, b'addpart'):
                     # The return looks streamable, we are in the bundle2 case
                     # and should return a stream.
                     return wireprototypes.streamreslegacy(gen=r.getchunks())
                 return wireprototypes.pushres(
-                    r, output.getvalue() if output else '')
+                    r, output.getvalue() if output else b''
+                )
 
             finally:
                 cleanup()
@@ -615,16 +684,18 @@
                     # We did not change it to minimise code change.
                     # This needs to be moved to something proper.
                     # Feel free to do it.
-                    procutil.stderr.write("abort: %s\n" % exc)
+                    procutil.stderr.write(b"abort: %s\n" % exc)
                     if exc.hint is not None:
-                        procutil.stderr.write("(%s)\n" % exc.hint)
+                        procutil.stderr.write(b"(%s)\n" % exc.hint)
                     procutil.stderr.flush()
                     return wireprototypes.pushres(
-                        0, output.getvalue() if output else '')
+                        0, output.getvalue() if output else b''
+                    )
                 except error.PushRaced:
                     return wireprototypes.pusherr(
                         pycompat.bytestr(exc),
-                        output.getvalue() if output else '')
+                        output.getvalue() if output else b'',
+                    )
 
             bundler = bundle2.bundle20(repo.ui)
             for out in getattr(exc, '_bundle2salvagedoutput', ()):
@@ -635,37 +706,43 @@
                 except error.PushkeyFailed as exc:
                     # check client caps
                     remotecaps = getattr(exc, '_replycaps', None)
-                    if (remotecaps is not None
-                            and 'pushkey' not in remotecaps.get('error', ())):
+                    if (
+                        remotecaps is not None
+                        and b'pushkey' not in remotecaps.get(b'error', ())
+                    ):
                         # not supported remotely; fall back to the Abort handler.
                         raise
-                    part = bundler.newpart('error:pushkey')
-                    part.addparam('in-reply-to', exc.partid)
+                    part = bundler.newpart(b'error:pushkey')
+                    part.addparam(b'in-reply-to', exc.partid)
                     if exc.namespace is not None:
-                        part.addparam('namespace', exc.namespace,
-                                      mandatory=False)
+                        part.addparam(
+                            b'namespace', exc.namespace, mandatory=False
+                        )
                     if exc.key is not None:
-                        part.addparam('key', exc.key, mandatory=False)
+                        part.addparam(b'key', exc.key, mandatory=False)
                     if exc.new is not None:
-                        part.addparam('new', exc.new, mandatory=False)
+                        part.addparam(b'new', exc.new, mandatory=False)
                     if exc.old is not None:
-                        part.addparam('old', exc.old, mandatory=False)
+                        part.addparam(b'old', exc.old, mandatory=False)
                     if exc.ret is not None:
-                        part.addparam('ret', exc.ret, mandatory=False)
+                        part.addparam(b'ret', exc.ret, mandatory=False)
             except error.BundleValueError as exc:
-                errpart = bundler.newpart('error:unsupportedcontent')
+                errpart = bundler.newpart(b'error:unsupportedcontent')
                 if exc.parttype is not None:
-                    errpart.addparam('parttype', exc.parttype)
+                    errpart.addparam(b'parttype', exc.parttype)
                 if exc.params:
-                    errpart.addparam('params', '\0'.join(exc.params))
+                    errpart.addparam(b'params', b'\0'.join(exc.params))
             except error.Abort as exc:
-                manargs = [('message', stringutil.forcebytestr(exc))]
+                manargs = [(b'message', stringutil.forcebytestr(exc))]
                 advargs = []
                 if exc.hint is not None:
-                    advargs.append(('hint', exc.hint))
-                bundler.addpart(bundle2.bundlepart('error:abort',
-                                                   manargs, advargs))
+                    advargs.append((b'hint', exc.hint))
+                bundler.addpart(
+                    bundle2.bundlepart(b'error:abort', manargs, advargs)
+                )
             except error.PushRaced as exc:
-                bundler.newpart('error:pushraced',
-                                [('message', stringutil.forcebytestr(exc))])
+                bundler.newpart(
+                    b'error:pushraced',
+                    [(b'message', stringutil.forcebytestr(exc))],
+                )
             return wireprototypes.streamreslegacy(gen=bundler.getchunks())
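
Both abort paths above answer with a bundle2 error:abort part: the message
rides in the mandatory parameters, the hint in the advisory ones (which older
clients may ignore). Schematically, the data handed to bundle2.bundlepart is
just two lists of byte-string pairs (the values here are illustrative):

    manargs = [(b'message', b'server has pull-based clones disabled')]
    advargs = []
    hint = b'remove --pull if specified or upgrade Mercurial'
    if hint is not None:
        advargs.append((b'hint', hint))
    # bundler.addpart(bundle2.bundlepart(b'error:abort', manargs, advargs))
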
--- a/mercurial/wireprotov2peer.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/wireprotov2peer.py	Mon Oct 21 11:09:48 2019 -0400
@@ -20,9 +20,8 @@
     wireprotoframing,
     wireprototypes,
 )
-from .utils import (
-    cborutil,
-)
+from .utils import cborutil
+
 
 def formatrichmessage(atoms):
     """Format an encoded message from the framing protocol."""
@@ -39,6 +38,7 @@
 
     return b''.join(chunks)
 
+
 SUPPORTED_REDIRECT_PROTOCOLS = {
     b'http',
     b'https',
@@ -49,6 +49,7 @@
     b'sha256',
 }
 
+
 def redirecttargetsupported(ui, target):
     """Determine whether a redirect target entry is supported.
 
@@ -56,13 +57,17 @@
     the server.
     """
     if target.get(b'protocol') not in SUPPORTED_REDIRECT_PROTOCOLS:
-        ui.note(_('(remote redirect target %s uses unsupported protocol: %s)\n')
-                % (target[b'name'], target.get(b'protocol', b'')))
+        ui.note(
+            _(b'(remote redirect target %s uses unsupported protocol: %s)\n')
+            % (target[b'name'], target.get(b'protocol', b''))
+        )
         return False
 
     if target.get(b'snirequired') and not sslutil.hassni:
-        ui.note(_('(redirect target %s requires SNI, which is unsupported)\n') %
-                target[b'name'])
+        ui.note(
+            _(b'(redirect target %s requires SNI, which is unsupported)\n')
+            % target[b'name']
+        )
         return False
 
     if b'tlsversions' in target:
@@ -74,15 +79,20 @@
             supported.add(v[3:])
 
         if not tlsversions & supported:
-            ui.note(_('(remote redirect target %s requires unsupported TLS '
-                      'versions: %s)\n') % (
-                target[b'name'], b', '.join(sorted(tlsversions))))
+            ui.note(
+                _(
+                    b'(remote redirect target %s requires unsupported TLS '
+                    b'versions: %s)\n'
+                )
+                % (target[b'name'], b', '.join(sorted(tlsversions)))
+            )
             return False
 
-    ui.note(_('(remote redirect target %s is compatible)\n') % target[b'name'])
+    ui.note(_(b'(remote redirect target %s is compatible)\n') % target[b'name'])
 
     return True
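
The TLS gate above reduces to a set intersection: the target's required
versions against what the local ssl stack supports, with the b'tls' prefix
stripped (e.g. b'tls1.2' becomes b'1.2'). In isolation (the advertised set is
illustrative; the real code reads sslutil.supportedprotocols):

    required = {b'1.2', b'1.3'}                  # target[b'tlsversions']
    advertised = {b'tls1.0', b'tls1.1', b'tls1.2'}
    supported = {v[3:] for v in advertised}      # strip the b'tls' prefix
    if not required & supported:
        raise SystemExit('redirect target requires unsupported TLS versions')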
 
+
 def supportedredirects(ui, apidescriptor):
     """Resolve the "redirect" command request key given an API descriptor.
 
@@ -96,17 +106,24 @@
     if not apidescriptor or b'redirect' not in apidescriptor:
         return None
 
-    targets = [t[b'name'] for t in apidescriptor[b'redirect'][b'targets']
-               if redirecttargetsupported(ui, t)]
+    targets = [
+        t[b'name']
+        for t in apidescriptor[b'redirect'][b'targets']
+        if redirecttargetsupported(ui, t)
+    ]
 
-    hashes = [h for h in apidescriptor[b'redirect'][b'hashes']
-              if h in SUPPORTED_CONTENT_HASHES]
+    hashes = [
+        h
+        for h in apidescriptor[b'redirect'][b'hashes']
+        if h in SUPPORTED_CONTENT_HASHES
+    ]
 
     return {
         b'targets': targets,
         b'hashes': hashes,
     }
 
+
 class commandresponse(object):
     """Represents the response to a command request.
 
@@ -162,9 +179,13 @@
                 # content redirect is the only object in the stream. Fail
                 # if we see a misbehaving server.
                 if self._redirect:
-                    raise error.Abort(_('received unexpected response data '
-                                        'after content redirect; the remote is '
-                                        'buggy'))
+                    raise error.Abort(
+                        _(
+                            b'received unexpected response data '
+                            b'after content redirect; the remote is '
+                            b'buggy'
+                        )
+                    )
 
                 self._pendingevents.append(o)
 
@@ -190,12 +211,13 @@
                 fullhashes=l.get(b'fullhashes'),
                 fullhashseed=l.get(b'fullhashseed'),
                 serverdercerts=l.get(b'serverdercerts'),
-                servercadercerts=l.get(b'servercadercerts'))
+                servercadercerts=l.get(b'servercadercerts'),
+            )
             return
 
-        atoms = [{'msg': o[b'error'][b'message']}]
+        atoms = [{b'msg': o[b'error'][b'message']}]
         if b'args' in o[b'error']:
-            atoms[0]['args'] = o[b'error'][b'args']
+            atoms[0][b'args'] = o[b'error'][b'args']
 
         raise error.RepoError(formatrichmessage(atoms))
 
@@ -237,6 +259,7 @@
             if stop:
                 break
 
+
 class clienthandler(object):
     """Object to handle higher-level client activities.
 
@@ -248,8 +271,9 @@
     with the higher-level peer API.
     """
 
-    def __init__(self, ui, clientreactor, opener=None,
-                 requestbuilder=util.urlreq.request):
+    def __init__(
+        self, ui, clientreactor, opener=None, requestbuilder=util.urlreq.request
+    ):
         self._ui = ui
         self._reactor = clientreactor
         self._requests = {}
@@ -265,11 +289,12 @@
 
         Returns an iterable of frames that should be sent over the wire.
         """
-        request, action, meta = self._reactor.callcommand(command, args,
-                                                          redirect=redirect)
+        request, action, meta = self._reactor.callcommand(
+            command, args, redirect=redirect
+        )
 
-        if action != 'noop':
-            raise error.ProgrammingError('%s not yet supported' % action)
+        if action != b'noop':
+            raise error.ProgrammingError(b'%s not yet supported' % action)
 
         rid = request.requestid
         self._requests[rid] = request
@@ -287,10 +312,10 @@
         """
         action, meta = self._reactor.flushcommands()
 
-        if action != 'sendframes':
-            raise error.ProgrammingError('%s not yet supported' % action)
+        if action != b'sendframes':
+            raise error.ProgrammingError(b'%s not yet supported' % action)
 
-        return meta['framegen']
+        return meta[b'framegen']
 
     def readdata(self, framefh):
         """Attempt to read data and do work.
@@ -304,7 +329,7 @@
                 # TODO tell reactor?
                 self._frameseof = True
             else:
-                self._ui.debug('received %r\n' % frame)
+                self._ui.debug(b'received %r\n' % frame)
                 self._processframe(frame)
 
         # Also try to read the first redirect.
@@ -322,8 +347,8 @@
 
         action, meta = self._reactor.onframerecv(frame)
 
-        if action == 'error':
-            e = error.RepoError(meta['message'])
+        if action == b'error':
+            e = error.RepoError(meta[b'message'])
 
             if frame.requestid in self._responses:
                 self._responses[frame.requestid]._oninputcomplete()
@@ -335,23 +360,24 @@
                 raise e
 
             return
-        elif action == 'noop':
+        elif action == b'noop':
             return
-        elif action == 'responsedata':
+        elif action == b'responsedata':
             # Handled below.
             pass
         else:
-            raise error.ProgrammingError('action not handled: %s' % action)
+            raise error.ProgrammingError(b'action not handled: %s' % action)
 
         if frame.requestid not in self._requests:
             raise error.ProgrammingError(
-                'received frame for unknown request; this is either a bug in '
-                'the clientreactor not screening for this or this instance was '
-                'never told about this request: %r' % frame)
+                b'received frame for unknown request; this is either a bug in '
+                b'the clientreactor not screening for this or this instance was '
+                b'never told about this request: %r' % frame
+            )
 
         response = self._responses[frame.requestid]
 
-        if action == 'responsedata':
+        if action == b'responsedata':
             # Any failures processing this frame should bubble up to the
             # future tracking the request.
             try:
@@ -371,11 +397,12 @@
                     response._onerror(e)
         else:
             raise error.ProgrammingError(
-                'unhandled action from clientreactor: %s' % action)
+                b'unhandled action from clientreactor: %s' % action
+            )
 
     def _processresponsedata(self, frame, meta, response):
         # This can raise. The caller can handle it.
-        response._onresponsedata(meta['data'])
+        response._onresponsedata(meta[b'data'])
 
         # We need to be careful about resolving futures prematurely. If a
         # response is a redirect response, resolving the future before the
@@ -387,7 +414,7 @@
         # EOS occurs or until the initial response object is fully received.
 
         # Always react to eos.
-        if meta['eos']:
+        if meta[b'eos']:
             response._oninputcomplete()
             del self._requests[frame.requestid]
 
@@ -419,20 +446,30 @@
 
     def _followredirect(self, requestid, redirect):
         """Called to initiate redirect following for a request."""
-        self._ui.note(_('(following redirect to %s)\n') % redirect.url)
+        self._ui.note(_(b'(following redirect to %s)\n') % redirect.url)
 
         # TODO handle framed responses.
         if redirect.mediatype != b'application/mercurial-cbor':
-            raise error.Abort(_('cannot handle redirects for the %s media type')
-                              % redirect.mediatype)
+            raise error.Abort(
+                _(b'cannot handle redirects for the %s media type')
+                % redirect.mediatype
+            )
 
         if redirect.fullhashes:
-            self._ui.warn(_('(support for validating hashes on content '
-                            'redirects not supported)\n'))
+            self._ui.warn(
+                _(
+                    b'(support for validating hashes on content '
+                    b'redirects not supported)\n'
+                )
+            )
 
         if redirect.serverdercerts or redirect.servercadercerts:
-            self._ui.warn(_('(support for pinning server certificates on '
-                            'content redirects not supported)\n'))
+            self._ui.warn(
+                _(
+                    b'(support for pinning server certificates on '
+                    b'content redirects not supported)\n'
+                )
+            )
 
         headers = {
             r'Accept': redirect.mediatype,
@@ -444,10 +481,10 @@
             res = self._opener.open(req)
         except util.urlerr.httperror as e:
             if e.code == 401:
-                raise error.Abort(_('authorization failed'))
+                raise error.Abort(_(b'authorization failed'))
             raise
         except util.httplib.HTTPException as e:
-            self._ui.debug('http error requesting %s\n' % req.get_full_url())
+            self._ui.debug(b'http error requesting %s\n' % req.get_full_url())
             self._ui.traceback()
             raise IOError(None, e)
 
@@ -456,9 +493,9 @@
         # The existing response object is associated with frame data. Rather
         # than try to normalize its state, just create a new object.
         oldresponse = self._responses[requestid]
-        self._responses[requestid] = commandresponse(requestid,
-                                                     oldresponse.command,
-                                                     fromredirect=True)
+        self._responses[requestid] = commandresponse(
+            requestid, oldresponse.command, fromredirect=True
+        )
 
         self._redirects.append((requestid, res))
 
@@ -496,37 +533,44 @@
             response._oninputcomplete()
             return False
 
+
 def decodebranchmap(objs):
     # Response should be a single CBOR map of branch name to array of nodes.
     bm = next(objs)
 
     return {encoding.tolocal(k): v for k, v in bm.items()}
 
+
 def decodeheads(objs):
     # Array of node bytestrings.
     return next(objs)
 
+
 def decodeknown(objs):
     # Bytestring where each byte is a 0 or 1.
     raw = next(objs)
 
-    return [True if raw[i:i + 1] == b'1' else False for i in range(len(raw))]
+    return [True if raw[i : i + 1] == b'1' else False for i in range(len(raw))]
+
 
 def decodelistkeys(objs):
     # Map with bytestring keys and values.
     return next(objs)
 
+
 def decodelookup(objs):
     return next(objs)
 
+
 def decodepushkey(objs):
     return next(objs)
 
+
 COMMAND_DECODERS = {
-    'branchmap': decodebranchmap,
-    'heads': decodeheads,
-    'known': decodeknown,
-    'listkeys': decodelistkeys,
-    'lookup': decodelookup,
-    'pushkey': decodepushkey,
+    b'branchmap': decodebranchmap,
+    b'heads': decodeheads,
+    b'known': decodeknown,
+    b'listkeys': decodelistkeys,
+    b'lookup': decodelookup,
+    b'pushkey': decodepushkey,
 }
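
COMMAND_DECODERS is a plain dispatch table from the bytes command name to a
function that consumes the iterator of decoded CBOR objects, so response
decoding stays a dict lookup. A self-contained miniature of the same shape
(decode_response and the fallback are illustrative, not Mercurial API):

    def decodeheads_sketch(objs):
        # the heads response is a single array of node bytestrings
        return next(objs)

    DECODERS = {b'heads': decodeheads_sketch}

    def decode_response(command, objs):
        # unknown commands fall through with their raw object stream
        return DECODERS.get(command, lambda o: o)(objs)

    assert decode_response(b'heads', iter([[b'\x00' * 20]])) == [b'\x00' * 20]
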
--- a/mercurial/wireprotov2server.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/wireprotov2server.py	Mon Oct 21 11:09:48 2019 -0400
@@ -28,9 +28,9 @@
     wireprotoframing,
     wireprototypes,
 )
+from .interfaces import util as interfaceutil
 from .utils import (
     cborutil,
-    interfaceutil,
     stringutil,
 )
 
@@ -45,6 +45,7 @@
 # there is a change to how caching works, etc.
 GLOBAL_CACHE_VERSION = 1
 
+
 def handlehttpv2request(rctx, req, res, checkperm, urlparts):
     from .hgweb import common as hgwebcommon
 
@@ -55,14 +56,15 @@
     if not urlparts:
         res.status = b'200 OK'
         res.headers[b'Content-Type'] = b'text/plain'
-        res.setbodybytes(_('HTTP version 2 API handler'))
+        res.setbodybytes(_(b'HTTP version 2 API handler'))
         return
 
     if len(urlparts) == 1:
         res.status = b'404 Not Found'
         res.headers[b'Content-Type'] = b'text/plain'
-        res.setbodybytes(_('do not know how to process %s\n') %
-                         req.dispatchpath)
+        res.setbodybytes(
+            _(b'do not know how to process %s\n') % req.dispatchpath
+        )
         return
 
     permission, command = urlparts[0:2]
@@ -70,13 +72,13 @@
     if permission not in (b'ro', b'rw'):
         res.status = b'404 Not Found'
         res.headers[b'Content-Type'] = b'text/plain'
-        res.setbodybytes(_('unknown permission: %s') % permission)
+        res.setbodybytes(_(b'unknown permission: %s') % permission)
         return
 
-    if req.method != 'POST':
+    if req.method != b'POST':
         res.status = b'405 Method Not Allowed'
         res.headers[b'Allow'] = b'POST'
-        res.setbodybytes(_('commands require POST requests'))
+        res.setbodybytes(_(b'commands require POST requests'))
         return
 
     # At some point we'll want to use our own API instead of recycling the
@@ -84,12 +86,12 @@
     # TODO return reasonable responses - not responses that overload the
     # HTTP status line message for error reporting.
     try:
-        checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
+        checkperm(rctx, req, b'pull' if permission == b'ro' else b'push')
     except hgwebcommon.ErrorResponse as e:
         res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
         for k, v in e.headers:
             res.headers[k] = v
-        res.setbodybytes('permission denied')
+        res.setbodybytes(b'permission denied')
         return
 
     # We have a special endpoint to reflect the request back at the client.
@@ -100,12 +102,12 @@
     # Extra commands that we handle that aren't really wire protocol
     # commands. Think extra hard before making this hackery available to
     # extensions.
-    extracommands = {'multirequest'}
+    extracommands = {b'multirequest'}
 
     if command not in COMMANDS and command not in extracommands:
         res.status = b'404 Not Found'
         res.headers[b'Content-Type'] = b'text/plain'
-        res.setbodybytes(_('unknown wire protocol command: %s\n') % command)
+        res.setbodybytes(_(b'unknown wire protocol command: %s\n') % command)
         return
 
     repo = rctx.repo
@@ -113,19 +115,23 @@
 
     proto = httpv2protocolhandler(req, ui)
 
-    if (not COMMANDS.commandavailable(command, proto)
-        and command not in extracommands):
+    if (
+        not COMMANDS.commandavailable(command, proto)
+        and command not in extracommands
+    ):
         res.status = b'404 Not Found'
         res.headers[b'Content-Type'] = b'text/plain'
-        res.setbodybytes(_('invalid wire protocol command: %s') % command)
+        res.setbodybytes(_(b'invalid wire protocol command: %s') % command)
         return
 
     # TODO consider cases where proxies may add additional Accept headers.
     if req.headers.get(b'Accept') != FRAMINGTYPE:
         res.status = b'406 Not Acceptable'
         res.headers[b'Content-Type'] = b'text/plain'
-        res.setbodybytes(_('client MUST specify Accept header with value: %s\n')
-                           % FRAMINGTYPE)
+        res.setbodybytes(
+            _(b'client MUST specify Accept header with value: %s\n')
+            % FRAMINGTYPE
+        )
         return
 
     if req.headers.get(b'Content-Type') != FRAMINGTYPE:
@@ -133,12 +139,15 @@
         # TODO we should send a response with appropriate media type,
         # since client does Accept it.
         res.headers[b'Content-Type'] = b'text/plain'
-        res.setbodybytes(_('client MUST send Content-Type header with '
-                           'value: %s\n') % FRAMINGTYPE)
+        res.setbodybytes(
+            _(b'client MUST send Content-Type header with value: %s\n')
+            % FRAMINGTYPE
+        )
         return
 
     _processhttpv2request(ui, repo, req, res, permission, command, proto)
 
+
 def _processhttpv2reflectrequest(ui, repo, req, res):
     """Reads unified frame protocol request and dumps out state to client.
 
@@ -151,10 +160,10 @@
     """
     # Reflection APIs have a history of being abused, accidentally disclosing
     # sensitive data, etc. So we have a config knob.
-    if not ui.configbool('experimental', 'web.api.debugreflect'):
+    if not ui.configbool(b'experimental', b'web.api.debugreflect'):
         res.status = b'404 Not Found'
         res.headers[b'Content-Type'] = b'text/plain'
-        res.setbodybytes(_('debugreflect service not available'))
+        res.setbodybytes(_(b'debugreflect service not available'))
         return
 
     # We assume we have a unified framing protocol request body.
@@ -169,21 +178,23 @@
             states.append(b'received: <no frame>')
             break
 
-        states.append(b'received: %d %d %d %s' % (frame.typeid, frame.flags,
-                                                  frame.requestid,
-                                                  frame.payload))
+        states.append(
+            b'received: %d %d %d %s'
+            % (frame.typeid, frame.flags, frame.requestid, frame.payload)
+        )
 
         action, meta = reactor.onframerecv(frame)
         states.append(templatefilters.json((action, meta)))
 
     action, meta = reactor.oninputeof()
-    meta['action'] = action
+    meta[b'action'] = action
     states.append(templatefilters.json(meta))
 
     res.status = b'200 OK'
     res.headers[b'Content-Type'] = b'text/plain'
     res.setbodybytes(b'\n'.join(states))
 
+
 def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
     """Post-validation handler for HTTPv2 requests.
 
@@ -205,49 +216,71 @@
 
         action, meta = reactor.onframerecv(frame)
 
-        if action == 'wantframe':
+        if action == b'wantframe':
             # Need more data before we can do anything.
             continue
-        elif action == 'runcommand':
+        elif action == b'runcommand':
             # Defer creating output stream because we need to wait for
             # protocol settings frames so proper encoding can be applied.
             if not outstream:
                 outstream = reactor.makeoutputstream()
 
-            sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
-                                           reqcommand, reactor, outstream,
-                                           meta, issubsequent=seencommand)
+            sentoutput = _httpv2runcommand(
+                ui,
+                repo,
+                req,
+                res,
+                authedperm,
+                reqcommand,
+                reactor,
+                outstream,
+                meta,
+                issubsequent=seencommand,
+            )
 
             if sentoutput:
                 return
 
             seencommand = True
 
-        elif action == 'error':
+        elif action == b'error':
             # TODO define proper error mechanism.
             res.status = b'200 OK'
             res.headers[b'Content-Type'] = b'text/plain'
-            res.setbodybytes(meta['message'] + b'\n')
+            res.setbodybytes(meta[b'message'] + b'\n')
             return
         else:
             raise error.ProgrammingError(
-                'unhandled action from frame processor: %s' % action)
+                b'unhandled action from frame processor: %s' % action
+            )
 
     action, meta = reactor.oninputeof()
-    if action == 'sendframes':
+    if action == b'sendframes':
         # We assume we haven't started sending the response yet. If we're
         # wrong, the response type will raise an exception.
         res.status = b'200 OK'
         res.headers[b'Content-Type'] = FRAMINGTYPE
-        res.setbodygen(meta['framegen'])
-    elif action == 'noop':
+        res.setbodygen(meta[b'framegen'])
+    elif action == b'noop':
         pass
     else:
-        raise error.ProgrammingError('unhandled action from frame processor: %s'
-                                     % action)
+        raise error.ProgrammingError(
+            b'unhandled action from frame processor: %s' % action
+        )
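
The handler above is essentially a small state machine over the bytes action
returned by the reactor: b'wantframe' buffers, b'runcommand' dispatches,
b'error' short-circuits. A stripped-down, runnable model of that dispatch
(FakeReactor and the return values are illustrative):

    class FakeReactor(object):
        def __init__(self, actions):
            self._actions = iter(actions)

        def onframerecv(self, frame):
            return next(self._actions)

    def drive(reactor, frames):
        for frame in frames:
            action, meta = reactor.onframerecv(frame)
            if action == b'wantframe':
                continue                      # need more data first
            elif action == b'runcommand':
                return b'ran ' + meta[b'command']
            elif action == b'error':
                return meta[b'message']
            raise RuntimeError('unhandled action: %r' % action)

    reactor = FakeReactor([(b'wantframe', {}),
                           (b'runcommand', {b'command': b'heads'})])
    assert drive(reactor, [None, None]) == b'ran heads'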
+
 
-def _httpv2runcommand(ui, repo, req, res, authedperm, reqcommand, reactor,
-                      outstream, command, issubsequent):
+def _httpv2runcommand(
+    ui,
+    repo,
+    req,
+    res,
+    authedperm,
+    reqcommand,
+    reactor,
+    outstream,
+    command,
+    issubsequent,
+):
     """Dispatch a wire protocol command made from HTTPv2 requests.
 
     The authenticated permission (``authedperm``) along with the original
@@ -268,28 +301,32 @@
     # TODO consider allowing multiple commands to regular command URLs
     # iff each command is the same.
 
-    proto = httpv2protocolhandler(req, ui, args=command['args'])
+    proto = httpv2protocolhandler(req, ui, args=command[b'args'])
 
     if reqcommand == b'multirequest':
-        if not COMMANDS.commandavailable(command['command'], proto):
+        if not COMMANDS.commandavailable(command[b'command'], proto):
             # TODO proper error mechanism
             res.status = b'200 OK'
             res.headers[b'Content-Type'] = b'text/plain'
-            res.setbodybytes(_('wire protocol command not available: %s') %
-                             command['command'])
+            res.setbodybytes(
+                _(b'wire protocol command not available: %s')
+                % command[b'command']
+            )
             return True
 
         # TODO don't use assert here, since it may be elided by -O.
         assert authedperm in (b'ro', b'rw')
-        wirecommand = COMMANDS[command['command']]
-        assert wirecommand.permission in ('push', 'pull')
+        wirecommand = COMMANDS[command[b'command']]
+        assert wirecommand.permission in (b'push', b'pull')
 
-        if authedperm == b'ro' and wirecommand.permission != 'pull':
+        if authedperm == b'ro' and wirecommand.permission != b'pull':
             # TODO proper error mechanism
             res.status = b'403 Forbidden'
             res.headers[b'Content-Type'] = b'text/plain'
-            res.setbodybytes(_('insufficient permissions to execute '
-                               'command: %s') % command['command'])
+            res.setbodybytes(
+                _(b'insufficient permissions to execute command: %s')
+                % command[b'command']
+            )
             return True
 
         # TODO should we also call checkperm() here? Maybe not if we're going
@@ -302,49 +339,57 @@
             # TODO proper error mechanism
             res.status = b'200 OK'
             res.headers[b'Content-Type'] = b'text/plain'
-            res.setbodybytes(_('multiple commands cannot be issued to this '
-                               'URL'))
+            res.setbodybytes(
+                _(b'multiple commands cannot be issued to this URL')
+            )
             return True
 
-        if reqcommand != command['command']:
+        if reqcommand != command[b'command']:
             # TODO define proper error mechanism
             res.status = b'200 OK'
             res.headers[b'Content-Type'] = b'text/plain'
-            res.setbodybytes(_('command in frame must match command in URL'))
+            res.setbodybytes(_(b'command in frame must match command in URL'))
             return True
 
     res.status = b'200 OK'
     res.headers[b'Content-Type'] = FRAMINGTYPE
 
     try:
-        objs = dispatch(repo, proto, command['command'], command['redirect'])
+        objs = dispatch(repo, proto, command[b'command'], command[b'redirect'])
 
         action, meta = reactor.oncommandresponsereadyobjects(
-            outstream, command['requestid'], objs)
+            outstream, command[b'requestid'], objs
+        )
 
     except error.WireprotoCommandError as e:
         action, meta = reactor.oncommanderror(
-            outstream, command['requestid'], e.message, e.messageargs)
+            outstream, command[b'requestid'], e.message, e.messageargs
+        )
 
     except Exception as e:
         action, meta = reactor.onservererror(
-            outstream, command['requestid'],
-            _('exception when invoking command: %s') %
-            stringutil.forcebytestr(e))
+            outstream,
+            command[b'requestid'],
+            _(b'exception when invoking command: %s')
+            % stringutil.forcebytestr(e),
+        )
 
-    if action == 'sendframes':
-        res.setbodygen(meta['framegen'])
+    if action == b'sendframes':
+        res.setbodygen(meta[b'framegen'])
         return True
-    elif action == 'noop':
+    elif action == b'noop':
         return False
     else:
-        raise error.ProgrammingError('unhandled event from reactor: %s' %
-                                     action)
+        raise error.ProgrammingError(
+            b'unhandled event from reactor: %s' % action
+        )
+
 
 def getdispatchrepo(repo, proto, command):
-    viewconfig = repo.ui.config('server', 'view')
+    viewconfig = repo.ui.config(b'server', b'view')
     return repo.filtered(viewconfig)
 
+
 def dispatch(repo, proto, command, redirect):
     """Run a wire protocol command.
 
@@ -377,10 +422,15 @@
         redirecttargets = []
         redirecthashes = []
 
-    cacher = makeresponsecacher(repo, proto, command, args,
-                                cborutil.streamencode,
-                                redirecttargets=redirecttargets,
-                                redirecthashes=redirecthashes)
+    cacher = makeresponsecacher(
+        repo,
+        proto,
+        command,
+        args,
+        cborutil.streamencode,
+        redirecttargets=redirecttargets,
+        redirecthashes=redirecthashes,
+    )
 
     # But we have no cacher. Do default handling.
     if not cacher:
@@ -389,8 +439,9 @@
         return
 
     with cacher:
-        cachekey = entry.cachekeyfn(repo, proto, cacher,
-                                    **pycompat.strkwargs(args))
+        cachekey = entry.cachekeyfn(
+            repo, proto, cacher, **pycompat.strkwargs(args)
+        )
 
         # No cache key or the cacher doesn't like it. Do default handling.
         if cachekey is None or not cacher.setcachekey(cachekey):
@@ -402,7 +453,7 @@
         cached = cacher.lookup()
 
         if cached:
-            for o in cached['objs']:
+            for o in cached[b'objs']:
                 yield o
             return
 
@@ -415,6 +466,7 @@
         for o in cacher.onfinished():
             yield o
 
+
 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
 class httpv2protocolhandler(object):
     def __init__(self, req, ui, args=None):
@@ -432,15 +484,17 @@
         extra = set(self._args) - set(args)
         if extra:
             raise error.WireprotoCommandError(
-                'unsupported argument to command: %s' %
-                ', '.join(sorted(extra)))
+                b'unsupported argument to command: %s'
+                % b', '.join(sorted(extra))
+            )
 
         # And look for required arguments that are missing.
-        missing = {a for a in args if args[a]['required']} - set(self._args)
+        missing = {a for a in args if args[a][b'required']} - set(self._args)
 
         if missing:
             raise error.WireprotoCommandError(
-                'missing required arguments: %s' % ', '.join(sorted(missing)))
+                b'missing required arguments: %s' % b', '.join(sorted(missing))
+            )
 
         # Now derive the arguments to pass to the command, taking into
         # account the arguments specified by the client.
@@ -448,13 +502,13 @@
         for k, meta in sorted(args.items()):
             # This argument wasn't passed by the client.
             if k not in self._args:
-                data[k] = meta['default']()
+                data[k] = meta[b'default']()
                 continue
 
             v = self._args[k]
 
             # Sets may be expressed as lists. Silently normalize.
-            if meta['type'] == 'set' and isinstance(v, list):
+            if meta[b'type'] == b'set' and isinstance(v, list):
                 v = set(v)
 
             # TODO consider more/stronger type validation.
@@ -483,11 +537,13 @@
     def checkperm(self, perm):
         raise NotImplementedError
 
+
 def httpv2apidescriptor(req, repo):
     proto = httpv2protocolhandler(req, repo.ui)
 
     return _capabilitiesv2(repo, proto)
 
+
 def _capabilitiesv2(repo, proto):
     """Obtain the set of capabilities for version 2 transports.
 
@@ -495,9 +551,9 @@
     transports.
     """
     caps = {
-        'commands': {},
-        'framingmediatypes': [FRAMINGTYPE],
-        'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
+        b'commands': {},
+        b'framingmediatypes': [FRAMINGTYPE],
+        b'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
     }
 
     for command, entry in COMMANDS.items():
@@ -507,32 +563,33 @@
             args[arg] = {
                 # TODO should this be a normalized type using CBOR's
                 # terminology?
-                b'type': meta['type'],
-                b'required': meta['required'],
+                b'type': meta[b'type'],
+                b'required': meta[b'required'],
             }
 
-            if not meta['required']:
-                args[arg][b'default'] = meta['default']()
+            if not meta[b'required']:
+                args[arg][b'default'] = meta[b'default']()
 
-            if meta['validvalues']:
-                args[arg][b'validvalues'] = meta['validvalues']
+            if meta[b'validvalues']:
+                args[arg][b'validvalues'] = meta[b'validvalues']
 
         # TODO this type of check should be defined in a per-command callback.
-        if (command == b'rawstorefiledata'
-            and not streamclone.allowservergeneration(repo)):
+        if (
+            command == b'rawstorefiledata'
+            and not streamclone.allowservergeneration(repo)
+        ):
             continue
 
-        caps['commands'][command] = {
-            'args': args,
-            'permissions': [entry.permission],
+        caps[b'commands'][command] = {
+            b'args': args,
+            b'permissions': [entry.permission],
         }
 
         if entry.extracapabilitiesfn:
             extracaps = entry.extracapabilitiesfn(repo, proto)
-            caps['commands'][command].update(extracaps)
+            caps[b'commands'][command].update(extracaps)
 
-    caps['rawrepoformats'] = sorted(repo.requirements &
-                                    repo.supportedformats)
+    caps[b'rawrepoformats'] = sorted(repo.requirements & repo.supportedformats)
 
     targets = getadvertisedredirecttargets(repo, proto)
     if targets:
@@ -543,12 +600,12 @@
 
         for target in targets:
             entry = {
-                b'name': target['name'],
-                b'protocol': target['protocol'],
-                b'uris': target['uris'],
+                b'name': target[b'name'],
+                b'protocol': target[b'protocol'],
+                b'uris': target[b'uris'],
             }
 
-            for key in ('snirequired', 'tlsversions'):
+            for key in (b'snirequired', b'tlsversions'):
                 if key in target:
                     entry[key] = target[key]
 
@@ -556,6 +613,7 @@
 
     return proto.addcapabilities(repo, caps)
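
After byteification, every key in the advertised capabilities map is bytes as
well. A trimmed example of the resulting structure (the media type and format
names are illustrative placeholders, not exact values):

    caps = {
        b'commands': {
            b'heads': {b'args': {}, b'permissions': [b'pull']},
        },
        b'framingmediatypes': [b'application/mercurial-exp-framing-...'],
        b'pathfilterprefixes': {b'path:', b'rootfilesin:'},
        b'rawrepoformats': [b'generaldelta', b'revlogv1'],
    }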
 
+
 def getadvertisedredirecttargets(repo, proto):
     """Obtain a list of content redirect targets.
 
@@ -594,8 +652,14 @@
     """
     return []
 
-def wireprotocommand(name, args=None, permission='push', cachekeyfn=None,
-                     extracapabilitiesfn=None):
+
+def wireprotocommand(
+    name,
+    args=None,
+    permission=b'push',
+    cachekeyfn=None,
+    extracapabilitiesfn=None,
+):
     """Decorator to declare a wire protocol command.
 
     ``name`` is the name of the wire protocol command being provided.
@@ -646,61 +710,86 @@
     containing the key in a cache the response to this command may be cached
     under.
     """
-    transports = {k for k, v in wireprototypes.TRANSPORTS.items()
-                  if v['version'] == 2}
+    transports = {
+        k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 2
+    }
 
-    if permission not in ('push', 'pull'):
-        raise error.ProgrammingError('invalid wire protocol permission; '
-                                     'got %s; expected "push" or "pull"' %
-                                     permission)
+    if permission not in (b'push', b'pull'):
+        raise error.ProgrammingError(
+            b'invalid wire protocol permission; '
+            b'got %s; expected "push" or "pull"' % permission
+        )
 
     if args is None:
         args = {}
 
     if not isinstance(args, dict):
-        raise error.ProgrammingError('arguments for version 2 commands '
-                                     'must be declared as dicts')
+        raise error.ProgrammingError(
+            b'arguments for version 2 commands must be declared as dicts'
+        )
 
     for arg, meta in args.items():
-        if arg == '*':
-            raise error.ProgrammingError('* argument name not allowed on '
-                                         'version 2 commands')
+        if arg == b'*':
+            raise error.ProgrammingError(
+                b'* argument name not allowed on version 2 commands'
+            )
 
         if not isinstance(meta, dict):
-            raise error.ProgrammingError('arguments for version 2 commands '
-                                         'must declare metadata as a dict')
+            raise error.ProgrammingError(
+                b'arguments for version 2 commands '
+                b'must declare metadata as a dict'
+            )
 
-        if 'type' not in meta:
-            raise error.ProgrammingError('%s argument for command %s does not '
-                                         'declare type field' % (arg, name))
+        if b'type' not in meta:
+            raise error.ProgrammingError(
+                b'%s argument for command %s does not '
+                b'declare type field' % (arg, name)
+            )
 
-        if meta['type'] not in ('bytes', 'int', 'list', 'dict', 'set', 'bool'):
-            raise error.ProgrammingError('%s argument for command %s has '
-                                         'illegal type: %s' % (arg, name,
-                                                               meta['type']))
+        if meta[b'type'] not in (
+            b'bytes',
+            b'int',
+            b'list',
+            b'dict',
+            b'set',
+            b'bool',
+        ):
+            raise error.ProgrammingError(
+                b'%s argument for command %s has '
+                b'illegal type: %s' % (arg, name, meta[b'type'])
+            )
 
-        if 'example' not in meta:
-            raise error.ProgrammingError('%s argument for command %s does not '
-                                         'declare example field' % (arg, name))
+        if b'example' not in meta:
+            raise error.ProgrammingError(
+                b'%s argument for command %s does not '
+                b'declare example field' % (arg, name)
+            )
 
-        meta['required'] = 'default' not in meta
+        meta[b'required'] = b'default' not in meta
 
-        meta.setdefault('default', lambda: None)
-        meta.setdefault('validvalues', None)
+        meta.setdefault(b'default', lambda: None)
+        meta.setdefault(b'validvalues', None)
 
     def register(func):
         if name in COMMANDS:
-            raise error.ProgrammingError('%s command already registered '
-                                         'for version 2' % name)
+            raise error.ProgrammingError(
+                b'%s command already registered for version 2' % name
+            )
 
         COMMANDS[name] = wireprototypes.commandentry(
-            func, args=args, transports=transports, permission=permission,
-            cachekeyfn=cachekeyfn, extracapabilitiesfn=extracapabilitiesfn)
+            func,
+            args=args,
+            transports=transports,
+            permission=permission,
+            cachekeyfn=cachekeyfn,
+            extracapabilitiesfn=extracapabilitiesfn,
+        )
 
         return func
 
     return register
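
With the byteified signature, registering a version 2 command now looks like
this (a schematic usage sketch mirroring the decorators later in the patch;
b'mycommand' is not a real command):

    @wireprotocommand(
        b'mycommand',
        args={
            b'nodes': {b'type': b'list', b'example': [b'0123456...']},
        },
        permission=b'pull',
    )
    def mycommand(repo, proto, nodes):
        yield {b'totalitems': len(nodes)}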
 
+
 def makecommandcachekeyfn(command, localversion=None, allargs=False):
     """Construct a cache key derivation function with common features.
 
@@ -715,16 +804,18 @@
     * The repository path.
     """
     if not allargs:
-        raise error.ProgrammingError('only allargs=True is currently supported')
+        raise error.ProgrammingError(
+            b'only allargs=True is currently supported'
+        )
 
     if localversion is None:
-        raise error.ProgrammingError('must set localversion argument value')
+        raise error.ProgrammingError(b'must set localversion argument value')
 
     def cachekeyfn(repo, proto, cacher, **args):
         spec = COMMANDS[command]
 
         # Commands that mutate the repo can not be cached.
-        if spec.permission == 'push':
+        if spec.permission == b'push':
             return None
 
         # TODO config option to disable caching.
@@ -775,8 +866,10 @@
 
     return cachekeyfn
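
The derived cache key function short-circuits for mutating commands: anything
registered with b'push' permission is never cached. The rest of the
derivation hashes the command, local version, and sorted arguments; a
standalone sketch of that shape (the hashing details are illustrative):

    import hashlib

    def cachekey_sketch(command, permission, localversion, args):
        if permission == b'push':
            return None                      # mutations are uncacheable
        h = hashlib.sha1()
        h.update(b'%s:%d' % (command, localversion))
        for k in sorted(args):               # order-independent over args
            h.update(k)
            h.update(repr(args[k]).encode('ascii'))
        return h.hexdigest().encode('ascii')

    assert cachekey_sketch(b'pushkey', b'push', 1, {}) is None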
 
-def makeresponsecacher(repo, proto, command, args, objencoderfn,
-                       redirecttargets, redirecthashes):
+
+def makeresponsecacher(
+    repo, proto, command, args, objencoderfn, redirecttargets, redirecthashes
+):
     """Construct a cacher for a cacheable command.
 
     Returns an ``iwireprotocolcommandcacher`` instance.
@@ -786,6 +879,7 @@
     """
     return None
 
+
 def resolvenodes(repo, revisions):
     """Resolve nodes from a revisions specifier data structure."""
     cl = repo.changelog
@@ -795,21 +889,24 @@
     nodes = []
 
     if not isinstance(revisions, list):
-        raise error.WireprotoCommandError('revisions must be defined as an '
-                                          'array')
+        raise error.WireprotoCommandError(
+            b'revisions must be defined as an array'
+        )
 
     for spec in revisions:
         if b'type' not in spec:
             raise error.WireprotoCommandError(
-                'type key not present in revision specifier')
+                b'type key not present in revision specifier'
+            )
 
         typ = spec[b'type']
 
         if typ == b'changesetexplicit':
             if b'nodes' not in spec:
                 raise error.WireprotoCommandError(
-                    'nodes key not present in changesetexplicit revision '
-                    'specifier')
+                    b'nodes key not present in changesetexplicit revision '
+                    b'specifier'
+                )
 
             for node in spec[b'nodes']:
                 if node not in seen:
@@ -820,11 +917,14 @@
             for key in (b'nodes', b'depth'):
                 if key not in spec:
                     raise error.WireprotoCommandError(
-                        '%s key not present in changesetexplicitdepth revision '
-                        'specifier', (key,))
+                        b'%s key not present in changesetexplicitdepth revision '
+                        b'specifier',
+                        (key,),
+                    )
 
-            for rev in repo.revs(b'ancestors(%ln, %s)', spec[b'nodes'],
-                                 spec[b'depth'] - 1):
+            for rev in repo.revs(
+                b'ancestors(%ln, %s)', spec[b'nodes'], spec[b'depth'] - 1
+            ):
                 node = cl.node(rev)
 
                 if node not in seen:
@@ -835,12 +935,15 @@
             for key in (b'roots', b'heads'):
                 if key not in spec:
                     raise error.WireprotoCommandError(
-                        '%s key not present in changesetdagrange revision '
-                        'specifier', (key,))
+                        b'%s key not present in changesetdagrange revision '
+                        b'specifier',
+                        (key,),
+                    )
 
             if not spec[b'heads']:
                 raise error.WireprotoCommandError(
-                    'heads key in changesetdagrange cannot be empty')
+                    b'heads key in changesetdagrange cannot be empty'
+                )
 
             if spec[b'roots']:
                 common = [n for n in spec[b'roots'] if clhasnode(n)]
@@ -854,37 +957,43 @@
 
         else:
             raise error.WireprotoCommandError(
-                'unknown revision specifier type: %s', (typ,))
+                b'unknown revision specifier type: %s', (typ,)
+            )
 
     return nodes
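
For reference, resolvenodes accepts three specifier shapes; minimal valid
instances of each look like the following (node values are placeholders):

    node = b'\x00' * 20
    specs = [
        {b'type': b'changesetexplicit', b'nodes': [node]},
        {b'type': b'changesetexplicitdepth', b'nodes': [node], b'depth': 2},
        {b'type': b'changesetdagrange', b'roots': [], b'heads': [node]},
    ]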
 
-@wireprotocommand('branchmap', permission='pull')
+
+@wireprotocommand(b'branchmap', permission=b'pull')
 def branchmapv2(repo, proto):
-    yield {encoding.fromlocal(k): v
-           for k, v in repo.branchmap().iteritems()}
+    yield {
+        encoding.fromlocal(k): v
+        for k, v in pycompat.iteritems(repo.branchmap())
+    }
 
-@wireprotocommand('capabilities', permission='pull')
+
+@wireprotocommand(b'capabilities', permission=b'pull')
 def capabilitiesv2(repo, proto):
     yield _capabilitiesv2(repo, proto)
 
+
 @wireprotocommand(
-    'changesetdata',
+    b'changesetdata',
     args={
-        'revisions': {
-            'type': 'list',
-            'example': [{
-                b'type': b'changesetexplicit',
-                b'nodes': [b'abcdef...'],
-            }],
+        b'revisions': {
+            b'type': b'list',
+            b'example': [
+                {b'type': b'changesetexplicit', b'nodes': [b'abcdef...'],}
+            ],
         },
-        'fields': {
-            'type': 'set',
-            'default': set,
-            'example': {b'parents', b'revision'},
-            'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'},
+        b'fields': {
+            b'type': b'set',
+            b'default': set,
+            b'example': {b'parents', b'revision'},
+            b'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'},
         },
     },
-    permission='pull')
+    permission=b'pull',
+)
 def changesetdata(repo, proto, revisions, fields):
     # TODO look for unknown fields and abort when they can't be serviced.
     # This could probably be validated by dispatcher using validvalues.
@@ -894,7 +1003,7 @@
     publishing = repo.publishing()
 
     if outgoing:
-        repo.hook('preoutgoing', throw=True, source='serve')
+        repo.hook(b'preoutgoing', throw=True, source=b'serve')
 
     yield {
         b'totalitems': len(outgoing),
@@ -937,7 +1046,7 @@
         followingdata = []
 
         if b'revision' in fields:
-            revisiondata = cl.revision(node, raw=True)
+            revisiondata = cl.rawdata(node)
             followingmeta.append((b'revision', len(revisiondata)))
             followingdata.append(revisiondata)
 
@@ -955,12 +1064,13 @@
     # If requested, send bookmarks from nodes that didn't have revision
     # data sent so receiver is aware of any bookmark updates.
     if b'bookmarks' in fields:
-        for node, marks in sorted(nodebookmarks.iteritems()):
+        for node, marks in sorted(pycompat.iteritems(nodebookmarks)):
             yield {
                 b'node': node,
                 b'bookmarks': sorted(marks),
             }
 
+
 class FileAccessError(Exception):
     """Represents an error accessing a specific file."""
 
@@ -969,6 +1079,7 @@
         self.msg = msg
         self.args = args
 
+
 def getfilestore(repo, proto, path):
     """Obtain a file storage object for use with wire protocol.
 
@@ -980,10 +1091,11 @@
     fl = repo.file(path)
 
     if not len(fl):
-        raise FileAccessError(path, 'unknown file: %s', (path,))
+        raise FileAccessError(path, b'unknown file: %s', (path,))
 
     return fl
 
+
 def emitfilerevisions(repo, path, revisions, linknodes, fields):
     for revision in revisions:
         d = {
@@ -1016,6 +1128,7 @@
         for extra in followingdata:
             yield extra
 
+
 def makefilematcher(repo, pathfilter):
     """Construct a matcher from a path filter dict."""
 
@@ -1025,13 +1138,18 @@
             for pattern in pathfilter.get(key, []):
                 if not pattern.startswith((b'path:', b'rootfilesin:')):
                     raise error.WireprotoCommandError(
-                        '%s pattern must begin with `path:` or `rootfilesin:`; '
-                        'got %s', (key, pattern))
+                        b'%s pattern must begin with `path:` or `rootfilesin:`; '
+                        b'got %s',
+                        (key, pattern),
+                    )
 
     if pathfilter:
-        matcher = matchmod.match(repo.root, b'',
-                                 include=pathfilter.get(b'include', []),
-                                 exclude=pathfilter.get(b'exclude', []))
+        matcher = matchmod.match(
+            repo.root,
+            b'',
+            include=pathfilter.get(b'include', []),
+            exclude=pathfilter.get(b'exclude', []),
+        )
     else:
         matcher = matchmod.match(repo.root, b'')
 
@@ -1039,34 +1157,30 @@
     # filter those out.
     return repo.narrowmatch(matcher)
 
+
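The pattern check in ``makefilematcher()`` above is small enough to
sketch in isolation. A minimal standalone illustration of the same rule
(``validate_pathfilter`` is a hypothetical helper for this sketch, not
part of the module)::

   def validate_pathfilter(pathfilter):
       """Reject include/exclude patterns lacking an allowed prefix."""
       for key in (b'include', b'exclude'):
           for pattern in pathfilter.get(key, []):
               if not pattern.startswith((b'path:', b'rootfilesin:')):
                   raise ValueError(
                       'pattern must begin with path: or rootfilesin:'
                   )

   validate_pathfilter({b'include': [b'path:tests']})   # accepted
   # validate_pathfilter({b'include': [b'glob:*.py']})  # would raise
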
 @wireprotocommand(
-    'filedata',
+    b'filedata',
     args={
-        'haveparents': {
-            'type': 'bool',
-            'default': lambda: False,
-            'example': True,
-        },
-        'nodes': {
-            'type': 'list',
-            'example': [b'0123456...'],
+        b'haveparents': {
+            b'type': b'bool',
+            b'default': lambda: False,
+            b'example': True,
         },
-        'fields': {
-            'type': 'set',
-            'default': set,
-            'example': {b'parents', b'revision'},
-            'validvalues': {b'parents', b'revision', b'linknode'},
+        b'nodes': {b'type': b'list', b'example': [b'0123456...'],},
+        b'fields': {
+            b'type': b'set',
+            b'default': set,
+            b'example': {b'parents', b'revision'},
+            b'validvalues': {b'parents', b'revision', b'linknode'},
         },
-        'path': {
-            'type': 'bytes',
-            'example': b'foo.txt',
-        }
+        b'path': {b'type': b'bytes', b'example': b'foo.txt',},
     },
-    permission='pull',
+    permission=b'pull',
     # TODO censoring a file revision won't invalidate the cache.
     # Figure out a way to take censoring into account when deriving
     # the cache key.
-    cachekeyfn=makecommandcachekeyfn('filedata', 1, allargs=True))
+    cachekeyfn=makecommandcachekeyfn(b'filedata', 1, allargs=True),
+)
 def filedata(repo, proto, haveparents, nodes, fields, path):
     # TODO this API allows access to file revisions that are attached to
     # secret changesets. filesdata does not have this problem. Maybe this
@@ -1086,8 +1200,9 @@
         try:
             store.rev(node)
         except error.LookupError:
-            raise error.WireprotoCommandError('unknown file node: %s',
-                                              (hex(node),))
+            raise error.WireprotoCommandError(
+                b'unknown file node: %s', (hex(node),)
+            )
 
         # TODO by creating the filectx against a specific file revision
         # instead of changeset, linkrev() is always used. This is wrong for
@@ -1097,9 +1212,11 @@
         fctx = repo.filectx(path, fileid=node)
         linknodes[node] = clnode(fctx.introrev())
 
-    revisions = store.emitrevisions(nodes,
-                                    revisiondata=b'revision' in fields,
-                                    assumehaveparentrevisions=haveparents)
+    revisions = store.emitrevisions(
+        nodes,
+        revisiondata=b'revision' in fields,
+        assumehaveparentrevisions=haveparents,
+    )
 
     yield {
         b'totalitems': len(nodes),
@@ -1108,47 +1225,54 @@
     for o in emitfilerevisions(repo, path, revisions, linknodes, fields):
         yield o
 
+
 def filesdatacapabilities(repo, proto):
     batchsize = repo.ui.configint(
-        b'experimental', b'server.filesdata.recommended-batch-size')
+        b'experimental', b'server.filesdata.recommended-batch-size'
+    )
     return {
         b'recommendedbatchsize': batchsize,
     }
 
+
 @wireprotocommand(
-    'filesdata',
+    b'filesdata',
     args={
-        'haveparents': {
-            'type': 'bool',
-            'default': lambda: False,
-            'example': True,
+        b'haveparents': {
+            b'type': b'bool',
+            b'default': lambda: False,
+            b'example': True,
         },
-        'fields': {
-            'type': 'set',
-            'default': set,
-            'example': {b'parents', b'revision'},
-            'validvalues': {b'firstchangeset', b'linknode', b'parents',
-                            b'revision'},
+        b'fields': {
+            b'type': b'set',
+            b'default': set,
+            b'example': {b'parents', b'revision'},
+            b'validvalues': {
+                b'firstchangeset',
+                b'linknode',
+                b'parents',
+                b'revision',
+            },
         },
-        'pathfilter': {
-            'type': 'dict',
-            'default': lambda: None,
-            'example': {b'include': [b'path:tests']},
+        b'pathfilter': {
+            b'type': b'dict',
+            b'default': lambda: None,
+            b'example': {b'include': [b'path:tests']},
         },
-        'revisions': {
-            'type': 'list',
-            'example': [{
-                b'type': b'changesetexplicit',
-                b'nodes': [b'abcdef...'],
-            }],
+        b'revisions': {
+            b'type': b'list',
+            b'example': [
+                {b'type': b'changesetexplicit', b'nodes': [b'abcdef...'],}
+            ],
         },
     },
-    permission='pull',
+    permission=b'pull',
     # TODO censoring a file revision won't invalidate the cache.
     # Figure out a way to take censoring into account when deriving
     # the cache key.
-    cachekeyfn=makecommandcachekeyfn('filesdata', 1, allargs=True),
-    extracapabilitiesfn=filesdatacapabilities)
+    cachekeyfn=makecommandcachekeyfn(b'filesdata', 1, allargs=True),
+    extracapabilitiesfn=filesdatacapabilities,
+)
 def filesdata(repo, proto, haveparents, fields, pathfilter, revisions):
     # TODO This should operate on a repo that exposes obsolete changesets. There
     # is a race between a client making a push that obsoletes a changeset and
@@ -1191,7 +1315,7 @@
 
     yield {
         b'totalpaths': len(fnodes),
-        b'totalitems': sum(len(v) for v in fnodes.values())
+        b'totalitems': sum(len(v) for v in fnodes.values()),
     }
 
     for path, filenodes in sorted(fnodes.items()):
@@ -1205,68 +1329,70 @@
             b'totalitems': len(filenodes),
         }
 
-        revisions = store.emitrevisions(filenodes.keys(),
-                                        revisiondata=b'revision' in fields,
-                                        assumehaveparentrevisions=haveparents)
+        revisions = store.emitrevisions(
+            filenodes.keys(),
+            revisiondata=b'revision' in fields,
+            assumehaveparentrevisions=haveparents,
+        )
 
         for o in emitfilerevisions(repo, path, revisions, filenodes, fields):
             yield o
 
+
 @wireprotocommand(
-    'heads',
+    b'heads',
     args={
-        'publiconly': {
-            'type': 'bool',
-            'default': lambda: False,
-            'example': False,
+        b'publiconly': {
+            b'type': b'bool',
+            b'default': lambda: False,
+            b'example': False,
         },
     },
-    permission='pull')
+    permission=b'pull',
+)
 def headsv2(repo, proto, publiconly):
     if publiconly:
-        repo = repo.filtered('immutable')
+        repo = repo.filtered(b'immutable')
 
     yield repo.heads()
 
+
 @wireprotocommand(
-    'known',
+    b'known',
     args={
-        'nodes': {
-            'type': 'list',
-            'default': list,
-            'example': [b'deadbeef'],
+        b'nodes': {
+            b'type': b'list',
+            b'default': list,
+            b'example': [b'deadbeef'],
         },
     },
-    permission='pull')
+    permission=b'pull',
+)
 def knownv2(repo, proto, nodes):
     result = b''.join(b'1' if n else b'0' for n in repo.known(nodes))
     yield result
 
+
 @wireprotocommand(
-    'listkeys',
-    args={
-        'namespace': {
-            'type': 'bytes',
-            'example': b'ns',
-        },
-    },
-    permission='pull')
+    b'listkeys',
+    args={b'namespace': {b'type': b'bytes', b'example': b'ns',},},
+    permission=b'pull',
+)
 def listkeysv2(repo, proto, namespace):
     keys = repo.listkeys(encoding.tolocal(namespace))
-    keys = {encoding.fromlocal(k): encoding.fromlocal(v)
-            for k, v in keys.iteritems()}
+    keys = {
+        encoding.fromlocal(k): encoding.fromlocal(v)
+        for k, v in pycompat.iteritems(keys)
+    }
 
     yield keys
 
+
 @wireprotocommand(
-    'lookup',
-    args={
-        'key': {
-            'type': 'bytes',
-            'example': b'foo',
-        },
-    },
-    permission='pull')
+    b'lookup',
+    args={b'key': {b'type': b'bytes', b'example': b'foo',},},
+    permission=b'pull',
+)
 def lookupv2(repo, proto, key):
     key = encoding.tolocal(key)
 
@@ -1275,40 +1401,38 @@
 
     yield node
 
+
 def manifestdatacapabilities(repo, proto):
     batchsize = repo.ui.configint(
-        b'experimental', b'server.manifestdata.recommended-batch-size')
+        b'experimental', b'server.manifestdata.recommended-batch-size'
+    )
 
     return {
         b'recommendedbatchsize': batchsize,
     }
 
+
 @wireprotocommand(
-    'manifestdata',
+    b'manifestdata',
     args={
-        'nodes': {
-            'type': 'list',
-            'example': [b'0123456...'],
-        },
-        'haveparents': {
-            'type': 'bool',
-            'default': lambda: False,
-            'example': True,
+        b'nodes': {b'type': b'list', b'example': [b'0123456...'],},
+        b'haveparents': {
+            b'type': b'bool',
+            b'default': lambda: False,
+            b'example': True,
         },
-        'fields': {
-            'type': 'set',
-            'default': set,
-            'example': {b'parents', b'revision'},
-            'validvalues': {b'parents', b'revision'},
+        b'fields': {
+            b'type': b'set',
+            b'default': set,
+            b'example': {b'parents', b'revision'},
+            b'validvalues': {b'parents', b'revision'},
         },
-        'tree': {
-            'type': 'bytes',
-            'example': b'',
-        },
+        b'tree': {b'type': b'bytes', b'example': b'',},
     },
-    permission='pull',
-    cachekeyfn=makecommandcachekeyfn('manifestdata', 1, allargs=True),
-    extracapabilitiesfn=manifestdatacapabilities)
+    permission=b'pull',
+    cachekeyfn=makecommandcachekeyfn(b'manifestdata', 1, allargs=True),
+    extracapabilitiesfn=manifestdatacapabilities,
+)
 def manifestdata(repo, proto, haveparents, nodes, fields, tree):
     store = repo.manifestlog.getstorage(tree)
 
@@ -1317,12 +1441,13 @@
         try:
             store.rev(node)
         except error.LookupError:
-            raise error.WireprotoCommandError(
-                'unknown node: %s', (node,))
+            raise error.WireprotoCommandError(b'unknown node: %s', (node,))
 
-    revisions = store.emitrevisions(nodes,
-                                    revisiondata=b'revision' in fields,
-                                    assumehaveparentrevisions=haveparents)
+    revisions = store.emitrevisions(
+        nodes,
+        revisiondata=b'revision' in fields,
+        assumehaveparentrevisions=haveparents,
+    )
 
     yield {
         b'totalitems': len(nodes),
@@ -1356,49 +1481,42 @@
         for extra in followingdata:
             yield extra
 
+
 @wireprotocommand(
-    'pushkey',
+    b'pushkey',
     args={
-        'namespace': {
-            'type': 'bytes',
-            'example': b'ns',
-        },
-        'key': {
-            'type': 'bytes',
-            'example': b'key',
-        },
-        'old': {
-            'type': 'bytes',
-            'example': b'old',
-        },
-        'new': {
-            'type': 'bytes',
-            'example': 'new',
-        },
+        b'namespace': {b'type': b'bytes', b'example': b'ns',},
+        b'key': {b'type': b'bytes', b'example': b'key',},
+        b'old': {b'type': b'bytes', b'example': b'old',},
+        b'new': {b'type': b'bytes', b'example': b'new',},
     },
-    permission='push')
+    permission=b'push',
+)
 def pushkeyv2(repo, proto, namespace, key, old, new):
     # TODO handle ui output redirection
-    yield repo.pushkey(encoding.tolocal(namespace),
-                       encoding.tolocal(key),
-                       encoding.tolocal(old),
-                       encoding.tolocal(new))
+    yield repo.pushkey(
+        encoding.tolocal(namespace),
+        encoding.tolocal(key),
+        encoding.tolocal(old),
+        encoding.tolocal(new),
+    )
 
 
 @wireprotocommand(
-    'rawstorefiledata',
+    b'rawstorefiledata',
     args={
-        'files': {
-            'type': 'list',
-            'example': [b'changelog', b'manifestlog'],
+        b'files': {
+            b'type': b'list',
+            b'example': [b'changelog', b'manifestlog'],
         },
-        'pathfilter': {
-            'type': 'list',
-            'default': lambda: None,
-            'example': {b'include': [b'path:tests']},
+        b'pathfilter': {
+            b'type': b'list',
+            b'default': lambda: None,
+            b'example': {b'include': [b'path:tests']},
         },
     },
-    permission='pull')
+    permission=b'pull',
+)
 def rawstorefiledata(repo, proto, files, pathfilter):
     if not streamclone.allowservergeneration(repo):
         raise error.WireprotoCommandError(b'stream clone is disabled')
@@ -1410,8 +1528,9 @@
 
     unsupported = files - allowedfiles
     if unsupported:
-        raise error.WireprotoCommandError(b'unknown file type: %s',
-                                          (b', '.join(sorted(unsupported)),))
+        raise error.WireprotoCommandError(
+            b'unknown file type: %s', (b', '.join(sorted(unsupported)),)
+        )
 
     with repo.lock():
         topfiles = list(repo.store.topfiles())
@@ -1447,9 +1566,8 @@
         # We have to use a closure for this to ensure the context manager is
         # closed only after sending the final chunk.
         def getfiledata():
-            with repo.svfs(name, 'rb', auditpath=False) as fh:
+            with repo.svfs(name, b'rb', auditpath=False) as fh:
                 for chunk in util.filechunkiter(fh, limit=size):
                     yield chunk
 
-        yield wireprototypes.indefinitebytestringresponse(
-            getfiledata())
+        yield wireprototypes.indefinitebytestringresponse(getfiledata())
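A change repeated throughout this file swaps ``dict.iteritems()`` for
``pycompat.iteritems(dict)``: ``iteritems()`` no longer exists on
Python 3, so a version-dispatching shim keeps one spelling working on
both interpreters. A minimal sketch of such a shim (illustrative only,
not Mercurial's exact ``pycompat`` definition)::

   import sys

   if sys.version_info[0] >= 3:
       def iteritems(mapping):
           # items() returns a lazy view object on Python 3.
           return iter(mapping.items())
   else:
       def iteritems(mapping):
           return mapping.iteritems()

   for key, value in iteritems({b'heads': 1, b'roots': 2}):
       print(key, value)
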
--- a/mercurial/worker.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/mercurial/worker.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,6 +16,7 @@
 
 try:
     import selectors
+
     selectors.BaseSelector
 except ImportError:
     from .thirdparty import selectors2 as selectors
@@ -29,6 +30,7 @@
     util,
 )
 
+
 def countcpus():
     '''try to count the number of CPUs on the system'''
 
@@ -42,7 +44,7 @@
 
     # windows
     try:
-        n = int(encoding.environ['NUMBER_OF_PROCESSORS'])
+        n = int(encoding.environ[b'NUMBER_OF_PROCESSORS'])
         if n > 0:
             return n
     except (KeyError, ValueError):
@@ -50,17 +52,19 @@
 
     return 1
 
+
 def _numworkers(ui):
-    s = ui.config('worker', 'numcpus')
+    s = ui.config(b'worker', b'numcpus')
     if s:
         try:
             n = int(s)
             if n >= 1:
                 return n
         except ValueError:
-            raise error.Abort(_('number of cpus must be an integer'))
+            raise error.Abort(_(b'number of cpus must be an integer'))
     return min(max(countcpus(), 4), 32)
 
+
 if pycompat.isposix or pycompat.iswindows:
     _STARTUP_COST = 0.01
     # The Windows worker is thread based. If tasks are CPU bound, threads
@@ -71,6 +75,7 @@
     _STARTUP_COST = 1e30
     _DISALLOW_THREAD_UNSAFE = False
 
+
 def worthwhile(ui, costperop, nops, threadsafe=True):
     '''try to determine whether the benefit of multiple processes can
     outweigh the cost of starting them'''
@@ -83,8 +88,10 @@
     benefit = linear - (_STARTUP_COST * workers + linear / workers)
     return benefit >= 0.15
 
-def worker(ui, costperarg, func, staticargs, args, hasretval=False,
-           threadsafe=True):
+
+def worker(
+    ui, costperarg, func, staticargs, args, hasretval=False, threadsafe=True
+):
     '''run a function, possibly in parallel in multiple worker
     processes.
 
@@ -108,16 +115,18 @@
     a thread-based worker. Should be disabled for CPU heavy tasks that don't
     release the GIL.
     '''
-    enabled = ui.configbool('worker', 'enabled')
+    enabled = ui.configbool(b'worker', b'enabled')
     if enabled and worthwhile(ui, costperarg, len(args), threadsafe=threadsafe):
         return _platformworker(ui, func, staticargs, args, hasretval)
     return func(*staticargs + (args,))
 
+
 def _posixworker(ui, func, staticargs, args, hasretval):
     workers = _numworkers(ui)
     oldhandler = signal.getsignal(signal.SIGINT)
     signal.signal(signal.SIGINT, signal.SIG_IGN)
     pids, problem = set(), [0]
+
     def killworkers():
         # unregister SIGCHLD handler as all children will be killed. This
         # function shouldn't be interrupted by another SIGCHLD; otherwise pids
@@ -130,6 +139,7 @@
             except OSError as err:
                 if err.errno != errno.ESRCH:
                     raise
+
     def waitforworkers(blocking=True):
         for pid in pids.copy():
             p = st = 0
@@ -155,10 +165,12 @@
             st = _exitstatus(st)
             if st and not problem[0]:
                 problem[0] = st
+
     def sigchldhandler(signum, frame):
         waitforworkers(blocking=False)
         if problem[0]:
             killworkers()
+
     oldchldhandler = signal.signal(signal.SIGCHLD, sigchldhandler)
     ui.flush()
     parentpid = os.getpid()
@@ -196,7 +208,7 @@
                     return 0
 
                 ret = scmutil.callcatch(ui, workerfunc)
-        except: # parent re-raises, child never returns
+        except:  # parent re-raises, child never returns
             if os.getpid() == parentpid:
                 raise
             exctype = sys.exc_info()[0]
@@ -206,7 +218,7 @@
             if os.getpid() != parentpid:
                 try:
                     ui.flush()
-                except: # never returns, no re-raises
+                except:  # never returns, no re-raises
                     pass
                 finally:
                     os._exit(ret & 255)
@@ -215,12 +227,14 @@
     for rfd, wfd in pipes:
         os.close(wfd)
         selector.register(os.fdopen(rfd, r'rb', 0), selectors.EVENT_READ)
+
     def cleanup():
         signal.signal(signal.SIGINT, oldhandler)
         waitforworkers()
         signal.signal(signal.SIGCHLD, oldchldhandler)
         selector.close()
         return problem[0]
+
     try:
         openpipes = len(pipes)
         while openpipes > 0:
@@ -239,7 +253,7 @@
                     if e.errno == errno.EINTR:
                         continue
                     raise
-    except: # re-raises
+    except:  # re-raises
         killworkers()
         cleanup()
         raise
@@ -251,6 +265,7 @@
     if hasretval:
         yield True, retval
 
+
 def _posixexitstatus(code):
     '''convert a posix exit status into the same form returned by
     os.spawnv
@@ -259,12 +274,14 @@
     if os.WIFEXITED(code):
         return os.WEXITSTATUS(code)
     elif os.WIFSIGNALED(code):
-        return -os.WTERMSIG(code)
+        return -(os.WTERMSIG(code))
+
 
 def _windowsworker(ui, func, staticargs, args, hasretval):
     class Worker(threading.Thread):
-        def __init__(self, taskqueue, resultqueue, func, staticargs, *args,
-                     **kwargs):
+        def __init__(
+            self, taskqueue, resultqueue, func, staticargs, *args, **kwargs
+        ):
             threading.Thread.__init__(self, *args, **kwargs)
             self._taskqueue = taskqueue
             self._resultqueue = resultqueue
@@ -298,6 +315,7 @@
                 raise
 
     threads = []
+
     def trykillworkers():
         # Allow up to 1 second to clean worker threads nicely
         cleanupend = time.time() + 1
@@ -311,8 +329,12 @@
                 # important to surface the initial exception than the
                 # fact that one of workers may be processing a large
                 # task and does not get to handle the interruption.
-                ui.warn(_("failed to kill worker threads while "
-                          "handling an exception\n"))
+                ui.warn(
+                    _(
+                        b"failed to kill worker threads while "
+                        b"handling an exception\n"
+                    )
+                )
                 return
 
     workers = _numworkers(ui)
@@ -341,7 +363,7 @@
                 if t.exception is not None:
                     raise t.exception
                 threads.remove(t)
-    except (Exception, KeyboardInterrupt): # re-raises
+    except (Exception, KeyboardInterrupt):  # re-raises
         trykillworkers()
         raise
     while not resultqueue.empty():
@@ -353,12 +375,14 @@
     if hasretval:
         yield True, retval
 
+
 if pycompat.iswindows:
     _platformworker = _windowsworker
 else:
     _platformworker = _posixworker
     _exitstatus = _posixexitstatus
 
+
 def partition(lst, nslices):
     '''partition a list into N slices of roughly equal size
 
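The ``worthwhile()`` heuristic earlier in this file decides whether
spawning workers pays off: with ``linear = costperop * nops``, workers
are used only when ``linear - (_STARTUP_COST * workers + linear /
workers)`` reaches 0.15. A worked example of that formula as a
standalone sketch (the constant is the value used when a process or
thread based platform worker is available, per the hunks above)::

   _STARTUP_COST = 0.01  # value on POSIX and Windows in this file

   def benefit(costperop, nops, workers):
       linear = costperop * nops
       # estimated serial time minus estimated parallel time
       return linear - (_STARTUP_COST * workers + linear / workers)

   # 10000 items at 0.1ms each, 4 workers:
   # 1.0 - (0.04 + 0.25) = 0.71 >= 0.15, so parallelism is used.
   print(benefit(0.0001, 10000, 4))
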
--- a/relnotes/next	Wed Oct 02 12:20:36 2019 -0400
+++ b/relnotes/next	Mon Oct 21 11:09:48 2019 -0400
@@ -1,5 +1,12 @@
 == New Features ==
 
+ * The amend extension supports the `--currentuser` argument.
+
+ * The amend extension supports the `--close-branch` argument.
+
+ * The amend extension supports the `--secret` argument.
+
+ * The uncommit extension supports the `rewrite.update-timestamp` config option.
 
 == New Experimental Features ==
 
@@ -9,6 +16,15 @@
 
 == Backwards Compatibility Changes ==
 
+ * A shell that supports `$(command)` syntax for command substitution is now
+   required for running the test suite. The test runner normally uses
+   `sh`, so if that is a shell that doesn't support `$(command)` syntax,
+   you can override it by setting `$HGTEST_SHELL` or by passing it to
+   `run-tests.py --shell <shell>`.
+
+ * The (experimental) narrow extension's wire protocol changed. If
+   you're using it, you'll need to make sure to upgrade server and
+   client at the same time.
 
 == Internal API Changes ==
 
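A possible session exercising the new amend and uncommit features noted
above (assuming both extensions are enabled; the boolean spelling of the
config value is one of several that hgrc accepts)::

   $ hg amend --currentuser --secret
   $ hg amend --close-branch
   $ hg uncommit --config rewrite.update-timestamp=True
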
--- a/rust/Cargo.lock	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/Cargo.lock	Mon Oct 21 11:09:48 2019 -0400
@@ -2,25 +2,38 @@
 # It is not intended for manual editing.
 [[package]]
 name = "aho-corasick"
-version = "0.6.9"
+version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "arrayvec"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "autocfg"
-version = "0.1.2"
+version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "bitflags"
-version = "1.0.4"
+version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "byteorder"
-version = "1.3.1"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -28,46 +41,89 @@
 version = "0.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "cpython"
-version = "0.2.1"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "python27-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "python3-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python27-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python3-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "crossbeam-epoch"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-queue"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.6.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "either"
+version = "1.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "fuchsia-cprng"
-version = "0.1.0"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "hg-core"
 version = "0.1.0"
 dependencies = [
- "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_pcg 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "hg-cpython"
 version = "0.1.0"
 dependencies = [
- "cpython 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cpython 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "hg-core 0.1.0",
- "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
- "python27-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "python3-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -75,45 +131,69 @@
 version = "0.1.0"
 dependencies = [
  "hg-core 0.1.0",
- "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "lazy_static"
-version = "1.3.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "libc"
-version = "0.2.45"
+version = "0.2.64"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "memchr"
-version = "2.2.0"
+version = "2.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "memoffset"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "nodrop"
+version = "0.1.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "num-traits"
-version = "0.2.6"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 
 [[package]]
 name = "python27-sys"
-version = "0.2.1"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "python3-sys"
-version = "0.2.1"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -121,17 +201,17 @@
 version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_jitter 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_os 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_pcg 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -139,7 +219,7 @@
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -148,12 +228,12 @@
 version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rand_core"
-version = "0.4.0"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -174,34 +254,34 @@
 
 [[package]]
 name = "rand_jitter"
-version = "0.1.2"
+version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rand_os"
-version = "0.1.2"
+version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "fuchsia-cprng 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "rand_pcg"
-version = "0.1.1"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -213,6 +293,28 @@
 ]
 
 [[package]]
+name = "rayon"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "rdrand"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -222,23 +324,19 @@
 
 [[package]]
 name = "regex"
-version = "1.1.0"
+version = "1.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "aho-corasick 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "utf8-ranges 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.4"
+version = "0.6.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "ucd-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
-]
 
 [[package]]
 name = "rustc_version"
@@ -249,6 +347,11 @@
 ]
 
 [[package]]
+name = "scopeguard"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "semver"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -266,22 +369,12 @@
 version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "ucd-util"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "utf8-ranges"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
 name = "winapi"
-version = "0.3.6"
+version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -299,38 +392,49 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [metadata]
-"checksum aho-corasick 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "1e9a933f4e58658d7b12defcf96dc5c720f20832deebe3e0a19efd3b6aaeeb9e"
-"checksum autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6d640bee2da49f60a4068a7fae53acde8982514ab7bae8b8cea9e88cbcfd799"
-"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
-"checksum byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb"
+"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d"
+"checksum arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9"
+"checksum autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b671c8fb71b457dd4ae18c4ba1e59aa81793daacc361d82fcd410cef0d491875"
+"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5"
+"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
 "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
-"checksum cpython 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b489034e723e7f5109fecd19b719e664f89ef925be785885252469e9822fa940"
-"checksum fuchsia-cprng 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "81f7f8eb465745ea9b02e2704612a9946a59fa40572086c6fd49d6ddcf30bf31"
-"checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14"
-"checksum libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)" = "2d2857ec59fadc0773853c664d2d18e7198e83883e7060b63c924cb077bd5c74"
-"checksum memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2efc7bc57c883d4a4d6e3246905283d8dae951bb3bd32f49d6ef297f546e1c39"
-"checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1"
-"checksum python27-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "56114c37d4dca82526d74009df7782a28c871ac9d36b19d4cb9e67672258527e"
-"checksum python3-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "61e4aac43f833fd637e429506cb2ac9d7df672c4b68f2eaaa163649b7fdc0444"
+"checksum cpython 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "85532c648315aeb0829ad216a6a29aa3212cf9319bc7f6daf1404aa0bdd1485f"
+"checksum crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71"
+"checksum crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9"
+"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b"
+"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6"
+"checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
+"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
+"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+"checksum libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)" = "74dfca3d9957906e8d1e6a0b641dc9a59848e793f1da2165889fd4f62d10d79c"
+"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e"
+"checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f"
+"checksum nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"
+"checksum num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "6ba9a427cfca2be13aa6f6403b0b7e7368fe982bfa16fccc450ce74c46cd9b32"
+"checksum num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcef43580c035376c0705c42792c294b66974abbfd2789b511784023f71f3273"
+"checksum python27-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "372555e88a6bc8109eb641380240dc8d25a128fc48363ec9075664daadffdd5b"
+"checksum python3-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f3a8ebed3f1201fda179f3960609dbbc10cd8c75e9f2afcb03788278f367d8ea"
 "checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
 "checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
 "checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
-"checksum rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d0e7a549d590831370895ab7ba4ea0c1b6b011d106b5ff2da6eee112615e6dc0"
+"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
 "checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
 "checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
-"checksum rand_jitter 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "080723c6145e37503a2224f801f252e14ac5531cb450f4502698542d188cb3c0"
-"checksum rand_os 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b7c690732391ae0abafced5015ffb53656abfaec61b342290e5eb56b286a679d"
-"checksum rand_pcg 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "086bd09a33c7044e56bb44d5bdde5a60e7f119a9e95b0775f545de759a32fe05"
+"checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b"
+"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
+"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
 "checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
+"checksum rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "83a27732a533a1be0a0035a111fe76db89ad312f6f0347004c220c57f209a123"
+"checksum rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98dcf634205083b17d0861252431eb2acbfb698ab7478a2d20de07954f47ec7b"
 "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-"checksum regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "37e7cbbd370869ce2e8dff25c7018702d10b21a20ef7135316f8daecd6c25b7f"
-"checksum regex-syntax 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4e47a2ed29da7a9e1960e1639e7a982e6edc6d49be308a3b02daf511504a16d1"
+"checksum regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd"
+"checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716"
 "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d"
 "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
 "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
 "checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
-"checksum ucd-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535c204ee4d8434478593480b8f86ab45ec9aae0e83c568ca81abf0fd0e88f86"
-"checksum utf8-ranges 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "796f7e48bef87609f7ade7e06495a87d5cd06c7866e6a5cbfceffc558a243737"
-"checksum winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "92c1eb33641e276cfa214a0522acad57be5c56b10cb348b3c5117db75f3ac4b0"
+"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
 "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--- a/rust/hg-core/Cargo.toml	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-core/Cargo.toml	Mon Oct 21 11:09:48 2019 -0400
@@ -8,12 +8,11 @@
 [lib]
 name = "hg"
 
-[dev-dependencies]
-rand = "*"
-rand_pcg = "*"
-
 [dependencies]
 byteorder = "1.3.1"
 lazy_static = "1.3.0"
 memchr = "2.2.0"
-regex = "^1.1"
+rand = "0.6.5"
+rand_pcg = "0.1.1"
+regex = "1.1.0"
+rayon = "1.2.0"
--- a/rust/hg-core/src/ancestors.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-core/src/ancestors.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -784,5 +784,4 @@
         missing_ancestors.remove_ancestors_from(&mut revs).unwrap();
         assert!(!revs.contains(&problem_rev));
     }
-
 }
--- a/rust/hg-core/src/dagops.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-core/src/dagops.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -8,10 +8,10 @@
 //! Miscellaneous DAG operations
 //!
 //! # Terminology
-//! - By *relative heads* of a collection of revision numbers (`Revision`),
-//!   we mean those revisions that have no children among the collection.
-//! - Similarly *relative roots* of a collection of `Revision`, we mean
-//!   those whose parents, if any, don't belong to the collection.
+//! - By *relative heads* of a collection of revision numbers (`Revision`), we
+//!   mean those revisions that have no children among the collection.
+//! - Similarly *relative roots* of a collection of `Revision`, we mean those
+//!   whose parents, if any, don't belong to the collection.
 use super::{Graph, GraphError, Revision, NULL_REVISION};
 use crate::ancestors::AncestorsIterator;
 use std::collections::{BTreeSet, HashSet};
@@ -272,5 +272,4 @@
         );
         Ok(())
     }
-
 }
--- a/rust/hg-core/src/dirstate.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-core/src/dirstate.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -1,36 +1,77 @@
-pub mod dirs_multiset;
-pub mod parsers;
+// dirstate module
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
 
-#[derive(Debug, PartialEq, Copy, Clone)]
-pub struct DirstateParents<'a> {
-    pub p1: &'a [u8],
-    pub p2: &'a [u8],
+use crate::{utils::hg_path::HgPathBuf, DirstateParseError};
+use std::collections::hash_map;
+use std::collections::HashMap;
+use std::convert::TryFrom;
+
+pub mod dirs_multiset;
+pub mod dirstate_map;
+pub mod parsers;
+pub mod status;
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct DirstateParents {
+    pub p1: [u8; 20],
+    pub p2: [u8; 20],
 }
 
 /// The C implementation uses all signed types. This will be an issue
 /// either when 4GB+ source files are commonplace or in 2038, whichever
 /// comes first.
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Copy, Clone)]
 pub struct DirstateEntry {
-    pub state: i8,
+    pub state: EntryState,
     pub mode: i32,
     pub mtime: i32,
     pub size: i32,
 }
 
-pub type DirstateVec = Vec<(Vec<u8>, DirstateEntry)>;
+pub type StateMap = HashMap<HgPathBuf, DirstateEntry>;
+pub type StateMapIter<'a> = hash_map::Iter<'a, HgPathBuf, DirstateEntry>;
+pub type CopyMap = HashMap<HgPathBuf, HgPathBuf>;
+pub type CopyMapIter<'a> = hash_map::Iter<'a, HgPathBuf, HgPathBuf>;
 
-#[derive(Debug, PartialEq)]
-pub struct CopyVecEntry<'a> {
-    pub path: &'a [u8],
-    pub copy_path: &'a [u8],
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum EntryState {
+    Normal,
+    Added,
+    Removed,
+    Merged,
+    Unknown,
 }
 
-pub type CopyVec<'a> = Vec<CopyVecEntry<'a>>;
+impl TryFrom<u8> for EntryState {
+    type Error = DirstateParseError;
 
-/// The Python implementation passes either a mapping (dirstate) or a flat
-/// iterable (manifest)
-pub enum DirsIterable {
-    Dirstate(DirstateVec),
-    Manifest(Vec<Vec<u8>>),
+    fn try_from(value: u8) -> Result<Self, Self::Error> {
+        match value {
+            b'n' => Ok(EntryState::Normal),
+            b'a' => Ok(EntryState::Added),
+            b'r' => Ok(EntryState::Removed),
+            b'm' => Ok(EntryState::Merged),
+            b'?' => Ok(EntryState::Unknown),
+            _ => Err(DirstateParseError::CorruptedEntry(format!(
+                "Incorrect entry state {}",
+                value
+            ))),
+        }
+    }
 }
+
+impl Into<u8> for EntryState {
+    fn into(self) -> u8 {
+        match self {
+            EntryState::Normal => b'n',
+            EntryState::Added => b'a',
+            EntryState::Removed => b'r',
+            EntryState::Merged => b'm',
+            EntryState::Unknown => b'?',
+        }
+    }
+}
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -8,51 +8,64 @@
 //! A multiset of directory names.
 //!
 //! Used to count the references to directories in a manifest or dirstate.
-use crate::{utils::files, DirsIterable, DirstateEntry, DirstateMapError};
-use std::collections::hash_map::{Entry, Iter};
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::{
+    dirstate::EntryState, utils::files, DirstateEntry, DirstateMapError,
+};
+use std::collections::hash_map::{self, Entry};
 use std::collections::HashMap;
 
+// could be encapsulated if we cared about API stability more seriously
+pub type DirsMultisetIter<'a> = hash_map::Keys<'a, HgPathBuf, u32>;
+
 #[derive(PartialEq, Debug)]
 pub struct DirsMultiset {
-    inner: HashMap<Vec<u8>, u32>,
+    inner: HashMap<HgPathBuf, u32>,
 }
 
 impl DirsMultiset {
-    /// Initializes the multiset from a dirstate or a manifest.
+    /// Initializes the multiset from a dirstate.
     ///
     /// If `skip_state` is provided, skips dirstate entries with equal state.
-    pub fn new(iterable: DirsIterable, skip_state: Option<i8>) -> Self {
+    pub fn from_dirstate(
+        vec: &HashMap<HgPathBuf, DirstateEntry>,
+        skip_state: Option<EntryState>,
+    ) -> Self {
         let mut multiset = DirsMultiset {
             inner: HashMap::new(),
         };
 
-        match iterable {
-            DirsIterable::Dirstate(vec) => {
-                for (ref filename, DirstateEntry { state, .. }) in vec {
-                    // This `if` is optimized out of the loop
-                    if let Some(skip) = skip_state {
-                        if skip != state {
-                            multiset.add_path(filename);
-                        }
-                    } else {
-                        multiset.add_path(filename);
-                    }
-                }
-            }
-            DirsIterable::Manifest(vec) => {
-                for ref filename in vec {
+        for (filename, DirstateEntry { state, .. }) in vec {
+            // This `if` is optimized out of the loop
+            if let Some(skip) = skip_state {
+                if skip != *state {
                     multiset.add_path(filename);
                 }
+            } else {
+                multiset.add_path(filename);
             }
         }
 
         multiset
     }
 
+    /// Initializes the multiset from a manifest.
+    pub fn from_manifest(vec: &[HgPathBuf]) -> Self {
+        let mut multiset = DirsMultiset {
+            inner: HashMap::new(),
+        };
+
+        for filename in vec {
+            multiset.add_path(filename);
+        }
+
+        multiset
+    }
+
     /// Increases the count of deepest directory contained in the path.
     ///
     /// If the directory is not yet in the map, adds its parents.
-    pub fn add_path(&mut self, path: &[u8]) {
+    pub fn add_path(&mut self, path: &HgPath) {
         for subpath in files::find_dirs(path) {
             if let Some(val) = self.inner.get_mut(subpath) {
                 *val += 1;
@@ -69,7 +82,7 @@
     /// If the directory is not in the map, something horrible has happened.
     pub fn delete_path(
         &mut self,
-        path: &[u8],
+        path: &HgPath,
     ) -> Result<(), DirstateMapError> {
         for subpath in files::find_dirs(path) {
             match self.inner.entry(subpath.to_owned()) {
@@ -92,12 +105,12 @@
         Ok(())
     }
 
-    pub fn contains_key(&self, key: &[u8]) -> bool {
+    pub fn contains(&self, key: &HgPath) -> bool {
         self.inner.contains_key(key)
     }
 
-    pub fn iter(&self) -> Iter<Vec<u8>, u32> {
-        self.inner.iter()
+    pub fn iter(&self) -> DirsMultisetIter {
+        self.inner.keys()
     }
 
     pub fn len(&self) -> usize {
@@ -108,25 +121,25 @@
 #[cfg(test)]
 mod tests {
     use super::*;
+    use std::collections::HashMap;
 
     #[test]
     fn test_delete_path_path_not_found() {
-        let mut map = DirsMultiset::new(DirsIterable::Manifest(vec![]), None);
-        let path = b"doesnotexist/";
+        let mut map = DirsMultiset::from_manifest(&vec![]);
+        let path = HgPathBuf::from_bytes(b"doesnotexist/");
         assert_eq!(
-            Err(DirstateMapError::PathNotFound(path.to_vec())),
-            map.delete_path(path)
+            Err(DirstateMapError::PathNotFound(path.to_owned())),
+            map.delete_path(&path)
         );
     }
 
     #[test]
     fn test_delete_path_empty_path() {
-        let mut map =
-            DirsMultiset::new(DirsIterable::Manifest(vec![vec![]]), None);
-        let path = b"";
+        let mut map = DirsMultiset::from_manifest(&vec![HgPathBuf::new()]);
+        let path = HgPath::new(b"");
         assert_eq!(Ok(()), map.delete_path(path));
         assert_eq!(
-            Err(DirstateMapError::PathNotFound(path.to_vec())),
+            Err(DirstateMapError::PathNotFound(path.to_owned())),
             map.delete_path(path)
         );
     }
@@ -136,34 +149,40 @@
         let mut map = DirsMultiset {
             inner: [("", 5), ("a", 3), ("a/b", 2), ("a/c", 1)]
                 .iter()
-                .map(|(k, v)| (k.as_bytes().to_vec(), *v))
+                .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
                 .collect(),
         };
 
-        assert_eq!(Ok(()), map.delete_path(b"a/b/"));
-        assert_eq!(Ok(()), map.delete_path(b"a/b/"));
+        assert_eq!(Ok(()), map.delete_path(HgPath::new(b"a/b/")));
+        eprintln!("{:?}", map);
+        assert_eq!(Ok(()), map.delete_path(HgPath::new(b"a/b/")));
+        eprintln!("{:?}", map);
         assert_eq!(
-            Err(DirstateMapError::PathNotFound(b"a/b/".to_vec())),
-            map.delete_path(b"a/b/")
+            Err(DirstateMapError::PathNotFound(HgPathBuf::from_bytes(
+                b"a/b/"
+            ))),
+            map.delete_path(HgPath::new(b"a/b/"))
         );
 
-        assert_eq!(2, *map.inner.get(&b"a".to_vec()).unwrap());
-        assert_eq!(1, *map.inner.get(&b"a/c".to_vec()).unwrap());
+        assert_eq!(2, *map.inner.get(HgPath::new(b"a")).unwrap());
+        assert_eq!(1, *map.inner.get(HgPath::new(b"a/c")).unwrap());
         eprintln!("{:?}", map);
-        assert_eq!(Ok(()), map.delete_path(b"a/"));
+        assert_eq!(Ok(()), map.delete_path(HgPath::new(b"a/")));
         eprintln!("{:?}", map);
 
-        assert_eq!(Ok(()), map.delete_path(b"a/c/"));
+        assert_eq!(Ok(()), map.delete_path(HgPath::new(b"a/c/")));
         assert_eq!(
-            Err(DirstateMapError::PathNotFound(b"a/c/".to_vec())),
-            map.delete_path(b"a/c/")
+            Err(DirstateMapError::PathNotFound(HgPathBuf::from_bytes(
+                b"a/c/"
+            ))),
+            map.delete_path(HgPath::new(b"a/c/"))
         );
     }
 
     #[test]
     fn test_add_path_empty_path() {
-        let mut map = DirsMultiset::new(DirsIterable::Manifest(vec![]), None);
-        let path = b"";
+        let mut map = DirsMultiset::from_manifest(&vec![]);
+        let path = HgPath::new(b"");
         map.add_path(path);
 
         assert_eq!(1, map.len());
@@ -171,44 +190,44 @@
 
     #[test]
     fn test_add_path_successful() {
-        let mut map = DirsMultiset::new(DirsIterable::Manifest(vec![]), None);
+        let mut map = DirsMultiset::from_manifest(&vec![]);
 
-        map.add_path(b"a/");
-        assert_eq!(1, *map.inner.get(&b"a".to_vec()).unwrap());
-        assert_eq!(1, *map.inner.get(&Vec::new()).unwrap());
+        map.add_path(HgPath::new(b"a/"));
+        assert_eq!(1, *map.inner.get(HgPath::new(b"a")).unwrap());
+        assert_eq!(1, *map.inner.get(HgPath::new(b"")).unwrap());
         assert_eq!(2, map.len());
 
         // Non directory should be ignored
-        map.add_path(b"a");
-        assert_eq!(1, *map.inner.get(&b"a".to_vec()).unwrap());
+        map.add_path(HgPath::new(b"a"));
+        assert_eq!(1, *map.inner.get(HgPath::new(b"a")).unwrap());
         assert_eq!(2, map.len());
 
         // Non directory will still add its base
-        map.add_path(b"a/b");
-        assert_eq!(2, *map.inner.get(&b"a".to_vec()).unwrap());
+        map.add_path(HgPath::new(b"a/b"));
+        assert_eq!(2, *map.inner.get(HgPath::new(b"a")).unwrap());
         assert_eq!(2, map.len());
 
         // Duplicate path works
-        map.add_path(b"a/");
-        assert_eq!(3, *map.inner.get(&b"a".to_vec()).unwrap());
+        map.add_path(HgPath::new(b"a/"));
+        assert_eq!(3, *map.inner.get(HgPath::new(b"a")).unwrap());
 
         // Nested dir adds to its base
-        map.add_path(b"a/b/");
-        assert_eq!(4, *map.inner.get(&b"a".to_vec()).unwrap());
-        assert_eq!(1, *map.inner.get(&b"a/b".to_vec()).unwrap());
+        map.add_path(HgPath::new(b"a/b/"));
+        assert_eq!(4, *map.inner.get(HgPath::new(b"a")).unwrap());
+        assert_eq!(1, *map.inner.get(HgPath::new(b"a/b")).unwrap());
 
         // but not its base's base, because it already existed
-        map.add_path(b"a/b/c/");
-        assert_eq!(4, *map.inner.get(&b"a".to_vec()).unwrap());
-        assert_eq!(2, *map.inner.get(&b"a/b".to_vec()).unwrap());
+        map.add_path(HgPath::new(b"a/b/c/"));
+        assert_eq!(4, *map.inner.get(HgPath::new(b"a")).unwrap());
+        assert_eq!(2, *map.inner.get(HgPath::new(b"a/b")).unwrap());
 
-        map.add_path(b"a/c/");
-        assert_eq!(1, *map.inner.get(&b"a/c".to_vec()).unwrap());
+        map.add_path(HgPath::new(b"a/c/"));
+        assert_eq!(1, *map.inner.get(HgPath::new(b"a/c")).unwrap());
 
         let expected = DirsMultiset {
             inner: [("", 2), ("a", 5), ("a/b", 2), ("a/b/c", 1), ("a/c", 1)]
                 .iter()
-                .map(|(k, v)| (k.as_bytes().to_vec(), *v))
+                .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
                 .collect(),
         };
         assert_eq!(map, expected);
@@ -216,15 +235,13 @@
 
     #[test]
     fn test_dirsmultiset_new_empty() {
-        use DirsIterable::{Dirstate, Manifest};
-
-        let new = DirsMultiset::new(Manifest(vec![]), None);
+        let new = DirsMultiset::from_manifest(&vec![]);
         let expected = DirsMultiset {
             inner: HashMap::new(),
         };
         assert_eq!(expected, new);
 
-        let new = DirsMultiset::new(Dirstate(vec![]), None);
+        let new = DirsMultiset::from_dirstate(&HashMap::new(), None);
         let expected = DirsMultiset {
             inner: HashMap::new(),
         };
@@ -233,18 +250,16 @@
 
     #[test]
     fn test_dirsmultiset_new_no_skip() {
-        use DirsIterable::{Dirstate, Manifest};
-
         let input_vec = ["a/", "b/", "a/c", "a/d/"]
             .iter()
-            .map(|e| e.as_bytes().to_vec())
+            .map(|e| HgPathBuf::from_bytes(e.as_bytes()))
             .collect();
         let expected_inner = [("", 2), ("a", 3), ("b", 1), ("a/d", 1)]
             .iter()
-            .map(|(k, v)| (k.as_bytes().to_vec(), *v))
+            .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
             .collect();
 
-        let new = DirsMultiset::new(Manifest(input_vec), None);
+        let new = DirsMultiset::from_manifest(&input_vec);
         let expected = DirsMultiset {
             inner: expected_inner,
         };
@@ -254,9 +269,9 @@
             .iter()
             .map(|f| {
                 (
-                    f.as_bytes().to_vec(),
+                    HgPathBuf::from_bytes(f.as_bytes()),
                     DirstateEntry {
-                        state: 0,
+                        state: EntryState::Normal,
                         mode: 0,
                         mtime: 0,
                         size: 0,
@@ -266,10 +281,10 @@
             .collect();
         let expected_inner = [("", 2), ("a", 3), ("b", 1), ("a/d", 1)]
             .iter()
-            .map(|(k, v)| (k.as_bytes().to_vec(), *v))
+            .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
             .collect();
 
-        let new = DirsMultiset::new(Dirstate(input_map), None);
+        let new = DirsMultiset::from_dirstate(&input_map, None);
         let expected = DirsMultiset {
             inner: expected_inner,
         };
@@ -278,51 +293,37 @@
 
     #[test]
     fn test_dirsmultiset_new_skip() {
-        use DirsIterable::{Dirstate, Manifest};
-
-        let input_vec = ["a/", "b/", "a/c", "a/d/"]
-            .iter()
-            .map(|e| e.as_bytes().to_vec())
-            .collect();
-        let expected_inner = [("", 2), ("a", 3), ("b", 1), ("a/d", 1)]
-            .iter()
-            .map(|(k, v)| (k.as_bytes().to_vec(), *v))
-            .collect();
-
-        let new = DirsMultiset::new(Manifest(input_vec), Some('n' as i8));
-        let expected = DirsMultiset {
-            inner: expected_inner,
-        };
-        // Skip does not affect a manifest
-        assert_eq!(expected, new);
-
-        let input_map =
-            [("a/", 'n'), ("a/b/", 'n'), ("a/c", 'r'), ("a/d/", 'm')]
-                .iter()
-                .map(|(f, state)| {
-                    (
-                        f.as_bytes().to_vec(),
-                        DirstateEntry {
-                            state: *state as i8,
-                            mode: 0,
-                            mtime: 0,
-                            size: 0,
-                        },
-                    )
-                })
-                .collect();
+        let input_map = [
+            ("a/", EntryState::Normal),
+            ("a/b/", EntryState::Normal),
+            ("a/c", EntryState::Removed),
+            ("a/d/", EntryState::Merged),
+        ]
+        .iter()
+        .map(|(f, state)| {
+            (
+                HgPathBuf::from_bytes(f.as_bytes()),
+                DirstateEntry {
+                    state: *state,
+                    mode: 0,
+                    mtime: 0,
+                    size: 0,
+                },
+            )
+        })
+        .collect();
 
         // "a" incremented with "a/c" and "a/d/"
         let expected_inner = [("", 1), ("a", 2), ("a/d", 1)]
             .iter()
-            .map(|(k, v)| (k.as_bytes().to_vec(), *v))
+            .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
             .collect();
 
-        let new = DirsMultiset::new(Dirstate(input_map), Some('n' as i8));
+        let new =
+            DirsMultiset::from_dirstate(&input_map, Some(EntryState::Normal));
         let expected = DirsMultiset {
             inner: expected_inner,
         };
         assert_eq!(expected, new);
     }
-
 }
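
A usage sketch (again, illustrative rather than part of the patch) for the
split constructors above: ``from_manifest`` counts every path it is given,
while ``from_dirstate`` can additionally skip entries in a given state::

   let files = vec![
       HgPathBuf::from_bytes(b"a/b"),
       HgPathBuf::from_bytes(b"a/c"),
   ];
   let dirs = DirsMultiset::from_manifest(&files);

   // Both files share the ancestor directories "a" and "" (the root),
   // so the multiset holds exactly those two keys.
   assert!(dirs.contains(HgPath::new(b"a")));
   assert_eq!(2, dirs.len());
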
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,424 @@
+// dirstate_map.rs
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::{
+    dirstate::{parsers::PARENT_SIZE, EntryState},
+    pack_dirstate, parse_dirstate,
+    utils::files::normalize_case,
+    CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateMapError,
+    DirstateParents, DirstateParseError, StateMap,
+};
+use core::borrow::Borrow;
+use std::collections::{HashMap, HashSet};
+use std::convert::TryInto;
+use std::iter::FromIterator;
+use std::ops::Deref;
+use std::time::Duration;
+
+pub type FileFoldMap = HashMap<HgPathBuf, HgPathBuf>;
+
+const NULL_ID: [u8; 20] = [0; 20];
+const MTIME_UNSET: i32 = -1;
+const SIZE_DIRTY: i32 = -2;
+
+#[derive(Default)]
+pub struct DirstateMap {
+    state_map: StateMap,
+    pub copy_map: CopyMap,
+    file_fold_map: Option<FileFoldMap>,
+    pub dirs: Option<DirsMultiset>,
+    pub all_dirs: Option<DirsMultiset>,
+    non_normal_set: HashSet<HgPathBuf>,
+    other_parent_set: HashSet<HgPathBuf>,
+    parents: Option<DirstateParents>,
+    dirty_parents: bool,
+}
+
+/// Should only really be used in Python interface code, for clarity.
+impl Deref for DirstateMap {
+    type Target = StateMap;
+
+    fn deref(&self) -> &Self::Target {
+        &self.state_map
+    }
+}
+
+impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
+    fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
+        iter: I,
+    ) -> Self {
+        Self {
+            state_map: iter.into_iter().collect(),
+            ..Self::default()
+        }
+    }
+}
+
+impl DirstateMap {
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn clear(&mut self) {
+        self.state_map.clear();
+        self.copy_map.clear();
+        self.file_fold_map = None;
+        self.non_normal_set.clear();
+        self.other_parent_set.clear();
+        self.set_parents(&DirstateParents {
+            p1: NULL_ID,
+            p2: NULL_ID,
+        })
+    }
+
+    /// Add a tracked file to the dirstate
+    pub fn add_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+        entry: DirstateEntry,
+    ) {
+        if old_state == EntryState::Unknown || old_state == EntryState::Removed
+        {
+            if let Some(ref mut dirs) = self.dirs {
+                dirs.add_path(filename)
+            }
+        }
+        if old_state == EntryState::Unknown {
+            if let Some(ref mut all_dirs) = self.all_dirs {
+                all_dirs.add_path(filename)
+            }
+        }
+        self.state_map.insert(filename.to_owned(), entry.to_owned());
+
+        if entry.state != EntryState::Normal || entry.mtime == MTIME_UNSET {
+            self.non_normal_set.insert(filename.to_owned());
+        }
+
+        if entry.size == SIZE_DIRTY {
+            self.other_parent_set.insert(filename.to_owned());
+        }
+    }
+
+    /// Mark a file as removed in the dirstate.
+    ///
+    /// The `size` parameter is used to store sentinel values that indicate
+    /// the file's previous state.  In the future, we should refactor this
+    /// to be more explicit about what that state is.
+    pub fn remove_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+        size: i32,
+    ) -> Result<(), DirstateMapError> {
+        if old_state != EntryState::Unknown && old_state != EntryState::Removed
+        {
+            if let Some(ref mut dirs) = self.dirs {
+                dirs.delete_path(filename)?;
+            }
+        }
+        if old_state == EntryState::Unknown {
+            if let Some(ref mut all_dirs) = self.all_dirs {
+                all_dirs.add_path(filename);
+            }
+        }
+
+        if let Some(ref mut file_fold_map) = self.file_fold_map {
+            file_fold_map.remove(&normalize_case(filename));
+        }
+        self.state_map.insert(
+            filename.to_owned(),
+            DirstateEntry {
+                state: EntryState::Removed,
+                mode: 0,
+                size,
+                mtime: 0,
+            },
+        );
+        self.non_normal_set.insert(filename.to_owned());
+        Ok(())
+    }
+
+    /// Remove a file from the dirstate.
+    /// Returns `true` if the file was previously recorded.
+    pub fn drop_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+    ) -> Result<bool, DirstateMapError> {
+        let exists = self.state_map.remove(filename).is_some();
+
+        if exists {
+            if old_state != EntryState::Removed {
+                if let Some(ref mut dirs) = self.dirs {
+                    dirs.delete_path(filename)?;
+                }
+            }
+            if let Some(ref mut all_dirs) = self.all_dirs {
+                all_dirs.delete_path(filename)?;
+            }
+        }
+        if let Some(ref mut file_fold_map) = self.file_fold_map {
+            file_fold_map.remove(&normalize_case(filename));
+        }
+        self.non_normal_set.remove(filename);
+
+        Ok(exists)
+    }
+
+    pub fn clear_ambiguous_times(
+        &mut self,
+        filenames: Vec<HgPathBuf>,
+        now: i32,
+    ) {
+        for filename in filenames {
+            let mut changed = false;
+            self.state_map
+                .entry(filename.to_owned())
+                .and_modify(|entry| {
+                    if entry.state == EntryState::Normal && entry.mtime == now
+                    {
+                        changed = true;
+                        *entry = DirstateEntry {
+                            mtime: MTIME_UNSET,
+                            ..*entry
+                        };
+                    }
+                });
+            if changed {
+                self.non_normal_set.insert(filename.to_owned());
+            }
+        }
+    }
+
+    pub fn non_normal_other_parent_entries(
+        &self,
+    ) -> (HashSet<HgPathBuf>, HashSet<HgPathBuf>) {
+        let mut non_normal = HashSet::new();
+        let mut other_parent = HashSet::new();
+
+        for (
+            filename,
+            DirstateEntry {
+                state, size, mtime, ..
+            },
+        ) in self.state_map.iter()
+        {
+            if *state != EntryState::Normal || *mtime == MTIME_UNSET {
+                non_normal.insert(filename.to_owned());
+            }
+            if *state == EntryState::Normal && *size == SIZE_DIRTY {
+                other_parent.insert(filename.to_owned());
+            }
+        }
+
+        (non_normal, other_parent)
+    }
+
+    /// Both of these setters and their uses appear to be the simplest way to
+    /// emulate a Python lazy property, but it is ugly and unidiomatic.
+    /// TODO One day, rewriting this struct using the typestate pattern might
+    /// be a good idea.
+    pub fn set_all_dirs(&mut self) {
+        if self.all_dirs.is_none() {
+            self.all_dirs =
+                Some(DirsMultiset::from_dirstate(&self.state_map, None));
+        }
+    }
+
+    pub fn set_dirs(&mut self) {
+        if self.dirs.is_none() {
+            self.dirs = Some(DirsMultiset::from_dirstate(
+                &self.state_map,
+                Some(EntryState::Removed),
+            ));
+        }
+    }
+
+    pub fn has_tracked_dir(&mut self, directory: &HgPath) -> bool {
+        self.set_dirs();
+        self.dirs.as_ref().unwrap().contains(directory)
+    }
+
+    pub fn has_dir(&mut self, directory: &HgPath) -> bool {
+        self.set_all_dirs();
+        self.all_dirs.as_ref().unwrap().contains(directory)
+    }
+
+    pub fn parents(
+        &mut self,
+        file_contents: &[u8],
+    ) -> Result<&DirstateParents, DirstateError> {
+        if let Some(ref parents) = self.parents {
+            return Ok(parents);
+        }
+        let parents;
+        if file_contents.len() == PARENT_SIZE * 2 {
+            parents = DirstateParents {
+                p1: file_contents[..PARENT_SIZE].try_into().unwrap(),
+                p2: file_contents[PARENT_SIZE..PARENT_SIZE * 2]
+                    .try_into()
+                    .unwrap(),
+            };
+        } else if file_contents.is_empty() {
+            parents = DirstateParents {
+                p1: NULL_ID,
+                p2: NULL_ID,
+            };
+        } else {
+            return Err(DirstateError::Parse(DirstateParseError::Damaged));
+        }
+
+        self.parents = Some(parents);
+        Ok(self.parents.as_ref().unwrap())
+    }
+
+    pub fn set_parents(&mut self, parents: &DirstateParents) {
+        self.parents = Some(parents.clone());
+        self.dirty_parents = true;
+    }
+
+    pub fn read(
+        &mut self,
+        file_contents: &[u8],
+    ) -> Result<Option<DirstateParents>, DirstateError> {
+        if file_contents.is_empty() {
+            return Ok(None);
+        }
+
+        let parents = parse_dirstate(
+            &mut self.state_map,
+            &mut self.copy_map,
+            file_contents,
+        )?;
+
+        if !self.dirty_parents {
+            self.set_parents(&parents);
+        }
+
+        Ok(Some(parents))
+    }
+
+    pub fn pack(
+        &mut self,
+        parents: DirstateParents,
+        now: Duration,
+    ) -> Result<Vec<u8>, DirstateError> {
+        let packed =
+            pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
+
+        self.dirty_parents = false;
+
+        let result = self.non_normal_other_parent_entries();
+        self.non_normal_set = result.0;
+        self.other_parent_set = result.1;
+        Ok(packed)
+    }
+
+    pub fn build_file_fold_map(&mut self) -> &FileFoldMap {
+        if let Some(ref file_fold_map) = self.file_fold_map {
+            return file_fold_map;
+        }
+        let mut new_file_fold_map = FileFoldMap::new();
+        for (filename, DirstateEntry { state, .. }) in self.state_map.borrow()
+        {
+            if *state == EntryState::Removed {
+                new_file_fold_map
+                    .insert(normalize_case(filename), filename.to_owned());
+            }
+        }
+        self.file_fold_map = Some(new_file_fold_map);
+        self.file_fold_map.as_ref().unwrap()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_dirs_multiset() {
+        let mut map = DirstateMap::new();
+        assert!(map.dirs.is_none());
+        assert!(map.all_dirs.is_none());
+
+        assert_eq!(false, map.has_dir(HgPath::new(b"nope")));
+        assert!(map.all_dirs.is_some());
+        assert!(map.dirs.is_none());
+
+        assert_eq!(false, map.has_tracked_dir(HgPath::new(b"nope")));
+        assert!(map.dirs.is_some());
+    }
+
+    #[test]
+    fn test_add_file() {
+        let mut map = DirstateMap::new();
+
+        assert_eq!(0, map.len());
+
+        map.add_file(
+            HgPath::new(b"meh"),
+            EntryState::Normal,
+            DirstateEntry {
+                state: EntryState::Normal,
+                mode: 1337,
+                mtime: 1337,
+                size: 1337,
+            },
+        );
+
+        assert_eq!(1, map.len());
+        assert_eq!(0, map.non_normal_set.len());
+        assert_eq!(0, map.other_parent_set.len());
+    }
+
+    #[test]
+    fn test_non_normal_other_parent_entries() {
+        let map: DirstateMap = [
+            (b"f1", (EntryState::Removed, 1337, 1337, 1337)),
+            (b"f2", (EntryState::Normal, 1337, 1337, -1)),
+            (b"f3", (EntryState::Normal, 1337, 1337, 1337)),
+            (b"f4", (EntryState::Normal, 1337, -2, 1337)),
+            (b"f5", (EntryState::Added, 1337, 1337, 1337)),
+            (b"f6", (EntryState::Added, 1337, 1337, -1)),
+            (b"f7", (EntryState::Merged, 1337, 1337, -1)),
+            (b"f8", (EntryState::Merged, 1337, 1337, 1337)),
+            (b"f9", (EntryState::Merged, 1337, -2, 1337)),
+            (b"fa", (EntryState::Added, 1337, -2, 1337)),
+            (b"fb", (EntryState::Removed, 1337, -2, 1337)),
+        ]
+        .iter()
+        .map(|(fname, (state, mode, size, mtime))| {
+            (
+                HgPathBuf::from_bytes(fname.as_ref()),
+                DirstateEntry {
+                    state: *state,
+                    mode: *mode,
+                    size: *size,
+                    mtime: *mtime,
+                },
+            )
+        })
+        .collect();
+
+        let non_normal = [
+            b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb",
+        ]
+        .iter()
+        .map(|x| HgPathBuf::from_bytes(x.as_ref()))
+        .collect();
+
+        let mut other_parent = HashSet::new();
+        other_parent.insert(HgPathBuf::from_bytes(b"f4"));
+
+        assert_eq!(
+            (non_normal, other_parent),
+            map.non_normal_other_parent_entries()
+        );
+    }
+}
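
A hedged sketch of the lazy ``dirs`` computation described in the comment
above (the file name and entry values are made up): the multiset is only
built on the first query::

   let mut map = DirstateMap::new();
   map.add_file(
       HgPath::new(b"dir/file"),
       EntryState::Unknown, // previously untracked
       DirstateEntry {
           state: EntryState::Normal,
           mode: 0o644,
           size: 0,
           mtime: 0,
       },
   );

   // `has_tracked_dir` builds `dirs` on demand via `set_dirs`,
   // then queries it.
   assert!(map.dirs.is_none());
   assert!(map.has_tracked_dir(HgPath::new(b"dir")));
   assert!(map.dirs.is_some());
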
--- a/rust/hg-core/src/dirstate/parsers.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-core/src/dirstate/parsers.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -3,32 +3,36 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
+use crate::utils::hg_path::HgPath;
 use crate::{
-    CopyVec, CopyVecEntry, DirstateEntry, DirstatePackError, DirstateParents,
-    DirstateParseError, DirstateVec,
+    dirstate::{CopyMap, EntryState, StateMap},
+    DirstateEntry, DirstatePackError, DirstateParents, DirstateParseError,
 };
 use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
-use std::collections::HashMap;
+use std::convert::{TryFrom, TryInto};
 use std::io::Cursor;
+use std::time::Duration;
 
 /// Parents are stored in the dirstate as byte hashes.
-const PARENT_SIZE: usize = 20;
+pub const PARENT_SIZE: usize = 20;
 /// Dirstate entries have a static part of 8 + 32 + 32 + 32 + 32 bits.
 const MIN_ENTRY_SIZE: usize = 17;
 
+// TODO parse/pack: is mutate-on-loop better for performance?
+
 pub fn parse_dirstate(
+    state_map: &mut StateMap,
+    copy_map: &mut CopyMap,
     contents: &[u8],
-) -> Result<(DirstateParents, DirstateVec, CopyVec), DirstateParseError> {
+) -> Result<DirstateParents, DirstateParseError> {
     if contents.len() < PARENT_SIZE * 2 {
         return Err(DirstateParseError::TooLittleData);
     }
 
-    let mut dirstate_vec = vec![];
-    let mut copies = vec![];
     let mut curr_pos = PARENT_SIZE * 2;
     let parents = DirstateParents {
-        p1: &contents[..PARENT_SIZE],
-        p2: &contents[PARENT_SIZE..curr_pos],
+        p1: contents[..PARENT_SIZE].try_into().unwrap(),
+        p2: contents[PARENT_SIZE..curr_pos].try_into().unwrap(),
     };
 
     while curr_pos < contents.len() {
@@ -38,7 +42,7 @@
         let entry_bytes = &contents[curr_pos..];
 
         let mut cursor = Cursor::new(entry_bytes);
-        let state = cursor.read_i8()?;
+        let state = EntryState::try_from(cursor.read_u8()?)?;
         let mode = cursor.read_i32::<BigEndian>()?;
         let size = cursor.read_i32::<BigEndian>()?;
         let mtime = cursor.read_i32::<BigEndian>()?;
@@ -57,38 +61,41 @@
         };
 
         if let Some(copy_path) = copy {
-            copies.push(CopyVecEntry { path, copy_path });
+            copy_map.insert(
+                HgPath::new(path).to_owned(),
+                HgPath::new(copy_path).to_owned(),
+            );
         };
-        dirstate_vec.push((
-            path.to_owned(),
+        state_map.insert(
+            HgPath::new(path).to_owned(),
             DirstateEntry {
                 state,
                 mode,
                 size,
                 mtime,
             },
-        ));
+        );
         curr_pos = curr_pos + MIN_ENTRY_SIZE + (path_len);
     }
 
-    Ok((parents, dirstate_vec, copies))
+    Ok(parents)
 }
 
+/// `now` is the duration in seconds since the Unix epoch
 pub fn pack_dirstate(
-    dirstate_vec: &DirstateVec,
-    copymap: &HashMap<Vec<u8>, Vec<u8>>,
+    state_map: &mut StateMap,
+    copy_map: &CopyMap,
     parents: DirstateParents,
-    now: i32,
-) -> Result<(Vec<u8>, DirstateVec), DirstatePackError> {
-    if parents.p1.len() != PARENT_SIZE || parents.p2.len() != PARENT_SIZE {
-        return Err(DirstatePackError::CorruptedParent);
-    }
+    now: Duration,
+) -> Result<Vec<u8>, DirstatePackError> {
+    // TODO move away from i32 before 2038.
+    let now: i32 = now.as_secs().try_into().expect("time overflow");
 
-    let expected_size: usize = dirstate_vec
+    let expected_size: usize = state_map
         .iter()
-        .map(|(ref filename, _)| {
+        .map(|(filename, _)| {
             let mut length = MIN_ENTRY_SIZE + filename.len();
-            if let Some(ref copy) = copymap.get(filename) {
+            if let Some(copy) = copy_map.get(filename) {
                 length += copy.len() + 1;
             }
             length
@@ -97,15 +104,15 @@
     let expected_size = expected_size + PARENT_SIZE * 2;
 
     let mut packed = Vec::with_capacity(expected_size);
-    let mut new_dirstate_vec = vec![];
+    let mut new_state_map = vec![];
 
-    packed.extend(parents.p1);
-    packed.extend(parents.p2);
+    packed.extend(&parents.p1);
+    packed.extend(&parents.p2);
 
-    for (ref filename, entry) in dirstate_vec {
-        let mut new_filename: Vec<u8> = filename.to_owned();
+    for (filename, entry) in state_map.iter() {
+        let new_filename = filename.to_owned();
         let mut new_mtime: i32 = entry.mtime;
-        if entry.state == 'n' as i8 && entry.mtime == now.into() {
+        if entry.state == EntryState::Normal && entry.mtime == now {
             // The file was last modified "simultaneously" with the current
             // write to dirstate (i.e. within the same second for file-
             // systems with a granularity of 1 sec). This commonly happens
@@ -116,7 +123,7 @@
             // contents of the file if the size is the same. This prevents
             // mistakenly treating such files as clean.
             new_mtime = -1;
-            new_dirstate_vec.push((
+            new_state_map.push((
                 filename.to_owned(),
                 DirstateEntry {
                     mtime: new_mtime,
@@ -124,13 +131,13 @@
                 },
             ));
         }
-
-        if let Some(copy) = copymap.get(filename) {
+        let mut new_filename = new_filename.into_vec();
+        if let Some(copy) = copy_map.get(filename) {
             new_filename.push('\0' as u8);
-            new_filename.extend(copy);
+            new_filename.extend(copy.bytes());
         }
 
-        packed.write_i8(entry.state)?;
+        packed.write_u8(entry.state.into())?;
         packed.write_i32::<BigEndian>(entry.mode)?;
         packed.write_i32::<BigEndian>(entry.size)?;
         packed.write_i32::<BigEndian>(new_mtime)?;
@@ -142,247 +149,286 @@
         return Err(DirstatePackError::BadSize(expected_size, packed.len()));
     }
 
-    Ok((packed, new_dirstate_vec))
+    state_map.extend(new_state_map);
+
+    Ok(packed)
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::utils::hg_path::HgPathBuf;
+    use std::collections::HashMap;
 
     #[test]
     fn test_pack_dirstate_empty() {
-        let dirstate_vec: DirstateVec = vec![];
+        let mut state_map: StateMap = HashMap::new();
         let copymap = HashMap::new();
         let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
         };
-        let now: i32 = 15000000;
-        let expected =
-            (b"1234567891011121314100000000000000000000".to_vec(), vec![]);
+        let now = Duration::new(15000000, 0);
+        let expected = b"1234567891011121314100000000000000000000".to_vec();
 
         assert_eq!(
             expected,
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap()
+            pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
         );
+
+        assert!(state_map.is_empty())
     }
     #[test]
     fn test_pack_dirstate_one_entry() {
-        let dirstate_vec: DirstateVec = vec![(
-            vec!['f' as u8, '1' as u8],
+        let expected_state_map: StateMap = [(
+            HgPathBuf::from_bytes(b"f1"),
             DirstateEntry {
-                state: 'n' as i8,
+                state: EntryState::Normal,
                 mode: 0o644,
                 size: 0,
                 mtime: 791231220,
             },
-        )];
+        )]
+        .iter()
+        .cloned()
+        .collect();
+        let mut state_map = expected_state_map.clone();
+
         let copymap = HashMap::new();
         let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
         };
-        let now: i32 = 15000000;
-        let expected = (
-            [
-                49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50,
-                49, 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
-                48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0,
-                0, 0, 0, 47, 41, 58, 244, 0, 0, 0, 2, 102, 49,
-            ]
-            .to_vec(),
-            vec![],
-        );
+        let now = Duration::new(15000000, 0);
+        let expected = [
+            49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
+            51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+            48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
+            41, 58, 244, 0, 0, 0, 2, 102, 49,
+        ]
+        .to_vec();
 
         assert_eq!(
             expected,
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap()
+            pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
         );
+
+        assert_eq!(expected_state_map, state_map);
     }
     #[test]
     fn test_pack_dirstate_one_entry_with_copy() {
-        let dirstate_vec: DirstateVec = vec![(
-            b"f1".to_vec(),
+        let expected_state_map: StateMap = [(
+            HgPathBuf::from_bytes(b"f1"),
             DirstateEntry {
-                state: 'n' as i8,
+                state: EntryState::Normal,
                 mode: 0o644,
                 size: 0,
                 mtime: 791231220,
             },
-        )];
+        )]
+        .iter()
+        .cloned()
+        .collect();
+        let mut state_map = expected_state_map.clone();
         let mut copymap = HashMap::new();
-        copymap.insert(b"f1".to_vec(), b"copyname".to_vec());
+        copymap.insert(
+            HgPathBuf::from_bytes(b"f1"),
+            HgPathBuf::from_bytes(b"copyname"),
+        );
         let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
         };
-        let now: i32 = 15000000;
-        let expected = (
-            [
-                49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50,
-                49, 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
-                48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0,
-                0, 0, 0, 47, 41, 58, 244, 0, 0, 0, 11, 102, 49, 0, 99, 111,
-                112, 121, 110, 97, 109, 101,
-            ]
-            .to_vec(),
-            vec![],
-        );
+        let now = Duration::new(15000000, 0);
+        let expected = [
+            49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
+            51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+            48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
+            41, 58, 244, 0, 0, 0, 11, 102, 49, 0, 99, 111, 112, 121, 110, 97,
+            109, 101,
+        ]
+        .to_vec();
 
         assert_eq!(
             expected,
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap()
+            pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
         );
+        assert_eq!(expected_state_map, state_map);
     }
 
     #[test]
     fn test_parse_pack_one_entry_with_copy() {
-        let dirstate_vec: DirstateVec = vec![(
-            b"f1".to_vec(),
+        let mut state_map: StateMap = [(
+            HgPathBuf::from_bytes(b"f1"),
             DirstateEntry {
-                state: 'n' as i8,
+                state: EntryState::Normal,
                 mode: 0o644,
                 size: 0,
                 mtime: 791231220,
             },
-        )];
+        )]
+        .iter()
+        .cloned()
+        .collect();
         let mut copymap = HashMap::new();
-        copymap.insert(b"f1".to_vec(), b"copyname".to_vec());
+        copymap.insert(
+            HgPathBuf::from_bytes(b"f1"),
+            HgPathBuf::from_bytes(b"copyname"),
+        );
         let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
         };
-        let now: i32 = 15000000;
+        let now = Duration::new(15000000, 0);
         let result =
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap();
+            pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
+                .unwrap();
 
+        let mut new_state_map: StateMap = HashMap::new();
+        let mut new_copy_map: CopyMap = HashMap::new();
+        let new_parents = parse_dirstate(
+            &mut new_state_map,
+            &mut new_copy_map,
+            result.as_slice(),
+        )
+        .unwrap();
         assert_eq!(
-            (
-                parents,
-                dirstate_vec,
-                copymap
-                    .iter()
-                    .map(|(k, v)| CopyVecEntry {
-                        path: k.as_slice(),
-                        copy_path: v.as_slice()
-                    })
-                    .collect()
-            ),
-            parse_dirstate(result.0.as_slice()).unwrap()
+            (parents, state_map, copymap),
+            (new_parents, new_state_map, new_copy_map)
         )
     }
 
     #[test]
     fn test_parse_pack_multiple_entries_with_copy() {
-        let dirstate_vec: DirstateVec = vec![
+        let mut state_map: StateMap = [
             (
-                b"f1".to_vec(),
+                HgPathBuf::from_bytes(b"f1"),
                 DirstateEntry {
-                    state: 'n' as i8,
+                    state: EntryState::Normal,
                     mode: 0o644,
                     size: 0,
                     mtime: 791231220,
                 },
             ),
             (
-                b"f2".to_vec(),
+                HgPathBuf::from_bytes(b"f2"),
                 DirstateEntry {
-                    state: 'm' as i8,
+                    state: EntryState::Merged,
                     mode: 0o777,
                     size: 1000,
                     mtime: 791231220,
                 },
             ),
             (
-                b"f3".to_vec(),
+                HgPathBuf::from_bytes(b"f3"),
                 DirstateEntry {
-                    state: 'r' as i8,
+                    state: EntryState::Removed,
                     mode: 0o644,
                     size: 234553,
                     mtime: 791231220,
                 },
             ),
             (
-                b"f4\xF6".to_vec(),
+                HgPathBuf::from_bytes(b"f4\xF6"),
                 DirstateEntry {
-                    state: 'a' as i8,
+                    state: EntryState::Added,
                     mode: 0o644,
                     size: -1,
                     mtime: -1,
                 },
             ),
-        ];
+        ]
+        .iter()
+        .cloned()
+        .collect();
         let mut copymap = HashMap::new();
-        copymap.insert(b"f1".to_vec(), b"copyname".to_vec());
-        copymap.insert(b"f4\xF6".to_vec(), b"copyname2".to_vec());
+        copymap.insert(
+            HgPathBuf::from_bytes(b"f1"),
+            HgPathBuf::from_bytes(b"copyname"),
+        );
+        copymap.insert(
+            HgPathBuf::from_bytes(b"f4\xF6"),
+            HgPathBuf::from_bytes(b"copyname2"),
+        );
         let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
         };
-        let now: i32 = 15000000;
+        let now = Duration::new(15000000, 0);
         let result =
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap();
+            pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
+                .unwrap();
 
+        let mut new_state_map: StateMap = HashMap::new();
+        let mut new_copy_map: CopyMap = HashMap::new();
+        let new_parents = parse_dirstate(
+            &mut new_state_map,
+            &mut new_copy_map,
+            result.as_slice(),
+        )
+        .unwrap();
         assert_eq!(
-            (parents, dirstate_vec, copymap),
-            parse_dirstate(result.0.as_slice())
-                .and_then(|(p, dvec, cvec)| Ok((
-                    p,
-                    dvec,
-                    cvec.iter()
-                        .map(|entry| (
-                            entry.path.to_vec(),
-                            entry.copy_path.to_vec()
-                        ))
-                        .collect()
-                )))
-                .unwrap()
+            (parents, state_map, copymap),
+            (new_parents, new_state_map, new_copy_map)
         )
     }
 
     #[test]
     /// https://www.mercurial-scm.org/repo/hg/rev/af3f26b6bba4
     fn test_parse_pack_one_entry_with_copy_and_time_conflict() {
-        let dirstate_vec: DirstateVec = vec![(
-            b"f1".to_vec(),
+        let mut state_map: StateMap = [(
+            HgPathBuf::from_bytes(b"f1"),
             DirstateEntry {
-                state: 'n' as i8,
+                state: EntryState::Normal,
                 mode: 0o644,
                 size: 0,
                 mtime: 15000000,
             },
-        )];
+        )]
+        .iter()
+        .cloned()
+        .collect();
         let mut copymap = HashMap::new();
-        copymap.insert(b"f1".to_vec(), b"copyname".to_vec());
+        copymap.insert(
+            HgPathBuf::from_bytes(b"f1"),
+            HgPathBuf::from_bytes(b"copyname"),
+        );
         let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
         };
-        let now: i32 = 15000000;
+        let now = Duration::new(15000000, 0);
         let result =
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap();
+            pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
+                .unwrap();
+
+        let mut new_state_map: StateMap = HashMap::new();
+        let mut new_copy_map: CopyMap = HashMap::new();
+        let new_parents = parse_dirstate(
+            &mut new_state_map,
+            &mut new_copy_map,
+            result.as_slice(),
+        )
+        .unwrap();
 
         assert_eq!(
             (
                 parents,
-                vec![(
-                    b"f1".to_vec(),
+                [(
+                    HgPathBuf::from_bytes(b"f1"),
                     DirstateEntry {
-                        state: 'n' as i8,
+                        state: EntryState::Normal,
                         mode: 0o644,
                         size: 0,
                         mtime: -1
                     }
-                )],
-                copymap
-                    .iter()
-                    .map(|(k, v)| CopyVecEntry {
-                        path: k.as_slice(),
-                        copy_path: v.as_slice()
-                    })
-                    .collect()
+                )]
+                .iter()
+                .cloned()
+                .collect::<StateMap>(),
+                copymap,
             ),
-            parse_dirstate(result.0.as_slice()).unwrap()
+            (new_parents, new_state_map, new_copy_map)
         )
     }
 }
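
The round trip exercised by the tests above can be condensed into a short
sketch (values are illustrative only): ``pack_dirstate`` now mutates the
state map in place and ``parse_dirstate`` rebuilds it from the packed
bytes::

   let mut state_map: StateMap = HashMap::new();
   let copy_map: CopyMap = HashMap::new();
   state_map.insert(
       HgPathBuf::from_bytes(b"f1"),
       DirstateEntry {
           state: EntryState::Normal,
           mode: 0o644,
           size: 0,
           mtime: 791231220,
       },
   );
   let parents = DirstateParents {
       p1: *b"12345678910111213141",
       p2: *b"00000000000000000000",
   };
   let packed = pack_dirstate(
       &mut state_map,
       &copy_map,
       parents.clone(),
       Duration::new(15000000, 0),
   )
   .unwrap();

   let mut new_state_map: StateMap = HashMap::new();
   let mut new_copy_map: CopyMap = HashMap::new();
   let new_parents =
       parse_dirstate(&mut new_state_map, &mut new_copy_map, &packed)
           .unwrap();
   assert_eq!((parents, state_map), (new_parents, new_state_map));
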
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate/status.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,248 @@
+// status.rs
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Rust implementation of dirstate.status (dirstate.py).
+//! It is currently missing a lot of functionality compared to the Python one
+//! and will only be triggered in narrow cases.
+
+use crate::utils::files::HgMetadata;
+use crate::utils::hg_path::{hg_path_to_path_buf, HgPath, HgPathBuf};
+use crate::{DirstateEntry, DirstateMap, EntryState};
+use rayon::prelude::*;
+use std::collections::HashMap;
+use std::fs::Metadata;
+use std::path::Path;
+
+/// Get stat data about the files explicitly specified by match.
+/// TODO subrepos
+fn walk_explicit(
+    files: &[impl AsRef<HgPath> + Sync],
+    dmap: &DirstateMap,
+    root_dir: impl AsRef<Path> + Sync,
+) -> std::io::Result<HashMap<HgPathBuf, Option<HgMetadata>>> {
+    let mut results = HashMap::new();
+
+    // A tuple of the normalized filename and the `Result` of the call to
+    // `symlink_metadata` for separate handling.
+    type WalkTuple<'a> = (&'a HgPath, std::io::Result<Metadata>);
+
+    let stats_res: std::io::Result<Vec<WalkTuple>> = files
+        .par_iter()
+        .map(|filename| {
+            // TODO normalization
+            let normalized = filename.as_ref();
+
+            let target_filename =
+                root_dir.as_ref().join(hg_path_to_path_buf(normalized)?);
+
+            Ok((normalized, target_filename.symlink_metadata()))
+        })
+        .collect();
+
+    for res in stats_res? {
+        match res {
+            (normalized, Ok(stat)) => {
+                if stat.is_file() {
+                    results.insert(
+                        normalized.to_owned(),
+                        Some(HgMetadata::from_metadata(stat)),
+                    );
+                } else {
+                    if dmap.contains_key(normalized) {
+                        results.insert(normalized.to_owned(), None);
+                    }
+                }
+            }
+            (normalized, Err(_)) => {
+                if dmap.contains_key(normalized) {
+                    results.insert(normalized.to_owned(), None);
+                }
+            }
+        };
+    }
+
+    Ok(results)
+}
+
+/// Stat all entries in the `DirstateMap` and return their new metadata.
+pub fn stat_dmap_entries(
+    dmap: &DirstateMap,
+    results: &HashMap<HgPathBuf, Option<HgMetadata>>,
+    root_dir: impl AsRef<Path> + Sync,
+) -> std::io::Result<Vec<(HgPathBuf, Option<HgMetadata>)>> {
+    dmap.par_iter()
+        .filter_map(
+            // Getting file metadata is costly, so we don't do it if the
+            // file is already present in the results, hence `filter_map`
+            |(filename, _)| -> Option<
+                std::io::Result<(HgPathBuf, Option<HgMetadata>)>
+            > {
+                if results.contains_key(filename) {
+                    return None;
+                }
+                let meta = match hg_path_to_path_buf(filename) {
+                    Ok(p) => root_dir.as_ref().join(p).symlink_metadata(),
+                    Err(e) => return Some(Err(e.into())),
+                };
+
+                Some(match meta {
+                    Ok(ref m)
+                        if !(m.file_type().is_file()
+                            || m.file_type().is_symlink()) =>
+                    {
+                        Ok((filename.to_owned(), None))
+                    }
+                    Ok(m) => Ok((
+                        filename.to_owned(),
+                        Some(HgMetadata::from_metadata(m)),
+                    )),
+                    Err(ref e)
+                        if e.kind() == std::io::ErrorKind::NotFound
+                            || e.raw_os_error() == Some(20) =>
+                    {
+                        // Rust does not yet have an `ErrorKind` for
+                        // `NotADirectory` (errno 20).
+                        // This happens if the dirstate contains `foo/bar`
+                        // and `foo` is not a directory.
+                        Ok((filename.to_owned(), None))
+                    }
+                    Err(e) => Err(e),
+                })
+            },
+        )
+        .collect()
+}
+
+pub struct StatusResult {
+    pub modified: Vec<HgPathBuf>,
+    pub added: Vec<HgPathBuf>,
+    pub removed: Vec<HgPathBuf>,
+    pub deleted: Vec<HgPathBuf>,
+    pub clean: Vec<HgPathBuf>,
+    // TODO ignored
+    // TODO unknown
+}
+
+fn build_response(
+    dmap: &DirstateMap,
+    list_clean: bool,
+    last_normal_time: i64,
+    check_exec: bool,
+    results: HashMap<HgPathBuf, Option<HgMetadata>>,
+) -> (Vec<HgPathBuf>, StatusResult) {
+    let mut lookup = vec![];
+    let mut modified = vec![];
+    let mut added = vec![];
+    let mut removed = vec![];
+    let mut deleted = vec![];
+    let mut clean = vec![];
+
+    for (filename, metadata_option) in results.into_iter() {
+        let DirstateEntry {
+            state,
+            mode,
+            mtime,
+            size,
+        } = match dmap.get(&filename) {
+            None => {
+                continue;
+            }
+            Some(e) => *e,
+        };
+
+        match metadata_option {
+            None => {
+                match state {
+                    EntryState::Normal
+                    | EntryState::Merged
+                    | EntryState::Added => deleted.push(filename),
+                    EntryState::Removed => removed.push(filename),
+                    _ => {}
+                };
+            }
+            Some(HgMetadata {
+                st_mode,
+                st_size,
+                st_mtime,
+                ..
+            }) => {
+                match state {
+                    EntryState::Normal => {
+                        // Dates and times that are outside the 31-bit signed
+                        // range are compared modulo 2^31. This should prevent
+                        // it from behaving badly with very large files or
+                        // corrupt dates while still having a high probability
+                        // of detecting changes. (issue2608)
+                        let range_mask = 0x7fffffff;
+
+                        let size_changed = (size != st_size as i32)
+                            && size != (st_size as i32 & range_mask);
+                        let mode_changed = (mode ^ st_mode as i32) & 0o100
+                            != 0o000
+                            && check_exec;
+                        if size >= 0
+                            && (size_changed || mode_changed)
+                            || size == -2  // other parent
+                            || dmap.copy_map.contains_key(&filename)
+                        {
+                            modified.push(filename);
+                        } else if mtime != st_mtime as i32
+                            && mtime != (st_mtime as i32 & range_mask)
+                        {
+                            lookup.push(filename);
+                        } else if st_mtime == last_normal_time {
+                            // the file may have just been marked as normal and
+                            // it may have changed in the same second without
+                            // changing its size. This can happen if we quickly
+                            // do multiple commits. Force lookup, so we don't
+                            // miss such a racy file change.
+                            lookup.push(filename);
+                        } else if list_clean {
+                            clean.push(filename);
+                        }
+                    }
+                    EntryState::Merged => modified.push(filename),
+                    EntryState::Added => added.push(filename),
+                    EntryState::Removed => removed.push(filename),
+                    EntryState::Unknown => {}
+                }
+            }
+        }
+    }
+
+    (
+        lookup,
+        StatusResult {
+            modified,
+            added,
+            removed,
+            deleted,
+            clean,
+        },
+    )
+}
+
+pub fn status(
+    dmap: &DirstateMap,
+    root_dir: impl AsRef<Path> + Sync + Copy,
+    files: &[impl AsRef<HgPath> + Sync],
+    list_clean: bool,
+    last_normal_time: i64,
+    check_exec: bool,
+) -> std::io::Result<(Vec<HgPathBuf>, StatusResult)> {
+    let mut results = walk_explicit(files, &dmap, root_dir)?;
+
+    results.extend(stat_dmap_entries(&dmap, &results, root_dir)?);
+
+    Ok(build_response(
+        &dmap,
+        list_clean,
+        last_normal_time,
+        check_exec,
+        results,
+    ))
+}
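
A sketch of driving the new ``status`` entry point; the repository path and
file list are hypothetical, and ``HgPathBuf`` is assumed to implement
``AsRef<HgPath>`` as the signature requires::

   let dmap = DirstateMap::new();
   let files = vec![HgPathBuf::from_bytes(b"some/file")];
   let (lookup, results) = status(
       &dmap,
       "/path/to/repo", // anything `AsRef<Path> + Sync + Copy`
       &files,
       false, // list_clean
       0,     // last_normal_time
       true,  // check_exec
   )
   .unwrap();

   // `lookup` lists files whose mtime was ambiguous and still need a
   // content comparison; `results` buckets everything else into
   // modified/added/removed/deleted/clean.
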
--- a/rust/hg-core/src/discovery.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-core/src/discovery.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -10,23 +10,124 @@
 //! This is a Rust counterpart to the `partialdiscovery` class of
 //! `mercurial.setdiscovery`
 
-use super::{Graph, GraphError, Revision};
+use super::{Graph, GraphError, Revision, NULL_REVISION};
 use crate::ancestors::MissingAncestors;
 use crate::dagops;
-use std::collections::HashSet;
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore, SeedableRng};
+use std::cmp::{max, min};
+use std::collections::{HashMap, HashSet, VecDeque};
+
+type Rng = rand_pcg::Pcg32;
 
 pub struct PartialDiscovery<G: Graph + Clone> {
     target_heads: Option<Vec<Revision>>,
     graph: G, // plays the role of self._repo
     common: MissingAncestors<G>,
     undecided: Option<HashSet<Revision>>,
+    children_cache: Option<HashMap<Revision, Vec<Revision>>>,
     missing: HashSet<Revision>,
+    rng: Rng,
+    respect_size: bool,
+    randomize: bool,
 }
 
 pub struct DiscoveryStats {
     pub undecided: Option<usize>,
 }
 
+/// Update an existing sample to match the expected size
+///
+/// The sample is updated with revisions exponentially distant from each
+/// element of `heads`.
+///
+/// If a target size is specified, the sampling will stop once this size is
+/// reached. Otherwise, sampling will happen until the roots of the `revs`
+/// set are reached.
+///
+/// - `revs`: set of revs we want to discover (if `None`, assume the whole
+///   DAG represented by `parentsfn`)
+/// - `heads`: set of DAG head revs
+/// - `sample`: a sample to update
+/// - `parentsfn`: a callable to resolve parents for a revision
+/// - `quicksamplesize`: optional target size of the sample
+fn update_sample<I>(
+    revs: Option<&HashSet<Revision>>,
+    heads: impl IntoIterator<Item = Revision>,
+    sample: &mut HashSet<Revision>,
+    parentsfn: impl Fn(Revision) -> Result<I, GraphError>,
+    quicksamplesize: Option<usize>,
+) -> Result<(), GraphError>
+where
+    I: Iterator<Item = Revision>,
+{
+    let mut distances: HashMap<Revision, u32> = HashMap::new();
+    let mut visit: VecDeque<Revision> = heads.into_iter().collect();
+    let mut factor: u32 = 1;
+    let mut seen: HashSet<Revision> = HashSet::new();
+    while let Some(current) = visit.pop_front() {
+        if !seen.insert(current) {
+            continue;
+        }
+
+        let d = *distances.entry(current).or_insert(1);
+        if d > factor {
+            factor *= 2;
+        }
+        if d == factor {
+            sample.insert(current);
+            if let Some(sz) = quicksamplesize {
+                if sample.len() >= sz {
+                    return Ok(());
+                }
+            }
+        }
+        for p in parentsfn(current)? {
+            if let Some(revs) = revs {
+                if !revs.contains(&p) {
+                    continue;
+                }
+            }
+            distances.entry(p).or_insert(d + 1);
+            visit.push_back(p);
+        }
+    }
+    Ok(())
+}
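A toy illustration of the exponential spacing, written as a hypothetical in-module snippet (the linear chain and the helper are not part of this changeset): on the chain 9 -> 8 -> ... -> 0, where the parents function yields `r - 1`, only the revisions at distances 1, 2, 4 and 8 from the head are kept.

```
fn linear_chain_example() -> Result<(), GraphError> {
    let mut sample: HashSet<Revision> = HashSet::new();
    update_sample(
        None,    // no `revs` filter: walk the whole chain
        vec![9], // single head
        &mut sample,
        |r| Ok((if r > 0 { Some(r - 1) } else { None }).into_iter()),
        None,    // no quick-sample size cap
    )?;
    // Distances double (1, 2, 4, 8), so the sample is {9, 8, 6, 2}.
    let expected: HashSet<Revision> = [9, 8, 6, 2].iter().cloned().collect();
    assert_eq!(sample, expected);
    Ok(())
}
```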
+
+struct ParentsIterator {
+    parents: [Revision; 2],
+    cur: usize,
+}
+
+impl ParentsIterator {
+    fn graph_parents(
+        graph: &impl Graph,
+        r: Revision,
+    ) -> Result<ParentsIterator, GraphError> {
+        Ok(ParentsIterator {
+            parents: graph.parents(r)?,
+            cur: 0,
+        })
+    }
+}
+
+impl Iterator for ParentsIterator {
+    type Item = Revision;
+
+    fn next(&mut self) -> Option<Revision> {
+        if self.cur > 1 {
+            return None;
+        }
+        let rev = self.parents[self.cur];
+        self.cur += 1;
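+        // An absent parent is encoded as NULL_REVISION in the fixed-size
+        // [p1, p2] array; recurse to skip it so callers only ever see
+        // actual parents.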
+        if rev == NULL_REVISION {
+            return self.next();
+        }
+        Some(rev)
+    }
+}
+
 impl<G: Graph + Clone> PartialDiscovery<G> {
     /// Create a PartialDiscovery object, with the intent
     /// of comparing our `::<target_heads>` revset to the contents of another
@@ -38,22 +139,89 @@
     /// If we want to make the signature more flexible,
     /// we'll have to make it a type argument of `PartialDiscovery` or a trait
     /// object since we'll keep it in the meanwhile
-    pub fn new(graph: G, target_heads: Vec<Revision>) -> Self {
+    ///
+    /// The `respect_size` boolean controls how the sampling methods
+    /// will interpret the size argument requested by the caller. If it's
+    /// `false`, they are allowed to produce a sample whose size is more
+    /// appropriate to the situation (typically bigger).
+    ///
+    /// The `randomize` boolean affects sampling, and specifically how
+    /// limiting or last-minute expanding is done:
+    ///
+    /// If `true`, both will perform random picking from `self.undecided`.
+    /// This is currently the best for actual discoveries.
+    ///
+    /// If `false`, a reproducible picking strategy is performed. This is
+    /// useful for integration tests.
+    pub fn new(
+        graph: G,
+        target_heads: Vec<Revision>,
+        respect_size: bool,
+        randomize: bool,
+    ) -> Self {
+        let mut seed: [u8; 16] = [0; 16];
+        if randomize {
+            thread_rng().fill_bytes(&mut seed);
+        }
+        Self::new_with_seed(graph, target_heads, seed, respect_size, randomize)
+    }
+
+    pub fn new_with_seed(
+        graph: G,
+        target_heads: Vec<Revision>,
+        seed: [u8; 16],
+        respect_size: bool,
+        randomize: bool,
+    ) -> Self {
         PartialDiscovery {
             undecided: None,
+            children_cache: None,
             target_heads: Some(target_heads),
             graph: graph.clone(),
             common: MissingAncestors::new(graph, vec![]),
             missing: HashSet::new(),
+            rng: Rng::from_seed(seed),
+            respect_size,
+            randomize,
         }
     }
 
+    /// Extract at most `size` random elements from sample and return them
+    /// as a vector
+    fn limit_sample(
+        &mut self,
+        mut sample: Vec<Revision>,
+        size: usize,
+    ) -> Vec<Revision> {
+        if !self.randomize {
+            sample.sort();
+            sample.truncate(size);
+            return sample;
+        }
+        let sample_len = sample.len();
+        if sample_len <= size {
+            return sample;
+        }
+        let rng = &mut self.rng;
+        let dropped_size = sample_len - size;
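+        // `partial_shuffle` moves `amount` randomly picked elements to the
+        // front of the slice and returns (shuffled front, untouched rest);
+        // shuffling whichever of the kept or dropped halves is smaller
+        // minimizes the number of random draws.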
+        let limited_slice = if size < dropped_size {
+            sample.partial_shuffle(rng, size).0
+        } else {
+            sample.partial_shuffle(rng, dropped_size).1
+        };
+        limited_slice.to_owned()
+    }
+
     /// Register revisions known as being common
     pub fn add_common_revisions(
         &mut self,
         common: impl IntoIterator<Item = Revision>,
     ) -> Result<(), GraphError> {
+        let before_len = self.common.get_bases().len();
         self.common.add_bases(common);
+        if self.common.get_bases().len() == before_len {
+            return Ok(());
+        }
         if let Some(ref mut undecided) = self.undecided {
             self.common.remove_ancestors_from(undecided)?;
         }
@@ -61,20 +229,50 @@
     }
 
     /// Register revisions known as being missing
+    ///
+    /// # Performance note
+    ///
+    /// Except in the most trivial case, the first call of this method has
+    /// the side effect of computing the `self.undecided` set for the first
+    /// time, along with the related caches it might need for the efficiency
+    /// of its internal computation. This is typically faster if more
+    /// information is
+    /// available in `self.common`. Therefore, for good performance, the
+    /// caller should avoid calling this too early.
     pub fn add_missing_revisions(
         &mut self,
         missing: impl IntoIterator<Item = Revision>,
     ) -> Result<(), GraphError> {
-        self.ensure_undecided()?;
-        let range = dagops::range(
-            &self.graph,
-            missing,
-            self.undecided.as_ref().unwrap().iter().cloned(),
-        )?;
+        let mut tovisit: VecDeque<Revision> = missing.into_iter().collect();
+        if tovisit.is_empty() {
+            return Ok(());
+        }
+        self.ensure_children_cache()?;
+        self.ensure_undecided()?; // for safety of possible future refactors
+        let children = self.children_cache.as_ref().unwrap();
+        let mut seen: HashSet<Revision> = HashSet::new();
         let undecided_mut = self.undecided.as_mut().unwrap();
-        for missrev in range {
-            self.missing.insert(missrev);
-            undecided_mut.remove(&missrev);
+        while let Some(rev) = tovisit.pop_front() {
+            if !self.missing.insert(rev) {
+                // Either it's known to be missing from a previous
+                // invocation, and there's no need to iterate on its
+                // children (we know they are all missing),
+                // or it's from a previous iteration of this loop
+                // and its children have already been queued.
+                continue;
+            }
+            undecided_mut.remove(&rev);
+            match children.get(&rev) {
+                None => {
+                    continue;
+                }
+                Some(this_children) => {
+                    for child in this_children.iter().cloned() {
+                        if seen.insert(child) {
+                            tovisit.push_back(child);
+                        }
+                    }
+                }
+            }
         }
         Ok(())
     }
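A hypothetical driver showing the call order that the performance note above recommends; `graph`, `target_heads`, `known_common` and `known_missing` are placeholders:

```
fn discovery_round<G: Graph + Clone>(
    graph: G,
    target_heads: Vec<Revision>,
    known_common: Vec<Revision>,
    known_missing: Vec<Revision>,
) -> Result<(), GraphError> {
    // All arguments here are illustrative placeholders.
    let mut disco = PartialDiscovery::new(graph, target_heads, true, true);
    // Cheap: registers bases, pruning `undecided` only if already computed.
    disco.add_common_revisions(known_common)?;
    // The first call computes `undecided` and the children cache, so it is
    // made as late as possible.
    disco.add_missing_revisions(known_missing)?;
    Ok(())
}
```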
@@ -124,12 +322,157 @@
         Ok(())
     }
 
+    fn ensure_children_cache(&mut self) -> Result<(), GraphError> {
+        if self.children_cache.is_some() {
+            return Ok(());
+        }
+        self.ensure_undecided()?;
+
+        let mut children: HashMap<Revision, Vec<Revision>> = HashMap::new();
+        for &rev in self.undecided.as_ref().unwrap() {
+            for p in ParentsIterator::graph_parents(&self.graph, rev)? {
+                children.entry(p).or_insert_with(Vec::new).push(rev);
+            }
+        }
+        self.children_cache = Some(children);
+        Ok(())
+    }
+
     /// Provide statistics about the current state of the discovery process
     pub fn stats(&self) -> DiscoveryStats {
         DiscoveryStats {
             undecided: self.undecided.as_ref().map(|s| s.len()),
         }
     }
+
+    pub fn take_quick_sample(
+        &mut self,
+        headrevs: impl IntoIterator<Item = Revision>,
+        size: usize,
+    ) -> Result<Vec<Revision>, GraphError> {
+        self.ensure_undecided()?;
+        let mut sample = {
+            let undecided = self.undecided.as_ref().unwrap();
+            if undecided.len() <= size {
+                return Ok(undecided.iter().cloned().collect());
+            }
+            dagops::heads(&self.graph, undecided.iter())?
+        };
+        if sample.len() >= size {
+            return Ok(self.limit_sample(sample.into_iter().collect(), size));
+        }
+        update_sample(
+            None,
+            headrevs,
+            &mut sample,
+            |r| ParentsIterator::graph_parents(&self.graph, r),
+            Some(size),
+        )?;
+        Ok(sample.into_iter().collect())
+    }
+
+    /// Extract a sample from `self.undecided`, going from its heads and roots.
+    ///
+    /// The `size` parameter is used to avoid useless computations if
+    /// it turns out to be bigger than the whole set of undecided Revisions.
+    ///
+    /// The sample is taken by using `update_sample` from the heads, then
+    /// from the roots, working on the reverse DAG,
+    /// expressed by `self.children_cache`.
+    ///
+    /// No effort is made to complete or limit the sample to `size`,
+    /// but this method returns another interesting size that it derives
+    /// from its knowledge of the structure of the various sets, leaving
+    /// the decision to use it or not to the caller.
+    fn bidirectional_sample(
+        &mut self,
+        size: usize,
+    ) -> Result<(HashSet<Revision>, usize), GraphError> {
+        self.ensure_undecided()?;
+        {
+            // we don't want to compute children_cache before this
+            // but doing it after extracting self.undecided takes a mutable
+            // ref to self while a shareable one is still active.
+            let undecided = self.undecided.as_ref().unwrap();
+            if undecided.len() <= size {
+                return Ok((undecided.clone(), size));
+            }
+        }
+
+        self.ensure_children_cache()?;
+        let revs = self.undecided.as_ref().unwrap();
+        let mut sample: HashSet<Revision> = revs.clone();
+
+        // it's possible that leveraging the children cache would be more
+        // efficient here
+        dagops::retain_heads(&self.graph, &mut sample)?;
+        let revsheads = sample.clone(); // was again heads(revs) in python
+
+        // update from heads
+        update_sample(
+            Some(revs),
+            revsheads.iter().cloned(),
+            &mut sample,
+            |r| ParentsIterator::graph_parents(&self.graph, r),
+            None,
+        )?;
+
+        // update from roots
+        let revroots: HashSet<Revision> =
+            dagops::roots(&self.graph, revs)?.into_iter().collect();
+        let prescribed_size = max(size, min(revroots.len(), revsheads.len()));
+
+        let children = self.children_cache.as_ref().unwrap();
+        let empty_vec: Vec<Revision> = Vec::new();
+        update_sample(
+            Some(revs),
+            revroots,
+            &mut sample,
+            |r| Ok(children.get(&r).unwrap_or(&empty_vec).iter().cloned()),
+            None,
+        )?;
+        Ok((sample, prescribed_size))
+    }
+
+    /// Fill up the sample to the requested size with random undecided
+    /// Revisions.
+    ///
+    /// This is intended to be used as a last resort completion if the
+    /// regular sampling algorithm returns too few elements.
+    fn random_complete_sample(
+        &mut self,
+        sample: &mut Vec<Revision>,
+        size: usize,
+    ) {
+        let sample_len = sample.len();
+        if size <= sample_len {
+            return;
+        }
+        let take_from: Vec<Revision> = self
+            .undecided
+            .as_ref()
+            .unwrap()
+            .iter()
+            .filter(|&r| !sample.contains(r))
+            .cloned()
+            .collect();
+        sample.extend(self.limit_sample(take_from, size - sample_len));
+    }
+
+    pub fn take_full_sample(
+        &mut self,
+        size: usize,
+    ) -> Result<Vec<Revision>, GraphError> {
+        let (sample_set, prescribed_size) = self.bidirectional_sample(size)?;
+        let size = if self.respect_size {
+            size
+        } else {
+            prescribed_size
+        };
+        let mut sample =
+            self.limit_sample(sample_set.into_iter().collect(), size);
+        self.random_complete_sample(&mut sample, size);
+        Ok(sample)
+    }
 }
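An illustrative sampling round on an ongoing discovery; `local_heads` and the sizes are placeholders:

```
fn sample_round<G: Graph + Clone>(
    disco: &mut PartialDiscovery<G>,
    local_heads: Vec<Revision>, // placeholder input
) -> Result<(), GraphError> {
    // Cheap sample seeded from the given heads.
    let quick = disco.take_quick_sample(local_heads, 10)?;
    // Full bidirectional sample; with `respect_size == false` it may
    // legitimately contain more than 200 revisions.
    let full = disco.take_full_sample(200)?;
    let _ = (quick, full); // ...query the peer about these revisions
    Ok(())
}
```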
 
 #[cfg(test)]
@@ -138,8 +481,30 @@
     use crate::testing::SampleGraph;
 
     /// A PartialDiscovery as for pushing all the heads of `SampleGraph`
+    ///
+    /// To avoid actual randomness in these tests, we give it a fixed
+    /// random seed, while still exercising the randomized code path.
     fn full_disco() -> PartialDiscovery<SampleGraph> {
-        PartialDiscovery::new(SampleGraph, vec![10, 11, 12, 13])
+        PartialDiscovery::new_with_seed(
+            SampleGraph,
+            vec![10, 11, 12, 13],
+            [0; 16],
+            true,
+            true,
+        )
+    }
+
+    /// A PartialDiscovery as for pushing the head 12 of `SampleGraph`
+    ///
+    /// To avoid actual randomness in tests, we give it a fixed random seed.
+    fn disco12() -> PartialDiscovery<SampleGraph> {
+        PartialDiscovery::new_with_seed(
+            SampleGraph,
+            vec![12],
+            [0; 16],
+            true,
+            true,
+        )
     }
 
     fn sorted_undecided(
@@ -206,4 +571,124 @@
         assert_eq!(sorted_common_heads(&disco)?, vec![5, 11, 12]);
         Ok(())
     }
+
+    #[test]
+    fn test_add_missing_early_continue() -> Result<(), GraphError> {
+        eprintln!("test_add_missing_early_stop");
+        let mut disco = full_disco();
+        disco.add_common_revisions(vec![13, 3, 4])?;
+        disco.ensure_children_cache()?;
+        // 12 is grand-child of 6 through 9
+        // passing them in this order maximizes the chances of the
+        // early continue to do the wrong thing
+        disco.add_missing_revisions(vec![6, 9, 12])?;
+        assert_eq!(sorted_undecided(&disco), vec![5, 7, 10, 11]);
+        assert_eq!(sorted_missing(&disco), vec![6, 9, 12]);
+        assert!(!disco.is_complete());
+        Ok(())
+    }
+
+    #[test]
+    fn test_limit_sample_no_need_to() {
+        let sample = vec![1, 2, 3, 4];
+        assert_eq!(full_disco().limit_sample(sample, 10), vec![1, 2, 3, 4]);
+    }
+
+    #[test]
+    fn test_limit_sample_less_than_half() {
+        assert_eq!(full_disco().limit_sample((1..6).collect(), 2), vec![4, 2]);
+    }
+
+    #[test]
+    fn test_limit_sample_more_than_half() {
+        assert_eq!(full_disco().limit_sample((1..4).collect(), 2), vec![3, 2]);
+    }
+
+    #[test]
+    fn test_limit_sample_no_random() {
+        let mut disco = full_disco();
+        disco.randomize = false;
+        assert_eq!(
+            disco.limit_sample(vec![1, 8, 13, 5, 7, 3], 4),
+            vec![1, 3, 5, 7]
+        );
+    }
+
+    #[test]
+    fn test_quick_sample_enough_undecided_heads() -> Result<(), GraphError> {
+        let mut disco = full_disco();
+        disco.undecided = Some((1..=13).collect());
+
+        let mut sample_vec = disco.take_quick_sample(vec![], 4)?;
+        sample_vec.sort();
+        assert_eq!(sample_vec, vec![10, 11, 12, 13]);
+        Ok(())
+    }
+
+    #[test]
+    fn test_quick_sample_climbing_from_12() -> Result<(), GraphError> {
+        let mut disco = disco12();
+        disco.ensure_undecided()?;
+
+        let mut sample_vec = disco.take_quick_sample(vec![12], 4)?;
+        sample_vec.sort();
+        // r12's only parent is r9, whose unique grand-parent through the
+        // diamond shape is r4. This ends there because the distance from r4
+        // to the root is only 3.
+        assert_eq!(sample_vec, vec![4, 9, 12]);
+        Ok(())
+    }
+
+    #[test]
+    fn test_children_cache() -> Result<(), GraphError> {
+        let mut disco = full_disco();
+        disco.ensure_children_cache()?;
+
+        let cache = disco.children_cache.unwrap();
+        assert_eq!(cache.get(&2).cloned(), Some(vec![4]));
+        assert_eq!(cache.get(&10).cloned(), None);
+
+        let mut children_4 = cache.get(&4).cloned().unwrap();
+        children_4.sort();
+        assert_eq!(children_4, vec![5, 6, 7]);
+
+        let mut children_7 = cache.get(&7).cloned().unwrap();
+        children_7.sort();
+        assert_eq!(children_7, vec![9, 11]);
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_complete_sample() {
+        let mut disco = full_disco();
+        let undecided: HashSet<Revision> =
+            [4, 7, 9, 2, 3].iter().cloned().collect();
+        disco.undecided = Some(undecided);
+
+        let mut sample = vec![0];
+        disco.random_complete_sample(&mut sample, 3);
+        assert_eq!(sample.len(), 3);
+
+        let mut sample = vec![2, 4, 7];
+        disco.random_complete_sample(&mut sample, 1);
+        assert_eq!(sample.len(), 3);
+    }
+
+    #[test]
+    fn test_bidirectional_sample() -> Result<(), GraphError> {
+        let mut disco = full_disco();
+        disco.undecided = Some((0..=13).into_iter().collect());
+
+        let (sample_set, size) = disco.bidirectional_sample(7)?;
+        assert_eq!(size, 7);
+        let mut sample: Vec<Revision> = sample_set.into_iter().collect();
+        sample.sort();
+        // our DAG is a bit too small for the results to be really
+        // interesting; at least it shows that:
+        // - we went both ways
+        // - we didn't take all Revisions (6 is not in the sample)
+        assert_eq!(sample, vec![0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13]);
+        Ok(())
+    }
 }
--- a/rust/hg-core/src/filepatterns.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-core/src/filepatterns.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -1,12 +1,19 @@
-use crate::{
-    utils::{files::get_path_from_bytes, SliceExt},
-    LineNumber, PatternError, PatternFileError,
-};
+// filepatterns.rs
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Handling of Mercurial-specific patterns.
+
+use crate::{utils::SliceExt, LineNumber, PatternError, PatternFileError};
 use lazy_static::lazy_static;
 use regex::bytes::{NoExpand, Regex};
 use std::collections::HashMap;
 use std::fs::File;
 use std::io::Read;
+use std::path::{Path, PathBuf};
 use std::vec::Vec;
 
 lazy_static! {
@@ -29,7 +36,8 @@
     Regexp,
     /// Glob that matches at the front of the path
     RootGlob,
-    /// Glob that matches at any suffix of the path (still anchored at slashes)
+    /// Glob that matches at any suffix of the path (still anchored at
+    /// slashes)
     Glob,
     Path,
     RelPath,
@@ -50,8 +58,8 @@
         match c {
             b'*' => {
                 for (source, repl) in GLOB_REPLACEMENTS {
-                    if input.starts_with(source) {
-                        input = &input[source.len()..];
+                    if let Some(rest) = input.drop_prefix(source) {
+                        input = rest;
                         res.extend(*repl);
                         break;
                     }
@@ -149,43 +157,36 @@
             if pattern[0] == b'^' {
                 return pattern.to_owned();
             }
-            let mut res = b".*".to_vec();
-            res.extend(pattern);
-            res
+            [b".*", pattern].concat()
         }
         PatternSyntax::Path | PatternSyntax::RelPath => {
             if pattern == b"." {
                 return vec![];
             }
-            let mut pattern = escape_pattern(pattern);
-            pattern.extend(b"(?:/|$)");
-            pattern
+            [escape_pattern(pattern).as_slice(), b"(?:/|$)"].concat()
         }
         PatternSyntax::RootFiles => {
             let mut res = if pattern == b"." {
                 vec![]
             } else {
                 // Pattern is a directory name.
-                let mut as_vec: Vec<u8> = escape_pattern(pattern);
-                as_vec.push(b'/');
-                as_vec
+                [escape_pattern(pattern).as_slice(), b"/"].concat()
             };
 
             // Anything after the pattern must be a non-directory.
             res.extend(b"[^/]+$");
             res
         }
-        PatternSyntax::Glob
-        | PatternSyntax::RelGlob
-        | PatternSyntax::RootGlob => {
-            let mut res: Vec<u8> = vec![];
-            if syntax == PatternSyntax::RelGlob {
-                res.extend(b"(?:|.*/)");
+        PatternSyntax::RelGlob => {
+            let glob_re = glob_to_re(pattern);
+            if let Some(rest) = glob_re.drop_prefix(b"[^/]*") {
+                [b".*", rest, globsuffix].concat()
+            } else {
+                [b"(?:|.*/)", glob_re.as_slice(), globsuffix].concat()
             }
-
-            res.extend(glob_to_re(pattern));
-            res.extend(globsuffix.iter());
-            res
+        }
+        PatternSyntax::Glob | PatternSyntax::RootGlob => {
+            [glob_to_re(pattern).as_slice(), globsuffix].concat()
         }
     }
 }
@@ -227,11 +228,11 @@
 }
 
 pub type PatternTuple = (Vec<u8>, LineNumber, Vec<u8>);
-type WarningTuple = (Vec<u8>, Vec<u8>);
+type WarningTuple = (PathBuf, Vec<u8>);
 
-pub fn parse_pattern_file_contents(
+pub fn parse_pattern_file_contents<P: AsRef<Path>>(
     lines: &[u8],
-    file_path: &[u8],
+    file_path: P,
     warn: bool,
 ) -> (Vec<PatternTuple>, Vec<WarningTuple>) {
     let comment_regex = Regex::new(r"((?:^|[^\\])(?:\\\\)*)#.*").unwrap();
@@ -259,13 +260,14 @@
             continue;
         }
 
-        if line.starts_with(b"syntax:") {
-            let syntax = line[b"syntax:".len()..].trim();
+        if let Some(syntax) = line.drop_prefix(b"syntax:") {
+            let syntax = syntax.trim();
 
             if let Some(rel_syntax) = SYNTAXES.get(syntax) {
                 current_syntax = rel_syntax;
             } else if warn {
-                warnings.push((file_path.to_owned(), syntax.to_owned()));
+                warnings
+                    .push((file_path.as_ref().to_owned(), syntax.to_owned()));
             }
             continue;
         }
@@ -273,13 +275,14 @@
         let mut line_syntax: &[u8] = &current_syntax;
 
         for (s, rels) in SYNTAXES.iter() {
-            if line.starts_with(rels) {
+            if let Some(rest) = line.drop_prefix(rels) {
                 line_syntax = rels;
-                line = &line[rels.len()..];
+                line = rest;
                 break;
-            } else if line.starts_with(&[s, b":".as_ref()].concat()) {
+            }
+            if let Some(rest) = line.drop_prefix(&[s, &b":"[..]].concat()) {
                 line_syntax = rels;
-                line = &line[s.len() + 1..];
+                line = rest;
                 break;
             }
         }
@@ -293,11 +296,11 @@
     (inputs, warnings)
 }
 
-pub fn read_pattern_file(
-    file_path: &[u8],
+pub fn read_pattern_file<P: AsRef<Path>>(
+    file_path: P,
     warn: bool,
 ) -> Result<(Vec<PatternTuple>, Vec<WarningTuple>), PatternFileError> {
-    let mut f = File::open(get_path_from_bytes(file_path))?;
+    let mut f = File::open(file_path.as_ref())?;
     let mut contents = Vec::new();
 
     f.read_to_end(&mut contents)?;
@@ -339,18 +342,21 @@
 
         assert_eq!(
             vec![(b"relglob:*.elc".to_vec(), 2, b"*.elc".to_vec())],
-            parse_pattern_file_contents(lines, b"file_path", false).0,
+            parse_pattern_file_contents(lines, Path::new("file_path"), false)
+                .0,
         );
 
         let lines = b"syntax: include\nsyntax: glob";
 
         assert_eq!(
-            parse_pattern_file_contents(lines, b"file_path", false).0,
+            parse_pattern_file_contents(lines, Path::new("file_path"), false)
+                .0,
             vec![]
         );
         let lines = b"glob:**.o";
         assert_eq!(
-            parse_pattern_file_contents(lines, b"file_path", false).0,
+            parse_pattern_file_contents(lines, Path::new("file_path"), false)
+                .0,
             vec![(b"relglob:**.o".to_vec(), 1, b"**.o".to_vec())]
         );
     }
--- a/rust/hg-core/src/lib.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-core/src/lib.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -9,14 +9,17 @@
 pub mod discovery;
 pub mod testing; // unconditionally built, for use from integration tests
 pub use dirstate::{
-    dirs_multiset::DirsMultiset,
-    parsers::{pack_dirstate, parse_dirstate},
-    CopyVec, CopyVecEntry, DirsIterable, DirstateEntry, DirstateParents,
-    DirstateVec,
+    dirs_multiset::{DirsMultiset, DirsMultisetIter},
+    dirstate_map::DirstateMap,
+    parsers::{pack_dirstate, parse_dirstate, PARENT_SIZE},
+    status::status,
+    CopyMap, CopyMapIter, DirstateEntry, DirstateParents, EntryState,
+    StateMap, StateMapIter,
 };
 mod filepatterns;
 pub mod utils;
 
+use crate::utils::hg_path::HgPathBuf;
 pub use filepatterns::{
     build_single_regex, read_pattern_file, PatternSyntax, PatternTuple,
 };
@@ -60,6 +63,25 @@
     TooLittleData,
     Overflow,
     CorruptedEntry(String),
+    Damaged,
+}
+
+impl From<std::io::Error> for DirstateParseError {
+    fn from(e: std::io::Error) -> Self {
+        DirstateParseError::CorruptedEntry(e.to_string())
+    }
+}
+
+impl ToString for DirstateParseError {
+    fn to_string(&self) -> String {
+        use crate::DirstateParseError::*;
+        match self {
+            TooLittleData => "Too little data for dirstate.".to_string(),
+            Overflow => "Overflow in dirstate.".to_string(),
+            CorruptedEntry(e) => format!("Corrupted entry: {:?}.", e),
+            Damaged => "Dirstate appears to be damaged.".to_string(),
+        }
+    }
 }
 
 #[derive(Debug, PartialEq)]
@@ -69,21 +91,33 @@
     BadSize(usize, usize),
 }
 
-#[derive(Debug, PartialEq)]
-pub enum DirstateMapError {
-    PathNotFound(Vec<u8>),
-    EmptyPath,
-}
-
 impl From<std::io::Error> for DirstatePackError {
     fn from(e: std::io::Error) -> Self {
         DirstatePackError::CorruptedEntry(e.to_string())
     }
 }
+#[derive(Debug, PartialEq)]
+pub enum DirstateMapError {
+    PathNotFound(HgPathBuf),
+    EmptyPath,
+}
 
-impl From<std::io::Error> for DirstateParseError {
-    fn from(e: std::io::Error) -> Self {
-        DirstateParseError::CorruptedEntry(e.to_string())
+pub enum DirstateError {
+    Parse(DirstateParseError),
+    Pack(DirstatePackError),
+    Map(DirstateMapError),
+    IO(std::io::Error),
+}
+
+impl From<DirstateParseError> for DirstateError {
+    fn from(e: DirstateParseError) -> Self {
+        DirstateError::Parse(e)
+    }
+}
+
+impl From<DirstatePackError> for DirstateError {
+    fn from(e: DirstatePackError) -> Self {
+        DirstateError::Pack(e)
     }
 }
 
@@ -103,3 +137,15 @@
         PatternFileError::IO(e)
     }
 }
+
+impl From<DirstateMapError> for DirstateError {
+    fn from(e: DirstateMapError) -> Self {
+        DirstateError::Map(e)
+    }
+}
+
+impl From<std::io::Error> for DirstateError {
+    fn from(e: std::io::Error) -> Self {
+        DirstateError::IO(e)
+    }
+}
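Taken together, these conversions let dirstate code mix error sources behind a single `DirstateError` and lean on the `?` operator; a minimal sketch (the helper below is hypothetical, not part of this changeset):

```
// Hypothetical helper: `?` converts the intermediate io::Error, and
// `.into()` converts the DirstateParseError, through the From impls above.
fn read_dirstate_file(
    path: &std::path::Path,
) -> Result<Vec<u8>, DirstateError> {
    let contents = std::fs::read(path)?; // io::Error -> DirstateError::IO
    if contents.is_empty() {
        return Err(DirstateParseError::TooLittleData.into());
    }
    Ok(contents)
}
```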
--- a/rust/hg-core/src/utils.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-core/src/utils.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -1,4 +1,14 @@
+// utils module
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Contains useful functions, traits, structs, etc. for use in core.
+
 pub mod files;
+pub mod hg_path;
 
 /// Replaces the `from` slice with the `to` slice inside the `buf` slice.
 ///
@@ -11,8 +21,7 @@
 /// assert_eq!(
 ///     line,
 ///     b"I love writing tests!".to_vec()
-///);
-///
+/// );
 /// ```
 pub fn replace_slice<T>(buf: &mut [T], from: &[T], to: &[T])
 where
@@ -32,6 +41,7 @@
     fn trim_end(&self) -> &Self;
     fn trim_start(&self) -> &Self;
     fn trim(&self) -> &Self;
+    fn drop_prefix(&self, needle: &Self) -> Option<&Self>;
 }
 
 fn is_not_whitespace(c: &u8) -> bool {
@@ -72,4 +82,12 @@
     fn trim(&self) -> &[u8] {
         self.trim_start().trim_end()
     }
+
+    fn drop_prefix(&self, needle: &Self) -> Option<&Self> {
+        if self.starts_with(needle) {
+            Some(&self[needle.len()..])
+        } else {
+            None
+        }
+    }
 }
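For instance (illustrative only, matching how the `filepatterns.rs` hunks above use it), `drop_prefix` folds the usual `starts_with` + reslice dance into one call:

```
let line: &[u8] = b"syntax: glob";
if let Some(rest) = line.drop_prefix(b"syntax:") {
    assert_eq!(rest.trim(), &b"glob"[..]);
}
assert_eq!(line.drop_prefix(b"listfile:"), None);
```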
--- a/rust/hg-core/src/utils/files.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-core/src/utils/files.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -1,4 +1,18 @@
+// files.rs
+//
+// Copyright 2019
+// Raphaël Gomès <rgomes@octobus.net>,
+// Yuya Nishihara <yuya@tcha.org>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Functions for fiddling with files.
+
+use crate::utils::hg_path::{HgPath, HgPathBuf};
 use std::iter::FusedIterator;
+
+use std::fs::Metadata;
 use std::path::Path;
 
 pub fn get_path_from_bytes(bytes: &[u8]) -> &Path {
@@ -8,13 +22,9 @@
         use std::os::unix::ffi::OsStrExt;
         os_str = std::ffi::OsStr::from_bytes(bytes);
     }
-    #[cfg(windows)]
-    {
-        // TODO: convert from Windows MBCS (ANSI encoding) to WTF8.
-        // Perhaps, the return type would have to be Result<PathBuf>.
-        use std::os::windows::ffi::OsStrExt;
-        os_str = std::ffi::OsString::from_wide(bytes);
-    }
+    // TODO: handle other platforms. On Windows, this would have to convert
+    // from MBCS (ANSI encoding) to WTF-8; the return type would then
+    // probably have to become Result<PathBuf>.
 
     Path::new(os_str)
 }
@@ -22,20 +32,19 @@
 /// An iterator over repository path yielding itself and its ancestors.
 #[derive(Copy, Clone, Debug)]
 pub struct Ancestors<'a> {
-    next: Option<&'a [u8]>,
+    next: Option<&'a HgPath>,
 }
 
 impl<'a> Iterator for Ancestors<'a> {
-    // if we had an HgPath type, this would yield &'a HgPath
-    type Item = &'a [u8];
+    type Item = &'a HgPath;
 
     fn next(&mut self) -> Option<Self::Item> {
         let next = self.next;
         self.next = match self.next {
             Some(s) if s.is_empty() => None,
             Some(s) => {
-                let p = s.iter().rposition(|&c| c == b'/').unwrap_or(0);
-                Some(&s[..p])
+                let p = s.bytes().rposition(|c| *c == b'/').unwrap_or(0);
+                Some(HgPath::new(&s.as_bytes()[..p]))
             }
             None => None,
         };
@@ -52,7 +61,7 @@
 ///
 /// The path itself isn't included unless it is b"" (meaning the root
 /// directory.)
-pub fn find_dirs<'a>(path: &'a [u8]) -> Ancestors<'a> {
+pub fn find_dirs<'a>(path: &'a HgPath) -> Ancestors<'a> {
     let mut dirs = Ancestors { next: Some(path) };
     if !path.is_empty() {
         dirs.next(); // skip itself
@@ -60,14 +69,50 @@
     dirs
 }
 
+/// TODO more than ASCII?
+pub fn normalize_case(path: &HgPath) -> HgPathBuf {
+    #[cfg(windows)] // NTFS compares via upper()
+    return path.to_ascii_uppercase();
+    #[cfg(unix)]
+    path.to_ascii_lowercase()
+}
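A small illustration of the intent, assuming a Unix build (lower-casing):

```
// Both spellings collapse to the same key for case-folding comparisons.
assert_eq!(
    normalize_case(HgPath::new(b"FOO/Bar")),
    normalize_case(HgPath::new(b"foo/bar")),
);
```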
+
+#[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
+pub struct HgMetadata {
+    pub st_dev: u64,
+    pub st_mode: u32,
+    pub st_nlink: u64,
+    pub st_size: u64,
+    pub st_mtime: i64,
+    pub st_ctime: i64,
+}
+
+// TODO support other platforms
+#[cfg(unix)]
+impl HgMetadata {
+    pub fn from_metadata(metadata: Metadata) -> Self {
+        use std::os::unix::fs::MetadataExt;
+        Self {
+            st_dev: metadata.dev(),
+            st_mode: metadata.mode(),
+            st_nlink: metadata.nlink(),
+            st_size: metadata.size(),
+            st_mtime: metadata.mtime(),
+            st_ctime: metadata.ctime(),
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
+    use super::*;
+
     #[test]
     fn find_dirs_some() {
-        let mut dirs = super::find_dirs(b"foo/bar/baz");
-        assert_eq!(dirs.next(), Some(b"foo/bar".as_ref()));
-        assert_eq!(dirs.next(), Some(b"foo".as_ref()));
-        assert_eq!(dirs.next(), Some(b"".as_ref()));
+        let mut dirs = super::find_dirs(HgPath::new(b"foo/bar/baz"));
+        assert_eq!(dirs.next(), Some(HgPath::new(b"foo/bar")));
+        assert_eq!(dirs.next(), Some(HgPath::new(b"foo")));
+        assert_eq!(dirs.next(), Some(HgPath::new(b"")));
         assert_eq!(dirs.next(), None);
         assert_eq!(dirs.next(), None);
     }
@@ -75,8 +120,8 @@
     #[test]
     fn find_dirs_empty() {
         // looks weird, but mercurial.util.finddirs(b"") yields b""
-        let mut dirs = super::find_dirs(b"");
-        assert_eq!(dirs.next(), Some(b"".as_ref()));
+        let mut dirs = super::find_dirs(HgPath::new(b""));
+        assert_eq!(dirs.next(), Some(HgPath::new(b"")));
         assert_eq!(dirs.next(), None);
         assert_eq!(dirs.next(), None);
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/utils/hg_path.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,402 @@
+// hg_path.rs
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use std::borrow::Borrow;
+use std::ffi::{OsStr, OsString};
+use std::ops::Deref;
+use std::path::{Path, PathBuf};
+
+#[derive(Debug, Eq, PartialEq)]
+pub enum HgPathError {
+    /// Bytes from the invalid `HgPath`
+    LeadingSlash(Vec<u8>),
+    /// Bytes and index of the second slash
+    ConsecutiveSlashes(Vec<u8>, usize),
+    /// Bytes and index of the null byte
+    ContainsNullByte(Vec<u8>, usize),
+    /// Bytes
+    DecodeError(Vec<u8>),
+}
+
+impl ToString for HgPathError {
+    fn to_string(&self) -> String {
+        match self {
+            HgPathError::LeadingSlash(bytes) => {
+                format!("Invalid HgPath '{:?}': has a leading slash.", bytes)
+            }
+            HgPathError::ConsecutiveSlashes(bytes, pos) => format!(
+                "Invalid HgPath '{:?}': consecutive slahes at pos {}.",
+                bytes, pos
+            ),
+            HgPathError::ContainsNullByte(bytes, pos) => format!(
+                "Invalid HgPath '{:?}': contains null byte at pos {}.",
+                bytes, pos
+            ),
+            HgPathError::DecodeError(bytes) => {
+                format!("Invalid HgPath '{:?}': could not be decoded.", bytes)
+            }
+        }
+    }
+}
+
+impl From<HgPathError> for std::io::Error {
+    fn from(e: HgPathError) -> Self {
+        std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string())
+    }
+}
+
+/// This is a repository-relative path (or canonical path):
+///     - no null characters
+///     - `/` separates directories
+///     - no consecutive slashes
+///     - no leading slash,
+///     - no `.` nor `..` of special meaning
+///     - stored in repository and shared across platforms
+///
+/// Note: there is no guarantee of any `HgPath` being well-formed at any point
+/// in its lifetime for performance reasons and to ease ergonomics. It is
+/// however checked using the `check_state` method before any file-system
+/// operation.
+///
+/// This allows us to be encoding-transparent as much as possible, until really
+/// needed; `HgPath` can be transformed into a platform-specific path (`OsStr`
+/// or `Path`) whenever more complex operations are needed:
+/// On Unix, it's just byte-to-byte conversion. On Windows, it has to be
+/// decoded from MBCS to WTF-8. If WindowsUTF8Plan is implemented, the source
+/// character encoding will be determined on a per-repository basis.
+//
+// FIXME: (adapted from a comment in the stdlib)
+// `HgPath::new()` current implementation relies on `Slice` being
+// layout-compatible with `[u8]`.
+// When attribute privacy is implemented, `Slice` should be annotated as
+// `#[repr(transparent)]`.
+// Anyway, `Slice` representation and layout are considered implementation
+// detail, are not documented and must not be relied upon.
+#[derive(Eq, Ord, PartialEq, PartialOrd, Debug, Hash)]
+pub struct HgPath {
+    inner: [u8],
+}
+
+impl HgPath {
+    pub fn new<S: AsRef<[u8]> + ?Sized>(s: &S) -> &Self {
+        unsafe { &*(s.as_ref() as *const [u8] as *const Self) }
+    }
+    pub fn is_empty(&self) -> bool {
+        self.inner.is_empty()
+    }
+    pub fn len(&self) -> usize {
+        self.inner.len()
+    }
+    fn to_hg_path_buf(&self) -> HgPathBuf {
+        HgPathBuf {
+            inner: self.inner.to_owned(),
+        }
+    }
+    pub fn bytes(&self) -> std::slice::Iter<u8> {
+        self.inner.iter()
+    }
+    pub fn to_ascii_uppercase(&self) -> HgPathBuf {
+        HgPathBuf::from(self.inner.to_ascii_uppercase())
+    }
+    pub fn to_ascii_lowercase(&self) -> HgPathBuf {
+        HgPathBuf::from(self.inner.to_ascii_lowercase())
+    }
+    pub fn as_bytes(&self) -> &[u8] {
+        &self.inner
+    }
+    pub fn contains(&self, other: u8) -> bool {
+        self.inner.contains(&other)
+    }
+    pub fn join<T: ?Sized + AsRef<HgPath>>(&self, other: &T) -> HgPathBuf {
+        let mut inner = self.inner.to_owned();
+        if !inner.is_empty() && inner.last() != Some(&b'/') {
+            inner.push(b'/');
+        }
+        inner.extend(other.as_ref().bytes());
+        HgPathBuf::from_bytes(&inner)
+    }
+    /// Checks for errors in the path, short-circuiting at the first one.
+    /// This generates fine-grained errors useful for debugging.
+    /// To simply check if the path is valid during tests, use `is_valid`.
+    pub fn check_state(&self) -> Result<(), HgPathError> {
+        if self.len() == 0 {
+            return Ok(());
+        }
+        let bytes = self.as_bytes();
+        let mut previous_byte = None;
+
+        if bytes[0] == b'/' {
+            return Err(HgPathError::LeadingSlash(bytes.to_vec()));
+        }
+        for (index, byte) in bytes.iter().enumerate() {
+            match byte {
+                0 => {
+                    return Err(HgPathError::ContainsNullByte(
+                        bytes.to_vec(),
+                        index,
+                    ))
+                }
+                b'/' => {
+                    if previous_byte == Some(b'/') {
+                        return Err(HgPathError::ConsecutiveSlashes(
+                            bytes.to_vec(),
+                            index,
+                        ));
+                    }
+                }
+                _ => (),
+            };
+            previous_byte = Some(*byte);
+        }
+        Ok(())
+    }
+
+    #[cfg(test)]
+    /// Only usable during tests to force developers to handle invalid states
+    fn is_valid(&self) -> bool {
+        self.check_state().is_ok()
+    }
+}
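For example (consistent with `test_path_states` below), `check_state` reports the first violation it finds along with its byte index:

```
// The second of the two consecutive slashes sits at index 2.
assert_eq!(
    HgPath::new(b"a//b").check_state(),
    Err(HgPathError::ConsecutiveSlashes(b"a//b".to_vec(), 2)),
);
```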
+
+#[derive(Eq, Ord, Clone, PartialEq, PartialOrd, Debug, Hash)]
+pub struct HgPathBuf {
+    inner: Vec<u8>,
+}
+
+impl HgPathBuf {
+    pub fn new() -> Self {
+        Self { inner: Vec::new() }
+    }
+    pub fn push(&mut self, byte: u8) {
+        self.inner.push(byte);
+    }
+    pub fn from_bytes(s: &[u8]) -> HgPathBuf {
+        HgPath::new(s).to_owned()
+    }
+    pub fn into_vec(self) -> Vec<u8> {
+        self.inner
+    }
+    pub fn as_ref(&self) -> &[u8] {
+        self.inner.as_ref()
+    }
+}
+
+impl Deref for HgPathBuf {
+    type Target = HgPath;
+
+    #[inline]
+    fn deref(&self) -> &HgPath {
+        &HgPath::new(&self.inner)
+    }
+}
+
+impl From<Vec<u8>> for HgPathBuf {
+    fn from(vec: Vec<u8>) -> Self {
+        Self { inner: vec }
+    }
+}
+
+impl<T: ?Sized + AsRef<HgPath>> From<&T> for HgPathBuf {
+    fn from(s: &T) -> HgPathBuf {
+        s.as_ref().to_owned()
+    }
+}
+
+impl Into<Vec<u8>> for HgPathBuf {
+    fn into(self) -> Vec<u8> {
+        self.inner
+    }
+}
+
+impl Borrow<HgPath> for HgPathBuf {
+    fn borrow(&self) -> &HgPath {
+        &HgPath::new(self.as_bytes())
+    }
+}
+
+impl ToOwned for HgPath {
+    type Owned = HgPathBuf;
+
+    fn to_owned(&self) -> HgPathBuf {
+        self.to_hg_path_buf()
+    }
+}
+
+impl AsRef<HgPath> for HgPath {
+    fn as_ref(&self) -> &HgPath {
+        self
+    }
+}
+
+impl AsRef<HgPath> for HgPathBuf {
+    fn as_ref(&self) -> &HgPath {
+        self
+    }
+}
+
+impl Extend<u8> for HgPathBuf {
+    fn extend<T: IntoIterator<Item = u8>>(&mut self, iter: T) {
+        self.inner.extend(iter);
+    }
+}
+
+/// TODO: Once https://www.mercurial-scm.org/wiki/WindowsUTF8Plan is
+/// implemented, these conversion utils will have to work differently depending
+/// on the repository encoding: either `UTF-8` or `MBCS`.
+
+pub fn hg_path_to_os_string<P: AsRef<HgPath>>(
+    hg_path: P,
+) -> Result<OsString, HgPathError> {
+    hg_path.as_ref().check_state()?;
+    let os_str;
+    #[cfg(unix)]
+    {
+        use std::os::unix::ffi::OsStrExt;
+        os_str = std::ffi::OsStr::from_bytes(&hg_path.as_ref().as_bytes());
+    }
+    // TODO Handle other platforms
+    // TODO: convert from Windows MBCS (ANSI encoding) to WTF-8.
+    Ok(os_str.to_os_string())
+}
+
+pub fn hg_path_to_path_buf<P: AsRef<HgPath>>(
+    hg_path: P,
+) -> Result<PathBuf, HgPathError> {
+    Ok(Path::new(&hg_path_to_os_string(hg_path)?).to_path_buf())
+}
+
+pub fn os_string_to_hg_path_buf<S: AsRef<OsStr>>(
+    os_string: S,
+) -> Result<HgPathBuf, HgPathError> {
+    let buf;
+    #[cfg(unix)]
+    {
+        use std::os::unix::ffi::OsStrExt;
+        buf = HgPathBuf::from_bytes(&os_string.as_ref().as_bytes());
+    }
+    // TODO Handle other platforms
+    // TODO: convert from WTF8 to Windows MBCS (ANSI encoding).
+
+    buf.check_state()?;
+    Ok(buf)
+}
+
+pub fn path_to_hg_path_buf<P: AsRef<Path>>(
+    path: P,
+) -> Result<HgPathBuf, HgPathError> {
+    let buf;
+    let os_str = path.as_ref().as_os_str();
+    #[cfg(unix)]
+    {
+        use std::os::unix::ffi::OsStrExt;
+        buf = HgPathBuf::from_bytes(&os_str.as_bytes());
+    }
+    // TODO Handle other platforms
+    // TODO: convert from WTF8 to Windows MBCS (ANSI encoding).
+
+    buf.check_state()?;
+    Ok(buf)
+}
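On Unix these conversions are byte-for-byte, so a round trip is lossless; an illustrative sketch:

```
let hg = HgPathBuf::from_bytes(b"dir/file.txt");
let as_path = hg_path_to_path_buf(&hg).unwrap();
let back = path_to_hg_path_buf(&as_path).unwrap();
assert_eq!(hg, back);
```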
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_path_states() {
+        assert_eq!(
+            Err(HgPathError::LeadingSlash(b"/".to_vec())),
+            HgPath::new(b"/").check_state()
+        );
+        assert_eq!(
+            Err(HgPathError::ConsecutiveSlashes(b"a/b//c".to_vec(), 4)),
+            HgPath::new(b"a/b//c").check_state()
+        );
+        assert_eq!(
+            Err(HgPathError::ContainsNullByte(b"a/b/\0c".to_vec(), 4)),
+            HgPath::new(b"a/b/\0c").check_state()
+        );
+        // TODO test HgPathError::DecodeError for the Windows implementation.
+        assert_eq!(true, HgPath::new(b"").is_valid());
+        assert_eq!(true, HgPath::new(b"a/b/c").is_valid());
+        // Backslashes in paths are not significant, but allowed
+        assert_eq!(true, HgPath::new(br"a\b/c").is_valid());
+        // Dots in paths are not significant, but allowed
+        assert_eq!(true, HgPath::new(b"a/b/../c/").is_valid());
+        assert_eq!(true, HgPath::new(b"./a/b/../c/").is_valid());
+    }
+
+    #[test]
+    fn test_iter() {
+        let path = HgPath::new(b"a");
+        let mut iter = path.bytes();
+        assert_eq!(Some(&b'a'), iter.next());
+        assert_eq!(None, iter.next_back());
+        assert_eq!(None, iter.next());
+
+        let path = HgPath::new(b"a");
+        let mut iter = path.bytes();
+        assert_eq!(Some(&b'a'), iter.next_back());
+        assert_eq!(None, iter.next_back());
+        assert_eq!(None, iter.next());
+
+        let path = HgPath::new(b"abc");
+        let mut iter = path.bytes();
+        assert_eq!(Some(&b'a'), iter.next());
+        assert_eq!(Some(&b'c'), iter.next_back());
+        assert_eq!(Some(&b'b'), iter.next_back());
+        assert_eq!(None, iter.next_back());
+        assert_eq!(None, iter.next());
+
+        let path = HgPath::new(b"abc");
+        let mut iter = path.bytes();
+        assert_eq!(Some(&b'a'), iter.next());
+        assert_eq!(Some(&b'b'), iter.next());
+        assert_eq!(Some(&b'c'), iter.next());
+        assert_eq!(None, iter.next_back());
+        assert_eq!(None, iter.next());
+
+        let path = HgPath::new(b"abc");
+        let iter = path.bytes();
+        let mut vec = Vec::new();
+        vec.extend(iter);
+        assert_eq!(vec![b'a', b'b', b'c'], vec);
+
+        let path = HgPath::new(b"abc");
+        let mut iter = path.bytes();
+        assert_eq!(Some(2), iter.rposition(|c| *c == b'c'));
+
+        let path = HgPath::new(b"abc");
+        let mut iter = path.bytes();
+        assert_eq!(None, iter.rposition(|c| *c == b'd'));
+    }
+
+    #[test]
+    fn test_join() {
+        let path = HgPathBuf::from_bytes(b"a").join(HgPath::new(b"b"));
+        assert_eq!(b"a/b", path.as_bytes());
+
+        let path = HgPathBuf::from_bytes(b"a/").join(HgPath::new(b"b/c"));
+        assert_eq!(b"a/b/c", path.as_bytes());
+
+        // No leading slash if empty before join
+        let path = HgPathBuf::new().join(HgPath::new(b"b/c"));
+        assert_eq!(b"b/c", path.as_bytes());
+
+        // The leading slash is an invalid representation of an `HgPath`, but
+        // it can happen. This creates another invalid representation:
+        // consecutive slashes.
+        // TODO What should be done in this case? Should we silently remove
+        // the extra slash? Should we change the signature to a problematic
+        // `Result<HgPathBuf, HgPathError>`, or should we just keep it so and
+        // let the error happen upon filesystem interaction?
+        let path = HgPathBuf::from_bytes(b"a/").join(HgPath::new(b"/b"));
+        assert_eq!(b"a//b", path.as_bytes());
+        let path = HgPathBuf::from_bytes(b"a").join(HgPath::new(b"/b"));
+        assert_eq!(b"a//b", path.as_bytes());
+    }
+}
--- a/rust/hg-cpython/Cargo.toml	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-cpython/Cargo.toml	Mon Oct 21 11:09:48 2019 -0400
@@ -11,25 +11,19 @@
 [features]
 default = ["python27"]
 
-python27 = ["cpython/python27-sys",
-            "cpython/extension-module-2-7",
-            "python27-sys",
-            ]
+# Features to build an extension module:
+python27 = ["cpython/python27-sys", "cpython/extension-module-2-7"]
+python3 = ["cpython/python3-sys", "cpython/extension-module"]
 
-python3 = ["python3-sys", "cpython/python3-sys", "cpython/extension-module"]
+# Enable one of these features to build a test executable linked to libpython:
+# e.g. cargo test --no-default-features --features python27-bin
+python27-bin = ["cpython/python27-sys"]
+python3-bin = ["cpython/python3-sys"]
 
 [dependencies]
 hg-core = { path = "../hg-core" }
 libc = '*'
 
 [dependencies.cpython]
-version = "*"
+version = "0.3"
 default-features = false
-
-[dependencies.python27-sys]
-version = "0.2.1"
-optional = true
-
-[dependencies.python3-sys]
-version = "0.2.1"
-optional = true
--- a/rust/hg-cpython/src/ancestors.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-cpython/src/ancestors.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -35,9 +35,7 @@
 //! [`MissingAncestors`]: struct.MissingAncestors.html
 //! [`AncestorsIterator`]: struct.AncestorsIterator.html
 use crate::{
-    cindex::Index,
-    conversion::{py_set, rev_pyiter_collect},
-    exceptions::GraphError,
+    cindex::Index, conversion::rev_pyiter_collect, exceptions::GraphError,
 };
 use cpython::{
     ObjectProtocol, PyClone, PyDict, PyList, PyModule, PyObject, PyResult,
@@ -146,13 +144,13 @@
         Ok(py.None())
     }
 
-    def bases(&self) -> PyResult<PyObject> {
-        py_set(py, self.inner(py).borrow().get_bases())
+    def bases(&self) -> PyResult<HashSet<Revision>> {
+        Ok(self.inner(py).borrow().get_bases().clone())
     }
 
-    def basesheads(&self) -> PyResult<PyObject> {
+    def basesheads(&self) -> PyResult<HashSet<Revision>> {
         let inner = self.inner(py).borrow();
-        py_set(py, &inner.bases_heads().map_err(|e| GraphError::pynew(py, e))?)
+        inner.bases_heads().map_err(|e| GraphError::pynew(py, e))
     }
 
     def removeancestorsfrom(&self, revs: PyObject) -> PyResult<PyObject> {
--- a/rust/hg-cpython/src/cindex.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-cpython/src/cindex.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -9,23 +9,20 @@
 //!
 //! Ideally, we should use an Index entirely implemented in Rust,
 //! but this will take some time to get there.
-#[cfg(feature = "python27")]
-use python27_sys as python_sys;
-#[cfg(feature = "python3")]
-use python3_sys as python_sys;
 
-use cpython::{PyClone, PyErr, PyObject, PyResult, Python};
+use cpython::{PyClone, PyObject, PyResult, Python};
 use hg::{Graph, GraphError, Revision, WORKING_DIRECTORY_REVISION};
 use libc::c_int;
-use python_sys::PyCapsule_Import;
-use std::ffi::CStr;
-use std::mem::transmute;
 
-type IndexParentsFn = unsafe extern "C" fn(
-    index: *mut python_sys::PyObject,
-    rev: c_int,
-    ps: *mut [c_int; 2],
-) -> c_int;
+py_capsule_fn!(
+    from mercurial.cext.parsers import index_get_parents_CAPI
+        as get_parents_capi
+        signature (
+            index: *mut RawPyObject,
+            rev: c_int,
+            ps: *mut [c_int; 2],
+        ) -> c_int
+);
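+// The invocation above generates a `get_parents_capi` module exposing a
+// `CapsuleFn` type alias for this signature and a `retrieve()` function
+// that imports the capsule, replacing the hand-written
+// `decapsule_parents_fn` removed below.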
 
 /// A `Graph` backed up by objects and functions from revlog.c
 ///
@@ -61,14 +58,14 @@
 /// mechanisms in other contexts.
 pub struct Index {
     index: PyObject,
-    parents: IndexParentsFn,
+    parents: get_parents_capi::CapsuleFn,
 }
 
 impl Index {
     pub fn new(py: Python, index: PyObject) -> PyResult<Self> {
         Ok(Index {
             index: index,
-            parents: decapsule_parents_fn(py)?,
+            parents: get_parents_capi::retrieve(py)?,
         })
     }
 }
@@ -103,31 +100,3 @@
         }
     }
 }
-
-/// Return the `index_get_parents` function of the parsers C Extension module.
-///
-/// A pointer to the function is stored in the `parsers` module as a
-/// standard [Python capsule](https://docs.python.org/2/c-api/capsule.html).
-///
-/// This function retrieves the capsule and casts the function pointer
-///
-/// Casting function pointers is one of the rare cases of
-/// legitimate use cases of `mem::transmute()` (see
-/// https://doc.rust-lang.org/std/mem/fn.transmute.html of
-/// `mem::transmute()`.
-/// It is inappropriate for architectures where
-/// function and data pointer sizes differ (so-called "Harvard
-/// architectures"), but these are nowadays mostly DSPs
-/// and microcontrollers, hence out of our scope.
-fn decapsule_parents_fn(py: Python) -> PyResult<IndexParentsFn> {
-    unsafe {
-        let caps_name = CStr::from_bytes_with_nul_unchecked(
-            b"mercurial.cext.parsers.index_get_parents_CAPI\0",
-        );
-        let from_caps = PyCapsule_Import(caps_name.as_ptr(), 0);
-        if from_caps.is_null() {
-            return Err(PyErr::fetch(py));
-        }
-        Ok(transmute(from_caps))
-    }
-}
--- a/rust/hg-cpython/src/conversion.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-cpython/src/conversion.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -8,12 +8,8 @@
 //! Bindings for the hg::ancestors module provided by the
 //! `hg-core` crate. From Python, this will be seen as `rustext.ancestor`
 
-use cpython::{
-    ObjectProtocol, PyDict, PyObject, PyResult, PyTuple, Python, PythonObject,
-    ToPyObject,
-};
+use cpython::{ObjectProtocol, PyObject, PyResult, Python};
 use hg::Revision;
-use std::collections::HashSet;
 use std::iter::FromIterator;
 
 /// Utility function to convert a Python iterable into various collections
@@ -30,21 +26,3 @@
         .map(|r| r.and_then(|o| o.extract::<Revision>(py)))
         .collect()
 }
-
-/// Copy and convert an `HashSet<Revision>` in a Python set
-///
-/// This will probably turn useless once `PySet` support lands in
-/// `rust-cpython`.
-///
-/// This builds a Python tuple, then calls Python's "set()" on it
-pub fn py_set(py: Python, set: &HashSet<Revision>) -> PyResult<PyObject> {
-    let as_vec: Vec<PyObject> = set
-        .iter()
-        .map(|rev| rev.to_py_object(py).into_object())
-        .collect();
-    let as_pytuple = PyTuple::new(py, as_vec.as_slice());
-
-    let locals = PyDict::new(py);
-    locals.set_item(py, "obj", as_pytuple.to_py_object(py))?;
-    py.eval("set(obj)", None, Some(&locals))
-}
--- a/rust/hg-cpython/src/dagops.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-cpython/src/dagops.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -10,9 +10,7 @@
 //!
 //! From Python, this will be seen as `mercurial.rustext.dagop`
 use crate::{
-    cindex::Index,
-    conversion::{py_set, rev_pyiter_collect},
-    exceptions::GraphError,
+    cindex::Index, conversion::rev_pyiter_collect, exceptions::GraphError,
 };
 use cpython::{PyDict, PyModule, PyObject, PyResult, Python};
 use hg::dagops;
@@ -26,11 +24,11 @@
     py: Python,
     index: PyObject,
     revs: PyObject,
-) -> PyResult<PyObject> {
+) -> PyResult<HashSet<Revision>> {
     let mut as_set: HashSet<Revision> = rev_pyiter_collect(py, &revs)?;
     dagops::retain_heads(&Index::new(py, index)?, &mut as_set)
         .map_err(|e| GraphError::pynew(py, e))?;
-    py_set(py, &as_set)
+    Ok(as_set)
 }
 
 /// Create the module, with `__package__` given from parent
--- a/rust/hg-cpython/src/dirstate.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-cpython/src/dirstate.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -9,117 +9,84 @@
 //! `hg-core` package.
 //!
 //! From Python, this will be seen as `mercurial.rustext.dirstate`
-
+mod copymap;
+mod dirs_multiset;
+mod dirstate_map;
+mod status;
+use crate::dirstate::{
+    dirs_multiset::Dirs, dirstate_map::DirstateMap, status::status_wrapper,
+};
 use cpython::{
-    exc, ObjectProtocol, PyBytes, PyDict, PyErr, PyInt, PyModule, PyObject,
-    PyResult, PySequence, PyTuple, Python, PythonObject, ToPyObject,
+    exc, PyBytes, PyDict, PyErr, PyList, PyModule, PyObject, PyResult,
+    PySequence, Python,
 };
 use hg::{
-    pack_dirstate, parse_dirstate, CopyVecEntry, DirsIterable, DirsMultiset,
-    DirstateEntry, DirstateMapError, DirstatePackError, DirstateParents,
-    DirstateParseError, DirstateVec,
+    utils::hg_path::HgPathBuf, DirstateEntry, DirstateParseError, EntryState,
+    StateMap,
 };
 use libc::{c_char, c_int};
-#[cfg(feature = "python27")]
-use python27_sys::PyCapsule_Import;
-#[cfg(feature = "python3")]
-use python3_sys::PyCapsule_Import;
-use std::cell::RefCell;
-use std::collections::HashMap;
-use std::ffi::CStr;
-use std::mem::transmute;
+use std::convert::TryFrom;
+
+// C code uses a custom `dirstate_tuple` type and checks for this type in
+// multiple places, raising a Python `Exception` if the check does not pass.
+// Because this type differs only in name from the regular Python tuple, it
+// would be a good idea in the near future to remove it entirely to allow
+// for a pure Python tuple of the same effective structure to be used,
+// rendering this type and the capsule below useless.
+py_capsule_fn!(
+    from mercurial.cext.parsers import make_dirstate_tuple_CAPI
+        as make_dirstate_tuple_capi
+        signature (
+            state: c_char,
+            mode: c_int,
+            size: c_int,
+            mtime: c_int,
+        ) -> *mut RawPyObject
+);
 
-/// C code uses a custom `dirstate_tuple` type, checks in multiple instances
-/// for this type, and raises a Python `Exception` if the check does not pass.
-/// Because this type differs only in name from the regular Python tuple, it
-/// would be a good idea in the near future to remove it entirely to allow
-/// for a pure Python tuple of the same effective structure to be used,
-/// rendering this type and the capsule below useless.
-type MakeDirstateTupleFn = extern "C" fn(
-    state: c_char,
-    mode: c_int,
-    size: c_int,
-    mtime: c_int,
-) -> PyObject;
+pub fn make_dirstate_tuple(
+    py: Python,
+    entry: &DirstateEntry,
+) -> PyResult<PyObject> {
+    // It might be wasteful to retrieve the capsule function in a hot loop.
+    let make = make_dirstate_tuple_capi::retrieve(py)?;
 
-/// This is largely a copy/paste from cindex.rs, pending the merge of a
-/// `py_capsule_fn!` macro in the rust-cpython project:
-/// https://github.com/dgrunwald/rust-cpython/pull/169
-fn decapsule_make_dirstate_tuple(py: Python) -> PyResult<MakeDirstateTupleFn> {
-    unsafe {
-        let caps_name = CStr::from_bytes_with_nul_unchecked(
-            b"mercurial.cext.parsers.make_dirstate_tuple_CAPI\0",
-        );
-        let from_caps = PyCapsule_Import(caps_name.as_ptr(), 0);
-        if from_caps.is_null() {
-            return Err(PyErr::fetch(py));
-        }
-        Ok(transmute(from_caps))
-    }
+    let &DirstateEntry {
+        state,
+        mode,
+        size,
+        mtime,
+    } = entry;
+    // Explicitly go through u8 first, then cast to platform-specific `c_char`
+    // because Into<u8> has a specific implementation while `as c_char` would
+    // just do a naive enum cast.
+    let state_code: u8 = state.into();
+
+    let maybe_obj = unsafe {
+        let ptr = make(state_code as c_char, mode, size, mtime);
+        PyObject::from_owned_ptr_opt(py, ptr)
+    };
+    maybe_obj.ok_or_else(|| PyErr::fetch(py))
 }
 
-fn parse_dirstate_wrapper(
-    py: Python,
-    dmap: PyDict,
-    copymap: PyDict,
-    st: PyBytes,
-) -> PyResult<PyTuple> {
-    match parse_dirstate(st.data(py)) {
-        Ok((parents, dirstate_vec, copies)) => {
-            for (filename, entry) in dirstate_vec {
-                dmap.set_item(
-                    py,
-                    PyBytes::new(py, &filename[..]),
-                    decapsule_make_dirstate_tuple(py)?(
-                        entry.state as c_char,
-                        entry.mode,
-                        entry.size,
-                        entry.mtime,
-                    ),
-                )?;
-            }
-            for CopyVecEntry { path, copy_path } in copies {
-                copymap.set_item(
-                    py,
-                    PyBytes::new(py, path),
-                    PyBytes::new(py, copy_path),
-                )?;
-            }
-            Ok((PyBytes::new(py, parents.p1), PyBytes::new(py, parents.p2))
-                .to_py_object(py))
-        }
-        Err(e) => Err(PyErr::new::<exc::ValueError, _>(
-            py,
-            match e {
-                DirstateParseError::TooLittleData => {
-                    "too little data for parents".to_string()
-                }
-                DirstateParseError::Overflow => {
-                    "overflow in dirstate".to_string()
-                }
-                DirstateParseError::CorruptedEntry(e) => e,
-            },
-        )),
-    }
-}
-
-fn extract_dirstate_vec(
-    py: Python,
-    dmap: &PyDict,
-) -> Result<DirstateVec, PyErr> {
+pub fn extract_dirstate(py: Python, dmap: &PyDict) -> Result<StateMap, PyErr> {
     dmap.items(py)
         .iter()
         .map(|(filename, stats)| {
             let stats = stats.extract::<PySequence>(py)?;
             let state = stats.get_item(py, 0)?.extract::<PyBytes>(py)?;
-            let state = state.data(py)[0] as i8;
+            let state = EntryState::try_from(state.data(py)[0]).map_err(
+                |e: DirstateParseError| {
+                    PyErr::new::<exc::ValueError, _>(py, e.to_string())
+                },
+            )?;
             let mode = stats.get_item(py, 1)?.extract(py)?;
             let size = stats.get_item(py, 2)?.extract(py)?;
             let mtime = stats.get_item(py, 3)?.extract(py)?;
             let filename = filename.extract::<PyBytes>(py)?;
             let filename = filename.data(py);
             Ok((
-                filename.to_owned(),
+                HgPathBuf::from(filename.to_owned()),
                 DirstateEntry {
                     state,
                     mode,
@@ -131,167 +98,6 @@
         .collect()
 }
 
-fn pack_dirstate_wrapper(
-    py: Python,
-    dmap: PyDict,
-    copymap: PyDict,
-    pl: PyTuple,
-    now: PyInt,
-) -> PyResult<PyBytes> {
-    let p1 = pl.get_item(py, 0).extract::<PyBytes>(py)?;
-    let p1: &[u8] = p1.data(py);
-    let p2 = pl.get_item(py, 1).extract::<PyBytes>(py)?;
-    let p2: &[u8] = p2.data(py);
-
-    let dirstate_vec = extract_dirstate_vec(py, &dmap)?;
-
-    let copies: Result<HashMap<Vec<u8>, Vec<u8>>, PyErr> = copymap
-        .items(py)
-        .iter()
-        .map(|(key, value)| {
-            Ok((
-                key.extract::<PyBytes>(py)?.data(py).to_owned(),
-                value.extract::<PyBytes>(py)?.data(py).to_owned(),
-            ))
-        })
-        .collect();
-
-    match pack_dirstate(
-        &dirstate_vec,
-        &copies?,
-        DirstateParents { p1, p2 },
-        now.as_object().extract::<i32>(py)?,
-    ) {
-        Ok((packed, new_dirstate_vec)) => {
-            for (
-                filename,
-                DirstateEntry {
-                    state,
-                    mode,
-                    size,
-                    mtime,
-                },
-            ) in new_dirstate_vec
-            {
-                dmap.set_item(
-                    py,
-                    PyBytes::new(py, &filename[..]),
-                    decapsule_make_dirstate_tuple(py)?(
-                        state as c_char,
-                        mode,
-                        size,
-                        mtime,
-                    ),
-                )?;
-            }
-            Ok(PyBytes::new(py, &packed))
-        }
-        Err(error) => Err(PyErr::new::<exc::ValueError, _>(
-            py,
-            match error {
-                DirstatePackError::CorruptedParent => {
-                    "expected a 20-byte hash".to_string()
-                }
-                DirstatePackError::CorruptedEntry(e) => e,
-                DirstatePackError::BadSize(expected, actual) => {
-                    format!("bad dirstate size: {} != {}", actual, expected)
-                }
-            },
-        )),
-    }
-}
-
-py_class!(pub class Dirs |py| {
-    data dirs_map: RefCell<DirsMultiset>;
-
-    // `map` is either a `dict` or a flat iterator (usually a `set`, sometimes
-    // a `list`)
-    def __new__(
-        _cls,
-        map: PyObject,
-        skip: Option<PyObject> = None
-    ) -> PyResult<Self> {
-        let mut skip_state: Option<i8> = None;
-        if let Some(skip) = skip {
-            skip_state = Some(skip.extract::<PyBytes>(py)?.data(py)[0] as i8);
-        }
-        let dirs_map;
-
-        if let Ok(map) = map.cast_as::<PyDict>(py) {
-            let dirstate_vec = extract_dirstate_vec(py, &map)?;
-            dirs_map = DirsMultiset::new(
-                DirsIterable::Dirstate(dirstate_vec),
-                skip_state,
-            )
-        } else {
-            let map: Result<Vec<Vec<u8>>, PyErr> = map
-                .iter(py)?
-                .map(|o| Ok(o?.extract::<PyBytes>(py)?.data(py).to_owned()))
-                .collect();
-            dirs_map = DirsMultiset::new(
-                DirsIterable::Manifest(map?),
-                skip_state,
-            )
-        }
-
-        Self::create_instance(py, RefCell::new(dirs_map))
-    }
-
-    def addpath(&self, path: PyObject) -> PyResult<PyObject> {
-        self.dirs_map(py).borrow_mut().add_path(
-            path.extract::<PyBytes>(py)?.data(py),
-        );
-        Ok(py.None())
-    }
-
-    def delpath(&self, path: PyObject) -> PyResult<PyObject> {
-        self.dirs_map(py).borrow_mut().delete_path(
-            path.extract::<PyBytes>(py)?.data(py),
-        )
-            .and(Ok(py.None()))
-            .or_else(|e| {
-                match e {
-                    DirstateMapError::PathNotFound(_p) => {
-                        Err(PyErr::new::<exc::ValueError, _>(
-                            py,
-                            "expected a value, found none".to_string(),
-                        ))
-                    }
-                    DirstateMapError::EmptyPath => {
-                        Ok(py.None())
-                    }
-                }
-            })
-    }
-
-    // This is really inefficient on top of being ugly, but it's an easy way
-    // of having it work to continue working on the rest of the module
-    // hopefully bypassing Python entirely pretty soon.
-    def __iter__(&self) -> PyResult<PyObject> {
-        let dict = PyDict::new(py);
-
-        for (key, value) in self.dirs_map(py).borrow().iter() {
-            dict.set_item(
-                py,
-                PyBytes::new(py, &key[..]),
-                value.to_py_object(py),
-            )?;
-        }
-
-        let locals = PyDict::new(py);
-        locals.set_item(py, "obj", dict)?;
-
-        py.eval("iter(obj)", None, Some(&locals))
-    }
-
-    def __contains__(&self, item: PyObject) -> PyResult<bool> {
-        Ok(self
-            .dirs_map(py)
-            .borrow()
-            .contains_key(item.extract::<PyBytes>(py)?.data(py).as_ref()))
-    }
-});
-
 /// Create the module, with `__package__` given from parent
 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
     let dotted_name = &format!("{}.dirstate", package);
@@ -299,30 +105,25 @@
 
     m.add(py, "__package__", package)?;
     m.add(py, "__doc__", "Dirstate - Rust implementation")?;
+
+    m.add_class::<Dirs>(py)?;
+    m.add_class::<DirstateMap>(py)?;
     m.add(
         py,
-        "parse_dirstate",
+        "status",
         py_fn!(
             py,
-            parse_dirstate_wrapper(dmap: PyDict, copymap: PyDict, st: PyBytes)
-        ),
-    )?;
-    m.add(
-        py,
-        "pack_dirstate",
-        py_fn!(
-            py,
-            pack_dirstate_wrapper(
-                dmap: PyDict,
-                copymap: PyDict,
-                pl: PyTuple,
-                now: PyInt
+            status_wrapper(
+                dmap: DirstateMap,
+                root_dir: PyObject,
+                files: PyList,
+                list_clean: bool,
+                last_normal_time: i64,
+                check_exec: bool
             )
         ),
     )?;
 
-    m.add_class::<Dirs>(py)?;
-
     let sys = PyModule::import(py, "sys")?;
     let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
     sys_modules.set_item(py, dotted_name, &m)?;
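
As the comment inside `make_dirstate_tuple` notes, the capsule function is
re-fetched on every call. A hedged sketch of hoisting that lookup when
converting many entries at once, reusing only names and imports from this
file (a hypothetical helper, not part of the patch):

    pub fn make_dirstate_tuples(
        py: Python,
        entries: &[DirstateEntry],
    ) -> PyResult<Vec<PyObject>> {
        // Retrieve the C function once, then reuse it for every entry.
        let make = make_dirstate_tuple_capi::retrieve(py)?;
        entries
            .iter()
            .map(|entry| {
                let &DirstateEntry { state, mode, size, mtime } = entry;
                let state_code: u8 = state.into();
                let maybe_obj = unsafe {
                    let ptr = make(state_code as c_char, mode, size, mtime);
                    PyObject::from_owned_ptr_opt(py, ptr)
                };
                maybe_obj.ok_or_else(|| PyErr::fetch(py))
            })
            .collect()
    }
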
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/dirstate/copymap.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,117 @@
+// copymap.rs
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Bindings for `hg::dirstate::dirstate_map::CopyMap` provided by the
+//! `hg-core` package.
+
+use cpython::{PyBytes, PyClone, PyDict, PyObject, PyResult, Python};
+use std::cell::RefCell;
+
+use crate::dirstate::dirstate_map::DirstateMap;
+use crate::ref_sharing::PyLeakedRef;
+use hg::{utils::hg_path::HgPathBuf, CopyMapIter};
+
+py_class!(pub class CopyMap |py| {
+    data dirstate_map: DirstateMap;
+
+    def __getitem__(&self, key: PyObject) -> PyResult<PyBytes> {
+        (*self.dirstate_map(py)).copymapgetitem(py, key)
+    }
+
+    def __len__(&self) -> PyResult<usize> {
+        self.dirstate_map(py).copymaplen(py)
+    }
+
+    def __contains__(&self, key: PyObject) -> PyResult<bool> {
+        self.dirstate_map(py).copymapcontains(py, key)
+    }
+
+    def get(
+        &self,
+        key: PyObject,
+        default: Option<PyObject> = None
+    ) -> PyResult<Option<PyObject>> {
+        self.dirstate_map(py).copymapget(py, key, default)
+    }
+
+    def pop(
+        &self,
+        key: PyObject,
+        default: Option<PyObject> = None
+    ) -> PyResult<Option<PyObject>> {
+        self.dirstate_map(py).copymappop(py, key, default)
+    }
+
+    def __iter__(&self) -> PyResult<CopyMapKeysIterator> {
+        self.dirstate_map(py).copymapiter(py)
+    }
+
+    // Python's `dict()` builtin works with either a subclass of dict
+    // or an abstract mapping. Said mapping needs to implement `__getitem__`
+    // and `keys`.
+    def keys(&self) -> PyResult<CopyMapKeysIterator> {
+        self.dirstate_map(py).copymapiter(py)
+    }
+
+    def items(&self) -> PyResult<CopyMapItemsIterator> {
+        self.dirstate_map(py).copymapitemsiter(py)
+    }
+
+    def iteritems(&self) -> PyResult<CopyMapItemsIterator> {
+        self.dirstate_map(py).copymapitemsiter(py)
+    }
+
+    def __setitem__(
+        &self,
+        key: PyObject,
+        item: PyObject
+    ) -> PyResult<()> {
+        self.dirstate_map(py).copymapsetitem(py, key, item)?;
+        Ok(())
+    }
+
+    def copy(&self) -> PyResult<PyDict> {
+        self.dirstate_map(py).copymapcopy(py)
+    }
+
+});
+
+impl CopyMap {
+    pub fn from_inner(py: Python, dm: DirstateMap) -> PyResult<Self> {
+        Self::create_instance(py, dm)
+    }
+    fn translate_key(
+        py: Python,
+        res: (&HgPathBuf, &HgPathBuf),
+    ) -> PyResult<Option<PyBytes>> {
+        Ok(Some(PyBytes::new(py, res.0.as_ref())))
+    }
+    fn translate_key_value(
+        py: Python,
+        res: (&HgPathBuf, &HgPathBuf),
+    ) -> PyResult<Option<(PyBytes, PyBytes)>> {
+        let (k, v) = res;
+        Ok(Some((
+            PyBytes::new(py, k.as_ref()),
+            PyBytes::new(py, v.as_ref()),
+        )))
+    }
+}
+
+py_shared_iterator!(
+    CopyMapKeysIterator,
+    PyLeakedRef<CopyMapIter<'static>>,
+    CopyMap::translate_key,
+    Option<PyBytes>
+);
+
+py_shared_iterator!(
+    CopyMapItemsIterator,
+    PyLeakedRef<CopyMapIter<'static>>,
+    CopyMap::translate_key_value,
+    Option<(PyBytes, PyBytes)>
+);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,129 @@
+// dirs_multiset.rs
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Bindings for the `hg::dirstate::dirs_multiset` file provided by the
+//! `hg-core` package.
+
+use std::cell::RefCell;
+use std::convert::TryInto;
+
+use cpython::{
+    exc, ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyObject, PyResult,
+    Python,
+};
+
+use crate::dirstate::extract_dirstate;
+use crate::ref_sharing::{PyLeakedRef, PySharedRefCell};
+use hg::{
+    utils::hg_path::{HgPath, HgPathBuf},
+    DirsMultiset, DirsMultisetIter, DirstateMapError, DirstateParseError,
+    EntryState,
+};
+
+py_class!(pub class Dirs |py| {
+    data inner: PySharedRefCell<DirsMultiset>;
+
+    // `map` is either a `dict` or a flat iterator (usually a `set`, sometimes
+    // a `list`)
+    def __new__(
+        _cls,
+        map: PyObject,
+        skip: Option<PyObject> = None
+    ) -> PyResult<Self> {
+        let mut skip_state: Option<EntryState> = None;
+        if let Some(skip) = skip {
+            skip_state = Some(
+                skip.extract::<PyBytes>(py)?.data(py)[0]
+                    .try_into()
+                    .map_err(|e: DirstateParseError| {
+                        PyErr::new::<exc::ValueError, _>(py, e.to_string())
+                    })?,
+            );
+        }
+        let inner = if let Ok(map) = map.cast_as::<PyDict>(py) {
+            let dirstate = extract_dirstate(py, &map)?;
+            DirsMultiset::from_dirstate(&dirstate, skip_state)
+        } else {
+            let map: Result<Vec<HgPathBuf>, PyErr> = map
+                .iter(py)?
+                .map(|o| {
+                    Ok(HgPathBuf::from_bytes(
+                        o?.extract::<PyBytes>(py)?.data(py),
+                    ))
+                })
+                .collect();
+            DirsMultiset::from_manifest(&map?)
+        };
+
+        Self::create_instance(
+            py,
+            PySharedRefCell::new(inner),
+        )
+    }
+
+    def addpath(&self, path: PyObject) -> PyResult<PyObject> {
+        self.inner_shared(py).borrow_mut()?.add_path(
+            HgPath::new(path.extract::<PyBytes>(py)?.data(py)),
+        );
+        Ok(py.None())
+    }
+
+    def delpath(&self, path: PyObject) -> PyResult<PyObject> {
+        self.inner_shared(py).borrow_mut()?.delete_path(
+            HgPath::new(path.extract::<PyBytes>(py)?.data(py)),
+        )
+            .and(Ok(py.None()))
+            .or_else(|e| {
+                match e {
+                    DirstateMapError::PathNotFound(_p) => {
+                        Err(PyErr::new::<exc::ValueError, _>(
+                            py,
+                            "expected a value, found none".to_string(),
+                        ))
+                    }
+                    DirstateMapError::EmptyPath => {
+                        Ok(py.None())
+                    }
+                }
+            })
+    }
+    def __iter__(&self) -> PyResult<DirsMultisetKeysIterator> {
+        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        DirsMultisetKeysIterator::from_inner(
+            py,
+            unsafe { leaked_ref.map(py, |o| o.iter()) },
+        )
+    }
+
+    def __contains__(&self, item: PyObject) -> PyResult<bool> {
+        Ok(self.inner_shared(py).borrow().contains(HgPath::new(
+            item.extract::<PyBytes>(py)?.data(py).as_ref(),
+        )))
+    }
+});
+
+py_shared_ref!(Dirs, DirsMultiset, inner, inner_shared);
+
+impl Dirs {
+    pub fn from_inner(py: Python, d: DirsMultiset) -> PyResult<Self> {
+        Self::create_instance(py, PySharedRefCell::new(d))
+    }
+
+    fn translate_key(
+        py: Python,
+        res: &HgPathBuf,
+    ) -> PyResult<Option<PyBytes>> {
+        Ok(Some(PyBytes::new(py, res.as_ref())))
+    }
+}
+
+py_shared_iterator!(
+    DirsMultisetKeysIterator,
+    PyLeakedRef<DirsMultisetIter<'static>>,
+    Dirs::translate_key,
+    Option<PyBytes>
+);
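
The class above is a thin layer over hg-core. A hedged sketch of driving
that API directly from Rust, using only the constructors and queries that
appear in this file:

    use hg::utils::hg_path::{HgPath, HgPathBuf};
    use hg::DirsMultiset;

    fn demo() {
        let manifest = vec![HgPathBuf::from_bytes(b"a/b/c")];
        let dirs = DirsMultiset::from_manifest(&manifest);
        // Every ancestor directory of a tracked file is counted.
        assert!(dirs.contains(HgPath::new(b"a/b")));
        assert!(dirs.contains(HgPath::new(b"a")));
    }
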
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,504 @@
+// dirstate_map.rs
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Bindings for the `hg::dirstate::dirstate_map` file provided by the
+//! `hg-core` package.
+
+use std::cell::{Ref, RefCell};
+use std::convert::TryInto;
+use std::time::Duration;
+
+use cpython::{
+    exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyObject,
+    PyResult, PyTuple, Python, PythonObject, ToPyObject,
+};
+
+use crate::{
+    dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
+    dirstate::{dirs_multiset::Dirs, make_dirstate_tuple},
+    ref_sharing::{PyLeakedRef, PySharedRefCell},
+};
+use hg::{
+    utils::hg_path::{HgPath, HgPathBuf},
+    DirsMultiset, DirstateEntry, DirstateMap as RustDirstateMap,
+    DirstateParents, DirstateParseError, EntryState, StateMapIter,
+    PARENT_SIZE,
+};
+
+// TODO
+//     This object needs to share references to multiple members of its Rust
+//     inner struct, namely `copy_map`, `dirs` and `all_dirs`.
+//     Right now `CopyMap` is done, but it needs to have an explicit reference
+//     to `RustDirstateMap` which itself needs to have an encapsulation for
+//     every method in `CopyMap` (copymapcopy, etc.).
+//     This is ugly and hard to maintain.
+//     The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
+//     `py_class!` is already implemented and does not mention
+//     `RustDirstateMap`, rightfully so.
+//     All attributes also have to have a separate refcount data attribute for
+//     leaks, with all methods that go along for reference sharing.
+py_class!(pub class DirstateMap |py| {
+    data inner: PySharedRefCell<RustDirstateMap>;
+
+    def __new__(_cls, _root: PyObject) -> PyResult<Self> {
+        let inner = RustDirstateMap::default();
+        Self::create_instance(
+            py,
+            PySharedRefCell::new(inner),
+        )
+    }
+
+    def clear(&self) -> PyResult<PyObject> {
+        self.inner_shared(py).borrow_mut()?.clear();
+        Ok(py.None())
+    }
+
+    def get(
+        &self,
+        key: PyObject,
+        default: Option<PyObject> = None
+    ) -> PyResult<Option<PyObject>> {
+        let key = key.extract::<PyBytes>(py)?;
+        match self.inner_shared(py).borrow().get(HgPath::new(key.data(py))) {
+            Some(entry) => {
+                Ok(Some(make_dirstate_tuple(py, entry)?))
+            },
+            None => Ok(default)
+        }
+    }
+
+    def addfile(
+        &self,
+        f: PyObject,
+        oldstate: PyObject,
+        state: PyObject,
+        mode: PyObject,
+        size: PyObject,
+        mtime: PyObject
+    ) -> PyResult<PyObject> {
+        self.inner_shared(py).borrow_mut()?.add_file(
+            HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
+            oldstate.extract::<PyBytes>(py)?.data(py)[0]
+                .try_into()
+                .map_err(|e: DirstateParseError| {
+                    PyErr::new::<exc::ValueError, _>(py, e.to_string())
+                })?,
+            DirstateEntry {
+                state: state.extract::<PyBytes>(py)?.data(py)[0]
+                    .try_into()
+                    .map_err(|e: DirstateParseError| {
+                        PyErr::new::<exc::ValueError, _>(py, e.to_string())
+                    })?,
+                mode: mode.extract(py)?,
+                size: size.extract(py)?,
+                mtime: mtime.extract(py)?,
+            },
+        );
+        Ok(py.None())
+    }
+
+    def removefile(
+        &self,
+        f: PyObject,
+        oldstate: PyObject,
+        size: PyObject
+    ) -> PyResult<PyObject> {
+        self.inner_shared(py).borrow_mut()?
+            .remove_file(
+                HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
+                oldstate.extract::<PyBytes>(py)?.data(py)[0]
+                    .try_into()
+                    .map_err(|e: DirstateParseError| {
+                        PyErr::new::<exc::ValueError, _>(py, e.to_string())
+                    })?,
+                size.extract(py)?,
+            )
+            .or_else(|_| {
+                Err(PyErr::new::<exc::OSError, _>(
+                    py,
+                    "Dirstate error".to_string(),
+                ))
+            })?;
+        Ok(py.None())
+    }
+
+    def dropfile(
+        &self,
+        f: PyObject,
+        oldstate: PyObject
+    ) -> PyResult<PyBool> {
+        self.inner_shared(py).borrow_mut()?
+            .drop_file(
+                HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
+                oldstate.extract::<PyBytes>(py)?.data(py)[0]
+                    .try_into()
+                    .map_err(|e: DirstateParseError| {
+                        PyErr::new::<exc::ValueError, _>(py, e.to_string())
+                    })?,
+            )
+            .and_then(|b| Ok(b.to_py_object(py)))
+            .or_else(|_| {
+                Err(PyErr::new::<exc::OSError, _>(
+                    py,
+                    "Dirstate error".to_string(),
+                ))
+            })
+    }
+
+    def clearambiguoustimes(
+        &self,
+        files: PyObject,
+        now: PyObject
+    ) -> PyResult<PyObject> {
+        let files: PyResult<Vec<HgPathBuf>> = files
+            .iter(py)?
+            .map(|filename| {
+                Ok(HgPathBuf::from_bytes(
+                    filename?.extract::<PyBytes>(py)?.data(py),
+                ))
+            })
+            .collect();
+        self.inner_shared(py).borrow_mut()?
+            .clear_ambiguous_times(files?, now.extract(py)?);
+        Ok(py.None())
+    }
+
+    // TODO share the reference
+    def nonnormalentries(&self) -> PyResult<PyObject> {
+        let (non_normal, other_parent) =
+            self.inner_shared(py).borrow().non_normal_other_parent_entries();
+
+        let locals = PyDict::new(py);
+        locals.set_item(
+            py,
+            "non_normal",
+            non_normal
+                .iter()
+                .map(|v| PyBytes::new(py, v.as_ref()))
+                .collect::<Vec<PyBytes>>()
+                .to_py_object(py),
+        )?;
+        locals.set_item(
+            py,
+            "other_parent",
+            other_parent
+                .iter()
+                .map(|v| PyBytes::new(py, v.as_ref()))
+                .collect::<Vec<PyBytes>>()
+                .to_py_object(py),
+        )?;
+
+        py.eval("set(non_normal), set(other_parent)", None, Some(&locals))
+    }
+
+    def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
+        let d = d.extract::<PyBytes>(py)?;
+        Ok(self.inner_shared(py).borrow_mut()?
+            .has_tracked_dir(HgPath::new(d.data(py)))
+            .to_py_object(py))
+    }
+
+    def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
+        let d = d.extract::<PyBytes>(py)?;
+        Ok(self.inner_shared(py).borrow_mut()?
+            .has_dir(HgPath::new(d.data(py)))
+            .to_py_object(py))
+    }
+
+    def parents(&self, st: PyObject) -> PyResult<PyTuple> {
+        self.inner_shared(py).borrow_mut()?
+            .parents(st.extract::<PyBytes>(py)?.data(py))
+            .and_then(|d| {
+                Ok((PyBytes::new(py, &d.p1), PyBytes::new(py, &d.p2))
+                    .to_py_object(py))
+            })
+            .or_else(|_| {
+                Err(PyErr::new::<exc::OSError, _>(
+                    py,
+                    "Dirstate error".to_string(),
+                ))
+            })
+    }
+
+    def setparents(&self, p1: PyObject, p2: PyObject) -> PyResult<PyObject> {
+        let p1 = extract_node_id(py, &p1)?;
+        let p2 = extract_node_id(py, &p2)?;
+
+        self.inner_shared(py).borrow_mut()?
+            .set_parents(&DirstateParents { p1, p2 });
+        Ok(py.None())
+    }
+
+    def read(&self, st: PyObject) -> PyResult<Option<PyObject>> {
+        match self.inner_shared(py).borrow_mut()?
+            .read(st.extract::<PyBytes>(py)?.data(py))
+        {
+            Ok(Some(parents)) => Ok(Some(
+                (PyBytes::new(py, &parents.p1), PyBytes::new(py, &parents.p2))
+                    .to_py_object(py)
+                    .into_object(),
+            )),
+            Ok(None) => Ok(Some(py.None())),
+            Err(_) => Err(PyErr::new::<exc::OSError, _>(
+                py,
+                "Dirstate error".to_string(),
+            )),
+        }
+    }
+    def write(
+        &self,
+        p1: PyObject,
+        p2: PyObject,
+        now: PyObject
+    ) -> PyResult<PyBytes> {
+        let now = Duration::new(now.extract(py)?, 0);
+        let parents = DirstateParents {
+            p1: extract_node_id(py, &p1)?,
+            p2: extract_node_id(py, &p2)?,
+        };
+
+        match self.inner_shared(py).borrow_mut()?.pack(parents, now) {
+            Ok(packed) => Ok(PyBytes::new(py, &packed)),
+            Err(_) => Err(PyErr::new::<exc::OSError, _>(
+                py,
+                "Dirstate error".to_string(),
+            )),
+        }
+    }
+
+    def filefoldmapasdict(&self) -> PyResult<PyDict> {
+        let dict = PyDict::new(py);
+        for (key, value) in
+            self.inner_shared(py).borrow_mut()?.build_file_fold_map().iter()
+        {
+            dict.set_item(py, key.as_ref().to_vec(), value.as_ref().to_vec())?;
+        }
+        Ok(dict)
+    }
+
+    def __len__(&self) -> PyResult<usize> {
+        Ok(self.inner_shared(py).borrow().len())
+    }
+
+    def __contains__(&self, key: PyObject) -> PyResult<bool> {
+        let key = key.extract::<PyBytes>(py)?;
+        Ok(self.inner_shared(py).borrow().contains_key(HgPath::new(key.data(py))))
+    }
+
+    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
+        let key = key.extract::<PyBytes>(py)?;
+        let key = HgPath::new(key.data(py));
+        match self.inner_shared(py).borrow().get(key) {
+            Some(entry) => {
+                Ok(make_dirstate_tuple(py, entry)?)
+            },
+            None => Err(PyErr::new::<exc::KeyError, _>(
+                py,
+                String::from_utf8_lossy(key.as_bytes()),
+            )),
+        }
+    }
+
+    def keys(&self) -> PyResult<DirstateMapKeysIterator> {
+        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        DirstateMapKeysIterator::from_inner(
+            py,
+            unsafe { leaked_ref.map(py, |o| o.iter()) },
+        )
+    }
+
+    def items(&self) -> PyResult<DirstateMapItemsIterator> {
+        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        DirstateMapItemsIterator::from_inner(
+            py,
+            unsafe { leaked_ref.map(py, |o| o.iter()) },
+        )
+    }
+
+    def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
+        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        DirstateMapKeysIterator::from_inner(
+            py,
+            unsafe { leaked_ref.map(py, |o| o.iter()) },
+        )
+    }
+
+    def getdirs(&self) -> PyResult<Dirs> {
+        // TODO don't copy, share the reference
+        self.inner_shared(py).borrow_mut()?.set_dirs();
+        Dirs::from_inner(
+            py,
+            DirsMultiset::from_dirstate(
+                &self.inner_shared(py).borrow(),
+                Some(EntryState::Removed),
+            ),
+        )
+    }
+    def getalldirs(&self) -> PyResult<Dirs> {
+        // TODO don't copy, share the reference
+        self.inner_shared(py).borrow_mut()?.set_all_dirs();
+        Dirs::from_inner(
+            py,
+            DirsMultiset::from_dirstate(
+                &self.inner_shared(py).borrow(),
+                None,
+            ),
+        )
+    }
+
+    // TODO all copymap* methods, see docstring above
+    def copymapcopy(&self) -> PyResult<PyDict> {
+        let dict = PyDict::new(py);
+        for (key, value) in self.inner_shared(py).borrow().copy_map.iter() {
+            dict.set_item(
+                py,
+                PyBytes::new(py, key.as_ref()),
+                PyBytes::new(py, value.as_ref()),
+            )?;
+        }
+        Ok(dict)
+    }
+
+    def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
+        let key = key.extract::<PyBytes>(py)?;
+        match self.inner_shared(py).borrow().copy_map.get(HgPath::new(key.data(py))) {
+            Some(copy) => Ok(PyBytes::new(py, copy.as_ref())),
+            None => Err(PyErr::new::<exc::KeyError, _>(
+                py,
+                String::from_utf8_lossy(key.data(py)),
+            )),
+        }
+    }
+    def copymap(&self) -> PyResult<CopyMap> {
+        CopyMap::from_inner(py, self.clone_ref(py))
+    }
+
+    def copymaplen(&self) -> PyResult<usize> {
+        Ok(self.inner_shared(py).borrow().copy_map.len())
+    }
+    def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
+        let key = key.extract::<PyBytes>(py)?;
+        Ok(self
+            .inner_shared(py)
+            .borrow()
+            .copy_map
+            .contains_key(HgPath::new(key.data(py))))
+    }
+    def copymapget(
+        &self,
+        key: PyObject,
+        default: Option<PyObject>
+    ) -> PyResult<Option<PyObject>> {
+        let key = key.extract::<PyBytes>(py)?;
+        match self
+            .inner_shared(py)
+            .borrow()
+            .copy_map
+            .get(HgPath::new(key.data(py)))
+        {
+            Some(copy) => Ok(Some(
+                PyBytes::new(py, copy.as_ref()).into_object(),
+            )),
+            None => Ok(default),
+        }
+    }
+    def copymapsetitem(
+        &self,
+        key: PyObject,
+        value: PyObject
+    ) -> PyResult<PyObject> {
+        let key = key.extract::<PyBytes>(py)?;
+        let value = value.extract::<PyBytes>(py)?;
+        self.inner_shared(py).borrow_mut()?.copy_map.insert(
+            HgPathBuf::from_bytes(key.data(py)),
+            HgPathBuf::from_bytes(value.data(py)),
+        );
+        Ok(py.None())
+    }
+    def copymappop(
+        &self,
+        key: PyObject,
+        default: Option<PyObject>
+    ) -> PyResult<Option<PyObject>> {
+        let key = key.extract::<PyBytes>(py)?;
+        match self
+            .inner_shared(py)
+            .borrow_mut()?
+            .copy_map
+            .remove(HgPath::new(key.data(py)))
+        {
+            Some(_) => Ok(None),
+            None => Ok(default),
+        }
+    }
+
+    def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
+        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        CopyMapKeysIterator::from_inner(
+            py,
+            unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
+        )
+    }
+
+    def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
+        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        CopyMapItemsIterator::from_inner(
+            py,
+            unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
+        )
+    }
+
+});
+
+impl DirstateMap {
+    pub fn get_inner<'a>(
+        &'a self,
+        py: Python<'a>,
+    ) -> Ref<'a, RustDirstateMap> {
+        self.inner_shared(py).borrow()
+    }
+    fn translate_key(
+        py: Python,
+        res: (&HgPathBuf, &DirstateEntry),
+    ) -> PyResult<Option<PyBytes>> {
+        Ok(Some(PyBytes::new(py, res.0.as_ref())))
+    }
+    fn translate_key_value(
+        py: Python,
+        res: (&HgPathBuf, &DirstateEntry),
+    ) -> PyResult<Option<(PyBytes, PyObject)>> {
+        let (f, entry) = res;
+        Ok(Some((
+            PyBytes::new(py, f.as_ref()),
+            make_dirstate_tuple(py, entry)?,
+        )))
+    }
+}
+
+py_shared_ref!(DirstateMap, RustDirstateMap, inner, inner_shared);
+
+py_shared_iterator!(
+    DirstateMapKeysIterator,
+    PyLeakedRef<StateMapIter<'static>>,
+    DirstateMap::translate_key,
+    Option<PyBytes>
+);
+
+py_shared_iterator!(
+    DirstateMapItemsIterator,
+    PyLeakedRef<StateMapIter<'static>>,
+    DirstateMap::translate_key_value,
+    Option<(PyBytes, PyObject)>
+);
+
+fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<[u8; PARENT_SIZE]> {
+    let bytes = obj.extract::<PyBytes>(py)?;
+    match bytes.data(py).try_into() {
+        Ok(s) => Ok(s),
+        Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
+    }
+}
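
`extract_node_id` above leans on std's slice-to-array `TryInto`, which
fails unless the slice is exactly `PARENT_SIZE` (20) bytes long. A
self-contained, std-only illustration:

    use std::convert::TryInto;

    // try_into() from &[u8] to [u8; 20] succeeds only for 20-byte input.
    fn to_node(bytes: &[u8]) -> Result<[u8; 20], String> {
        bytes
            .try_into()
            .map_err(|e: std::array::TryFromSliceError| e.to_string())
    }

    fn main() {
        assert!(to_node(&[0u8; 20]).is_ok());
        assert!(to_node(b"too short").is_err());
    }
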
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/dirstate/status.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,82 @@
+// status.rs
+//
+// Copyright 2019, Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Bindings for the `hg::status` module provided by the
+//! `hg-core` crate. From Python, this will be seen as
+//! `rustext.dirstate.status`.
+//!
+
+use crate::dirstate::DirstateMap;
+use cpython::exc::ValueError;
+use cpython::{
+    PyBytes, PyErr, PyList, PyObject, PyResult, Python, PythonObject,
+    ToPyObject,
+};
+use hg::utils::files::get_path_from_bytes;
+
+use hg::utils::hg_path::HgPath;
+use hg::{status, utils::hg_path::HgPathBuf};
+
+/// This will become unnecessary once collection trait impls for
+/// `PyBytes` are added upstream.
+fn collect_pybytes_list<P: AsRef<HgPath>>(
+    py: Python,
+    collection: &[P],
+) -> PyList {
+    let list = PyList::new(py, &[]);
+
+    for (i, path) in collection.iter().enumerate() {
+        list.insert_item(
+            py,
+            i,
+            PyBytes::new(py, path.as_ref().as_bytes()).into_object(),
+        )
+    }
+
+    list
+}
+
+pub fn status_wrapper(
+    py: Python,
+    dmap: DirstateMap,
+    root_dir: PyObject,
+    files: PyList,
+    list_clean: bool,
+    last_normal_time: i64,
+    check_exec: bool,
+) -> PyResult<(PyList, PyList, PyList, PyList, PyList, PyList, PyList)> {
+    let bytes = root_dir.extract::<PyBytes>(py)?;
+    let root_dir = get_path_from_bytes(bytes.data(py));
+
+    let dmap: DirstateMap = dmap.to_py_object(py);
+    let dmap = dmap.get_inner(py);
+
+    let files: PyResult<Vec<HgPathBuf>> = files
+        .iter(py)
+        .map(|f| Ok(HgPathBuf::from_bytes(f.extract::<PyBytes>(py)?.data(py))))
+        .collect();
+    let files = files?;
+
+    let (lookup, status_res) = status(
+        &dmap,
+        &root_dir,
+        &files,
+        list_clean,
+        last_normal_time,
+        check_exec,
+    )
+    .map_err(|e| PyErr::new::<ValueError, _>(py, e.to_string()))?;
+
+    let modified = collect_pybytes_list(py, status_res.modified.as_ref());
+    let added = collect_pybytes_list(py, status_res.added.as_ref());
+    let removed = collect_pybytes_list(py, status_res.removed.as_ref());
+    let deleted = collect_pybytes_list(py, status_res.deleted.as_ref());
+    let clean = collect_pybytes_list(py, status_res.clean.as_ref());
+    let lookup = collect_pybytes_list(py, lookup.as_ref());
+    let unknown = PyList::new(py, &[]);
+
+    Ok((lookup, modified, added, removed, deleted, unknown, clean))
+}
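
Note that `unknown` is returned empty above: the wrapper only classifies
the files it is given, leaving unknown-file detection to the caller.
`collect_pybytes_list` is generic over `P: AsRef<HgPath>` so the same
helper serves the different path collections returned by `status`; a
std-only illustration of that bound, with hypothetical names:

    // One helper accepts both owned and borrowed path-like values.
    fn lengths<P: AsRef<str>>(items: &[P]) -> Vec<usize> {
        items.iter().map(|s| s.as_ref().len()).collect()
    }

    fn main() {
        let owned = vec![String::from("a/b")];
        let borrowed = ["c"];
        assert_eq!(lengths(&owned), vec![3]);
        assert_eq!(lengths(&borrowed), vec![1]);
    }
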
--- a/rust/hg-cpython/src/discovery.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-cpython/src/discovery.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -13,32 +13,39 @@
 //!   `mercurial.setdiscovery.partialdiscovery`.
 
 use crate::{
-    cindex::Index,
-    conversion::{py_set, rev_pyiter_collect},
-    exceptions::GraphError,
+    cindex::Index, conversion::rev_pyiter_collect, exceptions::GraphError,
 };
 use cpython::{
-    ObjectProtocol, PyDict, PyModule, PyObject, PyResult, Python,
+    ObjectProtocol, PyDict, PyModule, PyObject, PyResult, PyTuple, Python,
     PythonObject, ToPyObject,
 };
 use hg::discovery::PartialDiscovery as CorePartialDiscovery;
 use hg::Revision;
+use std::collections::HashSet;
 
 use std::cell::RefCell;
 
 py_class!(pub class PartialDiscovery |py| {
     data inner: RefCell<Box<CorePartialDiscovery<Index>>>;
 
+    // `respectsize` is currently only here to replicate the Python API and
+    // will be used in future patches inside methods that are yet to be
+    // implemented.
     def __new__(
         _cls,
-        index: PyObject,
-        targetheads: PyObject
+        repo: PyObject,
+        targetheads: PyObject,
+        respectsize: bool,
+        randomize: bool = true
     ) -> PyResult<PartialDiscovery> {
+        let index = repo.getattr(py, "changelog")?.getattr(py, "index")?;
         Self::create_instance(
             py,
             RefCell::new(Box::new(CorePartialDiscovery::new(
                 Index::new(py, index)?,
                 rev_pyiter_collect(py, &targetheads)?,
+                respectsize,
+                randomize,
             )))
         )
     }
@@ -98,13 +105,36 @@
         Ok(as_dict)
     }
 
-    def commonheads(&self) -> PyResult<PyObject> {
-        py_set(
-            py,
-            &self.inner(py).borrow().common_heads()
-                .map_err(|e| GraphError::pynew(py, e))?
-        )
+    def commonheads(&self) -> PyResult<HashSet<Revision>> {
+        self.inner(py).borrow().common_heads()
+            .map_err(|e| GraphError::pynew(py, e))
     }
+
+    def takefullsample(&self, _headrevs: PyObject,
+                       size: usize) -> PyResult<PyObject> {
+        let mut inner = self.inner(py).borrow_mut();
+        let sample = inner.take_full_sample(size)
+            .map_err(|e| GraphError::pynew(py, e))?;
+        let as_vec: Vec<PyObject> = sample
+            .iter()
+            .map(|rev| rev.to_py_object(py).into_object())
+            .collect();
+        Ok(PyTuple::new(py, as_vec.as_slice()).into_object())
+    }
+
+    def takequicksample(&self, headrevs: PyObject,
+                        size: usize) -> PyResult<PyObject> {
+        let mut inner = self.inner(py).borrow_mut();
+        let revsvec: Vec<Revision> = rev_pyiter_collect(py, &headrevs)?;
+        let sample = inner.take_quick_sample(revsvec, size)
+            .map_err(|e| GraphError::pynew(py, e))?;
+        let as_vec: Vec<PyObject> = sample
+            .iter()
+            .map(|rev| rev.to_py_object(py).into_object())
+            .collect();
+        Ok(PyTuple::new(py, as_vec.as_slice()).into_object())
+    }
+
 });
 
 /// Create the module, with __package__ given from parent
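
Both sampling methods above end with the same Vec-to-tuple conversion. A
sketch of factoring it out, assuming this module's imports (a hypothetical
helper, not part of the patch):

    fn sample_to_pytuple(py: Python, sample: &[Revision]) -> PyObject {
        let as_vec: Vec<PyObject> = sample
            .iter()
            .map(|rev| rev.to_py_object(py).into_object())
            .collect();
        PyTuple::new(py, as_vec.as_slice()).into_object()
    }
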
--- a/rust/hg-cpython/src/exceptions.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-cpython/src/exceptions.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -41,6 +41,7 @@
 
 py_exception!(rustext, PatternError, RuntimeError);
 py_exception!(rustext, PatternFileError, RuntimeError);
+py_exception!(rustext, HgPathPyError, RuntimeError);
 
 impl PatternError {
     pub fn pynew(py: Python, inner: hg::PatternError) -> PyErr {
@@ -67,3 +68,5 @@
         }
     }
 }
+
+py_exception!(shared_ref, AlreadyBorrowed, RuntimeError);
--- a/rust/hg-cpython/src/filepatterns.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-cpython/src/filepatterns.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -8,13 +8,19 @@
 
 //! Bindings for the `hg::filepatterns` module provided by the
 //! `hg-core` crate. From Python, this will be seen as `rustext.filepatterns`
-//! and can be used as replacement for the the pure `filepatterns` Python module.
+//! and can be used as a replacement for the pure `filepatterns` Python
+//! module.
 //!
 use crate::exceptions::{PatternError, PatternFileError};
 use cpython::{
-    PyBytes, PyDict, PyModule, PyObject, PyResult, PyTuple, Python, ToPyObject,
+    PyBytes, PyDict, PyModule, PyObject, PyResult, PyString, PyTuple, Python,
+    ToPyObject,
 };
-use hg::{build_single_regex, read_pattern_file, LineNumber, PatternTuple};
+use hg::{
+    build_single_regex, read_pattern_file, utils::files::get_path_from_bytes,
+    LineNumber, PatternTuple,
+};
+use std::path::PathBuf;
 
 /// Rust does not like functions with different return signatures.
 /// The 3-tuple version is always returned by the hg-core function,
@@ -32,7 +38,9 @@
     warn: bool,
     source_info: bool,
 ) -> PyResult<PyTuple> {
-    match read_pattern_file(file_path.extract::<PyBytes>(py)?.data(py), warn) {
+    let bytes = file_path.extract::<PyBytes>(py)?;
+    let path = get_path_from_bytes(bytes.data(py));
+    match read_pattern_file(path, warn) {
         Ok((patterns, warnings)) => {
             if source_info {
                 let itemgetter = |x: &PatternTuple| {
@@ -57,11 +65,16 @@
 
 fn warnings_to_py_bytes(
     py: Python,
-    warnings: &[(Vec<u8>, Vec<u8>)],
-) -> Vec<(PyBytes, PyBytes)> {
+    warnings: &[(PathBuf, Vec<u8>)],
+) -> Vec<(PyString, PyBytes)> {
     warnings
         .iter()
-        .map(|(path, syn)| (PyBytes::new(py, path), PyBytes::new(py, syn)))
+        .map(|(path, syn)| {
+            (
+                PyString::new(py, &path.to_string_lossy()),
+                PyBytes::new(py, syn),
+            )
+        })
         .collect()
 }
 
--- a/rust/hg-cpython/src/lib.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-cpython/src/lib.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -27,11 +27,15 @@
 pub mod ancestors;
 mod cindex;
 mod conversion;
+#[macro_use]
+pub mod ref_sharing;
 pub mod dagops;
 pub mod dirstate;
 pub mod discovery;
 pub mod exceptions;
 pub mod filepatterns;
+pub mod parsers;
+pub mod utils;
 
 py_module_initializer!(rustext, initrustext, PyInit_rustext, |py, m| {
     m.add(
@@ -50,6 +54,11 @@
         "filepatterns",
         filepatterns::init_module(py, &dotted_name)?,
     )?;
+    m.add(
+        py,
+        "parsers",
+        parsers::init_parsers_module(py, &dotted_name)?,
+    )?;
     m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?;
     m.add(
         py,
@@ -63,3 +72,10 @@
     )?;
     Ok(())
 });
+
+#[cfg(not(any(feature = "python27-bin", feature = "python3-bin")))]
+#[test]
+#[ignore]
+fn libpython_must_be_linked_to_run_tests() {
+    // Stub that makes it visible in the test output that the other
+    // tests cannot run without a linked libpython.
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/parsers.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,176 @@
+// parsers.rs
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Bindings for the `hg::dirstate::parsers` module provided by the
+//! `hg-core` package.
+//!
+//! From Python, this will be seen as `mercurial.rustext.parsers`
+//!
+use cpython::{
+    exc, PyBytes, PyDict, PyErr, PyInt, PyModule, PyResult, PyTuple, Python,
+    PythonObject, ToPyObject,
+};
+use hg::{
+    pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf,
+    DirstatePackError, DirstateParents, DirstateParseError, PARENT_SIZE,
+};
+use std::collections::HashMap;
+use std::convert::TryInto;
+
+use crate::dirstate::{extract_dirstate, make_dirstate_tuple};
+use std::time::Duration;
+
+fn parse_dirstate_wrapper(
+    py: Python,
+    dmap: PyDict,
+    copymap: PyDict,
+    st: PyBytes,
+) -> PyResult<PyTuple> {
+    let mut dirstate_map = HashMap::new();
+    let mut copies = HashMap::new();
+
+    match parse_dirstate(&mut dirstate_map, &mut copies, st.data(py)) {
+        Ok(parents) => {
+            for (filename, entry) in &dirstate_map {
+                dmap.set_item(
+                    py,
+                    PyBytes::new(py, filename.as_ref()),
+                    make_dirstate_tuple(py, entry)?,
+                )?;
+            }
+            for (path, copy_path) in copies {
+                copymap.set_item(
+                    py,
+                    PyBytes::new(py, path.as_ref()),
+                    PyBytes::new(py, copy_path.as_ref()),
+                )?;
+            }
+            Ok(
+                (PyBytes::new(py, &parents.p1), PyBytes::new(py, &parents.p2))
+                    .to_py_object(py),
+            )
+        }
+        Err(e) => Err(PyErr::new::<exc::ValueError, _>(
+            py,
+            match e {
+                DirstateParseError::TooLittleData => {
+                    "too little data for parents".to_string()
+                }
+                DirstateParseError::Overflow => {
+                    "overflow in dirstate".to_string()
+                }
+                DirstateParseError::CorruptedEntry(e) => e,
+                DirstateParseError::Damaged => {
+                    "dirstate appears to be damaged".to_string()
+                }
+            },
+        )),
+    }
+}
+
+fn pack_dirstate_wrapper(
+    py: Python,
+    dmap: PyDict,
+    copymap: PyDict,
+    pl: PyTuple,
+    now: PyInt,
+) -> PyResult<PyBytes> {
+    let p1 = pl.get_item(py, 0).extract::<PyBytes>(py)?;
+    let p1: &[u8] = p1.data(py);
+    let p2 = pl.get_item(py, 1).extract::<PyBytes>(py)?;
+    let p2: &[u8] = p2.data(py);
+
+    let mut dirstate_map = extract_dirstate(py, &dmap)?;
+
+    let copies: Result<HashMap<HgPathBuf, HgPathBuf>, PyErr> = copymap
+        .items(py)
+        .iter()
+        .map(|(key, value)| {
+            Ok((
+                HgPathBuf::from_bytes(key.extract::<PyBytes>(py)?.data(py)),
+                HgPathBuf::from_bytes(value.extract::<PyBytes>(py)?.data(py)),
+            ))
+        })
+        .collect();
+
+    if p1.len() != PARENT_SIZE || p2.len() != PARENT_SIZE {
+        return Err(PyErr::new::<exc::ValueError, _>(
+            py,
+            "expected a 20-byte hash".to_string(),
+        ));
+    }
+
+    match pack_dirstate(
+        &mut dirstate_map,
+        &copies?,
+        DirstateParents {
+            p1: p1.try_into().unwrap(),
+            p2: p2.try_into().unwrap(),
+        },
+        Duration::from_secs(now.as_object().extract::<u64>(py)?),
+    ) {
+        Ok(packed) => {
+            for (filename, entry) in &dirstate_map {
+                dmap.set_item(
+                    py,
+                    PyBytes::new(py, filename.as_ref()),
+                    make_dirstate_tuple(py, entry)?,
+                )?;
+            }
+            Ok(PyBytes::new(py, &packed))
+        }
+        Err(error) => Err(PyErr::new::<exc::ValueError, _>(
+            py,
+            match error {
+                DirstatePackError::CorruptedParent => {
+                    "expected a 20-byte hash".to_string()
+                }
+                DirstatePackError::CorruptedEntry(e) => e,
+                DirstatePackError::BadSize(expected, actual) => {
+                    format!("bad dirstate size: {} != {}", actual, expected)
+                }
+            },
+        )),
+    }
+}
+
+/// Create the module, with `__package__` given from parent
+pub fn init_parsers_module(py: Python, package: &str) -> PyResult<PyModule> {
+    let dotted_name = &format!("{}.parsers", package);
+    let m = PyModule::new(py, dotted_name)?;
+
+    m.add(py, "__package__", package)?;
+    m.add(py, "__doc__", "Parsers - Rust implementation")?;
+
+    m.add(
+        py,
+        "parse_dirstate",
+        py_fn!(
+            py,
+            parse_dirstate_wrapper(dmap: PyDict, copymap: PyDict, st: PyBytes)
+        ),
+    )?;
+    m.add(
+        py,
+        "pack_dirstate",
+        py_fn!(
+            py,
+            pack_dirstate_wrapper(
+                dmap: PyDict,
+                copymap: PyDict,
+                pl: PyTuple,
+                now: PyInt
+            )
+        ),
+    )?;
+
+    let sys = PyModule::import(py, "sys")?;
+    let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
+    sys_modules.set_item(py, dotted_name, &m)?;
+
+    Ok(m)
+}
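
The explicit `PARENT_SIZE` length check above is what makes the later
`try_into().unwrap()` calls safe. A std-only illustration, with the
constant mirrored locally:

    use std::convert::TryInto;

    const PARENT_SIZE: usize = 20; // mirrors hg-core's constant

    // The length is validated first, so try_into() cannot fail below.
    fn parent_checked(p: &[u8]) -> Result<[u8; PARENT_SIZE], String> {
        if p.len() != PARENT_SIZE {
            return Err("expected a 20-byte hash".to_string());
        }
        Ok(p.try_into().unwrap())
    }

    fn main() {
        assert!(parent_checked(&[0u8; 20]).is_ok());
        assert!(parent_checked(b"short").is_err());
    }
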
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/ref_sharing.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,523 @@
+// ref_sharing.rs
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+//! Macros for use in the `hg-cpython` bridge library.
+
+use crate::exceptions::AlreadyBorrowed;
+use cpython::{PyClone, PyObject, PyResult, Python};
+use std::cell::{Cell, Ref, RefCell, RefMut};
+
+/// Manages the shared state between Python and Rust
+#[derive(Debug, Default)]
+struct PySharedState {
+    leak_count: Cell<usize>,
+    mutably_borrowed: Cell<bool>,
+}
+
+// &PySharedState can be Send (that is, PySharedState can be Sync)
+// because any access to the inner cells is synchronized by the GIL.
+unsafe impl Sync for PySharedState {}
+
+impl PySharedState {
+    fn borrow_mut<'a, T>(
+        &'a self,
+        py: Python<'a>,
+        pyrefmut: RefMut<'a, T>,
+    ) -> PyResult<PyRefMut<'a, T>> {
+        if self.mutably_borrowed.get() {
+            return Err(AlreadyBorrowed::new(
+                py,
+                "Cannot borrow mutably while there exists another \
+                 mutable reference in a Python object",
+            ));
+        }
+        match self.leak_count.get() {
+            0 => {
+                self.mutably_borrowed.replace(true);
+                Ok(PyRefMut::new(py, pyrefmut, self))
+            }
+            // TODO
+            // For now, this works differently than Python references
+            // in the case of iterators.
+            // Python does not complain when the data an iterator
+            // points to is modified if the iterator is never used
+            // afterwards.
+            // Here, we are stricter than this by refusing to give a
+            // mutable reference if it is already borrowed.
+            // While the additional safety might be argued for, it
+            // breaks valid programming patterns in Python and we need
+            // to fix this issue down the line.
+            _ => Err(AlreadyBorrowed::new(
+                py,
+                "Cannot borrow mutably while there are \
+                 immutable references in Python objects",
+            )),
+        }
+    }
+
+    /// Return a reference to the wrapped data and its state with an
+    /// artificial static lifetime.
+    /// We need to be protected by the GIL for thread-safety.
+    ///
+    /// # Safety
+    ///
+    /// This is highly unsafe since the lifetime of the given data can be
+    /// extended. Do not call this function directly.
+    unsafe fn leak_immutable<T>(
+        &self,
+        py: Python,
+        data: &PySharedRefCell<T>,
+    ) -> PyResult<(&'static T, &'static PySharedState)> {
+        if self.mutably_borrowed.get() {
+            return Err(AlreadyBorrowed::new(
+                py,
+                "Cannot borrow immutably while there is a \
+                 mutable reference in Python objects",
+            ));
+        }
+        // TODO: it's weird that self is data.py_shared_state. Maybe we
+        // can move stuff to PySharedRefCell?
+        let ptr = data.as_ptr();
+        let state_ptr: *const PySharedState = &data.py_shared_state;
+        self.leak_count.replace(self.leak_count.get() + 1);
+        Ok((&*ptr, &*state_ptr))
+    }
+
+    /// # Safety
+    ///
+    /// It's up to you to make sure the reference is about to be deleted
+    /// when updating the leak count.
+    fn decrease_leak_count(&self, _py: Python, mutable: bool) {
+        if mutable {
+            assert_eq!(self.leak_count.get(), 0);
+            assert!(self.mutably_borrowed.get());
+            self.mutably_borrowed.replace(false);
+        } else {
+            let count = self.leak_count.get();
+            assert!(count > 0);
+            self.leak_count.replace(count - 1);
+        }
+    }
+}
+
+/// `RefCell` wrapper to be safely used in conjunction with `PySharedState`.
+///
+/// This object can be stored in a `py_class!` object as a data field. Any
+/// operation is allowed through the `PySharedRef` interface.
+#[derive(Debug)]
+pub struct PySharedRefCell<T> {
+    inner: RefCell<T>,
+    py_shared_state: PySharedState,
+}
+
+impl<T> PySharedRefCell<T> {
+    pub fn new(value: T) -> PySharedRefCell<T> {
+        Self {
+            inner: RefCell::new(value),
+            py_shared_state: PySharedState::default(),
+        }
+    }
+
+    fn borrow<'a>(&'a self, _py: Python<'a>) -> Ref<'a, T> {
+        // py_shared_state isn't involved since
+        // - inner.borrow() would fail if self is mutably borrowed,
+        // - and inner.borrow_mut() would fail while self is borrowed.
+        self.inner.borrow()
+    }
+
+    fn as_ptr(&self) -> *mut T {
+        self.inner.as_ptr()
+    }
+
+    // TODO: maybe this should be named as try_borrow_mut(), and use
+    // inner.try_borrow_mut(). The current implementation panics if
+    // self.inner has been borrowed, but returns error if py_shared_state
+    // refuses to borrow.
+    fn borrow_mut<'a>(&'a self, py: Python<'a>) -> PyResult<PyRefMut<'a, T>> {
+        self.py_shared_state.borrow_mut(py, self.inner.borrow_mut())
+    }
+}
+
+/// Sharable data member of type `T` borrowed from the `PyObject`.
+pub struct PySharedRef<'a, T> {
+    py: Python<'a>,
+    owner: &'a PyObject,
+    data: &'a PySharedRefCell<T>,
+}
+
+impl<'a, T> PySharedRef<'a, T> {
+    /// # Safety
+    ///
+    /// The `data` must be owned by the `owner`. Otherwise, the leak count
+    /// would get wrong.
+    pub unsafe fn new(
+        py: Python<'a>,
+        owner: &'a PyObject,
+        data: &'a PySharedRefCell<T>,
+    ) -> Self {
+        Self { py, owner, data }
+    }
+
+    pub fn borrow(&self) -> Ref<'a, T> {
+        self.data.borrow(self.py)
+    }
+
+    pub fn borrow_mut(&self) -> PyResult<PyRefMut<'a, T>> {
+        self.data.borrow_mut(self.py)
+    }
+
+    /// Returns a leaked reference.
+    pub fn leak_immutable(&self) -> PyResult<PyLeakedRef<&'static T>> {
+        let state = &self.data.py_shared_state;
+        unsafe {
+            let (static_ref, static_state_ref) =
+                state.leak_immutable(self.py, self.data)?;
+            Ok(PyLeakedRef::new(
+                self.py,
+                self.owner,
+                static_ref,
+                static_state_ref,
+            ))
+        }
+    }
+}
+
+/// Holds a mutable reference to data shared between Python and Rust.
+pub struct PyRefMut<'a, T> {
+    py: Python<'a>,
+    inner: RefMut<'a, T>,
+    py_shared_state: &'a PySharedState,
+}
+
+impl<'a, T> PyRefMut<'a, T> {
+    // Must be constructed by PySharedState after checking its leak_count.
+    // Otherwise, drop() would incorrectly update the state.
+    fn new(
+        py: Python<'a>,
+        inner: RefMut<'a, T>,
+        py_shared_state: &'a PySharedState,
+    ) -> Self {
+        Self {
+            py,
+            inner,
+            py_shared_state,
+        }
+    }
+}
+
+impl<'a, T> std::ops::Deref for PyRefMut<'a, T> {
+    type Target = RefMut<'a, T>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+impl<'a, T> std::ops::DerefMut for PyRefMut<'a, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+
+impl<'a, T> Drop for PyRefMut<'a, T> {
+    fn drop(&mut self) {
+        self.py_shared_state.decrease_leak_count(self.py, true);
+    }
+}
+
+/// Allows a `py_class!` generated struct to share references to one of its
+/// data members with Python.
+///
+/// # Warning
+///
+/// TODO allow Python container types: for now, integration with the garbage
+///     collector does not extend to Rust structs holding references to Python
+///     objects. Should the need surface, `__traverse__` and `__clear__` will
+///     need to be written as per the `rust-cpython` docs on GC integration.
+///
+/// # Parameters
+///
+/// * `$name` is the same identifier used in the `py_class!` macro call.
+/// * `$inner_struct` is the identifier of the underlying Rust struct.
+/// * `$data_member` is the identifier of the data member of `$inner_struct`
+/// that will be shared.
+/// * `$shared_accessor` is the function name to be generated, which allows
+/// safe access to the data member.
+///
+/// # Safety
+///
+/// `$data_member` must persist while the `$name` object is alive. In other
+/// words, it must be an accessor to a data field of the Python object.
+///
+/// # Example
+///
+/// ```
+/// struct MyStruct {
+///     inner: Vec<u32>,
+/// }
+///
+/// py_class!(pub class MyType |py| {
+///     data inner: PySharedRefCell<MyStruct>;
+/// });
+///
+/// py_shared_ref!(MyType, MyStruct, inner, inner_shared);
+/// ```
+macro_rules! py_shared_ref {
+    (
+        $name: ident,
+        $inner_struct: ident,
+        $data_member: ident,
+        $shared_accessor: ident
+    ) => {
+        impl $name {
+            /// Returns a safe reference to the shared `$data_member`.
+            ///
+            /// This function guarantees that `PySharedRef` is created with
+            /// a valid `self` and `self.$data_member(py)` pair.
+            fn $shared_accessor<'a>(
+                &'a self,
+                py: Python<'a>,
+            ) -> $crate::ref_sharing::PySharedRef<'a, $inner_struct> {
+                use cpython::PythonObject;
+                use $crate::ref_sharing::PySharedRef;
+                let owner = self.as_object();
+                let data = self.$data_member(py);
+                unsafe { PySharedRef::new(py, owner, data) }
+            }
+        }
+    };
+}
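+
+// For orientation, roughly what the macro above generates for the
+// documented example (hand-expanded sketch, imports elided; the names
+// are the hypothetical ones from the doc comment):
+//
+//     impl MyType {
+//         fn inner_shared<'a>(
+//             &'a self,
+//             py: Python<'a>,
+//         ) -> PySharedRef<'a, MyStruct> {
+//             let owner = self.as_object();
+//             let data = self.inner(py);
+//             unsafe { PySharedRef::new(py, owner, data) }
+//         }
+//     }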
+
+/// Manages immutable references to `PyObject` leaked into Python iterators.
+pub struct PyLeakedRef<T> {
+    inner: PyObject,
+    data: Option<T>,
+    py_shared_state: &'static PySharedState,
+}
+
+// DO NOT implement Deref for PyLeakedRef<T>! Dereferencing PyLeakedRef
+// without taking the Python GIL wouldn't be safe.
+
+impl<T> PyLeakedRef<T> {
+    /// # Safety
+    ///
+    /// The `py_shared_state` must be owned by the `inner` Python object.
+    fn new(
+        py: Python,
+        inner: &PyObject,
+        data: T,
+        py_shared_state: &'static PySharedState,
+    ) -> Self {
+        Self {
+            inner: inner.clone_ref(py),
+            data: Some(data),
+            py_shared_state,
+        }
+    }
+
+    /// Returns an immutable reference to the inner value.
+    pub fn get_ref<'a>(&'a self, _py: Python<'a>) -> &'a T {
+        self.data.as_ref().unwrap()
+    }
+
+    /// Returns a mutable reference to the inner value.
+    ///
+    /// Typically `T` is an iterator. If `T` is an immutable reference,
+    /// `get_mut()` is useless since the inner value can't be mutated.
+    pub fn get_mut<'a>(&'a mut self, _py: Python<'a>) -> &'a mut T {
+        self.data.as_mut().unwrap()
+    }
+
+    /// Converts the inner value with the given function.
+    ///
+    /// Typically `T` is a static reference to a container, and `U` is an
+    /// iterator of that container.
+    ///
+    /// # Safety
+    ///
+    /// The lifetime of the object passed to the function `f` is cheated.
+    /// It's typically a static reference, but is valid only while the
+    /// corresponding `PyLeakedRef` is alive. Do not copy it out of the
+    /// function call.
+    pub unsafe fn map<U>(
+        mut self,
+        py: Python,
+        f: impl FnOnce(T) -> U,
+    ) -> PyLeakedRef<U> {
+        // f() could make self.data outlive its owner. That's why map() is unsafe.
+        // In order to make this function safe, maybe we'll need a way to
+        // temporarily restrict the lifetime of self.data and translate the
+        // returned object back to Something<'static>.
+        let new_data = f(self.data.take().unwrap());
+        PyLeakedRef {
+            inner: self.inner.clone_ref(py),
+            data: Some(new_data),
+            py_shared_state: self.py_shared_state,
+        }
+    }
+}
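+
+// A sketch of the intended `map()` usage (assuming the `Owner` py_class
+// from the test module at the bottom of this file): turning a leaked
+// `&'static` container reference into a leaked iterator over it.
+//
+//     let leaked = owner.string_shared(py).leak_immutable()?;
+//     // PyLeakedRef<&'static String> -> PyLeakedRef<str::Bytes<'static>>
+//     let mut iter = unsafe { leaked.map(py, |s| s.bytes()) };
+//     assert_eq!(iter.get_mut(py).next(), Some(b'n')); // "new" in the test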
+
+impl<T> Drop for PyLeakedRef<T> {
+    fn drop(&mut self) {
+        // py_shared_state should be alive since we do have
+        // a Python reference to the owner object. Taking the GIL makes
+        // sure that the state is only accessed by this thread.
+        let gil = Python::acquire_gil();
+        let py = gil.python();
+        if self.data.is_none() {
+            return; // moved to another PyLeakedRef
+        }
+        self.py_shared_state.decrease_leak_count(py, false);
+    }
+}
+
+/// Defines a `py_class!` that acts as a Python iterator over a Rust iterator.
+///
+/// TODO: this is a bit awkward to use, and a better (more complicated)
+///     procedural macro would simplify the interface a lot.
+///
+/// # Parameters
+///
+/// * `$name` is the identifier to give to the resulting Rust struct.
+/// * `$leaked` is the type of the leaked reference held by the iterator,
+/// as produced through the matching `py_shared_ref!` accessor.
+/// * `$iterator_type` is the type of the Rust iterator.
+/// * `$success_func` is a function for processing the Rust `(key, value)`
+/// tuple on iteration success, turning it into something Python understands.
+/// * `$success_type` is the return type of `$success_func`.
+///
+/// # Example
+///
+/// ```
+/// struct MyStruct {
+///     inner: HashMap<Vec<u8>, Vec<u8>>,
+/// }
+///
+/// py_class!(pub class MyType |py| {
+///     data inner: PySharedRefCell<MyStruct>;
+///
+///     def __iter__(&self) -> PyResult<MyTypeItemsIterator> {
+///         let leaked_ref = self.inner_shared(py).leak_immutable()?;
+///         MyTypeItemsIterator::from_inner(
+///             py,
+///             unsafe { leaked_ref.map(py, |o| o.inner.iter()) },
+///         )
+///     }
+/// });
+///
+/// impl MyType {
+///     fn translate_key_value(
+///         py: Python,
+///         res: (&Vec<u8>, &Vec<u8>),
+///     ) -> PyResult<Option<(PyBytes, PyBytes)>> {
+///         let (f, entry) = res;
+///         Ok(Some((
+///             PyBytes::new(py, f),
+///             PyBytes::new(py, entry),
+///         )))
+///     }
+/// }
+///
+/// py_shared_ref!(MyType, MyStruct, inner, inner_shared);
+///
+/// py_shared_iterator!(
+///     MyTypeItemsIterator,
+///     PyLeakedRef<std::collections::hash_map::Iter<'static, Vec<u8>, Vec<u8>>>,
+///     MyType::translate_key_value,
+///     Option<(PyBytes, PyBytes)>
+/// );
+/// ```
+macro_rules! py_shared_iterator {
+    (
+        $name: ident,
+        $leaked: ty,
+        $success_func: expr,
+        $success_type: ty
+    ) => {
+        py_class!(pub class $name |py| {
+            data inner: RefCell<Option<$leaked>>;
+
+            def __next__(&self) -> PyResult<$success_type> {
+                let mut inner_opt = self.inner(py).borrow_mut();
+                if let Some(leaked) = inner_opt.as_mut() {
+                    match leaked.get_mut(py).next() {
+                        None => {
+                            // replace Some(leaked) with None, dropping $leaked
+                            inner_opt.take();
+                            Ok(None)
+                        }
+                        Some(res) => {
+                            $success_func(py, res)
+                        }
+                    }
+                } else {
+                    Ok(None)
+                }
+            }
+
+            def __iter__(&self) -> PyResult<Self> {
+                Ok(self.clone_ref(py))
+            }
+        });
+
+        impl $name {
+            pub fn from_inner(
+                py: Python,
+                leaked: $leaked,
+            ) -> PyResult<Self> {
+                Self::create_instance(
+                    py,
+                    RefCell::new(Some(leaked)),
+                )
+            }
+        }
+    };
+}
+
+#[cfg(test)]
+#[cfg(any(feature = "python27-bin", feature = "python3-bin"))]
+mod test {
+    use super::*;
+    use cpython::{GILGuard, Python};
+
+    py_class!(class Owner |py| {
+        data string: PySharedRefCell<String>;
+    });
+    py_shared_ref!(Owner, String, string, string_shared);
+
+    fn prepare_env() -> (GILGuard, Owner) {
+        let gil = Python::acquire_gil();
+        let py = gil.python();
+        let owner =
+            Owner::create_instance(py, PySharedRefCell::new("new".to_owned()))
+                .unwrap();
+        (gil, owner)
+    }
+
+    #[test]
+    fn test_borrow_mut_while_leaked() {
+        let (gil, owner) = prepare_env();
+        let py = gil.python();
+        assert!(owner.string_shared(py).borrow_mut().is_ok());
+        let _leaked = owner.string_shared(py).leak_immutable().unwrap();
+        // TODO: borrowing mutably while leaked will eventually be allowed
+        assert!(owner.string_shared(py).borrow_mut().is_err());
+    }
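+
+    // Companion check sketched from the documented semantics: dropping
+    // the last leaked reference decreases the leak count again, which
+    // should make mutable borrows possible once more.
+    #[test]
+    fn test_borrow_mut_after_leak_dropped() {
+        let (gil, owner) = prepare_env();
+        let py = gil.python();
+        let leaked = owner.string_shared(py).leak_immutable().unwrap();
+        assert!(owner.string_shared(py).borrow_mut().is_err());
+        drop(leaked); // decrease_leak_count() runs under the GIL
+        assert!(owner.string_shared(py).borrow_mut().is_ok());
+    }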
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/utils.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,13 @@
+use cpython::{PyDict, PyObject, PyResult, PyTuple, Python};
+
+#[allow(unused)]
+pub fn print_python_trace(py: Python) -> PyResult<PyObject> {
+    eprintln!("===============================");
+    eprintln!("Printing Python stack from Rust");
+    eprintln!("===============================");
+    let traceback = py.import("traceback")?;
+    let sys = py.import("sys")?;
+    let kwargs = PyDict::new(py);
+    kwargs.set_item(py, "file", sys.get(py, "stderr")?)?;
+    traceback.call(py, "print_stack", PyTuple::new(py, &[]), Some(&kwargs))
+}
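+
+// Usage sketch (debugging aid only): callable from any Rust binding code
+// that holds the GIL; it prints the current Python stack to stderr.
+//
+//     fn some_binding(py: Python) -> PyResult<()> {
+//         crate::utils::print_python_trace(py)?;
+//         Ok(())
+//     }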
--- a/rust/hg-direct-ffi/src/ancestors.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hg-direct-ffi/src/ancestors.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -36,9 +36,7 @@
 
 impl Index {
     pub fn new(index: IndexPtr) -> Self {
-        Index {
-            index: index,
-        }
+        Index { index: index }
     }
 }
 
@@ -46,8 +44,13 @@
     /// wrap a call to the C extern parents function
     fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
         let mut res: [c_int; 2] = [0; 2];
-        let code =
-            unsafe { HgRevlogIndex_GetParents(self.index, rev, &mut res as *mut [c_int; 2]) };
+        let code = unsafe {
+            HgRevlogIndex_GetParents(
+                self.index,
+                rev,
+                &mut res as *mut [c_int; 2],
+            )
+        };
         match code {
             0 => Ok(res),
             _ => Err(GraphError::ParentOutOfRange(rev)),
@@ -98,22 +101,26 @@
 
     let slice = slice::from_raw_parts(initrevs, initrevslen);
 
-    Box::into_raw(Box::new(match AncestorsIterator::new(
-        graph,
-        slice.into_iter().map(|&r| r as Revision),
-        stoprev as Revision,
-        inclb,
-    ) {
-        Ok(it) => it,
-        Err(_) => {
-            return null_mut();
-        }
-    }))
+    Box::into_raw(Box::new(
+        match AncestorsIterator::new(
+            graph,
+            slice.into_iter().map(|&r| r as Revision),
+            stoprev as Revision,
+            inclb,
+        ) {
+            Ok(it) => it,
+            Err(_) => {
+                return null_mut();
+            }
+        },
+    ))
 }
 
 /// Deallocator to be called from C code
 #[no_mangle]
-pub extern "C" fn rustlazyancestors_drop(raw_iter: *mut AncestorsIterator<Index>) {
+pub extern "C" fn rustlazyancestors_drop(
+    raw_iter: *mut AncestorsIterator<Index>,
+) {
     raw_drop(raw_iter);
 }
 
@@ -131,7 +138,9 @@
 /// it will be up to the C wrapper to convert that back into a Python end of
 /// iteration
 #[no_mangle]
-pub extern "C" fn rustlazyancestors_next(raw: *mut AncestorsIterator<Index>) -> c_long {
+pub extern "C" fn rustlazyancestors_next(
+    raw: *mut AncestorsIterator<Index>,
+) -> c_long {
     raw_next(raw)
 }
 
@@ -227,7 +236,9 @@
         let mut initrevs: Vec<c_long> = vec![11, 13];
         let initrevs_len = initrevs.len();
         let initrevs_ptr = initrevs.as_mut_ptr() as usize;
-        let handler = thread::spawn(move || stub_raw_init(initrevs_len, initrevs_ptr, 0, 1));
+        let handler = thread::spawn(move || {
+            stub_raw_init(initrevs_len, initrevs_ptr, 0, 1)
+        });
         let raw = handler.join().unwrap() as *mut AncestorsIterator<Stub>;
 
         assert_eq!(raw_next(raw), 13);
--- a/rust/hgcli/src/main.rs	Wed Oct 02 12:20:36 2019 -0400
+++ b/rust/hgcli/src/main.rs	Mon Oct 21 11:09:48 2019 -0400
@@ -104,19 +104,6 @@
     }
 }
 
-fn update_encoding(_py: Python, _sys_mod: &PyModule) {
-    // Call sys.setdefaultencoding("undefined") if HGUNICODEPEDANTRY is set.
-    let pedantry = env::var("HGUNICODEPEDANTRY").is_ok();
-
-    if pedantry {
-        // site.py removes the sys.setdefaultencoding attribute. So we need
-        // to reload the module to get a handle on it. This is a lesser
-        // used feature and we'll support this later.
-        // TODO support this
-        panic!("HGUNICODEPEDANTRY is not yet supported");
-    }
-}
-
 fn update_modules_path(env: &Environment, py: Python, sys_mod: &PyModule) {
     let sys_path = sys_mod.get(py, "path").unwrap();
     sys_path
@@ -210,7 +197,6 @@
 fn run_py(env: &Environment, py: Python) -> PyResult<()> {
     let sys_mod = py.import("sys").unwrap();
 
-    update_encoding(py, &sys_mod);
     update_modules_path(&env, py, &sys_mod);
 
     // TODO consider a better error message on failure to import.
--- a/setup.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/setup.py	Mon Oct 21 11:09:48 2019 -0400
@@ -17,50 +17,63 @@
     #
     # TODO: when we actually work on Python 3, use this string as the
     # actual supportedpy string.
-    supportedpy = ','.join([
-        '>=2.7',
-        '!=3.0.*',
-        '!=3.1.*',
-        '!=3.2.*',
-        '!=3.3.*',
-        '!=3.4.*',
-        '!=3.5.0',
-        '!=3.5.1',
-        '!=3.5.2',
-        '!=3.6.0',
-        '!=3.6.1',
-    ])
+    supportedpy = ','.join(
+        [
+            '>=2.7',
+            '!=3.0.*',
+            '!=3.1.*',
+            '!=3.2.*',
+            '!=3.3.*',
+            '!=3.4.*',
+            '!=3.5.0',
+            '!=3.5.1',
+            '!=3.5.2',
+            '!=3.6.0',
+            '!=3.6.1',
+        ]
+    )
 
 import sys, platform
 import sysconfig
+
 if sys.version_info[0] >= 3:
     printf = eval('print')
     libdir_escape = 'unicode_escape'
+
     def sysstr(s):
         return s.decode('latin-1')
+
+
 else:
     libdir_escape = 'string_escape'
+
     def printf(*args, **kwargs):
         f = kwargs.get('file', sys.stdout)
         end = kwargs.get('end', '\n')
         f.write(b' '.join(args) + end)
+
     def sysstr(s):
         return s
 
+
 # Attempt to guide users to a modern pip - this means that 2.6 users
 # should have a chance of getting a 4.2 release, and when we ratchet
 # the version requirement forward again hopefully everyone will get
 # something that works for them.
 if sys.version_info < (2, 7, 0, 'final'):
-    pip_message = ('This may be due to an out of date pip. '
-                   'Make sure you have pip >= 9.0.1.')
+    pip_message = (
+        'This may be due to an out of date pip. '
+        'Make sure you have pip >= 9.0.1.'
+    )
     try:
         import pip
+
         pip_version = tuple([int(x) for x in pip.__version__.split('.')[:3]])
-        if pip_version < (9, 0, 1) :
+        if pip_version < (9, 0, 1):
             pip_message = (
                 'Your pip version is out of date, please install '
-                'pip >= 9.0.1. pip {} detected.'.format(pip.__version__))
+                'pip >= 9.0.1. pip {} detected.'.format(pip.__version__)
+            )
         else:
             # pip is new enough - it must be something else
             pip_message = ''
@@ -70,7 +83,9 @@
 Mercurial does not support Python older than 2.7.
 Python {py} detected.
 {pip}
-""".format(py=sys.version_info, pip=pip_message)
+""".format(
+        py=sys.version_info, pip=pip_message
+    )
     printf(error, file=sys.stderr)
     sys.exit(1)
 
@@ -100,7 +115,9 @@
 
 See https://www.mercurial-scm.org/wiki/Python3 for more on Mercurial's
 Python 3 support.
-""".format(py='.'.join('%d' % x for x in sys.version_info[0:2]))
+""".format(
+            py='.'.join('%d' % x for x in sys.version_info[0:2])
+        )
 
         printf(error, file=sys.stderr)
         sys.exit(1)
@@ -114,27 +131,33 @@
 # Solaris Python packaging brain damage
 try:
     import hashlib
+
     sha = hashlib.sha1()
 except ImportError:
     try:
         import sha
-        sha.sha # silence unused import warning
+
+        sha.sha  # silence unused import warning
     except ImportError:
         raise SystemExit(
-            "Couldn't import standard hashlib (incomplete Python install).")
+            "Couldn't import standard hashlib (incomplete Python install)."
+        )
 
 try:
     import zlib
-    zlib.compressobj # silence unused import warning
+
+    zlib.compressobj  # silence unused import warning
 except ImportError:
     raise SystemExit(
-        "Couldn't import standard zlib (incomplete Python install).")
+        "Couldn't import standard zlib (incomplete Python install)."
+    )
 
 # The base IronPython distribution (as of 2.7.1) doesn't support bz2
 isironpython = False
 try:
-    isironpython = (platform.python_implementation()
-                    .lower().find("ironpython") != -1)
+    isironpython = (
+        platform.python_implementation().lower().find("ironpython") != -1
+    )
 except AttributeError:
     pass
 
@@ -143,10 +166,12 @@
 else:
     try:
         import bz2
-        bz2.BZ2Compressor # silence unused import warning
+
+        bz2.BZ2Compressor  # silence unused import warning
     except ImportError:
         raise SystemExit(
-            "Couldn't import standard bz2 (incomplete Python install).")
+            "Couldn't import standard bz2 (incomplete Python install)."
+        )
 
 ispypy = "PyPy" in sys.version
 
@@ -163,10 +188,11 @@
 import shutil
 import tempfile
 from distutils import log
+
 # We have issues with setuptools on some platforms and builders. Until
 # those are resolved, setuptools is opt-in except for platforms where
 # we don't have issues.
-issetuptools = (os.name == 'nt' or 'FORCE_SETUPTOOLS' in os.environ)
+issetuptools = os.name == 'nt' or 'FORCE_SETUPTOOLS' in os.environ
 if issetuptools:
     from setuptools import setup
 else:
@@ -194,6 +220,7 @@
 # Explain to distutils.StrictVersion how our release candidates are versioned
 StrictVersion.version_re = re.compile(r'^(\d+)\.(\d+)(\.(\d+))?-?(rc(\d+))?$')
 
+
 def write_if_changed(path, content):
     """Write content to a file iff the content hasn't changed."""
     if os.path.exists(path):
@@ -206,11 +233,13 @@
         with open(path, 'wb') as fh:
             fh.write(content)
 
+
 scripts = ['hg']
 if os.name == 'nt':
     # We remove hg.bat if we are able to build hg.exe.
     scripts.append('contrib/win32/hg.bat')
 
+
 def cancompile(cc, code):
     tmpdir = tempfile.mkdtemp(prefix='hg-install-')
     devnull = oldstderr = None
@@ -238,32 +267,39 @@
             devnull.close()
         shutil.rmtree(tmpdir)
 
+
 # simplified version of distutils.ccompiler.CCompiler.has_function
 # that actually removes its temporary files.
 def hasfunction(cc, funcname):
     code = 'int main(void) { %s(); }\n' % funcname
     return cancompile(cc, code)
 
+
 def hasheader(cc, headername):
     code = '#include <%s>\nint main(void) { return 0; }\n' % headername
     return cancompile(cc, code)
 
+
 # py2exe needs to be installed to work
 try:
     import py2exe
-    py2exe.Distribution # silence unused import warning
+
+    py2exe.Distribution  # silence unused import warning
     py2exeloaded = True
     # import py2exe's patched Distribution class
     from distutils.core import Distribution
 except ImportError:
     py2exeloaded = False
 
+
 def runcmd(cmd, env, cwd=None):
-    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE, env=env, cwd=cwd)
+    p = subprocess.Popen(
+        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, cwd=cwd
+    )
     out, err = p.communicate()
     return p.returncode, out, err
 
+
 class hgcommand(object):
     def __init__(self, cmd, env):
         self.cmd = cmd
@@ -279,22 +315,31 @@
             return ''
         return out
 
+
 def filterhgerr(err):
     # If root is executing setup.py, but the repository is owned by
     # another user (as in "sudo python setup.py install") we will get
     # trust warnings since the .hg/hgrc file is untrusted. That is
     # fine, we don't want to load it anyway.  Python may warn about
     # a missing __init__.py in mercurial/locale, we also ignore that.
-    err = [e for e in err.splitlines()
-           if (not e.startswith(b'not trusting file')
-               and not e.startswith(b'warning: Not importing')
-               and not e.startswith(b'obsolete feature not enabled')
-               and not e.startswith(b'*** failed to import extension')
-               and not e.startswith(b'devel-warn:')
-               and not (e.startswith(b'(third party extension')
-                        and e.endswith(b'or newer of Mercurial; disabling)')))]
+    err = [
+        e
+        for e in err.splitlines()
+        if (
+            not e.startswith(b'not trusting file')
+            and not e.startswith(b'warning: Not importing')
+            and not e.startswith(b'obsolete feature not enabled')
+            and not e.startswith(b'*** failed to import extension')
+            and not e.startswith(b'devel-warn:')
+            and not (
+                e.startswith(b'(third party extension')
+                and e.endswith(b'or newer of Mercurial; disabling)')
+            )
+        )
+    ]
     return b'\n'.join(b'  ' + e for e in err)
 
+
 def findhg():
     """Try to figure out how we should invoke hg for examining the local
     repository contents.
@@ -334,18 +379,23 @@
     if retcode == 0 and not filterhgerr(err):
         return hgcommand(hgcmd, hgenv)
 
-    raise SystemExit('Unable to find a working hg binary to extract the '
-                     'version from the repository tags')
+    raise SystemExit(
+        'Unable to find a working hg binary to extract the '
+        'version from the repository tags'
+    )
+
 
 def localhgenv():
     """Get an environment dictionary to use for invoking or importing
     mercurial from the local repository."""
     # Execute hg out of this directory with a custom environment which takes
     # care to not use any hgrc files and do no localization.
-    env = {'HGMODULEPOLICY': 'py',
-           'HGRCPATH': '',
-           'LANGUAGE': 'C',
-           'PATH': ''} # make pypi modules that use os.environ['PATH'] happy
+    env = {
+        'HGMODULEPOLICY': 'py',
+        'HGRCPATH': '',
+        'LANGUAGE': 'C',
+        'PATH': '',
+    }  # make pypi modules that use os.environ['PATH'] happy
     if 'LD_LIBRARY_PATH' in os.environ:
         env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
     if 'SystemRoot' in os.environ:
@@ -354,6 +404,7 @@
         env['SystemRoot'] = os.environ['SystemRoot']
     return env
 
+
 version = ''
 
 if os.path.isdir('.hg'):
@@ -367,11 +418,11 @@
         # Continuing with an invalid version number will break extensions
         # that define minimumhgversion.
         raise SystemExit('Unable to determine hg version from local repository')
-    if numerictags: # tag(s) found
+    if numerictags:  # tag(s) found
         version = numerictags[-1]
-        if hgid.endswith('+'): # propagate the dirty status to the tag
+        if hgid.endswith('+'):  # propagate the dirty status to the tag
             version += '+'
-    else: # no tag found
+    else:  # no tag found
         ltagcmd = ['parents', '--template', '{latesttag}']
         ltag = sysstr(hg.run(ltagcmd))
         changessincecmd = ['log', '-T', 'x\n', '-r', "only(.,'%s')" % ltag]
@@ -380,8 +431,9 @@
     if version.endswith('+'):
         version += time.strftime('%Y%m%d')
 elif os.path.exists('.hg_archival.txt'):
-    kw = dict([[t.strip() for t in l.split(':', 1)]
-               for l in open('.hg_archival.txt')])
+    kw = dict(
+        [[t.strip() for t in l.split(':', 1)] for l in open('.hg_archival.txt')]
+    )
     if 'tag' in kw:
         version = kw['tag']
     elif 'latesttag' in kw:
@@ -397,15 +449,21 @@
     if not isinstance(versionb, bytes):
         versionb = versionb.encode('ascii')
 
-    write_if_changed('mercurial/__version__.py', b''.join([
-        b'# this file is autogenerated by setup.py\n'
-        b'version = b"%s"\n' % versionb,
-    ]))
+    write_if_changed(
+        'mercurial/__version__.py',
+        b''.join(
+            [
+                b'# this file is autogenerated by setup.py\n'
+                b'version = b"%s"\n' % versionb,
+            ]
+        ),
+    )
 
 try:
     oldpolicy = os.environ.get('HGMODULEPOLICY', None)
     os.environ['HGMODULEPOLICY'] = 'py'
     from mercurial import __version__
+
     version = __version__.version
 except ImportError:
     version = b'unknown'
@@ -415,19 +473,23 @@
     else:
         os.environ['HGMODULEPOLICY'] = oldpolicy
 
+
 class hgbuild(build):
     # Insert hgbuildmo first so that files in mercurial/locale/ are found
     # when build_py is run next.
     sub_commands = [('build_mo', None)] + build.sub_commands
 
+
 class hgbuildmo(build):
 
     description = "build translations (.mo files)"
 
     def run(self):
         if not find_executable('msgfmt'):
-            self.warn("could not find msgfmt executable, no translations "
-                     "will be built")
+            self.warn(
+                "could not find msgfmt executable, no translations "
+                "will be built"
+            )
             return
 
         podir = 'i18n'
@@ -466,18 +528,23 @@
         # too late for some cases
         return not self.pure and Distribution.has_ext_modules(self)
 
+
 # This is ugly as a one-liner. So use a variable.
 buildextnegops = dict(getattr(build_ext, 'negative_options', {}))
 buildextnegops['no-zstd'] = 'zstd'
 buildextnegops['no-rust'] = 'rust'
 
+
 class hgbuildext(build_ext):
     user_options = build_ext.user_options + [
         ('zstd', None, 'compile zstd bindings [default]'),
         ('no-zstd', None, 'do not compile zstd bindings'),
-        ('rust', None,
-         'compile Rust extensions if they are in use '
-         '(requires Cargo) [default]'),
+        (
+            'rust',
+            None,
+            'compile Rust extensions if they are in use '
+            '(requires Cargo) [default]',
+        ),
         ('no-rust', None, 'do not compile Rust extensions'),
     ]
 
@@ -490,15 +557,26 @@
 
         return build_ext.initialize_options(self)
 
+    def finalize_options(self):
+        # Unless overridden by the end user, build extensions in parallel.
+        # Only influences behavior on Python 3.5+.
+        if getattr(self, 'parallel', None) is None:
+            self.parallel = True
+
+        return build_ext.finalize_options(self)
+
     def build_extensions(self):
-        ruststandalones = [e for e in self.extensions
-                           if isinstance(e, RustStandaloneExtension)]
-        self.extensions = [e for e in self.extensions
-                           if e not in ruststandalones]
+        ruststandalones = [
+            e for e in self.extensions if isinstance(e, RustStandaloneExtension)
+        ]
+        self.extensions = [
+            e for e in self.extensions if e not in ruststandalones
+        ]
         # Filter out zstd if disabled via argument.
         if not self.zstd:
-            self.extensions = [e for e in self.extensions
-                               if e.name != 'mercurial.zstd']
+            self.extensions = [
+                e for e in self.extensions if e.name != 'mercurial.zstd'
+            ]
 
         # Build Rust standalone extensions if they'll be used
         # and their build is not explicitly disabled (for external build
@@ -510,16 +588,21 @@
         return build_ext.build_extensions(self)
 
     def build_extension(self, ext):
-        if (self.distribution.rust and self.rust
-            and isinstance(ext, RustExtension)):
-                ext.rustbuild()
+        if (
+            self.distribution.rust
+            and self.rust
+            and isinstance(ext, RustExtension)
+        ):
+            ext.rustbuild()
         try:
             build_ext.build_extension(self, ext)
         except CCompilerError:
             if not getattr(ext, 'optional', False):
                 raise
-            log.warn("Failed to build optional extension '%s' (skipping)",
-                     ext.name)
+            log.warn(
+                "Failed to build optional extension '%s' (skipping)", ext.name
+            )
+
 
 class hgbuildscripts(build_scripts):
     def run(self):
@@ -546,6 +629,7 @@
 
         return build_scripts.run(self)
 
+
 class hgbuildpy(build_py):
     def finalize_options(self):
         build_py.finalize_options(self)
@@ -557,18 +641,24 @@
                 bdiffbuild,
                 mpatchbuild,
             )
-            exts = [mpatchbuild.ffi.distutils_extension(),
-                    bdiffbuild.ffi.distutils_extension()]
+
+            exts = [
+                mpatchbuild.ffi.distutils_extension(),
+                bdiffbuild.ffi.distutils_extension(),
+            ]
             # cffi modules go here
             if sys.platform == 'darwin':
                 from mercurial.cffi import osutilbuild
+
                 exts.append(osutilbuild.ffi.distutils_extension())
             self.distribution.ext_modules = exts
         else:
             h = os.path.join(get_python_inc(), 'Python.h')
             if not os.path.exists(h):
-                raise SystemExit('Python headers are required to build '
-                                 'Mercurial but weren\'t found in %s' % h)
+                raise SystemExit(
+                    'Python headers are required to build '
+                    'Mercurial but weren\'t found in %s' % h
+                )
 
     def run(self):
         basepath = os.path.join(self.build_lib, 'mercurial')
@@ -583,15 +673,17 @@
         else:
             modulepolicy = 'rust+c' if rust else 'c'
 
-        content = b''.join([
-            b'# this file is autogenerated by setup.py\n',
-            b'modulepolicy = b"%s"\n' % modulepolicy.encode('ascii'),
-        ])
-        write_if_changed(os.path.join(basepath, '__modulepolicy__.py'),
-                         content)
+        content = b''.join(
+            [
+                b'# this file is autogenerated by setup.py\n',
+                b'modulepolicy = b"%s"\n' % modulepolicy.encode('ascii'),
+            ]
+        )
+        write_if_changed(os.path.join(basepath, '__modulepolicy__.py'), content)
 
         build_py.run(self)
 
+
 class buildhgextindex(Command):
     description = 'generate prebuilt index of hgext (for frozen package)'
     user_options = []
@@ -609,10 +701,13 @@
                 f.write('# empty\n')
 
         # here no extensions are enabled, so disabled() lists everything
-        code = ('import pprint; from mercurial import extensions; '
-                'pprint.pprint(extensions.disabled())')
-        returncode, out, err = runcmd([sys.executable, '-c', code],
-                                      localhgenv())
+        code = (
+            'import pprint; from mercurial import extensions; '
+            'pprint.pprint(extensions.disabled())'
+        )
+        returncode, out, err = runcmd(
+            [sys.executable, '-c', code], localhgenv()
+        )
         if err or returncode != 0:
             raise DistutilsExecError(err)
 
@@ -621,12 +716,17 @@
             f.write(b'docs = ')
             f.write(out)
 
+
 class buildhgexe(build_ext):
     description = 'compile hg.exe from mercurial/exewrapper.c'
     user_options = build_ext.user_options + [
-        ('long-paths-support', None, 'enable support for long paths on '
-                                     'Windows (off by default and '
-                                     'experimental)'),
+        (
+            'long-paths-support',
+            None,
+            'enable support for long paths on '
+            'Windows (off by default and '
+            'experimental)',
+        ),
     ]
 
     LONG_PATHS_MANIFEST = """
@@ -648,34 +748,39 @@
         if os.name != 'nt':
             return
         if isinstance(self.compiler, HackedMingw32CCompiler):
-            self.compiler.compiler_so = self.compiler.compiler # no -mdll
-            self.compiler.dll_libraries = [] # no -lmsrvc90
+            self.compiler.compiler_so = self.compiler.compiler  # no -mdll
+            self.compiler.dll_libraries = []  # no -lmsrvc90
 
         # Different Python installs can have different Python library
         # names. e.g. the official CPython distribution uses pythonXY.dll
         # and MinGW uses libpythonX.Y.dll.
         _kernel32 = ctypes.windll.kernel32
-        _kernel32.GetModuleFileNameA.argtypes = [ctypes.c_void_p,
-                                                 ctypes.c_void_p,
-                                                 ctypes.c_ulong]
+        _kernel32.GetModuleFileNameA.argtypes = [
+            ctypes.c_void_p,
+            ctypes.c_void_p,
+            ctypes.c_ulong,
+        ]
         _kernel32.GetModuleFileNameA.restype = ctypes.c_ulong
         size = 1000
         buf = ctypes.create_string_buffer(size + 1)
-        filelen = _kernel32.GetModuleFileNameA(sys.dllhandle, ctypes.byref(buf),
-                                               size)
+        filelen = _kernel32.GetModuleFileNameA(
+            sys.dllhandle, ctypes.byref(buf), size
+        )
 
         if filelen > 0 and filelen != size:
             dllbasename = os.path.basename(buf.value)
             if not dllbasename.lower().endswith(b'.dll'):
-                raise SystemExit('Python DLL does not end with .dll: %s' %
-                                 dllbasename)
+                raise SystemExit(
+                    'Python DLL does not end with .dll: %s' % dllbasename
+                )
             pythonlib = dllbasename[:-4]
         else:
-            log.warn('could not determine Python DLL filename; '
-                     'assuming pythonXY')
+            log.warn(
+                'could not determine Python DLL filename; assuming pythonXY'
+            )
 
             hv = sys.hexversion
-            pythonlib = 'python%d%d' % (hv >> 24, (hv >> 16) & 0xff)
+            pythonlib = 'python%d%d' % (hv >> 24, (hv >> 16) & 0xFF)
 
         log.info('using %s as Python library name' % pythonlib)
         with open('mercurial/hgpythonlib.h', 'wb') as f:
@@ -686,14 +791,16 @@
         if sys.version_info[0] >= 3:
             macros = [('_UNICODE', None), ('UNICODE', None)]
 
-        objects = self.compiler.compile(['mercurial/exewrapper.c'],
-                                         output_dir=self.build_temp,
-                                         macros=macros)
+        objects = self.compiler.compile(
+            ['mercurial/exewrapper.c'],
+            output_dir=self.build_temp,
+            macros=macros,
+        )
         dir = os.path.dirname(self.get_ext_fullpath('dummy'))
         self.hgtarget = os.path.join(dir, 'hg')
-        self.compiler.link_executable(objects, self.hgtarget,
-                                      libraries=[],
-                                      output_dir=self.build_temp)
+        self.compiler.link_executable(
+            objects, self.hgtarget, libraries=[], output_dir=self.build_temp
+        )
         if self.long_paths_support:
             self.addlongpathsmanifest()
 
@@ -725,8 +832,16 @@
         log.info("running mt.exe to update hg.exe's manifest in-place")
         # supplying both -manifest and -inputresource to mt.exe makes
         # it merge the embedded and supplied manifests in the -outputresource
-        self.spawn(['mt.exe', '-nologo', '-manifest', manfname,
-                    inputresource, outputresource])
+        self.spawn(
+            [
+                'mt.exe',
+                '-nologo',
+                '-manifest',
+                manfname,
+                inputresource,
+                outputresource,
+            ]
+        )
         log.info("done updating hg.exe's manifest")
         os.remove(manfname)
 
@@ -735,6 +850,7 @@
         dir = os.path.dirname(self.get_ext_fullpath('dummy'))
         return os.path.join(self.build_temp, dir, 'hg.exe')
 
+
 class hgbuilddoc(Command):
     description = 'build documentation'
     user_options = [
@@ -774,12 +890,12 @@
             txt = 'doc/%s.txt' % root
             log.info('generating %s' % txt)
             res, out, err = runcmd(
-                [sys.executable, 'gendoc.py', root],
-                os.environ,
-                cwd='doc')
+                [sys.executable, 'gendoc.py', root], os.environ, cwd='doc'
+            )
             if res:
-                raise SystemExit('error running gendoc.py: %s' %
-                                 '\n'.join([out, err]))
+                raise SystemExit(
+                    'error running gendoc.py: %s' % '\n'.join([out, err])
+                )
 
             with open(txt, 'wb') as fh:
                 fh.write(out)
@@ -791,10 +907,12 @@
             res, out, err = runcmd(
                 [sys.executable, 'gendoc.py', '%s.gendoc' % root],
                 os.environ,
-                cwd='doc')
+                cwd='doc',
+            )
             if res:
-                raise SystemExit('error running gendoc: %s' %
-                                 '\n'.join([out, err]))
+                raise SystemExit(
+                    'error running gendoc: %s' % '\n'.join([out, err])
+                )
 
             with open(gendoc, 'wb') as fh:
                 fh.write(out)
@@ -802,34 +920,58 @@
         def genman(root):
             log.info('generating doc/%s' % root)
             res, out, err = runcmd(
-                [sys.executable, 'runrst', 'hgmanpage', '--halt', 'warning',
-                 '--strip-elements-with-class', 'htmlonly',
-                 '%s.txt' % root, root],
+                [
+                    sys.executable,
+                    'runrst',
+                    'hgmanpage',
+                    '--halt',
+                    'warning',
+                    '--strip-elements-with-class',
+                    'htmlonly',
+                    '%s.txt' % root,
+                    root,
+                ],
                 os.environ,
-                cwd='doc')
+                cwd='doc',
+            )
             if res:
-                raise SystemExit('error running runrst: %s' %
-                                 '\n'.join([out, err]))
+                raise SystemExit(
+                    'error running runrst: %s' % '\n'.join([out, err])
+                )
 
             normalizecrlf('doc/%s' % root)
 
         def genhtml(root):
             log.info('generating doc/%s.html' % root)
             res, out, err = runcmd(
-                [sys.executable, 'runrst', 'html', '--halt', 'warning',
-                 '--link-stylesheet', '--stylesheet-path', 'style.css',
-                 '%s.txt' % root, '%s.html' % root],
+                [
+                    sys.executable,
+                    'runrst',
+                    'html',
+                    '--halt',
+                    'warning',
+                    '--link-stylesheet',
+                    '--stylesheet-path',
+                    'style.css',
+                    '%s.txt' % root,
+                    '%s.html' % root,
+                ],
                 os.environ,
-                cwd='doc')
+                cwd='doc',
+            )
             if res:
-                raise SystemExit('error running runrst: %s' %
-                                 '\n'.join([out, err]))
+                raise SystemExit(
+                    'error running runrst: %s' % '\n'.join([out, err])
+                )
 
             normalizecrlf('doc/%s.html' % root)
 
         # This logic is duplicated in doc/Makefile.
-        sources = set(f for f in os.listdir('mercurial/help')
-                      if re.search(r'[0-9]\.txt$', f))
+        sources = set(
+            f
+            for f in os.listdir('mercurial/help')
+            if re.search(r'[0-9]\.txt$', f)
+        )
 
         # common.txt is a one-off.
         gentxt('common')
@@ -846,13 +988,20 @@
             if self.html:
                 genhtml(root)
 
+
 class hginstall(install):
 
     user_options = install.user_options + [
-        ('old-and-unmanageable', None,
-         'noop, present for eggless setuptools compat'),
-        ('single-version-externally-managed', None,
-         'noop, present for eggless setuptools compat'),
+        (
+            'old-and-unmanageable',
+            None,
+            'noop, present for eggless setuptools compat',
+        ),
+        (
+            'single-version-externally-managed',
+            None,
+            'noop, present for eggless setuptools compat',
+        ),
     ]
 
     # Also helps setuptools not be sad while we refuse to create eggs.
@@ -865,6 +1014,7 @@
         excl = set(['bdist_egg'])
         return filter(lambda x: x not in excl, install.get_sub_commands(self))
 
+
 class hginstalllib(install_lib):
     '''
     This is a specialization of install_lib that replaces the copy_file used
@@ -879,6 +1029,7 @@
 
     def run(self):
         realcopyfile = file_util.copy_file
+
         def copyfileandsetmode(*args, **kwargs):
             src, dst = args[0], args[1]
             dst, copied = realcopyfile(*args, **kwargs)
@@ -893,12 +1044,14 @@
                 m = stat.S_IMODE(st[stat.ST_MODE])
                 m = (m & ~int('0777', 8)) | setmode
                 os.chmod(dst, m)
+
         file_util.copy_file = copyfileandsetmode
         try:
             install_lib.run(self)
         finally:
             file_util.copy_file = realcopyfile
 
+
 class hginstallscripts(install_scripts):
     '''
     This is a specialization of install_scripts that replaces the @LIBDIR@ with
@@ -913,8 +1066,7 @@
 
     def finalize_options(self):
         install_scripts.finalize_options(self)
-        self.set_undefined_options('install',
-                                   ('install_lib', 'install_lib'))
+        self.set_undefined_options('install', ('install_lib', 'install_lib'))
 
     def run(self):
         install_scripts.run(self)
@@ -938,17 +1090,19 @@
         # we can't reliably set the libdir in wheels: the default behavior
         # of looking in sys.path must do.
 
-        if (os.path.splitdrive(self.install_dir)[0] !=
-            os.path.splitdrive(self.install_lib)[0]):
+        if (
+            os.path.splitdrive(self.install_dir)[0]
+            != os.path.splitdrive(self.install_lib)[0]
+        ):
             # can't make relative paths from one drive to another, so use an
             # absolute path instead
             libdir = self.install_lib
         else:
             common = os.path.commonprefix((self.install_dir, self.install_lib))
-            rest = self.install_dir[len(common):]
+            rest = self.install_dir[len(common) :]
             uplevel = len([n for n in os.path.split(rest) if n])
 
-            libdir = uplevel * ('..' + os.sep) + self.install_lib[len(common):]
+            libdir = uplevel * ('..' + os.sep) + self.install_lib[len(common) :]
 
         for outfile in self.outfiles:
             with open(outfile, 'rb') as fp:
@@ -962,14 +1116,17 @@
             # install path. During wheel packaging, the shebang has a special
             # value.
             if data.startswith(b'#!python'):
-                log.info('not rewriting @LIBDIR@ in %s because install path '
-                         'not known' % outfile)
+                log.info(
+                    'not rewriting @LIBDIR@ in %s because install path '
+                    'not known' % outfile
+                )
                 continue
 
             data = data.replace(b'@LIBDIR@', libdir.encode(libdir_escape))
             with open(outfile, 'wb') as fp:
                 fp.write(data)
 
+
 # virtualenv installs custom distutils/__init__.py and
 # distutils/distutils.cfg files which essentially proxy back to the
 # "real" distutils in the main Python install. The presence of this
@@ -1012,8 +1169,10 @@
             res.modules = modules
 
             import opcode
-            distutilsreal = os.path.join(os.path.dirname(opcode.__file__),
-                                         'distutils')
+
+            distutilsreal = os.path.join(
+                os.path.dirname(opcode.__file__), 'distutils'
+            )
 
             for root, dirs, files in os.walk(distutilsreal):
                 for f in sorted(files):
@@ -1034,59 +1193,74 @@
                         continue
 
                     if modname.endswith('.__init__'):
-                        modname = modname[:-len('.__init__')]
+                        modname = modname[: -len('.__init__')]
                         path = os.path.dirname(full)
                     else:
                         path = None
 
-                    res.modules[modname] = py2exemodule(modname, full,
-                                                        path=path)
+                    res.modules[modname] = py2exemodule(
+                        modname, full, path=path
+                    )
 
             if 'distutils' not in res.modules:
                 raise SystemExit('could not find distutils modules')
 
             return res
 
-cmdclass = {'build': hgbuild,
-            'build_doc': hgbuilddoc,
-            'build_mo': hgbuildmo,
-            'build_ext': hgbuildext,
-            'build_py': hgbuildpy,
-            'build_scripts': hgbuildscripts,
-            'build_hgextindex': buildhgextindex,
-            'install': hginstall,
-            'install_lib': hginstalllib,
-            'install_scripts': hginstallscripts,
-            'build_hgexe': buildhgexe,
-            }
+
+cmdclass = {
+    'build': hgbuild,
+    'build_doc': hgbuilddoc,
+    'build_mo': hgbuildmo,
+    'build_ext': hgbuildext,
+    'build_py': hgbuildpy,
+    'build_scripts': hgbuildscripts,
+    'build_hgextindex': buildhgextindex,
+    'install': hginstall,
+    'install_lib': hginstalllib,
+    'install_scripts': hginstallscripts,
+    'build_hgexe': buildhgexe,
+}
 
 if py2exehacked:
     cmdclass['py2exe'] = hgbuildpy2exe
 
-packages = ['mercurial',
-            'mercurial.cext',
-            'mercurial.cffi',
-            'mercurial.hgweb',
-            'mercurial.pure',
-            'mercurial.thirdparty',
-            'mercurial.thirdparty.attr',
-            'mercurial.thirdparty.zope',
-            'mercurial.thirdparty.zope.interface',
-            'mercurial.utils',
-            'mercurial.revlogutils',
-            'mercurial.testing',
-            'hgext', 'hgext.convert', 'hgext.fsmonitor',
-            'hgext.fastannotate',
-            'hgext.fsmonitor.pywatchman',
-            'hgext.infinitepush',
-            'hgext.highlight',
-            'hgext.largefiles', 'hgext.lfs', 'hgext.narrow',
-            'hgext.remotefilelog',
-            'hgext.zeroconf', 'hgext3rd',
-            'hgdemandimport']
+packages = [
+    'mercurial',
+    'mercurial.cext',
+    'mercurial.cffi',
+    'mercurial.hgweb',
+    'mercurial.interfaces',
+    'mercurial.pure',
+    'mercurial.thirdparty',
+    'mercurial.thirdparty.attr',
+    'mercurial.thirdparty.zope',
+    'mercurial.thirdparty.zope.interface',
+    'mercurial.utils',
+    'mercurial.revlogutils',
+    'mercurial.testing',
+    'hgext',
+    'hgext.convert',
+    'hgext.fsmonitor',
+    'hgext.fastannotate',
+    'hgext.fsmonitor.pywatchman',
+    'hgext.highlight',
+    'hgext.infinitepush',
+    'hgext.largefiles',
+    'hgext.lfs',
+    'hgext.narrow',
+    'hgext.remotefilelog',
+    'hgext.zeroconf',
+    'hgext3rd',
+    'hgdemandimport',
+]
 if sys.version_info[0] == 2:
-    packages.extend(['mercurial.thirdparty.concurrent',
-                     'mercurial.thirdparty.concurrent.futures'])
+    packages.extend(
+        [
+            'mercurial.thirdparty.concurrent',
+            'mercurial.thirdparty.concurrent.futures',
+        ]
+    )
 
 if 'HG_PY2EXE_EXTRA_INSTALL_PACKAGES' in os.environ:
     # py2exe can't cope with namespace packages very well, so we have to
@@ -1094,9 +1268,11 @@
     # image here. This is gross, but you gotta do what you gotta do.
     packages.extend(os.environ['HG_PY2EXE_EXTRA_INSTALL_PACKAGES'].split(' '))
 
-common_depends = ['mercurial/bitmanipulation.h',
-                  'mercurial/compat.h',
-                  'mercurial/cext/util.h']
+common_depends = [
+    'mercurial/bitmanipulation.h',
+    'mercurial/compat.h',
+    'mercurial/cext/util.h',
+]
 common_include_dirs = ['mercurial']
 
 osutil_cflags = []
@@ -1108,16 +1284,24 @@
         osutil_cflags.append('-DHAVE_%s' % func.upper())
 
 for plat, macro, code in [
-    ('bsd|darwin', 'BSD_STATFS', '''
+    (
+        'bsd|darwin',
+        'BSD_STATFS',
+        '''
      #include <sys/param.h>
      #include <sys/mount.h>
      int main() { struct statfs s; return sizeof(s.f_fstypename); }
-     '''),
-    ('linux', 'LINUX_STATFS', '''
+     ''',
+    ),
+    (
+        'linux',
+        'LINUX_STATFS',
+        '''
      #include <linux/magic.h>
      #include <sys/vfs.h>
      int main() { struct statfs s; return sizeof(s.f_type); }
-     '''),
+     ''',
+    ),
 ]:
     if re.search(plat, sys.platform) and cancompile(new_compiler(), code):
         osutil_cflags.append('-DHAVE_%s' % macro)
@@ -1141,17 +1325,20 @@
     'mercurial/thirdparty/xdiff/xutils.h',
 ]
 
+
 class RustCompilationError(CCompilerError):
     """Exception class for Rust compilation errors."""
 
+
 class RustExtension(Extension):
     """Base classes for concrete Rust Extension classes.
     """
 
     rusttargetdir = os.path.join('rust', 'target', 'release')
 
-    def __init__(self, mpath, sources, rustlibname, subcrate,
-                 py3_features=None, **kw):
+    def __init__(
+        self, mpath, sources, rustlibname, subcrate, py3_features=None, **kw
+    ):
         Extension.__init__(self, mpath, sources, **kw)
         srcdir = self.rustsrcdir = os.path.join('rust', subcrate)
         self.py3_features = py3_features
@@ -1163,9 +1350,11 @@
         if os.path.exists(cargo_lock):
             self.depends.append(cargo_lock)
         for dirpath, subdir, fnames in os.walk(os.path.join(srcdir, 'src')):
-            self.depends.extend(os.path.join(dirpath, fname)
-                                for fname in fnames
-                                if os.path.splitext(fname)[1] == '.rs')
+            self.depends.extend(
+                os.path.join(dirpath, fname)
+                for fname in fnames
+                if os.path.splitext(fname)[1] == '.rs'
+            )
 
     @staticmethod
     def rustdylibsuffix():
@@ -1193,16 +1382,19 @@
             # Unix only fix (os.path.expanduser not really reliable if
             # HOME is shadowed like this)
             import pwd
+
             env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir
 
         cargocmd = ['cargo', 'rustc', '-vv', '--release']
         if sys.version_info[0] == 3 and self.py3_features is not None:
-            cargocmd.extend(('--features', self.py3_features,
-                             '--no-default-features'))
+            cargocmd.extend(
+                ('--features', self.py3_features, '--no-default-features')
+            )
         cargocmd.append('--')
         if sys.platform == 'darwin':
-            cargocmd.extend(("-C", "link-arg=-undefined",
-                             "-C", "link-arg=dynamic_lookup"))
+            cargocmd.extend(
+                ("-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup")
+            )
         try:
             subprocess.check_call(cargocmd, env=env, cwd=self.rustsrcdir)
         except OSError as exc:
@@ -1210,14 +1402,17 @@
                 raise RustCompilationError("Cargo not found")
             elif exc.errno == errno.EACCES:
                 raise RustCompilationError(
-                    "Cargo found, but permisssion to execute it is denied")
+                    "Cargo found, but permisssion to execute it is denied"
+                )
             else:
                 raise
         except subprocess.CalledProcessError:
             raise RustCompilationError(
                 "Cargo failed. Working directory: %r, "
                 "command: %r, environment: %r"
-                % (self.rustsrcdir, cargocmd, env))
+                % (self.rustsrcdir, cargocmd, env)
+            )
+
 
 class RustEnhancedExtension(RustExtension):
     """A C Extension, conditionally enhanced with Rust code.
@@ -1228,8 +1423,9 @@
     """
 
     def __init__(self, mpath, sources, rustlibname, subcrate, **kw):
-        RustExtension.__init__(self, mpath, sources, rustlibname, subcrate,
-                               **kw)
+        RustExtension.__init__(
+            self, mpath, sources, rustlibname, subcrate, **kw
+        )
         if hgrustext != 'direct-ffi':
             return
         self.extra_compile_args.append('-DWITH_RUST')
@@ -1240,11 +1436,12 @@
         if hgrustext == 'direct-ffi':
             RustExtension.rustbuild(self)
 
+
 class RustStandaloneExtension(RustExtension):
-
     def __init__(self, pydottedname, rustcrate, dylibname, **kw):
-        RustExtension.__init__(self, pydottedname, [], dylibname, rustcrate,
-                               **kw)
+        RustExtension.__init__(
+            self, pydottedname, [], dylibname, rustcrate, **kw
+        )
         self.dylibname = dylibname
 
     def build(self, target_dir):
@@ -1252,58 +1449,85 @@
         target = [target_dir]
         target.extend(self.name.split('.'))
         target[-1] += DYLIB_SUFFIX
-        shutil.copy2(os.path.join(self.rusttargetdir,
-                                  self.dylibname + self.rustdylibsuffix()),
-                     os.path.join(*target))
+        shutil.copy2(
+            os.path.join(
+                self.rusttargetdir, self.dylibname + self.rustdylibsuffix()
+            ),
+            os.path.join(*target),
+        )
 
 
 extmodules = [
-    Extension('mercurial.cext.base85', ['mercurial/cext/base85.c'],
-              include_dirs=common_include_dirs,
-              depends=common_depends),
-    Extension('mercurial.cext.bdiff', ['mercurial/bdiff.c',
-                                       'mercurial/cext/bdiff.c'] + xdiff_srcs,
-              include_dirs=common_include_dirs,
-              depends=common_depends + ['mercurial/bdiff.h'] + xdiff_headers),
-    Extension('mercurial.cext.mpatch', ['mercurial/mpatch.c',
-                                        'mercurial/cext/mpatch.c'],
-              include_dirs=common_include_dirs,
-              depends=common_depends),
+    Extension(
+        'mercurial.cext.base85',
+        ['mercurial/cext/base85.c'],
+        include_dirs=common_include_dirs,
+        depends=common_depends,
+    ),
+    Extension(
+        'mercurial.cext.bdiff',
+        ['mercurial/bdiff.c', 'mercurial/cext/bdiff.c'] + xdiff_srcs,
+        include_dirs=common_include_dirs,
+        depends=common_depends + ['mercurial/bdiff.h'] + xdiff_headers,
+    ),
+    Extension(
+        'mercurial.cext.mpatch',
+        ['mercurial/mpatch.c', 'mercurial/cext/mpatch.c'],
+        include_dirs=common_include_dirs,
+        depends=common_depends,
+    ),
     RustEnhancedExtension(
-        'mercurial.cext.parsers', ['mercurial/cext/charencode.c',
-                                   'mercurial/cext/dirs.c',
-                                   'mercurial/cext/manifest.c',
-                                   'mercurial/cext/parsers.c',
-                                   'mercurial/cext/pathencode.c',
-                                   'mercurial/cext/revlog.c'],
+        'mercurial.cext.parsers',
+        [
+            'mercurial/cext/charencode.c',
+            'mercurial/cext/dirs.c',
+            'mercurial/cext/manifest.c',
+            'mercurial/cext/parsers.c',
+            'mercurial/cext/pathencode.c',
+            'mercurial/cext/revlog.c',
+        ],
         'hgdirectffi',
         'hg-direct-ffi',
         include_dirs=common_include_dirs,
-        depends=common_depends + ['mercurial/cext/charencode.h',
-                                  'mercurial/cext/revlog.h',
-                                  'rust/hg-core/src/ancestors.rs',
-                                  'rust/hg-core/src/lib.rs']),
-    Extension('mercurial.cext.osutil', ['mercurial/cext/osutil.c'],
-              include_dirs=common_include_dirs,
-              extra_compile_args=osutil_cflags,
-              extra_link_args=osutil_ldflags,
-              depends=common_depends),
+        depends=common_depends
+        + [
+            'mercurial/cext/charencode.h',
+            'mercurial/cext/revlog.h',
+            'rust/hg-core/src/ancestors.rs',
+            'rust/hg-core/src/lib.rs',
+        ],
+    ),
     Extension(
-        'mercurial.thirdparty.zope.interface._zope_interface_coptimizations', [
-        'mercurial/thirdparty/zope/interface/_zope_interface_coptimizations.c',
-        ]),
-    Extension('hgext.fsmonitor.pywatchman.bser',
-              ['hgext/fsmonitor/pywatchman/bser.c']),
-    RustStandaloneExtension('mercurial.rustext', 'hg-cpython', 'librusthg',
-                            py3_features='python3'),
-    ]
+        'mercurial.cext.osutil',
+        ['mercurial/cext/osutil.c'],
+        include_dirs=common_include_dirs,
+        extra_compile_args=osutil_cflags,
+        extra_link_args=osutil_ldflags,
+        depends=common_depends,
+    ),
+    Extension(
+        'mercurial.thirdparty.zope.interface._zope_interface_coptimizations',
+        [
+            'mercurial/thirdparty/zope/interface/_zope_interface_coptimizations.c',
+        ],
+    ),
+    Extension(
+        'hgext.fsmonitor.pywatchman.bser', ['hgext/fsmonitor/pywatchman/bser.c']
+    ),
+    RustStandaloneExtension(
+        'mercurial.rustext', 'hg-cpython', 'librusthg', py3_features='python3'
+    ),
+]
 
 
 sys.path.insert(0, 'contrib/python-zstandard')
 import setup_zstd
-extmodules.append(setup_zstd.get_c_extension(
-    name='mercurial.zstd',
-    root=os.path.abspath(os.path.dirname(__file__))))
+
+extmodules.append(
+    setup_zstd.get_c_extension(
+        name='mercurial.zstd', root=os.path.abspath(os.path.dirname(__file__))
+    )
+)
 
 try:
     from distutils import cygwinccompiler
@@ -1328,6 +1552,7 @@
     class HackedMingw32CCompiler(object):
         pass
 
+
 if os.name == 'nt':
     # Allow compiler/linker flags to be added to Visual Studio builds.  Passing
     # extra_link_args to distutils.extensions.Extension() doesn't have any
@@ -1345,15 +1570,21 @@
 
     msvccompiler.MSVCCompiler = HackedMSVCCompiler
 
-packagedata = {'mercurial': ['locale/*/LC_MESSAGES/hg.mo',
-                             'help/*.txt',
-                             'help/internals/*.txt',
-                             'default.d/*.rc',
-                             'dummycert.pem']}
+packagedata = {
+    'mercurial': [
+        'locale/*/LC_MESSAGES/hg.mo',
+        'help/*.txt',
+        'help/internals/*.txt',
+        'default.d/*.rc',
+        'dummycert.pem',
+    ]
+}
+
 
 def ordinarypath(p):
     return p and p[0] != '.' and p[-1] != '~'
 
+
 for root in ('templates',):
     for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
         curdir = curdir.split(os.sep, 1)[1]
@@ -1393,9 +1624,12 @@
 
 if py2exeloaded:
     extra['console'] = [
-        {'script':'hg',
-         'copyright':'Copyright (C) 2005-2019 Matt Mackall and others',
-         'product_version':version}]
+        {
+            'script': 'hg',
+            'copyright': 'Copyright (C) 2005-2019 Matt Mackall and others',
+            'product_version': version,
+        }
+    ]
     # Sub command of 'build' because 'py2exe' does not handle sub_commands.
     # Need to override hgbuild because it has a private copy of
     # build.sub_commands.
@@ -1429,8 +1663,9 @@
         version = version[0]
         if sys.version_info[0] == 3:
             version = version.decode('utf-8')
-        xcode4 = (version.startswith('Xcode') and
-                  StrictVersion(version.split()[1]) >= StrictVersion('4.0'))
+        xcode4 = version.startswith('Xcode') and StrictVersion(
+            version.split()[1]
+        ) >= StrictVersion('4.0')
         xcode51 = re.match(r'^Xcode\s+5\.1', version) is not None
     else:
         # xcodebuild returns empty on OS X Lion with XCode 4.3 not
@@ -1454,59 +1689,66 @@
         cflags = get_config_var('CFLAGS')
         if cflags and re.search(r'-mno-fused-madd\b', cflags) is not None:
             os.environ['CFLAGS'] = (
-                os.environ.get('CFLAGS', '') + ' -Qunused-arguments')
+                os.environ.get('CFLAGS', '') + ' -Qunused-arguments'
+            )
 
-setup(name='mercurial',
-      version=setupversion,
-      author='Matt Mackall and many others',
-      author_email='mercurial@mercurial-scm.org',
-      url='https://mercurial-scm.org/',
-      download_url='https://mercurial-scm.org/release/',
-      description=('Fast scalable distributed SCM (revision control, version '
-                   'control) system'),
-      long_description=('Mercurial is a distributed SCM tool written in Python.'
-                        ' It is used by a number of large projects that require'
-                        ' fast, reliable distributed revision control, such as '
-                        'Mozilla.'),
-      license='GNU GPLv2 or any later version',
-      classifiers=[
-          'Development Status :: 6 - Mature',
-          'Environment :: Console',
-          'Intended Audience :: Developers',
-          'Intended Audience :: System Administrators',
-          'License :: OSI Approved :: GNU General Public License (GPL)',
-          'Natural Language :: Danish',
-          'Natural Language :: English',
-          'Natural Language :: German',
-          'Natural Language :: Italian',
-          'Natural Language :: Japanese',
-          'Natural Language :: Portuguese (Brazilian)',
-          'Operating System :: Microsoft :: Windows',
-          'Operating System :: OS Independent',
-          'Operating System :: POSIX',
-          'Programming Language :: C',
-          'Programming Language :: Python',
-          'Topic :: Software Development :: Version Control',
-      ],
-      scripts=scripts,
-      packages=packages,
-      ext_modules=extmodules,
-      data_files=datafiles,
-      package_data=packagedata,
-      cmdclass=cmdclass,
-      distclass=hgdist,
-      options={
-          'py2exe': {
-              'bundle_files': 3,
-              'dll_excludes': py2exedllexcludes,
-              'excludes': py2exeexcludes,
-              'packages': py2exepackages,
-          },
-          'bdist_mpkg': {
-              'zipdist': False,
-              'license': 'COPYING',
-              'readme': 'contrib/packaging/macosx/Readme.html',
-              'welcome': 'contrib/packaging/macosx/Welcome.html',
-          },
-      },
-      **extra)
+setup(
+    name='mercurial',
+    version=setupversion,
+    author='Matt Mackall and many others',
+    author_email='mercurial@mercurial-scm.org',
+    url='https://mercurial-scm.org/',
+    download_url='https://mercurial-scm.org/release/',
+    description=(
+        'Fast scalable distributed SCM (revision control, version '
+        'control) system'
+    ),
+    long_description=(
+        'Mercurial is a distributed SCM tool written in Python.'
+        ' It is used by a number of large projects that require'
+        ' fast, reliable distributed revision control, such as '
+        'Mozilla.'
+    ),
+    license='GNU GPLv2 or any later version',
+    classifiers=[
+        'Development Status :: 6 - Mature',
+        'Environment :: Console',
+        'Intended Audience :: Developers',
+        'Intended Audience :: System Administrators',
+        'License :: OSI Approved :: GNU General Public License (GPL)',
+        'Natural Language :: Danish',
+        'Natural Language :: English',
+        'Natural Language :: German',
+        'Natural Language :: Italian',
+        'Natural Language :: Japanese',
+        'Natural Language :: Portuguese (Brazilian)',
+        'Operating System :: Microsoft :: Windows',
+        'Operating System :: OS Independent',
+        'Operating System :: POSIX',
+        'Programming Language :: C',
+        'Programming Language :: Python',
+        'Topic :: Software Development :: Version Control',
+    ],
+    scripts=scripts,
+    packages=packages,
+    ext_modules=extmodules,
+    data_files=datafiles,
+    package_data=packagedata,
+    cmdclass=cmdclass,
+    distclass=hgdist,
+    options={
+        'py2exe': {
+            'bundle_files': 3,
+            'dll_excludes': py2exedllexcludes,
+            'excludes': py2exeexcludes,
+            'packages': py2exepackages,
+        },
+        'bdist_mpkg': {
+            'zipdist': False,
+            'license': 'COPYING',
+            'readme': 'contrib/packaging/macosx/Readme.html',
+            'welcome': 'contrib/packaging/macosx/Welcome.html',
+        },
+    },
+    **extra
+)
--- a/tests/artifacts/scripts/generate-churning-bundle.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/artifacts/scripts/generate-churning-bundle.py	Mon Oct 21 11:09:48 2019 -0400
@@ -39,15 +39,17 @@
 #
 # At each revision, the beginning of the file changes,
 # and a set of other lines changes too.
-FILENAME='SPARSE-REVLOG-TEST-FILE'
+FILENAME = 'SPARSE-REVLOG-TEST-FILE'
 NB_LINES = 10500
 ALWAYS_CHANGE_LINES = 500
 OTHER_CHANGES = 300
 
+
 def nextcontent(previous_content):
     """utility to produce a new file content from the previous one"""
     return hashlib.md5(previous_content).hexdigest()
 
+
 def filecontent(iteridx, oldcontent):
     """generate a new file content
 
@@ -60,7 +62,7 @@
     else:
         current = str(iteridx)
 
-    for idx in xrange(NB_LINES):
+    for idx in range(NB_LINES):
         do_change_line = True
         if oldcontent is not None and ALWAYS_CHANGE_LINES < idx:
             do_change_line = not ((idx - iteridx) % OTHER_CHANGES)
@@ -72,6 +74,7 @@
             to_write = oldcontent[idx]
         yield to_write
 
+
 def updatefile(filename, idx):
     """update <filename> to be at appropriate content for iteration <idx>"""
     existing = None
@@ -82,6 +85,7 @@
         for line in filecontent(idx, existing):
             target.write(line)
 
+
 def hg(command, *args):
     """call a mercurial command with appropriate config and argument"""
     env = os.environ.copy()
@@ -101,6 +105,7 @@
     env['HGRCPATH'] = ''
     return subprocess.check_call(full_cmd, env=env)
 
+
 def run(target):
     tmpdir = tempfile.mkdtemp(prefix='tmp-hg-test-big-file-bundle-')
     try:
@@ -108,7 +113,7 @@
         hg('init')
         updatefile(FILENAME, None)
         hg('commit', '--addremove', '--message', 'initial commit')
-        for idx in xrange(1, NB_CHANGESET + 1):
+        for idx in range(1, NB_CHANGESET + 1):
             if sys.stdout.isatty():
                 print("generating commit #%d/%d" % (idx, NB_CHANGESET))
             if (idx % PERIOD_BRANCHING) == 0:
@@ -131,8 +136,8 @@
         shutil.rmtree(tmpdir)
     return 0
 
+
 if __name__ == '__main__':
     orig = os.path.realpath(os.path.dirname(sys.argv[0]))
     target = os.path.join(orig, os.pardir, 'cache', BUNDLE_NAME)
     sys.exit(run(target))
-
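
The helper above derives each revision's file content deterministically from
the previous one by hashing it. A standalone sketch of that chaining (the
`.encode('ascii')` is an assumption for Python 3, where `hexdigest()` returns
`str` while the script otherwise works in bytes):

    import hashlib

    def nextcontent(previous_content):
        # Deterministic churn: the new content is the hex digest of the old.
        return hashlib.md5(previous_content).hexdigest().encode('ascii')

    content = b'seed'
    for _ in range(3):
        content = nextcontent(content)
        print(content)
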
--- a/tests/autodiff.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/autodiff.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,9 +13,12 @@
 cmdtable = {}
 command = registrar.command(cmdtable)
 
-@command(b'autodiff',
+
+@command(
+    b'autodiff',
     [(b'', b'git', b'', b'git upgrade mode (yes/no/auto/warn/abort)')],
-    b'[OPTION]... [FILE]...')
+    b'[OPTION]... [FILE]...',
+)
 def autodiff(ui, repo, *pats, **opts):
     opts = pycompat.byteskwargs(opts)
     diffopts = patch.difffeatureopts(ui, opts)
@@ -31,21 +34,31 @@
     elif git == b'warn':
         diffopts.git = False
         diffopts.upgrade = True
+
         def losedatafn(fn=None, **kwargs):
             brokenfiles.add(fn)
             return True
+
     elif git == b'abort':
         diffopts.git = False
         diffopts.upgrade = True
+
         def losedatafn(fn=None, **kwargs):
             raise error.Abort(b'losing data for %s' % fn)
+
     else:
         raise error.Abort(b'--git must be yes, no or auto')
 
     ctx1, ctx2 = scmutil.revpair(repo, [])
     m = scmutil.match(ctx2, pats, opts)
-    it = patch.diff(repo, ctx1.node(), ctx2.node(), match=m, opts=diffopts,
-                    losedatafn=losedatafn)
+    it = patch.diff(
+        repo,
+        ctx1.node(),
+        ctx2.node(),
+        match=m,
+        opts=diffopts,
+        losedatafn=losedatafn,
+    )
     for chunk in it:
         ui.write(chunk)
     for fn in sorted(brokenfiles):
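
The decorator form above is the usual registration pattern for test
extensions: a module-level `cmdtable` is populated through
`registrar.command`. A minimal sketch under the assumption of a Mercurial
extension context (the command name and option are made up):

    from mercurial import registrar

    cmdtable = {}
    command = registrar.command(cmdtable)

    @command(
        b'hellocmd',  # hypothetical command name
        [(b'', b'greeting', b'hello', b'greeting to print')],
        b'[OPTION]...',
    )
    def hellocmd(ui, repo, *pats, **opts):
        # option keys arrive as native strings unless converted
        ui.write(b'%s\n' % opts['greeting'])
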
--- a/tests/badserverext.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/badserverext.py	Mon Oct 21 11:09:48 2019 -0400
@@ -33,29 +33,27 @@
 
 import socket
 
-from mercurial import(
+from mercurial import (
     pycompat,
     registrar,
 )
 
-from mercurial.hgweb import (
-    server,
-)
+from mercurial.hgweb import server
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem(b'badserver', b'closeafteraccept',
-    default=False,
+configitem(
+    b'badserver', b'closeafteraccept', default=False,
 )
-configitem(b'badserver', b'closeafterrecvbytes',
-    default=b'0',
+configitem(
+    b'badserver', b'closeafterrecvbytes', default=b'0',
 )
-configitem(b'badserver', b'closeaftersendbytes',
-    default=b'0',
+configitem(
+    b'badserver', b'closeaftersendbytes', default=b'0',
 )
-configitem(b'badserver', b'closebeforeaccept',
-    default=False,
+configitem(
+    b'badserver', b'closebeforeaccept', default=False,
 )
 
 # We can't adjust __class__ on a socket instance. So we define a proxy type.
@@ -67,8 +65,9 @@
         '_closeaftersendbytes',
     )
 
-    def __init__(self, obj, logfp, closeafterrecvbytes=0,
-                 closeaftersendbytes=0):
+    def __init__(
+        self, obj, logfp, closeafterrecvbytes=0, closeaftersendbytes=0
+    ):
         object.__setattr__(self, '_orig', obj)
         object.__setattr__(self, '_logfp', logfp)
         object.__setattr__(self, '_closeafterrecvbytes', closeafterrecvbytes)
@@ -97,14 +96,19 @@
         f = object.__getattribute__(self, '_orig').makefile(mode, bufsize)
 
         logfp = object.__getattribute__(self, '_logfp')
-        closeafterrecvbytes = object.__getattribute__(self,
-                                                      '_closeafterrecvbytes')
-        closeaftersendbytes = object.__getattribute__(self,
-                                                      '_closeaftersendbytes')
+        closeafterrecvbytes = object.__getattribute__(
+            self, '_closeafterrecvbytes'
+        )
+        closeaftersendbytes = object.__getattribute__(
+            self, '_closeaftersendbytes'
+        )
 
-        return fileobjectproxy(f, logfp,
-                               closeafterrecvbytes=closeafterrecvbytes,
-                               closeaftersendbytes=closeaftersendbytes)
+        return fileobjectproxy(
+            f,
+            logfp,
+            closeafterrecvbytes=closeafterrecvbytes,
+            closeaftersendbytes=closeaftersendbytes,
+        )
 
     def sendall(self, data, flags=0):
         remaining = object.__getattribute__(self, '_closeaftersendbytes')
@@ -124,8 +128,10 @@
 
         result = object.__getattribute__(self, '_orig').sendall(newdata, flags)
 
-        self._writelog(b'sendall(%d from %d) -> (%d) %s' % (
-            len(newdata), len(data), remaining, newdata))
+        self._writelog(
+            b'sendall(%d from %d) -> (%d) %s'
+            % (len(newdata), len(data), remaining, newdata)
+        )
 
         object.__setattr__(self, '_closeaftersendbytes', remaining)
 
@@ -147,8 +153,9 @@
         '_closeaftersendbytes',
     )
 
-    def __init__(self, obj, logfp, closeafterrecvbytes=0,
-                 closeaftersendbytes=0):
+    def __init__(
+        self, obj, logfp, closeafterrecvbytes=0, closeaftersendbytes=0
+    ):
         object.__setattr__(self, '_orig', obj)
         object.__setattr__(self, '_logfp', logfp)
         object.__setattr__(self, '_closeafterrecvbytes', closeafterrecvbytes)
@@ -192,9 +199,9 @@
         # No read limit. Call original function.
         if not remaining:
             result = object.__getattribute__(self, '_orig').read(size)
-            self._writelog(b'read(%d) -> (%d) (%s) %s' % (size,
-                                                          len(result),
-                                                          result))
+            self._writelog(
+                b'read(%d) -> (%d) %s' % (size, len(result), result)
+            )
             return result
 
         origsize = size
@@ -207,8 +214,10 @@
         result = object.__getattribute__(self, '_orig').read(size)
         remaining -= len(result)
 
-        self._writelog(b'read(%d from %d) -> (%d) %s' % (
-            size, origsize, len(result), result))
+        self._writelog(
+            b'read(%d from %d) -> (%d) %s'
+            % (size, origsize, len(result), result)
+        )
 
         object.__setattr__(self, '_closeafterrecvbytes', remaining)
 
@@ -227,8 +236,9 @@
         # No read limit. Call original function.
         if not remaining:
             result = object.__getattribute__(self, '_orig').readline(size)
-            self._writelog(b'readline(%d) -> (%d) %s' % (
-                size, len(result), result))
+            self._writelog(
+                b'readline(%d) -> (%d) %s' % (size, len(result), result)
+            )
             return result
 
         origsize = size
@@ -241,8 +251,10 @@
         result = object.__getattribute__(self, '_orig').readline(size)
         remaining -= len(result)
 
-        self._writelog(b'readline(%d from %d) -> (%d) %s' % (
-            size, origsize, len(result), result))
+        self._writelog(
+            b'readline(%d from %d) -> (%d) %s'
+            % (size, origsize, len(result), result)
+        )
 
         object.__setattr__(self, '_closeafterrecvbytes', remaining)
 
@@ -271,8 +283,10 @@
 
         remaining -= len(newdata)
 
-        self._writelog(b'write(%d from %d) -> (%d) %s' % (
-            len(newdata), len(data), remaining, newdata))
+        self._writelog(
+            b'write(%d from %d) -> (%d) %s'
+            % (len(newdata), len(data), remaining, newdata)
+        )
 
         result = object.__getattribute__(self, '_orig').write(newdata)
 
@@ -286,6 +300,7 @@
 
         return result
 
+
 def extsetup(ui):
     # Change the base HTTP server class so various events can be performed.
     # See SocketServer.BaseServer for how the specially named methods work.
@@ -310,8 +325,9 @@
                     elif name.lower() == 'server':
                         value = 'badhttpserver'
 
-                    return super(badrequesthandler, self).send_header(name,
-                                                                      value)
+                    return super(badrequesthandler, self).send_header(
+                        name, value
+                    )
 
             self.RequestHandlerClass = badrequesthandler
 
@@ -348,9 +364,12 @@
                 closeaftersendbytes = 0
 
             if closeafterrecvbytes or closeaftersendbytes:
-                socket = socketproxy(socket, self.errorlog,
-                                     closeafterrecvbytes=closeafterrecvbytes,
-                                     closeaftersendbytes=closeaftersendbytes)
+                socket = socketproxy(
+                    socket,
+                    self.errorlog,
+                    closeafterrecvbytes=closeafterrecvbytes,
+                    closeaftersendbytes=closeaftersendbytes,
+                )
 
             return super(badserver, self).process_request(socket, address)
 
--- a/tests/basic_test_result.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/basic_test_result.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,8 +2,8 @@
 
 import unittest
 
+
 class TestResult(unittest._TextTestResult):
-
     def __init__(self, options, *args, **kwargs):
         super(TestResult, self).__init__(*args, **kwargs)
         self._options = options
--- a/tests/blackbox-readonly-dispatch.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/blackbox-readonly-dispatch.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,6 +6,7 @@
     ui as uimod,
 )
 
+
 def testdispatch(cmd):
     """Simple wrapper around dispatch.dispatch()
 
@@ -13,10 +14,11 @@
     """
     ui = uimod.ui.load()
     extensions.populateui(ui)
-    ui.status(b"running: %s\n" % cmd)
+    ui.statusnoi18n(b"running: %s\n" % cmd)
     req = dispatch.request(cmd.split(), ui)
     result = dispatch.dispatch(req)
-    ui.status(b"result: %r\n" % result)
+    ui.statusnoi18n(b"result: %r\n" % result)
+
 
 # create file 'foo', add and commit
 f = open(b'foo', 'wb')
--- a/tests/bruterebase.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/bruterebase.py	Mon Oct 21 11:09:48 2019 -0400
@@ -23,6 +23,7 @@
 cmdtable = {}
 command = registrar.command(cmdtable)
 
+
 @command(b'debugbruterebase')
 def debugbruterebase(ui, repo, source, dest):
     """for every non-empty subset of source, run rebase -r subset -d dest
@@ -45,7 +46,7 @@
             subset = [rev for j, rev in enumerate(srevs) if i & (1 << j) != 0]
             spec = revsetlang.formatspec(b'%ld', subset)
             tr = repo.transaction(b'rebase')
-            tr._report = lambda x: 0 # hide "transaction abort"
+            tr._report = lambda x: 0  # hide "transaction abort"
 
             ui.pushbuffer()
             try:
@@ -70,5 +71,5 @@
             repo.vfs.tryunlink(b'rebasestate')
 
             subsetdesc = b''.join(getdesc(rev) for rev in subset)
-            ui.write((b'%s: %s\n') % (subsetdesc.rjust(len(srevs)), summary))
+            ui.write(b'%s: %s\n' % (subsetdesc.rjust(len(srevs)), summary))
             tr.abort()
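
The loop above enumerates every non-empty subset of `srevs` by treating an
integer counter as a membership bitmask: bit `j` of `i` selects the `j`-th
element. The same technique in isolation:

    def subsets(items):
        # counter i runs over all non-zero bitmasks; bit j selects items[j]
        for i in range(1, 2 ** len(items)):
            yield [item for j, item in enumerate(items) if i & (1 << j)]

    for s in subsets(['a', 'b', 'c']):
        print(s)
    # ['a'], ['b'], ['a', 'b'], ['c'], ['a', 'c'], ['b', 'c'], ['a', 'b', 'c']
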
--- a/tests/check-perf-code.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/check-perf-code.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,23 +9,31 @@
 
 # write static check patterns here
 perfpypats = [
-  [
-    (r'(branchmap|repoview|repoviewutil)\.subsettable',
-     "use getbranchmapsubsettable() for early Mercurial"),
-    (r'\.(vfs|svfs|opener|sopener)',
-     "use getvfs()/getsvfs() for early Mercurial"),
-    (r'ui\.configint',
-     "use getint() instead of ui.configint() for early Mercurial"),
-  ],
-  # warnings
-  [
-  ]
+    [
+        (
+            r'(branchmap|repoview|repoviewutil)\.subsettable',
+            "use getbranchmapsubsettable() for early Mercurial",
+        ),
+        (
+            r'\.(vfs|svfs|opener|sopener)',
+            "use getvfs()/getsvfs() for early Mercurial",
+        ),
+        (
+            r'ui\.configint',
+            "use getint() instead of ui.configint() for early Mercurial",
+        ),
+    ],
+    # warnings
+    [],
 ]
 
+
 def modulewhitelist(names):
-    replacement = [('.py', ''), ('.c', ''), # trim suffix
-                   ('mercurial%s' % ('/'), ''), # trim "mercurial/" path
-                  ]
+    replacement = [
+        ('.py', ''),
+        ('.c', ''),  # trim suffix
+        ('mercurial%s' % '/', ''),  # trim "mercurial/" path
+    ]
     ignored = {'__init__'}
     modules = {}
 
@@ -45,6 +53,7 @@
 
     return whitelist
 
+
 if __name__ == "__main__":
     # in this case, it is assumed that result of "hg files" at
     # multiple revisions is given via stdin
@@ -61,10 +70,14 @@
         #        bar,
         #        baz
         #    )
-        ((r'from mercurial import [(][a-z0-9, \n#]*\n(?! *%s,|^[ #]*\n|[)])'
-          % ',| *'.join(whitelist)),
-         "import newer module separately in try clause for early Mercurial"
-         ))
+        (
+            (
+                r'from mercurial import [(][a-z0-9, \n#]*\n(?! *%s,|^[ #]*\n|[)])'
+                % ',| *'.join(whitelist)
+            ),
+            "import newer module separately in try clause for early Mercurial",
+        )
+    )
 
     # import contrib/check-code.py as checkcode
     assert 'RUNTESTDIR' in os.environ, "use check-perf-code.py in *.t script"
@@ -73,7 +86,8 @@
     checkcode = __import__('check-code')
 
     # register perf.py specific entry with "checks" in check-code.py
-    checkcode.checks.append(('perf.py', r'contrib/perf.py$', '',
-                             checkcode.pyfilters, perfpypats))
+    checkcode.checks.append(
+        ('perf.py', r'contrib/perf.py$', '', checkcode.pyfilters, perfpypats)
+    )
 
     sys.exit(checkcode.main())
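
Each entry in the tables above pairs a regular expression with an advice
message, and check-code applies the table line by line. A self-contained
sketch of that shape (the pattern and message are copied from the table; the
driver code is hypothetical):

    import re

    patterns = [
        (r'ui\.configint',
         "use getint() instead of ui.configint() for early Mercurial"),
    ]

    def check(line):
        # return the advice for every pattern that matches the line
        return [msg for pat, msg in patterns if re.search(pat, line)]

    print(check("x = ui.configint(b'section', b'name')"))
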
--- a/tests/common-pattern.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/common-pattern.py	Mon Oct 21 11:09:48 2019 -0400
@@ -5,110 +5,115 @@
 
 substitutions = [
     # list of possible compressions
-    (br'(zstd,)?zlib,none,bzip2',
-     br'$USUAL_COMPRESSIONS$'
-    ),
-    (br'=(zstd,)?zlib',
-     br'=$BUNDLE2_COMPRESSIONS$'
-    ),
+    (br'(zstd,)?zlib,none,bzip2', br'$USUAL_COMPRESSIONS$'),
+    (br'=(zstd,)?zlib', br'=$BUNDLE2_COMPRESSIONS$'),
     # capabilities sent through http
-    (br'bundlecaps=HG20%2Cbundle2%3DHG20%250A'
-     br'bookmarks%250A'
-     br'changegroup%253D01%252C02%250A'
-     br'digests%253Dmd5%252Csha1%252Csha512%250A'
-     br'error%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250A'
-     br'hgtagsfnodes%250A'
-     br'listkeys%250A'
-     br'phases%253Dheads%250A'
-     br'pushkey%250A'
-     br'remote-changegroup%253Dhttp%252Chttps%250A'
-     br'rev-branch-cache%250A'
-     br'stream%253Dv2',
-     # (the replacement patterns)
-     br'$USUAL_BUNDLE_CAPS$'
+    (
+        br'bundlecaps=HG20%2Cbundle2%3DHG20%250A'
+        br'bookmarks%250A'
+        br'changegroup%253D01%252C02%250A'
+        br'digests%253Dmd5%252Csha1%252Csha512%250A'
+        br'error%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250A'
+        br'hgtagsfnodes%250A'
+        br'listkeys%250A'
+        br'phases%253Dheads%250A'
+        br'pushkey%250A'
+        br'remote-changegroup%253Dhttp%252Chttps%250A'
+        br'rev-branch-cache%250A'
+        br'stream%253Dv2',
+        # (the replacement patterns)
+        br'$USUAL_BUNDLE_CAPS$',
     ),
-    (br'bundlecaps=HG20%2Cbundle2%3DHG20%250A'
-     br'bookmarks%250A'
-     br'changegroup%253D01%252C02%250A'
-     br'digests%253Dmd5%252Csha1%252Csha512%250A'
-     br'error%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250A'
-     br'hgtagsfnodes%250A'
-     br'listkeys%250A'
-     br'phases%253Dheads%250A'
-     br'pushkey%250A'
-     br'remote-changegroup%253Dhttp%252Chttps',
-     # (the replacement patterns)
-     br'$USUAL_BUNDLE_CAPS_SERVER$'
-     ),
+    (
+        br'bundlecaps=HG20%2Cbundle2%3DHG20%250A'
+        br'bookmarks%250A'
+        br'changegroup%253D01%252C02%250A'
+        br'digests%253Dmd5%252Csha1%252Csha512%250A'
+        br'error%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250A'
+        br'hgtagsfnodes%250A'
+        br'listkeys%250A'
+        br'phases%253Dheads%250A'
+        br'pushkey%250A'
+        br'remote-changegroup%253Dhttp%252Chttps',
+        # (the replacement patterns)
+        br'$USUAL_BUNDLE_CAPS_SERVER$',
+    ),
     # bundle2 capabilities sent through ssh
-    (br'bundle2=HG20%0A'
-     br'bookmarks%0A'
-     br'changegroup%3D01%2C02%0A'
-     br'digests%3Dmd5%2Csha1%2Csha512%0A'
-     br'error%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0A'
-     br'hgtagsfnodes%0A'
-     br'listkeys%0A'
-     br'phases%3Dheads%0A'
-     br'pushkey%0A'
-     br'remote-changegroup%3Dhttp%2Chttps%0A'
-     br'rev-branch-cache%0A'
-     br'stream%3Dv2',
-     # (replacement patterns)
-     br'$USUAL_BUNDLE2_CAPS$'
+    (
+        br'bundle2=HG20%0A'
+        br'bookmarks%0A'
+        br'changegroup%3D01%2C02%0A'
+        br'digests%3Dmd5%2Csha1%2Csha512%0A'
+        br'error%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0A'
+        br'hgtagsfnodes%0A'
+        br'listkeys%0A'
+        br'phases%3Dheads%0A'
+        br'pushkey%0A'
+        br'remote-changegroup%3Dhttp%2Chttps%0A'
+        br'rev-branch-cache%0A'
+        br'stream%3Dv2',
+        # (replacement patterns)
+        br'$USUAL_BUNDLE2_CAPS$',
     ),
     # bundle2 capabilities advertised by the server
-    (br'bundle2=HG20%0A'
-     br'bookmarks%0A'
-     br'changegroup%3D01%2C02%0A'
-     br'digests%3Dmd5%2Csha1%2Csha512%0A'
-     br'error%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0A'
-     br'hgtagsfnodes%0A'
-     br'listkeys%0A'
-     br'phases%3Dheads%0A'
-     br'pushkey%0A'
-     br'remote-changegroup%3Dhttp%2Chttps%0A'
-     br'rev-branch-cache',
-     # (replacement patterns)
-     br'$USUAL_BUNDLE2_CAPS_SERVER$'
-     ),
     (
-     br'bundle2=HG20%0A'
-     br'bookmarks%0A'
-     br'changegroup%3D01%2C02%0A'
-     br'digests%3Dmd5%2Csha1%2Csha512%0A'
-     br'error%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0A'
-     br'hgtagsfnodes%0A'
-     br'listkeys%0A'
-     br'pushkey%0A'
-     br'remote-changegroup%3Dhttp%2Chttps%0A'
-     br'rev-branch-cache%0A'
-     br'stream%3Dv2',
-     # (replacement patterns)
-     br'$USUAL_BUNDLE2_CAPS_NO_PHASES$'
+        br'bundle2=HG20%0A'
+        br'bookmarks%0A'
+        br'changegroup%3D01%2C02%0A'
+        br'digests%3Dmd5%2Csha1%2Csha512%0A'
+        br'error%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0A'
+        br'hgtagsfnodes%0A'
+        br'listkeys%0A'
+        br'phases%3Dheads%0A'
+        br'pushkey%0A'
+        br'remote-changegroup%3Dhttp%2Chttps%0A'
+        br'rev-branch-cache',
+        # (replacement patterns)
+        br'$USUAL_BUNDLE2_CAPS_SERVER$',
+    ),
+    (
+        br'bundle2=HG20%0A'
+        br'bookmarks%0A'
+        br'changegroup%3D01%2C02%0A'
+        br'digests%3Dmd5%2Csha1%2Csha512%0A'
+        br'error%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0A'
+        br'hgtagsfnodes%0A'
+        br'listkeys%0A'
+        br'pushkey%0A'
+        br'remote-changegroup%3Dhttp%2Chttps%0A'
+        br'rev-branch-cache%0A'
+        br'stream%3Dv2',
+        # (replacement patterns)
+        br'$USUAL_BUNDLE2_CAPS_NO_PHASES$',
     ),
     # HTTP access log dates
-    (br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] "(GET|PUT|POST)',
-     lambda m: br' - - [$LOGDATE$] "' + m.group(1)
+    (
+        br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] "(GET|PUT|POST)',
+        lambda m: br' - - [$LOGDATE$] "' + m.group(1),
     ),
     # HTTP error log dates
-    (br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] (HG error:|Exception)',
-     lambda m: br' - - [$ERRDATE$] ' + m.group(1)
+    (
+        br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] (HG error:|Exception)',
+        lambda m: br' - - [$ERRDATE$] ' + m.group(1),
     ),
     # HTTP header dates- RFC 1123
-    (br'([Dd]ate): [A-Za-z]{3}, \d\d [A-Za-z]{3} \d{4} \d\d:\d\d:\d\d GMT',
-     lambda m: br'%s: $HTTP_DATE$' % m.group(1)
+    (
+        br'([Dd]ate): [A-Za-z]{3}, \d\d [A-Za-z]{3} \d{4} \d\d:\d\d:\d\d GMT',
+        lambda m: br'%s: $HTTP_DATE$' % m.group(1),
     ),
     # LFS expiration value
-    (br'"expires_at": "\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ"',
-     br'"expires_at": "$ISO_8601_DATE_TIME$"'
+    (
+        br'"expires_at": "\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ"',
+        br'"expires_at": "$ISO_8601_DATE_TIME$"',
     ),
     # Windows has an extra '/' in the following lines that get globbed away:
     #   pushing to file:/*/$TESTTMP/r2 (glob)
     #   comparing with file:/*/$TESTTMP/r2 (glob)
     #   sub/maybelarge.dat: largefile 34..9c not available from
     #       file:/*/$TESTTMP/largefiles-repo (glob)
-    (br'(.*file:/)/?(/\$TESTTMP.*)',
-     lambda m: m.group(1) + b'*' + m.group(2) + b' (glob)'
+    (
+        br'(.*file:/)/?(/\$TESTTMP.*)',
+        lambda m: m.group(1) + b'*' + m.group(2) + b' (glob)',
     ),
 ]
 
@@ -119,31 +124,26 @@
         # than in Rust, and automatic conversion is not possible
         # because of module member privacy.
         br'No such file or directory \(os error 2\)',
-
         # strerror()
         br'No such file or directory',
-
         # FormatMessage(ERROR_FILE_NOT_FOUND)
         br'The system cannot find the file specified',
     ),
     br'$ENOTDIR$': (
         # strerror()
         br'Not a directory',
-
         # FormatMessage(ERROR_PATH_NOT_FOUND)
         br'The system cannot find the path specified',
     ),
     br'$ECONNRESET$': (
         # strerror()
         br'Connection reset by peer',
-
         # FormatMessage(WSAECONNRESET)
         br'An existing connection was forcibly closed by the remote host',
     ),
     br'$EADDRINUSE$': (
         # strerror()
         br'Address already in use',
-
         # FormatMessage(WSAEADDRINUSE)
         br'Only one usage of each socket address'
         br' \(protocol/network address/port\) is normally permitted',
@@ -151,9 +151,8 @@
     br'$EADDRNOTAVAIL$': (
         # strerror()
         br'Cannot assign requested address',
-
         # FormatMessage(WSAEADDRNOTAVAIL)
-    )
+    ),
 }
 
 for replace, msgs in _errors.items():
@@ -165,41 +164,35 @@
     # cloning subrepo s\ss from $TESTTMP/t/s/ss
     # cloning subrepo foo\bar from http://localhost:$HGPORT/foo/bar
     br'(?m)^cloning subrepo \S+\\.*',
-
     # pulling from $TESTTMP\issue1852a
     br'(?m)^pulling from \$TESTTMP\\.*',
-
     # pushing to $TESTTMP\a
     br'(?m)^pushing to \$TESTTMP\\.*',
-
     # pushing subrepo s\ss to $TESTTMP/t/s/ss
     br'(?m)^pushing subrepo \S+\\\S+ to.*',
-
     # moving d1\d11\a1 to d3/d11/a1
     br'(?m)^moving \S+\\.*',
-
     # d1\a: not recording move - dummy does not exist
     br'\S+\\\S+: not recording move .+',
-
     # reverting s\a
     br'(?m)^reverting (?!subrepo ).*\\.*',
-
     # saved backup bundle to
     #     $TESTTMP\test\.hg\strip-backup/443431ffac4f-2fc5398a-backup.hg
     br'(?m)^saved backup bundle to \$TESTTMP.*\.hg',
-
     # no changes made to subrepo s\ss since last push to ../tcc/s/ss
     br'(?m)^no changes made to subrepo \S+\\\S+ since.*',
-
     # changeset 5:9cc5aa7204f0: stuff/maybelarge.dat references missing
     #     $TESTTMP\largefiles-repo-hg\.hg\largefiles\76..38
     br'(?m)^changeset .* references (corrupted|missing) \$TESTTMP\\.*',
-
     # stuff/maybelarge.dat: largefile 76..38 not available from
     #     file:/*/$TESTTMP\largefiles-repo (glob)
     br'.*: largefile \S+ not available from file:/\*/.+',
 ]
 
 if os.name == 'nt':
-    substitutions.extend([(s, lambda match: match.group().replace(b'\\', b'/'))
-                          for s in _winpathfixes])
+    substitutions.extend(
+        [
+            (s, lambda match: match.group().replace(b'\\', b'/'))
+            for s in _winpathfixes
+        ]
+    )
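
The substitution tables above mix literal replacements with callables that
receive the match object, which is how dynamic pieces such as the matched
HTTP verb survive the rewrite. A small sketch using one of the patterns from
the table (the sample log line is invented):

    import re

    line = b'127.0.0.1 - - [21/Oct/2019 11:09:48] "GET /capabilities"'
    pat = br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] "(GET|PUT|POST)'
    print(re.sub(pat, lambda m: br' - - [$LOGDATE$] "' + m.group(1), line))
    # b'127.0.0.1 - - [$LOGDATE$] "GET /capabilities"'
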
--- a/tests/crashgetbundler.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/crashgetbundler.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,14 +1,12 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
-from mercurial import (
-        changegroup,
-        error,
-        extensions
-    )
+from mercurial import changegroup, error, extensions
+
 
 def abort(orig, *args, **kwargs):
     raise error.Abort(_('this is an exercise'))
 
+
 def uisetup(ui):
     extensions.wrapfunction(changegroup, 'getbundler', abort)
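
`extensions.wrapfunction` installs a wrapper that receives the original
function as its first argument, which is why `abort` above takes `orig` and
simply never calls it. A schematic stand-in showing the calling convention
(a sketch, not the real `mercurial.extensions` implementation):

    def wrapfunction(container, funcname, wrapper):
        # replace container.funcname with a closure that prepends the original
        orig = getattr(container, funcname)

        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)

        setattr(container, funcname, wrapped)
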
--- a/tests/drawdag.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/drawdag.py	Mon Oct 21 11:09:48 2019 -0400
@@ -102,14 +102,19 @@
 command = registrar.command(cmdtable)
 
 _pipechars = b'\\/+-|'
-_nonpipechars = b''.join(pycompat.bytechr(i) for i in range(33, 127)
-                         if pycompat.bytechr(i) not in _pipechars)
+_nonpipechars = b''.join(
+    pycompat.bytechr(i)
+    for i in range(33, 127)
+    if pycompat.bytechr(i) not in _pipechars
+)
+
 
 def _isname(ch):
     """char -> bool. return True if ch looks like part of a name, False
     otherwise"""
     return ch in _nonpipechars
 
+
 def _parseasciigraph(text):
     r"""str -> {str : [str]}. convert the ASCII graph to edges
 
@@ -166,7 +171,7 @@
         if x < 0 or y < 0:
             return b' '
         try:
-            return lines[y][x:x + 1] or b' '
+            return lines[y][x : x + 1] or b' '
         except IndexError:
             return b' '
 
@@ -261,6 +266,7 @@
 
     return dict(edges)
 
+
 class simplefilectx(object):
     def __init__(self, path, data):
         self._data = data
@@ -281,6 +287,7 @@
     def flags(self):
         return b''
 
+
 class simplecommitctx(context.committablectx):
     def __init__(self, repo, name, parentctxs, added):
         opts = {
@@ -306,6 +313,7 @@
     def p2copies(self):
         return {}
 
+
 def _walkgraph(edges):
     """yield node, parents in topologically order"""
     visible = set(edges.keys())
@@ -327,6 +335,7 @@
                 if leaf in v:
                     v.remove(leaf)
 
+
 def _getcomments(text):
     r"""
     >>> [pycompat.sysstr(s) for s in _getcomments(br'''
@@ -345,6 +354,7 @@
             continue
         yield line.split(b' # ', 1)[1].split(b' # ')[0].strip()
 
+
 @command(b'debugdrawdag', [])
 def debugdrawdag(ui, repo, **opts):
     r"""read an ASCII graph from stdin and create changesets
@@ -368,11 +378,10 @@
     edges = _parseasciigraph(text)
     for k, v in edges.items():
         if len(v) > 2:
-            raise error.Abort(_('%s: too many parents: %s')
-                              % (k, b' '.join(v)))
+            raise error.Abort(_('%s: too many parents: %s') % (k, b' '.join(v)))
 
     # parse comments to get extra file content instructions
-    files = collections.defaultdict(dict) # {(name, path): content}
+    files = collections.defaultdict(dict)  # {(name, path): content}
     comments = list(_getcomments(text))
     filere = re.compile(br'^(\w+)/([\w/]+)\s*=\s*(.*)$', re.M)
     for name, path, content in filere.findall(b'\n'.join(comments)):
@@ -410,14 +419,15 @@
         ctx = simplecommitctx(repo, name, pctxs, added)
         n = ctx.commit()
         committed[name] = n
-        tagsmod.tag(repo, [name], n, message=None, user=None, date=None,
-                    local=True)
+        tagsmod.tag(
+            repo, [name], n, message=None, user=None, date=None, local=True
+        )
 
     # handle special comments
     with repo.wlock(), repo.lock(), repo.transaction(b'drawdag'):
         getctx = lambda x: repo.unfiltered()[committed[x.strip()]]
         for comment in comments:
-            rels = [] # obsolete relationships
+            rels = []  # obsolete relationships
             args = comment.split(b':', 1)
             if len(args) <= 1:
                 continue
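
`_walkgraph` above yields nodes so that parents always come before their
children. A minimal standalone version of that ordering, taking edges as
`{node: [parents]}` (the names and the cycle guard are illustrative):

    def walkgraph(edges):
        remaining = {k: set(v) for k, v in edges.items()}
        while remaining:
            # nodes whose parents have all been emitted are ready
            ready = sorted(k for k, v in remaining.items() if not v)
            if not ready:
                raise ValueError('graph has a cycle')
            for node in ready:
                yield node
                del remaining[node]
                for parents in remaining.values():
                    parents.discard(node)

    print(list(walkgraph({'a': [], 'b': ['a'], 'c': ['a', 'b']})))
    # ['a', 'b', 'c']
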
--- a/tests/dumbhttp.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/dumbhttp.py	Mon Oct 21 11:09:48 2019 -0400
@@ -23,36 +23,64 @@
 OptionParser = optparse.OptionParser
 
 if os.environ.get('HGIPV6', '0') == '1':
+
     class simplehttpserver(httpserver.httpserver):
         address_family = socket.AF_INET6
+
+
 else:
     simplehttpserver = httpserver.httpserver
 
+
 class _httprequesthandler(httpserver.simplehttprequesthandler):
     def log_message(self, format, *args):
         httpserver.simplehttprequesthandler.log_message(self, format, *args)
         sys.stderr.flush()
 
+
 class simplehttpservice(object):
     def __init__(self, host, port):
         self.address = (host, port)
+
     def init(self):
         self.httpd = simplehttpserver(self.address, _httprequesthandler)
+
     def run(self):
         self.httpd.serve_forever()
 
+
 if __name__ == '__main__':
     parser = OptionParser()
-    parser.add_option('-p', '--port', dest='port', type='int', default=8000,
-        help='TCP port to listen on', metavar='PORT')
-    parser.add_option('-H', '--host', dest='host', default='localhost',
-        help='hostname or IP to listen on', metavar='HOST')
+    parser.add_option(
+        '-p',
+        '--port',
+        dest='port',
+        type='int',
+        default=8000,
+        help='TCP port to listen on',
+        metavar='PORT',
+    )
+    parser.add_option(
+        '-H',
+        '--host',
+        dest='host',
+        default='localhost',
+        help='hostname or IP to listen on',
+        metavar='HOST',
+    )
     parser.add_option('--logfile', help='file name of access/error log')
-    parser.add_option('--pid', dest='pid',
-        help='file name where the PID of the server is stored')
-    parser.add_option('-f', '--foreground', dest='foreground',
+    parser.add_option(
+        '--pid',
+        dest='pid',
+        help='file name where the PID of the server is stored',
+    )
+    parser.add_option(
+        '-f',
+        '--foreground',
+        dest='foreground',
         action='store_true',
-        help='do not start the HTTP server in the background')
+        help='do not start the HTTP server in the background',
+    )
     parser.add_option('--daemon-postexec', action='append')
 
     (options, args) = parser.parse_args()
@@ -60,18 +88,26 @@
     signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
 
     if options.foreground and options.logfile:
-        parser.error("options --logfile and --foreground are mutually "
-                     "exclusive")
+        parser.error(
+            "options --logfile and --foreground are mutually " "exclusive"
+        )
     if options.foreground and options.pid:
         parser.error("options --pid and --foreground are mutually exclusive")
 
-    opts = {b'pid_file': options.pid,
-            b'daemon': not options.foreground,
-            b'daemon_postexec': pycompat.rapply(encoding.strtolocal,
-                                                options.daemon_postexec)}
+    opts = {
+        b'pid_file': options.pid,
+        b'daemon': not options.foreground,
+        b'daemon_postexec': pycompat.rapply(
+            encoding.strtolocal, options.daemon_postexec
+        ),
+    }
     service = simplehttpservice(options.host, options.port)
     runargs = [sys.executable, __file__] + sys.argv[1:]
     runargs = [pycompat.fsencode(a) for a in runargs]
-    server.runservice(opts, initfn=service.init, runfn=service.run,
-                      logfile=options.logfile,
-                      runargs=runargs)
+    server.runservice(
+        opts,
+        initfn=service.init,
+        runfn=service.run,
+        logfile=options.logfile,
+        runargs=runargs,
+    )
--- a/tests/dummysmtpd.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/dummysmtpd.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,10 +18,12 @@
     ui as uimod,
 )
 
+
 def log(msg):
     sys.stdout.write(msg)
     sys.stdout.flush()
 
+
 class dummysmtpserver(smtpd.SMTPServer):
     def __init__(self, localaddr):
         smtpd.SMTPServer.__init__(self, localaddr, remoteaddr=None)
@@ -38,6 +40,7 @@
         # the expected way, and the server is available for subsequent requests.
         traceback.print_exc()
 
+
 class dummysmtpsecureserver(dummysmtpserver):
     def __init__(self, localaddr, certfile):
         dummysmtpserver.__init__(self, localaddr)
@@ -58,25 +61,30 @@
             return
         smtpd.SMTPChannel(self, conn, addr)
 
+
 def run():
     try:
         asyncore.loop()
     except KeyboardInterrupt:
         pass
 
+
 def _encodestrsonly(v):
     if isinstance(v, type(u'')):
         return v.encode('ascii')
     return v
 
+
 def bytesvars(obj):
     unidict = vars(obj)
     bd = {k.encode('ascii'): _encodestrsonly(v) for k, v in unidict.items()}
     if bd[b'daemon_postexec'] is not None:
         bd[b'daemon_postexec'] = [
-            _encodestrsonly(v) for v in bd[b'daemon_postexec']]
+            _encodestrsonly(v) for v in bd[b'daemon_postexec']
+        ]
     return bd
 
+
 def main():
     op = optparse.OptionParser()
     op.add_option('-d', '--daemon', action='store_true')
@@ -92,6 +100,7 @@
         op.error('--certificate must be specified')
 
     addr = (opts.address, opts.port)
+
     def init():
         if opts.tls == 'none':
             dummysmtpserver(addr)
@@ -100,9 +109,13 @@
         log('listening at %s:%d\n' % addr)
 
     server.runservice(
-        bytesvars(opts), initfn=init, runfn=run,
-        runargs=[pycompat.sysexecutable,
-                 pycompat.fsencode(__file__)] + pycompat.sysargv[1:])
+        bytesvars(opts),
+        initfn=init,
+        runfn=run,
+        runargs=[pycompat.sysexecutable, pycompat.fsencode(__file__)]
+        + pycompat.sysargv[1:],
+    )
+
 
 if __name__ == '__main__':
     main()
--- a/tests/failfilemerge.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/failfilemerge.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,12 +8,13 @@
     filemerge,
 )
 
-def failfilemerge(filemergefn,
-                  premerge, repo, wctx, mynode, orig, fcd, fco, fca,
-                  labels=None):
+
+def failfilemerge(
+    filemergefn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
+):
     raise error.Abort("^C")
     return filemergefn(premerge, repo, mynode, orig, fcd, fco, fca, labels)
 
+
 def extsetup(ui):
-    extensions.wrapfunction(filemerge, '_filemerge',
-                            failfilemerge)
+    extensions.wrapfunction(filemerge, '_filemerge', failfilemerge)
--- a/tests/fakedirstatewritetime.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/fakedirstatewritetime.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,6 +18,7 @@
 
 try:
     from mercurial import rustext
+
     rustext.__name__  # force actual import (see hgdemandimport)
 except ImportError:
     rustext = None
@@ -25,11 +26,13 @@
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem(b'fakedirstatewritetime', b'fakenow',
-    default=None,
+configitem(
+    b'fakedirstatewritetime', b'fakenow', default=None,
 )
 
 parsers = policy.importmod(r'parsers')
+rustmod = policy.importrust(r'parsers')
+
 
 def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
     # execute what original parsers.pack_dirstate should do actually
@@ -42,6 +45,7 @@
 
     return orig(dmap, copymap, pl, fakenow)
 
+
 def fakewrite(ui, func):
     # fake "now" of 'pack_dirstate' only if it is invoked while 'func'
 
@@ -57,16 +61,21 @@
     # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
     fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
 
-    if rustext is not None:
-        orig_module = rustext.dirstate
-        orig_pack_dirstate = rustext.dirstate.pack_dirstate
-    else:
-        orig_module = parsers
-        orig_pack_dirstate = parsers.pack_dirstate
+    if rustmod is not None:
+        # The Rust implementation does not use public parse/pack dirstate
+        # to prevent conversion round-trips
+        orig_dirstatemap_write = dirstate.dirstatemap.write
+        wrapper = lambda self, st, now: orig_dirstatemap_write(
+            self, st, fakenow
+        )
+        dirstate.dirstatemap.write = wrapper
 
     orig_dirstate_getfsnow = dirstate._getfsnow
     wrapper = lambda *args: pack_dirstate(fakenow, orig_pack_dirstate, *args)
 
+    orig_module = parsers
+    orig_pack_dirstate = parsers.pack_dirstate
+
     orig_module.pack_dirstate = wrapper
     dirstate._getfsnow = lambda *args: fakenow
     try:
@@ -74,17 +83,22 @@
     finally:
         orig_module.pack_dirstate = orig_pack_dirstate
         dirstate._getfsnow = orig_dirstate_getfsnow
+        if rustmod is not None:
+            dirstate.dirstatemap.write = orig_dirstatemap_write
+
 
 def _poststatusfixup(orig, workingctx, status, fixup):
     ui = workingctx.repo().ui
-    return fakewrite(ui, lambda : orig(workingctx, status, fixup))
+    return fakewrite(ui, lambda: orig(workingctx, status, fixup))
+
 
 def markcommitted(orig, committablectx, node):
     ui = committablectx.repo().ui
-    return fakewrite(ui, lambda : orig(committablectx, node))
+    return fakewrite(ui, lambda: orig(committablectx, node))
+
 
 def extsetup(ui):
-    extensions.wrapfunction(context.workingctx, '_poststatusfixup',
-                            _poststatusfixup)
-    extensions.wrapfunction(context.workingctx, 'markcommitted',
-                            markcommitted)
+    extensions.wrapfunction(
+        context.workingctx, '_poststatusfixup', _poststatusfixup
+    )
+    extensions.wrapfunction(context.workingctx, 'markcommitted', markcommitted)
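
`fakewrite` above follows a strict swap-call-restore discipline: save the
original attribute, install the wrapper, and restore the original in
`finally` even if the wrapped call raises. The same discipline as a generic
context manager (a sketch, not part of the extension):

    import contextlib

    @contextlib.contextmanager
    def patched(obj, name, replacement):
        orig = getattr(obj, name)
        setattr(obj, name, replacement)
        try:
            yield
        finally:
            # always restore, even if the body raised
            setattr(obj, name, orig)
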
--- a/tests/fakemergerecord.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/fakemergerecord.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,9 +12,15 @@
 cmdtable = {}
 command = registrar.command(cmdtable)
 
-@command(b'fakemergerecord',
-         [(b'X', b'mandatory', None, b'add a fake mandatory record'),
-          (b'x', b'advisory', None, b'add a fake advisory record')], '')
+
+@command(
+    b'fakemergerecord',
+    [
+        (b'X', b'mandatory', None, b'add a fake mandatory record'),
+        (b'x', b'advisory', None, b'add a fake advisory record'),
+    ],
+    '',
+)
 def fakemergerecord(ui, repo, *pats, **opts):
     with repo.wlock():
         ms = merge.mergestate.read(repo)
--- a/tests/fakepatchtime.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/fakepatchtime.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,18 +13,34 @@
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem(b'fakepatchtime', b'fakenow',
-    default=None,
+configitem(
+    b'fakepatchtime', b'fakenow', default=None,
 )
 
-def internalpatch(orig, ui, repo, patchobj, strip,
-                  prefix=b'', files=None,
-                  eolmode=b'strict', similarity=0):
+
+def internalpatch(
+    orig,
+    ui,
+    repo,
+    patchobj,
+    strip,
+    prefix=b'',
+    files=None,
+    eolmode=b'strict',
+    similarity=0,
+):
     if files is None:
         files = set()
-    r = orig(ui, repo, patchobj, strip,
-             prefix=prefix, files=files,
-             eolmode=eolmode, similarity=similarity)
+    r = orig(
+        ui,
+        repo,
+        patchobj,
+        strip,
+        prefix=prefix,
+        files=files,
+        eolmode=eolmode,
+        similarity=similarity,
+    )
 
     fakenow = ui.config(b'fakepatchtime', b'fakenow')
     if fakenow:
@@ -36,5 +52,6 @@
 
     return r
 
+
 def extsetup(ui):
     extensions.wrapfunction(patchmod, 'internalpatch', internalpatch)
--- a/tests/filterpyflakes.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/filterpyflakes.py	Mon Oct 21 11:09:48 2019 -0400
@@ -22,7 +22,7 @@
     for pat in pats:
         if re.search(pat, line):
             keep = False
-            break # pattern matches
+            break  # pattern matches
     if keep:
         fn = line.split(':', 1)[0]
         f = open(fn)
--- a/tests/flagprocessorext.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/flagprocessorext.py	Mon Oct 21 11:09:48 2019 -0400
@@ -12,35 +12,48 @@
     revlog,
     util,
 )
+from mercurial.revlogutils import flagutil
 
 # Test only: These flags are defined here only in the context of testing the
 # behavior of the flag processor. The canonical way to add flags is to get in
 # touch with the community and make them known in revlog.
-REVIDX_NOOP = (1 << 3)
-REVIDX_BASE64 = (1 << 2)
-REVIDX_GZIP = (1 << 1)
+REVIDX_NOOP = 1 << 3
+REVIDX_BASE64 = 1 << 2
+REVIDX_GZIP = 1 << 1
 REVIDX_FAIL = 1
 
+
 def validatehash(self, text):
     return True
 
+
 def bypass(self, text):
     return False
 
-def noopdonothing(self, text):
+
+def noopdonothing(self, text, sidedata):
     return (text, True)
 
-def b64encode(self, text):
+
+def noopdonothingread(self, text):
+    return (text, True, {})
+
+
+def b64encode(self, text, sidedata):
     return (base64.b64encode(text), False)
 
+
 def b64decode(self, text):
-    return (base64.b64decode(text), True)
+    return (base64.b64decode(text), True, {})
 
-def gzipcompress(self, text):
+
+def gzipcompress(self, text, sidedata):
     return (zlib.compress(text), False)
 
+
 def gzipdecompress(self, text):
-    return (zlib.decompress(text), True)
+    return (zlib.decompress(text), True, {})
+
 
 def supportedoutgoingversions(orig, repo):
     versions = orig(repo)
@@ -49,16 +62,26 @@
     versions.add(b'03')
     return versions
 
+
 def allsupportedversions(orig, ui):
     versions = orig(ui)
     versions.add(b'03')
     return versions
 
+
 def makewrappedfile(obj):
     class wrappedfile(obj.__class__):
-        def addrevision(self, text, transaction, link, p1, p2,
-                        cachedelta=None, node=None,
-                        flags=revlog.REVIDX_DEFAULT_FLAGS):
+        def addrevision(
+            self,
+            text,
+            transaction,
+            link,
+            p1,
+            p2,
+            cachedelta=None,
+            node=None,
+            flags=flagutil.REVIDX_DEFAULT_FLAGS,
+        ):
             if b'[NOOP]' in text:
                 flags |= REVIDX_NOOP
 
@@ -73,14 +96,20 @@
             if b'[FAIL]' in text:
                 flags |= REVIDX_FAIL
 
-            return super(wrappedfile, self).addrevision(text, transaction, link,
-                                                        p1, p2,
-                                                        cachedelta=cachedelta,
-                                                        node=node,
-                                                        flags=flags)
+            return super(wrappedfile, self).addrevision(
+                text,
+                transaction,
+                link,
+                p1,
+                p2,
+                cachedelta=cachedelta,
+                node=node,
+                flags=flags,
+            )
 
     obj.__class__ = wrappedfile
 
+
 def reposetup(ui, repo):
     class wrappingflagprocessorrepo(repo.__class__):
         def file(self, f):
@@ -90,19 +119,18 @@
 
     repo.__class__ = wrappingflagprocessorrepo
 
+
 def extsetup(ui):
     # Enable changegroup3 for flags to be sent over the wire
     wrapfunction = extensions.wrapfunction
-    wrapfunction(changegroup,
-                 'supportedoutgoingversions',
-                 supportedoutgoingversions)
-    wrapfunction(changegroup,
-                 'allsupportedversions',
-                 allsupportedversions)
+    wrapfunction(
+        changegroup, 'supportedoutgoingversions', supportedoutgoingversions
+    )
+    wrapfunction(changegroup, 'allsupportedversions', allsupportedversions)
 
     # Teach revlog about our test flags
     flags = [REVIDX_NOOP, REVIDX_BASE64, REVIDX_GZIP, REVIDX_FAIL]
-    revlog.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags)
+    flagutil.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags)
     revlog.REVIDX_FLAGS_ORDER.extend(flags)
 
     # Teach exchange to use changegroup 3
@@ -110,27 +138,12 @@
         exchange._bundlespeccontentopts[k][b"cg.version"] = b"03"
 
     # Register flag processors for each extension
-    revlog.addflagprocessor(
-        REVIDX_NOOP,
-        (
-            noopdonothing,
-            noopdonothing,
-            validatehash,
-        )
+    flagutil.addflagprocessor(
+        REVIDX_NOOP, (noopdonothingread, noopdonothing, validatehash,)
     )
-    revlog.addflagprocessor(
-        REVIDX_BASE64,
-        (
-            b64decode,
-            b64encode,
-            bypass,
-        ),
+    flagutil.addflagprocessor(
+        REVIDX_BASE64, (b64decode, b64encode, bypass,),
     )
-    revlog.addflagprocessor(
-        REVIDX_GZIP,
-        (
-            gzipdecompress,
-            gzipcompress,
-            bypass
-        )
+    flagutil.addflagprocessor(
+        REVIDX_GZIP, (gzipdecompress, gzipcompress, bypass)
     )
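
After this change a flag processor is a `(read, write, validatehash)` triple
in which `write` also receives `sidedata` and `read` returns it. A no-op
triple matching the signatures visible in this diff (the flag bit is
hypothetical):

    REVIDX_EXAMPLE = 1 << 4  # hypothetical flag bit for illustration

    def exampleread(self, text):
        return (text, True, {})  # (text, validatehash, sidedata)

    def examplewrite(self, text, sidedata):
        return (text, True)  # (text, validatehash)

    def examplevalidate(self, text):
        return True

    # registration shape, as above:
    # flagutil.addflagprocessor(REVIDX_EXAMPLE,
    #                           (exampleread, examplewrite, examplevalidate))
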
--- a/tests/fsmonitor-run-tests.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/fsmonitor-run-tests.py	Mon Oct 21 11:09:48 2019 -0400
@@ -28,14 +28,18 @@
 
 if sys.version_info > (3, 5, 0):
     PYTHON3 = True
-    xrange = range # we use xrange in one place, and we'd rather not use range
+    xrange = range  # we use xrange in one place, and we'd rather not use range
+
     def _bytespath(p):
         return p.encode('utf-8')
 
+
 elif sys.version_info >= (3, 0, 0):
-    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
-          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
-    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
+    print(
+        '%s is only supported on Python 3.5+ and 2.7, not %s'
+        % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
+    )
+    sys.exit(70)  # EX_SOFTWARE from `man 3 sysexits`
 else:
     PYTHON3 = False
 
@@ -46,21 +50,29 @@
     def _bytespath(p):
         return p
 
+
 def getparser():
     """Obtain the argument parser used by the CLI."""
     parser = argparse.ArgumentParser(
         description='Run tests with fsmonitor enabled.',
-        epilog='Unrecognized options are passed to run-tests.py.')
+        epilog='Unrecognized options are passed to run-tests.py.',
+    )
     # - keep these sorted
     # - none of these options should conflict with any in run-tests.py
-    parser.add_argument('--keep-fsmonitor-tmpdir', action='store_true',
-        help='keep temporary directory with fsmonitor state')
-    parser.add_argument('--watchman',
+    parser.add_argument(
+        '--keep-fsmonitor-tmpdir',
+        action='store_true',
+        help='keep temporary directory with fsmonitor state',
+    )
+    parser.add_argument(
+        '--watchman',
         help='location of watchman binary (default: watchman in PATH)',
-        default='watchman')
+        default='watchman',
+    )
 
     return parser
 
+
 @contextlib.contextmanager
 def watchman(args):
     basedir = tempfile.mkdtemp(prefix='hg-fsmonitor')
@@ -82,19 +94,24 @@
 
         argv = [
             args.watchman,
-            '--sockname', sockfile,
-            '--logfile', logfile,
-            '--pidfile', pidfile,
-            '--statefile', statefile,
+            '--sockname',
+            sockfile,
+            '--logfile',
+            logfile,
+            '--pidfile',
+            pidfile,
+            '--statefile',
+            statefile,
             '--foreground',
-            '--log-level=2', # debug logging for watchman
+            '--log-level=2',  # debug logging for watchman
         ]
 
         envb = osenvironb.copy()
         envb[b'WATCHMAN_CONFIG_FILE'] = _bytespath(cfgfile)
         with open(clilogfile, 'wb') as f:
             proc = subprocess.Popen(
-                argv, env=envb, stdin=None, stdout=f, stderr=f)
+                argv, env=envb, stdin=None, stdout=f, stderr=f
+            )
             try:
                 yield sockfile
             finally:
@@ -106,6 +123,7 @@
         else:
             shutil.rmtree(basedir, ignore_errors=True)
 
+
 def run():
     parser = getparser()
     args, runtestsargv = parser.parse_known_args()
@@ -120,20 +138,23 @@
         blacklist = os.path.join(runtestdir, 'blacklists', 'fsmonitor')
 
         runtestsargv.insert(0, runtests)
-        runtestsargv.extend([
-            '--extra-config',
-            'extensions.fsmonitor=',
-            # specify fsmonitor.mode=paranoid always in order to force
-            # fsmonitor extension execute "paranoid" code path
-            #
-            # TODO: make fsmonitor-run-tests.py accept specific options
-            '--extra-config',
-            'fsmonitor.mode=paranoid',
-            '--blacklist',
-            blacklist,
-        ])
+        runtestsargv.extend(
+            [
+                '--extra-config',
+                'extensions.fsmonitor=',
+                # specify fsmonitor.mode=paranoid always in order to force
+                # the fsmonitor extension to execute the "paranoid" code path
+                #
+                # TODO: make fsmonitor-run-tests.py accept specific options
+                '--extra-config',
+                'fsmonitor.mode=paranoid',
+                '--blacklist',
+                blacklist,
+            ]
+        )
 
         return subprocess.call(runtestsargv)
 
+
 if __name__ == '__main__':
     sys.exit(run())
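
The ``watchman()`` helper above is the usual spawn-and-clean-up shape for
``contextlib.contextmanager``. A reduced sketch of the same pattern, with a
generic daemon command standing in for watchman::

    import contextlib
    import shutil
    import subprocess
    import tempfile

    @contextlib.contextmanager
    def daemon(argv):
        basedir = tempfile.mkdtemp(prefix='hg-demo')
        proc = subprocess.Popen(argv)
        try:
            yield proc  # the body of the with-statement runs here
        finally:
            proc.terminate()  # clean up even if the body raised
            proc.wait()
            shutil.rmtree(basedir, ignore_errors=True)

    # usage: with daemon(['sleep', '60']) as proc: ...
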
--- a/tests/generate-working-copy-states.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/generate-working-copy-states.py	Mon Oct 21 11:09:48 2019 -0400
@@ -43,16 +43,27 @@
     depth = len(parentcontents)
     if depth == maxchangesets + 1:
         for tracked in (b'untracked', b'tracked'):
-            filename = b"_".join([(content is None and b'missing' or content)
-                                for content in parentcontents]) + b"-" + tracked
+            filename = (
+                b"_".join(
+                    [
+                        (content is None and b'missing' or content)
+                        for content in parentcontents
+                    ]
+                )
+                + b"-"
+                + tracked
+            )
             yield (filename, parentcontents)
     else:
-        for content in ({None, b'content' + (b"%d" % (depth + 1))} |
-                      set(parentcontents)):
-            for combination in generatestates(maxchangesets,
-                                              parentcontents + [content]):
+        for content in {None, b'content' + (b"%d" % (depth + 1))} | set(
+            parentcontents
+        ):
+            for combination in generatestates(
+                maxchangesets, parentcontents + [content]
+            ):
                 yield combination
 
+
 # retrieve the command line arguments
 target = sys.argv[1]
 maxchangesets = int(sys.argv[2])
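
Assuming the ``generatestates()`` definition above is in scope, the generated
names can be listed directly; with ``maxchangesets=1`` this yields entries
such as ``b'missing_missing-untracked'``::

    for filename, parentcontents in generatestates(1, []):
        print(filename, parentcontents)
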
--- a/tests/get-with-headers.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/get-with-headers.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,6 +19,7 @@
 
 try:
     import msvcrt
+
     msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
     msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
 except ImportError:
@@ -31,11 +32,14 @@
 parser.add_argument('--headeronly', action='store_true')
 parser.add_argument('--json', action='store_true')
 parser.add_argument('--hgproto')
-parser.add_argument('--requestheader', nargs='*', default=[],
-                    help='Send an additional HTTP request header. Argument '
-                         'value is <header>=<value>')
-parser.add_argument('--bodyfile',
-                    help='Write HTTP response body to a file')
+parser.add_argument(
+    '--requestheader',
+    nargs='*',
+    default=[],
+    help='Send an additional HTTP request header. Argument '
+    'value is <header>=<value>',
+)
+parser.add_argument('--bodyfile', help='Write HTTP response body to a file')
 parser.add_argument('host')
 parser.add_argument('path')
 parser.add_argument('show', nargs='*')
@@ -49,6 +53,8 @@
 requestheaders = args.requestheader
 
 tag = None
+
+
 def request(host, path, show):
     assert not path.startswith('/'), path
     global tag
@@ -65,15 +71,19 @@
     conn = httplib.HTTPConnection(host)
     conn.request("GET", '/' + path, None, headers)
     response = conn.getresponse()
-    stdout.write(b'%d %s\n' % (response.status,
-                               response.reason.encode('ascii')))
+    stdout.write(
+        b'%d %s\n' % (response.status, response.reason.encode('ascii'))
+    )
     if show[:1] == ['-']:
-        show = sorted(h for h, v in response.getheaders()
-                      if h.lower() not in show)
+        show = sorted(
+            h for h, v in response.getheaders() if h.lower() not in show
+        )
     for h in [h.lower() for h in show]:
         if response.getheader(h, None) is not None:
-            stdout.write(b"%s: %s\n" % (h.encode('ascii'),
-                                        response.getheader(h).encode('ascii')))
+            stdout.write(
+                b"%s: %s\n"
+                % (h.encode('ascii'), response.getheader(h).encode('ascii'))
+            )
     if not headeronly:
         stdout.write(b'\n')
         data = response.read()
@@ -104,6 +114,7 @@
 
     return response.status
 
+
 status = request(args.host, args.path, args.show)
 if twice:
     status = request(args.host, args.path, args.show)
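
The core of ``request()`` above is a plain stdlib HTTP round trip
(``httplib`` is the Python 2 name for Python 3's ``http.client``). A
condensed sketch against a hypothetical local server::

    import http.client

    conn = http.client.HTTPConnection('localhost:8000')  # hypothetical
    conn.request('GET', '/', None, {'X-Demo-Header': 'value'})
    response = conn.getresponse()
    print(response.status, response.reason)
    for header, value in response.getheaders():
        print('%s: %s' % (header.lower(), value))
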
--- a/tests/heredoctest.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/heredoctest.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,10 +2,12 @@
 
 import sys
 
+
 def flush():
     sys.stdout.flush()
     sys.stderr.flush()
 
+
 globalvars = {}
 lines = sys.stdin.readlines()
 while lines:
--- a/tests/hghave.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/hghave.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,4 +1,4 @@
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
 
 import os
 import re
@@ -17,6 +17,7 @@
 
 try:
     import msvcrt
+
     msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
     msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
 except ImportError:
@@ -26,6 +27,7 @@
 stderr = getattr(sys.stderr, 'buffer', sys.stderr)
 
 if sys.version_info[0] >= 3:
+
     def _bytespath(p):
         if p is None:
             return p
@@ -35,35 +37,51 @@
         if p is None:
             return p
         return p.decode('utf-8')
+
+
 else:
+
     def _bytespath(p):
         return p
 
     _strpath = _bytespath
 
+
 def check(name, desc):
     """Registers a check function for a feature."""
+
     def decorator(func):
         checks[name] = (func, desc)
         return func
+
     return decorator
 
+
 def checkvers(name, desc, vers):
     """Registers a check function for each of a series of versions.
 
-    vers can be a list or an iterator"""
+    vers can be a list or an iterator.
+
+    Produces a series of feature checks that have the form <name><vers> without
+    any punctuation (even if there's punctuation in 'vers'; i.e. this produces
+    'py38', not 'py3.8' or 'py-38')."""
+
     def decorator(func):
         def funcv(v):
             def f():
                 return func(v)
+
             return f
+
         for v in vers:
             v = str(v)
             f = funcv(v)
             checks['%s%s' % (name, v.replace('.', ''))] = (f, desc % v)
         return func
+
     return decorator
 
+
 def checkfeatures(features):
     result = {
         'error': [],
@@ -94,13 +112,15 @@
 
     return result
 
+
 def require(features):
     """Require that features are available, exiting if not."""
     result = checkfeatures(features)
 
     for missing in result['missing']:
-        stderr.write(('skipped: unknown feature: %s\n'
-                      % missing).encode('utf-8'))
+        stderr.write(
+            ('skipped: unknown feature: %s\n' % missing).encode('utf-8')
+        )
     for msg in result['skipped']:
         stderr.write(('skipped: %s\n' % msg).encode('utf-8'))
     for msg in result['error']:
@@ -112,21 +132,25 @@
     if result['skipped'] or result['error']:
         sys.exit(1)
 
+
 def matchoutput(cmd, regexp, ignorestatus=False):
     """Return the match object if cmd executes successfully and its output
     is matched by the supplied regular expression.
     """
     r = re.compile(regexp)
     p = subprocess.Popen(
-        cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+    )
     s = p.communicate()[0]
     ret = p.returncode
     return (ignorestatus or not ret) and r.search(s)
 
+
 @check("baz", "GNU Arch baz client")
 def has_baz():
     return matchoutput('baz --version 2>&1', br'baz Bazaar version')
 
+
 @check("bzr", "Canonical's Bazaar client")
 def has_bzr():
     try:
@@ -135,48 +159,61 @@
         import bzrlib.errors
         import bzrlib.revision
         import bzrlib.revisionspec
+
         bzrlib.revisionspec.RevisionSpec
         return bzrlib.__doc__ is not None
     except (AttributeError, ImportError):
         return False
 
+
 @checkvers("bzr", "Canonical's Bazaar client >= %s", (1.14,))
 def has_bzr_range(v):
     major, minor = v.split('rc')[0].split('.')[0:2]
     try:
         import bzrlib
-        return (bzrlib.__doc__ is not None
-                and bzrlib.version_info[:2] >= (int(major), int(minor)))
+
+        return bzrlib.__doc__ is not None and bzrlib.version_info[:2] >= (
+            int(major),
+            int(minor),
+        )
     except ImportError:
         return False
 
+
 @check("chg", "running with chg")
 def has_chg():
     return 'CHGHG' in os.environ
 
+
 @check("cvs", "cvs client/server")
 def has_cvs():
     re = br'Concurrent Versions System.*?server'
     return matchoutput('cvs --version 2>&1', re) and not has_msys()
 
+
 @check("cvs112", "cvs client/server 1.12.* (not cvsnt)")
 def has_cvs112():
     re = br'Concurrent Versions System \(CVS\) 1.12.*?server'
     return matchoutput('cvs --version 2>&1', re) and not has_msys()
 
+
 @check("cvsnt", "cvsnt client/server")
 def has_cvsnt():
     re = br'Concurrent Versions System \(CVSNT\) (\d+).(\d+).*\(client/server\)'
     return matchoutput('cvsnt --version 2>&1', re)
 
+
 @check("darcs", "darcs client")
 def has_darcs():
     return matchoutput('darcs --version', br'\b2\.([2-9]|\d{2})', True)
 
+
 @check("mtn", "monotone client (>= 1.0)")
 def has_mtn():
     return matchoutput('mtn --version', br'monotone', True) and not matchoutput(
-        'mtn --version', br'monotone 0\.', True)
+        'mtn --version', br'monotone 0\.', True
+    )
+
 
 @check("eol-in-paths", "end-of-lines in paths")
 def has_eol_in_paths():
@@ -188,6 +225,7 @@
     except (IOError, OSError):
         return False
 
+
 @check("execbit", "executable bit")
 def has_executablebit():
     try:
@@ -198,7 +236,7 @@
             m = os.stat(fn).st_mode & 0o777
             new_file_has_exec = m & EXECFLAGS
             os.chmod(fn, m ^ EXECFLAGS)
-            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0o777) == m)
+            exec_flags_cannot_flip = (os.stat(fn).st_mode & 0o777) == m
         finally:
             os.unlink(fn)
     except (IOError, OSError):
@@ -206,6 +244,7 @@
         return False
     return not (new_file_has_exec or exec_flags_cannot_flip)
 
+
 @check("icasefs", "case insensitive file system")
 def has_icasefs():
     # Stolen from mercurial.util
@@ -225,6 +264,7 @@
     finally:
         os.remove(path)
 
+
 @check("fifo", "named pipes")
 def has_fifo():
     if getattr(os, "mkfifo", None) is None:
@@ -237,10 +277,12 @@
     except OSError:
         return False
 
+
 @check("killdaemons", 'killdaemons.py support')
 def has_killdaemons():
     return True
 
+
 @check("cacheable", "cacheable filesystem")
 def has_cacheable_fs():
     from mercurial import util
@@ -252,59 +294,71 @@
     finally:
         os.remove(path)
 
+
 @check("lsprof", "python lsprof module")
 def has_lsprof():
     try:
         import _lsprof
-        _lsprof.Profiler # silence unused import warning
+
+        _lsprof.Profiler  # silence unused import warning
         return True
     except ImportError:
         return False
 
+
 def gethgversion():
     m = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)')
     if not m:
         return (0, 0)
     return (int(m.group(1)), int(m.group(2)))
 
-@checkvers("hg", "Mercurial >= %s",
-            list([(1.0 * x) / 10 for x in range(9, 99)]))
+
+@checkvers(
+    "hg", "Mercurial >= %s", list([(1.0 * x) / 10 for x in range(9, 99)])
+)
 def has_hg_range(v):
     major, minor = v.split('.')[0:2]
     return gethgversion() >= (int(major), int(minor))
 
+
 @check("hg08", "Mercurial >= 0.8")
 def has_hg08():
     if checks["hg09"][0]():
         return True
     return matchoutput('hg help annotate 2>&1', '--date')
 
+
 @check("hg07", "Mercurial >= 0.7")
 def has_hg07():
     if checks["hg08"][0]():
         return True
     return matchoutput('hg --version --quiet 2>&1', 'Mercurial Distributed SCM')
 
+
 @check("hg06", "Mercurial >= 0.6")
 def has_hg06():
     if checks["hg07"][0]():
         return True
     return matchoutput('hg --version --quiet 2>&1', 'Mercurial version')
 
+
 @check("gettext", "GNU Gettext (msgfmt)")
 def has_gettext():
     return matchoutput('msgfmt --version', br'GNU gettext-tools')
 
+
 @check("git", "git command line client")
 def has_git():
     return matchoutput('git --version 2>&1', br'^git version')
 
+
 def getgitversion():
     m = matchoutput('git --version 2>&1', br'git version (\d+)\.(\d+)')
     if not m:
         return (0, 0)
     return (int(m.group(1)), int(m.group(2)))
 
+
 # https://github.com/git-lfs/lfs-test-server
 @check("lfs-test-server", "git-lfs test server")
 def has_lfsserver():
@@ -316,40 +370,49 @@
         for path in os.environ["PATH"].split(os.pathsep)
     )
 
+
 @checkvers("git", "git client (with ext::sh support) version >= %s", (1.9,))
 def has_git_range(v):
     major, minor = v.split('.')[0:2]
     return getgitversion() >= (int(major), int(minor))
 
+
 @check("docutils", "Docutils text processing library")
 def has_docutils():
     try:
         import docutils.core
-        docutils.core.publish_cmdline # silence unused import
+
+        docutils.core.publish_cmdline  # silence unused import
         return True
     except ImportError:
         return False
 
+
 def getsvnversion():
     m = matchoutput('svn --version --quiet 2>&1', br'^(\d+)\.(\d+)')
     if not m:
         return (0, 0)
     return (int(m.group(1)), int(m.group(2)))
 
+
 @checkvers("svn", "subversion client and admin tools >= %s", (1.3, 1.5))
 def has_svn_range(v):
     major, minor = v.split('.')[0:2]
     return getsvnversion() >= (int(major), int(minor))
 
+
 @check("svn", "subversion client and admin tools")
 def has_svn():
-    return (matchoutput('svn --version 2>&1', br'^svn, version') and
-            matchoutput('svnadmin --version 2>&1', br'^svnadmin, version'))
+    return matchoutput('svn --version 2>&1', br'^svn, version') and matchoutput(
+        'svnadmin --version 2>&1', br'^svnadmin, version'
+    )
+
 
 @check("svn-bindings", "subversion python bindings")
 def has_svn_bindings():
     try:
         import svn.core
+
         version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
         if version < (1, 4):
             return False
@@ -357,10 +420,13 @@
     except ImportError:
         return False
 
+
 @check("p4", "Perforce server and client")
 def has_p4():
-    return (matchoutput('p4 -V', br'Rev\. P4/') and
-            matchoutput('p4d -V', br'Rev\. P4D/'))
+    return matchoutput('p4 -V', br'Rev\. P4/') and matchoutput(
+        'p4d -V', br'Rev\. P4D/'
+    )
+
 
 @check("symlink", "symbolic links")
 def has_symlink():
@@ -374,9 +440,11 @@
     except (OSError, AttributeError):
         return False
 
+
 @check("hardlink", "hardlinks")
 def has_hardlink():
     from mercurial import util
+
     fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
     os.close(fh)
     name = tempfile.mktemp(dir='.', prefix=tempprefix)
@@ -389,15 +457,18 @@
     finally:
         os.unlink(fn)
 
+
 @check("hardlink-whitelisted", "hardlinks on whitelisted filesystems")
 def has_hardlink_whitelisted():
     from mercurial import util
+
     try:
         fstype = util.getfstype(b'.')
     except OSError:
         return False
     return fstype in util._hardlinkfswhitelist
 
+
 @check("rmcwd", "can remove current working directory")
 def has_rmcwd():
     ocwd = os.getcwd()
@@ -418,22 +489,27 @@
         except OSError:
             pass
 
+
 @check("tla", "GNU Arch tla client")
 def has_tla():
     return matchoutput('tla --version 2>&1', br'The GNU Arch Revision')
 
+
 @check("gpg", "gpg client")
 def has_gpg():
     return matchoutput('gpg --version 2>&1', br'GnuPG')
 
+
 @check("gpg2", "gpg client v2")
 def has_gpg2():
     return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.')
 
+
 @check("gpg21", "gpg client v2.1+")
 def has_gpg21():
     return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.(?!0)')
 
+
 @check("unix-permissions", "unix-style permissions")
 def has_unix_permissions():
     d = tempfile.mkdtemp(dir='.', prefix=tempprefix)
@@ -451,25 +527,30 @@
     finally:
         os.rmdir(d)
 
+
 @check("unix-socket", "AF_UNIX socket family")
 def has_unix_socket():
     return getattr(socket, 'AF_UNIX', None) is not None
 
+
 @check("root", "root permissions")
 def has_root():
     return getattr(os, 'geteuid', None) and os.geteuid() == 0
 
+
 @check("pyflakes", "Pyflakes python linter")
 def has_pyflakes():
-    return matchoutput("sh -c \"echo 'import re' 2>&1 | pyflakes\"",
-                       br"<stdin>:1: 're' imported but unused",
-                       True)
+    return matchoutput(
+        "sh -c \"echo 'import re' 2>&1 | pyflakes\"",
+        br"<stdin>:1: 're' imported but unused",
+        True,
+    )
+
 
 @check("pylint", "Pylint python linter")
 def has_pylint():
-    return matchoutput("pylint --help",
-                       br"Usage:  pylint",
-                       True)
+    return matchoutput("pylint --help", br"Usage:  pylint", True)
+
 
 @check("clang-format", "clang-format C code formatter")
 def has_clang_format():
@@ -477,49 +558,59 @@
     # style changed somewhere between 4.x and 6.x
     return m and int(m.group(1)) >= 6
 
+
 @check("jshint", "JSHint static code analysis tool")
 def has_jshint():
     return matchoutput("jshint --version 2>&1", br"jshint v")
 
+
 @check("pygments", "Pygments source highlighting library")
 def has_pygments():
     try:
         import pygments
-        pygments.highlight # silence unused import warning
+
+        pygments.highlight  # silence unused import warning
         return True
     except ImportError:
         return False
 
+
 @check("outer-repo", "outer repo")
 def has_outer_repo():
     # failing for reasons other than 'no repo' implies that there is a repo
-    return not matchoutput('hg root 2>&1',
-                           br'abort: no repository found', True)
+    return not matchoutput('hg root 2>&1', br'abort: no repository found', True)
+
 
 @check("ssl", "ssl module available")
 def has_ssl():
     try:
         import ssl
+
         ssl.CERT_NONE
         return True
     except ImportError:
         return False
 
+
 @check("sslcontext", "python >= 2.7.9 ssl")
 def has_sslcontext():
     try:
         import ssl
+
         ssl.SSLContext
         return True
     except (ImportError, AttributeError):
         return False
 
+
 @check("defaultcacerts", "can verify SSL certs by system's CA certs store")
 def has_defaultcacerts():
     from mercurial import sslutil, ui as uimod
+
     ui = uimod.ui.load()
     return sslutil._defaultcacerts(ui) or sslutil._canloaddefaultcerts
 
+
 @check("defaultcacertsloaded", "detected presence of loaded system CA certs")
 def has_defaultcacertsloaded():
     import ssl
@@ -540,67 +631,82 @@
 
     return len(ctx.get_ca_certs()) > 0
 
+
 @check("tls1.2", "TLS 1.2 protocol support")
 def has_tls1_2():
     from mercurial import sslutil
+
     return b'tls1.2' in sslutil.supportedprotocols
 
+
 @check("windows", "Windows")
 def has_windows():
     return os.name == 'nt'
 
+
 @check("system-sh", "system() uses sh")
 def has_system_sh():
     return os.name != 'nt'
 
+
 @check("serve", "platform and python can manage 'hg serve -d'")
 def has_serve():
     return True
 
+
 @check("test-repo", "running tests from repository")
 def has_test_repo():
     t = os.environ["TESTDIR"]
     return os.path.isdir(os.path.join(t, "..", ".hg"))
 
+
 @check("tic", "terminfo compiler and curses module")
 def has_tic():
     try:
         import curses
+
         curses.COLOR_BLUE
         return matchoutput('test -x "`which tic`"', br'')
     except ImportError:
         return False
 
+
 @check("msys", "Windows with MSYS")
 def has_msys():
     return os.getenv('MSYSTEM')
 
+
 @check("aix", "AIX")
 def has_aix():
     return sys.platform.startswith("aix")
 
+
 @check("osx", "OS X")
 def has_osx():
     return sys.platform == 'darwin'
 
+
 @check("osxpackaging", "OS X packaging tools")
 def has_osxpackaging():
     try:
-        return (matchoutput('pkgbuild', br'Usage: pkgbuild ', ignorestatus=1)
-                and matchoutput(
-                    'productbuild', br'Usage: productbuild ',
-                    ignorestatus=1)
-                and matchoutput('lsbom', br'Usage: lsbom', ignorestatus=1)
-                and matchoutput(
-                    'xar --help', br'Usage: xar', ignorestatus=1))
+        return (
+            matchoutput('pkgbuild', br'Usage: pkgbuild ', ignorestatus=1)
+            and matchoutput(
+                'productbuild', br'Usage: productbuild ', ignorestatus=1
+            )
+            and matchoutput('lsbom', br'Usage: lsbom', ignorestatus=1)
+            and matchoutput('xar --help', br'Usage: xar', ignorestatus=1)
+        )
     except ImportError:
         return False
 
+
 @check('linuxormacos', 'Linux or MacOS')
 def has_linuxormacos():
     # This isn't a perfect test for MacOS. But it is sufficient for our needs.
     return sys.platform.startswith(('linux', 'darwin'))
 
+
 @check("docker", "docker support")
 def has_docker():
     pat = br'A self-sufficient runtime for'
@@ -618,34 +724,45 @@
         return True
     return False
 
+
 @check("debhelper", "debian packaging tools")
 def has_debhelper():
     # Some versions of dpkg say `dpkg', some say 'dpkg' (` vs ' on the first
     # quote), so just accept anything in that spot.
-    dpkg = matchoutput('dpkg --version',
-                       br"Debian .dpkg' package management program")
-    dh = matchoutput('dh --help',
-                     br'dh is a part of debhelper.', ignorestatus=True)
-    dh_py2 = matchoutput('dh_python2 --help',
-                         br'other supported Python versions')
+    dpkg = matchoutput(
+        'dpkg --version', br"Debian .dpkg' package management program"
+    )
+    dh = matchoutput(
+        'dh --help', br'dh is a part of debhelper.', ignorestatus=True
+    )
+    dh_py2 = matchoutput(
+        'dh_python2 --help', br'other supported Python versions'
+    )
     # debuild comes from the 'devscripts' package, though you might want
     # the 'build-debs' package instead, which has a dependency on devscripts.
-    debuild = matchoutput('debuild --help',
-                          br'to run debian/rules with given parameter')
+    debuild = matchoutput(
+        'debuild --help', br'to run debian/rules with given parameter'
+    )
     return dpkg and dh and dh_py2 and debuild
 
-@check("debdeps",
-       "debian build dependencies (run dpkg-checkbuilddeps in contrib/)")
+
+@check(
+    "debdeps", "debian build dependencies (run dpkg-checkbuilddeps in contrib/)"
+)
 def has_debdeps():
     # just check exit status (ignoring output)
     path = '%s/../contrib/packaging/debian/control' % os.environ['TESTDIR']
     return matchoutput('dpkg-checkbuilddeps %s' % path, br'')
 
+
 @check("demandimport", "demandimport enabled")
 def has_demandimport():
     # chg disables demandimport intentionally for performance wins.
-    return ((not has_chg()) and os.environ.get('HGDEMANDIMPORT') != 'disable')
+    return (not has_chg()) and os.environ.get('HGDEMANDIMPORT') != 'disable'
+
 
+# Add "py27", "py35", ... as possible feature checks. Note that there's no
+# punctuation here.
 @checkvers("py", "Python >= %s", (2.7, 3.5, 3.6, 3.7, 3.8, 3.9))
 def has_python_range(v):
     major, minor = v.split('.')[0:2]
@@ -653,73 +770,91 @@
 
     return (py_major, py_minor) >= (int(major), int(minor))
 
+
 @check("py3", "running with Python 3.x")
 def has_py3():
     return 3 == sys.version_info[0]
 
+
 @check("py3exe", "a Python 3.x interpreter is available")
 def has_python3exe():
     return matchoutput('python3 -V', br'^Python 3.(5|6|7|8|9)')
 
+
 @check("pure", "running with pure Python code")
 def has_pure():
-    return any([
-        os.environ.get("HGMODULEPOLICY") == "py",
-        os.environ.get("HGTEST_RUN_TESTS_PURE") == "--pure",
-    ])
+    return any(
+        [
+            os.environ.get("HGMODULEPOLICY") == "py",
+            os.environ.get("HGTEST_RUN_TESTS_PURE") == "--pure",
+        ]
+    )
+
 
 @check("slow", "allow slow tests (use --allow-slow-tests)")
 def has_slow():
     return os.environ.get('HGTEST_SLOW') == 'slow'
 
+
 @check("hypothesis", "Hypothesis automated test generation")
 def has_hypothesis():
     try:
         import hypothesis
+
         hypothesis.given
         return True
     except ImportError:
         return False
 
+
 @check("unziplinks", "unzip(1) understands and extracts symlinks")
 def unzip_understands_symlinks():
     return matchoutput('unzip --help', br'Info-ZIP')
 
+
 @check("zstd", "zstd Python module available")
 def has_zstd():
     try:
         import mercurial.zstd
+
         mercurial.zstd.__version__
         return True
     except ImportError:
         return False
 
+
 @check("devfull", "/dev/full special file")
 def has_dev_full():
     return os.path.exists('/dev/full')
 
+
 @check("virtualenv", "Python virtualenv support")
 def has_virtualenv():
     try:
         import virtualenv
+
         virtualenv.ACTIVATE_SH
         return True
     except ImportError:
         return False
 
+
 @check("fsmonitor", "running tests with fsmonitor")
 def has_fsmonitor():
     return 'HGFSMONITOR_TESTS' in os.environ
 
+
 @check("fuzzywuzzy", "Fuzzy string matching library")
 def has_fuzzywuzzy():
     try:
         import fuzzywuzzy
+
         fuzzywuzzy.__version__
         return True
     except ImportError:
         return False
 
+
 @check("clang-libfuzzer", "clang new enough to include libfuzzer")
 def has_clang_libfuzzer():
     mat = matchoutput('clang --version', br'clang version (\d)')
@@ -728,23 +863,28 @@
         return int(mat.group(1)) > 5
     return False
 
+
 @check("clang-6.0", "clang 6.0 with version suffix (libfuzzer included)")
 def has_clang60():
     return matchoutput('clang-6.0 --version', br'clang version 6\.')
 
+
 @check("xdiff", "xdiff algorithm")
 def has_xdiff():
     try:
         from mercurial import policy
+
         bdiff = policy.importmod('bdiff')
         return bdiff.xdiffblocks(b'', b'') == [(0, 0, 0, 0)]
     except (ImportError, AttributeError):
         return False
 
+
 @check('extraextensions', 'whether tests are running with extra extensions')
 def has_extraextensions():
     return 'HGTESTEXTRAEXTENSIONS' in os.environ
 
+
 def getrepofeatures():
     """Obtain set of repository features in use.
 
@@ -783,26 +923,32 @@
 
     return features
 
+
 @check('reporevlogstore', 'repository using the default revlog store')
 def has_reporevlogstore():
     return 'revlogstore' in getrepofeatures()
 
+
 @check('reposimplestore', 'repository using simple storage extension')
 def has_reposimplestore():
     return 'simplestore' in getrepofeatures()
 
+
 @check('repobundlerepo', 'whether we can open bundle files as repos')
 def has_repobundlerepo():
     return 'bundlerepo' in getrepofeatures()
 
+
 @check('repofncache', 'repository has an fncache')
 def has_repofncache():
     return 'fncache' in getrepofeatures()
 
+
 @check('sqlite', 'sqlite3 module is available')
 def has_sqlite():
     try:
         import sqlite3
+
         version = sqlite3.sqlite_version_info
     except ImportError:
         return False
@@ -813,19 +959,33 @@
 
     return matchoutput('sqlite3 -version', br'^3\.\d+')
 
+
 @check('vcr', 'vcr http mocking library')
 def has_vcr():
     try:
         import vcr
+
         vcr.VCR
         return True
     except (ImportError, AttributeError):
         pass
     return False
 
+
 @check('emacs', 'GNU Emacs')
 def has_emacs():
     # Our emacs lisp uses `with-eval-after-load` which is new in emacs
     # 24.4, so we allow emacs 24.4, 24.5, and 25+ (24.5 was the last
     # 24 release)
     return matchoutput('emacs --version', b'GNU Emacs 2(4.4|4.5|5|6|7|8|9)')
+
+
+# @check('black', 'the black formatter for python')
+@check('grey', 'grey, the fork of the black formatter for python')
+def has_black():
+    # switch this to actual black as soon as possible
+    # blackcmd = 'black --version'
+    blackcmd = 'python3 $RUNTESTDIR/../contrib/grey.py --version'
+    # version_regex = br'black, version \d'
+    version_regex = br'grey.py, version \d'
+    return matchoutput(blackcmd, version_regex)
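
As the ``checkvers()`` docstring earlier in this file notes, version-specific
feature names are just the name plus the version with punctuation stripped.
The key derivation comes down to::

    for v in (2.7, 3.5, 3.8):
        print('py%s' % str(v).replace('.', ''))  # py27, py35, py38
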
--- a/tests/hgweberror.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/hgweberror.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,9 +2,8 @@
 
 from __future__ import absolute_import
 
-from mercurial.hgweb import (
-    webcommands,
-)
+from mercurial.hgweb import webcommands
+
 
 def raiseerror(web):
     '''Dummy web command that raises an uncaught Exception.'''
@@ -19,6 +18,7 @@
 
     raise AttributeError('I am an uncaught error!')
 
+
 def extsetup(ui):
     setattr(webcommands, 'raiseerror', raiseerror)
     webcommands.__all__.append(b'raiseerror')
--- a/tests/httpserverauth.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/httpserverauth.py	Mon Oct 21 11:09:48 2019 -0400
@@ -4,9 +4,8 @@
 import hashlib
 
 from mercurial.hgweb import common
-from mercurial import (
-    node,
-)
+from mercurial import node
+
 
 def parse_keqv_list(req, l):
     """Parse list of key=value strings where keys are not duplicated."""
@@ -18,6 +17,7 @@
         parsed[k] = v
     return parsed
 
+
 class digestauthserver(object):
     def __init__(self):
         self._user_hashes = {}
@@ -42,8 +42,10 @@
         # We aren't testing the protocol here, just that the bytes make the
         # proper round trip.  So hardcoded seems fine.
         nonce = b'064af982c5b571cea6450d8eda91c20d'
-        return b'realm="%s", nonce="%s", algorithm=MD5, qop="auth"' % (realm,
-                                                                       nonce)
+        return b'realm="%s", nonce="%s", algorithm=MD5, qop="auth"' % (
+            realm,
+            nonce,
+        )
 
     def checkauth(self, req, header):
         log = req.rawenv[b'wsgi.errors']
@@ -53,8 +55,9 @@
 
         if resp.get(b'algorithm', b'MD5').upper() != b'MD5':
             log.write(b'Unsupported algorithm: %s' % resp.get(b'algorithm'))
-            raise common.ErrorResponse(common.HTTP_FORBIDDEN,
-                                       b"unknown algorithm")
+            raise common.ErrorResponse(
+                common.HTTP_FORBIDDEN, b"unknown algorithm"
+            )
         user = resp[b'username']
         realm = resp[b'realm']
         nonce = resp[b'nonce']
@@ -79,22 +82,29 @@
 
         respdig = kd(ha1, noncebit)
         if respdig != resp[b'response']:
-            log.write(b'User/realm "%s/%s" gave %s, but expected %s'
-                      % (user, realm, resp[b'response'], respdig))
+            log.write(
+                b'User/realm "%s/%s" gave %s, but expected %s'
+                % (user, realm, resp[b'response'], respdig)
+            )
             return False
 
         return True
 
+
 digest = digestauthserver()
 
+
 def perform_authentication(hgweb, req, op):
     auth = req.headers.get(b'Authorization')
 
     if req.headers.get(b'X-HgTest-AuthType') == b'Digest':
         if not auth:
             challenge = digest.makechallenge(b'mercurial')
-            raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, b'who',
-                    [(b'WWW-Authenticate', b'Digest %s' % challenge)])
+            raise common.ErrorResponse(
+                common.HTTP_UNAUTHORIZED,
+                b'who',
+                [(b'WWW-Authenticate', b'Digest %s' % challenge)],
+            )
 
         if not digest.checkauth(req, auth[7:]):
             raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'no')
@@ -102,12 +112,16 @@
         return
 
     if not auth:
-        raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, b'who',
-                [(b'WWW-Authenticate', b'Basic Realm="mercurial"')])
+        raise common.ErrorResponse(
+            common.HTTP_UNAUTHORIZED,
+            b'who',
+            [(b'WWW-Authenticate', b'Basic Realm="mercurial"')],
+        )
 
     if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user', b'pass']:
         raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'no')
 
+
 def extsetup(ui):
     common.permhooks.insert(0, perform_authentication)
     digest.adduser(b'user', b'pass', b'mercurial')
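
For reference, the digest that ``checkauth()`` above verifies follows RFC
2617 with ``qop="auth"``; a standalone sketch with illustrative credentials
(the elided ``kd``/``noncebit`` details are assumed to match the RFC)::

    import hashlib

    def H(x):
        return hashlib.md5(x).hexdigest().encode('ascii')

    def KD(secret, data):
        return H(secret + b':' + data)

    user, realm, password = b'user', b'mercurial', b'pass'
    method, uri = b'GET', b'/'
    nonce = b'064af982c5b571cea6450d8eda91c20d'
    nc, cnonce, qop = b'00000001', b'deadbeef', b'auth'

    ha1 = H(b':'.join([user, realm, password]))
    ha2 = H(b':'.join([method, uri]))
    response = KD(ha1, b':'.join([nonce, nc, cnonce, qop, ha2]))
    print(response)
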
--- a/tests/hypothesishelpers.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/hypothesishelpers.py	Mon Oct 21 11:09:48 2019 -0400
@@ -21,14 +21,14 @@
 from hypothesis import given
 
 # hypothesis stores data regarding generated examples and code
-set_hypothesis_home_dir(os.path.join(
-    os.getenv('TESTTMP'), ".hypothesis"
-))
+set_hypothesis_home_dir(os.path.join(os.getenv('TESTTMP'), ".hypothesis"))
+
 
 def check(*args, **kwargs):
     """decorator to make a function a hypothesis test
 
     Decorated functions are run immediately (to be used doctest style)"""
+
     def accept(f):
         # Workaround for https://github.com/DRMacIver/hypothesis/issues/206
         # Fixed in version 1.13 (released 2015 october 29th)
@@ -39,22 +39,24 @@
         except Exception:
             traceback.print_exc(file=sys.stdout)
             sys.exit(1)
+
     return accept
 
 
 def roundtrips(data, decode, encode):
     """helper to tests function that must do proper encode/decode roundtripping
     """
+
     @given(data)
     def testroundtrips(value):
         encoded = encode(value)
         decoded = decode(encoded)
         if decoded != value:
             raise ValueError(
-                "Round trip failed: %s(%r) -> %s(%r) -> %r" % (
-                    encode.__name__, value, decode.__name__, encoded,
-                    decoded
-                ))
+                "Round trip failed: %s(%r) -> %s(%r) -> %r"
+                % (encode.__name__, value, decode.__name__, encoded, decoded)
+            )
+
     try:
         testroundtrips()
     except Exception:
@@ -66,6 +68,9 @@
 
 # strategy for generating bytestring that might be an issue for Mercurial
 bytestrings = (
-    st.builds(lambda s, e: s.encode(e), st.text(), st.sampled_from([
-        'utf-8', 'utf-16',
-    ]))) | st.binary()
+    st.builds(
+        lambda s, e: s.encode(e),
+        st.text(),
+        st.sampled_from(['utf-8', 'utf-16',]),
+    )
+) | st.binary()
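
A hypothetical use of ``roundtrips()`` above, checking that hex encoding
round-trips arbitrary byte strings (requires the ``hypothesis`` package, and
importing this module expects ``TESTTMP`` in the environment)::

    import binascii
    import hypothesis.strategies as st

    # encode with hexlify, decode with unhexlify; roundtrips() asserts
    # that decode(encode(value)) == value for generated binary inputs
    roundtrips(st.binary(), binascii.unhexlify, binascii.hexlify)
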
--- a/tests/killdaemons.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/killdaemons.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,7 +7,7 @@
 import sys
 import time
 
-if os.name =='nt':
+if os.name == 'nt':
     import ctypes
 
     _BOOL = ctypes.c_long
@@ -46,15 +46,17 @@
         WAIT_TIMEOUT = 258
         WAIT_FAILED = _DWORD(0xFFFFFFFF).value
         handle = ctypes.windll.kernel32.OpenProcess(
-                PROCESS_TERMINATE|SYNCHRONIZE|PROCESS_QUERY_INFORMATION,
-                False, pid)
+            PROCESS_TERMINATE | SYNCHRONIZE | PROCESS_QUERY_INFORMATION,
+            False,
+            pid,
+        )
         if handle is None:
-            _check(0, 87) # err 87 when process not found
-            return # process not found, already finished
+            _check(0, 87)  # err 87 when process not found
+            return  # process not found, already finished
         try:
             r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
             if r == WAIT_OBJECT_0:
-                pass # terminated, but process handle still available
+                pass  # terminated, but process handle still available
             elif r == WAIT_TIMEOUT:
                 _check(ctypes.windll.kernel32.TerminateProcess(handle, -1))
             elif r == WAIT_FAILED:
@@ -63,19 +65,21 @@
             # TODO?: forcefully kill when timeout
             #        and ?shorter waiting time? when tryhard==True
             r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
-                                                       # timeout = 100 ms
+            # timeout = 100 ms
             if r == WAIT_OBJECT_0:
-                pass # process is terminated
+                pass  # process is terminated
             elif r == WAIT_TIMEOUT:
                 logfn('# Daemon process %d is stuck' % pid)
             elif r == WAIT_FAILED:
                 _check(0)  # err stored in GetLastError()
-        except: #re-raises
-            ctypes.windll.kernel32.CloseHandle(handle) # no _check, keep error
+        except:  # re-raises
+            ctypes.windll.kernel32.CloseHandle(handle)  # no _check, keep error
             raise
         _check(ctypes.windll.kernel32.CloseHandle(handle))
 
+
 else:
+
     def kill(pid, logfn, tryhard=True):
         try:
             os.kill(pid, 0)
@@ -94,6 +98,7 @@
             if err.errno != errno.ESRCH:
                 raise
 
+
 def killdaemons(pidfile, tryhard=True, remove=False, logfn=None):
     if not logfn:
         logfn = lambda s: s
@@ -107,8 +112,10 @@
                     if pid <= 0:
                         raise ValueError
                 except ValueError:
-                    logfn('# Not killing daemon process %s - invalid pid'
-                          % line.rstrip())
+                    logfn(
+                        '# Not killing daemon process %s - invalid pid'
+                        % line.rstrip()
+                    )
                     continue
                 pids.append(pid)
         for pid in pids:
@@ -118,9 +125,10 @@
     except IOError:
         pass
 
+
 if __name__ == '__main__':
     if len(sys.argv) > 1:
-        path, = sys.argv[1:]
+        (path,) = sys.argv[1:]
     else:
         path = os.environ["DAEMON_PIDS"]
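
The POSIX ``kill()`` above is only partially visible in this hunk; a sketch
consistent with the surrounding context (probe with signal 0, send SIGTERM,
then escalate), with the timing details treated as illustrative::

    import errno
    import os
    import signal
    import time

    def kill(pid, logfn, tryhard=True):
        try:
            os.kill(pid, 0)  # probe; raises OSError(ESRCH) if pid is gone
            logfn('# Killing daemon process %d' % pid)
            os.kill(pid, signal.SIGTERM)
            if tryhard:
                for i in range(10):  # poll briefly for the process to exit
                    time.sleep(0.1)
                    os.kill(pid, 0)
            else:
                time.sleep(0.1)
                os.kill(pid, 0)
            logfn('# Daemon process %d is stuck - really killing it' % pid)
            os.kill(pid, signal.SIGKILL)
        except OSError as err:
            if err.errno != errno.ESRCH:
                raise
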
 
--- a/tests/list-tree.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/list-tree.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,6 +10,7 @@
 ap.add_argument('path', nargs='+')
 opts = ap.parse_args()
 
+
 def gather():
     for p in opts.path:
         if not os.path.exists(p):
@@ -24,4 +25,5 @@
         else:
             yield p
 
+
 print('\n'.join(sorted(gather(), key=lambda x: x.replace(os.path.sep, '/'))))
--- a/tests/lockdelay.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/lockdelay.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,8 +7,8 @@
 import os
 import time
 
+
 def reposetup(ui, repo):
-
     class delayedlockrepo(repo.__class__):
         def lock(self):
             delay = float(os.environ.get('HGPRELOCKDELAY', '0.0'))
@@ -19,4 +19,5 @@
             if delay:
                 time.sleep(delay)
             return res
+
     repo.__class__ = delayedlockrepo
--- a/tests/logexceptions.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/logexceptions.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,14 +18,16 @@
     extensions,
 )
 
+
 def handleexception(orig, ui):
     res = orig(ui)
 
     if not ui.environ.get(b'HGEXCEPTIONSDIR'):
         return res
 
-    dest = os.path.join(ui.environ[b'HGEXCEPTIONSDIR'],
-                        str(uuid.uuid4()).encode('ascii'))
+    dest = os.path.join(
+        ui.environ[b'HGEXCEPTIONSDIR'], str(uuid.uuid4()).encode('ascii')
+    )
 
     exc_type, exc_value, exc_tb = sys.exc_info()
 
@@ -69,6 +71,6 @@
         ]
         fh.write(b'\0'.join(p.encode('utf-8', 'replace') for p in parts))
 
+
 def extsetup(ui):
-    extensions.wrapfunction(dispatch, 'handlecommandexception',
-                            handleexception)
+    extensions.wrapfunction(dispatch, 'handlecommandexception', handleexception)
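
The ``extsetup()`` above relies on ``extensions.wrapfunction``, whose
wrappers receive the original callable as their first argument (hence
``handleexception(orig, ui)``). A simplified stand-in showing the shape::

    import types

    demo = types.SimpleNamespace(greet=lambda name: 'hello %s' % name)

    def wrapfunction(container, name, wrapper):
        # simplified stand-in for mercurial.extensions.wrapfunction
        orig = getattr(container, name)

        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)

        setattr(container, name, wrapped)

    def loudly(orig, name):
        return orig(name).upper()

    wrapfunction(demo, 'greet', loudly)
    print(demo.greet('world'))  # prints HELLO WORLD
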
--- a/tests/ls-l.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/ls-l.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,6 +8,7 @@
 import stat
 import sys
 
+
 def modestr(st):
     mode = st.st_mode
     result = ''
@@ -23,6 +24,7 @@
                 result += '-'
     return result
 
+
 def sizestr(st):
     if st.st_mode & stat.S_IFREG:
         return '%7d' % st.st_size
@@ -30,6 +32,7 @@
         # do not show size for non-regular files
         return ' ' * 7
 
+
 os.chdir((sys.argv[1:] + ['.'])[0])
 
 for name in sorted(os.listdir('.')):
--- a/tests/md5sum.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/md5sum.py	Mon Oct 21 11:09:48 2019 -0400
@@ -14,6 +14,7 @@
 
 try:
     import msvcrt
+
     msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
     msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
 except ImportError:
--- a/tests/mockblackbox.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/mockblackbox.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,14 +1,15 @@
 from __future__ import absolute_import
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
 
 # XXX: we should probably offer a devel option to do this in blackbox directly
 def getuser():
     return b'bob'
+
+
 def getpid():
     return 5000
 
+
 # mock the date and user apis so the output is always the same
 def uisetup(ui):
     procutil.getuser = getuser
--- a/tests/mockmakedate.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/mockmakedate.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,6 +7,7 @@
 from mercurial import pycompat
 from mercurial.utils import dateutil
 
+
 def mockmakedate():
     filename = os.path.join(os.environ['TESTTMP'], 'testtime')
     try:
@@ -18,4 +19,5 @@
         timef.write(pycompat.bytestr(time))
     return (time, 0)
 
+
 dateutil.makedate = mockmakedate
--- a/tests/mocktime.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/mocktime.py	Mon Oct 21 11:09:48 2019 -0400
@@ -3,6 +3,7 @@
 import os
 import time
 
+
 class mocktime(object):
     def __init__(self, increment):
         self.time = 0
@@ -14,5 +15,6 @@
         self.pos += 1
         return self.time
 
+
 def uisetup(ui):
     time.time = mocktime(os.environ.get('MOCKTIME', '0.1'))
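
Assuming the elided body of ``mocktime`` accumulates the increment on each
call, the effect after ``uisetup()`` is a deterministic clock; note the
increment arrives as a string, as read from ``MOCKTIME``::

    t = mocktime('0.1')
    print(t(), t(), t())  # roughly 0.1 0.2 0.3
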
--- a/tests/notcapable	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/notcapable	Mon Oct 21 11:09:48 2019 -0400
@@ -6,7 +6,8 @@
 fi
 
 cat > notcapable-$CAP.py << EOF
-from mercurial import extensions, localrepo, repository
+from mercurial import extensions, localrepo
+from mercurial.interfaces import repository
 def extsetup(ui):
     extensions.wrapfunction(repository.peer, 'capable', wrapcapable)
     extensions.wrapfunction(localrepo.localrepository, 'peer', wrappeer)
--- a/tests/phabricator/accept-4564.json	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,138 +0,0 @@
-{
-    "version": 1, 
-    "interactions": [
-        {
-            "request": {
-                "body": "api.token=cli-hahayouwish&ids%5B0%5D=4564", 
-                "headers": {
-                    "content-length": [
-                        "58"
-                    ], 
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ], 
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ], 
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ], 
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
-                    ]
-                }, 
-                "method": "POST", 
-                "uri": "https://phab.mercurial-scm.org//api/differential.query"
-            }, 
-            "response": {
-                "status": {
-                    "message": "OK", 
-                    "code": 200
-                }, 
-                "headers": {
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ], 
-                    "x-content-type-options": [
-                        "nosniff"
-                    ], 
-                    "set-cookie": [
-                        "phsid=A%2F24j2baem5tmap4tvfdz7ufmca2lhm3wx4agyqv4w; expires=Thu, 14-Sep-2023 04:24:35 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
-                    ], 
-                    "strict-transport-security": [
-                        "max-age=0; includeSubdomains; preload"
-                    ], 
-                    "server": [
-                        "Apache/2.4.10 (Debian)"
-                    ], 
-                    "connection": [
-                        "close"
-                    ], 
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ], 
-                    "cache-control": [
-                        "no-store"
-                    ], 
-                    "date": [
-                        "Sat, 15 Sep 2018 04:24:35 GMT"
-                    ], 
-                    "x-frame-options": [
-                        "Deny"
-                    ], 
-                    "content-type": [
-                        "application/json"
-                    ]
-                }, 
-                "body": {
-                    "string": "{\"result\":[{\"id\":\"4564\",\"phid\":\"PHID-DREV-6cgnf5fyeeqhntbxgfb7\",\"title\":\"localrepo: move some vfs initialization out of __init__\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D4564\",\"dateCreated\":\"1536856174\",\"dateModified\":\"1536856175\",\"authorPHID\":\"PHID-USER-p54bpwbifxx7sbgpx47d\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":[],\"branch\":null,\"summary\":\"In order to make repository types more dynamic, we'll need to move the\\nlogic for determining repository behavior out of\\nlocalrepository.__init__ so we can influence behavior before the type\\nis instantiated.\\n\\nThis commit starts that process by moving working directory and .hg\\/\\nvfs initialization to our new standalone function for instantiating\\nlocal repositories.\\n\\nAside from API changes, behavior should be fully backwards compatible.\\n\\n.. api::\\n\\n   localrepository.__init__ now does less work and accepts new args\\n\\n   Use ``hg.repository()``, ``localrepo.instance()``, or\\n   ``localrepo.makelocalrepository()`` to obtain a new local repository\\n   instance instead of calling the ``localrepository`` constructor\\n   directly.\",\"testPlan\":\"\",\"lineCount\":\"64\",\"activeDiffPHID\":\"PHID-DIFF-ir6bizkdou7fm7xhuo6v\",\"diffs\":[\"11002\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-gqp33hnxg65vkl3xioka\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\"}],\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        }, 
-        {
-            "request": {
-                "body": "api.token=cli-hahayouwish&objectIdentifier=PHID-DREV-6cgnf5fyeeqhntbxgfb7&transactions%5B0%5D%5Btype%5D=accept&transactions%5B0%5D%5Bvalue%5D=true&transactions%5B1%5D%5Btype%5D=comment&transactions%5B1%5D%5Bvalue%5D=I+think+I+like+where+this+is+headed.+Will+read+rest+of+series+later.", 
-                "headers": {
-                    "content-length": [
-                        "301"
-                    ], 
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ], 
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ], 
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ], 
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
-                    ]
-                }, 
-                "method": "POST", 
-                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit"
-            }, 
-            "response": {
-                "status": {
-                    "message": "OK", 
-                    "code": 200
-                }, 
-                "headers": {
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ], 
-                    "x-content-type-options": [
-                        "nosniff"
-                    ], 
-                    "set-cookie": [
-                        "phsid=A%2Fcna7xx3xon5xxyoasbveqlfz4fswd2risihw7dff; expires=Thu, 14-Sep-2023 04:24:36 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
-                    ], 
-                    "strict-transport-security": [
-                        "max-age=0; includeSubdomains; preload"
-                    ], 
-                    "server": [
-                        "Apache/2.4.10 (Debian)"
-                    ], 
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ], 
-                    "cache-control": [
-                        "no-store"
-                    ], 
-                    "date": [
-                        "Sat, 15 Sep 2018 04:24:36 GMT"
-                    ], 
-                    "x-frame-options": [
-                        "Deny"
-                    ], 
-                    "content-type": [
-                        "application/json"
-                    ]
-                }, 
-                "body": {
-                    "string": "{\"result\":{\"object\":{\"id\":\"4564\",\"phid\":\"PHID-DREV-6cgnf5fyeeqhntbxgfb7\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-nfqswjwwfuzdrhb\"},{\"phid\":\"PHID-XACT-DREV-oqb5pkqsdify6nm\"},{\"phid\":\"PHID-XACT-DREV-i6epvc7avyv3ve7\"},{\"phid\":\"PHID-XACT-DREV-du5hbg5rege3i5w\"}]},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        }
-    ]
-}
--- a/tests/phabricator/phab-conduit.json	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-{
-    "interactions": [
-        {
-            "response": {
-                "status": {
-                    "message": "OK", 
-                    "code": 200
-                }, 
-                "headers": {
-                    "content-type": [
-                        "application/json"
-                    ], 
-                    "date": [
-                        "Fri, 21 Dec 2018 22:19:11 GMT"
-                    ], 
-                    "x-content-type-options": [
-                        "nosniff"
-                    ], 
-                    "cache-control": [
-                        "no-store"
-                    ], 
-                    "strict-transport-security": [
-                        "max-age=0; includeSubdomains; preload"
-                    ], 
-                    "x-frame-options": [
-                        "Deny"
-                    ], 
-                    "set-cookie": [
-                        "phsid=A%2Fdv22bpksbdis3vfeksluagfslhfojblbnkro7we4; expires=Wed, 20-Dec-2023 22:19:11 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
-                    ], 
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ], 
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ], 
-                    "transfer-encoding": [
-                        "chunked"
-                    ], 
-                    "server": [
-                        "Apache/2.4.10 (Debian)"
-                    ]
-                }, 
-                "body": {
-                    "string": "{\"result\":{\"data\":[],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
-                }
-            }, 
-            "request": {
-                "method": "POST", 
-                "headers": {
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ], 
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ], 
-                    "content-length": [
-                        "70"
-                    ], 
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ], 
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 4.8.1+564-6f483b107eb5+20181221)"
-                    ]
-                }, 
-                "uri": "https://phab.mercurial-scm.org//api/user.search", 
-                "body": "constraints%5BisBot%5D=true&api.token=cli-hahayouwish"
-            }
-        }
-    ], 
-    "version": 1
-}
\ No newline at end of file
--- a/tests/phabricator/phabread-4480.json	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,200 +0,0 @@
-{
-    "version": 1, 
-    "interactions": [
-        {
-            "response": {
-                "headers": {
-                    "cache-control": [
-                        "no-store"
-                    ], 
-                    "set-cookie": [
-                        "phsid=A%2F6ypywsajmaqsclzrydncbnegfczzct2m5c4wovqw; expires=Thu, 14-Sep-2023 04:15:56 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
-                    ], 
-                    "server": [
-                        "Apache/2.4.10 (Debian)"
-                    ], 
-                    "content-type": [
-                        "application/json"
-                    ], 
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ], 
-                    "x-content-type-options": [
-                        "nosniff"
-                    ], 
-                    "date": [
-                        "Sat, 15 Sep 2018 04:15:56 GMT"
-                    ], 
-                    "strict-transport-security": [
-                        "max-age=0; includeSubdomains; preload"
-                    ], 
-                    "x-frame-options": [
-                        "Deny"
-                    ], 
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                }, 
-                "status": {
-                    "code": 200, 
-                    "message": "OK"
-                }, 
-                "body": {
-                    "string": "{\"result\":[{\"id\":\"4480\",\"phid\":\"PHID-DREV-gsa7dkuimmam7nafw7h3\",\"title\":\"exchangev2: start to implement pull with wire protocol v2\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D4480\",\"dateCreated\":\"1536164431\",\"dateModified\":\"1536981352\",\"authorPHID\":\"PHID-USER-p54bpwbifxx7sbgpx47d\",\"status\":\"3\",\"statusName\":\"Closed\",\"properties\":{\"wasAcceptedBeforeClose\":false},\"branch\":null,\"summary\":\"Wire protocol version 2 will take a substantially different\\napproach to exchange than version 1 (at least as far as pulling\\nis concerned).\\n\\nThis commit establishes a new exchangev2 module for holding\\ncode related to exchange using wire protocol v2. I could have\\nadded things to the existing exchange module. But it is already\\nquite big. And doing things inline isn't in question because\\nthe existing code is already littered with conditional code\\nfor various states of support for the existing wire protocol\\nas it evolved over 10+ years. A new module gives us a chance\\nto make a clean break.\\n\\nThis approach does mean we'll end up writing some duplicate\\ncode. And there's a significant chance we'll miss functionality\\nas code is ported. The plan is to eventually add #testcase's\\nto existing tests so the new wire protocol is tested side-by-side\\nwith the existing one. This will hopefully tease out any\\nfeatures that weren't ported properly. But before we get there,\\nwe need to build up support for the new exchange methods.\\n\\nOur journey towards implementing a new exchange begins with pulling.\\nAnd pulling begins with discovery.\\n\\nThe discovery code added to exchangev2 is heavily drawn from\\nthe following functions:\\n\\n* exchange._pulldiscoverychangegroup\\n* discovery.findcommonincoming\\n\\nFor now, we build on top of existing discovery mechanisms. The\\nnew wire protocol should be capable of doing things more efficiently.\\nBut I'd rather defer on this problem.\\n\\nTo foster the transition, we invent a fake capability on the HTTPv2\\npeer and have the main pull code in exchange.py call into exchangev2\\nwhen the new wire protocol is being used.\",\"testPlan\":\"\",\"lineCount\":\"145\",\"activeDiffPHID\":\"PHID-DIFF-kg2rt6kiekgo5rgyeu5n\",\"diffs\":[\"11058\",\"10961\",\"10793\"],\"commits\":[\"PHID-CMIT-kvz2f3rczvi6exmvtyaq\"],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-cgcdlc6c3gpxapbmkwa2\",\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-a77jfv32jtxfwxngd6bd\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\"}],\"error_code\":null,\"error_info\":null}"
-                }
-            }, 
-            "request": {
-                "headers": {
-                    "content-length": [
-                        "58"
-                    ], 
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ], 
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ], 
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
-                    ], 
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ]
-                }, 
-                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
-                "method": "POST", 
-                "body": "ids%5B0%5D=4480&api.token=cli-hahayouwish"
-            }
-        }, 
-        {
-            "response": {
-                "headers": {
-                    "cache-control": [
-                        "no-store"
-                    ], 
-                    "set-cookie": [
-                        "phsid=A%2Floppdxhbjv46vg5mwnf2squrj4vgegsce5fwhhb6; expires=Thu, 14-Sep-2023 04:15:57 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
-                    ], 
-                    "server": [
-                        "Apache/2.4.10 (Debian)"
-                    ], 
-                    "content-type": [
-                        "application/json"
-                    ], 
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ], 
-                    "x-content-type-options": [
-                        "nosniff"
-                    ], 
-                    "date": [
-                        "Sat, 15 Sep 2018 04:15:57 GMT"
-                    ], 
-                    "strict-transport-security": [
-                        "max-age=0; includeSubdomains; preload"
-                    ], 
-                    "x-frame-options": [
-                        "Deny"
-                    ], 
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                }, 
-                "status": {
-                    "code": 200, 
-                    "message": "OK"
-                }, 
-                "body": {
-                    "string": "{\"result\":{\"11058\":{\"id\":\"11058\",\"revisionID\":\"4480\",\"dateCreated\":\"1536771503\",\"dateModified\":\"1536981352\",\"sourceControlBaseRevision\":\"a5de21c9e3703f8e8eb064bd7d893ff2f703c66a\",\"sourceControlPath\":null,\"sourceControlSystem\":\"hg\",\"branch\":null,\"bookmark\":null,\"creationMethod\":\"commit\",\"description\":\"rHGa86d21e70b2b79d5e7e1085e5e755b4b26b8676d\",\"unitStatus\":\"6\",\"lintStatus\":\"6\",\"changes\":[{\"id\":\"24371\",\"metadata\":{\"line:first\":59},\"oldPath\":\"tests\\/wireprotohelpers.sh\",\"currentPath\":\"tests\\/wireprotohelpers.sh\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":[],\"type\":\"2\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"7\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"1\",\"newOffset\":\"1\",\"oldLength\":\"58\",\"newLength\":\"65\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\" HTTPV2=exp-http-v2-0001\\n MEDIATYPE=application\\/mercurial-exp-framing-0005\\n \\n sendhttpraw() {\\n   hg --verbose debugwireproto --peer raw http:\\/\\/$LOCALIP:$HGPORT\\/\\n }\\n \\n sendhttpv2peer() {\\n   hg --verbose debugwireproto --nologhandshake --peer http2 http:\\/\\/$LOCALIP:$HGPORT\\/\\n }\\n \\n sendhttpv2peerhandshake() {\\n   hg --verbose debugwireproto --peer http2 http:\\/\\/$LOCALIP:$HGPORT\\/\\n }\\n \\n cat \\u003e dummycommands.py \\u003c\\u003c EOF\\n from mercurial import (\\n     wireprototypes,\\n     wireprotov1server,\\n     wireprotov2server,\\n )\\n \\n @wireprotov1server.wireprotocommand(b'customreadonly', permission=b'pull')\\n def customreadonlyv1(repo, proto):\\n     return wireprototypes.bytesresponse(b'customreadonly bytes response')\\n \\n @wireprotov2server.wireprotocommand(b'customreadonly', permission=b'pull')\\n def customreadonlyv2(repo, proto):\\n     yield b'customreadonly bytes response'\\n \\n @wireprotov1server.wireprotocommand(b'customreadwrite', permission=b'push')\\n def customreadwrite(repo, proto):\\n     return wireprototypes.bytesresponse(b'customreadwrite bytes response')\\n \\n @wireprotov2server.wireprotocommand(b'customreadwrite', permission=b'push')\\n def customreadwritev2(repo, proto):\\n     yield b'customreadwrite bytes response'\\n EOF\\n \\n cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n [extensions]\\n drawdag = $TESTDIR\\/drawdag.py\\n EOF\\n \\n enabledummycommands() {\\n   cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n [extensions]\\n dummycommands = $TESTTMP\\/dummycommands.py\\n EOF\\n }\\n \\n enablehttpv2() {\\n   cat \\u003e\\u003e $1\\/.hg\\/hgrc \\u003c\\u003c EOF\\n [experimental]\\n web.apiserver = true\\n web.api.http-v2 = true\\n EOF\\n }\\n+\\n+enablehttpv2client() {\\n+  cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n+[experimental]\\n+httppeer.advertise-v2 = true\\n+EOF\\n+}\\n\"}]},{\"id\":\"24370\",\"metadata\":{\"line:first\":1},\"oldPath\":null,\"currentPath\":\"tests\\/test-wireproto-exchangev2.t\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"53\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"53\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+Tests for wire protocol version 2 exchange.\\n+Tests in this file should be folded into existing tests once protocol\\n+v2 has enough features that it can be enabled 
via #testcase in existing\\n+tests.\\n+\\n+  $ . $TESTDIR\\/wireprotohelpers.sh\\n+  $ enablehttpv2client\\n+\\n+  $ hg init server-simple\\n+  $ enablehttpv2 server-simple\\n+  $ cd server-simple\\n+  $ cat \\u003e\\u003e .hg\\/hgrc \\u003c\\u003c EOF\\n+  \\u003e [phases]\\n+  \\u003e publish = false\\n+  \\u003e EOF\\n+  $ echo a0 \\u003e a\\n+  $ echo b0 \\u003e b\\n+  $ hg -q commit -A -m 'commit 0'\\n+\\n+  $ echo a1 \\u003e a\\n+  $ hg commit -m 'commit 1'\\n+  $ hg phase --public -r .\\n+  $ echo a2 \\u003e a\\n+  $ hg commit -m 'commit 2'\\n+\\n+  $ hg -q up -r 0\\n+  $ echo b1 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 1'\\n+  $ echo b2 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 2'\\n+\\n+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log\\n+  $ cat hg.pid \\u003e $DAEMON_PIDS\\n+\\n+  $ cd ..\\n+\\n+Test basic clone\\n+\\n+  $ hg --debug clone -U http:\\/\\/localhost:$HGPORT client-simple\\n+  using http:\\/\\/localhost:$HGPORT\\/\\n+  sending capabilities command\\n+  query 1; heads\\n+  sending 2 commands\\n+  sending command heads: {}\\n+  sending command known: {\\n+    'nodes': []\\n+  }\\n+  received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)\\n+  received frame(size=43; request=1; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)\\n+  received frame(size=11; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=1; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)\\n\"}]},{\"id\":\"24369\",\"metadata\":{\"line:first\":805},\"oldPath\":\"mercurial\\/httppeer.py\",\"currentPath\":\"mercurial\\/httppeer.py\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":[],\"type\":\"2\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"2\",\"delLines\":\"1\",\"hunks\":[{\"oldOffset\":\"1\",\"newOffset\":\"1\",\"oldLength\":\"1006\",\"newLength\":\"1007\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\" # httppeer.py - HTTP repository proxy classes for mercurial\\n #\\n # Copyright 2005, 2006 Matt Mackall \\u003cmpm@selenic.com\\u003e\\n # Copyright 2006 Vadim Gelfer \\u003cvadim.gelfer@gmail.com\\u003e\\n #\\n # This software may be used and distributed according to the terms of the\\n # GNU General Public License version 2 or any later version.\\n \\n from __future__ import absolute_import\\n \\n import errno\\n import io\\n import os\\n import socket\\n import struct\\n import weakref\\n \\n from .i18n import _\\n from . import (\\n     bundle2,\\n     error,\\n     httpconnection,\\n     pycompat,\\n     repository,\\n     statichttprepo,\\n     url as urlmod,\\n     util,\\n     wireprotoframing,\\n     wireprototypes,\\n     wireprotov1peer,\\n     wireprotov2peer,\\n     wireprotov2server,\\n )\\n from .utils import (\\n     cborutil,\\n     interfaceutil,\\n     stringutil,\\n )\\n \\n httplib = util.httplib\\n urlerr = util.urlerr\\n urlreq = util.urlreq\\n \\n def encodevalueinheaders(value, header, limit):\\n     \\\"\\\"\\\"Encode a string value into multiple HTTP headers.\\n \\n     ``value`` will be encoded into 1 or more HTTP headers with the names\\n     ``header-\\u003cN\\u003e`` where ``\\u003cN\\u003e`` is an integer starting at 1. 
Each header\\n     name + value will be at most ``limit`` bytes long.\\n \\n     Returns an iterable of 2-tuples consisting of header names and\\n     values as native strings.\\n     \\\"\\\"\\\"\\n     # HTTP Headers are ASCII. Python 3 requires them to be unicodes,\\n     # not bytes. This function always takes bytes in as arguments.\\n     fmt = pycompat.strurl(header) + r'-%s'\\n     # Note: it is *NOT* a bug that the last bit here is a bytestring\\n     # and not a unicode: we're just getting the encoded length anyway,\\n     # and using an r-string to make it portable between Python 2 and 3\\n     # doesn't work because then the \\\\r is a literal backslash-r\\n     # instead of a carriage return.\\n     valuelen = limit - len(fmt % r'000') - len(': \\\\r\\\\n')\\n     result = []\\n \\n     n = 0\\n     for i in pycompat.xrange(0, len(value), valuelen):\\n         n += 1\\n         result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))\\n \\n     return result\\n \\n def _wraphttpresponse(resp):\\n     \\\"\\\"\\\"Wrap an HTTPResponse with common error handlers.\\n \\n     This ensures that any I\\/O from any consumer raises the appropriate\\n     error and messaging.\\n     \\\"\\\"\\\"\\n     origread = resp.read\\n \\n     class readerproxy(resp.__class__):\\n         def read(self, size=None):\\n             try:\\n                 return origread(size)\\n             except httplib.IncompleteRead as e:\\n                 # e.expected is an integer if length known or None otherwise.\\n                 if e.expected:\\n                     got = len(e.partial)\\n                     total = e.expected + got\\n                     msg = _('HTTP request error (incomplete response; '\\n                             'expected %d bytes got %d)') % (total, got)\\n                 else:\\n                     msg = _('HTTP request error (incomplete response)')\\n \\n                 raise error.PeerTransportError(\\n                     msg,\\n                     hint=_('this may be an intermittent network failure; '\\n                            'if the error persists, consider contacting the '\\n                            'network or server operator'))\\n             except httplib.HTTPException as e:\\n                 raise error.PeerTransportError(\\n                     _('HTTP request error (%s)') % e,\\n                     hint=_('this may be an intermittent network failure; '\\n                            'if the error persists, consider contacting the '\\n                            'network or server operator'))\\n \\n     resp.__class__ = readerproxy\\n \\n class _multifile(object):\\n     def __init__(self, *fileobjs):\\n         for f in fileobjs:\\n             if not util.safehasattr(f, 'length'):\\n                 raise ValueError(\\n                     '_multifile only supports file objects that '\\n                     'have a length but this one does not:', type(f), f)\\n         self._fileobjs = fileobjs\\n         self._index = 0\\n \\n     @property\\n     def length(self):\\n         return sum(f.length for f in self._fileobjs)\\n \\n     def read(self, amt=None):\\n         if amt \\u003c= 0:\\n             return ''.join(f.read() for f in self._fileobjs)\\n         parts = []\\n         while amt and self._index \\u003c len(self._fileobjs):\\n             parts.append(self._fileobjs[self._index].read(amt))\\n             got = len(parts[-1])\\n             if got \\u003c amt:\\n                 self._index += 1\\n             amt -= got\\n   
      return ''.join(parts)\\n \\n     def seek(self, offset, whence=os.SEEK_SET):\\n         if whence != os.SEEK_SET:\\n             raise NotImplementedError(\\n                 '_multifile does not support anything other'\\n                 ' than os.SEEK_SET for whence on seek()')\\n         if offset != 0:\\n             raise NotImplementedError(\\n                 '_multifile only supports seeking to start, but that '\\n                 'could be fixed if you need it')\\n         for f in self._fileobjs:\\n             f.seek(0)\\n         self._index = 0\\n \\n def makev1commandrequest(ui, requestbuilder, caps, capablefn,\\n                          repobaseurl, cmd, args):\\n     \\\"\\\"\\\"Make an HTTP request to run a command for a version 1 client.\\n \\n     ``caps`` is a set of known server capabilities. The value may be\\n     None if capabilities are not yet known.\\n \\n     ``capablefn`` is a function to evaluate a capability.\\n \\n     ``cmd``, ``args``, and ``data`` define the command, its arguments, and\\n     raw data to pass to it.\\n     \\\"\\\"\\\"\\n     if cmd == 'pushkey':\\n         args['data'] = ''\\n     data = args.pop('data', None)\\n     headers = args.pop('headers', {})\\n \\n     ui.debug(\\\"sending %s command\\\\n\\\" % cmd)\\n     q = [('cmd', cmd)]\\n     headersize = 0\\n     # Important: don't use self.capable() here or else you end up\\n     # with infinite recursion when trying to look up capabilities\\n     # for the first time.\\n     postargsok = caps is not None and 'httppostargs' in caps\\n \\n     # Send arguments via POST.\\n     if postargsok and args:\\n         strargs = urlreq.urlencode(sorted(args.items()))\\n         if not data:\\n             data = strargs\\n         else:\\n             if isinstance(data, bytes):\\n                 i = io.BytesIO(data)\\n                 i.length = len(data)\\n                 data = i\\n             argsio = io.BytesIO(strargs)\\n             argsio.length = len(strargs)\\n             data = _multifile(argsio, data)\\n         headers[r'X-HgArgs-Post'] = len(strargs)\\n     elif args:\\n         # Calling self.capable() can infinite loop if we are calling\\n         # \\\"capabilities\\\". But that command should never accept wire\\n         # protocol arguments. 
So this should never happen.\\n         assert cmd != 'capabilities'\\n         httpheader = capablefn('httpheader')\\n         if httpheader:\\n             headersize = int(httpheader.split(',', 1)[0])\\n \\n         # Send arguments via HTTP headers.\\n         if headersize \\u003e 0:\\n             # The headers can typically carry more data than the URL.\\n             encargs = urlreq.urlencode(sorted(args.items()))\\n             for header, value in encodevalueinheaders(encargs, 'X-HgArg',\\n                                                       headersize):\\n                 headers[header] = value\\n         # Send arguments via query string (Mercurial \\u003c1.9).\\n         else:\\n             q += sorted(args.items())\\n \\n     qs = '?%s' % urlreq.urlencode(q)\\n     cu = \\\"%s%s\\\" % (repobaseurl, qs)\\n     size = 0\\n     if util.safehasattr(data, 'length'):\\n         size = data.length\\n     elif data is not None:\\n         size = len(data)\\n     if data is not None and r'Content-Type' not in headers:\\n         headers[r'Content-Type'] = r'application\\/mercurial-0.1'\\n \\n     # Tell the server we accept application\\/mercurial-0.2 and multiple\\n     # compression formats if the server is capable of emitting those\\n     # payloads.\\n     # Note: Keep this set empty by default, as client advertisement of\\n     # protocol parameters should only occur after the handshake.\\n     protoparams = set()\\n \\n     mediatypes = set()\\n     if caps is not None:\\n         mt = capablefn('httpmediatype')\\n         if mt:\\n             protoparams.add('0.1')\\n             mediatypes = set(mt.split(','))\\n \\n         protoparams.add('partial-pull')\\n \\n     if '0.2tx' in mediatypes:\\n         protoparams.add('0.2')\\n \\n     if '0.2tx' in mediatypes and capablefn('compression'):\\n         # We \\/could\\/ compare supported compression formats and prune\\n         # non-mutually supported or error if nothing is mutually supported.\\n         # For now, send the full list to the server and have it error.\\n         comps = [e.wireprotosupport().name for e in\\n                  util.compengines.supportedwireengines(util.CLIENTROLE)]\\n         protoparams.add('comp=%s' % ','.join(comps))\\n \\n     if protoparams:\\n         protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),\\n                                             'X-HgProto',\\n                                             headersize or 1024)\\n         for header, value in protoheaders:\\n             headers[header] = value\\n \\n     varyheaders = []\\n     for header in headers:\\n         if header.lower().startswith(r'x-hg'):\\n             varyheaders.append(header)\\n \\n     if varyheaders:\\n         headers[r'Vary'] = r','.join(sorted(varyheaders))\\n \\n     req = requestbuilder(pycompat.strurl(cu), data, headers)\\n \\n     if data is not None:\\n         ui.debug(\\\"sending %d bytes\\\\n\\\" % size)\\n         req.add_unredirected_header(r'Content-Length', r'%d' % size)\\n \\n     return req, cu, qs\\n \\n def _reqdata(req):\\n     \\\"\\\"\\\"Get request data, if any. 
If no data, returns None.\\\"\\\"\\\"\\n     if pycompat.ispy3:\\n         return req.data\\n     if not req.has_data():\\n         return None\\n     return req.get_data()\\n \\n def sendrequest(ui, opener, req):\\n     \\\"\\\"\\\"Send a prepared HTTP request.\\n \\n     Returns the response object.\\n     \\\"\\\"\\\"\\n     dbg = ui.debug\\n     if (ui.debugflag\\n         and ui.configbool('devel', 'debug.peer-request')):\\n         line = 'devel-peer-request: %s\\\\n'\\n         dbg(line % '%s %s' % (pycompat.bytesurl(req.get_method()),\\n                               pycompat.bytesurl(req.get_full_url())))\\n         hgargssize = None\\n \\n         for header, value in sorted(req.header_items()):\\n             header = pycompat.bytesurl(header)\\n             value = pycompat.bytesurl(value)\\n             if header.startswith('X-hgarg-'):\\n                 if hgargssize is None:\\n                     hgargssize = 0\\n                 hgargssize += len(value)\\n             else:\\n                 dbg(line % '  %s %s' % (header, value))\\n \\n         if hgargssize is not None:\\n             dbg(line % '  %d bytes of commands arguments in headers'\\n                 % hgargssize)\\n         data = _reqdata(req)\\n         if data is not None:\\n             length = getattr(data, 'length', None)\\n             if length is None:\\n                 length = len(data)\\n             dbg(line % '  %d bytes of data' % length)\\n \\n         start = util.timer()\\n \\n     res = None\\n     try:\\n         res = opener.open(req)\\n     except urlerr.httperror as inst:\\n         if inst.code == 401:\\n             raise error.Abort(_('authorization failed'))\\n         raise\\n     except httplib.HTTPException as inst:\\n         ui.debug('http error requesting %s\\\\n' %\\n                  util.hidepassword(req.get_full_url()))\\n         ui.traceback()\\n         raise IOError(None, inst)\\n     finally:\\n         if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):\\n             code = res.code if res else -1\\n             dbg(line % '  finished in %.4f seconds (%d)'\\n                 % (util.timer() - start, code))\\n \\n     # Insert error handlers for common I\\/O failures.\\n     _wraphttpresponse(res)\\n \\n     return res\\n \\n class RedirectedRepoError(error.RepoError):\\n     def __init__(self, msg, respurl):\\n         super(RedirectedRepoError, self).__init__(msg)\\n         self.respurl = respurl\\n \\n def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,\\n                            allowcbor=False):\\n     # record the url we got redirected to\\n     redirected = False\\n     respurl = pycompat.bytesurl(resp.geturl())\\n     if respurl.endswith(qs):\\n         respurl = respurl[:-len(qs)]\\n         qsdropped = False\\n     else:\\n         qsdropped = True\\n \\n     if baseurl.rstrip('\\/') != respurl.rstrip('\\/'):\\n         redirected = True\\n         if not ui.quiet:\\n             ui.warn(_('real URL is %s\\\\n') % respurl)\\n \\n     try:\\n         proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))\\n     except AttributeError:\\n         proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))\\n \\n     safeurl = util.hidepassword(baseurl)\\n     if proto.startswith('application\\/hg-error'):\\n         raise error.OutOfBandError(resp.read())\\n \\n     # Pre 1.0 versions of Mercurial used text\\/plain and\\n     # application\\/hg-changegroup. 
We don't support such old servers.\\n     if not proto.startswith('application\\/mercurial-'):\\n         ui.debug(\\\"requested URL: '%s'\\\\n\\\" % util.hidepassword(requrl))\\n         msg = _(\\\"'%s' does not appear to be an hg repository:\\\\n\\\"\\n                 \\\"---%%\\u003c--- (%s)\\\\n%s\\\\n---%%\\u003c---\\\\n\\\") % (\\n             safeurl, proto or 'no content-type', resp.read(1024))\\n \\n         # Some servers may strip the query string from the redirect. We\\n         # raise a special error type so callers can react to this specially.\\n         if redirected and qsdropped:\\n             raise RedirectedRepoError(msg, respurl)\\n         else:\\n             raise error.RepoError(msg)\\n \\n     try:\\n         subtype = proto.split('-', 1)[1]\\n \\n         # Unless we end up supporting CBOR in the legacy wire protocol,\\n         # this should ONLY be encountered for the initial capabilities\\n         # request during handshake.\\n         if subtype == 'cbor':\\n             if allowcbor:\\n                 return respurl, proto, resp\\n             else:\\n                 raise error.RepoError(_('unexpected CBOR response from '\\n                                         'server'))\\n \\n         version_info = tuple([int(n) for n in subtype.split('.')])\\n     except ValueError:\\n         raise error.RepoError(_(\\\"'%s' sent a broken Content-Type \\\"\\n                                 \\\"header (%s)\\\") % (safeurl, proto))\\n \\n     # TODO consider switching to a decompression reader that uses\\n     # generators.\\n     if version_info == (0, 1):\\n         if compressible:\\n             resp = util.compengines['zlib'].decompressorreader(resp)\\n \\n     elif version_info == (0, 2):\\n         # application\\/mercurial-0.2 always identifies the compression\\n         # engine in the payload header.\\n         elen = struct.unpack('B', util.readexactly(resp, 1))[0]\\n         ename = util.readexactly(resp, elen)\\n         engine = util.compengines.forwiretype(ename)\\n \\n         resp = engine.decompressorreader(resp)\\n     else:\\n         raise error.RepoError(_(\\\"'%s' uses newer protocol %s\\\") %\\n                               (safeurl, subtype))\\n \\n     return respurl, proto, resp\\n \\n class httppeer(wireprotov1peer.wirepeer):\\n     def __init__(self, ui, path, url, opener, requestbuilder, caps):\\n         self.ui = ui\\n         self._path = path\\n         self._url = url\\n         self._caps = caps\\n         self._urlopener = opener\\n         self._requestbuilder = requestbuilder\\n \\n     def __del__(self):\\n         for h in self._urlopener.handlers:\\n             h.close()\\n             getattr(h, \\\"close_all\\\", lambda: None)()\\n \\n     # Begin of ipeerconnection interface.\\n \\n     def url(self):\\n         return self._path\\n \\n     def local(self):\\n         return None\\n \\n     def peer(self):\\n         return self\\n \\n     def canpush(self):\\n         return True\\n \\n     def close(self):\\n         pass\\n \\n     # End of ipeerconnection interface.\\n \\n     # Begin of ipeercommands interface.\\n \\n     def capabilities(self):\\n         return self._caps\\n \\n     # End of ipeercommands interface.\\n \\n     def _callstream(self, cmd, _compressible=False, **args):\\n         args = pycompat.byteskwargs(args)\\n \\n         req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,\\n                                            self._caps, self.capable,\\n                              
              self._url, cmd, args)\\n \\n         resp = sendrequest(self.ui, self._urlopener, req)\\n \\n         self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,\\n                                                      resp, _compressible)\\n \\n         return resp\\n \\n     def _call(self, cmd, **args):\\n         fp = self._callstream(cmd, **args)\\n         try:\\n             return fp.read()\\n         finally:\\n             # if using keepalive, allow connection to be reused\\n             fp.close()\\n \\n     def _callpush(self, cmd, cg, **args):\\n         # have to stream bundle to a temp file because we do not have\\n         # http 1.1 chunked transfer.\\n \\n         types = self.capable('unbundle')\\n         try:\\n             types = types.split(',')\\n         except AttributeError:\\n             # servers older than d1b16a746db6 will send 'unbundle' as a\\n             # boolean capability. They only support headerless\\/uncompressed\\n             # bundles.\\n             types = [\\\"\\\"]\\n         for x in types:\\n             if x in bundle2.bundletypes:\\n                 type = x\\n                 break\\n \\n         tempname = bundle2.writebundle(self.ui, cg, None, type)\\n         fp = httpconnection.httpsendfile(self.ui, tempname, \\\"rb\\\")\\n         headers = {r'Content-Type': r'application\\/mercurial-0.1'}\\n \\n         try:\\n             r = self._call(cmd, data=fp, headers=headers, **args)\\n             vals = r.split('\\\\n', 1)\\n             if len(vals) \\u003c 2:\\n                 raise error.ResponseError(_(\\\"unexpected response:\\\"), r)\\n             return vals\\n         except urlerr.httperror:\\n             # Catch and re-raise these so we don't try and treat them\\n             # like generic socket errors. 
They lack any values in\\n             # .args on Python 3 which breaks our socket.error block.\\n             raise\\n         except socket.error as err:\\n             if err.args[0] in (errno.ECONNRESET, errno.EPIPE):\\n                 raise error.Abort(_('push failed: %s') % err.args[1])\\n             raise error.Abort(err.args[1])\\n         finally:\\n             fp.close()\\n             os.unlink(tempname)\\n \\n     def _calltwowaystream(self, cmd, fp, **args):\\n         fh = None\\n         fp_ = None\\n         filename = None\\n         try:\\n             # dump bundle to disk\\n             fd, filename = pycompat.mkstemp(prefix=\\\"hg-bundle-\\\", suffix=\\\".hg\\\")\\n             fh = os.fdopen(fd, r\\\"wb\\\")\\n             d = fp.read(4096)\\n             while d:\\n                 fh.write(d)\\n                 d = fp.read(4096)\\n             fh.close()\\n             # start http push\\n             fp_ = httpconnection.httpsendfile(self.ui, filename, \\\"rb\\\")\\n             headers = {r'Content-Type': r'application\\/mercurial-0.1'}\\n             return self._callstream(cmd, data=fp_, headers=headers, **args)\\n         finally:\\n             if fp_ is not None:\\n                 fp_.close()\\n             if fh is not None:\\n                 fh.close()\\n                 os.unlink(filename)\\n \\n     def _callcompressable(self, cmd, **args):\\n         return self._callstream(cmd, _compressible=True, **args)\\n \\n     def _abort(self, exception):\\n         raise exception\\n \\n def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests):\\n     reactor = wireprotoframing.clientreactor(hasmultiplesend=False,\\n                                              buffersends=True)\\n \\n     handler = wireprotov2peer.clienthandler(ui, reactor)\\n \\n     url = '%s\\/%s' % (apiurl, permission)\\n \\n     if len(requests) \\u003e 1:\\n         url += '\\/multirequest'\\n     else:\\n         url += '\\/%s' % requests[0][0]\\n \\n     ui.debug('sending %d commands\\\\n' % len(requests))\\n     for command, args, f in requests:\\n         ui.debug('sending command %s: %s\\\\n' % (\\n             command, stringutil.pprint(args, indent=2)))\\n         assert not list(handler.callcommand(command, args, f))\\n \\n     # TODO stream this.\\n     body = b''.join(map(bytes, handler.flushcommands()))\\n \\n     # TODO modify user-agent to reflect v2\\n     headers = {\\n         r'Accept': wireprotov2server.FRAMINGTYPE,\\n         r'Content-Type': wireprotov2server.FRAMINGTYPE,\\n     }\\n \\n     req = requestbuilder(pycompat.strurl(url), body, headers)\\n     req.add_unredirected_header(r'Content-Length', r'%d' % len(body))\\n \\n     try:\\n         res = opener.open(req)\\n     except urlerr.httperror as e:\\n         if e.code == 401:\\n             raise error.Abort(_('authorization failed'))\\n \\n         raise\\n     except httplib.HTTPException as e:\\n         ui.traceback()\\n         raise IOError(None, e)\\n \\n     return handler, res\\n \\n class queuedcommandfuture(pycompat.futures.Future):\\n     \\\"\\\"\\\"Wraps result() on command futures to trigger submission on call.\\\"\\\"\\\"\\n \\n     def result(self, timeout=None):\\n         if self.done():\\n             return pycompat.futures.Future.result(self, timeout)\\n \\n         self._peerexecutor.sendcommands()\\n \\n         # sendcommands() will restore the original __class__ and self.result\\n         # will resolve to Future.result.\\n         return self.result(timeout)\\n 
\\n @interfaceutil.implementer(repository.ipeercommandexecutor)\\n class httpv2executor(object):\\n     def __init__(self, ui, opener, requestbuilder, apiurl, descriptor):\\n         self._ui = ui\\n         self._opener = opener\\n         self._requestbuilder = requestbuilder\\n         self._apiurl = apiurl\\n         self._descriptor = descriptor\\n         self._sent = False\\n         self._closed = False\\n         self._neededpermissions = set()\\n         self._calls = []\\n         self._futures = weakref.WeakSet()\\n         self._responseexecutor = None\\n         self._responsef = None\\n \\n     def __enter__(self):\\n         return self\\n \\n     def __exit__(self, exctype, excvalue, exctb):\\n         self.close()\\n \\n     def callcommand(self, command, args):\\n         if self._sent:\\n             raise error.ProgrammingError('callcommand() cannot be used after '\\n                                          'commands are sent')\\n \\n         if self._closed:\\n             raise error.ProgrammingError('callcommand() cannot be used after '\\n                                          'close()')\\n \\n         # The service advertises which commands are available. So if we attempt\\n         # to call an unknown command or pass an unknown argument, we can screen\\n         # for this.\\n         if command not in self._descriptor['commands']:\\n             raise error.ProgrammingError(\\n                 'wire protocol command %s is not available' % command)\\n \\n         cmdinfo = self._descriptor['commands'][command]\\n         unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))\\n \\n         if unknownargs:\\n             raise error.ProgrammingError(\\n                 'wire protocol command %s does not accept argument: %s' % (\\n                     command, ', '.join(sorted(unknownargs))))\\n \\n         self._neededpermissions |= set(cmdinfo['permissions'])\\n \\n         # TODO we \\/could\\/ also validate types here, since the API descriptor\\n         # includes types...\\n \\n         f = pycompat.futures.Future()\\n \\n         # Monkeypatch it so result() triggers sendcommands(), otherwise result()\\n         # could deadlock.\\n         f.__class__ = queuedcommandfuture\\n         f._peerexecutor = self\\n \\n         self._futures.add(f)\\n         self._calls.append((command, args, f))\\n \\n         return f\\n \\n     def sendcommands(self):\\n         if self._sent:\\n             return\\n \\n         if not self._calls:\\n             return\\n \\n         self._sent = True\\n \\n         # Unhack any future types so caller sees a clean type and so we\\n         # break reference cycle.\\n         for f in self._futures:\\n             if isinstance(f, queuedcommandfuture):\\n                 f.__class__ = pycompat.futures.Future\\n                 f._peerexecutor = None\\n \\n         # Mark the future as running and filter out cancelled futures.\\n         calls = [(command, args, f)\\n                  for command, args, f in self._calls\\n                  if f.set_running_or_notify_cancel()]\\n \\n         # Clear out references, prevent improper object usage.\\n         self._calls = None\\n \\n         if not calls:\\n             return\\n \\n         permissions = set(self._neededpermissions)\\n \\n         if 'push' in permissions and 'pull' in permissions:\\n             permissions.remove('pull')\\n \\n         if len(permissions) \\u003e 1:\\n             raise error.RepoError(_('cannot make request requiring multiple '\\n  
                                   'permissions: %s') %\\n                                   _(', ').join(sorted(permissions)))\\n \\n         permission = {\\n             'push': 'rw',\\n             'pull': 'ro',\\n         }[permissions.pop()]\\n \\n         handler, resp = sendv2request(\\n             self._ui, self._opener, self._requestbuilder, self._apiurl,\\n             permission, calls)\\n \\n         # TODO we probably want to validate the HTTP code, media type, etc.\\n \\n         self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)\\n         self._responsef = self._responseexecutor.submit(self._handleresponse,\\n                                                         handler, resp)\\n \\n     def close(self):\\n         if self._closed:\\n             return\\n \\n         self.sendcommands()\\n \\n         self._closed = True\\n \\n         if not self._responsef:\\n             return\\n \\n         # TODO ^C here may not result in immediate program termination.\\n \\n         try:\\n             self._responsef.result()\\n         finally:\\n             self._responseexecutor.shutdown(wait=True)\\n             self._responsef = None\\n             self._responseexecutor = None\\n \\n             # If any of our futures are still in progress, mark them as\\n             # errored, otherwise a result() could wait indefinitely.\\n             for f in self._futures:\\n                 if not f.done():\\n                     f.set_exception(error.ResponseError(\\n                         _('unfulfilled command response')))\\n \\n             self._futures = None\\n \\n     def _handleresponse(self, handler, resp):\\n         # Called in a thread to read the response.\\n \\n         while handler.readframe(resp):\\n             pass\\n \\n # TODO implement interface for version 2 peers\\n @interfaceutil.implementer(repository.ipeerconnection,\\n                            repository.ipeercapabilities,\\n                            repository.ipeerrequests)\\n class httpv2peer(object):\\n     def __init__(self, ui, repourl, apipath, opener, requestbuilder,\\n                  apidescriptor):\\n         self.ui = ui\\n \\n         if repourl.endswith('\\/'):\\n             repourl = repourl[:-1]\\n \\n         self._url = repourl\\n         self._apipath = apipath\\n         self._apiurl = '%s\\/%s' % (repourl, apipath)\\n         self._opener = opener\\n         self._requestbuilder = requestbuilder\\n         self._descriptor = apidescriptor\\n \\n     # Start of ipeerconnection.\\n \\n     def url(self):\\n         return self._url\\n \\n     def local(self):\\n         return None\\n \\n     def peer(self):\\n         return self\\n \\n     def canpush(self):\\n         # TODO change once implemented.\\n         return False\\n \\n     def close(self):\\n         pass\\n \\n     # End of ipeerconnection.\\n \\n     # Start of ipeercapabilities.\\n \\n     def capable(self, name):\\n         # The capabilities used internally historically map to capabilities\\n         # advertised from the \\\"capabilities\\\" wire protocol command. 
However,\\n         # version 2 of that command works differently.\\n \\n         # Maps to commands that are available.\\n         if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):\\n             return True\\n \\n         # Other concepts.\\n-        if name in ('bundle2',):\\n+        # TODO remove exchangev2 once we have a command implemented.\\n+        if name in ('bundle2', 'exchangev2'):\\n             return True\\n \\n         # Alias command-* to presence of command of that name.\\n         if name.startswith('command-'):\\n             return name[len('command-'):] in self._descriptor['commands']\\n \\n         return False\\n \\n     def requirecap(self, name, purpose):\\n         if self.capable(name):\\n             return\\n \\n         raise error.CapabilityError(\\n             _('cannot %s; client or remote repository does not support the %r '\\n               'capability') % (purpose, name))\\n \\n     # End of ipeercapabilities.\\n \\n     def _call(self, name, **args):\\n         with self.commandexecutor() as e:\\n             return e.callcommand(name, args).result()\\n \\n     def commandexecutor(self):\\n         return httpv2executor(self.ui, self._opener, self._requestbuilder,\\n                               self._apiurl, self._descriptor)\\n \\n # Registry of API service names to metadata about peers that handle it.\\n #\\n # The following keys are meaningful:\\n #\\n # init\\n #    Callable receiving (ui, repourl, servicepath, opener, requestbuilder,\\n #                        apidescriptor) to create a peer.\\n #\\n # priority\\n #    Integer priority for the service. If we could choose from multiple\\n #    services, we choose the one with the highest priority.\\n API_PEERS = {\\n     wireprototypes.HTTP_WIREPROTO_V2: {\\n         'init': httpv2peer,\\n         'priority': 50,\\n     },\\n }\\n \\n def performhandshake(ui, url, opener, requestbuilder):\\n     # The handshake is a request to the capabilities command.\\n \\n     caps = None\\n     def capable(x):\\n         raise error.ProgrammingError('should not be called')\\n \\n     args = {}\\n \\n     # The client advertises support for newer protocols by adding an\\n     # X-HgUpgrade-* header with a list of supported APIs and an\\n     # X-HgProto-* header advertising which serializing formats it supports.\\n     # We only support the HTTP version 2 transport and CBOR responses for\\n     # now.\\n     advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')\\n \\n     if advertisev2:\\n         args['headers'] = {\\n             r'X-HgProto-1': r'cbor',\\n         }\\n \\n         args['headers'].update(\\n             encodevalueinheaders(' '.join(sorted(API_PEERS)),\\n                                  'X-HgUpgrade',\\n                                  # We don't know the header limit this early.\\n                                  # So make it small.\\n                                  1024))\\n \\n     req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,\\n                                            capable, url, 'capabilities',\\n                                            args)\\n     resp = sendrequest(ui, opener, req)\\n \\n     # The server may redirect us to the repo root, stripping the\\n     # ?cmd=capabilities query string from the URL. 
The server would likely\\n     # return HTML in this case and ``parsev1commandresponse()`` would raise.\\n     # We catch this special case and re-issue the capabilities request against\\n     # the new URL.\\n     #\\n     # We should ideally not do this, as a redirect that drops the query\\n     # string from the URL is arguably a server bug. (Garbage in, garbage out).\\n     # However,  Mercurial clients for several years appeared to handle this\\n     # issue without behavior degradation. And according to issue 5860, it may\\n     # be a longstanding bug in some server implementations. So we allow a\\n     # redirect that drops the query string to \\\"just work.\\\"\\n     try:\\n         respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,\\n                                                    compressible=False,\\n                                                    allowcbor=advertisev2)\\n     except RedirectedRepoError as e:\\n         req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,\\n                                                capable, e.respurl,\\n                                                'capabilities', args)\\n         resp = sendrequest(ui, opener, req)\\n         respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,\\n                                                    compressible=False,\\n                                                    allowcbor=advertisev2)\\n \\n     try:\\n         rawdata = resp.read()\\n     finally:\\n         resp.close()\\n \\n     if not ct.startswith('application\\/mercurial-'):\\n         raise error.ProgrammingError('unexpected content-type: %s' % ct)\\n \\n     if advertisev2:\\n         if ct == 'application\\/mercurial-cbor':\\n             try:\\n                 info = cborutil.decodeall(rawdata)[0]\\n             except cborutil.CBORDecodeError:\\n                 raise error.Abort(_('error decoding CBOR from remote server'),\\n                                   hint=_('try again and consider contacting '\\n                                          'the server operator'))\\n \\n         # We got a legacy response. 
That's fine.\\n         elif ct in ('application\\/mercurial-0.1', 'application\\/mercurial-0.2'):\\n             info = {\\n                 'v1capabilities': set(rawdata.split())\\n             }\\n \\n         else:\\n             raise error.RepoError(\\n                 _('unexpected response type from server: %s') % ct)\\n     else:\\n         info = {\\n             'v1capabilities': set(rawdata.split())\\n         }\\n \\n     return respurl, info\\n \\n def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):\\n     \\\"\\\"\\\"Construct an appropriate HTTP peer instance.\\n \\n     ``opener`` is an ``url.opener`` that should be used to establish\\n     connections, perform HTTP requests.\\n \\n     ``requestbuilder`` is the type used for constructing HTTP requests.\\n     It exists as an argument so extensions can override the default.\\n     \\\"\\\"\\\"\\n     u = util.url(path)\\n     if u.query or u.fragment:\\n         raise error.Abort(_('unsupported URL component: \\\"%s\\\"') %\\n                           (u.query or u.fragment))\\n \\n     # urllib cannot handle URLs with embedded user or passwd.\\n     url, authinfo = u.authinfo()\\n     ui.debug('using %s\\\\n' % url)\\n \\n     opener = opener or urlmod.opener(ui, authinfo)\\n \\n     respurl, info = performhandshake(ui, url, opener, requestbuilder)\\n \\n     # Given the intersection of APIs that both we and the server support,\\n     # sort by their advertised priority and pick the first one.\\n     #\\n     # TODO consider making this request-based and interface driven. For\\n     # example, the caller could say \\\"I want a peer that does X.\\\" It's quite\\n     # possible that not all peers would do that. Since we know the service\\n     # capabilities, we could filter out services not meeting the\\n     # requirements. Possibly by consulting the interfaces defined by the\\n     # peer type.\\n     apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())\\n \\n     preferredchoices = sorted(apipeerchoices,\\n                               key=lambda x: API_PEERS[x]['priority'],\\n                               reverse=True)\\n \\n     for service in preferredchoices:\\n         apipath = '%s\\/%s' % (info['apibase'].rstrip('\\/'), service)\\n \\n         return API_PEERS[service]['init'](ui, respurl, apipath, opener,\\n                                           requestbuilder,\\n                                           info['apis'][service])\\n \\n     # Failed to construct an API peer. 
Fall back to legacy.\\n     return httppeer(ui, path, respurl, opener, requestbuilder,\\n                     info['v1capabilities'])\\n \\n def instance(ui, path, create, intents=None, createopts=None):\\n     if create:\\n         raise error.Abort(_('cannot create new http repository'))\\n     try:\\n         if path.startswith('https:') and not urlmod.has_https:\\n             raise error.Abort(_('Python support for SSL and HTTPS '\\n                                 'is not installed'))\\n \\n         inst = makepeer(ui, path)\\n \\n         return inst\\n     except error.RepoError as httpexception:\\n         try:\\n             r = statichttprepo.instance(ui, \\\"static-\\\" + path, create)\\n             ui.note(_('(falling back to static-http)\\\\n'))\\n             return r\\n         except error.RepoError:\\n             raise httpexception # use the original http RepoError instead\\n\"}]},{\"id\":\"24368\",\"metadata\":{\"line:first\":1,\"copy:lines\":{\"4\":[\"mercurial\\/exchange.py\",4,\" \"],\"5\":[\"mercurial\\/exchange.py\",5,\" \"],\"6\":[\"mercurial\\/exchange.py\",6,\" \"],\"7\":[\"mercurial\\/exchange.py\",7,\" \"],\"8\":[\"mercurial\\/exchange.py\",8,\" \"],\"9\":[\"mercurial\\/exchange.py\",9,\" \"]}},\"oldPath\":null,\"currentPath\":\"mercurial\\/exchangev2.py\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"55\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"55\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+# exchangev2.py - repository exchange for wire protocol version 2\\n+#\\n+# Copyright 2018 Gregory Szorc \\u003cgregory.szorc@gmail.com\\u003e\\n+#\\n+# This software may be used and distributed according to the terms of the\\n+# GNU General Public License version 2 or any later version.\\n+\\n+from __future__ import absolute_import\\n+\\n+from .node import (\\n+    nullid,\\n+)\\n+from . import (\\n+    setdiscovery,\\n+)\\n+\\n+def pull(pullop):\\n+    \\\"\\\"\\\"Pull using wire protocol version 2.\\\"\\\"\\\"\\n+    repo = pullop.repo\\n+    remote = pullop.remote\\n+\\n+    # Figure out what needs to be fetched.\\n+    common, fetch, remoteheads = _pullchangesetdiscovery(\\n+        repo, remote, pullop.heads, abortwhenunrelated=pullop.force)\\n+\\n+def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):\\n+    \\\"\\\"\\\"Determine which changesets need to be pulled.\\\"\\\"\\\"\\n+\\n+    if heads:\\n+        knownnode = repo.changelog.hasnode\\n+        if all(knownnode(head) for head in heads):\\n+            return heads, False, heads\\n+\\n+    # TODO wire protocol version 2 is capable of more efficient discovery\\n+    # than setdiscovery. 
Consider implementing something better.\\n+    common, fetch, remoteheads = setdiscovery.findcommonheads(\\n+        repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated)\\n+\\n+    common = set(common)\\n+    remoteheads = set(remoteheads)\\n+\\n+    # If a remote head is filtered locally, put it back in the common set.\\n+    # See the comment in exchange._pulldiscoverychangegroup() for more.\\n+\\n+    if fetch and remoteheads:\\n+        nodemap = repo.unfiltered().changelog.nodemap\\n+\\n+        common |= {head for head in remoteheads if head in nodemap}\\n+\\n+        if set(remoteheads).issubset(common):\\n+            fetch = []\\n+\\n+    common.discard(nullid)\\n+\\n+    return common, fetch, remoteheads\\n\"}]},{\"id\":\"24367\",\"metadata\":{\"line:first\":29,\"copy:lines\":{\"1514\":[\"\",1509,\"-\"],\"1515\":[\"\",1510,\"-\"],\"1516\":[\"\",1511,\"-\"],\"1517\":[\"\",1512,\"-\"],\"1518\":[\"\",1513,\"-\"],\"1519\":[\"\",1514,\"-\"],\"1520\":[\"\",1515,\"-\"],\"1521\":[\"\",1516,\"-\"],\"1522\":[\"\",1517,\"-\"],\"1523\":[\"\",1518,\"-\"],\"1524\":[\"\",1519,\"-\"],\"1525\":[\"\",1520,\" \"],\"1526\":[\"\",1521,\" \"],\"1527\":[\"\",1522,\" \"],\"1528\":[\"\",1523,\" \"],\"1529\":[\"\",1524,\" \"],\"1530\":[\"\",1525,\" \"],\"1531\":[\"\",1526,\" \"],\"1532\":[\"\",1527,\" \"],\"1533\":[\"\",1528,\" \"],\"1534\":[\"\",1529,\" \"],\"1535\":[\"\",1530,\" \"],\"1536\":[\"\",1531,\" \"],\"1537\":[\"\",1532,\" \"],\"1538\":[\"\",1533,\" \"],\"1539\":[\"\",1534,\" \"],\"1540\":[\"\",1535,\" \"],\"1541\":[\"\",1536,\" \"],\"1542\":[\"\",1537,\" \"],\"1543\":[\"\",1538,\" \"],\"1544\":[\"\",1539,\" \"],\"1545\":[\"\",1540,\" \"],\"1546\":[\"\",1541,\" \"],\"1547\":[\"\",1542,\" \"],\"1548\":[\"\",1543,\" \"],\"1549\":[\"\",1544,\" \"],\"1550\":[\"\",1545,\" \"],\"1551\":[\"\",1546,\" \"],\"1552\":[\"\",1547,\" \"],\"1553\":[\"\",1548,\" \"],\"1554\":[\"\",1549,\" \"],\"1555\":[\"\",1550,\" \"],\"1556\":[\"\",1551,\" \"],\"1557\":[\"\",1552,\" \"],\"1558\":[\"\",1553,\" \"],\"1559\":[\"\",1554,\" \"],\"1560\":[\"\",1555,\" \"],\"1561\":[\"\",1556,\" \"],\"1562\":[\"\",1557,\" \"],\"1563\":[\"\",1558,\" \"],\"1564\":[\"\",1559,\" \"],\"1565\":[\"\",1560,\" \"],\"1566\":[\"\",1561,\" \"],\"1567\":[\"\",1562,\" \"],\"1568\":[\"\",1563,\" \"],\"1569\":[\"\",1564,\" \"],\"1570\":[\"\",1565,\" \"],\"1571\":[\"\",1566,\" \"],\"1572\":[\"\",1567,\" \"],\"1573\":[\"\",1568,\" \"],\"1574\":[\"\",1569,\" \"],\"1575\":[\"\",1570,\" \"],\"1576\":[\"\",1571,\" \"],\"1577\":[\"\",1572,\" \"],\"1578\":[\"\",1573,\" \"],\"1579\":[\"\",1574,\" \"],\"1580\":[\"\",1575,\" \"],\"1581\":[\"\",1576,\" \"],\"1582\":[\"\",1577,\" \"],\"1583\":[\"\",1578,\" \"],\"1584\":[\"\",1579,\" \"],\"1585\":[\"\",1580,\" \"],\"1586\":[\"\",1581,\" \"],\"1587\":[\"\",1582,\" \"],\"1588\":[\"\",1583,\" \"],\"1589\":[\"\",1584,\" \"],\"1590\":[\"\",1585,\" \"],\"1591\":[\"\",1586,\" \"],\"1592\":[\"\",1587,\" \"],\"1593\":[\"\",1588,\" \"],\"1594\":[\"\",1589,\" \"],\"1595\":[\"\",1590,\" \"],\"1596\":[\"\",1591,\" \"],\"1597\":[\"\",1592,\" \"],\"1598\":[\"\",1593,\" \"],\"1599\":[\"\",1594,\" \"],\"1600\":[\"\",1595,\" \"],\"1601\":[\"\",1596,\" \"],\"1602\":[\"\",1597,\" \"],\"1603\":[\"\",1598,\" \"],\"1604\":[\"\",1599,\" \"],\"1605\":[\"\",1600,\" \"],\"1606\":[\"\",1601,\" \"],\"1607\":[\"\",1602,\" \"],\"1608\":[\"\",1603,\" \"],\"1609\":[\"\",1604,\" \"],\"1610\":[\"\",1605,\" \"],\"1611\":[\"\",1606,\" \"],\"1612\":[\"\",1607,\" \"],\"1613\":[\"\",1608,\" \"],\"1614\":[\"\",1609,\" 
\"],\"1615\":[\"\",1610,\" \"],\"1616\":[\"\",1611,\" \"],\"1617\":[\"\",1612,\" \"],\"1618\":[\"\",1613,\" \"],\"1619\":[\"\",1614,\" \"],\"1620\":[\"\",1615,\" \"],\"1621\":[\"\",1616,\" \"],\"1622\":[\"\",1617,\" \"],\"1623\":[\"\",1618,\" \"],\"1624\":[\"\",1619,\" \"],\"1625\":[\"\",1620,\" \"],\"1626\":[\"\",1621,\" \"],\"1627\":[\"\",1622,\" \"],\"1628\":[\"\",1623,\" \"],\"1629\":[\"\",1624,\" \"],\"1630\":[\"\",1625,\" \"],\"1631\":[\"\",1626,\" \"],\"1632\":[\"\",1627,\" \"],\"1633\":[\"\",1628,\" \"],\"1634\":[\"\",1629,\" \"],\"1635\":[\"\",1630,\" \"],\"1636\":[\"\",1631,\" \"],\"1637\":[\"\",1632,\" \"],\"1638\":[\"\",1633,\" \"],\"1639\":[\"\",1634,\" \"],\"1640\":[\"\",1635,\" \"],\"1641\":[\"\",1636,\" \"],\"1642\":[\"\",1637,\" \"],\"1643\":[\"\",1638,\" \"],\"1644\":[\"\",1639,\" \"],\"1645\":[\"\",1640,\" \"],\"1646\":[\"\",1641,\" \"],\"1647\":[\"\",1642,\" \"],\"1648\":[\"\",1643,\" \"],\"1649\":[\"\",1644,\" \"],\"1650\":[\"\",1645,\" \"],\"1651\":[\"\",1646,\" \"],\"1652\":[\"\",1647,\" \"],\"1653\":[\"\",1648,\" \"],\"1654\":[\"\",1649,\" \"],\"1655\":[\"\",1650,\" \"],\"1656\":[\"\",1651,\" \"],\"1657\":[\"\",1652,\" \"],\"1658\":[\"\",1653,\" \"],\"1659\":[\"\",1654,\" \"],\"1660\":[\"\",1655,\" \"],\"1661\":[\"\",1656,\" \"],\"1662\":[\"\",1657,\" \"],\"1663\":[\"\",1658,\" \"],\"1664\":[\"\",1659,\" \"],\"1665\":[\"\",1660,\" \"],\"1666\":[\"\",1661,\" \"],\"1667\":[\"\",1662,\" \"],\"1668\":[\"\",1663,\" \"],\"1669\":[\"\",1664,\" \"],\"1670\":[\"\",1665,\" \"],\"1671\":[\"\",1666,\" \"],\"1672\":[\"\",1667,\" \"],\"1673\":[\"\",1668,\" \"],\"1674\":[\"\",1669,\" \"],\"1675\":[\"\",1670,\" \"],\"1676\":[\"\",1671,\" \"],\"1677\":[\"\",1672,\" \"],\"1678\":[\"\",1673,\" \"],\"1679\":[\"\",1674,\" \"],\"1680\":[\"\",1675,\" \"],\"1681\":[\"\",1676,\" \"],\"1682\":[\"\",1677,\" \"],\"1683\":[\"\",1678,\" \"],\"1684\":[\"\",1679,\" \"],\"1685\":[\"\",1680,\" \"],\"1686\":[\"\",1681,\" \"],\"1687\":[\"\",1682,\" \"],\"1688\":[\"\",1683,\" \"],\"1689\":[\"\",1684,\" \"],\"1690\":[\"\",1685,\" \"],\"1691\":[\"\",1686,\" \"],\"1692\":[\"\",1687,\" \"],\"1693\":[\"\",1688,\" \"],\"1694\":[\"\",1689,\" \"],\"1695\":[\"\",1690,\" \"],\"1696\":[\"\",1691,\" \"],\"1697\":[\"\",1692,\" \"],\"1698\":[\"\",1693,\" \"],\"1699\":[\"\",1694,\" \"],\"1700\":[\"\",1695,\" \"],\"1701\":[\"\",1696,\" \"],\"1702\":[\"\",1697,\" \"],\"1703\":[\"\",1698,\" \"],\"1704\":[\"\",1699,\" \"],\"1705\":[\"\",1700,\" \"],\"1706\":[\"\",1701,\" \"],\"1707\":[\"\",1702,\" \"],\"1708\":[\"\",1703,\" \"],\"1709\":[\"\",1704,\" \"],\"1710\":[\"\",1705,\" \"],\"1711\":[\"\",1706,\" \"],\"1712\":[\"\",1707,\" \"],\"1713\":[\"\",1708,\" \"],\"1714\":[\"\",1709,\" \"],\"1715\":[\"\",1710,\" \"],\"1716\":[\"\",1711,\" \"],\"1717\":[\"\",1712,\" \"],\"1718\":[\"\",1713,\" \"],\"1719\":[\"\",1714,\" \"],\"1720\":[\"\",1715,\" \"],\"1721\":[\"\",1716,\" \"],\"1722\":[\"\",1717,\" \"],\"1723\":[\"\",1718,\" \"],\"1724\":[\"\",1719,\" \"],\"1725\":[\"\",1720,\" \"],\"1726\":[\"\",1721,\" \"],\"1727\":[\"\",1722,\" \"],\"1728\":[\"\",1723,\" \"],\"1729\":[\"\",1724,\" \"],\"1730\":[\"\",1725,\" \"],\"1731\":[\"\",1726,\" \"],\"1732\":[\"\",1727,\" \"],\"1733\":[\"\",1728,\" \"],\"1734\":[\"\",1729,\" \"],\"1735\":[\"\",1730,\" \"],\"1736\":[\"\",1731,\" \"],\"1737\":[\"\",1732,\" \"],\"1738\":[\"\",1733,\" \"],\"1739\":[\"\",1734,\" \"],\"1740\":[\"\",1735,\" \"],\"1741\":[\"\",1736,\" \"],\"1742\":[\"\",1737,\" \"],\"1743\":[\"\",1738,\" \"],\"1744\":[\"\",1739,\" \"],\"1745\":[\"\",1740,\" 
\"],\"1746\":[\"\",1741,\" \"],\"1747\":[\"\",1742,\" \"],\"1748\":[\"\",1743,\" \"],\"1749\":[\"\",1744,\" \"],\"1750\":[\"\",1745,\" \"],\"1751\":[\"\",1746,\" \"],\"1752\":[\"\",1747,\" \"],\"1753\":[\"\",1748,\" \"],\"1754\":[\"\",1749,\" \"],\"1755\":[\"\",1750,\" \"],\"1756\":[\"\",1751,\" \"],\"1757\":[\"\",1752,\" \"],\"1758\":[\"\",1753,\" \"],\"1759\":[\"\",1754,\" \"],\"1760\":[\"\",1755,\" \"],\"1761\":[\"\",1756,\" \"],\"1762\":[\"\",1757,\" \"],\"1763\":[\"\",1758,\" \"],\"1764\":[\"\",1759,\" \"],\"1765\":[\"\",1760,\" \"],\"1766\":[\"\",1761,\" \"],\"1767\":[\"\",1762,\" \"],\"1768\":[\"\",1763,\" \"],\"1769\":[\"\",1764,\" \"],\"1770\":[\"\",1765,\" \"],\"1771\":[\"\",1766,\" \"],\"1772\":[\"\",1767,\" \"],\"1773\":[\"\",1768,\" \"],\"1774\":[\"\",1769,\" \"],\"1775\":[\"\",1770,\" \"],\"1776\":[\"\",1771,\" \"],\"1777\":[\"\",1772,\" \"],\"1778\":[\"\",1773,\" \"],\"1779\":[\"\",1774,\" \"],\"1780\":[\"\",1775,\" \"],\"1781\":[\"\",1776,\" \"],\"1782\":[\"\",1777,\" \"],\"1783\":[\"\",1778,\" \"],\"1784\":[\"\",1779,\" \"],\"1785\":[\"\",1780,\" \"],\"1786\":[\"\",1781,\" \"],\"1787\":[\"\",1782,\" \"],\"1788\":[\"\",1783,\" \"],\"1789\":[\"\",1784,\" \"],\"1790\":[\"\",1785,\" \"],\"1791\":[\"\",1786,\" \"],\"1792\":[\"\",1787,\" \"],\"1793\":[\"\",1788,\" \"],\"1794\":[\"\",1789,\" \"],\"1795\":[\"\",1790,\" \"],\"1796\":[\"\",1791,\" \"],\"1797\":[\"\",1792,\" \"],\"1798\":[\"\",1793,\" \"],\"1799\":[\"\",1794,\" \"],\"1800\":[\"\",1795,\" \"],\"1801\":[\"\",1796,\" \"],\"1802\":[\"\",1797,\" \"],\"1803\":[\"\",1798,\" \"],\"1804\":[\"\",1799,\" \"],\"1805\":[\"\",1800,\" \"],\"1806\":[\"\",1801,\" \"],\"1807\":[\"\",1802,\" \"],\"1808\":[\"\",1803,\" \"],\"1809\":[\"\",1804,\" \"],\"1810\":[\"\",1805,\" \"],\"1811\":[\"\",1806,\" \"],\"1812\":[\"\",1807,\" \"],\"1813\":[\"\",1808,\" \"],\"1814\":[\"\",1809,\" \"],\"1815\":[\"\",1810,\" \"],\"1816\":[\"\",1811,\" \"],\"1817\":[\"\",1812,\" \"],\"1818\":[\"\",1813,\" \"],\"1819\":[\"\",1814,\" \"],\"1820\":[\"\",1815,\" \"],\"1821\":[\"\",1816,\" \"],\"1822\":[\"\",1817,\" \"],\"1823\":[\"\",1818,\" \"],\"1824\":[\"\",1819,\" \"],\"1825\":[\"\",1820,\" \"],\"1826\":[\"\",1821,\" \"],\"1827\":[\"\",1822,\" \"],\"1828\":[\"\",1823,\" \"],\"1829\":[\"\",1824,\" \"],\"1830\":[\"\",1825,\" \"],\"1831\":[\"\",1826,\" \"],\"1832\":[\"\",1827,\" \"],\"1833\":[\"\",1828,\" \"],\"1834\":[\"\",1829,\" \"],\"1835\":[\"\",1830,\" \"],\"1836\":[\"\",1831,\" \"],\"1837\":[\"\",1832,\" \"],\"1838\":[\"\",1833,\" \"],\"1839\":[\"\",1834,\" \"],\"1840\":[\"\",1835,\" \"],\"1841\":[\"\",1836,\" \"],\"1842\":[\"\",1837,\" \"],\"1843\":[\"\",1838,\" \"],\"1844\":[\"\",1839,\" \"],\"1845\":[\"\",1840,\" \"],\"1846\":[\"\",1841,\" \"],\"1847\":[\"\",1842,\" \"],\"1848\":[\"\",1843,\" \"],\"1849\":[\"\",1844,\" \"],\"1850\":[\"\",1845,\" \"],\"1851\":[\"\",1846,\" \"],\"1852\":[\"\",1847,\" \"],\"1853\":[\"\",1848,\" \"],\"1854\":[\"\",1849,\" \"],\"1855\":[\"\",1850,\" \"],\"1856\":[\"\",1851,\" \"],\"1857\":[\"\",1852,\" \"],\"1858\":[\"\",1853,\" \"],\"1859\":[\"\",1854,\" \"],\"1860\":[\"\",1855,\" \"],\"1861\":[\"\",1856,\" \"],\"1862\":[\"\",1857,\" \"],\"1863\":[\"\",1858,\" \"],\"1864\":[\"\",1859,\" \"],\"1865\":[\"\",1860,\" \"],\"1866\":[\"\",1861,\" \"],\"1867\":[\"\",1862,\" \"],\"1868\":[\"\",1863,\" \"],\"1869\":[\"\",1864,\" \"],\"1870\":[\"\",1865,\" \"],\"1871\":[\"\",1866,\" \"],\"1872\":[\"\",1867,\" \"],\"1873\":[\"\",1868,\" \"],\"1874\":[\"\",1869,\" \"],\"1875\":[\"\",1870,\" \"],\"1876\":[\"\",1871,\" 
\"],\"1877\":[\"\",1872,\" \"],\"1878\":[\"\",1873,\" \"],\"1879\":[\"\",1874,\" \"],\"1880\":[\"\",1875,\" \"],\"1881\":[\"\",1876,\" \"],\"1882\":[\"\",1877,\" \"],\"1883\":[\"\",1878,\" \"],\"1884\":[\"\",1879,\" \"],\"1885\":[\"\",1880,\" \"],\"1886\":[\"\",1881,\" \"],\"1887\":[\"\",1882,\" \"],\"1888\":[\"\",1883,\" \"],\"1889\":[\"\",1884,\" \"],\"1890\":[\"\",1885,\" \"],\"1891\":[\"\",1886,\" \"],\"1892\":[\"\",1887,\" \"],\"1893\":[\"\",1888,\" \"],\"1894\":[\"\",1889,\" \"],\"1895\":[\"\",1890,\" \"],\"1896\":[\"\",1891,\" \"],\"1897\":[\"\",1892,\" \"],\"1898\":[\"\",1893,\" \"],\"1899\":[\"\",1894,\" \"],\"1900\":[\"\",1895,\" \"],\"1901\":[\"\",1896,\" \"],\"1902\":[\"\",1897,\" \"],\"1903\":[\"\",1898,\" \"],\"1904\":[\"\",1899,\" \"],\"1905\":[\"\",1900,\" \"],\"1906\":[\"\",1901,\" \"],\"1907\":[\"\",1902,\" \"],\"1908\":[\"\",1903,\" \"],\"1909\":[\"\",1904,\" \"],\"1910\":[\"\",1905,\" \"],\"1911\":[\"\",1906,\" \"],\"1912\":[\"\",1907,\" \"],\"1913\":[\"\",1908,\" \"],\"1914\":[\"\",1909,\" \"],\"1915\":[\"\",1910,\" \"],\"1916\":[\"\",1911,\" \"],\"1917\":[\"\",1912,\" \"],\"1918\":[\"\",1913,\" \"],\"1919\":[\"\",1914,\" \"],\"1920\":[\"\",1915,\" \"],\"1921\":[\"\",1916,\" \"],\"1922\":[\"\",1917,\" \"],\"1923\":[\"\",1918,\" \"],\"1924\":[\"\",1919,\" \"],\"1925\":[\"\",1920,\" \"],\"1926\":[\"\",1921,\" \"],\"1927\":[\"\",1922,\" \"],\"1928\":[\"\",1923,\" \"],\"1929\":[\"\",1924,\" \"],\"1930\":[\"\",1925,\" \"],\"1931\":[\"\",1926,\" \"],\"1932\":[\"\",1927,\" \"],\"1933\":[\"\",1928,\" \"],\"1934\":[\"\",1929,\" \"],\"1935\":[\"\",1930,\" \"],\"1936\":[\"\",1931,\" \"],\"1937\":[\"\",1932,\" \"],\"1938\":[\"\",1933,\" \"],\"1939\":[\"\",1934,\" \"],\"1940\":[\"\",1935,\" \"],\"1941\":[\"\",1936,\" \"],\"1942\":[\"\",1937,\" \"],\"1943\":[\"\",1938,\" \"],\"1944\":[\"\",1939,\" \"],\"1945\":[\"\",1940,\" \"],\"1946\":[\"\",1941,\" \"],\"1947\":[\"\",1942,\" \"],\"1948\":[\"\",1943,\" \"],\"1949\":[\"\",1944,\" \"],\"1950\":[\"\",1945,\" \"],\"1951\":[\"\",1946,\" \"],\"1952\":[\"\",1947,\" \"],\"1953\":[\"\",1948,\" \"],\"1954\":[\"\",1949,\" \"],\"1955\":[\"\",1950,\" \"],\"1956\":[\"\",1951,\" \"],\"1957\":[\"\",1952,\" \"],\"1958\":[\"\",1953,\" \"],\"1959\":[\"\",1954,\" \"],\"1960\":[\"\",1955,\" \"],\"1961\":[\"\",1956,\" \"],\"1962\":[\"\",1957,\" \"],\"1963\":[\"\",1958,\" \"],\"1964\":[\"\",1959,\" \"],\"1965\":[\"\",1960,\" \"],\"1966\":[\"\",1961,\" \"],\"1967\":[\"\",1962,\" \"],\"1968\":[\"\",1963,\" \"],\"1969\":[\"\",1964,\" \"],\"1970\":[\"\",1965,\" \"],\"1971\":[\"\",1966,\" \"],\"1972\":[\"\",1967,\" \"],\"1973\":[\"\",1968,\" \"],\"1974\":[\"\",1969,\" \"],\"1975\":[\"\",1970,\" \"],\"1976\":[\"\",1971,\" \"],\"1977\":[\"\",1972,\" \"],\"1978\":[\"\",1973,\" \"],\"1979\":[\"\",1974,\" \"],\"1980\":[\"\",1975,\" \"],\"1981\":[\"\",1976,\" \"],\"1982\":[\"\",1977,\" \"],\"1983\":[\"\",1978,\" \"],\"1984\":[\"\",1979,\" \"],\"1985\":[\"\",1980,\" \"],\"1986\":[\"\",1981,\" \"],\"1987\":[\"\",1982,\" \"],\"1988\":[\"\",1983,\" \"],\"1989\":[\"\",1984,\" \"],\"1990\":[\"\",1985,\" \"],\"1991\":[\"\",1986,\" \"],\"1992\":[\"\",1987,\" \"],\"1993\":[\"\",1988,\" \"],\"1994\":[\"\",1989,\" \"],\"1995\":[\"\",1990,\" \"],\"1996\":[\"\",1991,\" \"],\"1997\":[\"\",1992,\" \"],\"1998\":[\"\",1993,\" \"],\"1999\":[\"\",1994,\" \"],\"2000\":[\"\",1995,\" \"],\"2001\":[\"\",1996,\" \"],\"2002\":[\"\",1997,\" \"],\"2003\":[\"\",1998,\" \"],\"2004\":[\"\",1999,\" \"],\"2005\":[\"\",2000,\" \"],\"2006\":[\"\",2001,\" \"],\"2007\":[\"\",2002,\" 
\"],\"2008\":[\"\",2003,\" \"],\"2009\":[\"\",2004,\" \"],\"2010\":[\"\",2005,\" \"],\"2011\":[\"\",2006,\" \"],\"2012\":[\"\",2007,\" \"],\"2013\":[\"\",2008,\" \"],\"2014\":[\"\",2009,\" \"],\"2015\":[\"\",2010,\" \"],\"2016\":[\"\",2011,\" \"],\"2017\":[\"\",2012,\" \"],\"2018\":[\"\",2013,\" \"],\"2019\":[\"\",2014,\" \"],\"2020\":[\"\",2015,\" \"],\"2021\":[\"\",2016,\" \"],\"2022\":[\"\",2017,\" \"],\"2023\":[\"\",2018,\" \"],\"2024\":[\"\",2019,\" \"],\"2025\":[\"\",2020,\" \"],\"2026\":[\"\",2021,\" \"],\"2027\":[\"\",2022,\" \"],\"2028\":[\"\",2023,\" \"],\"2029\":[\"\",2024,\" \"],\"2030\":[\"\",2025,\" \"],\"2031\":[\"\",2026,\" \"],\"2032\":[\"\",2027,\" \"],\"2033\":[\"\",2028,\" \"],\"2034\":[\"\",2029,\" \"],\"2035\":[\"\",2030,\" \"],\"2036\":[\"\",2031,\" \"],\"2037\":[\"\",2032,\" \"],\"2038\":[\"\",2033,\" \"],\"2039\":[\"\",2034,\" \"],\"2040\":[\"\",2035,\" \"],\"2041\":[\"\",2036,\" \"],\"2042\":[\"\",2037,\" \"],\"2043\":[\"\",2038,\" \"],\"2044\":[\"\",2039,\" \"],\"2045\":[\"\",2040,\" \"],\"2046\":[\"\",2041,\" \"],\"2047\":[\"\",2042,\" \"],\"2048\":[\"\",2043,\" \"],\"2049\":[\"\",2044,\" \"],\"2050\":[\"\",2045,\" \"],\"2051\":[\"\",2046,\" \"],\"2052\":[\"\",2047,\" \"],\"2053\":[\"\",2048,\" \"],\"2054\":[\"\",2049,\" \"],\"2055\":[\"\",2050,\" \"],\"2056\":[\"\",2051,\" \"],\"2057\":[\"\",2052,\" \"],\"2058\":[\"\",2053,\" \"],\"2059\":[\"\",2054,\" \"],\"2060\":[\"\",2055,\" \"],\"2061\":[\"\",2056,\" \"],\"2062\":[\"\",2057,\" \"],\"2063\":[\"\",2058,\" \"],\"2064\":[\"\",2059,\" \"],\"2065\":[\"\",2060,\" \"],\"2066\":[\"\",2061,\" \"],\"2067\":[\"\",2062,\" \"],\"2068\":[\"\",2063,\" \"],\"2069\":[\"\",2064,\" \"],\"2070\":[\"\",2065,\" \"],\"2071\":[\"\",2066,\" \"],\"2072\":[\"\",2067,\" \"],\"2073\":[\"\",2068,\" \"],\"2074\":[\"\",2069,\" \"],\"2075\":[\"\",2070,\" \"],\"2076\":[\"\",2071,\" \"],\"2077\":[\"\",2072,\" \"],\"2078\":[\"\",2073,\" \"],\"2079\":[\"\",2074,\" \"],\"2080\":[\"\",2075,\" \"],\"2081\":[\"\",2076,\" \"],\"2082\":[\"\",2077,\" \"],\"2083\":[\"\",2078,\" \"],\"2084\":[\"\",2079,\" \"],\"2085\":[\"\",2080,\" \"],\"2086\":[\"\",2081,\" \"],\"2087\":[\"\",2082,\" \"],\"2088\":[\"\",2083,\" \"],\"2089\":[\"\",2084,\" \"],\"2090\":[\"\",2085,\" \"],\"2091\":[\"\",2086,\" \"],\"2092\":[\"\",2087,\" \"],\"2093\":[\"\",2088,\" \"],\"2094\":[\"\",2089,\" \"],\"2095\":[\"\",2090,\" \"],\"2096\":[\"\",2091,\" \"],\"2097\":[\"\",2092,\" \"],\"2098\":[\"\",2093,\" \"],\"2099\":[\"\",2094,\" \"],\"2100\":[\"\",2095,\" \"],\"2101\":[\"\",2096,\" \"],\"2102\":[\"\",2097,\" \"],\"2103\":[\"\",2098,\" \"],\"2104\":[\"\",2099,\" \"],\"2105\":[\"\",2100,\" \"],\"2106\":[\"\",2101,\" \"],\"2107\":[\"\",2102,\" \"],\"2108\":[\"\",2103,\" \"],\"2109\":[\"\",2104,\" \"],\"2110\":[\"\",2105,\" \"],\"2111\":[\"\",2106,\" \"],\"2112\":[\"\",2107,\" \"],\"2113\":[\"\",2108,\" \"],\"2114\":[\"\",2109,\" \"],\"2115\":[\"\",2110,\" \"],\"2116\":[\"\",2111,\" \"],\"2117\":[\"\",2112,\" \"],\"2118\":[\"\",2113,\" \"],\"2119\":[\"\",2114,\" \"],\"2120\":[\"\",2115,\" \"],\"2121\":[\"\",2116,\" \"],\"2122\":[\"\",2117,\" \"],\"2123\":[\"\",2118,\" \"],\"2124\":[\"\",2119,\" \"],\"2125\":[\"\",2120,\" \"],\"2126\":[\"\",2121,\" \"],\"2127\":[\"\",2122,\" \"],\"2128\":[\"\",2123,\" \"],\"2129\":[\"\",2124,\" \"],\"2130\":[\"\",2125,\" \"],\"2131\":[\"\",2126,\" \"],\"2132\":[\"\",2127,\" \"],\"2133\":[\"\",2128,\" \"],\"2134\":[\"\",2129,\" \"],\"2135\":[\"\",2130,\" \"],\"2136\":[\"\",2131,\" \"],\"2137\":[\"\",2132,\" \"],\"2138\":[\"\",2133,\" 
\"],\"2139\":[\"\",2134,\" \"],\"2140\":[\"\",2135,\" \"],\"2141\":[\"\",2136,\" \"],\"2142\":[\"\",2137,\" \"],\"2143\":[\"\",2138,\" \"],\"2144\":[\"\",2139,\" \"],\"2145\":[\"\",2140,\" \"],\"2146\":[\"\",2141,\" \"],\"2147\":[\"\",2142,\" \"],\"2148\":[\"\",2143,\" \"],\"2149\":[\"\",2144,\" \"],\"2150\":[\"\",2145,\" \"],\"2151\":[\"\",2146,\" \"],\"2152\":[\"\",2147,\" \"],\"2153\":[\"\",2148,\" \"],\"2154\":[\"\",2149,\" \"],\"2155\":[\"\",2150,\" \"],\"2156\":[\"\",2151,\" \"],\"2157\":[\"\",2152,\" \"],\"2158\":[\"\",2153,\" \"],\"2159\":[\"\",2154,\" \"],\"2160\":[\"\",2155,\" \"],\"2161\":[\"\",2156,\" \"],\"2162\":[\"\",2157,\" \"],\"2163\":[\"\",2158,\" \"],\"2164\":[\"\",2159,\" \"],\"2165\":[\"\",2160,\" \"],\"2166\":[\"\",2161,\" \"],\"2167\":[\"\",2162,\" \"],\"2168\":[\"\",2163,\" \"],\"2169\":[\"\",2164,\" \"],\"2170\":[\"\",2165,\" \"],\"2171\":[\"\",2166,\" \"],\"2172\":[\"\",2167,\" \"],\"2173\":[\"\",2168,\" \"],\"2174\":[\"\",2169,\" \"],\"2175\":[\"\",2170,\" \"],\"2176\":[\"\",2171,\" \"],\"2177\":[\"\",2172,\" \"],\"2178\":[\"\",2173,\" \"],\"2179\":[\"\",2174,\" \"],\"2180\":[\"\",2175,\" \"],\"2181\":[\"\",2176,\" \"],\"2182\":[\"\",2177,\" \"],\"2183\":[\"\",2178,\" \"],\"2184\":[\"\",2179,\" \"],\"2185\":[\"\",2180,\" \"],\"2186\":[\"\",2181,\" \"],\"2187\":[\"\",2182,\" \"],\"2188\":[\"\",2183,\" \"],\"2189\":[\"\",2184,\" \"],\"2190\":[\"\",2185,\" \"],\"2191\":[\"\",2186,\" \"],\"2192\":[\"\",2187,\" \"],\"2193\":[\"\",2188,\" \"],\"2194\":[\"\",2189,\" \"],\"2195\":[\"\",2190,\" \"],\"2196\":[\"\",2191,\" \"],\"2197\":[\"\",2192,\" \"],\"2198\":[\"\",2193,\" \"],\"2199\":[\"\",2194,\" \"],\"2200\":[\"\",2195,\" \"],\"2201\":[\"\",2196,\" \"],\"2202\":[\"\",2197,\" \"],\"2203\":[\"\",2198,\" \"],\"2204\":[\"\",2199,\" \"],\"2205\":[\"\",2200,\" \"],\"2206\":[\"\",2201,\" \"],\"2207\":[\"\",2202,\" \"],\"2208\":[\"\",2203,\" \"],\"2209\":[\"\",2204,\" \"],\"2210\":[\"\",2205,\" \"],\"2211\":[\"\",2206,\" \"],\"2212\":[\"\",2207,\" \"],\"2213\":[\"\",2208,\" \"],\"2214\":[\"\",2209,\" \"],\"2215\":[\"\",2210,\" \"],\"2216\":[\"\",2211,\" \"],\"2217\":[\"\",2212,\" \"],\"2218\":[\"\",2213,\" \"],\"2219\":[\"\",2214,\" \"],\"2220\":[\"\",2215,\" \"],\"2221\":[\"\",2216,\" \"],\"2222\":[\"\",2217,\" \"],\"2223\":[\"\",2218,\" \"],\"2224\":[\"\",2219,\" \"],\"2225\":[\"\",2220,\" \"],\"2226\":[\"\",2221,\" \"],\"2227\":[\"\",2222,\" \"],\"2228\":[\"\",2223,\" \"],\"2229\":[\"\",2224,\" \"],\"2230\":[\"\",2225,\" \"],\"2231\":[\"\",2226,\" \"],\"2232\":[\"\",2227,\" \"],\"2233\":[\"\",2228,\" \"],\"2234\":[\"\",2229,\" \"],\"2235\":[\"\",2230,\" \"],\"2236\":[\"\",2231,\" \"],\"2237\":[\"\",2232,\" \"],\"2238\":[\"\",2233,\" \"],\"2239\":[\"\",2234,\" \"],\"2240\":[\"\",2235,\" \"],\"2241\":[\"\",2236,\" \"],\"2242\":[\"\",2237,\" \"],\"2243\":[\"\",2238,\" \"],\"2244\":[\"\",2239,\" \"],\"2245\":[\"\",2240,\" \"],\"2246\":[\"\",2241,\" \"],\"2247\":[\"\",2242,\" \"],\"2248\":[\"\",2243,\" \"],\"2249\":[\"\",2244,\" \"],\"2250\":[\"\",2245,\" \"],\"2251\":[\"\",2246,\" \"],\"2252\":[\"\",2247,\" \"],\"2253\":[\"\",2248,\" \"],\"2254\":[\"\",2249,\" \"],\"2255\":[\"\",2250,\" \"],\"2256\":[\"\",2251,\" \"],\"2257\":[\"\",2252,\" \"],\"2258\":[\"\",2253,\" \"],\"2259\":[\"\",2254,\" \"],\"2260\":[\"\",2255,\" \"],\"2261\":[\"\",2256,\" \"],\"2262\":[\"\",2257,\" \"],\"2263\":[\"\",2258,\" \"],\"2264\":[\"\",2259,\" \"],\"2265\":[\"\",2260,\" \"],\"2266\":[\"\",2261,\" \"],\"2267\":[\"\",2262,\" \"],\"2268\":[\"\",2263,\" \"],\"2269\":[\"\",2264,\" 
\"],\"2270\":[\"\",2265,\" \"],\"2271\":[\"\",2266,\" \"],\"2272\":[\"\",2267,\" \"],\"2273\":[\"\",2268,\" \"],\"2274\":[\"\",2269,\" \"],\"2275\":[\"\",2270,\" \"],\"2276\":[\"\",2271,\" \"],\"2277\":[\"\",2272,\" \"],\"2278\":[\"\",2273,\" \"],\"2279\":[\"\",2274,\" \"],\"2280\":[\"\",2275,\" \"],\"2281\":[\"\",2276,\" \"],\"2282\":[\"\",2277,\" \"],\"2283\":[\"\",2278,\" \"],\"2284\":[\"\",2279,\" \"],\"2285\":[\"\",2280,\" \"],\"2286\":[\"\",2281,\" \"],\"2287\":[\"\",2282,\" \"],\"2288\":[\"\",2283,\" \"],\"2289\":[\"\",2284,\" \"],\"2290\":[\"\",2285,\" \"],\"2291\":[\"\",2286,\" \"],\"2292\":[\"\",2287,\" \"],\"2293\":[\"\",2288,\" \"],\"2294\":[\"\",2289,\" \"],\"2295\":[\"\",2290,\" \"],\"2296\":[\"\",2291,\" \"],\"2297\":[\"\",2292,\" \"],\"2298\":[\"\",2293,\" \"],\"2299\":[\"\",2294,\" \"],\"2300\":[\"\",2295,\" \"],\"2301\":[\"\",2296,\" \"],\"2302\":[\"\",2297,\" \"],\"2303\":[\"\",2298,\" \"],\"2304\":[\"\",2299,\" \"],\"2305\":[\"\",2300,\" \"],\"2306\":[\"\",2301,\" \"],\"2307\":[\"\",2302,\" \"],\"2308\":[\"\",2303,\" \"],\"2309\":[\"\",2304,\" \"],\"2310\":[\"\",2305,\" \"],\"2311\":[\"\",2306,\" \"],\"2312\":[\"\",2307,\" \"],\"2313\":[\"\",2308,\" \"],\"2314\":[\"\",2309,\" \"],\"2315\":[\"\",2310,\" \"],\"2316\":[\"\",2311,\" \"],\"2317\":[\"\",2312,\" \"],\"2318\":[\"\",2313,\" \"],\"2319\":[\"\",2314,\" \"],\"2320\":[\"\",2315,\" \"],\"2321\":[\"\",2316,\" \"],\"2322\":[\"\",2317,\" \"],\"2323\":[\"\",2318,\" \"],\"2324\":[\"\",2319,\" \"],\"2325\":[\"\",2320,\" \"],\"2326\":[\"\",2321,\" \"],\"2327\":[\"\",2322,\" \"],\"2328\":[\"\",2323,\" \"],\"2329\":[\"\",2324,\" \"],\"2330\":[\"\",2325,\" \"],\"2331\":[\"\",2326,\" \"],\"2332\":[\"\",2327,\" \"],\"2333\":[\"\",2328,\" \"],\"2334\":[\"\",2329,\" \"],\"2335\":[\"\",2330,\" \"],\"2336\":[\"\",2331,\" \"],\"2337\":[\"\",2332,\" \"],\"2338\":[\"\",2333,\" \"],\"2339\":[\"\",2334,\" \"],\"2340\":[\"\",2335,\" \"],\"2341\":[\"\",2336,\" \"],\"2342\":[\"\",2337,\" \"],\"2343\":[\"\",2338,\" \"],\"2344\":[\"\",2339,\" \"],\"2345\":[\"\",2340,\" \"],\"2346\":[\"\",2341,\" \"],\"2347\":[\"\",2342,\" \"],\"2348\":[\"\",2343,\" \"],\"2349\":[\"\",2344,\" \"],\"2350\":[\"\",2345,\" \"],\"2351\":[\"\",2346,\" \"],\"2352\":[\"\",2347,\" \"],\"2353\":[\"\",2348,\" \"],\"2354\":[\"\",2349,\" \"],\"2355\":[\"\",2350,\" \"],\"2356\":[\"\",2351,\" \"],\"2357\":[\"\",2352,\" \"],\"2358\":[\"\",2353,\" \"],\"2359\":[\"\",2354,\" \"],\"2360\":[\"\",2355,\" \"],\"2361\":[\"\",2356,\" \"],\"2362\":[\"\",2357,\" \"],\"2363\":[\"\",2358,\" \"],\"2364\":[\"\",2359,\" \"],\"2365\":[\"\",2360,\" \"],\"2366\":[\"\",2361,\" \"],\"2367\":[\"\",2362,\" \"],\"2368\":[\"\",2363,\" \"],\"2369\":[\"\",2364,\" \"],\"2370\":[\"\",2365,\" \"],\"2371\":[\"\",2366,\" \"],\"2372\":[\"\",2367,\" \"],\"2373\":[\"\",2368,\" \"],\"2374\":[\"\",2369,\" \"],\"2375\":[\"\",2370,\" \"],\"2376\":[\"\",2371,\" \"],\"2377\":[\"\",2372,\" \"],\"2378\":[\"\",2373,\" \"],\"2379\":[\"\",2374,\" \"],\"2380\":[\"\",2375,\" \"],\"2381\":[\"\",2376,\" \"],\"2382\":[\"\",2377,\" \"],\"2383\":[\"\",2378,\" \"],\"2384\":[\"\",2379,\" \"],\"2385\":[\"\",2380,\" \"],\"2386\":[\"\",2381,\" \"],\"2387\":[\"\",2382,\" \"],\"2388\":[\"\",2383,\" \"],\"2389\":[\"\",2384,\" \"],\"2390\":[\"\",2385,\" \"],\"2391\":[\"\",2386,\" \"],\"2392\":[\"\",2387,\" \"],\"2393\":[\"\",2388,\" \"],\"2394\":[\"\",2389,\" \"],\"2395\":[\"\",2390,\" \"],\"2396\":[\"\",2391,\" \"],\"2397\":[\"\",2392,\" \"],\"2398\":[\"\",2393,\" \"],\"2399\":[\"\",2394,\" \"],\"2400\":[\"\",2395,\" 
\"],\"2401\":[\"\",2396,\" \"],\"2402\":[\"\",2397,\" \"],\"2403\":[\"\",2398,\" \"],\"2404\":[\"\",2399,\" \"],\"2405\":[\"\",2400,\" \"],\"2406\":[\"\",2401,\" \"],\"2407\":[\"\",2402,\" \"],\"2408\":[\"\",2403,\" \"],\"2409\":[\"\",2404,\" \"],\"2410\":[\"\",2405,\" \"],\"2411\":[\"\",2406,\" \"],\"2412\":[\"\",2407,\" \"],\"2413\":[\"\",2408,\" \"],\"2414\":[\"\",2409,\" \"],\"2415\":[\"\",2410,\" \"],\"2416\":[\"\",2411,\" \"],\"2417\":[\"\",2412,\" \"],\"2418\":[\"\",2413,\" \"],\"2419\":[\"\",2414,\" \"],\"2420\":[\"\",2415,\" \"],\"2421\":[\"\",2416,\" \"],\"2422\":[\"\",2417,\" \"],\"2423\":[\"\",2418,\" \"],\"2424\":[\"\",2419,\" \"],\"2425\":[\"\",2420,\" \"],\"2426\":[\"\",2421,\" \"],\"2427\":[\"\",2422,\" \"],\"2428\":[\"\",2423,\" \"],\"2429\":[\"\",2424,\" \"],\"2430\":[\"\",2425,\" \"],\"2431\":[\"\",2426,\" \"],\"2432\":[\"\",2427,\" \"],\"2433\":[\"\",2428,\" \"],\"2434\":[\"\",2429,\" \"],\"2435\":[\"\",2430,\" \"],\"2436\":[\"\",2431,\" \"],\"2437\":[\"\",2432,\" \"],\"2438\":[\"\",2433,\" \"],\"2439\":[\"\",2434,\" \"],\"2440\":[\"\",2435,\" \"],\"2441\":[\"\",2436,\" \"],\"2442\":[\"\",2437,\" \"],\"2443\":[\"\",2438,\" \"],\"2444\":[\"\",2439,\" \"],\"2445\":[\"\",2440,\" \"],\"2446\":[\"\",2441,\" \"],\"2447\":[\"\",2442,\" \"],\"2448\":[\"\",2443,\" \"],\"2449\":[\"\",2444,\" \"],\"2450\":[\"\",2445,\" \"],\"2451\":[\"\",2446,\" \"],\"2452\":[\"\",2447,\" \"],\"2453\":[\"\",2448,\" \"],\"2454\":[\"\",2449,\" \"],\"2455\":[\"\",2450,\" \"],\"2456\":[\"\",2451,\" \"],\"2457\":[\"\",2452,\" \"],\"2458\":[\"\",2453,\" \"],\"2459\":[\"\",2454,\" \"],\"2460\":[\"\",2455,\" \"],\"2461\":[\"\",2456,\" \"],\"2462\":[\"\",2457,\" \"],\"2463\":[\"\",2458,\" \"],\"2464\":[\"\",2459,\" \"],\"2465\":[\"\",2460,\" \"],\"2466\":[\"\",2461,\" \"],\"2467\":[\"\",2462,\" \"],\"2468\":[\"\",2463,\" \"],\"2469\":[\"\",2464,\" \"],\"2470\":[\"\",2465,\" \"],\"2471\":[\"\",2466,\" \"],\"2472\":[\"\",2467,\" \"],\"2473\":[\"\",2468,\" \"],\"2474\":[\"\",2469,\" \"],\"2475\":[\"\",2470,\" \"],\"2476\":[\"\",2471,\" \"],\"2477\":[\"\",2472,\" \"],\"2478\":[\"\",2473,\" \"],\"2479\":[\"\",2474,\" \"],\"2480\":[\"\",2475,\" \"],\"2481\":[\"\",2476,\" \"],\"2482\":[\"\",2477,\" \"],\"2483\":[\"\",2478,\" \"],\"2484\":[\"\",2479,\" \"],\"2485\":[\"\",2480,\" \"],\"2486\":[\"\",2481,\" \"],\"2487\":[\"\",2482,\" \"],\"2488\":[\"\",2483,\" \"],\"2489\":[\"\",2484,\" \"],\"2490\":[\"\",2485,\" \"],\"2491\":[\"\",2486,\" \"],\"2492\":[\"\",2487,\" \"],\"2493\":[\"\",2488,\" \"],\"2494\":[\"\",2489,\" \"],\"2495\":[\"\",2490,\" \"],\"2496\":[\"\",2491,\" \"],\"2497\":[\"\",2492,\" \"],\"2498\":[\"\",2493,\" \"],\"2499\":[\"\",2494,\" \"],\"2500\":[\"\",2495,\" \"],\"2501\":[\"\",2496,\" \"],\"2502\":[\"\",2497,\" \"],\"2503\":[\"\",2498,\" \"],\"2504\":[\"\",2499,\" \"],\"2505\":[\"\",2500,\" \"],\"2506\":[\"\",2501,\" \"],\"2507\":[\"\",2502,\" \"],\"2508\":[\"\",2503,\" \"],\"2509\":[\"\",2504,\" \"],\"2510\":[\"\",2505,\" \"],\"2511\":[\"\",2506,\" \"],\"2512\":[\"\",2507,\" \"],\"2513\":[\"\",2508,\" \"],\"2514\":[\"\",2509,\" \"],\"2515\":[\"\",2510,\" \"],\"2516\":[\"\",2511,\" \"],\"2517\":[\"\",2512,\" \"],\"2518\":[\"\",2513,\" \"],\"2519\":[\"\",2514,\" \"],\"2520\":[\"\",2515,\" \"],\"2521\":[\"\",2516,\" \"],\"2522\":[\"\",2517,\" \"],\"2523\":[\"\",2518,\" \"],\"2524\":[\"\",2519,\" \"],\"2525\":[\"\",2520,\" \"],\"2526\":[\"\",2521,\" \"],\"2527\":[\"\",2522,\" \"],\"2528\":[\"\",2523,\" \"],\"2529\":[\"\",2524,\" \"],\"2530\":[\"\",2525,\" \"],\"2531\":[\"\",2526,\" 
\"],\"2532\":[\"\",2527,\" \"],\"2533\":[\"\",2528,\" \"],\"2534\":[\"\",2529,\" \"],\"2535\":[\"\",2530,\" \"],\"2536\":[\"\",2531,\" \"],\"2537\":[\"\",2532,\" \"],\"2538\":[\"\",2533,\" \"],\"2539\":[\"\",2534,\" \"],\"2540\":[\"\",2535,\" \"],\"2541\":[\"\",2536,\" \"],\"2542\":[\"\",2537,\" \"],\"2543\":[\"\",2538,\" \"],\"2544\":[\"\",2539,\" \"],\"2545\":[\"\",2540,\" \"],\"2546\":[\"\",2541,\" \"],\"2547\":[\"\",2542,\" \"],\"2548\":[\"\",2543,\" \"],\"2549\":[\"\",2544,\" \"],\"2550\":[\"\",2545,\" \"],\"2551\":[\"\",2546,\" \"],\"2552\":[\"\",2547,\" \"],\"2553\":[\"\",2548,\" \"],\"2554\":[\"\",2549,\" \"],\"2555\":[\"\",2550,\" \"],\"2556\":[\"\",2551,\" \"],\"2557\":[\"\",2552,\" \"],\"2558\":[\"\",2553,\" \"],\"2559\":[\"\",2554,\" \"],\"2560\":[\"\",2555,\" \"],\"2561\":[\"\",2556,\" \"],\"2562\":[\"\",2557,\" \"],\"2563\":[\"\",2558,\" \"],\"2564\":[\"\",2559,\" \"],\"2565\":[\"\",2560,\" \"],\"2566\":[\"\",2561,\" \"],\"2567\":[\"\",2562,\" \"],\"2568\":[\"\",2563,\" \"],\"2569\":[\"\",2564,\" \"],\"2570\":[\"\",2565,\" \"],\"2571\":[\"\",2566,\" \"],\"2572\":[\"\",2567,\" \"],\"2573\":[\"\",2568,\" \"],\"2574\":[\"\",2569,\" \"],\"2575\":[\"\",2570,\" \"],\"2576\":[\"\",2571,\" \"],\"2577\":[\"\",2572,\" \"],\"2578\":[\"\",2573,\" \"],\"2579\":[\"\",2574,\" \"],\"2580\":[\"\",2575,\" \"],\"2581\":[\"\",2576,\" \"],\"2582\":[\"\",2577,\" \"],\"2583\":[\"\",2578,\" \"],\"2584\":[\"\",2579,\" \"],\"2585\":[\"\",2580,\" \"],\"2586\":[\"\",2581,\" \"],\"2587\":[\"\",2582,\" \"],\"2588\":[\"\",2583,\" \"],\"2589\":[\"\",2584,\" \"],\"2590\":[\"\",2585,\" \"],\"2591\":[\"\",2586,\" \"],\"2592\":[\"\",2587,\" \"],\"2593\":[\"\",2588,\" \"],\"2594\":[\"\",2589,\" \"],\"2595\":[\"\",2590,\" \"],\"2596\":[\"\",2591,\" \"],\"2597\":[\"\",2592,\" \"],\"2598\":[\"\",2593,\" \"],\"2599\":[\"\",2594,\" \"],\"2600\":[\"\",2595,\" \"],\"2601\":[\"\",2596,\" \"],\"2602\":[\"\",2597,\" \"],\"2603\":[\"\",2598,\" \"],\"2604\":[\"\",2599,\" \"],\"2605\":[\"\",2600,\" \"],\"2606\":[\"\",2601,\" \"],\"2607\":[\"\",2602,\" \"],\"2608\":[\"\",2603,\" \"],\"2609\":[\"\",2604,\" \"],\"2610\":[\"\",2605,\" \"],\"2611\":[\"\",2606,\" \"],\"2612\":[\"\",2607,\" \"],\"2613\":[\"\",2608,\" \"],\"2614\":[\"\",2609,\" \"],\"2615\":[\"\",2610,\" \"],\"2616\":[\"\",2611,\" \"],\"2617\":[\"\",2612,\" \"],\"2618\":[\"\",2613,\" \"],\"2619\":[\"\",2614,\" \"],\"2620\":[\"\",2615,\" \"],\"2621\":[\"\",2616,\" \"],\"2622\":[\"\",2617,\" \"],\"2623\":[\"\",2618,\" \"],\"2624\":[\"\",2619,\" \"],\"2625\":[\"\",2620,\" \"],\"2626\":[\"\",2621,\" \"],\"2627\":[\"\",2622,\" \"],\"2628\":[\"\",2623,\" \"],\"2629\":[\"\",2624,\" \"],\"2630\":[\"\",2625,\" \"],\"2631\":[\"\",2626,\" \"],\"2632\":[\"\",2627,\" \"],\"2633\":[\"\",2628,\" \"],\"2634\":[\"\",2629,\" \"],\"2635\":[\"\",2630,\" \"],\"2636\":[\"\",2631,\" \"],\"2637\":[\"\",2632,\" \"],\"2638\":[\"\",2633,\" \"],\"2639\":[\"\",2634,\" \"],\"2640\":[\"\",2635,\" \"],\"2641\":[\"\",2636,\" \"],\"2642\":[\"\",2637,\" \"],\"2643\":[\"\",2638,\" \"],\"2644\":[\"\",2639,\" \"],\"2645\":[\"\",2640,\" \"],\"2646\":[\"\",2641,\" \"],\"2647\":[\"\",2642,\" \"],\"2648\":[\"\",2643,\" \"],\"2649\":[\"\",2644,\" 
\"]}},\"oldPath\":\"mercurial\\/exchange.py\",\"currentPath\":\"mercurial\\/exchange.py\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":[],\"type\":\"2\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"16\",\"delLines\":\"11\",\"hunks\":[{\"oldOffset\":\"1\",\"newOffset\":\"1\",\"oldLength\":\"2644\",\"newLength\":\"2649\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\" # exchange.py - utility to exchange data between repos.\\n #\\n # Copyright 2005-2007 Matt Mackall \\u003cmpm@selenic.com\\u003e\\n #\\n # This software may be used and distributed according to the terms of the\\n # GNU General Public License version 2 or any later version.\\n \\n from __future__ import absolute_import\\n \\n import collections\\n import hashlib\\n \\n from .i18n import _\\n from .node import (\\n     bin,\\n     hex,\\n     nullid,\\n     nullrev,\\n )\\n from .thirdparty import (\\n     attr,\\n )\\n from . import (\\n     bookmarks as bookmod,\\n     bundle2,\\n     changegroup,\\n     discovery,\\n     error,\\n+    exchangev2,\\n     lock as lockmod,\\n     logexchange,\\n     narrowspec,\\n     obsolete,\\n     phases,\\n     pushkey,\\n     pycompat,\\n     repository,\\n     scmutil,\\n     sslutil,\\n     streamclone,\\n     url as urlmod,\\n     util,\\n )\\n from .utils import (\\n     stringutil,\\n )\\n \\n urlerr = util.urlerr\\n urlreq = util.urlreq\\n \\n _NARROWACL_SECTION = 'narrowhgacl'\\n \\n # Maps bundle version human names to changegroup versions.\\n _bundlespeccgversions = {'v1': '01',\\n                          'v2': '02',\\n                          'packed1': 's1',\\n                          'bundle2': '02', #legacy\\n                         }\\n \\n # Maps bundle version with content opts to choose which part to bundle\\n _bundlespeccontentopts = {\\n     'v1': {\\n         'changegroup': True,\\n         'cg.version': '01',\\n         'obsolescence': False,\\n         'phases': False,\\n         'tagsfnodescache': False,\\n         'revbranchcache': False\\n     },\\n     'v2': {\\n         'changegroup': True,\\n         'cg.version': '02',\\n         'obsolescence': False,\\n         'phases': False,\\n         'tagsfnodescache': True,\\n         'revbranchcache': True\\n     },\\n     'packed1' : {\\n         'cg.version': 's1'\\n     }\\n }\\n _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']\\n \\n _bundlespecvariants = {\\\"streamv2\\\": {\\\"changegroup\\\": False, \\\"streamv2\\\": True,\\n                                     \\\"tagsfnodescache\\\": False,\\n                                     \\\"revbranchcache\\\": False}}\\n \\n # Compression engines allowed in version 1. 
THIS SHOULD NEVER CHANGE.\\n _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}\\n \\n @attr.s\\n class bundlespec(object):\\n     compression = attr.ib()\\n     wirecompression = attr.ib()\\n     version = attr.ib()\\n     wireversion = attr.ib()\\n     params = attr.ib()\\n     contentopts = attr.ib()\\n \\n def parsebundlespec(repo, spec, strict=True):\\n     \\\"\\\"\\\"Parse a bundle string specification into parts.\\n \\n     Bundle specifications denote a well-defined bundle\\/exchange format.\\n     The content of a given specification should not change over time in\\n     order to ensure that bundles produced by a newer version of Mercurial are\\n     readable from an older version.\\n \\n     The string currently has the form:\\n \\n        \\u003ccompression\\u003e-\\u003ctype\\u003e[;\\u003cparameter0\\u003e[;\\u003cparameter1\\u003e]]\\n \\n     Where \\u003ccompression\\u003e is one of the supported compression formats\\n     and \\u003ctype\\u003e is (currently) a version string. A \\\";\\\" can follow the type and\\n     all text afterwards is interpreted as URI encoded, \\\";\\\" delimited key=value\\n     pairs.\\n \\n     If ``strict`` is True (the default) \\u003ccompression\\u003e is required. Otherwise,\\n     it is optional.\\n \\n     Returns a bundlespec object of (compression, version, parameters).\\n     Compression will be ``None`` if not in strict mode and a compression isn't\\n     defined.\\n \\n     An ``InvalidBundleSpecification`` is raised when the specification is\\n     not syntactically well formed.\\n \\n     An ``UnsupportedBundleSpecification`` is raised when the compression or\\n     bundle type\\/version is not recognized.\\n \\n     Note: this function will likely eventually return a more complex data\\n     structure, including bundle2 part information.\\n     \\\"\\\"\\\"\\n     def parseparams(s):\\n         if ';' not in s:\\n             return s, {}\\n \\n         params = {}\\n         version, paramstr = s.split(';', 1)\\n \\n         for p in paramstr.split(';'):\\n             if '=' not in p:\\n                 raise error.InvalidBundleSpecification(\\n                     _('invalid bundle specification: '\\n                       'missing \\\"=\\\" in parameter: %s') % p)\\n \\n             key, value = p.split('=', 1)\\n             key = urlreq.unquote(key)\\n             value = urlreq.unquote(value)\\n             params[key] = value\\n \\n         return version, params\\n \\n \\n     if strict and '-' not in spec:\\n         raise error.InvalidBundleSpecification(\\n                 _('invalid bundle specification; '\\n                   'must be prefixed with compression: %s') % spec)\\n \\n     if '-' in spec:\\n         compression, version = spec.split('-', 1)\\n \\n         if compression not in util.compengines.supportedbundlenames:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('%s compression is not supported') % compression)\\n \\n         version, params = parseparams(version)\\n \\n         if version not in _bundlespeccgversions:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('%s is not a recognized bundle version') % version)\\n     else:\\n         # Value could be just the compression or just the version, in which\\n         # case some defaults are assumed (but only when not in strict mode).\\n         assert not strict\\n \\n         spec, params = parseparams(spec)\\n \\n         if spec in util.compengines.supportedbundlenames:\\n     
        compression = spec\\n             version = 'v1'\\n             # Generaldelta repos require v2.\\n             if 'generaldelta' in repo.requirements:\\n                 version = 'v2'\\n             # Modern compression engines require v2.\\n             if compression not in _bundlespecv1compengines:\\n                 version = 'v2'\\n         elif spec in _bundlespeccgversions:\\n             if spec == 'packed1':\\n                 compression = 'none'\\n             else:\\n                 compression = 'bzip2'\\n             version = spec\\n         else:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('%s is not a recognized bundle specification') % spec)\\n \\n     # Bundle version 1 only supports a known set of compression engines.\\n     if version == 'v1' and compression not in _bundlespecv1compengines:\\n         raise error.UnsupportedBundleSpecification(\\n             _('compression engine %s is not supported on v1 bundles') %\\n             compression)\\n \\n     # The specification for packed1 can optionally declare the data formats\\n     # required to apply it. If we see this metadata, compare against what the\\n     # repo supports and error if the bundle isn't compatible.\\n     if version == 'packed1' and 'requirements' in params:\\n         requirements = set(params['requirements'].split(','))\\n         missingreqs = requirements - repo.supportedformats\\n         if missingreqs:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('missing support for repository features: %s') %\\n                       ', '.join(sorted(missingreqs)))\\n \\n     # Compute contentopts based on the version\\n     contentopts = _bundlespeccontentopts.get(version, {}).copy()\\n \\n     # Process the variants\\n     if \\\"stream\\\" in params and params[\\\"stream\\\"] == \\\"v2\\\":\\n         variant = _bundlespecvariants[\\\"streamv2\\\"]\\n         contentopts.update(variant)\\n \\n     engine = util.compengines.forbundlename(compression)\\n     compression, wirecompression = engine.bundletype()\\n     wireversion = _bundlespeccgversions[version]\\n \\n     return bundlespec(compression, wirecompression, version, wireversion,\\n                       params, contentopts)\\n \\n def readbundle(ui, fh, fname, vfs=None):\\n     header = changegroup.readexactly(fh, 4)\\n \\n     alg = None\\n     if not fname:\\n         fname = \\\"stream\\\"\\n         if not header.startswith('HG') and header.startswith('\\\\0'):\\n             fh = changegroup.headerlessfixup(fh, header)\\n             header = \\\"HG10\\\"\\n             alg = 'UN'\\n     elif vfs:\\n         fname = vfs.join(fname)\\n \\n     magic, version = header[0:2], header[2:4]\\n \\n     if magic != 'HG':\\n         raise error.Abort(_('%s: not a Mercurial bundle') % fname)\\n     if version == '10':\\n         if alg is None:\\n             alg = changegroup.readexactly(fh, 2)\\n         return changegroup.cg1unpacker(fh, alg)\\n     elif version.startswith('2'):\\n         return bundle2.getunbundler(ui, fh, magicstring=magic + version)\\n     elif version == 'S1':\\n         return streamclone.streamcloneapplier(fh)\\n     else:\\n         raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))\\n \\n def getbundlespec(ui, fh):\\n     \\\"\\\"\\\"Infer the bundlespec from a bundle file handle.\\n \\n     The input file handle is seeked and the original seek position is not\\n     restored.\\n     \\\"\\\"\\\"\\n     def 
speccompression(alg):\\n         try:\\n             return util.compengines.forbundletype(alg).bundletype()[0]\\n         except KeyError:\\n             return None\\n \\n     b = readbundle(ui, fh, None)\\n     if isinstance(b, changegroup.cg1unpacker):\\n         alg = b._type\\n         if alg == '_truncatedBZ':\\n             alg = 'BZ'\\n         comp = speccompression(alg)\\n         if not comp:\\n             raise error.Abort(_('unknown compression algorithm: %s') % alg)\\n         return '%s-v1' % comp\\n     elif isinstance(b, bundle2.unbundle20):\\n         if 'Compression' in b.params:\\n             comp = speccompression(b.params['Compression'])\\n             if not comp:\\n                 raise error.Abort(_('unknown compression algorithm: %s') % comp)\\n         else:\\n             comp = 'none'\\n \\n         version = None\\n         for part in b.iterparts():\\n             if part.type == 'changegroup':\\n                 version = part.params['version']\\n                 if version in ('01', '02'):\\n                     version = 'v2'\\n                 else:\\n                     raise error.Abort(_('changegroup version %s does not have '\\n                                         'a known bundlespec') % version,\\n                                       hint=_('try upgrading your Mercurial '\\n                                               'client'))\\n             elif part.type == 'stream2' and version is None:\\n                 # A stream2 part requires to be part of a v2 bundle\\n                 version = \\\"v2\\\"\\n                 requirements = urlreq.unquote(part.params['requirements'])\\n                 splitted = requirements.split()\\n                 params = bundle2._formatrequirementsparams(splitted)\\n                 return 'none-v2;stream=v2;%s' % params\\n \\n         if not version:\\n             raise error.Abort(_('could not identify changegroup version in '\\n                                 'bundle'))\\n \\n         return '%s-%s' % (comp, version)\\n     elif isinstance(b, streamclone.streamcloneapplier):\\n         requirements = streamclone.readbundle1header(fh)[2]\\n         formatted = bundle2._formatrequirementsparams(requirements)\\n         return 'none-packed1;%s' % formatted\\n     else:\\n         raise error.Abort(_('unknown bundle type: %s') % b)\\n \\n def _computeoutgoing(repo, heads, common):\\n     \\\"\\\"\\\"Computes which revs are outgoing given a set of common\\n     and a set of heads.\\n \\n     This is a separate function so extensions can have access to\\n     the logic.\\n \\n     Returns a discovery.outgoing object.\\n     \\\"\\\"\\\"\\n     cl = repo.changelog\\n     if common:\\n         hasnode = cl.hasnode\\n         common = [n for n in common if hasnode(n)]\\n     else:\\n         common = [nullid]\\n     if not heads:\\n         heads = cl.heads()\\n     return discovery.outgoing(repo, common, heads)\\n \\n def _forcebundle1(op):\\n     \\\"\\\"\\\"return true if a pull\\/push must use bundle1\\n \\n     This function is used to allow testing of the older bundle version\\\"\\\"\\\"\\n     ui = op.repo.ui\\n     # The goal is this config is to allow developer to choose the bundle\\n     # version used during exchanged. 
This is especially handy during test.\\n     # Value is a list of bundle version to be picked from, highest version\\n     # should be used.\\n     #\\n     # developer config: devel.legacy.exchange\\n     exchange = ui.configlist('devel', 'legacy.exchange')\\n     forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange\\n     return forcebundle1 or not op.remote.capable('bundle2')\\n \\n class pushoperation(object):\\n     \\\"\\\"\\\"A object that represent a single push operation\\n \\n     Its purpose is to carry push related state and very common operations.\\n \\n     A new pushoperation should be created at the beginning of each push and\\n     discarded afterward.\\n     \\\"\\\"\\\"\\n \\n     def __init__(self, repo, remote, force=False, revs=None, newbranch=False,\\n                  bookmarks=(), pushvars=None):\\n         # repo we push from\\n         self.repo = repo\\n         self.ui = repo.ui\\n         # repo we push to\\n         self.remote = remote\\n         # force option provided\\n         self.force = force\\n         # revs to be pushed (None is \\\"all\\\")\\n         self.revs = revs\\n         # bookmark explicitly pushed\\n         self.bookmarks = bookmarks\\n         # allow push of new branch\\n         self.newbranch = newbranch\\n         # step already performed\\n         # (used to check what steps have been already performed through bundle2)\\n         self.stepsdone = set()\\n         # Integer version of the changegroup push result\\n         # - None means nothing to push\\n         # - 0 means HTTP error\\n         # - 1 means we pushed and remote head count is unchanged *or*\\n         #   we have outgoing changesets but refused to push\\n         # - other values as described by addchangegroup()\\n         self.cgresult = None\\n         # Boolean value for the bookmark push\\n         self.bkresult = None\\n         # discover.outgoing object (contains common and outgoing data)\\n         self.outgoing = None\\n         # all remote topological heads before the push\\n         self.remoteheads = None\\n         # Details of the remote branch pre and post push\\n         #\\n         # mapping: {'branch': ([remoteheads],\\n         #                      [newheads],\\n         #                      [unsyncedheads],\\n         #                      [discardedheads])}\\n         # - branch: the branch name\\n         # - remoteheads: the list of remote heads known locally\\n         #                None if the branch is new\\n         # - newheads: the new remote heads (known locally) with outgoing pushed\\n         # - unsyncedheads: the list of remote heads unknown locally.\\n         # - discardedheads: the list of remote heads made obsolete by the push\\n         self.pushbranchmap = None\\n         # testable as a boolean indicating if any nodes are missing locally.\\n         self.incoming = None\\n         # summary of the remote phase situation\\n         self.remotephases = None\\n         # phases changes that must be pushed along side the changesets\\n         self.outdatedphases = None\\n         # phases changes that must be pushed if changeset push fails\\n         self.fallbackoutdatedphases = None\\n         # outgoing obsmarkers\\n         self.outobsmarkers = set()\\n         # outgoing bookmarks\\n         self.outbookmarks = []\\n         # transaction manager\\n         self.trmanager = None\\n         # map { pushkey partid -\\u003e callback handling failure}\\n         # used to handle exception from 
mandatory pushkey part failure\\n         self.pkfailcb = {}\\n         # an iterable of pushvars or None\\n         self.pushvars = pushvars\\n \\n     @util.propertycache\\n     def futureheads(self):\\n         \\\"\\\"\\\"future remote heads if the changeset push succeeds\\\"\\\"\\\"\\n         return self.outgoing.missingheads\\n \\n     @util.propertycache\\n     def fallbackheads(self):\\n         \\\"\\\"\\\"future remote heads if the changeset push fails\\\"\\\"\\\"\\n         if self.revs is None:\\n             # not target to push, all common are relevant\\n             return self.outgoing.commonheads\\n         unfi = self.repo.unfiltered()\\n         # I want cheads = heads(::missingheads and ::commonheads)\\n         # (missingheads is revs with secret changeset filtered out)\\n         #\\n         # This can be expressed as:\\n         #     cheads = ( (missingheads and ::commonheads)\\n         #              + (commonheads and ::missingheads))\\\"\\n         #              )\\n         #\\n         # while trying to push we already computed the following:\\n         #     common = (::commonheads)\\n         #     missing = ((commonheads::missingheads) - commonheads)\\n         #\\n         # We can pick:\\n         # * missingheads part of common (::commonheads)\\n         common = self.outgoing.common\\n         nm = self.repo.changelog.nodemap\\n         cheads = [node for node in self.revs if nm[node] in common]\\n         # and\\n         # * commonheads parents on missing\\n         revset = unfi.set('%ln and parents(roots(%ln))',\\n                          self.outgoing.commonheads,\\n                          self.outgoing.missing)\\n         cheads.extend(c.node() for c in revset)\\n         return cheads\\n \\n     @property\\n     def commonheads(self):\\n         \\\"\\\"\\\"set of all common heads after changeset bundle push\\\"\\\"\\\"\\n         if self.cgresult:\\n             return self.futureheads\\n         else:\\n             return self.fallbackheads\\n \\n # mapping of message used when pushing bookmark\\n bookmsgmap = {'update': (_(\\\"updating bookmark %s\\\\n\\\"),\\n                          _('updating bookmark %s failed!\\\\n')),\\n               'export': (_(\\\"exporting bookmark %s\\\\n\\\"),\\n                          _('exporting bookmark %s failed!\\\\n')),\\n               'delete': (_(\\\"deleting remote bookmark %s\\\\n\\\"),\\n                          _('deleting remote bookmark %s failed!\\\\n')),\\n               }\\n \\n \\n def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),\\n          opargs=None):\\n     '''Push outgoing changesets (limited by revs) from a local\\n     repository to remote. 
Return an integer:\\n       - None means nothing to push\\n       - 0 means HTTP error\\n       - 1 means we pushed and remote head count is unchanged *or*\\n         we have outgoing changesets but refused to push\\n       - other values as described by addchangegroup()\\n     '''\\n     if opargs is None:\\n         opargs = {}\\n     pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,\\n                            **pycompat.strkwargs(opargs))\\n     if pushop.remote.local():\\n         missing = (set(pushop.repo.requirements)\\n                    - pushop.remote.local().supported)\\n         if missing:\\n             msg = _(\\\"required features are not\\\"\\n                     \\\" supported in the destination:\\\"\\n                     \\\" %s\\\") % (', '.join(sorted(missing)))\\n             raise error.Abort(msg)\\n \\n     if not pushop.remote.canpush():\\n         raise error.Abort(_(\\\"destination does not support push\\\"))\\n \\n     if not pushop.remote.capable('unbundle'):\\n         raise error.Abort(_('cannot push: destination does not support the '\\n                             'unbundle wire protocol command'))\\n \\n     # get lock as we might write phase data\\n     wlock = lock = None\\n     try:\\n         # bundle2 push may receive a reply bundle touching bookmarks or other\\n         # things requiring the wlock. Take it now to ensure proper ordering.\\n         maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')\\n         if (not _forcebundle1(pushop)) and maypushback:\\n             wlock = pushop.repo.wlock()\\n         lock = pushop.repo.lock()\\n         pushop.trmanager = transactionmanager(pushop.repo,\\n                                               'push-response',\\n                                               pushop.remote.url())\\n     except error.LockUnavailable as err:\\n         # source repo cannot be locked.\\n         # We do not abort the push, but just disable the local phase\\n         # synchronisation.\\n         msg = 'cannot lock source repository: %s\\\\n' % err\\n         pushop.ui.debug(msg)\\n \\n     with wlock or util.nullcontextmanager(), \\\\\\n             lock or util.nullcontextmanager(), \\\\\\n             pushop.trmanager or util.nullcontextmanager():\\n         pushop.repo.checkpush(pushop)\\n         _pushdiscovery(pushop)\\n         if not _forcebundle1(pushop):\\n             _pushbundle2(pushop)\\n         _pushchangeset(pushop)\\n         _pushsyncphase(pushop)\\n         _pushobsolete(pushop)\\n         _pushbookmark(pushop)\\n \\n     if repo.ui.configbool('experimental', 'remotenames'):\\n         logexchange.pullremotenames(repo, remote)\\n \\n     return pushop\\n \\n # list of steps to perform discovery before push\\n pushdiscoveryorder = []\\n \\n # Mapping between step name and function\\n #\\n # This exists to help extensions wrap steps if necessary\\n pushdiscoverymapping = {}\\n \\n def pushdiscovery(stepname):\\n     \\\"\\\"\\\"decorator for function performing discovery before push\\n \\n     The function is added to the step -\\u003e function mapping and appended to the\\n     list of steps.  
Beware that decorated function will be added in order (this\\n     may matter).\\n \\n     You can only use this decorator for a new step, if you want to wrap a step\\n     from an extension, change the pushdiscovery dictionary directly.\\\"\\\"\\\"\\n     def dec(func):\\n         assert stepname not in pushdiscoverymapping\\n         pushdiscoverymapping[stepname] = func\\n         pushdiscoveryorder.append(stepname)\\n         return func\\n     return dec\\n \\n def _pushdiscovery(pushop):\\n     \\\"\\\"\\\"Run all discovery steps\\\"\\\"\\\"\\n     for stepname in pushdiscoveryorder:\\n         step = pushdiscoverymapping[stepname]\\n         step(pushop)\\n \\n @pushdiscovery('changeset')\\n def _pushdiscoverychangeset(pushop):\\n     \\\"\\\"\\\"discover the changeset that need to be pushed\\\"\\\"\\\"\\n     fci = discovery.findcommonincoming\\n     if pushop.revs:\\n         commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,\\n                         ancestorsof=pushop.revs)\\n     else:\\n         commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)\\n     common, inc, remoteheads = commoninc\\n     fco = discovery.findcommonoutgoing\\n     outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,\\n                    commoninc=commoninc, force=pushop.force)\\n     pushop.outgoing = outgoing\\n     pushop.remoteheads = remoteheads\\n     pushop.incoming = inc\\n \\n @pushdiscovery('phase')\\n def _pushdiscoveryphase(pushop):\\n     \\\"\\\"\\\"discover the phase that needs to be pushed\\n \\n     (computed for both success and failure case for changesets push)\\\"\\\"\\\"\\n     outgoing = pushop.outgoing\\n     unfi = pushop.repo.unfiltered()\\n     remotephases = listkeys(pushop.remote, 'phases')\\n \\n     if (pushop.ui.configbool('ui', '_usedassubrepo')\\n         and remotephases    # server supports phases\\n         and not pushop.outgoing.missing # no changesets to be pushed\\n         and remotephases.get('publishing', False)):\\n         # When:\\n         # - this is a subrepo push\\n         # - and remote support phase\\n         # - and no changeset are to be pushed\\n         # - and remote is publishing\\n         # We may be in issue 3781 case!\\n         # We drop the possible phase synchronisation done by\\n         # courtesy to publish changesets possibly locally draft\\n         # on the remote.\\n         pushop.outdatedphases = []\\n         pushop.fallbackoutdatedphases = []\\n         return\\n \\n     pushop.remotephases = phases.remotephasessummary(pushop.repo,\\n                                                      pushop.fallbackheads,\\n                                                      remotephases)\\n     droots = pushop.remotephases.draftroots\\n \\n     extracond = ''\\n     if not pushop.remotephases.publishing:\\n         extracond = ' and public()'\\n     revset = 'heads((%%ln::%%ln) %s)' % extracond\\n     # Get the list of all revs draft on remote by public here.\\n     # XXX Beware that revset break if droots is not strictly\\n     # XXX root we may want to ensure it is but it is costly\\n     fallback = list(unfi.set(revset, droots, pushop.fallbackheads))\\n     if not outgoing.missing:\\n         future = fallback\\n     else:\\n         # adds changeset we are going to push as draft\\n         #\\n         # should not be necessary for publishing server, but because of an\\n         # issue fixed in xxxxx we have to do it anyway.\\n         fdroots = list(unfi.set('roots(%ln  + %ln::)',\\n           
             outgoing.missing, droots))\\n         fdroots = [f.node() for f in fdroots]\\n         future = list(unfi.set(revset, fdroots, pushop.futureheads))\\n     pushop.outdatedphases = future\\n     pushop.fallbackoutdatedphases = fallback\\n \\n @pushdiscovery('obsmarker')\\n def _pushdiscoveryobsmarkers(pushop):\\n     if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):\\n         return\\n \\n     if not pushop.repo.obsstore:\\n         return\\n \\n     if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):\\n         return\\n \\n     repo = pushop.repo\\n     # very naive computation, that can be quite expensive on big repo.\\n     # However: evolution is currently slow on them anyway.\\n     nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))\\n     pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)\\n \\n @pushdiscovery('bookmarks')\\n def _pushdiscoverybookmarks(pushop):\\n     ui = pushop.ui\\n     repo = pushop.repo.unfiltered()\\n     remote = pushop.remote\\n     ui.debug(\\\"checking for updated bookmarks\\\\n\\\")\\n     ancestors = ()\\n     if pushop.revs:\\n         revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)\\n         ancestors = repo.changelog.ancestors(revnums, inclusive=True)\\n \\n     remotebookmark = listkeys(remote, 'bookmarks')\\n \\n     explicit = set([repo._bookmarks.expandname(bookmark)\\n                     for bookmark in pushop.bookmarks])\\n \\n     remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)\\n     comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)\\n \\n     def safehex(x):\\n         if x is None:\\n             return x\\n         return hex(x)\\n \\n     def hexifycompbookmarks(bookmarks):\\n         return [(b, safehex(scid), safehex(dcid))\\n                 for (b, scid, dcid) in bookmarks]\\n \\n     comp = [hexifycompbookmarks(marks) for marks in comp]\\n     return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)\\n \\n def _processcompared(pushop, pushed, explicit, remotebms, comp):\\n     \\\"\\\"\\\"take decision on bookmark to pull from the remote bookmark\\n \\n     Exist to help extensions who want to alter this behavior.\\n     \\\"\\\"\\\"\\n     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp\\n \\n     repo = pushop.repo\\n \\n     for b, scid, dcid in advsrc:\\n         if b in explicit:\\n             explicit.remove(b)\\n         if not pushed or repo[scid].rev() in pushed:\\n             pushop.outbookmarks.append((b, dcid, scid))\\n     # search added bookmark\\n     for b, scid, dcid in addsrc:\\n         if b in explicit:\\n             explicit.remove(b)\\n             pushop.outbookmarks.append((b, '', scid))\\n     # search for overwritten bookmark\\n     for b, scid, dcid in list(advdst) + list(diverge) + list(differ):\\n         if b in explicit:\\n             explicit.remove(b)\\n             pushop.outbookmarks.append((b, dcid, scid))\\n     # search for bookmark to delete\\n     for b, scid, dcid in adddst:\\n         if b in explicit:\\n             explicit.remove(b)\\n             # treat as \\\"deleted locally\\\"\\n             pushop.outbookmarks.append((b, dcid, ''))\\n     # identical bookmarks shouldn't get reported\\n     for b, scid, dcid in same:\\n         if b in explicit:\\n             explicit.remove(b)\\n \\n     if explicit:\\n         explicit = sorted(explicit)\\n         # we should probably list all of them\\n         pushop.ui.warn(_('bookmark %s does 
not exist on the local '\\n                          'or remote repository!\\\\n') % explicit[0])\\n         pushop.bkresult = 2\\n \\n     pushop.outbookmarks.sort()\\n \\n def _pushcheckoutgoing(pushop):\\n     outgoing = pushop.outgoing\\n     unfi = pushop.repo.unfiltered()\\n     if not outgoing.missing:\\n         # nothing to push\\n         scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)\\n         return False\\n     # something to push\\n     if not pushop.force:\\n         # if repo.obsstore == False --\\u003e no obsolete\\n         # then, save the iteration\\n         if unfi.obsstore:\\n             # this message are here for 80 char limit reason\\n             mso = _(\\\"push includes obsolete changeset: %s!\\\")\\n             mspd = _(\\\"push includes phase-divergent changeset: %s!\\\")\\n             mscd = _(\\\"push includes content-divergent changeset: %s!\\\")\\n             mst = {\\\"orphan\\\": _(\\\"push includes orphan changeset: %s!\\\"),\\n                    \\\"phase-divergent\\\": mspd,\\n                    \\\"content-divergent\\\": mscd}\\n             # If we are to push if there is at least one\\n             # obsolete or unstable changeset in missing, at\\n             # least one of the missinghead will be obsolete or\\n             # unstable. So checking heads only is ok\\n             for node in outgoing.missingheads:\\n                 ctx = unfi[node]\\n                 if ctx.obsolete():\\n                     raise error.Abort(mso % ctx)\\n                 elif ctx.isunstable():\\n                     # TODO print more than one instability in the abort\\n                     # message\\n                     raise error.Abort(mst[ctx.instabilities()[0]] % ctx)\\n \\n         discovery.checkheads(pushop)\\n     return True\\n \\n # List of names of steps to perform for an outgoing bundle2, order matters.\\n b2partsgenorder = []\\n \\n # Mapping between step name and function\\n #\\n # This exists to help extensions wrap steps if necessary\\n b2partsgenmapping = {}\\n \\n def b2partsgenerator(stepname, idx=None):\\n     \\\"\\\"\\\"decorator for function generating bundle2 part\\n \\n     The function is added to the step -\\u003e function mapping and appended to the\\n     list of steps.  
Beware that decorated functions will be added in order\\n     (this may matter).\\n \\n     You can only use this decorator for new steps, if you want to wrap a step\\n     from an extension, attack the b2partsgenmapping dictionary directly.\\\"\\\"\\\"\\n     def dec(func):\\n         assert stepname not in b2partsgenmapping\\n         b2partsgenmapping[stepname] = func\\n         if idx is None:\\n             b2partsgenorder.append(stepname)\\n         else:\\n             b2partsgenorder.insert(idx, stepname)\\n         return func\\n     return dec\\n \\n def _pushb2ctxcheckheads(pushop, bundler):\\n     \\\"\\\"\\\"Generate race condition checking parts\\n \\n     Exists as an independent function to aid extensions\\n     \\\"\\\"\\\"\\n     # * 'force' do not check for push race,\\n     # * if we don't push anything, there are nothing to check.\\n     if not pushop.force and pushop.outgoing.missingheads:\\n         allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())\\n         emptyremote = pushop.pushbranchmap is None\\n         if not allowunrelated or emptyremote:\\n             bundler.newpart('check:heads', data=iter(pushop.remoteheads))\\n         else:\\n             affected = set()\\n             for branch, heads in pushop.pushbranchmap.iteritems():\\n                 remoteheads, newheads, unsyncedheads, discardedheads = heads\\n                 if remoteheads is not None:\\n                     remote = set(remoteheads)\\n                     affected |= set(discardedheads) & remote\\n                     affected |= remote - set(newheads)\\n             if affected:\\n                 data = iter(sorted(affected))\\n                 bundler.newpart('check:updated-heads', data=data)\\n \\n def _pushing(pushop):\\n     \\\"\\\"\\\"return True if we are pushing anything\\\"\\\"\\\"\\n     return bool(pushop.outgoing.missing\\n                 or pushop.outdatedphases\\n                 or pushop.outobsmarkers\\n                 or pushop.outbookmarks)\\n \\n @b2partsgenerator('check-bookmarks')\\n def _pushb2checkbookmarks(pushop, bundler):\\n     \\\"\\\"\\\"insert bookmark move checking\\\"\\\"\\\"\\n     if not _pushing(pushop) or pushop.force:\\n         return\\n     b2caps = bundle2.bundle2caps(pushop.remote)\\n     hasbookmarkcheck = 'bookmarks' in b2caps\\n     if not (pushop.outbookmarks and hasbookmarkcheck):\\n         return\\n     data = []\\n     for book, old, new in pushop.outbookmarks:\\n         old = bin(old)\\n         data.append((book, old))\\n     checkdata = bookmod.binaryencode(data)\\n     bundler.newpart('check:bookmarks', data=checkdata)\\n \\n @b2partsgenerator('check-phases')\\n def _pushb2checkphases(pushop, bundler):\\n     \\\"\\\"\\\"insert phase move checking\\\"\\\"\\\"\\n     if not _pushing(pushop) or pushop.force:\\n         return\\n     b2caps = bundle2.bundle2caps(pushop.remote)\\n     hasphaseheads = 'heads' in b2caps.get('phases', ())\\n     if pushop.remotephases is not None and hasphaseheads:\\n         # check that the remote phase has not changed\\n         checks = [[] for p in phases.allphases]\\n         checks[phases.public].extend(pushop.remotephases.publicheads)\\n         checks[phases.draft].extend(pushop.remotephases.draftroots)\\n         if any(checks):\\n             for nodes in checks:\\n                 nodes.sort()\\n             checkdata = phases.binaryencode(checks)\\n             bundler.newpart('check:phases', data=checkdata)\\n \\n @b2partsgenerator('changeset')\\n def 
 def _pushb2ctxcheckheads(pushop, bundler):
     """Generate race condition checking parts
 
     Exists as an independent function to aid extensions
     """
     # * 'force' does not check for push races,
     # * if we don't push anything, there is nothing to check.
     if not pushop.force and pushop.outgoing.missingheads:
         allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
         emptyremote = pushop.pushbranchmap is None
         if not allowunrelated or emptyremote:
             bundler.newpart('check:heads', data=iter(pushop.remoteheads))
         else:
             affected = set()
             for branch, heads in pushop.pushbranchmap.iteritems():
                 remoteheads, newheads, unsyncedheads, discardedheads = heads
                 if remoteheads is not None:
                     remote = set(remoteheads)
                     affected |= set(discardedheads) & remote
                     affected |= remote - set(newheads)
             if affected:
                 data = iter(sorted(affected))
                 bundler.newpart('check:updated-heads', data=data)
 
 def _pushing(pushop):
     """return True if we are pushing anything"""
     return bool(pushop.outgoing.missing
                 or pushop.outdatedphases
                 or pushop.outobsmarkers
                 or pushop.outbookmarks)
 
 @b2partsgenerator('check-bookmarks')
 def _pushb2checkbookmarks(pushop, bundler):
     """insert bookmark move checking"""
     if not _pushing(pushop) or pushop.force:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)
     hasbookmarkcheck = 'bookmarks' in b2caps
     if not (pushop.outbookmarks and hasbookmarkcheck):
         return
     data = []
     for book, old, new in pushop.outbookmarks:
         old = bin(old)
         data.append((book, old))
     checkdata = bookmod.binaryencode(data)
     bundler.newpart('check:bookmarks', data=checkdata)
 
 @b2partsgenerator('check-phases')
 def _pushb2checkphases(pushop, bundler):
     """insert phase move checking"""
     if not _pushing(pushop) or pushop.force:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)
     hasphaseheads = 'heads' in b2caps.get('phases', ())
     if pushop.remotephases is not None and hasphaseheads:
         # check that the remote phase has not changed
         checks = [[] for p in phases.allphases]
         checks[phases.public].extend(pushop.remotephases.publicheads)
         checks[phases.draft].extend(pushop.remotephases.draftroots)
         if any(checks):
             for nodes in checks:
                 nodes.sort()
             checkdata = phases.binaryencode(checks)
             bundler.newpart('check:phases', data=checkdata)
 
 @b2partsgenerator('changeset')
 def _pushb2ctx(pushop, bundler):
     """handle changegroup push through bundle2
 
     addchangegroup result is stored in the ``pushop.cgresult`` attribute.
     """
     if 'changesets' in pushop.stepsdone:
         return
     pushop.stepsdone.add('changesets')
     # Send known heads to the server for race detection.
     if not _pushcheckoutgoing(pushop):
         return
     pushop.repo.prepushoutgoinghooks(pushop)
 
     _pushb2ctxcheckheads(pushop, bundler)
 
     b2caps = bundle2.bundle2caps(pushop.remote)
     version = '01'
     cgversions = b2caps.get('changegroup')
     if cgversions:  # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
                       if v in changegroup.supportedoutgoingversions(
                           pushop.repo)]
         if not cgversions:
             raise ValueError(_('no common changegroup version'))
         version = max(cgversions)
     cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                       'push')
     cgpart = bundler.newpart('changegroup', data=cgstream)
     if cgversions:
         cgpart.addparam('version', version)
     if 'treemanifest' in pushop.repo.requirements:
         cgpart.addparam('treemanifest', '1')
     def handlereply(op):
         """extract addchangegroup returns from server reply"""
         cgreplies = op.records.getreplies(cgpart.id)
         assert len(cgreplies['changegroup']) == 1
         pushop.cgresult = cgreplies['changegroup'][0]['return']
     return handlereply
 
 @b2partsgenerator('phase')
 def _pushb2phases(pushop, bundler):
     """handle phase push through bundle2"""
     if 'phases' in pushop.stepsdone:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)
     ui = pushop.repo.ui
 
     legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
     haspushkey = 'pushkey' in b2caps
     hasphaseheads = 'heads' in b2caps.get('phases', ())
 
     if hasphaseheads and not legacyphase:
         return _pushb2phaseheads(pushop, bundler)
     elif haspushkey:
         return _pushb2phasespushkey(pushop, bundler)
 
 def _pushb2phaseheads(pushop, bundler):
     """push phase information through a bundle2 - binary part"""
     pushop.stepsdone.add('phases')
     if pushop.outdatedphases:
         updates = [[] for p in phases.allphases]
         updates[0].extend(h.node() for h in pushop.outdatedphases)
         phasedata = phases.binaryencode(updates)
         bundler.newpart('phase-heads', data=phasedata)
 
 def _pushb2phasespushkey(pushop, bundler):
     """push phase information through a bundle2 - pushkey part"""
     pushop.stepsdone.add('phases')
     part2node = []
 
     def handlefailure(pushop, exc):
         targetid = int(exc.partid)
         for partid, node in part2node:
             if partid == targetid:
                 raise error.Abort(_('updating %s to public failed') % node)
 
     enc = pushkey.encode
     for newremotehead in pushop.outdatedphases:
         part = bundler.newpart('pushkey')
         part.addparam('namespace', enc('phases'))
         part.addparam('key', enc(newremotehead.hex()))
         part.addparam('old', enc('%d' % phases.draft))
         part.addparam('new', enc('%d' % phases.public))
         part2node.append((part.id, newremotehead))
         pushop.pkfailcb[part.id] = handlefailure
 
     def handlereply(op):
         for partid, node in part2node:
             partrep = op.records.getreplies(partid)
             results = partrep['pushkey']
             assert len(results) <= 1
             msg = None
             if not results:
                 msg = _('server ignored update of %s to public!\n') % node
             elif not int(results[0]['return']):
                 msg = _('updating %s to public failed!\n') % node
             if msg is not None:
                 pushop.ui.warn(msg)
     return handlereply
 
 @b2partsgenerator('obsmarkers')
 def _pushb2obsmarkers(pushop, bundler):
     if 'obsmarkers' in pushop.stepsdone:
         return
     remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
     if obsolete.commonversion(remoteversions) is None:
         return
     pushop.stepsdone.add('obsmarkers')
     if pushop.outobsmarkers:
         markers = sorted(pushop.outobsmarkers)
         bundle2.buildobsmarkerspart(bundler, markers)
 
 @b2partsgenerator('bookmarks')
 def _pushb2bookmarks(pushop, bundler):
     """handle bookmark push through bundle2"""
     if 'bookmarks' in pushop.stepsdone:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)
 
     legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
     legacybooks = 'bookmarks' in legacy
 
     if not legacybooks and 'bookmarks' in b2caps:
         return _pushb2bookmarkspart(pushop, bundler)
     elif 'pushkey' in b2caps:
         return _pushb2bookmarkspushkey(pushop, bundler)
 
 def _bmaction(old, new):
     """small utility for bookmark pushing"""
     if not old:
         return 'export'
     elif not new:
         return 'delete'
     return 'update'
 
 def _pushb2bookmarkspart(pushop, bundler):
     pushop.stepsdone.add('bookmarks')
     if not pushop.outbookmarks:
         return
 
     allactions = []
     data = []
     for book, old, new in pushop.outbookmarks:
         new = bin(new)
         data.append((book, new))
         allactions.append((book, _bmaction(old, new)))
     checkdata = bookmod.binaryencode(data)
     bundler.newpart('bookmarks', data=checkdata)
 
     def handlereply(op):
         ui = pushop.ui
         # if success
         for book, action in allactions:
             ui.status(bookmsgmap[action][0] % book)
 
     return handlereply
 
 def _pushb2bookmarkspushkey(pushop, bundler):
     pushop.stepsdone.add('bookmarks')
     part2book = []
     enc = pushkey.encode
 
     def handlefailure(pushop, exc):
         targetid = int(exc.partid)
         for partid, book, action in part2book:
             if partid == targetid:
                 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
         # we should not be called for a part we did not generate
         assert False
 
     for book, old, new in pushop.outbookmarks:
         part = bundler.newpart('pushkey')
         part.addparam('namespace', enc('bookmarks'))
         part.addparam('key', enc(book))
         part.addparam('old', enc(old))
         part.addparam('new', enc(new))
         action = 'update'
         if not old:
             action = 'export'
         elif not new:
             action = 'delete'
         part2book.append((part.id, book, action))
         pushop.pkfailcb[part.id] = handlefailure
 
     def handlereply(op):
         ui = pushop.ui
         for partid, book, action in part2book:
             partrep = op.records.getreplies(partid)
             results = partrep['pushkey']
             assert len(results) <= 1
             if not results:
                 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
             else:
                 ret = int(results[0]['return'])
                 if ret:
                     ui.status(bookmsgmap[action][0] % book)
                 else:
                     ui.warn(bookmsgmap[action][1] % book)
                     if pushop.bkresult is not None:
                         pushop.bkresult = 1
     return handlereply
 
 @b2partsgenerator('pushvars', idx=0)
 def _getbundlesendvars(pushop, bundler):
     '''send shellvars via bundle2'''
     pushvars = pushop.pushvars
     if pushvars:
         shellvars = {}
         for raw in pushvars:
             if '=' not in raw:
                 msg = ("unable to parse variable '%s', should follow "
                         "'KEY=VALUE' or 'KEY=' format")
                 raise error.Abort(msg % raw)
             k, v = raw.split('=', 1)
             shellvars[k] = v
 
         part = bundler.newpart('pushvars')
 
         for key, value in shellvars.iteritems():
             part.addparam(key, value, mandatory=False)
 
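_getbundlesendvars above implements the client side of shell-variable pushing (the `--pushvars` push option). A hedged standalone sketch of the same KEY=VALUE parsing contract, for illustration only:

    def parsepushvars(rawvars):
        # Same format rule as _getbundlesendvars: 'KEY=VALUE' or 'KEY='.
        shellvars = {}
        for raw in rawvars:
            if '=' not in raw:
                raise ValueError("unable to parse variable %r" % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v
        return shellvars

    parsepushvars(['DEBUG=1', 'REASON='])  # -> {'DEBUG': '1', 'REASON': ''}

On the receiving side these parameters typically surface to server hooks as HG_USERVAR_* environment variables, though that wiring lives outside this file.
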
 def _pushbundle2(pushop):
     """push data to the remote using bundle2
 
     The only currently supported type of data is the changegroup, but this
     will evolve in the future."""
     bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
     pushback = (pushop.trmanager
                 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
 
     # create reply capability
     capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                       allowpushback=pushback,
                                                       role='client'))
     bundler.newpart('replycaps', data=capsblob)
     replyhandlers = []
     for partgenname in b2partsgenorder:
         partgen = b2partsgenmapping[partgenname]
         ret = partgen(pushop, bundler)
         if callable(ret):
             replyhandlers.append(ret)
     # do not push if nothing to push
     if bundler.nbparts <= 1:
         return
     stream = util.chunkbuffer(bundler.getchunks())
     try:
         try:
             with pushop.remote.commandexecutor() as e:
                 reply = e.callcommand('unbundle', {
                     'bundle': stream,
                     'heads': ['force'],
                     'url': pushop.remote.url(),
                 }).result()
         except error.BundleValueError as exc:
             raise error.Abort(_('missing support for %s') % exc)
         try:
             trgetter = None
             if pushback:
                 trgetter = pushop.trmanager.transaction
             op = bundle2.processbundle(pushop.repo, reply, trgetter)
         except error.BundleValueError as exc:
             raise error.Abort(_('missing support for %s') % exc)
         except bundle2.AbortFromPart as exc:
             pushop.ui.status(_('remote: %s\n') % exc)
             if exc.hint is not None:
                 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
             raise error.Abort(_('push failed on remote'))
     except error.PushkeyFailed as exc:
         partid = int(exc.partid)
         if partid not in pushop.pkfailcb:
             raise
         pushop.pkfailcb[partid](pushop, exc)
     for rephand in replyhandlers:
         rephand(op)
 
 def _pushchangeset(pushop):
     """Make the actual push of changeset bundle to remote repo"""
     if 'changesets' in pushop.stepsdone:
         return
     pushop.stepsdone.add('changesets')
     if not _pushcheckoutgoing(pushop):
         return
 
     # Should have verified this in push().
     assert pushop.remote.capable('unbundle')
 
     pushop.repo.prepushoutgoinghooks(pushop)
     outgoing = pushop.outgoing
     # TODO: get bundlecaps from remote
     bundlecaps = None
     # create a changegroup from local
     if pushop.revs is None and not (outgoing.excluded
                             or pushop.repo.changelog.filteredrevs):
         # push everything,
         # use the fast path, no race possible on push
         cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                 fastpath=True, bundlecaps=bundlecaps)
     else:
         cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)
 
     # apply changegroup to remote
     # local repo finds heads on server, finds out what
     # revs it must push. once revs transferred, if server
     # finds it has different heads (someone else won
     # commit/push race), server aborts.
     if pushop.force:
         remoteheads = ['force']
     else:
         remoteheads = pushop.remoteheads
     # ssh: return remote's addchangegroup()
     # http: return remote's addchangegroup() or 0 for error
     pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                         pushop.repo.url())
 
 def _pushsyncphase(pushop):
     """synchronise phase information locally and remotely"""
     cheads = pushop.commonheads
     # even when we don't push, exchanging phase data is useful
     remotephases = listkeys(pushop.remote, 'phases')
     if (pushop.ui.configbool('ui', '_usedassubrepo')
         and remotephases    # server supports phases
         and pushop.cgresult is None # nothing was pushed
         and remotephases.get('publishing', False)):
         # When:
         # - this is a subrepo push
         # - and the remote supports phases
         # - and no changeset was pushed
         # - and the remote is publishing
         # We may be in the issue 3871 case!
         # We drop the phase synchronisation we would otherwise do as a
         # courtesy, so changesets that are draft locally may be published
         # on the remote.
         remotephases = {'publishing': 'True'}
     if not remotephases: # old server or public only reply from non-publishing
         _localphasemove(pushop, cheads)
         # don't push any phase data as there is nothing to push
     else:
         ana = phases.analyzeremotephases(pushop.repo, cheads,
                                          remotephases)
         pheads, droots = ana
         ### Apply remote phase on local
         if remotephases.get('publishing', False):
             _localphasemove(pushop, cheads)
         else: # publish = False
             _localphasemove(pushop, pheads)
             _localphasemove(pushop, cheads, phases.draft)
         ### Apply local phase on remote
 
         if pushop.cgresult:
             if 'phases' in pushop.stepsdone:
                 # phases already pushed through bundle2
                 return
             outdated = pushop.outdatedphases
         else:
             outdated = pushop.fallbackoutdatedphases
 
         pushop.stepsdone.add('phases')
 
         # filter heads already turned public by the push
         outdated = [c for c in outdated if c.node() not in pheads]
         # fallback to independent pushkey command
         for newremotehead in outdated:
             with pushop.remote.commandexecutor() as e:
                 r = e.callcommand('pushkey', {
                     'namespace': 'phases',
                     'key': newremotehead.hex(),
                     'old': '%d' % phases.draft,
                     'new': '%d' % phases.public
                 }).result()
 
             if not r:
                 pushop.ui.warn(_('updating %s to public failed!\n')
                                % newremotehead)
 
 def _localphasemove(pushop, nodes, phase=phases.public):
     """move <nodes> to <phase> in the local source repo"""
     if pushop.trmanager:
         phases.advanceboundary(pushop.repo,
                                pushop.trmanager.transaction(),
                                phase,
                                nodes)
     else:
         # repo is not locked, do not change any phases!
         # Informs the user that phases should have been moved when
         # applicable.
         actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
         phasestr = phases.phasenames[phase]
         if actualmoves:
             pushop.ui.status(_('cannot lock source repo, skipping '
                                'local %s phase update\n') % phasestr)
 
 def _pushobsolete(pushop):
     """utility function to push obsolete markers to a remote"""
     if 'obsmarkers' in pushop.stepsdone:
         return
     repo = pushop.repo
     remote = pushop.remote
     pushop.stepsdone.add('obsmarkers')
     if pushop.outobsmarkers:
         pushop.ui.debug('try to push obsolete markers to remote\n')
         rslts = []
         remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
         for key in sorted(remotedata, reverse=True):
             # reverse sort to ensure we end with dump0
             data = remotedata[key]
             rslts.append(remote.pushkey('obsolete', key, '', data))
         if [r for r in rslts if not r]:
             msg = _('failed to push some obsolete markers!\n')
             repo.ui.warn(msg)
 
 def _pushbookmark(pushop):
     """Update bookmark position on remote"""
     if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
         return
     pushop.stepsdone.add('bookmarks')
     ui = pushop.ui
     remote = pushop.remote
 
     for b, old, new in pushop.outbookmarks:
         action = 'update'
         if not old:
             action = 'export'
         elif not new:
             action = 'delete'
 
         with remote.commandexecutor() as e:
             r = e.callcommand('pushkey', {
                 'namespace': 'bookmarks',
                 'key': b,
                 'old': old,
                 'new': new,
             }).result()
 
         if r:
             ui.status(bookmsgmap[action][0] % b)
         else:
             ui.warn(bookmsgmap[action][1] % b)
             # discovery may have set the value from an invalid entry
             if pushop.bkresult is not None:
                 pushop.bkresult = 1
 
 class pulloperation(object):
     """An object that represents a single pull operation
 
     Its purpose is to carry pull-related state and very common operations.
 
     A new one should be created at the beginning of each pull and discarded
     afterward.
     """
 
     def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                  remotebookmarks=None, streamclonerequested=None,
                  includepats=None, excludepats=None):
         # repo we pull into
         self.repo = repo
         # repo we pull from
         self.remote = remote
         # revision we try to pull (None is "all")
         self.heads = heads
         # bookmarks pulled explicitly
         self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                   for bookmark in bookmarks]
         # do we force pull?
         self.force = force
         # whether a streaming clone was requested
         self.streamclonerequested = streamclonerequested
         # transaction manager
         self.trmanager = None
         # set of common changesets between local and remote before pull
         self.common = None
         # set of pulled heads
         self.rheads = None
         # list of missing changesets to fetch remotely
         self.fetch = None
         # remote bookmarks data
         self.remotebookmarks = remotebookmarks
         # result of changegroup pulling (used as return code by pull)
         self.cgresult = None
         # list of steps already done
         self.stepsdone = set()
         # Whether we attempted a clone from pre-generated bundles.
         self.clonebundleattempted = False
         # Set of file patterns to include.
         self.includepats = includepats
         # Set of file patterns to exclude.
         self.excludepats = excludepats
 
     @util.propertycache
     def pulledsubset(self):
         """heads of the set of changesets targeted by the pull"""
         # compute target subset
         if self.heads is None:
             # We pulled everything possible
             # sync on everything common
             c = set(self.common)
             ret = list(self.common)
             for n in self.rheads:
                 if n not in c:
                     ret.append(n)
             return ret
         else:
             # We pulled a specific subset
             # sync on this subset
             return self.heads
 
     @util.propertycache
     def canusebundle2(self):
         return not _forcebundle1(self)
 
     @util.propertycache
     def remotebundle2caps(self):
         return bundle2.bundle2caps(self.remote)
 
     def gettransaction(self):
         # deprecated; talk to trmanager directly
         return self.trmanager.transaction()
 
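pulloperation caches several derived values with util.propertycache. A minimal toy illustration of that pattern (the demo class is hypothetical, not part of this change):

    from mercurial import util

    class demo(object):
        @util.propertycache
        def expensive(self):
            print('computed')  # executes only on first access
            return 42

    d = demo()
    d.expensive   # prints 'computed' and returns 42
    d.expensive   # returns the cached 42; the method is not re-run

The cached value is stored on the instance itself, so e.g. pullop.pulledsubset is computed at most once per pull.
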
 class transactionmanager(util.transactional):
     """An object to manage the life cycle of a transaction
 
     It creates the transaction on demand and calls the appropriate hooks when
     closing the transaction."""
     def __init__(self, repo, source, url):
         self.repo = repo
         self.source = source
         self.url = url
         self._tr = None
 
     def transaction(self):
         """Return an open transaction object, constructing if necessary"""
         if not self._tr:
             trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
             self._tr = self.repo.transaction(trname)
             self._tr.hookargs['source'] = self.source
             self._tr.hookargs['url'] = self.url
         return self._tr
 
     def close(self):
         """close transaction if created"""
         if self._tr is not None:
             self._tr.close()
 
     def release(self):
         """release transaction if created"""
         if self._tr is not None:
             self._tr.release()
 
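Because transactionmanager subclasses util.transactional, it can be driven as a context manager. A hedged sketch of the lifecycle (assuming util.transactional's usual close-on-success, release-always behaviour):

    trmgr = transactionmanager(repo, 'pull', remote.url())
    with trmgr:
        tr = trmgr.transaction()  # opened lazily on first use
        # ... apply changegroups, phases, bookmarks under tr ...
    # a normal exit runs trmgr.close(); an exception path runs
    # trmgr.release(), which aborts a transaction left open

This is how pull() below wraps its work.
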
 def listkeys(remote, namespace):
     with remote.commandexecutor() as e:
         return e.callcommand('listkeys', {'namespace': namespace}).result()
 
 def _fullpullbundle2(repo, pullop):
     # The server may send a partial reply, i.e. when inlining
     # pre-computed bundles. In that case, update the common
     # set based on the results and pull another bundle.
     #
     # There are two indicators that the process is finished:
     # - no changeset has been added, or
     # - all remote heads are known locally.
     # The head check must use the unfiltered view as obsoletion
     # markers can hide heads.
     unfi = repo.unfiltered()
     unficl = unfi.changelog
     def headsofdiff(h1, h2):
         """Returns heads(h1 % h2)"""
         res = unfi.set('heads(%ln %% %ln)', h1, h2)
         return set(ctx.node() for ctx in res)
     def headsofunion(h1, h2):
         """Returns heads((h1 + h2) - null)"""
         res = unfi.set('heads((%ln + %ln - null))', h1, h2)
         return set(ctx.node() for ctx in res)
     while True:
         old_heads = unficl.heads()
         clstart = len(unficl)
         _pullbundle2(pullop)
         if repository.NARROW_REQUIREMENT in repo.requirements:
             # XXX narrow clones filter the heads on the server side during
             # XXX getbundle and result in partial replies as well.
             # XXX Disable pull bundles in this case as band aid to avoid
             # XXX extra round trips.
             break
         if clstart == len(unficl):
             break
         if all(unficl.hasnode(n) for n in pullop.rheads):
             break
         new_heads = headsofdiff(unficl.heads(), old_heads)
         pullop.common = headsofunion(new_heads, pullop.common)
         pullop.rheads = set(pullop.rheads) - pullop.common
 
 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
          streamclonerequested=None, includepats=None, excludepats=None):
     """Fetch repository data from a remote.
 
     This is the main function used to retrieve data from a remote repository.
 
     ``repo`` is the local repository to clone into.
     ``remote`` is a peer instance.
     ``heads`` is an iterable of revisions we want to pull. ``None`` (the
     default) means to pull everything from the remote.
     ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
     default, all remote bookmarks are pulled.
     ``opargs`` are additional keyword arguments to pass to ``pulloperation``
     initialization.
     ``streamclonerequested`` is a boolean indicating whether a "streaming
     clone" is requested. A "streaming clone" is essentially a raw file copy
     of revlogs from the server. This only works when the local repository is
     empty. The default value of ``None`` means to respect the server
     configuration for preferring stream clones.
     ``includepats`` and ``excludepats`` define explicit file patterns to
     include and exclude in storage, respectively. If not defined, narrow
     patterns from the repo instance are used, if available.
 
     Returns the ``pulloperation`` created for this pull.
     """
     if opargs is None:
         opargs = {}
 
     # We allow the narrow patterns to be passed in explicitly to provide more
     # flexibility for API consumers.
     if includepats or excludepats:
         includepats = includepats or set()
         excludepats = excludepats or set()
     else:
         includepats, excludepats = repo.narrowpats
 
     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)
 
     pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                            streamclonerequested=streamclonerequested,
                            includepats=includepats, excludepats=excludepats,
                            **pycompat.strkwargs(opargs))
 
     peerlocal = pullop.remote.local()
     if peerlocal:
         missing = set(peerlocal.requirements) - pullop.repo.supported
         if missing:
             msg = _("required features are not"
                     " supported in the destination:"
                     " %s") % (', '.join(sorted(missing)))
             raise error.Abort(msg)
 
     pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
     with repo.wlock(), repo.lock(), pullop.trmanager:
-        # This should ideally be in _pullbundle2(). However, it needs to run
-        # before discovery to avoid extra work.
-        _maybeapplyclonebundle(pullop)
-        streamclone.maybeperformlegacystreamclone(pullop)
-        _pulldiscovery(pullop)
-        if pullop.canusebundle2:
-            _fullpullbundle2(repo, pullop)
-        _pullchangeset(pullop)
-        _pullphase(pullop)
-        _pullbookmarks(pullop)
-        _pullobsolete(pullop)
+        # Use the modern wire protocol, if available.
+        if remote.capable('exchangev2'):
+            exchangev2.pull(pullop)
+        else:
+            # This should ideally be in _pullbundle2(). However, it needs to run
+            # before discovery to avoid extra work.
+            _maybeapplyclonebundle(pullop)
+            streamclone.maybeperformlegacystreamclone(pullop)
+            _pulldiscovery(pullop)
+            if pullop.canusebundle2:
+                _fullpullbundle2(repo, pullop)
+            _pullchangeset(pullop)
+            _pullphase(pullop)
+            _pullbookmarks(pullop)
+            _pullobsolete(pullop)
 
     # storing remotenames
     if repo.ui.configbool('experimental', 'remotenames'):
         logexchange.pullremotenames(repo, remote)
 
     return pullop
 
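A hedged usage sketch for pull() (the paths and URL are hypothetical, and the sketch glosses over the bytes-vs-str distinction this code base deals with under Python 3):

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, '/path/to/local')
    other = hg.peer(repo, {}, 'https://example.org/repo')
    pullop = pull(repo, other)          # pull everything, incl. bookmarks
    if pullop.cgresult == 0:
        ui.status('no changesets were added\n')
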
 # list of steps to perform discovery before pull
 pulldiscoveryorder = []
 
 # Mapping between step name and function
 #
 # This exists to help extensions wrap steps if necessary
 pulldiscoverymapping = {}
 
 def pulldiscovery(stepname):
     """decorator for function performing discovery before pull
 
     The function is added to the step -> function mapping and appended to the
     list of steps.  Beware that decorated functions will be added in order
     (this may matter).
 
     You can only use this decorator for a new step; if you want to wrap a step
     from an extension, change the pulldiscoverymapping dictionary directly."""
     def dec(func):
         assert stepname not in pulldiscoverymapping
         pulldiscoverymapping[stepname] = func
         pulldiscoveryorder.append(stepname)
         return func
     return dec
 
 def _pulldiscovery(pullop):
     """Run all discovery steps"""
     for stepname in pulldiscoveryorder:
         step = pulldiscoverymapping[stepname]
         step(pullop)
 
 @pulldiscovery('b1:bookmarks')
 def _pullbookmarkbundle1(pullop):
     """fetch bookmark data in bundle1 case
 
     If not using bundle2, we have to fetch bookmarks before changeset
     discovery to reduce the chance and impact of race conditions."""
     if pullop.remotebookmarks is not None:
         return
     if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
         # all known bundle2 servers now support listkeys, but let's be nice
         # with new implementations.
         return
     books = listkeys(pullop.remote, 'bookmarks')
     pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
 
 
 @pulldiscovery('changegroup')
 def _pulldiscoverychangegroup(pullop):
     """discovery phase for the pull
 
     Currently handles changeset discovery only; will eventually handle all
     discovery at some point."""
     tmp = discovery.findcommonincoming(pullop.repo,
                                        pullop.remote,
                                        heads=pullop.heads,
                                        force=pullop.force)
     common, fetch, rheads = tmp
     nm = pullop.repo.unfiltered().changelog.nodemap
     if fetch and rheads:
         # If a remote head is filtered locally, put it back in common.
         #
         # This is a hackish solution to catch most of the "common but locally
         # hidden" situations.  We do not perform discovery on the unfiltered
         # repository because it ends up doing a pathological number of round
         # trips for a huge amount of changesets we do not care about.
         #
         # If a set of such "common but filtered" changesets exists on the
         # server but does not include a remote head, we'll not be able to
         # detect it,
         scommon = set(common)
         for n in rheads:
             if n in nm:
                 if n not in scommon:
                     common.append(n)
         if set(rheads).issubset(set(common)):
             fetch = []
     pullop.common = common
     pullop.fetch = fetch
     pullop.rheads = rheads
 
 def _pullbundle2(pullop):
     """pull data using bundle2
 
     For now, the only supported data is the changegroup."""
     kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
 
     # make ui easier to access
     ui = pullop.repo.ui
 
     # At the moment we don't do stream clones over bundle2. If that is
     # implemented then here's where the check for that will go.
     streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
 
     # declare pull perimeters
     kwargs['common'] = pullop.common
     kwargs['heads'] = pullop.heads or pullop.rheads
 
     if streaming:
         kwargs['cg'] = False
         kwargs['stream'] = True
         pullop.stepsdone.add('changegroup')
         pullop.stepsdone.add('phases')
 
     else:
         # pulling changegroup
         pullop.stepsdone.add('changegroup')
 
         kwargs['cg'] = pullop.fetch
 
         legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
         hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
         if (not legacyphase and hasbinaryphase):
             kwargs['phases'] = True
             pullop.stepsdone.add('phases')
 
         if 'listkeys' in pullop.remotebundle2caps:
             if 'phases' not in pullop.stepsdone:
                 kwargs['listkeys'] = ['phases']
 
     bookmarksrequested = False
     legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
     hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
 
     if pullop.remotebookmarks is not None:
         pullop.stepsdone.add('request-bookmarks')
 
     if ('request-bookmarks' not in pullop.stepsdone
         and pullop.remotebookmarks is None
         and not legacybookmark and hasbinarybook):
         kwargs['bookmarks'] = True
         bookmarksrequested = True
 
     if 'listkeys' in pullop.remotebundle2caps:
         if 'request-bookmarks' not in pullop.stepsdone:
             # make sure to always include bookmark data when migrating
             # `hg incoming --bundle` to using this function.
             pullop.stepsdone.add('request-bookmarks')
             kwargs.setdefault('listkeys', []).append('bookmarks')
 
     # If this is a full pull / clone and the server supports the clone bundles
     # feature, tell the server whether we attempted a clone bundle. The
     # presence of this flag indicates the client supports clone bundles. This
     # will enable the server to treat clients that support clone bundles
     # differently from those that don't.
     if (pullop.remote.capable('clonebundles')
         and pullop.heads is None and list(pullop.common) == [nullid]):
         kwargs['cbattempted'] = pullop.clonebundleattempted
 
     if streaming:
         pullop.repo.ui.status(_('streaming all changes\n'))
     elif not pullop.fetch:
         pullop.repo.ui.status(_("no changes found\n"))
         pullop.cgresult = 0
     else:
         if pullop.heads is None and list(pullop.common) == [nullid]:
             pullop.repo.ui.status(_("requesting all changes\n"))
     if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
         remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
         if obsolete.commonversion(remoteversions) is not None:
             kwargs['obsmarkers'] = True
             pullop.stepsdone.add('obsmarkers')
     _pullbundle2extraprepare(pullop, kwargs)
 
     with pullop.remote.commandexecutor() as e:
         args = dict(kwargs)
         args['source'] = 'pull'
         bundle = e.callcommand('getbundle', args).result()
 
         try:
             op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                          source='pull')
             op.modes['bookmarks'] = 'records'
             bundle2.processbundle(pullop.repo, bundle, op=op)
         except bundle2.AbortFromPart as exc:
             pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
             raise error.Abort(_('pull failed on remote'), hint=exc.hint)
         except error.BundleValueError as exc:
             raise error.Abort(_('missing support for %s') % exc)
 
     if pullop.fetch:
         pullop.cgresult = bundle2.combinechangegroupresults(op)
 
     # processing phases change
     for namespace, value in op.records['listkeys']:
         if namespace == 'phases':
             _pullapplyphases(pullop, value)
 
     # processing bookmark update
     if bookmarksrequested:
         books = {}
         for record in op.records['bookmarks']:
             books[record['bookmark']] = record["node"]
         pullop.remotebookmarks = books
     else:
         for namespace, value in op.records['listkeys']:
             if namespace == 'bookmarks':
                 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
 
     # bookmark data were either already there or pulled in the bundle
     if pullop.remotebookmarks is not None:
         _pullbookmarks(pullop)
 
 def _pullbundle2extraprepare(pullop, kwargs):
     """hook function so that extensions can extend the getbundle call"""
 
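_pullbundle2extraprepare is deliberately a no-op hook. A hedged sketch of how an extension would typically use it (the wrapper and the extra argument are hypothetical):

    from mercurial import exchange, extensions

    def _extraprepare(orig, pullop, kwargs):
        kwargs['mycustomarg'] = True  # made-up getbundle argument
        return orig(pullop, kwargs)

    def extsetup(ui):
        extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
                                _extraprepare)
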
 def _pullchangeset(pullop):
     """pull changesets from the remote into the local repo"""
     # We delay opening the transaction as late as possible so we
     # don't open it for nothing and don't break future useful
     # rollback calls
     if 'changegroup' in pullop.stepsdone:
         return
     pullop.stepsdone.add('changegroup')
     if not pullop.fetch:
         pullop.repo.ui.status(_("no changes found\n"))
         pullop.cgresult = 0
         return
     tr = pullop.gettransaction()
     if pullop.heads is None and list(pullop.common) == [nullid]:
         pullop.repo.ui.status(_("requesting all changes\n"))
     elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
         # issue1320, avoid a race if remote changed after discovery
         pullop.heads = pullop.rheads
 
     if pullop.remote.capable('getbundle'):
         # TODO: get bundlecaps from remote
         cg = pullop.remote.getbundle('pull', common=pullop.common,
                                      heads=pullop.heads or pullop.rheads)
     elif pullop.heads is None:
         with pullop.remote.commandexecutor() as e:
             cg = e.callcommand('changegroup', {
                 'nodes': pullop.fetch,
                 'source': 'pull',
             }).result()
 
     elif not pullop.remote.capable('changegroupsubset'):
         raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
     else:
         with pullop.remote.commandexecutor() as e:
             cg = e.callcommand('changegroupsubset', {
                 'bases': pullop.fetch,
                 'heads': pullop.heads,
                 'source': 'pull',
             }).result()
 
     bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                    pullop.remote.url())
     pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
 
 def _pullphase(pullop):
     # Get remote phases data from remote
     if 'phases' in pullop.stepsdone:
         return
     remotephases = listkeys(pullop.remote, 'phases')
     _pullapplyphases(pullop, remotephases)
 
 def _pullapplyphases(pullop, remotephases):
     """apply phase movement from observed remote state"""
     if 'phases' in pullop.stepsdone:
         return
     pullop.stepsdone.add('phases')
     publishing = bool(remotephases.get('publishing', False))
     if remotephases and not publishing:
         # remote is new and non-publishing
         pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                  pullop.pulledsubset,
                                                  remotephases)
         dheads = pullop.pulledsubset
     else:
         # Remote is old or publishing; all common changesets
         # should be seen as public
         pheads = pullop.pulledsubset
         dheads = []
     unfi = pullop.repo.unfiltered()
     phase = unfi._phasecache.phase
     rev = unfi.changelog.nodemap.get
     public = phases.public
     draft = phases.draft
 
     # exclude changesets already public locally and update the others
     pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
     if pheads:
         tr = pullop.gettransaction()
         phases.advanceboundary(pullop.repo, tr, public, pheads)
 
     # exclude changesets already draft locally and update the others
     dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
     if dheads:
         tr = pullop.gettransaction()
         phases.advanceboundary(pullop.repo, tr, draft, dheads)
 
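Context for the phase filtering above, from mercurial.phases: public = 0, draft = 1, secret = 2, so a numerically higher phase is more private. The test phase(unfi, rev(pn)) > public therefore keeps only nodes that still need advancing, and phases.advanceboundary() only ever lowers a node's phase. A hedged mini-example mirroring the public branch of _pullapplyphases:

    # nodes already public are filtered out; the rest advance to public
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > phases.public]
    if pheads:
        phases.advanceboundary(pullop.repo, pullop.gettransaction(),
                               phases.public, pheads)
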
 def _pullbookmarks(pullop):
     """process the remote bookmark information to update the local one"""
     if 'bookmarks' in pullop.stepsdone:
         return
     pullop.stepsdone.add('bookmarks')
     repo = pullop.repo
     remotebookmarks = pullop.remotebookmarks
     bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                              pullop.remote.url(),
                              pullop.gettransaction,
                              explicit=pullop.explicitbookmarks)
 
 def _pullobsolete(pullop):
     """utility function to pull obsolete markers from a remote
 
     `gettransaction` is a function that returns the pull transaction, creating
     one if necessary. We return the transaction to inform the calling code
     that a new transaction may have been created (when applicable).
 
     Exists mostly to allow overriding for experimentation purposes"""
     if 'obsmarkers' in pullop.stepsdone:
         return
     pullop.stepsdone.add('obsmarkers')
     tr = None
     if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
         pullop.repo.ui.debug('fetching remote obsolete markers\n')
         remoteobs = listkeys(pullop.remote, 'obsolete')
         if 'dump0' in remoteobs:
             tr = pullop.gettransaction()
             markers = []
             for key in sorted(remoteobs, reverse=True):
                 if key.startswith('dump'):
                     data = util.b85decode(remoteobs[key])
                     version, newmarks = obsolete._readmarkers(data)
                     markers += newmarks
             if markers:
                 pullop.repo.obsstore.add(tr, markers)
             pullop.repo.invalidatevolatilesets()
     return tr
 
 def applynarrowacl(repo, kwargs):
     """Apply narrow fetch access control.
 
     This massages the named arguments for getbundle wire protocol commands
     so requested data is filtered through access control rules.
     """
     ui = repo.ui
     # TODO this assumes existence of HTTP and is a layering violation.
     username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
     user_includes = ui.configlist(
         _NARROWACL_SECTION, username + '.includes',
         ui.configlist(_NARROWACL_SECTION, 'default.includes'))
     user_excludes = ui.configlist(
         _NARROWACL_SECTION, username + '.excludes',
         ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
     if not user_includes:
         raise error.Abort(_("{} configuration for user {} is empty")
                           .format(_NARROWACL_SECTION, username))
 
     user_includes = [
         'path:.' if p == '*' else 'path:' + p for p in user_includes]
     user_excludes = [
         'path:.' if p == '*' else 'path:' + p for p in user_excludes]
 
     req_includes = set(kwargs.get(r'includepats', []))
     req_excludes = set(kwargs.get(r'excludepats', []))
 
     req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
         req_includes, req_excludes, user_includes, user_excludes)
 
     if invalid_includes:
         raise error.Abort(
             _("The following includes are not accessible for {}: {}")
             .format(username, invalid_includes))
 
     new_args = {}
     new_args.update(kwargs)
     new_args[r'narrow'] = True
     new_args[r'includepats'] = req_includes
     if req_excludes:
         new_args[r'excludepats'] = req_excludes
 
     return new_args
 
 def _computeellipsis(repo, common, heads, known, match, depth=None):
     """Compute the shape of a narrowed DAG.
 
     Args:
       repo: The repository we're transferring.
       common: The roots of the DAG range we're transferring.
               May be just [nullid], which means all ancestors of heads.
       heads: The heads of the DAG range we're transferring.
       match: The narrowmatcher that allows us to identify relevant changes.
       depth: If not None, only consider nodes to be full nodes if they are at
              most depth changesets away from one of heads.
 
     Returns:
       A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
 
         visitnodes: The list of nodes (either full or ellipsis) which
                     need to be sent to the client.
         relevant_nodes: The set of changelog nodes which change a file inside
                  the narrowspec. The client needs these as non-ellipsis nodes.
         ellipsisroots: A dict of {rev: parents} that is used in
                        narrowchangegroup to produce ellipsis nodes with the
                        correct parents.
     """
     cl = repo.changelog
     mfl = repo.manifestlog
 
     clrev = cl.rev
 
     commonrevs = {clrev(n) for n in common} | {nullrev}
     headsrevs = {clrev(n) for n in heads}
 
     if depth:
         revdepth = {h: 0 for h in headsrevs}
 
     ellipsisheads = collections.defaultdict(set)
     ellipsisroots = collections.defaultdict(set)
 
     def addroot(head, curchange):
         """Add a root to an ellipsis head, splitting heads with 3 roots."""
         ellipsisroots[head].add(curchange)
         # Recursively split ellipsis heads with 3 roots by finding the
         # roots' youngest common descendant which is an elided merge commit.
         # That descendant takes 2 of the 3 roots as its own, and becomes a
         # root of the head.
         while len(ellipsisroots[head]) > 2:
             child, roots = splithead(head)
             splitroots(head, child, roots)
             head = child  # Recurse in case we just added a 3rd root
 
     def splitroots(head, child, roots):
         ellipsisroots[head].difference_update(roots)
         ellipsisroots[head].add(child)
         ellipsisroots[child].update(roots)
         ellipsisroots[child].discard(child)
 
     def splithead(head):
         r1, r2, r3 = sorted(ellipsisroots[head])
         for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
             mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                             nr1, head, nr2, head)
             for j in mid:
                 if j == nr2:
                     return nr2, (nr1, nr2)
                 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                     return j, (nr1, nr2)
         raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                             'roots: %d %d %d') % (head, r1, r2, r3))
 
     missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
     visit = reversed(missing)
     relevant_nodes = set()
     visitnodes = [cl.node(m) for m in missing]
     required = set(headsrevs) | known
     for rev in visit:
         clrev = cl.changelogrevision(rev)
         ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
         if depth is not None:
             curdepth = revdepth[rev]
             for p in ps:
                 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
         needed = False
         shallow_enough = depth is None or revdepth[rev] <= depth
         if shallow_enough:
             curmf = mfl[clrev.manifest].read()
             if ps:
                 # We choose to not trust the changed files list in
                 # changesets because it's not always correct. TODO: could
                 # we trust it for the non-merge case?
                 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                 needed = bool(curmf.diff(p1mf, match))
                 if not needed and len(ps) > 1:
                     # For merge changes, the list of changed files is not
                     # helpful, since we need to emit the merge if a file
                     # in the narrow spec has changed on either side of the
                     # merge. As a result, we do a manifest diff to check.
                     p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                     needed = bool(curmf.diff(p2mf, match))
             else:
                 # For a root node, we need to include the node if any
                 # files in the node match the narrowspec.
                 needed = any(curmf.walk(match))
 
         if needed:
             for head in ellipsisheads[rev]:
                 addroot(head, rev)
             for p in ps:
                 required.add(p)
             relevant_nodes.add(cl.node(rev))
         else:
             if not ps:
                 ps = [nullrev]
             if rev in required:
                 for head in ellipsisheads[rev]:
                     addroot(head, rev)
                 for p in ps:
                     ellipsisheads[p].add(rev)
             else:
                 for p in ps:
                     ellipsisheads[p] |= ellipsisheads[rev]
 
     # add common changesets as roots of their reachable ellipsis heads
     for c in commonrevs:
         for head in ellipsisheads[c]:
             addroot(head, c)
     return visitnodes, relevant_nodes, ellipsisroots
 
 def caps20to10(repo, role):
     """return a set with appropriate options to use bundle20 during getbundle"""
     caps = {'HG20'}
     capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
     caps.add('bundle2=' + urlreq.quote(capsblob))
     return caps
 
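caps20to10 above URL-encodes the bundle2 capability blob into a legacy bundlecaps entry; getbundlechunks below reverses it. A hedged round-trip sketch (repo assumed to be an existing localrepo):

    from mercurial import bundle2, util
    urlreq = util.urlreq

    blob = bundle2.encodecaps(bundle2.getrepocaps(repo, role='client'))
    caps = {'HG20', 'bundle2=' + urlreq.quote(blob)}

    for bcaps in caps:  # decoding side, as in getbundlechunks
        if bcaps.startswith('bundle2='):
            b2caps = bundle2.decodecaps(
                urlreq.unquote(bcaps[len('bundle2='):]))
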
 # List of names of steps to perform for a bundle2 for getbundle, order matters.
 getbundle2partsorder = []
 
 # Mapping between step name and function
 #
 # This exists to help extensions wrap steps if necessary
 getbundle2partsmapping = {}
 
 def getbundle2partsgenerator(stepname, idx=None):
     """decorator for function generating bundle2 part for getbundle
 
     The function is added to the step -> function mapping and appended to the
     list of steps.  Beware that decorated functions will be added in order
     (this may matter).
 
     You can only use this decorator for new steps; if you want to wrap a step
     from an extension, modify the getbundle2partsmapping dictionary directly."""
     def dec(func):
         assert stepname not in getbundle2partsmapping
         getbundle2partsmapping[stepname] = func
         if idx is None:
             getbundle2partsorder.append(stepname)
         else:
             getbundle2partsorder.insert(idx, stepname)
         return func
     return dec
 
 def bundle2requested(bundlecaps):
     if bundlecaps is not None:
         return any(cap.startswith('HG2') for cap in bundlecaps)
     return False
 
 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                     **kwargs):
     """Return chunks constituting a bundle's raw data.
 
     Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
     passed.
 
     Returns a 2-tuple of a dict with metadata about the generated bundle
     and an iterator over raw chunks (of varying sizes).
     """
     kwargs = pycompat.byteskwargs(kwargs)
     info = {}
     usebundle2 = bundle2requested(bundlecaps)
     # bundle10 case
     if not usebundle2:
         if bundlecaps and not kwargs.get('cg', True):
             raise ValueError(_('request for bundle10 must include changegroup'))
 
         if kwargs:
             raise ValueError(_('unsupported getbundle arguments: %s')
                              % ', '.join(sorted(kwargs.keys())))
         outgoing = _computeoutgoing(repo, heads, common)
         info['bundleversion'] = 1
         return info, changegroup.makestream(repo, outgoing, '01', source,
                                             bundlecaps=bundlecaps)
 
     # bundle20 case
     info['bundleversion'] = 2
     b2caps = {}
     for bcaps in bundlecaps:
         if bcaps.startswith('bundle2='):
             blob = urlreq.unquote(bcaps[len('bundle2='):])
             b2caps.update(bundle2.decodecaps(blob))
     bundler = bundle2.bundle20(repo.ui, b2caps)
 
     kwargs['heads'] = heads
     kwargs['common'] = common
 
     for name in getbundle2partsorder:
         func = getbundle2partsmapping[name]
         func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
              **pycompat.strkwargs(kwargs))
 
     info['prefercompressed'] = bundler.prefercompressed
 
     return info, bundler.getchunks()
 
 @getbundle2partsgenerator('stream2')
 def _getbundlestream2(bundler, repo, *args, **kwargs):
     return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
 
 @getbundle2partsgenerator('changegroup')
 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                               b2caps=None, heads=None, common=None, **kwargs):
     """add a changegroup part to the requested bundle"""
     if not kwargs.get(r'cg', True):
         return
 
     version = '01'
     cgversions = b2caps.get('changegroup')
     if cgversions:  # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
                       if v in changegroup.supportedoutgoingversions(repo)]
         if not cgversions:
             raise ValueError(_('no common changegroup version'))
         version = max(cgversions)
 
     outgoing = _computeoutgoing(repo, heads, common)
     if not outgoing.missing:
         return
 
     if kwargs.get(r'narrow', False):
         include = sorted(filter(bool, kwargs.get(r'includepats', [])))
         exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
         filematcher = narrowspec.match(repo.root, include=include,
                                        exclude=exclude)
     else:
         filematcher = None
 
     cgstream = changegroup.makestream(repo, outgoing, version, source,
                                       bundlecaps=bundlecaps,
                                       filematcher=filematcher)
 
     part = bundler.newpart('changegroup', data=cgstream)
     if cgversions:
         part.addparam('version', version)
 
     part.addparam('nbchanges', '%d' % len(outgoing.missing),
                   mandatory=False)
 
     if 'treemanifest' in repo.requirements:
         part.addparam('treemanifest', '1')
 
     if kwargs.get(r'narrow', False) and (include or exclude):
         narrowspecpart = bundler.newpart('narrow:spec')
         if include:
             narrowspecpart.addparam(
                 'include', '\n'.join(include), mandatory=True)
         if exclude:
             narrowspecpart.addparam(
                 'exclude', '\n'.join(exclude), mandatory=True)
 
 @getbundle2partsgenerator('bookmarks')
 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                               b2caps=None, **kwargs):
     """add a bookmark part to the requested bundle"""
     if not kwargs.get(r'bookmarks', False):
         return
     if 'bookmarks' not in b2caps:
         raise ValueError(_('no common bookmarks exchange method'))
     books = bookmod.listbinbookmarks(repo)
     data = bookmod.binaryencode(books)
     if data:
         bundler.newpart('bookmarks', data=data)
 
 @getbundle2partsgenerator('listkeys')
 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                             b2caps=None, **kwargs):
     """add parts containing listkeys namespaces to the requested bundle"""
     listkeys = kwargs.get(r'listkeys', ())
     for namespace in listkeys:
         part = bundler.newpart('listkeys')
         part.addparam('namespace', namespace)
         keys = repo.listkeys(namespace).items()
         part.data = pushkey.encodekeys(keys)
 
 @getbundle2partsgenerator('obsmarkers')
 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, **kwargs):
     """add an obsolescence markers part to the requested bundle"""
     if kwargs.get(r'obsmarkers', False):
         if heads is None:
             heads = repo.heads()
         subset = [c.node() for c in repo.set('::%ln', heads)]
         markers = repo.obsstore.relevantmarkers(subset)
         markers = sorted(markers)
         bundle2.buildobsmarkerspart(bundler, markers)
 
 @getbundle2partsgenerator('phases')
 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, **kwargs):
     """add phase heads part to the requested bundle"""
     if kwargs.get(r'phases', False):
         if 'heads' not in b2caps.get('phases', ()):
             raise ValueError(_('no common phases exchange method'))
         if heads is None:
             heads = repo.heads()
 
         headsbyphase = collections.defaultdict(set)
         if repo.publishing():
             headsbyphase[phases.public] = heads
         else:
             # find the appropriate heads to move
 
             phase = repo._phasecache.phase
             node = repo.changelog.node
             rev = repo.changelog.rev
             for h in heads:
                 headsbyphase[phase(repo, rev(h))].add(h)
             seenphases = list(headsbyphase.keys())
 
             # We do not handle anything but public and draft phases for now
             if seenphases:
                 assert max(seenphases) <= phases.draft
 
             # if client is pulling non-public changesets, we need to find
             # intermediate public heads.
             draftheads = headsbyphase.get(phases.draft, set())
             if draftheads:
                 publicheads = headsbyphase.get(phases.public, set())
 
                 revset = 'heads(only(%ln, %ln) and public())'
                 extraheads = repo.revs(revset, draftheads, publicheads)
                 for r in extraheads:
                     headsbyphase[phases.public].add(node(r))
 
         # transform data in a format used by the encoding function
         phasemapping = []
         for phase in phases.allphases:
             phasemapping.append(sorted(headsbyphase[phase]))
 
         # generate the actual part
         phasedata = phases.binaryencode(phasemapping)
         bundler.newpart('phase-heads', data=phasedata)
 
 @getbundle2partsgenerator('hgtagsfnodes')
 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                          b2caps=None, heads=None, common=None,
                          **kwargs):
     """Transfer the .hgtags filenodes mapping.
 
     Only values for heads in this bundle will be transferred.
 
     The part data consists of pairs of 20 byte changeset node and .hgtags
     filenodes raw values.
     """
     # Don't send unless:
     # - changesets are being exchanged,
     # - the client supports it.
     if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
         return
 
     outgoing = _computeoutgoing(repo, heads, common)
     bundle2.addparttagsfnodescache(repo, bundler, outgoing)
 
 @getbundle2partsgenerator('cache:rev-branch-cache')
 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None,
                              **kwargs):
     """Transfer the rev-branch-cache mapping
 
     The payload is a series of data related to each branch
 
     1) branch name length
     2) number of open heads
     3) number of closed heads
     4) open heads nodes
     5) closed heads nodes
     """
     # Don't send unless:
     # - changesets are being exchanged,
     # - the client supports it,
     # - narrow bundle isn't in play (not currently compatible).
     if (not kwargs.get(r'cg', True)
         or 'rev-branch-cache' not in b2caps
         or kwargs.get(r'narrow', False)
         or repo.ui.has_section(_NARROWACL_SECTION)):
         return
 
     outgoing = _computeoutgoing(repo, heads, common)
     bundle2.addpartrevbranchcache(repo, bundler, outgoing)
 
 def check_heads(repo, their_heads, context):
     """check if the heads of a repo have been modified
 
     Used by peer for unbundling.
     """
     heads = repo.heads()
     heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
     if not (their_heads == ['force'] or their_heads == heads or
             their_heads == ['hashed', heads_hash]):
         # someone else committed/pushed/unbundled while we
         # were transferring data
         raise error.PushRaced('repository changed while %s - '
                               'please try again' % context)
 
 def unbundle(repo, cg, heads, source, url):
     """Apply a bundle to a repo.
 
     This function makes sure the repo is locked during the application and has
     a mechanism to check that no push race occurred between the creation of
     the bundle and its application.
 
     If the push was raced, a PushRaced exception is raised."""
     r = 0
     # need a transaction when processing a bundle2 stream
     # [wlock, lock, tr] - needs to be an array so nested functions can modify it
     lockandtr = [None, None, None]
     recordout = None
     # quick fix for output mismatch with bundle2 in 3.4
     captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
     if url.startswith('remote:http:') or url.startswith('remote:https:'):
         captureoutput = True
     try:
         # note: outside bundle1, 'heads' is expected to be empty and this
         # 'check_heads' call will be a no-op
         check_heads(repo, heads, 'uploading changes')
         # push can proceed
         if not isinstance(cg, bundle2.unbundle20):
             # legacy case: bundle1 (changegroup 01)
             txnname = "\n".join([source, util.hidepassword(url)])
             with repo.lock(), repo.transaction(txnname) as tr:
                 op = bundle2.applybundle(repo, cg, tr, source, url)
                 r = bundle2.combinechangegroupresults(op)
         else:
             r = None
             try:
                 def gettransaction():
                     if not lockandtr[2]:
                         lockandtr[0] = repo.wlock()
                         lockandtr[1] = repo.lock()
                         lockandtr[2] = repo.transaction(source)
                         lockandtr[2].hookargs['source'] = source
                         lockandtr[2].hookargs['url'] = url
                         lockandtr[2].hookargs['bundle2'] = '1'
                     return lockandtr[2]
 
                 # Do greedy locking by default until we're satisfied with lazy
                 # locking.
                 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                     gettransaction()
 
                 op = bundle2.bundleoperation(repo, gettransaction,
                                              captureoutput=captureoutput,
                                              source='push')
                 try:
                     op = bundle2.processbundle(repo, cg, op=op)
                 finally:
                     r = op.reply
                     if captureoutput and r is not None:
                         repo.ui.pushbuffer(error=True, 
subproc=True)\\n                         def recordout(output):\\n                             r.newpart('output', data=output, mandatory=False)\\n                 if lockandtr[2] is not None:\\n                     lockandtr[2].close()\\n             except BaseException as exc:\\n                 exc.duringunbundle2 = True\\n                 if captureoutput and r is not None:\\n                     parts = exc._bundle2salvagedoutput = r.salvageoutput()\\n                     def recordout(output):\\n                         part = bundle2.bundlepart('output', data=output,\\n                                                   mandatory=False)\\n                         parts.append(part)\\n                 raise\\n     finally:\\n         lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])\\n         if recordout is not None:\\n             recordout(repo.ui.popbuffer())\\n     return r\\n \\n def _maybeapplyclonebundle(pullop):\\n     \\\"\\\"\\\"Apply a clone bundle from a remote, if possible.\\\"\\\"\\\"\\n \\n     repo = pullop.repo\\n     remote = pullop.remote\\n \\n     if not repo.ui.configbool('ui', 'clonebundles'):\\n         return\\n \\n     # Only run if local repo is empty.\\n     if len(repo):\\n         return\\n \\n     if pullop.heads:\\n         return\\n \\n     if not remote.capable('clonebundles'):\\n         return\\n \\n     with remote.commandexecutor() as e:\\n         res = e.callcommand('clonebundles', {}).result()\\n \\n     # If we call the wire protocol command, that's good enough to record the\\n     # attempt.\\n     pullop.clonebundleattempted = True\\n \\n     entries = parseclonebundlesmanifest(repo, res)\\n     if not entries:\\n         repo.ui.note(_('no clone bundles available on remote; '\\n                        'falling back to regular clone\\\\n'))\\n         return\\n \\n     entries = filterclonebundleentries(\\n         repo, entries, streamclonerequested=pullop.streamclonerequested)\\n \\n     if not entries:\\n         # There is a thundering herd concern here. However, if a server\\n         # operator doesn't advertise bundles appropriate for its clients,\\n         # they deserve what's coming. 
Furthermore, from a client's\\n         # perspective, no automatic fallback would mean not being able to\\n         # clone!\\n         repo.ui.warn(_('no compatible clone bundles available on server; '\\n                        'falling back to regular clone\\\\n'))\\n         repo.ui.warn(_('(you may want to report this to the server '\\n                        'operator)\\\\n'))\\n         return\\n \\n     entries = sortclonebundleentries(repo.ui, entries)\\n \\n     url = entries[0]['URL']\\n     repo.ui.status(_('applying clone bundle from %s\\\\n') % url)\\n     if trypullbundlefromurl(repo.ui, repo, url):\\n         repo.ui.status(_('finished applying clone bundle\\\\n'))\\n     # Bundle failed.\\n     #\\n     # We abort by default to avoid the thundering herd of\\n     # clients flooding a server that was expecting expensive\\n     # clone load to be offloaded.\\n     elif repo.ui.configbool('ui', 'clonebundlefallback'):\\n         repo.ui.warn(_('falling back to normal clone\\\\n'))\\n     else:\\n         raise error.Abort(_('error applying bundle'),\\n                           hint=_('if this error persists, consider contacting '\\n                                  'the server operator or disable clone '\\n                                  'bundles via '\\n                                  '\\\"--config ui.clonebundles=false\\\"'))\\n \\n def parseclonebundlesmanifest(repo, s):\\n     \\\"\\\"\\\"Parses the raw text of a clone bundles manifest.\\n \\n     Returns a list of dicts. The dicts have a ``URL`` key corresponding\\n     to the URL and other keys are the attributes for the entry.\\n     \\\"\\\"\\\"\\n     m = []\\n     for line in s.splitlines():\\n         fields = line.split()\\n         if not fields:\\n             continue\\n         attrs = {'URL': fields[0]}\\n         for rawattr in fields[1:]:\\n             key, value = rawattr.split('=', 1)\\n             key = urlreq.unquote(key)\\n             value = urlreq.unquote(value)\\n             attrs[key] = value\\n \\n             # Parse BUNDLESPEC into components. 
This makes client-side\\n             # preferences easier to specify since you can prefer a single\\n             # component of the BUNDLESPEC.\\n             if key == 'BUNDLESPEC':\\n                 try:\\n                     bundlespec = parsebundlespec(repo, value)\\n                     attrs['COMPRESSION'] = bundlespec.compression\\n                     attrs['VERSION'] = bundlespec.version\\n                 except error.InvalidBundleSpecification:\\n                     pass\\n                 except error.UnsupportedBundleSpecification:\\n                     pass\\n \\n         m.append(attrs)\\n \\n     return m\\n \\n def isstreamclonespec(bundlespec):\\n     # Stream clone v1\\n     if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):\\n         return True\\n \\n     # Stream clone v2\\n     if (bundlespec.wirecompression == 'UN' and \\\\\\n         bundlespec.wireversion == '02' and \\\\\\n         bundlespec.contentopts.get('streamv2')):\\n         return True\\n \\n     return False\\n \\n def filterclonebundleentries(repo, entries, streamclonerequested=False):\\n     \\\"\\\"\\\"Remove incompatible clone bundle manifest entries.\\n \\n     Accepts a list of entries parsed with ``parseclonebundlesmanifest``\\n     and returns a new list consisting of only the entries that this client\\n     should be able to apply.\\n \\n     There is no guarantee we'll be able to apply all returned entries because\\n     the metadata we use to filter on may be missing or wrong.\\n     \\\"\\\"\\\"\\n     newentries = []\\n     for entry in entries:\\n         spec = entry.get('BUNDLESPEC')\\n         if spec:\\n             try:\\n                 bundlespec = parsebundlespec(repo, spec, strict=True)\\n \\n                 # If a stream clone was requested, filter out non-streamclone\\n                 # entries.\\n                 if streamclonerequested and not isstreamclonespec(bundlespec):\\n                     repo.ui.debug('filtering %s because not a stream clone\\\\n' %\\n                                   entry['URL'])\\n                     continue\\n \\n             except error.InvalidBundleSpecification as e:\\n                 repo.ui.debug(stringutil.forcebytestr(e) + '\\\\n')\\n                 continue\\n             except error.UnsupportedBundleSpecification as e:\\n                 repo.ui.debug('filtering %s because unsupported bundle '\\n                               'spec: %s\\\\n' % (\\n                                   entry['URL'], stringutil.forcebytestr(e)))\\n                 continue\\n         # If we don't have a spec and requested a stream clone, we don't know\\n         # what the entry is so don't attempt to apply it.\\n         elif streamclonerequested:\\n             repo.ui.debug('filtering %s because cannot determine if a stream '\\n                           'clone bundle\\\\n' % entry['URL'])\\n             continue\\n \\n         if 'REQUIRESNI' in entry and not sslutil.hassni:\\n             repo.ui.debug('filtering %s because SNI not supported\\\\n' %\\n                           entry['URL'])\\n             continue\\n \\n         newentries.append(entry)\\n \\n     return newentries\\n \\n class clonebundleentry(object):\\n     \\\"\\\"\\\"Represents an item in a clone bundles manifest.\\n \\n     This rich class is needed to support sorting since sorted() in Python 3\\n     doesn't support ``cmp`` and our comparison is complex enough that ``key=``\\n     won't work.\\n     \\\"\\\"\\\"\\n \\n     def 
__init__(self, value, prefers):\\n         self.value = value\\n         self.prefers = prefers\\n \\n     def _cmp(self, other):\\n         for prefkey, prefvalue in self.prefers:\\n             avalue = self.value.get(prefkey)\\n             bvalue = other.value.get(prefkey)\\n \\n             # Special case for b missing attribute and a matches exactly.\\n             if avalue is not None and bvalue is None and avalue == prefvalue:\\n                 return -1\\n \\n             # Special case for a missing attribute and b matches exactly.\\n             if bvalue is not None and avalue is None and bvalue == prefvalue:\\n                 return 1\\n \\n             # We can't compare unless attribute present on both.\\n             if avalue is None or bvalue is None:\\n                 continue\\n \\n             # Same values should fall back to next attribute.\\n             if avalue == bvalue:\\n                 continue\\n \\n             # Exact matches come first.\\n             if avalue == prefvalue:\\n                 return -1\\n             if bvalue == prefvalue:\\n                 return 1\\n \\n             # Fall back to next attribute.\\n             continue\\n \\n         # If we got here we couldn't sort by attributes and prefers. Fall\\n         # back to index order.\\n         return 0\\n \\n     def __lt__(self, other):\\n         return self._cmp(other) \\u003c 0\\n \\n     def __gt__(self, other):\\n         return self._cmp(other) \\u003e 0\\n \\n     def __eq__(self, other):\\n         return self._cmp(other) == 0\\n \\n     def __le__(self, other):\\n         return self._cmp(other) \\u003c= 0\\n \\n     def __ge__(self, other):\\n         return self._cmp(other) \\u003e= 0\\n \\n     def __ne__(self, other):\\n         return self._cmp(other) != 0\\n \\n def sortclonebundleentries(ui, entries):\\n     prefers = ui.configlist('ui', 'clonebundleprefers')\\n     if not prefers:\\n         return list(entries)\\n \\n     prefers = [p.split('=', 1) for p in prefers]\\n \\n     items = sorted(clonebundleentry(v, prefers) for v in entries)\\n     return [i.value for i in items]\\n \\n def trypullbundlefromurl(ui, repo, url):\\n     \\\"\\\"\\\"Attempt to apply a bundle from a URL.\\\"\\\"\\\"\\n     with repo.lock(), repo.transaction('bundleurl') as tr:\\n         try:\\n             fh = urlmod.open(ui, url)\\n             cg = readbundle(ui, fh, 'stream')\\n \\n             if isinstance(cg, streamclone.streamcloneapplier):\\n                 cg.apply(repo)\\n             else:\\n                 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)\\n             return True\\n         except urlerr.httperror as e:\\n             ui.warn(_('HTTP error fetching bundle: %s\\\\n') %\\n                     stringutil.forcebytestr(e))\\n         except urlerr.urlerror as e:\\n             ui.warn(_('error fetching bundle: %s\\\\n') %\\n                     stringutil.forcebytestr(e.reason))\\n \\n         return False\\n\"}]}],\"properties\":[]}},\"error_code\":null,\"error_info\":null}"
-                }
-            }, 
-            "request": {
-                "headers": {
-                    "content-length": [
-                        "59"
-                    ], 
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ], 
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ], 
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
-                    ], 
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ]
-                }, 
-                "uri": "https://phab.mercurial-scm.org//api/differential.querydiffs", 
-                "method": "POST", 
-                "body": "ids%5B0%5D=11058&api.token=cli-hahayouwish"
-            }
-        }, 
-        {
-            "response": {
-                "headers": {
-                    "cache-control": [
-                        "no-store"
-                    ], 
-                    "set-cookie": [
-                        "phsid=A%2Fsh6hsdu5dzfurm5gsiy2cmi6kqw33cqikoawcqz2; expires=Thu, 14-Sep-2023 04:15:58 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
-                    ], 
-                    "server": [
-                        "Apache/2.4.10 (Debian)"
-                    ], 
-                    "content-type": [
-                        "application/json"
-                    ], 
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ], 
-                    "x-content-type-options": [
-                        "nosniff"
-                    ], 
-                    "date": [
-                        "Sat, 15 Sep 2018 04:15:58 GMT"
-                    ], 
-                    "strict-transport-security": [
-                        "max-age=0; includeSubdomains; preload"
-                    ], 
-                    "x-frame-options": [
-                        "Deny"
-                    ], 
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                }, 
-                "status": {
-                    "code": 200, 
-                    "message": "OK"
-                }, 
-                "body": {
-                    "string": "{\"result\":\"diff --git a\\/tests\\/wireprotohelpers.sh b\\/tests\\/wireprotohelpers.sh\\n--- a\\/tests\\/wireprotohelpers.sh\\n+++ b\\/tests\\/wireprotohelpers.sh\\n@@ -56,3 +56,10 @@\\n web.api.http-v2 = true\\n EOF\\n }\\n+\\n+enablehttpv2client() {\\n+  cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n+[experimental]\\n+httppeer.advertise-v2 = true\\n+EOF\\n+}\\ndiff --git a\\/tests\\/test-wireproto-exchangev2.t b\\/tests\\/test-wireproto-exchangev2.t\\nnew file mode 100644\\n--- \\/dev\\/null\\n+++ b\\/tests\\/test-wireproto-exchangev2.t\\n@@ -0,0 +1,53 @@\\n+Tests for wire protocol version 2 exchange.\\n+Tests in this file should be folded into existing tests once protocol\\n+v2 has enough features that it can be enabled via #testcase in existing\\n+tests.\\n+\\n+  $ . $TESTDIR\\/wireprotohelpers.sh\\n+  $ enablehttpv2client\\n+\\n+  $ hg init server-simple\\n+  $ enablehttpv2 server-simple\\n+  $ cd server-simple\\n+  $ cat \\u003e\\u003e .hg\\/hgrc \\u003c\\u003c EOF\\n+  \\u003e [phases]\\n+  \\u003e publish = false\\n+  \\u003e EOF\\n+  $ echo a0 \\u003e a\\n+  $ echo b0 \\u003e b\\n+  $ hg -q commit -A -m 'commit 0'\\n+\\n+  $ echo a1 \\u003e a\\n+  $ hg commit -m 'commit 1'\\n+  $ hg phase --public -r .\\n+  $ echo a2 \\u003e a\\n+  $ hg commit -m 'commit 2'\\n+\\n+  $ hg -q up -r 0\\n+  $ echo b1 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 1'\\n+  $ echo b2 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 2'\\n+\\n+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log\\n+  $ cat hg.pid \\u003e $DAEMON_PIDS\\n+\\n+  $ cd ..\\n+\\n+Test basic clone\\n+\\n+  $ hg --debug clone -U http:\\/\\/localhost:$HGPORT client-simple\\n+  using http:\\/\\/localhost:$HGPORT\\/\\n+  sending capabilities command\\n+  query 1; heads\\n+  sending 2 commands\\n+  sending command heads: {}\\n+  sending command known: {\\n+    'nodes': []\\n+  }\\n+  received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)\\n+  received frame(size=43; request=1; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)\\n+  received frame(size=11; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=1; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)\\ndiff --git a\\/mercurial\\/httppeer.py b\\/mercurial\\/httppeer.py\\n--- a\\/mercurial\\/httppeer.py\\n+++ b\\/mercurial\\/httppeer.py\\n@@ -802,7 +802,8 @@\\n             return True\\n \\n         # Other concepts.\\n-        if name in ('bundle2',):\\n+        # TODO remove exchangev2 once we have a command implemented.\\n+        if name in ('bundle2', 'exchangev2'):\\n             return True\\n \\n         # Alias command-* to presence of command of that name.\\ndiff --git a\\/mercurial\\/exchangev2.py b\\/mercurial\\/exchangev2.py\\nnew file mode 100644\\n--- \\/dev\\/null\\n+++ b\\/mercurial\\/exchangev2.py\\n@@ -0,0 +1,55 @@\\n+# exchangev2.py - repository exchange for wire protocol version 2\\n+#\\n+# Copyright 2018 Gregory Szorc \\u003cgregory.szorc@gmail.com\\u003e\\n+#\\n+# This software may be used and distributed according to the terms of the\\n+# GNU General Public License version 2 or any later version.\\n+\\n+from __future__ import absolute_import\\n+\\n+from .node import (\\n+    nullid,\\n+)\\n+from . import (\\n+    setdiscovery,\\n+)\\n+\\n+def pull(pullop):\\n+    \\\"\\\"\\\"Pull using wire protocol version 2.\\\"\\\"\\\"\\n+    repo = pullop.repo\\n+    remote = pullop.remote\\n+\\n+    # Figure out what needs to be fetched.\\n+    common, fetch, remoteheads = _pullchangesetdiscovery(\\n+        repo, remote, pullop.heads, abortwhenunrelated=pullop.force)\\n+\\n+def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):\\n+    \\\"\\\"\\\"Determine which changesets need to be pulled.\\\"\\\"\\\"\\n+\\n+    if heads:\\n+        knownnode = repo.changelog.hasnode\\n+        if all(knownnode(head) for head in heads):\\n+            return heads, False, heads\\n+\\n+    # TODO wire protocol version 2 is capable of more efficient discovery\\n+    # than setdiscovery. Consider implementing something better.\\n+    common, fetch, remoteheads = setdiscovery.findcommonheads(\\n+        repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated)\\n+\\n+    common = set(common)\\n+    remoteheads = set(remoteheads)\\n+\\n+    # If a remote head is filtered locally, put it back in the common set.\\n+    # See the comment in exchange._pulldiscoverychangegroup() for more.\\n+\\n+    if fetch and remoteheads:\\n+        nodemap = repo.unfiltered().changelog.nodemap\\n+\\n+        common |= {head for head in remoteheads if head in nodemap}\\n+\\n+        if set(remoteheads).issubset(common):\\n+            fetch = []\\n+\\n+    common.discard(nullid)\\n+\\n+    return common, fetch, remoteheads\\ndiff --git a\\/mercurial\\/exchange.py b\\/mercurial\\/exchange.py\\n--- a\\/mercurial\\/exchange.py\\n+++ b\\/mercurial\\/exchange.py\\n@@ -26,6 +26,7 @@\\n     changegroup,\\n     discovery,\\n     error,\\n+    exchangev2,\\n     lock as lockmod,\\n     logexchange,\\n     narrowspec,\\n@@ -1506,17 +1507,21 @@\\n \\n     pullop.trmanager = transactionmanager(repo, 'pull', remote.url())\\n     with repo.wlock(), repo.lock(), pullop.trmanager:\\n-        # This should ideally be in _pullbundle2(). However, it needs to run\\n-        # before discovery to avoid extra work.\\n-        _maybeapplyclonebundle(pullop)\\n-        streamclone.maybeperformlegacystreamclone(pullop)\\n-        _pulldiscovery(pullop)\\n-        if pullop.canusebundle2:\\n-            _fullpullbundle2(repo, pullop)\\n-        _pullchangeset(pullop)\\n-        _pullphase(pullop)\\n-        _pullbookmarks(pullop)\\n-        _pullobsolete(pullop)\\n+        # Use the modern wire protocol, if available.\\n+        if remote.capable('exchangev2'):\\n+            exchangev2.pull(pullop)\\n+        else:\\n+            # This should ideally be in _pullbundle2(). However, it needs to run\\n+            # before discovery to avoid extra work.\\n+            _maybeapplyclonebundle(pullop)\\n+            streamclone.maybeperformlegacystreamclone(pullop)\\n+            _pulldiscovery(pullop)\\n+            if pullop.canusebundle2:\\n+                _fullpullbundle2(repo, pullop)\\n+            _pullchangeset(pullop)\\n+            _pullphase(pullop)\\n+            _pullbookmarks(pullop)\\n+            _pullobsolete(pullop)\\n \\n     # storing remotenames\\n     if repo.ui.configbool('experimental', 'remotenames'):\\n\\n\",\"error_code\":null,\"error_info\":null}"
-                }
-            }, 
-            "request": {
-                "headers": {
-                    "content-length": [
-                        "55"
-                    ], 
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ], 
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ], 
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
-                    ], 
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ]
-                }, 
-                "uri": "https://phab.mercurial-scm.org//api/differential.getrawdiff", 
-                "method": "POST", 
-                "body": "diffID=11058&api.token=cli-hahayouwish"
-            }
-        }
-    ]
-}
--- a/tests/phabricator/phabread-conduit-error.json	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,70 +0,0 @@
-{
-    "interactions": [
-        {
-            "response": {
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":\"ERR-INVALID-AUTH\",\"error_info\":\"API token \\\"cli-notavalidtoken\\\" has the wrong length. API tokens should be 32 characters long.\"}"
-                }, 
-                "headers": {
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ], 
-                    "x-content-type-options": [
-                        "nosniff"
-                    ], 
-                    "set-cookie": [
-                        "phsid=A%2F6jvmizfvgaa6bkls264secsim5nlgid4vj55jpe6; expires=Thu, 14-Sep-2023 04:38:21 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
-                    ], 
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ], 
-                    "content-type": [
-                        "application/json"
-                    ], 
-                    "cache-control": [
-                        "no-store"
-                    ], 
-                    "date": [
-                        "Sat, 15 Sep 2018 04:38:21 GMT"
-                    ], 
-                    "strict-transport-security": [
-                        "max-age=0; includeSubdomains; preload"
-                    ], 
-                    "server": [
-                        "Apache/2.4.10 (Debian)"
-                    ], 
-                    "x-frame-options": [
-                        "Deny"
-                    ]
-                }, 
-                "status": {
-                    "message": "OK", 
-                    "code": 200
-                }
-            }, 
-            "request": {
-                "body": "api.token=cli-notavalidtoken&ids%5B0%5D=4480", 
-                "headers": {
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ], 
-                    "content-length": [
-                        "44"
-                    ], 
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
-                    ], 
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ], 
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                }, 
-                "method": "POST", 
-                "uri": "https://phab.mercurial-scm.org//api/differential.query"
-            }
-        }
-    ], 
-    "version": 1
-}
\ No newline at end of file
--- a/tests/phabricator/phabread-str-time.json	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,221 +0,0 @@
-{
-    "version": 1,
-    "interactions": [
-        {
-            "request": {
-                "body": "api.token=cli-hahayouwish&ids%5B0%5D=1285",
-                "headers": {
-                    "content-length": [
-                        "58"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 4.8.2)"
-                    ]
-                },
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/differential.query"
-            },
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "headers": {
-                    "content-length": [
-                        "822"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "date": [
-                        "Mon, 01 Jul 2019 22:36:40 GMT"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":[{\"id\":\"1285\",\"phid\":\"PHID-DREV-uefuzc6kbhhkoqhr347g\",\"title\":\"test string time\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D1285\",\"dateCreated\":\"1562019861\",\"dateModified\":\"1562019862\",\"authorPHID\":\"PHID-USER-qmzis76vb2yh3ogldu6r\",\"status\":\"0\",\"statusName\":\"Draft\",\"properties\":{\"draft.broadcast\":false,\"lines.added\":1,\"lines.removed\":0,\"buildables\":{\"PHID-HMBB-lhjiovrsqtbft2fz4lua\":{\"status\":\"passed\"}}},\"branch\":null,\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"1\",\"activeDiffPHID\":\"PHID-DIFF-dkgwbpgcv37pymqieyyv\",\"diffs\":[\"2069\"],\"commits\":[],\"reviewers\":[],\"ccs\":[],\"hashes\":[],\"auxiliary\":{\"bugzilla.bug-id\":null,\"phabricator:projects\":[\"PHID-PROJ-f2a3wl5wxtqdtfgdjqzk\"],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null}],\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "body": "api.token=cli-hahayouwish&ids%5B0%5D=2069",
-                "headers": {
-                    "content-length": [
-                        "58"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 4.8.2)"
-                    ]
-                },
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/differential.querydiffs"
-            },
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "headers": {
-                    "content-length": [
-                        "1137"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "date": [
-                        "Mon, 01 Jul 2019 22:36:41 GMT"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"2069\":{\"id\":\"2069\",\"revisionID\":\"1285\",\"dateCreated\":\"1562019858\",\"dateModified\":\"1562019861\",\"sourceControlBaseRevision\":null,\"sourceControlPath\":null,\"sourceControlSystem\":null,\"branch\":null,\"bookmark\":null,\"creationMethod\":\"web\",\"description\":null,\"unitStatus\":\"4\",\"lintStatus\":\"4\",\"changes\":[{\"id\":\"5416\",\"metadata\":{\"line:first\":1,\"hash.effect\":\"ei3Zy6KS2Wut\"},\"oldPath\":null,\"currentPath\":\"test\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"1\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"1\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+test\\n\"}]}],\"properties\":{\"local:commits\":{\"da5c8c6bf23a36b6e3af011bc3734460692c23ce\":{\"author\":\"test\",\"authorEmail\":\"test\",\"branch\":\"default\",\"commit\":\"da5c8c6bf23a36b6e3af011bc3734460692c23ce\",\"rev\":\"da5c8c6bf23a36b6e3af011bc3734460692c23ce\",\"parents\":[\"1f634396406d03e565ed645370e5fecd062cf215\"],\"time\":\"1562019844\"}}},\"authorName\":\"test\",\"authorEmail\":\"test\"}},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "body": "diffID=2069&api.token=cli-hahayouwish",
-                "headers": {
-                    "content-length": [
-                        "54"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 4.8.2)"
-                    ]
-                },
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/differential.getrawdiff"
-            },
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "headers": {
-                    "content-length": [
-                        "153"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "date": [
-                        "Mon, 01 Jul 2019 22:36:42 GMT"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":\"diff --git a\\/test b\\/test\\nnew file mode 100644\\n--- \\/dev\\/null\\n+++ b\\/test\\n@@ -0,0 +1 @@\\n+test\\n\\n\",\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        }
-    ]
-}
\ No newline at end of file
--- a/tests/phabricator/phabsend-comment-created.json	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,653 +0,0 @@
-{
-    "version": 1,
-    "interactions": [
-        {
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "body": {
-                    "string": "{\"result\":{\"data\":[{\"id\":12,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":\"Mercurial\",\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"refRules\":{\"fetchRules\":[],\"trackRules\":[],\"permanentRefRules\":[]},\"spacePHID\":null,\"dateCreated\":1523292927,\"dateModified\":1523297359,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "date": [
-                        "Fri, 07 Jun 2019 20:23:04 GMT"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-length": [
-                        "587"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ]
-                }
-            },
-            "request": {
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search",
-                "body": "constraints%5Bcallsigns%5D%5B0%5D=HG&api.token=cli-hahayouwish",
-                "headers": {
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "81"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ]
-                }
-            }
-        },
-        {
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "body": {
-                    "string": "{\"result\":{\"id\":1989,\"phid\":\"PHID-DIFF-3mtjdk4tjjkaw4arccah\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/1989\\/\"},\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "date": [
-                        "Fri, 07 Jun 2019 20:23:05 GMT"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-length": [
-                        "172"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ]
-                }
-            },
-            "request": {
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/differential.createrawdiff",
-                "body": "repositoryPHID=PHID-REPO-bvunnehri4u2isyr7bc3&diff=diff+--git+a%2Fcomment+b%2Fcomment%0Anew+file+mode+100644%0A---+%2Fdev%2Fnull%0A%2B%2B%2B+b%2Fcomment%0A%40%40+-0%2C0+%2B1%2C1+%40%40%0A%2Bcomment%0A&api.token=cli-hahayouwish",
-                "headers": {
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "243"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ]
-                }
-            }
-        },
-        {
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "date": [
-                        "Fri, 07 Jun 2019 20:23:06 GMT"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ]
-                }
-            },
-            "request": {
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "body": "api.token=cli-hahayouwish&data=%7B%22branch%22%3A+%22default%22%2C+%22date%22%3A+%220+0%22%2C+%22node%22%3A+%22a7ee4bac036ae424bfc9e1a4228c4fa06d637f53%22%2C+%22parent%22%3A+%22a19f1434f9a578325eb9799c9961b5465d4e6e40%22%2C+%22user%22%3A+%22test%22%7D&name=hg%3Ameta&diff_id=1989",
-                "headers": {
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "296"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ]
-                }
-            }
-        },
-        {
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "date": [
-                        "Fri, 07 Jun 2019 20:23:07 GMT"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ]
-                }
-            },
-            "request": {
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "body": "api.token=cli-hahayouwish&data=%7B%22a7ee4bac036ae424bfc9e1a4228c4fa06d637f53%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22branch%22%3A+%22default%22%2C+%22commit%22%3A+%22a7ee4bac036ae424bfc9e1a4228c4fa06d637f53%22%2C+%22parents%22%3A+%5B%22a19f1434f9a578325eb9799c9961b5465d4e6e40%22%5D%2C+%22time%22%3A+0%7D%7D&name=local%3Acommits&diff_id=1989",
-                "headers": {
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "396"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ]
-                }
-            }
-        },
-        {
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "body": {
-                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create comment for phabricator test\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create comment for phabricator test\"}]},\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "date": [
-                        "Fri, 07 Jun 2019 20:23:07 GMT"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-length": [
-                        "288"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ]
-                }
-            },
-            "request": {
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage",
-                "body": "corpus=create+comment+for+phabricator+test&api.token=cli-hahayouwish",
-                "headers": {
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "85"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ]
-                }
-            }
-        },
-        {
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "body": {
-                    "string": "{\"result\":{\"object\":{\"id\":1253,\"phid\":\"PHID-DREV-4rhqd6v3yxbtodc7wbv7\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-g73sutb5nezcyh6\"},{\"phid\":\"PHID-XACT-DREV-yg6ysul7pcxtqce\"},{\"phid\":\"PHID-XACT-DREV-vxhpgk64u3kax45\"},{\"phid\":\"PHID-XACT-DREV-mkt5rq3racrpzhe\"},{\"phid\":\"PHID-XACT-DREV-s7la723tgqhwovt\"}]},\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "date": [
-                        "Fri, 07 Jun 2019 20:23:08 GMT"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-length": [
-                        "336"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ]
-                }
-            },
-            "request": {
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit",
-                "body": "transactions%5B0%5D%5Bvalue%5D=PHID-DIFF-3mtjdk4tjjkaw4arccah&transactions%5B0%5D%5Btype%5D=update&transactions%5B1%5D%5Bvalue%5D=For+default+branch&transactions%5B1%5D%5Btype%5D=comment&transactions%5B2%5D%5Bvalue%5D=create+comment+for+phabricator+test&transactions%5B2%5D%5Btype%5D=title&api.token=cli-hahayouwish",
-                "headers": {
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "332"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ]
-                }
-            }
-        },
-        {
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "body": {
-                    "string": "{\"result\":[{\"id\":\"1253\",\"phid\":\"PHID-DREV-4rhqd6v3yxbtodc7wbv7\",\"title\":\"create comment for phabricator test\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D1253\",\"dateCreated\":\"1559938988\",\"dateModified\":\"1559938988\",\"authorPHID\":\"PHID-USER-qmzis76vb2yh3ogldu6r\",\"status\":\"0\",\"statusName\":\"Draft\",\"properties\":{\"draft.broadcast\":false,\"lines.added\":1,\"lines.removed\":0},\"branch\":null,\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"1\",\"activeDiffPHID\":\"PHID-DIFF-3mtjdk4tjjkaw4arccah\",\"diffs\":[\"1989\"],\"commits\":[],\"reviewers\":[],\"ccs\":[],\"hashes\":[],\"auxiliary\":{\"bugzilla.bug-id\":null,\"phabricator:projects\":[\"PHID-PROJ-f2a3wl5wxtqdtfgdjqzk\"],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null}],\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "date": [
-                        "Fri, 07 Jun 2019 20:23:09 GMT"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-length": [
-                        "773"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ]
-                }
-            },
-            "request": {
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/differential.query",
-                "body": "api.token=cli-hahayouwish&ids%5B0%5D=1253",
-                "headers": {
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "58"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ]
-                }
-            }
-        },
-        {
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "date": [
-                        "Fri, 07 Jun 2019 20:23:10 GMT"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ]
-                }
-            },
-            "request": {
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "body": "api.token=cli-hahayouwish&data=%7B%22branch%22%3A+%22default%22%2C+%22date%22%3A+%220+0%22%2C+%22node%22%3A+%2281fce7de1b7d8ea6b8309a58058d3b5793506c34%22%2C+%22parent%22%3A+%22a19f1434f9a578325eb9799c9961b5465d4e6e40%22%2C+%22user%22%3A+%22test%22%7D&name=hg%3Ameta&diff_id=1989",
-                "headers": {
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "296"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ]
-                }
-            }
-        },
-        {
-            "response": {
-                "status": {
-                    "message": "OK",
-                    "code": 200
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "date": [
-                        "Fri, 07 Jun 2019 20:23:10 GMT"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ]
-                }
-            },
-            "request": {
-                "method": "POST",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "body": "api.token=cli-hahayouwish&data=%7B%2281fce7de1b7d8ea6b8309a58058d3b5793506c34%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22branch%22%3A+%22default%22%2C+%22commit%22%3A+%2281fce7de1b7d8ea6b8309a58058d3b5793506c34%22%2C+%22parents%22%3A+%5B%22a19f1434f9a578325eb9799c9961b5465d4e6e40%22%5D%2C+%22time%22%3A+0%7D%7D&name=local%3Acommits&diff_id=1989",
-                "headers": {
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "396"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ]
-                }
-            }
-        }
-    ]
-}
\ No newline at end of file
--- a/tests/phabricator/phabsend-comment-updated.json	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,581 +0,0 @@
-{
-    "interactions": [
-        {
-            "request": {
-                "method": "POST",
-                "body": "api.token=cli-hahayouwish&revisionIDs%5B0%5D=1253",
-                "uri": "https://phab.mercurial-scm.org//api/differential.querydiffs",
-                "headers": {
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "66"
-                    ]
-                }
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "body": {
-                    "string": "{\"result\":{\"1989\":{\"id\":\"1989\",\"revisionID\":\"1253\",\"dateCreated\":\"1559938985\",\"dateModified\":\"1559938988\",\"sourceControlBaseRevision\":null,\"sourceControlPath\":null,\"sourceControlSystem\":null,\"branch\":null,\"bookmark\":null,\"creationMethod\":\"web\",\"description\":null,\"unitStatus\":\"4\",\"lintStatus\":\"4\",\"changes\":[{\"id\":\"5273\",\"metadata\":{\"line:first\":1,\"hash.effect\":\"mzg_LBhhVYqb\"},\"oldPath\":null,\"currentPath\":\"comment\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"1\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"1\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+comment\\n\"}]}],\"properties\":{\"hg:meta\":{\"branch\":\"default\",\"date\":\"0 0\",\"node\":\"0025df7d064f9c916862d19e207429a0f799fa7d\",\"parent\":\"a19f1434f9a578325eb9799c9961b5465d4e6e40\",\"user\":\"test\"},\"local:commits\":{\"0025df7d064f9c916862d19e207429a0f799fa7d\":{\"author\":\"test\",\"authorEmail\":\"test\",\"branch\":\"default\",\"commit\":\"0025df7d064f9c916862d19e207429a0f799fa7d\",\"parents\":[\"a19f1434f9a578325eb9799c9961b5465d4e6e40\"],\"time\":0}}},\"authorName\":\"test\",\"authorEmail\":\"test\"}},\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "date": [
-                        "Fri, 07 Jun 2019 20:26:57 GMT"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "content-length": [
-                        "1243"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ]
-                }
-            }
-        },
-        {
-            "request": {
-                "method": "POST",
-                "body": "constraints%5Bcallsigns%5D%5B0%5D=HG&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search",
-                "headers": {
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "81"
-                    ]
-                }
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "body": {
-                    "string": "{\"result\":{\"data\":[{\"id\":12,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":\"Mercurial\",\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"refRules\":{\"fetchRules\":[],\"trackRules\":[],\"permanentRefRules\":[]},\"spacePHID\":null,\"dateCreated\":1523292927,\"dateModified\":1523297359,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "date": [
-                        "Fri, 07 Jun 2019 20:26:58 GMT"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "content-length": [
-                        "587"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ]
-                }
-            }
-        },
-        {
-            "request": {
-                "method": "POST",
-                "body": "repositoryPHID=PHID-REPO-bvunnehri4u2isyr7bc3&api.token=cli-hahayouwish&diff=diff+--git+a%2Fcomment+b%2Fcomment%0Anew+file+mode+100644%0A---+%2Fdev%2Fnull%0A%2B%2B%2B+b%2Fcomment%0A%40%40+-0%2C0+%2B1%2C2+%40%40%0A%2Bcomment%0A%2Bcomment2%0A",
-                "uri": "https://phab.mercurial-scm.org//api/differential.createrawdiff",
-                "headers": {
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "257"
-                    ]
-                }
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "body": {
-                    "string": "{\"result\":{\"id\":1990,\"phid\":\"PHID-DIFF-xfa4yzc5h2cvjfhpx4dv\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/1990\\/\"},\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "date": [
-                        "Fri, 07 Jun 2019 20:26:59 GMT"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "content-length": [
-                        "172"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ]
-                }
-            }
-        },
-        {
-            "request": {
-                "method": "POST",
-                "body": "diff_id=1990&data=%7B%22branch%22%3A+%22default%22%2C+%22date%22%3A+%220+0%22%2C+%22node%22%3A+%221acd4b60af38c934182468719a8a431248f49bef%22%2C+%22parent%22%3A+%22a19f1434f9a578325eb9799c9961b5465d4e6e40%22%2C+%22user%22%3A+%22test%22%7D&api.token=cli-hahayouwish&name=hg%3Ameta",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "headers": {
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "296"
-                    ]
-                }
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "date": [
-                        "Fri, 07 Jun 2019 20:26:59 GMT"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ]
-                }
-            }
-        },
-        {
-            "request": {
-                "method": "POST",
-                "body": "diff_id=1990&data=%7B%221acd4b60af38c934182468719a8a431248f49bef%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22branch%22%3A+%22default%22%2C+%22commit%22%3A+%221acd4b60af38c934182468719a8a431248f49bef%22%2C+%22parents%22%3A+%5B%22a19f1434f9a578325eb9799c9961b5465d4e6e40%22%5D%2C+%22time%22%3A+0%7D%7D&api.token=cli-hahayouwish&name=local%3Acommits",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "headers": {
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "396"
-                    ]
-                }
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "date": [
-                        "Fri, 07 Jun 2019 20:27:00 GMT"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ]
-                }
-            }
-        },
-        {
-            "request": {
-                "method": "POST",
-                "body": "api.token=cli-hahayouwish&corpus=create+comment+for+phabricator+test%0A%0ADifferential+Revision%3A+https%3A%2F%2Fphab.mercurial-scm.org%2FD1253",
-                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage",
-                "headers": {
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "165"
-                    ]
-                }
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "body": {
-                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create comment for phabricator test\",\"revisionID\":1253},\"revisionIDFieldInfo\":{\"value\":1253,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create comment for phabricator test\"}]},\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "date": [
-                        "Fri, 07 Jun 2019 20:27:01 GMT"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "content-length": [
-                        "306"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ]
-                }
-            }
-        },
-        {
-            "request": {
-                "method": "POST",
-                "body": "api.token=cli-hahayouwish&transactions%5B0%5D%5Btype%5D=update&transactions%5B0%5D%5Bvalue%5D=PHID-DIFF-xfa4yzc5h2cvjfhpx4dv&transactions%5B1%5D%5Btype%5D=comment&transactions%5B1%5D%5Bvalue%5D=Address+review+comments&transactions%5B2%5D%5Btype%5D=title&transactions%5B2%5D%5Bvalue%5D=create+comment+for+phabricator+test&objectIdentifier=1253",
-                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit",
-                "headers": {
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "359"
-                    ]
-                }
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "body": {
-                    "string": "{\"result\":{\"object\":{\"id\":1253,\"phid\":\"PHID-DREV-4rhqd6v3yxbtodc7wbv7\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-punz3dredrxghth\"},{\"phid\":\"PHID-XACT-DREV-ykwxppmzdgrtgye\"}]},\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "date": [
-                        "Fri, 07 Jun 2019 20:27:02 GMT"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "content-length": [
-                        "210"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ]
-                }
-            }
-        },
-        {
-            "request": {
-                "method": "POST",
-                "body": "api.token=cli-hahayouwish&ids%5B0%5D=1253",
-                "uri": "https://phab.mercurial-scm.org//api/differential.query",
-                "headers": {
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0.1+253-f2ebe61e9a8e+20190607)"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ],
-                    "content-length": [
-                        "58"
-                    ]
-                }
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "body": {
-                    "string": "{\"result\":[{\"id\":\"1253\",\"phid\":\"PHID-DREV-4rhqd6v3yxbtodc7wbv7\",\"title\":\"create comment for phabricator test\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D1253\",\"dateCreated\":\"1559938988\",\"dateModified\":\"1559939221\",\"authorPHID\":\"PHID-USER-qmzis76vb2yh3ogldu6r\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":2,\"lines.removed\":0,\"buildables\":{\"PHID-HMBB-hsvjwe4uccbkgjpvffhz\":{\"status\":\"passed\"}}},\"branch\":null,\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-xfa4yzc5h2cvjfhpx4dv\",\"diffs\":[\"1990\",\"1989\"],\"commits\":[],\"reviewers\":[],\"ccs\":[],\"hashes\":[],\"auxiliary\":{\"bugzilla.bug-id\":null,\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null}],\"error_code\":null,\"error_info\":null}"
-                },
-                "headers": {
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "date": [
-                        "Fri, 07 Jun 2019 20:27:02 GMT"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "referrer-policy": [
-                        "no-referrer",
-                        "strict-origin-when-cross-origin"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "content-length": [
-                        "822"
-                    ],
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ]
-                }
-            }
-        }
-    ],
-    "version": 1
-}
\ No newline at end of file
--- a/tests/phabricator/phabsend-create-alpha.json	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,617 +0,0 @@
-{
-    "version": 1,
-    "interactions": [
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "93"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&constraints%5Bcallsigns%5D%5B0%5D=HG",
-                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:00 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "549"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"data\":[{\"id\":10,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":\"Mercurial\",\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"spacePHID\":null,\"dateCreated\":1507817156,\"dateModified\":1529613276,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "235"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&diff=diff+--git+a%2Falpha+b%2Falpha%0Anew+file+mode+100644%0A---+%2Fdev%2Fnull%0A%2B%2B%2B+b%2Falpha%0A%40%40+-0%2C0+%2B1%2C1+%40%40%0A%2Balpha%0A&repositoryPHID=PHID-REPO-bvunnehri4u2isyr7bc3",
-                "uri": "https://phab.mercurial-scm.org//api/differential.createrawdiff",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:01 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "172"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"id\":1899,\"phid\":\"PHID-DIFF-gpg57jico75ouhl2bux2\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/1899\\/\"},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "296"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1899&data=%7B%22branch%22%3A+%22default%22%2C+%22date%22%3A+%220+0%22%2C+%22node%22%3A+%22d386117f30e6b1282897bdbde75ac21e095163d4%22%2C+%22parent%22%3A+%220000000000000000000000000000000000000000%22%2C+%22user%22%3A+%22test%22%7D&name=hg%3Ameta&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:02 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "257"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1899&data=%7B%22d386117f30e6b1282897bdbde75ac21e095163d4%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22branch%22%3A+%22default%22%2C+%22commit%22%3A+%22d386117f30e6b1282897bdbde75ac21e095163d4%22%2C+%22parents%22%3A+%5B%220000000000000000000000000000000000000000%22%5D%2C+%22time%22%3A+0%7D%7D&name=local%3Acommits&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:02 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "93"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&corpus=create+alpha+for+phabricator+test+%E2%82%AC",
-                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:03 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "298"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create alpha for phabricator test \\u20ac\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create alpha for phabricator test \\u20ac\"}]},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "252"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&transactions%5B0%5D%5Btype%5D=update&transactions%5B0%5D%5Bvalue%5D=PHID-DIFF-gpg57jico75ouhl2bux2&transactions%5B1%5D%5Btype%5D=title&transactions%5B1%5D%5Bvalue%5D=create+alpha+for+phabricator+test+%E2%82%AC",
-                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:04 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "294"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"object\":{\"id\":1190,\"phid\":\"PHID-DREV-kikesmfxhzpfaxbzgj3l\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-od4nnssrqj57m6x\"},{\"phid\":\"PHID-XACT-DREV-2prb5lagzng6uqt\"},{\"phid\":\"PHID-XACT-DREV-qu7o6fgwssovbwb\"},{\"phid\":\"PHID-XACT-DREV-uynfy6n3u6new5f\"}]},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "58"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "ids%5B0%5D=1190&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.query",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:05 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "778"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":[{\"id\":\"1190\",\"phid\":\"PHID-DREV-kikesmfxhzpfaxbzgj3l\",\"title\":\"create alpha for phabricator test \\u20ac\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D1190\",\"dateCreated\":\"1557063064\",\"dateModified\":\"1557063064\",\"authorPHID\":\"PHID-USER-qmzis76vb2yh3ogldu6r\",\"status\":\"0\",\"statusName\":\"Draft\",\"properties\":{\"draft.broadcast\":false,\"lines.added\":1,\"lines.removed\":0},\"branch\":null,\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"1\",\"activeDiffPHID\":\"PHID-DIFF-gpg57jico75ouhl2bux2\",\"diffs\":[\"1899\"],\"commits\":[],\"reviewers\":[],\"ccs\":[],\"hashes\":[],\"auxiliary\":{\"bugzilla.bug-id\":null,\"phabricator:projects\":[\"PHID-PROJ-f2a3wl5wxtqdtfgdjqzk\"],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null}],\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "296"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1899&data=%7B%22branch%22%3A+%22default%22%2C+%22date%22%3A+%220+0%22%2C+%22node%22%3A+%22a86ed7d85e866f01161e9f55cee5d116272f508f%22%2C+%22parent%22%3A+%220000000000000000000000000000000000000000%22%2C+%22user%22%3A+%22test%22%7D&name=hg%3Ameta&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:06 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "257"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1899&data=%7B%22a86ed7d85e866f01161e9f55cee5d116272f508f%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22branch%22%3A+%22default%22%2C+%22commit%22%3A+%22a86ed7d85e866f01161e9f55cee5d116272f508f%22%2C+%22parents%22%3A+%5B%220000000000000000000000000000000000000000%22%5D%2C+%22time%22%3A+0%7D%7D&name=local%3Acommits&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:06 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        }
-    ]
-}
--- a/tests/phabricator/phabsend-create-public.json	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,957 +0,0 @@
-{
-    "version": 1,
-    "interactions": [
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "93"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&constraints%5Bcallsigns%5D%5B0%5D=HG",
-                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:20 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "549"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"data\":[{\"id\":10,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":\"Mercurial\",\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"spacePHID\":null,\"dateCreated\":1507817156,\"dateModified\":1529613276,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "220"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&diff=diff+--git+a%2Fbeta+b%2Fbeta%0A---+a%2Fbeta%0A%2B%2B%2B+b%2Fbeta%0A%40%40+-1%2C1+%2B1%2C1+%40%40%0A-beta%0A%2Bpublic+change%0A&repositoryPHID=PHID-REPO-bvunnehri4u2isyr7bc3",
-                "uri": "https://phab.mercurial-scm.org//api/differential.createrawdiff",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:21 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "172"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"id\":1902,\"phid\":\"PHID-DIFF-uuzq4s7s72y4ts7ijduc\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/1902\\/\"},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "296"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1902&data=%7B%22branch%22%3A+%22default%22%2C+%22date%22%3A+%220+0%22%2C+%22node%22%3A+%2224ffd6bca53a1e05369ed5b8834587c2b2b364da%22%2C+%22parent%22%3A+%222837deb84f4ab1315c1197b8aef10c620465e352%22%2C+%22user%22%3A+%22test%22%7D&name=hg%3Ameta&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:22 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "257"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1902&data=%7B%2224ffd6bca53a1e05369ed5b8834587c2b2b364da%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22branch%22%3A+%22default%22%2C+%22commit%22%3A+%2224ffd6bca53a1e05369ed5b8834587c2b2b364da%22%2C+%22parents%22%3A+%5B%222837deb84f4ab1315c1197b8aef10c620465e352%22%5D%2C+%22time%22%3A+0%7D%7D&name=local%3Acommits&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:23 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "94"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&corpus=create+public+change+for+phabricator+testing",
-                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:23 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "306"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create public change for phabricator testing\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create public change for phabricator testing\"}]},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "253"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&transactions%5B0%5D%5Btype%5D=update&transactions%5B0%5D%5Bvalue%5D=PHID-DIFF-uuzq4s7s72y4ts7ijduc&transactions%5B1%5D%5Btype%5D=title&transactions%5B1%5D%5Bvalue%5D=create+public+change+for+phabricator+testing",
-                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:24 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "294"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"object\":{\"id\":1192,\"phid\":\"PHID-DREV-qb4xy3abx7eu4puizvjl\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-n2zlzs5qmdlvfbx\"},{\"phid\":\"PHID-XACT-DREV-dwojtdj2d3geffe\"},{\"phid\":\"PHID-XACT-DREV-gr4vgeynol22tgf\"},{\"phid\":\"PHID-XACT-DREV-aighrcyai72tgzv\"}]},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "232"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&diff=diff+--git+a%2Falpha+b%2Falpha%0A---+a%2Falpha%0A%2B%2B%2B+b%2Falpha%0A%40%40+-1%2C2+%2B1%2C1+%40%40%0A-alpha%0A-more%0A%2Bdraft+change%0A&repositoryPHID=PHID-REPO-bvunnehri4u2isyr7bc3",
-                "uri": "https://phab.mercurial-scm.org//api/differential.createrawdiff",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:25 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "172"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"id\":1903,\"phid\":\"PHID-DIFF-4pugk2zedyh2xm27uuvh\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/1903\\/\"},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "296"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1903&data=%7B%22branch%22%3A+%22default%22%2C+%22date%22%3A+%220+0%22%2C+%22node%22%3A+%22ac331633be793e0d4159d5525b404a9782f54904%22%2C+%22parent%22%3A+%2224ffd6bca53a1e05369ed5b8834587c2b2b364da%22%2C+%22user%22%3A+%22test%22%7D&name=hg%3Ameta&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:26 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "257"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1903&data=%7B%22ac331633be793e0d4159d5525b404a9782f54904%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22branch%22%3A+%22default%22%2C+%22commit%22%3A+%22ac331633be793e0d4159d5525b404a9782f54904%22%2C+%22parents%22%3A+%5B%2224ffd6bca53a1e05369ed5b8834587c2b2b364da%22%5D%2C+%22time%22%3A+0%7D%7D&name=local%3Acommits&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:27 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "93"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&corpus=create+draft+change+for+phabricator+testing",
-                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:27 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "304"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create draft change for phabricator testing\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create draft change for phabricator testing\"}]},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "409"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&transactions%5B0%5D%5Btype%5D=update&transactions%5B0%5D%5Bvalue%5D=PHID-DIFF-4pugk2zedyh2xm27uuvh&transactions%5B1%5D%5Btype%5D=parents.set&transactions%5B1%5D%5Bvalue%5D%5B0%5D=PHID-DREV-qb4xy3abx7eu4puizvjl&transactions%5B2%5D%5Btype%5D=title&transactions%5B2%5D%5Bvalue%5D=create+draft+change+for+phabricator+testing",
-                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:29 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "420"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"object\":{\"id\":1193,\"phid\":\"PHID-DREV-shdibf6gnumia7pou4wo\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-5lh4bjyat7sopph\"},{\"phid\":\"PHID-XACT-DREV-ihh5mnfq4lfd7z6\"},{\"phid\":\"PHID-XACT-DREV-jqgmk2a3klvofsk\"},{\"phid\":\"PHID-XACT-DREV-w5t5g4ke6kjynf3\"},{\"phid\":\"PHID-XACT-DREV-ro7ijohdoyaes55\"},{\"phid\":\"PHID-XACT-DREV-4g3uhii5akj24he\"},{\"phid\":\"PHID-XACT-DREV-44imsawbkha5nqw\"}]},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "74"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "ids%5B0%5D=1192&ids%5B1%5D=1193&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.query",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:29 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "1522"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":[{\"id\":\"1193\",\"phid\":\"PHID-DREV-shdibf6gnumia7pou4wo\",\"title\":\"create draft change for phabricator testing\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D1193\",\"dateCreated\":\"1557063088\",\"dateModified\":\"1557063088\",\"authorPHID\":\"PHID-USER-qmzis76vb2yh3ogldu6r\",\"status\":\"0\",\"statusName\":\"Draft\",\"properties\":{\"draft.broadcast\":false,\"lines.added\":1,\"lines.removed\":2},\"branch\":null,\"summary\":\" \",\"testPlan\":\"\",\"lineCount\":\"3\",\"activeDiffPHID\":\"PHID-DIFF-4pugk2zedyh2xm27uuvh\",\"diffs\":[\"1903\"],\"commits\":[],\"reviewers\":[],\"ccs\":[],\"hashes\":[],\"auxiliary\":{\"bugzilla.bug-id\":null,\"phabricator:projects\":[\"PHID-PROJ-f2a3wl5wxtqdtfgdjqzk\"],\"phabricator:depends-on\":[\"PHID-DREV-qb4xy3abx7eu4puizvjl\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null},{\"id\":\"1192\",\"phid\":\"PHID-DREV-qb4xy3abx7eu4puizvjl\",\"title\":\"create public change for phabricator testing\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D1192\",\"dateCreated\":\"1557063084\",\"dateModified\":\"1557063088\",\"authorPHID\":\"PHID-USER-qmzis76vb2yh3ogldu6r\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":1,\"lines.removed\":1},\"branch\":null,\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-uuzq4s7s72y4ts7ijduc\",\"diffs\":[\"1902\"],\"commits\":[],\"reviewers\":[],\"ccs\":[],\"hashes\":[],\"auxiliary\":{\"bugzilla.bug-id\":null,\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null}],\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "296"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1903&data=%7B%22branch%22%3A+%22default%22%2C+%22date%22%3A+%220+0%22%2C+%22node%22%3A+%22a19f1434f9a578325eb9799c9961b5465d4e6e40%22%2C+%22parent%22%3A+%2224ffd6bca53a1e05369ed5b8834587c2b2b364da%22%2C+%22user%22%3A+%22test%22%7D&name=hg%3Ameta&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:30 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "257"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1903&data=%7B%22a19f1434f9a578325eb9799c9961b5465d4e6e40%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22branch%22%3A+%22default%22%2C+%22commit%22%3A+%22a19f1434f9a578325eb9799c9961b5465d4e6e40%22%2C+%22parents%22%3A+%5B%2224ffd6bca53a1e05369ed5b8834587c2b2b364da%22%5D%2C+%22time%22%3A+0%7D%7D&name=local%3Acommits&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:31 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        }
-    ]
-}
--- a/tests/phabricator/phabsend-update-alpha-create-beta.json	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1025 +0,0 @@
-{
-    "version": 1,
-    "interactions": [
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "66"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&revisionIDs%5B0%5D=1190",
-                "uri": "https://phab.mercurial-scm.org//api/differential.querydiffs",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:08 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "1132"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"1899\":{\"id\":\"1899\",\"revisionID\":\"1190\",\"dateCreated\":\"1557063061\",\"dateModified\":\"1557063064\",\"sourceControlBaseRevision\":null,\"sourceControlPath\":null,\"sourceControlSystem\":null,\"branch\":null,\"bookmark\":null,\"creationMethod\":\"web\",\"description\":null,\"unitStatus\":\"4\",\"lintStatus\":\"4\",\"changes\":[{\"id\":\"4355\",\"metadata\":{\"line:first\":1,\"hash.effect\":\"g6dr_XSxA9EP\"},\"oldPath\":null,\"currentPath\":\"alpha\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"1\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"1\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+alpha\\n\"}]}],\"properties\":{\"hg:meta\":{\"branch\":\"default\",\"date\":\"0 0\",\"node\":\"53fe3a1e0f42670a88ad845247b2ed4d5e645434\",\"parent\":\"0000000000000000000000000000000000000000\",\"user\":\"test\"},\"local:commits\":{\"53fe3a1e0f42670a88ad845247b2ed4d5e645434\":{\"author\":\"test\",\"authorEmail\":\"test\",\"branch\":\"default\",\"time\":0}}},\"authorName\":\"test\",\"authorEmail\":\"test\"}},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "93"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&constraints%5Bcallsigns%5D%5B0%5D=HG",
-                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:09 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "549"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"data\":[{\"id\":10,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":\"Mercurial\",\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"spacePHID\":null,\"dateCreated\":1507817156,\"dateModified\":1529613276,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "245"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&diff=diff+--git+a%2Falpha+b%2Falpha%0Anew+file+mode+100644%0A---+%2Fdev%2Fnull%0A%2B%2B%2B+b%2Falpha%0A%40%40+-0%2C0+%2B1%2C2+%40%40%0A%2Balpha%0A%2Bmore%0A&repositoryPHID=PHID-REPO-bvunnehri4u2isyr7bc3",
-                "uri": "https://phab.mercurial-scm.org//api/differential.createrawdiff",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:09 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "172"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"id\":1900,\"phid\":\"PHID-DIFF-gra4b3ivsgebktbeoxxx\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/1900\\/\"},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "296"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1900&data=%7B%22branch%22%3A+%22default%22%2C+%22date%22%3A+%220+0%22%2C+%22node%22%3A+%22d940d39fb603f29ea5df4b7c15f315fe6ff4e346%22%2C+%22parent%22%3A+%220000000000000000000000000000000000000000%22%2C+%22user%22%3A+%22test%22%7D&name=hg%3Ameta&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:10 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "257"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1900&data=%7B%22d940d39fb603f29ea5df4b7c15f315fe6ff4e346%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22branch%22%3A+%22default%22%2C+%22commit%22%3A+%22d940d39fb603f29ea5df4b7c15f315fe6ff4e346%22%2C+%22parents%22%3A+%5B%220000000000000000000000000000000000000000%22%5D%2C+%22time%22%3A+0%7D%7D&name=local%3Acommits&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:11 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "173"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&corpus=create+alpha+for+phabricator+test+%E2%82%AC%0A%0ADifferential+Revision%3A+https%3A%2F%2Fphab.mercurial-scm.org%2FD1190",
-                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:11 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "316"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create alpha for phabricator test \\u20ac\",\"revisionID\":1190},\"revisionIDFieldInfo\":{\"value\":1190,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create alpha for phabricator test \\u20ac\"}]},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "274"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&objectIdentifier=1190&transactions%5B0%5D%5Btype%5D=update&transactions%5B0%5D%5Bvalue%5D=PHID-DIFF-gra4b3ivsgebktbeoxxx&transactions%5B1%5D%5Btype%5D=title&transactions%5B1%5D%5Bvalue%5D=create+alpha+for+phabricator+test+%E2%82%AC",
-                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:12 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "168"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"object\":{\"id\":1190,\"phid\":\"PHID-DREV-kikesmfxhzpfaxbzgj3l\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-tk6ciodgzlwo2v6\"}]},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "231"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&diff=diff+--git+a%2Fbeta+b%2Fbeta%0Anew+file+mode+100644%0A---+%2Fdev%2Fnull%0A%2B%2B%2B+b%2Fbeta%0A%40%40+-0%2C0+%2B1%2C1+%40%40%0A%2Bbeta%0A&repositoryPHID=PHID-REPO-bvunnehri4u2isyr7bc3",
-                "uri": "https://phab.mercurial-scm.org//api/differential.createrawdiff",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:13 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "172"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"id\":1901,\"phid\":\"PHID-DIFF-uhbyhoejzbniwwzj2q5c\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/1901\\/\"},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "296"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1901&data=%7B%22branch%22%3A+%22default%22%2C+%22date%22%3A+%220+0%22%2C+%22node%22%3A+%224b2486dfc8c7b238e70f8b022f9e09a0ea220415%22%2C+%22parent%22%3A+%22d940d39fb603f29ea5df4b7c15f315fe6ff4e346%22%2C+%22user%22%3A+%22test%22%7D&name=hg%3Ameta&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:14 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "257"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1901&data=%7B%224b2486dfc8c7b238e70f8b022f9e09a0ea220415%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22branch%22%3A+%22default%22%2C+%22commit%22%3A+%224b2486dfc8c7b238e70f8b022f9e09a0ea220415%22%2C+%22parents%22%3A+%5B%22d940d39fb603f29ea5df4b7c15f315fe6ff4e346%22%5D%2C+%22time%22%3A+0%7D%7D&name=local%3Acommits&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:15 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "82"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&corpus=create+beta+for+phabricator+test",
-                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:15 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "282"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create beta for phabricator test\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create beta for phabricator test\"}]},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "398"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "api.token=cli-hahayouwish&transactions%5B0%5D%5Btype%5D=update&transactions%5B0%5D%5Bvalue%5D=PHID-DIFF-uhbyhoejzbniwwzj2q5c&transactions%5B1%5D%5Btype%5D=parents.set&transactions%5B1%5D%5Bvalue%5D%5B0%5D=PHID-DREV-kikesmfxhzpfaxbzgj3l&transactions%5B2%5D%5Btype%5D=title&transactions%5B2%5D%5Bvalue%5D=create+beta+for+phabricator+test",
-                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:17 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "420"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":{\"object\":{\"id\":1191,\"phid\":\"PHID-DREV-uuyrww2k3weorn2jwcaz\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-erc62kc5d5t53dw\"},{\"phid\":\"PHID-XACT-DREV-56jxoj2nev5we3e\"},{\"phid\":\"PHID-XACT-DREV-cajnfsuigdcmfpn\"},{\"phid\":\"PHID-XACT-DREV-expntfzlv44va6h\"},{\"phid\":\"PHID-XACT-DREV-hzrgd55fpfjcan7\"},{\"phid\":\"PHID-XACT-DREV-v4baqr7c5ydtltr\"},{\"phid\":\"PHID-XACT-DREV-ge6dwwrvrkluq2q\"}]},\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "74"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "ids%5B0%5D=1190&ids%5B1%5D=1191&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.query",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:17 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "1514"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":[{\"id\":\"1191\",\"phid\":\"PHID-DREV-uuyrww2k3weorn2jwcaz\",\"title\":\"create beta for phabricator test\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D1191\",\"dateCreated\":\"1557063076\",\"dateModified\":\"1557063077\",\"authorPHID\":\"PHID-USER-qmzis76vb2yh3ogldu6r\",\"status\":\"0\",\"statusName\":\"Draft\",\"properties\":{\"draft.broadcast\":false,\"lines.added\":1,\"lines.removed\":0},\"branch\":null,\"summary\":\" \",\"testPlan\":\"\",\"lineCount\":\"1\",\"activeDiffPHID\":\"PHID-DIFF-uhbyhoejzbniwwzj2q5c\",\"diffs\":[\"1901\"],\"commits\":[],\"reviewers\":[],\"ccs\":[],\"hashes\":[],\"auxiliary\":{\"bugzilla.bug-id\":null,\"phabricator:projects\":[\"PHID-PROJ-f2a3wl5wxtqdtfgdjqzk\"],\"phabricator:depends-on\":[\"PHID-DREV-kikesmfxhzpfaxbzgj3l\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null},{\"id\":\"1190\",\"phid\":\"PHID-DREV-kikesmfxhzpfaxbzgj3l\",\"title\":\"create alpha for phabricator test \\u20ac\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D1190\",\"dateCreated\":\"1557063064\",\"dateModified\":\"1557063076\",\"authorPHID\":\"PHID-USER-qmzis76vb2yh3ogldu6r\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":2,\"lines.removed\":0},\"branch\":null,\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-gra4b3ivsgebktbeoxxx\",\"diffs\":[\"1900\",\"1899\"],\"commits\":[],\"reviewers\":[],\"ccs\":[],\"hashes\":[],\"auxiliary\":{\"bugzilla.bug-id\":null,\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null}],\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "296"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1901&data=%7B%22branch%22%3A+%22default%22%2C+%22date%22%3A+%220+0%22%2C+%22node%22%3A+%222837deb84f4ab1315c1197b8aef10c620465e352%22%2C+%22parent%22%3A+%22d940d39fb603f29ea5df4b7c15f315fe6ff4e346%22%2C+%22user%22%3A+%22test%22%7D&name=hg%3Ameta&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:18 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        },
-        {
-            "request": {
-                "headers": {
-                    "user-agent": [
-                        "mercurial/proto-1.0 (Mercurial 5.0+93-d811f17090a3+20190505)"
-                    ],
-                    "content-length": [
-                        "257"
-                    ],
-                    "content-type": [
-                        "application/x-www-form-urlencoded"
-                    ],
-                    "accept": [
-                        "application/mercurial-0.1"
-                    ],
-                    "host": [
-                        "phab.mercurial-scm.org"
-                    ]
-                },
-                "body": "diff_id=1901&data=%7B%222837deb84f4ab1315c1197b8aef10c620465e352%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22branch%22%3A+%22default%22%2C+%22commit%22%3A+%222837deb84f4ab1315c1197b8aef10c620465e352%22%2C+%22parents%22%3A+%5B%22d940d39fb603f29ea5df4b7c15f315fe6ff4e346%22%5D%2C+%22time%22%3A+0%7D%7D&name=local%3Acommits&api.token=cli-hahayouwish",
-                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
-                "method": "POST"
-            },
-            "response": {
-                "status": {
-                    "code": 200,
-                    "message": "OK"
-                },
-                "headers": {
-                    "x-xss-protection": [
-                        "1; mode=block"
-                    ],
-                    "cache-control": [
-                        "no-store"
-                    ],
-                    "content-type": [
-                        "application/json"
-                    ],
-                    "date": [
-                        "Sun, 05 May 2019 13:31:19 GMT"
-                    ],
-                    "connection": [
-                        "keep-alive"
-                    ],
-                    "strict-transport-security": [
-                        "max-age=31536000; includeSubdomains; preload"
-                    ],
-                    "vary": [
-                        "Accept-Encoding"
-                    ],
-                    "x-frame-options": [
-                        "Deny"
-                    ],
-                    "content-length": [
-                        "51"
-                    ],
-                    "x-content-type-options": [
-                        "nosniff"
-                    ],
-                    "expires": [
-                        "Sat, 01 Jan 2000 00:00:00 GMT"
-                    ]
-                },
-                "body": {
-                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
-                }
-            }
-        }
-    ]
-}
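
The content removed above is a recorded HTTP cassette for the phabricator
extension's tests: each entry pairs a captured ``request`` with the canned
``response`` it should replay. As a rough sketch of how such a recording can
be inspected (the filename is hypothetical, and the top-level
``interactions`` key is an assumption based on vcrpy's JSON layout)::

    import json

    # Hypothetical cassette path; the real recordings live under tests/.
    with open('phabsend-create-alpha.json') as f:
        cassette = json.load(f)

    # Summarize each recorded exchange as "METHOD URI -> STATUS".
    for interaction in cassette.get('interactions', []):
        req, resp = interaction['request'], interaction['response']
        print('%s %s -> %d'
              % (req['method'], req['uri'], resp['status']['code']))
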
--- a/tests/printenv.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/printenv.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,6 +19,7 @@
 
 try:
     import msvcrt
+
     msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
     msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
     msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
@@ -52,8 +53,7 @@
 
 # variables with empty values may not exist on all platforms, filter
 # them now for portability's sake.
-env = [(k, v) for k, v in os.environ.items()
-       if k.startswith("HG_") and v]
+env = [(k, v) for k, v in os.environ.items() if k.startswith("HG_") and v]
 env.sort()
 
 out.write(b"%s hook: " % args.name.encode('ascii'))
@@ -62,8 +62,9 @@
 else:
     filter = lambda x: x
 
-vars = [b"%s=%s" % (k.encode('ascii'), filter(v).encode('ascii'))
-        for k, v in env]
+vars = [
+    b"%s=%s" % (k.encode('ascii'), filter(v).encode('ascii')) for k, v in env
+]
 
 # Print variables on out
 if not args.line:
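
The hunk above only reflows the ``HG_*`` filtering expression; behavior is
unchanged. A standalone sketch of the same idiom, runnable with plain
Python::

    import os

    # Keep only HG_* hook variables carrying a non-empty value, since
    # empty values may not exist on every platform.
    env = sorted(
        (k, v) for k, v in os.environ.items() if k.startswith("HG_") and v
    )
    for k, v in env:
        print("%s=%s" % (k, v))
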
--- a/tests/printrevset.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/printrevset.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,16 +1,15 @@
 from __future__ import absolute_import
 from mercurial import (
-  cmdutil,
-  commands,
-  extensions,
-  logcmdutil,
-  revsetlang,
-  smartset,
+    cmdutil,
+    commands,
+    extensions,
+    logcmdutil,
+    revsetlang,
+    smartset,
 )
 
-from mercurial.utils import (
-  stringutil,
-)
+from mercurial.utils import stringutil
+
 
 def logrevset(repo, pats, opts):
     revs = logcmdutil._initialrevs(repo, opts)
@@ -19,6 +18,7 @@
     match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
     return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
 
+
 def uisetup(ui):
     def printrevset(orig, repo, pats, opts):
         revs, filematcher = orig(repo, pats, opts)
@@ -35,7 +35,14 @@
             ui.write(stringutil.prettyrepr(revs) + b'\n')
             revs = smartset.baseset()  # display no revisions
         return revs, filematcher
+
     extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
     aliases, entry = cmdutil.findcmd(b'log', commands.table)
-    entry[1].append((b'', b'print-revset', False,
-                     b'print generated revset and exit (DEPRECATED)'))
+    entry[1].append(
+        (
+            b'',
+            b'print-revset',
+            False,
+            b'print generated revset and exit (DEPRECATED)',
+        )
+    )
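
The pattern reformatted above -- ``extensions.wrapfunction`` plus appending
an option tuple to the ``log`` command entry -- is how test extensions
intercept ``hg log``. A pared-down sketch of just the wrapping half (the
``tracedgetrevs`` name and its message are illustrative, not part of this
change)::

    from mercurial import extensions, logcmdutil

    def uisetup(ui):
        # The wrapper receives the original callable as its first argument.
        def tracedgetrevs(orig, repo, pats, opts):
            repo.ui.write(b'getrevs invoked\n')  # purely illustrative
            return orig(repo, pats, opts)

        extensions.wrapfunction(logcmdutil, 'getrevs', tracedgetrevs)
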
--- a/tests/pullext.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/pullext.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,8 +13,9 @@
     error,
     extensions,
     localrepo,
-    repository,
 )
+from mercurial.interfaces import repository
+
 
 def clonecommand(orig, ui, repo, *args, **kwargs):
     if kwargs.get(r'include') or kwargs.get(r'exclude'):
@@ -28,9 +29,11 @@
 
     return orig(ui, repo, *args, **kwargs)
 
+
 def featuresetup(ui, features):
     features.add(repository.NARROW_REQUIREMENT)
 
+
 def extsetup(ui):
     entry = extensions.wrapcommand(commands.table, b'clone', clonecommand)
 
@@ -38,13 +41,16 @@
     hasdepth = any(x[1] == b'depth' for x in entry[1])
 
     if not hasinclude:
-        entry[1].append((b'', b'include', [],
-                         _(b'pattern of file/directory to clone')))
-        entry[1].append((b'', b'exclude', [],
-                         _(b'pattern of file/directory to not clone')))
+        entry[1].append(
+            (b'', b'include', [], _(b'pattern of file/directory to clone'))
+        )
+        entry[1].append(
+            (b'', b'exclude', [], _(b'pattern of file/directory to not clone'))
+        )
 
     if not hasdepth:
-        entry[1].append((b'', b'depth', b'',
-                         _(b'ancestry depth of changesets to fetch')))
+        entry[1].append(
+            (b'', b'depth', b'', _(b'ancestry depth of changesets to fetch'))
+        )
 
     localrepo.featuresetupfuncs.add(featuresetup)
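
``pullext.py`` shows the companion pattern: ``extensions.wrapcommand``
returns the command table entry, and ``entry[1]`` is its option list, so new
flags can be appended as ``(short, long, default, help)`` tuples. A minimal
sketch under the same conventions (the ``shallow`` flag is a made-up
placeholder)::

    from mercurial import commands, extensions
    from mercurial.i18n import _

    def clonecommand(orig, ui, repo, *args, **kwargs):
        # Inspect the injected option, then delegate to the real clone.
        if kwargs.get(r'shallow'):
            ui.write(b'shallow clone requested\n')
        return orig(ui, repo, *args, **kwargs)

    def extsetup(ui):
        entry = extensions.wrapcommand(commands.table, b'clone', clonecommand)
        entry[1].append(
            (b'', b'shallow', False, _(b'demo flag (hypothetical)'))
        )
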
--- a/tests/remotefilelog-getflogheads.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/remotefilelog-getflogheads.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,9 +9,8 @@
 cmdtable = {}
 command = registrar.command(cmdtable)
 
-@command(b'getflogheads',
-         [],
-         b'path')
+
+@command(b'getflogheads', [], b'path')
 def getflogheads(ui, repo, path):
     """
     Extension printing a remotefilelog's heads
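
For reference, ``registrar.command`` is what turns a decorated function into
an ``hg`` subcommand, as this helper does. A stripped-down sketch (the
command name and output are invented for illustration)::

    from mercurial import registrar

    cmdtable = {}
    command = registrar.command(cmdtable)

    # Arguments are (name, options, synopsis); the body gets ui and repo.
    @command(b'demoheads', [], b'PATH')
    def demoheads(ui, repo, path):
        """hypothetical command echoing its argument"""
        ui.write(b'%s\n' % path)
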
--- a/tests/revlog-formatv0.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/revlog-formatv0.py	Mon Oct 21 11:09:48 2019 -0400
@@ -23,29 +23,39 @@
 import sys
 
 files = [
-    (b'formatv0/.hg/00changelog.i',
-     b'000000000000004400000000000000000000000000000000000000'
-     b'000000000000000000000000000000000000000000000000000000'
-     b'0000a1ef0b125355d27765928be600cfe85784284ab3'),
-    (b'formatv0/.hg/00changelog.d',
-     b'756163613935613961356635353036303562366138343738336237'
-     b'61623536363738616436356635380a757365720a3020300a656d70'
-     b'74790a0a656d7074792066696c65'),
-    (b'formatv0/.hg/00manifest.i',
-     b'000000000000003000000000000000000000000000000000000000'
-     b'000000000000000000000000000000000000000000000000000000'
-     b'0000aca95a9a5f550605b6a84783b7ab56678ad65f58'),
-    (b'formatv0/.hg/00manifest.d',
-     b'75656d707479006238306465356431333837353835343163356630'
-     b'35323635616431343461623966613836643164620a'),
-    (b'formatv0/.hg/data/empty.i',
-     b'000000000000000000000000000000000000000000000000000000'
-     b'000000000000000000000000000000000000000000000000000000'
-     b'0000b80de5d138758541c5f05265ad144ab9fa86d1db'),
-    (b'formatv0/.hg/data/empty.d',
-     b''),
+    (
+        b'formatv0/.hg/00changelog.i',
+        b'000000000000004400000000000000000000000000000000000000'
+        b'000000000000000000000000000000000000000000000000000000'
+        b'0000a1ef0b125355d27765928be600cfe85784284ab3',
+    ),
+    (
+        b'formatv0/.hg/00changelog.d',
+        b'756163613935613961356635353036303562366138343738336237'
+        b'61623536363738616436356635380a757365720a3020300a656d70'
+        b'74790a0a656d7074792066696c65',
+    ),
+    (
+        b'formatv0/.hg/00manifest.i',
+        b'000000000000003000000000000000000000000000000000000000'
+        b'000000000000000000000000000000000000000000000000000000'
+        b'0000aca95a9a5f550605b6a84783b7ab56678ad65f58',
+    ),
+    (
+        b'formatv0/.hg/00manifest.d',
+        b'75656d707479006238306465356431333837353835343163356630'
+        b'35323635616431343461623966613836643164620a',
+    ),
+    (
+        b'formatv0/.hg/data/empty.i',
+        b'000000000000000000000000000000000000000000000000000000'
+        b'000000000000000000000000000000000000000000000000000000'
+        b'0000b80de5d138758541c5f05265ad144ab9fa86d1db',
+    ),
+    (b'formatv0/.hg/data/empty.d', b''),
 ]
 
+
 def makedirs(name):
     """recursive directory creation"""
     parent = os.path.dirname(name)
@@ -53,6 +63,7 @@
         makedirs(parent)
     os.mkdir(name)
 
+
 makedirs(os.path.join(*'formatv0/.hg/data'.split('/')))
 
 for name, data in files:
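
Each ``.d`` entry above is a hex-encoded revlog data chunk; the leading
``u`` byte marks it as uncompressed. Decoding the ``00changelog.d`` payload
with the stdlib makes the structure visible::

    import binascii

    payload_hex = (
        b'756163613935613961356635353036303562366138343738336237'
        b'61623536363738616436356635380a757365720a3020300a656d70'
        b'74790a0a656d7074792066696c65'
    )
    # Prints the 'u' marker followed by the manifest hash, then the
    # user, date, file list ("empty"), and description ("empty file").
    print(binascii.unhexlify(payload_hex))
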
--- a/tests/revnamesext.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/revnamesext.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,17 +2,20 @@
 
 from __future__ import absolute_import
 
-from mercurial import (
-    namespaces,
-)
+from mercurial import namespaces
+
 
 def reposetup(ui, repo):
     names = {b'r%d' % rev: repo[rev].node() for rev in repo}
     namemap = lambda r, name: names.get(name)
     nodemap = lambda r, node: [b'r%d' % repo[node].rev()]
 
-    ns = namespaces.namespace(b'revnames', templatename=b'revname',
-                              logname=b'revname',
-                              listnames=lambda r: names.keys(),
-                              namemap=namemap, nodemap=nodemap)
+    ns = namespaces.namespace(
+        b'revnames',
+        templatename=b'revname',
+        logname=b'revname',
+        listnames=lambda r: names.keys(),
+        namemap=namemap,
+        nodemap=nodemap,
+    )
     repo.names.addnamespace(ns)
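
``namespaces.namespace`` is the extension point being reformatted here: it
takes a namespace name plus ``listnames``/``namemap``/``nodemap`` callables,
and is attached via ``repo.names.addnamespace``. A small sketch with an
invented alias (``tip-alias`` and the namespace name are hypothetical)::

    from mercurial import namespaces

    def reposetup(ui, repo):
        ns = namespaces.namespace(
            b'demonames',
            templatename=b'demoname',
            logname=b'demoname',
            listnames=lambda r: [b'tip-alias'],
            # Resolve the alias to the tip node; unknown names map to None.
            namemap=lambda r, name: (
                r[b'tip'].node() if name == b'tip-alias' else None
            ),
            nodemap=lambda r, node: [],
        )
        repo.names.addnamespace(ns)
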
--- a/tests/run-tests.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/run-tests.py	Mon Oct 21 11:09:48 2019 -0400
@@ -75,30 +75,26 @@
 
 try:
     import shlex
+
     shellquote = shlex.quote
 except (ImportError, AttributeError):
     import pipes
+
     shellquote = pipes.quote
 
-if os.environ.get('RTUNICODEPEDANTRY', False):
-    try:
-        reload(sys)
-        sys.setdefaultencoding("undefined")
-    except NameError:
-        pass
-
 processlock = threading.Lock()
 
 pygmentspresent = False
 # ANSI color is unsupported prior to Windows 10
 if os.name != 'nt':
-    try: # is pygments installed
+    try:  # is pygments installed
         import pygments
         import pygments.lexers as lexers
         import pygments.lexer as lexer
         import pygments.formatters as formatters
         import pygments.token as token
         import pygments.style as style
+
         pygmentspresent = True
         difflexer = lexers.DiffLexer()
         terminal256formatter = formatters.Terminal256Formatter()
@@ -106,6 +102,7 @@
         pass
 
 if pygmentspresent:
+
     class TestRunnerStyle(style.Style):
         default_style = ""
         skipped = token.string_to_tokentype("Token.Generic.Skipped")
@@ -113,10 +110,10 @@
         skippedname = token.string_to_tokentype("Token.Generic.SName")
         failedname = token.string_to_tokentype("Token.Generic.FName")
         styles = {
-            skipped:         '#e5e5e5',
-            skippedname:     '#00ffff',
-            failed:          '#7f0000',
-            failedname:      '#ff0000',
+            skipped: '#e5e5e5',
+            skippedname: '#00ffff',
+            failed: '#7f0000',
+            failedname: '#ff0000',
         }
 
     class TestRunnerLexer(lexer.RegexLexer):
@@ -134,7 +131,7 @@
             'failed': [
                 (testpattern, token.Generic.FName),
                 (r'(:| ).*', token.Generic.Failed),
-            ]
+            ],
         }
 
     runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
@@ -144,7 +141,8 @@
 
 if sys.version_info > (3, 5, 0):
     PYTHON3 = True
-    xrange = range # we use xrange in one place, and we'd rather not use range
+    xrange = range  # we use xrange in one place, and we'd rather not use range
+
     def _bytespath(p):
         if p is None:
             return p
@@ -165,20 +163,27 @@
                 self.__len__ = strenv.__len__
                 self.clear = strenv.clear
                 self._strenv = strenv
+
             def __getitem__(self, k):
                 v = self._strenv.__getitem__(_strpath(k))
                 return _bytespath(v)
+
             def __setitem__(self, k, v):
                 self._strenv.__setitem__(_strpath(k), _strpath(v))
+
             def __delitem__(self, k):
                 self._strenv.__delitem__(_strpath(k))
+
             def __contains__(self, k):
                 return self._strenv.__contains__(_strpath(k))
+
             def __iter__(self):
                 return iter([_bytespath(k) for k in iter(self._strenv)])
+
             def get(self, k, default=None):
                 v = self._strenv.get(_strpath(k), _strpath(default))
                 return _bytespath(v)
+
             def pop(self, k, default=None):
                 v = self._strenv.pop(_strpath(k), _strpath(default))
                 return _bytespath(v)
@@ -190,9 +195,11 @@
         getcwdb = lambda: _bytespath(os.getcwd())
 
 elif sys.version_info >= (3, 0, 0):
-    print('%s is only supported on Python 3.5+ and 2.7, not %s' %
-          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
-    sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
+    print(
+        '%s is only supported on Python 3.5+ and 2.7, not %s'
+        % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
+    )
+    sys.exit(70)  # EX_SOFTWARE from `man 3 sysexits`
 else:
     PYTHON3 = False
 
@@ -235,9 +242,11 @@
     else:
         return False
 
+
 # useipv6 will be set by parseargs
 useipv6 = None
 
+
 def checkportisavailable(port):
     """return true if a port seems free to bind on localhost"""
     if useipv6:
@@ -250,19 +259,31 @@
         s.close()
         return True
     except socket.error as exc:
-        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
-                             errno.EPROTONOSUPPORT):
+        if exc.errno not in (
+            errno.EADDRINUSE,
+            errno.EADDRNOTAVAIL,
+            errno.EPROTONOSUPPORT,
+        ):
             raise
     return False
 
+
 closefds = os.name == 'posix'
+
+
 def Popen4(cmd, wd, timeout, env=None):
     processlock.acquire()
-    p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
-                         cwd=_strpath(wd), env=env,
-                         close_fds=closefds,
-                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
-                         stderr=subprocess.STDOUT)
+    p = subprocess.Popen(
+        _strpath(cmd),
+        shell=True,
+        bufsize=-1,
+        cwd=_strpath(wd),
+        env=env,
+        close_fds=closefds,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+    )
     processlock.release()
 
     p.fromchild = p.stdout
@@ -271,17 +292,20 @@
 
     p.timeout = False
     if timeout:
+
         def t():
             start = time.time()
             while time.time() - start < timeout and p.returncode is None:
-                time.sleep(.1)
+                time.sleep(0.1)
             p.timeout = True
             if p.returncode is None:
                 terminate(p)
+
         threading.Thread(target=t).start()
 
     return p
 
+
 if sys.executable:
     sysexecutable = sys.executable
 elif os.environ.get('PYTHONEXECUTABLE'):
@@ -304,9 +328,11 @@
     'shell': ('HGTEST_SHELL', 'sh'),
 }
 
+
 def canonpath(path):
     return os.path.realpath(os.path.expanduser(path))
 
+
 def parselistfiles(files, listtype, warn=True):
     entries = dict()
     for filename in files:
@@ -328,6 +354,7 @@
         f.close()
     return entries
 
+
 def parsettestcases(path):
     """read a .t test file, return a set of test case names
 
@@ -344,131 +371,262 @@
             raise
     return cases
 
+
 def getparser():
     """Obtain the OptionParser used by the CLI."""
     parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
 
     selection = parser.add_argument_group('Test Selection')
-    selection.add_argument('--allow-slow-tests', action='store_true',
-        help='allow extremely slow tests')
-    selection.add_argument("--blacklist", action="append",
-        help="skip tests listed in the specified blacklist file")
-    selection.add_argument("--changed",
-        help="run tests that are changed in parent rev or working directory")
-    selection.add_argument("-k", "--keywords",
-        help="run tests matching keywords")
-    selection.add_argument("-r", "--retest", action="store_true",
-        help = "retest failed tests")
-    selection.add_argument("--test-list", action="append",
-        help="read tests to run from the specified file")
-    selection.add_argument("--whitelist", action="append",
-        help="always run tests listed in the specified whitelist file")
-    selection.add_argument('tests', metavar='TESTS', nargs='*',
-                        help='Tests to run')
+    selection.add_argument(
+        '--allow-slow-tests',
+        action='store_true',
+        help='allow extremely slow tests',
+    )
+    selection.add_argument(
+        "--blacklist",
+        action="append",
+        help="skip tests listed in the specified blacklist file",
+    )
+    selection.add_argument(
+        "--changed",
+        help="run tests that are changed in parent rev or working directory",
+    )
+    selection.add_argument(
+        "-k", "--keywords", help="run tests matching keywords"
+    )
+    selection.add_argument(
+        "-r", "--retest", action="store_true", help="retest failed tests"
+    )
+    selection.add_argument(
+        "--test-list",
+        action="append",
+        help="read tests to run from the specified file",
+    )
+    selection.add_argument(
+        "--whitelist",
+        action="append",
+        help="always run tests listed in the specified whitelist file",
+    )
+    selection.add_argument(
+        'tests', metavar='TESTS', nargs='*', help='Tests to run'
+    )
 
     harness = parser.add_argument_group('Test Harness Behavior')
-    harness.add_argument('--bisect-repo',
-                        metavar='bisect_repo',
-                        help=("Path of a repo to bisect. Use together with "
-                              "--known-good-rev"))
-    harness.add_argument("-d", "--debug", action="store_true",
+    harness.add_argument(
+        '--bisect-repo',
+        metavar='bisect_repo',
+        help=(
+            "Path of a repo to bisect. Use together with " "--known-good-rev"
+        ),
+    )
+    harness.add_argument(
+        "-d",
+        "--debug",
+        action="store_true",
         help="debug mode: write output of test scripts to console"
-             " rather than capturing and diffing it (disables timeout)")
-    harness.add_argument("-f", "--first", action="store_true",
-        help="exit on the first test failure")
-    harness.add_argument("-i", "--interactive", action="store_true",
-        help="prompt to accept changed output")
-    harness.add_argument("-j", "--jobs", type=int,
+        " rather than capturing and diffing it (disables timeout)",
+    )
+    harness.add_argument(
+        "-f",
+        "--first",
+        action="store_true",
+        help="exit on the first test failure",
+    )
+    harness.add_argument(
+        "-i",
+        "--interactive",
+        action="store_true",
+        help="prompt to accept changed output",
+    )
+    harness.add_argument(
+        "-j",
+        "--jobs",
+        type=int,
         help="number of jobs to run in parallel"
-             " (default: $%s or %d)" % defaults['jobs'])
-    harness.add_argument("--keep-tmpdir", action="store_true",
-        help="keep temporary directory after running tests")
-    harness.add_argument('--known-good-rev',
-                        metavar="known_good_rev",
-                        help=("Automatically bisect any failures using this "
-                              "revision as a known-good revision."))
-    harness.add_argument("--list-tests", action="store_true",
-        help="list tests instead of running them")
-    harness.add_argument("--loop", action="store_true",
-        help="loop tests repeatedly")
-    harness.add_argument('--random', action="store_true",
-        help='run tests in random order')
-    harness.add_argument('--order-by-runtime', action="store_true",
-        help='run slowest tests first, according to .testtimes')
-    harness.add_argument("-p", "--port", type=int,
+        " (default: $%s or %d)" % defaults['jobs'],
+    )
+    harness.add_argument(
+        "--keep-tmpdir",
+        action="store_true",
+        help="keep temporary directory after running tests",
+    )
+    harness.add_argument(
+        '--known-good-rev',
+        metavar="known_good_rev",
+        help=(
+            "Automatically bisect any failures using this "
+            "revision as a known-good revision."
+        ),
+    )
+    harness.add_argument(
+        "--list-tests",
+        action="store_true",
+        help="list tests instead of running them",
+    )
+    harness.add_argument(
+        "--loop", action="store_true", help="loop tests repeatedly"
+    )
+    harness.add_argument(
+        '--random', action="store_true", help='run tests in random order'
+    )
+    harness.add_argument(
+        '--order-by-runtime',
+        action="store_true",
+        help='run slowest tests first, according to .testtimes',
+    )
+    harness.add_argument(
+        "-p",
+        "--port",
+        type=int,
         help="port on which servers should listen"
-             " (default: $%s or %d)" % defaults['port'])
-    harness.add_argument('--profile-runner', action='store_true',
-                        help='run statprof on run-tests')
-    harness.add_argument("-R", "--restart", action="store_true",
-        help="restart at last error")
-    harness.add_argument("--runs-per-test", type=int, dest="runs_per_test",
-        help="run each test N times (default=1)", default=1)
-    harness.add_argument("--shell",
-        help="shell to use (default: $%s or %s)" % defaults['shell'])
-    harness.add_argument('--showchannels', action='store_true',
-                        help='show scheduling channels')
-    harness.add_argument("--slowtimeout", type=int,
+        " (default: $%s or %d)" % defaults['port'],
+    )
+    harness.add_argument(
+        '--profile-runner',
+        action='store_true',
+        help='run statprof on run-tests',
+    )
+    harness.add_argument(
+        "-R", "--restart", action="store_true", help="restart at last error"
+    )
+    harness.add_argument(
+        "--runs-per-test",
+        type=int,
+        dest="runs_per_test",
+        help="run each test N times (default=1)",
+        default=1,
+    )
+    harness.add_argument(
+        "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
+    )
+    harness.add_argument(
+        '--showchannels', action='store_true', help='show scheduling channels'
+    )
+    harness.add_argument(
+        "--slowtimeout",
+        type=int,
         help="kill errant slow tests after SLOWTIMEOUT seconds"
-             " (default: $%s or %d)" % defaults['slowtimeout'])
-    harness.add_argument("-t", "--timeout", type=int,
+        " (default: $%s or %d)" % defaults['slowtimeout'],
+    )
+    harness.add_argument(
+        "-t",
+        "--timeout",
+        type=int,
         help="kill errant tests after TIMEOUT seconds"
-             " (default: $%s or %d)" % defaults['timeout'])
-    harness.add_argument("--tmpdir",
+        " (default: $%s or %d)" % defaults['timeout'],
+    )
+    harness.add_argument(
+        "--tmpdir",
         help="run tests in the given temporary directory"
-             " (implies --keep-tmpdir)")
-    harness.add_argument("-v", "--verbose", action="store_true",
-        help="output verbose messages")
+        " (implies --keep-tmpdir)",
+    )
+    harness.add_argument(
+        "-v", "--verbose", action="store_true", help="output verbose messages"
+    )
 
     hgconf = parser.add_argument_group('Mercurial Configuration')
-    hgconf.add_argument("--chg", action="store_true",
-        help="install and use chg wrapper in place of hg")
-    hgconf.add_argument("--compiler",
-        help="compiler to build with")
-    hgconf.add_argument('--extra-config-opt', action="append", default=[],
-        help='set the given config opt in the test hgrc')
-    hgconf.add_argument("-l", "--local", action="store_true",
+    hgconf.add_argument(
+        "--chg",
+        action="store_true",
+        help="install and use chg wrapper in place of hg",
+    )
+    hgconf.add_argument("--compiler", help="compiler to build with")
+    hgconf.add_argument(
+        '--extra-config-opt',
+        action="append",
+        default=[],
+        help='set the given config opt in the test hgrc',
+    )
+    hgconf.add_argument(
+        "-l",
+        "--local",
+        action="store_true",
         help="shortcut for --with-hg=<testdir>/../hg, "
-             "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set")
-    hgconf.add_argument("--ipv6", action="store_true",
-        help="prefer IPv6 to IPv4 for network related tests")
-    hgconf.add_argument("--pure", action="store_true",
-        help="use pure Python code instead of C extensions")
-    hgconf.add_argument("-3", "--py3-warnings", action="store_true",
-        help="enable Py3k warnings on Python 2.7+")
-    hgconf.add_argument("--with-chg", metavar="CHG",
-        help="use specified chg wrapper in place of hg")
-    hgconf.add_argument("--with-hg",
+        "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
+    )
+    hgconf.add_argument(
+        "--ipv6",
+        action="store_true",
+        help="prefer IPv6 to IPv4 for network related tests",
+    )
+    hgconf.add_argument(
+        "--pure",
+        action="store_true",
+        help="use pure Python code instead of C extensions",
+    )
+    hgconf.add_argument(
+        "-3",
+        "--py3-warnings",
+        action="store_true",
+        help="enable Py3k warnings on Python 2.7+",
+    )
+    hgconf.add_argument(
+        "--with-chg",
+        metavar="CHG",
+        help="use specified chg wrapper in place of hg",
+    )
+    hgconf.add_argument(
+        "--with-hg",
         metavar="HG",
         help="test using specified hg script rather than a "
-             "temporary installation")
+        "temporary installation",
+    )
 
     reporting = parser.add_argument_group('Results Reporting')
-    reporting.add_argument("-C", "--annotate", action="store_true",
-        help="output files annotated with coverage")
-    reporting.add_argument("--color", choices=["always", "auto", "never"],
+    reporting.add_argument(
+        "-C",
+        "--annotate",
+        action="store_true",
+        help="output files annotated with coverage",
+    )
+    reporting.add_argument(
+        "--color",
+        choices=["always", "auto", "never"],
         default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
-        help="colorisation: always|auto|never (default: auto)")
-    reporting.add_argument("-c", "--cover", action="store_true",
-        help="print a test coverage report")
-    reporting.add_argument('--exceptions', action='store_true',
-        help='log all exceptions and generate an exception report')
-    reporting.add_argument("-H", "--htmlcov", action="store_true",
-        help="create an HTML report of the coverage of the files")
-    reporting.add_argument("--json", action="store_true",
-        help="store test result data in 'report.json' file")
-    reporting.add_argument("--outputdir",
-        help="directory to write error logs to (default=test directory)")
-    reporting.add_argument("-n", "--nodiff", action="store_true",
-        help="skip showing test changes")
-    reporting.add_argument("-S", "--noskips", action="store_true",
-        help="don't report skip tests verbosely")
-    reporting.add_argument("--time", action="store_true",
-        help="time how long each test takes")
-    reporting.add_argument("--view",
-        help="external diff viewer")
-    reporting.add_argument("--xunit",
-        help="record xunit results at specified path")
+        help="colorisation: always|auto|never (default: auto)",
+    )
+    reporting.add_argument(
+        "-c",
+        "--cover",
+        action="store_true",
+        help="print a test coverage report",
+    )
+    reporting.add_argument(
+        '--exceptions',
+        action='store_true',
+        help='log all exceptions and generate an exception report',
+    )
+    reporting.add_argument(
+        "-H",
+        "--htmlcov",
+        action="store_true",
+        help="create an HTML report of the coverage of the files",
+    )
+    reporting.add_argument(
+        "--json",
+        action="store_true",
+        help="store test result data in 'report.json' file",
+    )
+    reporting.add_argument(
+        "--outputdir",
+        help="directory to write error logs to (default=test directory)",
+    )
+    reporting.add_argument(
+        "-n", "--nodiff", action="store_true", help="skip showing test changes"
+    )
+    reporting.add_argument(
+        "-S",
+        "--noskips",
+        action="store_true",
+        help="don't report skipped tests verbosely",
+    )
+    reporting.add_argument(
+        "--time", action="store_true", help="time how long each test takes"
+    )
+    reporting.add_argument("--view", help="external diff viewer")
+    reporting.add_argument(
+        "--xunit", help="record xunit results at specified path"
+    )
 
     for option, (envvar, default) in defaults.items():
         defaults[option] = type(default)(os.environ.get(envvar, default))
@@ -476,6 +634,7 @@
 
     return parser
 
+
 def parseargs(args, parser):
     """Parse arguments with our OptionParser and validate results."""
     options = parser.parse_args(args)
@@ -484,15 +643,9 @@
     if 'java' in sys.platform or '__pypy__' in sys.modules:
         options.pure = True
 
-    if options.with_hg:
-        options.with_hg = canonpath(_bytespath(options.with_hg))
-        if not (os.path.isfile(options.with_hg) and
-                os.access(options.with_hg, os.X_OK)):
-            parser.error('--with-hg must specify an executable hg script')
-        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
-            sys.stderr.write('warning: --with-hg should specify an hg script\n')
-            sys.stderr.flush()
     if options.local:
+        if options.with_hg or options.with_chg:
+            parser.error('--local cannot be used with --with-hg or --with-chg')
         testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
         reporootdir = os.path.dirname(testdir)
         pathandattrs = [(b'hg', 'with_hg')]
@@ -501,26 +654,45 @@
         for relpath, attr in pathandattrs:
             binpath = os.path.join(reporootdir, relpath)
             if os.name != 'nt' and not os.access(binpath, os.X_OK):
-                parser.error('--local specified, but %r not found or '
-                             'not executable' % binpath)
-            setattr(options, attr, binpath)
+                parser.error(
+                    '--local specified, but %r not found or '
+                    'not executable' % binpath
+                )
+            setattr(options, attr, _strpath(binpath))
+
+    if options.with_hg:
+        options.with_hg = canonpath(_bytespath(options.with_hg))
+        if not (
+            os.path.isfile(options.with_hg)
+            and os.access(options.with_hg, os.X_OK)
+        ):
+            parser.error('--with-hg must specify an executable hg script')
+        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
+            sys.stderr.write('warning: --with-hg should specify an hg script\n')
+            sys.stderr.flush()
 
     if (options.chg or options.with_chg) and os.name == 'nt':
         parser.error('chg does not work on %s' % os.name)
     if options.with_chg:
         options.chg = False  # no installation to temporary location
         options.with_chg = canonpath(_bytespath(options.with_chg))
-        if not (os.path.isfile(options.with_chg) and
-                os.access(options.with_chg, os.X_OK)):
+        if not (
+            os.path.isfile(options.with_chg)
+            and os.access(options.with_chg, os.X_OK)
+        ):
             parser.error('--with-chg must specify a chg executable')
     if options.chg and options.with_hg:
         # chg shares installation location with hg
-        parser.error('--chg does not work when --with-hg is specified '
-                     '(use --with-chg instead)')
+        parser.error(
+            '--chg does not work when --with-hg is specified '
+            '(use --with-chg instead)'
+        )
 
     if options.color == 'always' and not pygmentspresent:
-        sys.stderr.write('warning: --color=always ignored because '
-                         'pygments is not installed\n')
+        sys.stderr.write(
+            'warning: --color=always ignored because '
+            'pygments is not installed\n'
+        )
 
     if options.bisect_repo and not options.known_good_rev:
         parser.error("--bisect-repo cannot be used without --known-good-rev")
@@ -530,13 +702,15 @@
         useipv6 = checksocketfamily('AF_INET6')
     else:
         # only use IPv6 if IPv4 is unavailable and IPv6 is available
-        useipv6 = ((not checksocketfamily('AF_INET'))
-                   and checksocketfamily('AF_INET6'))
+        useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
+            'AF_INET6'
+        )
 
     options.anycoverage = options.cover or options.annotate or options.htmlcov
     if options.anycoverage:
         try:
             import coverage
+
             covver = version.StrictVersion(coverage.__version__).version
             if covver < (3, 3):
                 parser.error('coverage options require coverage 3.3 or later')
@@ -545,12 +719,14 @@
 
     if options.anycoverage and options.local:
         # this needs some path mangling somewhere, I guess
-        parser.error("sorry, coverage options do not work when --local "
-                     "is specified")
+        parser.error(
+            "sorry, coverage options do not work when --local is specified"
+        )
 
     if options.anycoverage and options.with_hg:
-        parser.error("sorry, coverage options do not work when --with-hg "
-                     "is specified")
+        parser.error(
+            "sorry, coverage options do not work when --with-hg is specified"
+        )
 
     global verbose
     if options.verbose:
@@ -565,17 +741,16 @@
         parser.error("-i/--interactive and -d/--debug are incompatible")
     if options.debug:
         if options.timeout != defaults['timeout']:
-            sys.stderr.write(
-                'warning: --timeout option ignored with --debug\n')
+            sys.stderr.write('warning: --timeout option ignored with --debug\n')
         if options.slowtimeout != defaults['slowtimeout']:
             sys.stderr.write(
-                'warning: --slowtimeout option ignored with --debug\n')
+                'warning: --slowtimeout option ignored with --debug\n'
+            )
         options.timeout = 0
         options.slowtimeout = 0
     if options.py3_warnings:
         if PYTHON3:
-            parser.error(
-                '--py3-warnings can only be used on Python 2.7')
+            parser.error('--py3-warnings can only be used on Python 2.7')
 
     if options.blacklist:
         options.blacklist = parselistfiles(options.blacklist, 'blacklist')
@@ -589,6 +764,7 @@
 
     return options
 
+
 def rename(src, dst):
     """Like os.rename(), trade atomicity and opened files friendliness
     for existing destination support.
@@ -596,6 +772,7 @@
     shutil.copy(src, dst)
     os.remove(src)
 
+
 def makecleanable(path):
     """Try to fix directory permission recursively so that the entire tree
     can be deleted"""
@@ -607,11 +784,14 @@
             except OSError:
                 pass
 
+
 _unified_diff = difflib.unified_diff
 if PYTHON3:
     import functools
+
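+    # difflib.unified_diff only accepts str on Python 3; diff_bytes
+    # adapts it so the raw bytes of test output can be diffed undecoded.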
     _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
 
+
 def getdiff(expected, output, ref, err):
     servefail = False
     lines = []
@@ -622,12 +802,16 @@
                 line = line[:-2] + b'\n'
         lines.append(line)
         if not servefail and line.startswith(
-                             b'+  abort: child process failed to start'):
+            b'+  abort: child process failed to start'
+        ):
             servefail = True
 
     return servefail, lines
 
+
 verbose = False
+
+
 def vlog(*msg):
     """Log only when in verbose mode."""
     if verbose is False:
@@ -635,6 +819,7 @@
 
     return log(*msg)
 
+
 # Bytes that break XML even in a CDATA block: control characters 0-31
 # sans \t, \n and \r
 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
@@ -645,6 +830,7 @@
 #   output..output (feature !)\n
 optline = re.compile(br'(.*) \((.+?) !\)\n$')
 
+
 def cdatasafe(data):
     """Make a string safe to include in a CDATA block.
 
@@ -655,6 +841,7 @@
     """
     return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
 
+
 def log(*msg):
     """Log something to stdout.
 
@@ -668,12 +855,15 @@
         print()
         sys.stdout.flush()
 
+
 def highlightdiff(line, color):
     if not color:
         return line
     assert pygmentspresent
-    return pygments.highlight(line.decode('latin1'), difflexer,
-                              terminal256formatter).encode('latin1')
+    return pygments.highlight(
+        line.decode('latin1'), difflexer, terminal256formatter
+    ).encode('latin1')
+
 
 def highlightmsg(msg, color):
     if not color:
@@ -681,6 +871,7 @@
     assert pygmentspresent
     return pygments.highlight(msg, runnerlexer, runnerformatter)
 
+
 def terminate(proc):
     """Terminate subprocess"""
     vlog('# Terminating process %d' % proc.pid)
@@ -689,10 +880,12 @@
     except OSError:
         pass
 
+
 def killdaemons(pidfile):
     import killdaemons as killmod
-    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
-                               logfn=vlog)
+
+    return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
+
 
 class Test(unittest.TestCase):
     """Encapsulates a single, runnable test.
@@ -705,14 +898,24 @@
     # Status code reserved for skipped tests (used by hghave).
     SKIPPED_STATUS = 80
 
-    def __init__(self, path, outputdir, tmpdir, keeptmpdir=False,
-                 debug=False,
-                 first=False,
-                 timeout=None,
-                 startport=None, extraconfigopts=None,
-                 py3warnings=False, shell=None, hgcommand=None,
-                 slowtimeout=None, usechg=False,
-                 useipv6=False):
+    def __init__(
+        self,
+        path,
+        outputdir,
+        tmpdir,
+        keeptmpdir=False,
+        debug=False,
+        first=False,
+        timeout=None,
+        startport=None,
+        extraconfigopts=None,
+        py3warnings=False,
+        shell=None,
+        hgcommand=None,
+        slowtimeout=None,
+        usechg=False,
+        useipv6=False,
+    ):
         """Create a test from parameters.
 
         path is the full path to the file defining the test.
@@ -787,7 +990,7 @@
         # If we're not in --debug mode and reference output file exists,
         # check test output against it.
         if self._debug:
-            return None # to match "out is None"
+            return None  # to match "out is None"
         elif os.path.exists(self.refpath):
             with open(self.refpath, 'rb') as f:
                 return f.read().splitlines(True)
@@ -834,8 +1037,9 @@
                     raise
 
         if self._usechg:
-            self._chgsockdir = os.path.join(self._threadtmp,
-                                            b'%s.chgsock' % name)
+            self._chgsockdir = os.path.join(
+                self._threadtmp, b'%s.chgsock' % name
+            )
             os.mkdir(self._chgsockdir)
 
     def run(self, result):
@@ -918,7 +1122,7 @@
         self._skipped = False
 
         if ret == self.SKIPPED_STATUS:
-            if out is None: # Debug mode, nothing to parse.
+            if out is None:  # Debug mode, nothing to parse.
                 missing = ['unknown']
                 failed = None
             else:
@@ -938,8 +1142,11 @@
             self.fail('no result code from test')
         elif out != self._refout:
             # Diff generation may rely on written .err file.
-            if ((ret != 0 or out != self._refout) and not self._skipped
-                and not self._debug):
+            if (
+                (ret != 0 or out != self._refout)
+                and not self._skipped
+                and not self._debug
+            ):
                 with open(self.errpath, 'wb') as f:
                     for line in out:
                         f.write(line)
@@ -969,9 +1176,13 @@
         self._daemonpids = []
 
         if self._keeptmpdir:
-            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
-                (self._testtmp.decode('utf-8'),
-                 self._threadtmp.decode('utf-8')))
+            log(
+                '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
+                % (
+                    self._testtmp.decode('utf-8'),
+                    self._threadtmp.decode('utf-8'),
+                )
+            )
         else:
             try:
                 shutil.rmtree(self._testtmp)
@@ -987,8 +1198,12 @@
             # files are deleted
             shutil.rmtree(self._chgsockdir, True)
 
-        if ((self._ret != 0 or self._out != self._refout) and not self._skipped
-            and not self._debug and self._out):
+        if (
+            (self._ret != 0 or self._out != self._refout)
+            and not self._skipped
+            and not self._debug
+            and self._out
+        ):
             with open(self.errpath, 'wb') as f:
                 for line in self._out:
                     f.write(line)
@@ -1021,7 +1236,7 @@
             self._portmap(2),
             (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
             (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
-            ]
+        ]
         r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
 
         replacementfile = os.path.join(self._testdir, b'common-pattern.py')
@@ -1042,10 +1257,15 @@
 
     def _escapepath(self, p):
         if os.name == 'nt':
-            return (
-                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
-                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
-                    for c in [p[i:i + 1] for i in range(len(p))]))
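+            # Match Windows paths case-insensitively and accept either
+            # separator: letters become [aA]-style classes, / and \ both
+            # match, and every other byte is escaped literally.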
+            return b''.join(
+                c.isalpha()
+                and b'[%s%s]' % (c.lower(), c.upper())
+                or c in b'/\\'
+                and br'[/\\]'
+                or c.isdigit()
+                and c
+                or b'\\' + c
+                for c in [p[i : i + 1] for i in range(len(p))]
             )
         else:
             return re.escape(p)
@@ -1087,9 +1307,11 @@
 
     def _getenv(self):
         """Obtain environment variables to use during test execution."""
+
         def defineport(i):
             offset = '' if i == 0 else '%s' % i
             env["HGPORT%s" % offset] = '%s' % (self._startport + i)
+
         env = os.environ.copy()
         env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
         env['HGEMITWARNINGS'] = '1'
@@ -1101,11 +1323,13 @@
             # This list should be parallel to _portmap in _getreplacements
             defineport(port)
         env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
-        env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
-                                                   b'daemon.pids'))
-        env["HGEDITOR"] = ('"' + sysexecutable + '"'
-                           + ' -c "import sys; sys.exit(0)"')
-        env["HGUSER"]   = "test"
+        env["DAEMON_PIDS"] = _strpath(
+            os.path.join(self._threadtmp, b'daemon.pids')
+        )
+        env["HGEDITOR"] = (
+            '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
+        )
+        env["HGUSER"] = "test"
         env["HGENCODING"] = "ascii"
         env["HGENCODINGMODE"] = "strict"
         env["HGHOSTNAME"] = "test-hostname"
@@ -1115,7 +1339,8 @@
             # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
             # non-test one in as a default, otherwise set to devnull
             env['HGTESTCATAPULTSERVERPIPE'] = env.get(
-                'HGCATAPULTSERVERPIPE', os.devnull)
+                'HGCATAPULTSERVERPIPE', os.devnull
+            )
 
         extraextensions = []
         for opt in self._extraconfigopts:
@@ -1191,11 +1416,15 @@
             hgrc.write(b'all-warnings = true\n')
             hgrc.write(b'default-date = 0 0\n')
             hgrc.write(b'[largefiles]\n')
-            hgrc.write(b'usercache = %s\n' %
-                       (os.path.join(self._testtmp, b'.cache/largefiles')))
+            hgrc.write(
+                b'usercache = %s\n'
+                % (os.path.join(self._testtmp, b'.cache/largefiles'))
+            )
             hgrc.write(b'[lfs]\n')
-            hgrc.write(b'usercache = %s\n' %
-                       (os.path.join(self._testtmp, b'.cache/lfs')))
+            hgrc.write(
+                b'usercache = %s\n'
+                % (os.path.join(self._testtmp, b'.cache/lfs'))
+            )
             hgrc.write(b'[web]\n')
             hgrc.write(b'address = localhost\n')
             hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
@@ -1203,8 +1432,9 @@
 
             for opt in self._extraconfigopts:
                 section, key = opt.encode('utf-8').split(b'.', 1)
-                assert b'=' in key, ('extra config opt %s must '
-                                     'have an = for assignment' % opt)
+                assert b'=' in key, (
+                    'extra config opt %s must have an = for assignment' % opt
+                )
                 hgrc.write(b'[%s]\n%s\n' % (section, key))
 
     def fail(self, msg):
@@ -1219,13 +1449,14 @@
         Return a tuple (exitcode, output). output is None in debug mode.
         """
         if self._debug:
-            proc = subprocess.Popen(_strpath(cmd), shell=True,
-                                    cwd=_strpath(self._testtmp),
-                                    env=env)
+            proc = subprocess.Popen(
+                _strpath(cmd), shell=True, cwd=_strpath(self._testtmp), env=env
+            )
             ret = proc.wait()
             return (ret, None)
 
         proc = Popen4(cmd, self._testtmp, self._timeout, env)
+
         def cleanup():
             terminate(proc)
             ret = proc.wait()
@@ -1261,6 +1492,7 @@
 
         return ret, output.splitlines(True)
 
+
 class PythonTest(Test):
     """A Python-based test."""
 
@@ -1274,13 +1506,13 @@
         cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path)
         vlog("# Running", cmd)
         normalizenewlines = os.name == 'nt'
-        result = self._runcommand(cmd, env,
-                                  normalizenewlines=normalizenewlines)
+        result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
         if self._aborted:
             raise KeyboardInterrupt()
 
         return result
 
+
 # Some glob patterns apply only in some circumstances, so the script
 # might want to remove (glob) annotations that otherwise should be
 # retained.
@@ -1299,6 +1531,17 @@
 if PYTHON3:
     bchr = lambda x: bytes([x])
 
+WARN_UNDEFINED = 1
+WARN_YES = 2
+WARN_NO = 3
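+# warnonly tri-state: a run stays WARN_UNDEFINED until a warn-only line
+# mismatches (WARN_YES, reported as "warned") and becomes WARN_NO as
+# soon as any line fails outright; see TTest._process_out_line.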
+
+MARK_OPTIONAL = b" (?)\n"
+
+
+def isoptional(line):
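+    """Return True if `line` (bytes) ends with the " (?)" marker that
+    flags an expected output line as optional."""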
+    return line.endswith(MARK_OPTIONAL)
+
+
 class TTest(Test):
     """A "t test" is a test backed by a .t file."""
 
@@ -1371,9 +1614,12 @@
         # TODO do something smarter when all other uses of hghave are gone.
         runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
         tdir = runtestdir.replace(b'\\', b'/')
-        proc = Popen4(b'%s -c "%s/hghave %s"' %
-                      (self._shell, tdir, allreqs),
-                      self._testtmp, 0, self._getenv())
+        proc = Popen4(
+            b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
+            self._testtmp,
+            0,
+            self._getenv(),
+        )
         stdout, stderr = proc.communicate()
         ret = proc.wait()
         if wifexited(ret):
@@ -1414,25 +1660,30 @@
         # up script results with our source. These markers include input
         # line number and the last return code.
         salt = b"SALT%d" % time.time()
+
         def addsalt(line, inpython):
             if inpython:
                 script.append(b'%s %d 0\n' % (salt, line))
             else:
                 script.append(b'echo %s %d $?\n' % (salt, line))
+
         activetrace = []
         session = str(uuid.uuid4())
         if PYTHON3:
             session = session.encode('ascii')
-        hgcatapult = (os.getenv('HGTESTCATAPULTSERVERPIPE') or
-                      os.getenv('HGCATAPULTSERVERPIPE'))
+        hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
+            'HGCATAPULTSERVERPIPE'
+        )
+
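+        # toggletrace emits shell snippets that bracket each command with
+        # START/END markers on the catapult pipe, one trace span per
+        # command.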
         def toggletrace(cmd=None):
             if not hgcatapult or hgcatapult == os.devnull:
                 return
 
             if activetrace:
                 script.append(
-                    b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
-                        session, activetrace[0]))
+                    b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
+                    % (session, activetrace[0])
+                )
             if cmd is None:
                 return
 
@@ -1442,8 +1693,9 @@
                 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
             quoted = quoted.replace(b'\\', b'\\\\')
             script.append(
-                b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
-                    session, quoted))
+                b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
+                % (session, quoted)
+            )
             activetrace[0:] = [quoted]
 
         script = []
@@ -1545,21 +1797,21 @@
                 after.setdefault(pos, []).append(l)
             elif skipping:
                 after.setdefault(pos, []).append(l)
-            elif l.startswith(b'  >>> '): # python inlines
+            elif l.startswith(b'  >>> '):  # python inlines
                 after.setdefault(pos, []).append(l)
                 prepos = pos
                 pos = n
                 if not inpython:
                     # We've just entered a Python block. Add the header.
                     inpython = True
-                    addsalt(prepos, False) # Make sure we report the exit code.
+                    addsalt(prepos, False)  # Make sure we report the exit code.
                     script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
                 addsalt(n, True)
                 script.append(l[2:])
-            elif l.startswith(b'  ... '): # python inlines
+            elif l.startswith(b'  ... '):  # python inlines
                 after.setdefault(prepos, []).append(l)
                 script.append(l[2:])
-            elif l.startswith(b'  $ '): # commands
+            elif l.startswith(b'  $ '):  # commands
                 if inpython:
                     script.append(b'EOF\n')
                     inpython = False
@@ -1573,10 +1825,10 @@
                 if len(cmd) == 2 and cmd[0] == b'cd':
                     l = b'  $ cd %s || exit 1\n' % cmd[1]
                 script.append(rawcmd)
-            elif l.startswith(b'  > '): # continuations
+            elif l.startswith(b'  > '):  # continuations
                 after.setdefault(prepos, []).append(l)
                 script.append(l[4:])
-            elif l.startswith(b'  '): # results
+            elif l.startswith(b'  '):  # results
                 # Queue up a list of expected results.
                 expected.setdefault(pos, []).append(l[2:])
             else:
@@ -1598,114 +1850,124 @@
 
     def _processoutput(self, exitcode, output, salt, after, expected):
         # Merge the script output back into a unified test.
-        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
+        warnonly = WARN_UNDEFINED  # not decided yet; see WARN_* constants
         if exitcode != 0:
-            warnonly = 3
+            warnonly = WARN_NO
 
         pos = -1
         postout = []
-        for l in output:
-            lout, lcmd = l, None
-            if salt in l:
-                lout, lcmd = l.split(salt, 1)
-
-            while lout:
-                if not lout.endswith(b'\n'):
-                    lout += b' (no-eol)\n'
-
-                # Find the expected output at the current position.
-                els = [None]
-                if expected.get(pos, None):
-                    els = expected[pos]
-
-                optional = []
-                for i, el in enumerate(els):
-                    r = False
-                    if el:
-                        r, exact = self.linematch(el, lout)
-                    if isinstance(r, str):
-                        if r == '-glob':
-                            lout = ''.join(el.rsplit(' (glob)', 1))
-                            r = '' # Warn only this line.
-                        elif r == "retry":
-                            postout.append(b'  ' + el)
-                        else:
-                            log('\ninfo, unknown linematch result: %r\n' % r)
-                            r = False
-                    if r:
-                        els.pop(i)
-                        break
-                    if el:
-                        if el.endswith(b" (?)\n"):
-                            optional.append(i)
-                        else:
-                            m = optline.match(el)
-                            if m:
-                                conditions = [
-                                    c for c in m.group(2).split(b' ')]
-
-                                if not self._iftest(conditions):
-                                    optional.append(i)
-                        if exact:
-                            # Don't allow line to be matches against a later
-                            # line in the output
-                            els.pop(i)
-                            break
-
-                if r:
-                    if r == "retry":
-                        continue
-                    # clean up any optional leftovers
-                    for i in optional:
-                        postout.append(b'  ' + els[i])
-                    for i in reversed(optional):
-                        del els[i]
-                    postout.append(b'  ' + el)
-                else:
-                    if self.NEEDESCAPE(lout):
-                        lout = TTest._stringescape(b'%s (esc)\n' %
-                                                   lout.rstrip(b'\n'))
-                    postout.append(b'  ' + lout) # Let diff deal with it.
-                    if r != '': # If line failed.
-                        warnonly = 3 # for sure not
-                    elif warnonly == 1: # Is "not yet" and line is warn only.
-                        warnonly = 2 # Yes do warn.
-                break
-            else:
-                # clean up any optional leftovers
-                while expected.get(pos, None):
-                    el = expected[pos].pop(0)
-                    if el:
-                        if not el.endswith(b" (?)\n"):
-                            m = optline.match(el)
-                            if m:
-                                conditions = [c for c in m.group(2).split(b' ')]
-
-                                if self._iftest(conditions):
-                                    # Don't append as optional line
-                                    continue
-                            else:
-                                continue
-                    postout.append(b'  ' + el)
-
-            if lcmd:
-                # Add on last return code.
-                ret = int(lcmd.split()[1])
-                if ret != 0:
-                    postout.append(b'  [%d]\n' % ret)
-                if pos in after:
-                    # Merge in non-active test bits.
-                    postout += after.pop(pos)
-                pos = int(lcmd.split()[0])
+        for out_rawline in output:
+            out_line, cmd_line = out_rawline, None
+            if salt in out_rawline:
+                out_line, cmd_line = out_rawline.split(salt, 1)
+
+            pos, postout, warnonly = self._process_out_line(
+                out_line, pos, postout, expected, warnonly
+            )
+            pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)
 
         if pos in after:
             postout += after.pop(pos)
 
-        if warnonly == 2:
-            exitcode = False # Set exitcode to warned.
+        if warnonly == WARN_YES:
+            exitcode = False  # Set exitcode to warned.
 
         return exitcode, postout
 
+    def _process_out_line(self, out_line, pos, postout, expected, warnonly):
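+        """Match one line of actual output against the expected lines
+        queued at `pos`, appending the merged result to `postout`.
+
+        Returns the updated (pos, postout, warnonly) state."""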
+        while out_line:
+            if not out_line.endswith(b'\n'):
+                out_line += b' (no-eol)\n'
+
+            # Find the expected output at the current position.
+            els = [None]
+            if expected.get(pos, None):
+                els = expected[pos]
+
+            optional = []
+            for i, el in enumerate(els):
+                r = False
+                if el:
+                    r, exact = self.linematch(el, out_line)
+                if isinstance(r, str):
+                    if r == '-glob':
+                        out_line = b''.join(el.rsplit(b' (glob)', 1))
+                        r = ''  # Warn only this line.
+                    elif r == "retry":
+                        postout.append(b'  ' + el)
+                    else:
+                        log('\ninfo, unknown linematch result: %r\n' % r)
+                        r = False
+                if r:
+                    els.pop(i)
+                    break
+                if el:
+                    if isoptional(el):
+                        optional.append(i)
+                    else:
+                        m = optline.match(el)
+                        if m:
+                            conditions = [c for c in m.group(2).split(b' ')]
+
+                            if not self._iftest(conditions):
+                                optional.append(i)
+                    if exact:
+                        # Don't allow the line to be matched against a
+                        # later line in the output
+                        els.pop(i)
+                        break
+
+            if r:
+                if r == "retry":
+                    continue
+                # clean up any optional leftovers
+                for i in optional:
+                    postout.append(b'  ' + els[i])
+                for i in reversed(optional):
+                    del els[i]
+                postout.append(b'  ' + el)
+            else:
+                if self.NEEDESCAPE(out_line):
+                    out_line = TTest._stringescape(
+                        b'%s (esc)\n' % out_line.rstrip(b'\n')
+                    )
+                postout.append(b'  ' + out_line)  # Let diff deal with it.
+                if r != '':  # If line failed.
+                    warnonly = WARN_NO
+                elif warnonly == WARN_UNDEFINED:
+                    warnonly = WARN_YES
+            break
+        else:
+            # clean up any optional leftovers
+            while expected.get(pos, None):
+                el = expected[pos].pop(0)
+                if el:
+                    if not isoptional(el):
+                        m = optline.match(el)
+                        if m:
+                            conditions = [c for c in m.group(2).split(b' ')]
+
+                            if self._iftest(conditions):
+                                # Don't append as optional line
+                                continue
+                        else:
+                            continue
+                postout.append(b'  ' + el)
+        return pos, postout, warnonly
+
+    def _process_cmd_line(self, cmd_line, pos, postout, after):
+        """Process the b"<lineno> <returncode>" tail of a salted output
+        line: record a non-zero exit code in `postout` and advance
+        `pos` to the new source line number."""
+        if cmd_line:
+            # Add on last return code.
+            ret = int(cmd_line.split()[1])
+            if ret != 0:
+                postout.append(b'  [%d]\n' % ret)
+            if pos in after:
+                # Merge in non-active test bits.
+                postout += after.pop(pos)
+            pos = int(cmd_line.split()[0])
+        return pos, postout
+
     @staticmethod
     def rematch(el, l):
         try:
@@ -1734,10 +1996,10 @@
         i, n = 0, len(el)
         res = b''
         while i < n:
-            c = el[i:i + 1]
+            c = el[i : i + 1]
             i += 1
-            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
-                res += el[i - 1:i + 1]
+            if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
+                res += el[i - 1 : i + 1]
                 i += 1
             elif c == b'*':
                 res += b'.*'
@@ -1750,12 +2012,12 @@
         return TTest.rematch(res, l)
 
     def linematch(self, el, l):
-        if el == l: # perfect match (fast)
+        if el == l:  # perfect match (fast)
             return True, True
         retry = False
-        if el.endswith(b" (?)\n"):
+        if isoptional(el):
             retry = "retry"
-            el = el[:-5] + b"\n"
+            el = el[: -len(MARK_OPTIONAL)] + b"\n"
         else:
             m = optline.match(el)
             if m:
@@ -1799,10 +2061,12 @@
         for line in lines:
             if line.startswith(TTest.SKIPPED_PREFIX):
                 line = line.splitlines()[0]
-                missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
+                missing.append(
+                    line[len(TTest.SKIPPED_PREFIX) :].decode('utf-8')
+                )
             elif line.startswith(TTest.FAILED_PREFIX):
                 line = line.splitlines()[0]
-                failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
+                failed.append(line[len(TTest.FAILED_PREFIX) :].decode('utf-8'))
 
         return missing, failed
 
@@ -1814,12 +2078,15 @@
     def _stringescape(s):
         return TTest.ESCAPESUB(TTest._escapef, s)
 
+
 iolock = threading.RLock()
 firstlock = threading.RLock()
 firsterror = False
 
+
 class TestResult(unittest._TextTestResult):
     """Holds results when executing via unittest."""
+
     # Don't worry too much about accessing the non-public _TextTestResult.
     # It is relatively common in Python testing tools.
     def __init__(self, options, *args, **kwargs):
@@ -1846,7 +2113,7 @@
             self.color = pygmentspresent and self.stream.isatty()
         elif options.color == 'never':
             self.color = False
-        else: # 'always', for testing purposes
+        else:  # 'always', for testing purposes
             self.color = pygmentspresent
 
     def onStart(self, test):
@@ -1924,12 +2191,15 @@
                 pass
             elif self._options.view:
                 v = self._options.view
-                subprocess.call(r'"%s" "%s" "%s"' %
-                                (v, _strpath(test.refpath),
-                                 _strpath(test.errpath)), shell=True)
+                subprocess.call(
+                    r'"%s" "%s" "%s"'
+                    % (v, _strpath(test.refpath), _strpath(test.errpath)),
+                    shell=True,
+                )
             else:
-                servefail, lines = getdiff(expected, got,
-                                           test.refpath, test.errpath)
+                servefail, lines = getdiff(
+                    expected, got, test.refpath, test.errpath
+                )
                 self.stream.write('\n')
                 for line in lines:
                     line = highlightdiff(line, self.color)
@@ -1943,14 +2213,16 @@
 
                 if servefail:
                     raise test.failureException(
-                        'server failed to start (HGPORT=%s)' % test._startport)
+                        'server failed to start (HGPORT=%s)' % test._startport
+                    )
 
             # handle interactive prompt without releasing iolock
             if self._options.interactive:
                 if test.readrefout() != expected:
                     self.stream.write(
                         'Reference output has changed (run again to prompt '
-                        'changes)')
+                        'changes)'
+                    )
                 else:
                     self.stream.write('Accept this change? [n] ')
                     self.stream.flush()
@@ -1974,7 +2246,7 @@
         # This module has one limitation: it only works on Linux, not on
         # Windows.
         test.started = os.times()
-        if self._firststarttime is None: # thread racy but irrelevant
+        if self._firststarttime is None:  # thread racy but irrelevant
             self._firststarttime = test.started[4]
 
     def stopTest(self, test, interrupted=False):
@@ -1985,18 +2257,24 @@
         starttime = test.started
         endtime = test.stopped
         origin = self._firststarttime
-        self.times.append((test.name,
-                           endtime[2] - starttime[2], # user space CPU time
-                           endtime[3] - starttime[3], # sys  space CPU time
-                           endtime[4] - starttime[4], # real time
-                           starttime[4] - origin, # start date in run context
-                           endtime[4] - origin, # end date in run context
-                           ))
+        self.times.append(
+            (
+                test.name,
+                endtime[2] - starttime[2],  # user space CPU time
+                endtime[3] - starttime[3],  # sys  space CPU time
+                endtime[4] - starttime[4],  # real time
+                starttime[4] - origin,  # start date in run context
+                endtime[4] - origin,  # end date in run context
+            )
+        )
 
         if interrupted:
             with iolock:
-                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
-                    test.name, self.times[-1][3]))
+                self.stream.writeln(
+                    'INTERRUPTED: %s (after %d seconds)'
+                    % (test.name, self.times[-1][3])
+                )
+
 
 def getTestResult():
     """
@@ -2008,13 +2286,25 @@
     else:
         return TestResult
 
+
 class TestSuite(unittest.TestSuite):
     """Custom unittest TestSuite that knows how to execute Mercurial tests."""
 
-    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
-                 retest=False, keywords=None, loop=False, runs_per_test=1,
-                 loadtest=None, showchannels=False,
-                 *args, **kwargs):
+    def __init__(
+        self,
+        testdir,
+        jobs=1,
+        whitelist=None,
+        blacklist=None,
+        retest=False,
+        keywords=None,
+        loop=False,
+        runs_per_test=1,
+        loadtest=None,
+        showchannels=False,
+        *args,
+        **kwargs
+    ):
         """Create a new instance that can run tests with a configuration.
 
         testdir specifies the directory where tests are executed from. This
@@ -2061,11 +2351,13 @@
         tests = []
         num_tests = [0]
         for test in self._tests:
+
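+            # Hand back a freshly loaded copy when the test is flagged
+            # for reload, so repeated runs do not share state.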
             def get():
                 num_tests[0] += 1
                 if getattr(test, 'should_reload', False):
                     return self._loadtest(test, num_tests[0])
                 return test
+
             if not os.path.exists(test.path):
                 result.addSkip(test, "Doesn't exist")
                 continue
@@ -2113,7 +2405,7 @@
                 done.put(None)
             except KeyboardInterrupt:
                 pass
-            except: # re-raises
+            except:  # re-raises
                 done.put(('!', test, 'run-test raised an error, see traceback'))
                 raise
             finally:
@@ -2138,7 +2430,7 @@
                     sys.stdout.flush()
                 for x in xrange(10):
                     if channels:
-                        time.sleep(.1)
+                        time.sleep(0.1)
                 count += 1
 
         stoppedearly = False
@@ -2163,15 +2455,15 @@
                     if self._loop:
                         if getattr(test, 'should_reload', False):
                             num_tests[0] += 1
-                            tests.append(
-                                self._loadtest(test, num_tests[0]))
+                            tests.append(self._loadtest(test, num_tests[0]))
                         else:
                             tests.append(test)
                     if self._jobs == 1:
                         job(test, result)
                     else:
-                        t = threading.Thread(target=job, name=test.name,
-                                             args=(test, result))
+                        t = threading.Thread(
+                            target=job, name=test.name, args=(test, result)
+                        )
                         t.start()
                     running += 1
 
@@ -2194,24 +2486,28 @@
 
         return result
 
+
 # Save the most recent 5 wall-clock runtimes of each test to a
 # human-readable text file named .testtimes. Tests are sorted
 # alphabetically, while times for each test are listed from oldest to
 # newest.
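+# A line looks like (timings hypothetical):
+#   test-example.t 0.431 0.502 0.478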
 
+
 def loadtimes(outputdir):
     times = []
     try:
         with open(os.path.join(outputdir, b'.testtimes')) as fp:
             for line in fp:
                 m = re.match('(.*?) ([0-9. ]+)', line)
-                times.append((m.group(1),
-                              [float(t) for t in m.group(2).split()]))
+                times.append(
+                    (m.group(1), [float(t) for t in m.group(2).split()])
+                )
     except IOError as err:
         if err.errno != errno.ENOENT:
             raise
     return times
 
+
 def savetimes(outputdir, result):
     saved = dict(loadtimes(outputdir))
     maxruns = 5
@@ -2223,8 +2519,9 @@
             ts.append(real)
             ts[:] = ts[-maxruns:]
 
-    fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
-                                   dir=outputdir, text=True)
+    fd, tmpname = tempfile.mkstemp(
+        prefix=b'.testtimes', dir=outputdir, text=True
+    )
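+    # Write to a temporary file and move it into place afterwards, so an
+    # interrupted run is less likely to leave a truncated .testtimes.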
     with os.fdopen(fd, 'w') as fp:
         for name, ts in sorted(saved.items()):
             fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
@@ -2238,6 +2535,7 @@
     except OSError:
         pass
 
+
 class TextTestRunner(unittest.TextTestRunner):
     """Custom unittest test runner that uses appropriate settings."""
 
@@ -2246,8 +2544,9 @@
 
         self._runner = runner
 
-        self._result = getTestResult()(self._runner.options, self.stream,
-                                       self.descriptions, self.verbosity)
+        self._result = getTestResult()(
+            self._runner.options, self.stream, self.descriptions, self.verbosity
+        )
 
     def listtests(self, test):
         test = sorted(test, key=lambda t: t.name)
@@ -2281,17 +2580,20 @@
             self.stream.writeln('')
 
             if not self._runner.options.noskips:
-                for test, msg in sorted(self._result.skipped,
-                                        key=lambda s: s[0].name):
+                for test, msg in sorted(
+                    self._result.skipped, key=lambda s: s[0].name
+                ):
                     formatted = 'Skipped %s: %s\n' % (test.name, msg)
                     msg = highlightmsg(formatted, self._result.color)
                     self.stream.write(msg)
-            for test, msg in sorted(self._result.failures,
-                                    key=lambda f: f[0].name):
+            for test, msg in sorted(
+                self._result.failures, key=lambda f: f[0].name
+            ):
                 formatted = 'Failed %s: %s\n' % (test.name, msg)
                 self.stream.write(highlightmsg(formatted, self._result.color))
-            for test, msg in sorted(self._result.errors,
-                                    key=lambda e: e[0].name):
+            for test, msg in sorted(
+                self._result.errors, key=lambda e: e[0].name
+            ):
                 self.stream.writeln('Errored %s: %s' % (test.name, msg))
 
             if self._runner.options.xunit:
@@ -2311,31 +2613,41 @@
                 self._bisecttests(t for t, m in self._result.failures)
             self.stream.writeln(
                 '# Ran %d tests, %d skipped, %d failed.'
-                % (self._result.testsRun, skipped + ignored, failed))
+                % (self._result.testsRun, skipped + ignored, failed)
+            )
             if failed:
-                self.stream.writeln('python hash seed: %s' %
-                    os.environ['PYTHONHASHSEED'])
+                self.stream.writeln(
+                    'python hash seed: %s' % os.environ['PYTHONHASHSEED']
+                )
             if self._runner.options.time:
                 self.printtimes(self._result.times)
 
             if self._runner.options.exceptions:
                 exceptions = aggregateexceptions(
-                    os.path.join(self._runner._outputdir, b'exceptions'))
+                    os.path.join(self._runner._outputdir, b'exceptions')
+                )
 
                 self.stream.writeln('Exceptions Report:')
-                self.stream.writeln('%d total from %d frames' %
-                                    (exceptions['total'],
-                                     len(exceptions['exceptioncounts'])))
+                self.stream.writeln(
+                    '%d total from %d frames'
+                    % (exceptions['total'], len(exceptions['exceptioncounts']))
+                )
                 combined = exceptions['combined']
                 for key in sorted(combined, key=combined.get, reverse=True):
                     frame, line, exc = key
                     totalcount, testcount, leastcount, leasttest = combined[key]
 
-                    self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
-                                        % (totalcount,
-                                           testcount,
-                                           frame, exc,
-                                           leasttest, leastcount))
+                    self.stream.writeln(
+                        '%d (%d tests)\t%s: %s (%s - %d total)'
+                        % (
+                            totalcount,
+                            testcount,
+                            frame,
+                            exc,
+                            leasttest,
+                            leastcount,
+                        )
+                    )
 
             self.stream.flush()
 
@@ -2346,14 +2658,17 @@
         bisectrepo = self._runner.options.bisect_repo
         if bisectrepo:
             bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
+
         def pread(args):
             env = os.environ.copy()
             env['HGPLAIN'] = '1'
-            p = subprocess.Popen(args, stderr=subprocess.STDOUT,
-                                 stdout=subprocess.PIPE, env=env)
+            p = subprocess.Popen(
+                args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
+            )
             data = p.stdout.read()
             p.wait()
             return data
+
         for test in tests:
             pread(bisectcmd + ['--reset']),
             pread(bisectcmd + ['--bad', '.'])
@@ -2364,32 +2679,43 @@
             withhg = self._runner.options.with_hg
             if withhg:
                 opts += ' --with-hg=%s ' % shellquote(_strpath(withhg))
-            rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts,
-                                   test)
+            rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
             data = pread(bisectcmd + ['--command', rtc])
             m = re.search(
-                (br'\nThe first (?P<goodbad>bad|good) revision '
-                 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
-                 br'summary: +(?P<summary>[^\n]+)\n'),
-                data, (re.MULTILINE | re.DOTALL))
+                (
+                    br'\nThe first (?P<goodbad>bad|good) revision '
+                    br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
+                    br'summary: +(?P<summary>[^\n]+)\n'
+                ),
+                data,
+                (re.MULTILINE | re.DOTALL),
+            )
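+            # The pattern above pulls the node and summary out of
+            # "hg bisect"'s closing good/bad revision report.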
             if m is None:
                 self.stream.writeln(
-                    'Failed to identify failure point for %s' % test)
+                    'Failed to identify failure point for %s' % test
+                )
                 continue
             dat = m.groupdict()
             verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
             self.stream.writeln(
-                '%s %s by %s (%s)' % (
-                    test, verb, dat['node'].decode('ascii'),
-                    dat['summary'].decode('utf8', 'ignore')))
+                '%s %s by %s (%s)'
+                % (
+                    test,
+                    verb,
+                    dat['node'].decode('ascii'),
+                    dat['summary'].decode('utf8', 'ignore'),
+                )
+            )
 
     def printtimes(self, times):
         # iolock held by run
         self.stream.writeln('# Producing time report')
         times.sort(key=lambda t: (t[3]))
         cols = '%7.3f %7.3f %7.3f %7.3f %7.3f   %s'
-        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s   %s' %
-                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
+        self.stream.writeln(
+            '%-7s %-7s %-7s %-7s %-7s   %s'
+            % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
+        )
         for tdata in times:
             test = tdata[0]
             cuser, csys, real, start, end = tdata[1:6]
@@ -2401,11 +2727,12 @@
         timesd = dict((t[0], t[3]) for t in result.times)
         doc = minidom.Document()
         s = doc.createElement('testsuite')
-        s.setAttribute('errors', "0") # TODO
+        s.setAttribute('errors', "0")  # TODO
         s.setAttribute('failures', str(len(result.failures)))
         s.setAttribute('name', 'run-tests')
-        s.setAttribute('skipped', str(len(result.skipped) +
-                                      len(result.ignored)))
+        s.setAttribute(
+            'skipped', str(len(result.skipped) + len(result.ignored))
+        )
         s.setAttribute('tests', str(result.testsRun))
         doc.appendChild(s)
         for tc in result.successes:
@@ -2456,10 +2783,11 @@
             timesd[test] = tdata[1:]
 
         outcome = {}
-        groups = [('success', ((tc, None)
-                   for tc in result.successes)),
-                  ('failure', result.failures),
-                  ('skip', result.skipped)]
+        groups = [
+            ('success', ((tc, None) for tc in result.successes)),
+            ('failure', result.failures),
+            ('skip', result.skipped),
+        ]
         for res, testcases in groups:
             for tc, __ in testcases:
                 if tc.name in timesd:
@@ -2468,23 +2796,26 @@
                         diff = diff.decode('unicode_escape')
                     except UnicodeDecodeError as e:
                         diff = '%r decoding diff, sorry' % e
-                    tres = {'result': res,
-                            'time': ('%0.3f' % timesd[tc.name][2]),
-                            'cuser': ('%0.3f' % timesd[tc.name][0]),
-                            'csys': ('%0.3f' % timesd[tc.name][1]),
-                            'start': ('%0.3f' % timesd[tc.name][3]),
-                            'end': ('%0.3f' % timesd[tc.name][4]),
-                            'diff': diff,
-                            }
+                    tres = {
+                        'result': res,
+                        'time': ('%0.3f' % timesd[tc.name][2]),
+                        'cuser': ('%0.3f' % timesd[tc.name][0]),
+                        'csys': ('%0.3f' % timesd[tc.name][1]),
+                        'start': ('%0.3f' % timesd[tc.name][3]),
+                        'end': ('%0.3f' % timesd[tc.name][4]),
+                        'diff': diff,
+                    }
                 else:
                     # blacklisted test
                     tres = {'result': res}
 
                 outcome[tc.name] = tres
-        jsonout = json.dumps(outcome, sort_keys=True, indent=4,
-                             separators=(',', ': '))
+        jsonout = json.dumps(
+            outcome, sort_keys=True, indent=4, separators=(',', ': ')
+        )
         outf.writelines(("testreport =", jsonout))
 
+
 def sorttests(testdescs, previoustimes, shuffle=False):
     """Do an in-place sort of tests."""
     if shuffle:
@@ -2492,29 +2823,32 @@
         return
 
     if previoustimes:
+
         def sortkey(f):
             f = f['path']
             if f in previoustimes:
                 # Use most recent time as estimate
-                return -previoustimes[f][-1]
+                return -(previoustimes[f][-1])
             else:
                 # Default to a rather arbitrary value of 1 second for new tests
                 return -1.0
+
     else:
         # keywords for slow tests
-        slow = {b'svn': 10,
-                b'cvs': 10,
-                b'hghave': 10,
-                b'largefiles-update': 10,
-                b'run-tests': 10,
-                b'corruption': 10,
-                b'race': 10,
-                b'i18n': 10,
-                b'check': 100,
-                b'gendoc': 100,
-                b'contrib-perf': 200,
-                b'merge-combination': 100,
-                }
+        slow = {
+            b'svn': 10,
+            b'cvs': 10,
+            b'hghave': 10,
+            b'largefiles-update': 10,
+            b'run-tests': 10,
+            b'corruption': 10,
+            b'race': 10,
+            b'i18n': 10,
+            b'check': 100,
+            b'gendoc': 100,
+            b'contrib-perf': 200,
+            b'merge-combination': 100,
+        }
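+        # sortkey() below uses these as rough weights so that tests whose
+        # names match one of these keywords (and are likely slow) start early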
         perf = {}
 
         def sortkey(f):
@@ -2540,6 +2874,7 @@
 
     testdescs.sort(key=sortkey)
 
+
 class TestRunner(object):
     """Holds context for executing tests.
 
@@ -2596,6 +2931,7 @@
             testdescs = self.findtests(tests)
             if options.profile_runner:
                 import statprof
+
                 statprof.start()
             result = self._run(testdescs)
             if options.profile_runner:
@@ -2650,8 +2986,7 @@
                 d = osenvironb.get(b'TMP', None)
             tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
 
-        self._hgtmp = osenvironb[b'HGTMP'] = (
-            os.path.realpath(tmpdir))
+        self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
 
         if self.options.with_hg:
             self._installdir = None
@@ -2773,7 +3108,8 @@
             osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
             logexceptions = os.path.join(self._testdir, b'logexceptions.py')
             self.options.extra_config_opt.append(
-                'extensions.logexceptions=%s' % logexceptions.decode('utf-8'))
+                'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
+            )
 
         vlog("# Using TESTDIR", self._testdir)
         vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
@@ -2785,7 +3121,7 @@
         try:
             return self._runtests(testdescs) or 0
         finally:
-            time.sleep(.1)
+            time.sleep(0.1)
             self._cleanup()
 
     def findtests(self, args):
@@ -2796,8 +3132,12 @@
         """
         if not args:
             if self.options.changed:
-                proc = Popen4(b'hg st --rev "%s" -man0 .' %
-                              _bytespath(self.options.changed), None, 0)
+                proc = Popen4(
+                    b'hg st --rev "%s" -man0 .'
+                    % _bytespath(self.options.changed),
+                    None,
+                    0,
+                )
                 stdout, stderr = proc.communicate()
                 args = stdout.strip(b'\0').split(b'\0')
             else:
@@ -2814,13 +3154,16 @@
         args = expanded_args
 
         testcasepattern = re.compile(
-            br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-\.#]+))')
+            br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-\.#]+))'
+        )
         tests = []
         for t in args:
             case = []
 
-            if not (os.path.basename(t).startswith(b'test-')
-                    and (t.endswith(b'.py') or t.endswith(b'.t'))):
+            if not (
+                os.path.basename(t).startswith(b'test-')
+                and (t.endswith(b'.py') or t.endswith(b'.t'))
+            ):
 
                 m = testcasepattern.match(os.path.basename(t))
                 if m is not None:
@@ -2836,12 +3179,14 @@
                 casedimensions = parsettestcases(t)
                 if casedimensions:
                     cases = []
+
                     def addcases(case, casedimensions):
                         if not casedimensions:
                             cases.append(case)
                         else:
                             for c in casedimensions[0]:
                                 addcases(case + [c], casedimensions[1:])
+
                     addcases([], casedimensions)
                     if case and case in cases:
                         cases = [case]
@@ -2895,16 +3240,19 @@
             if kws is not None and PYTHON3:
                 kws = kws.encode('utf-8')
 
-            suite = TestSuite(self._testdir,
-                              jobs=jobs,
-                              whitelist=self.options.whitelisted,
-                              blacklist=self.options.blacklist,
-                              retest=self.options.retest,
-                              keywords=kws,
-                              loop=self.options.loop,
-                              runs_per_test=self.options.runs_per_test,
-                              showchannels=self.options.showchannels,
-                              tests=tests, loadtest=_reloadtest)
+            suite = TestSuite(
+                self._testdir,
+                jobs=jobs,
+                whitelist=self.options.whitelisted,
+                blacklist=self.options.blacklist,
+                retest=self.options.retest,
+                keywords=kws,
+                loop=self.options.loop,
+                runs_per_test=self.options.runs_per_test,
+                showchannels=self.options.showchannels,
+                tests=tests,
+                loadtest=_reloadtest,
+            )
             verbosity = 1
             if self.options.list_tests:
                 verbosity = 0
@@ -2924,8 +3272,10 @@
                     assert self._installdir
                     self._installchg()
 
-                log('running %d tests using %d parallel processes' % (
-                    num_tests, jobs))
+                log(
+                    'running %d tests using %d parallel processes'
+                    % (num_tests, jobs)
+                )
 
                 result = runner.run(suite)
 
@@ -2944,7 +3294,7 @@
             return 1
 
     def _getport(self, count):
-        port = self._ports.get(count) # do we have a cached entry?
+        port = self._ports.get(count)  # do we have a cached entry?
         if port is None:
             portneeded = 3
             # after 100 tries we just give up and let the test report failure
@@ -2982,18 +3332,23 @@
         # extra keyword parameters. 'case' is used by .t tests
         kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc)
 
-        t = testcls(refpath, self._outputdir, tmpdir,
-                    keeptmpdir=self.options.keep_tmpdir,
-                    debug=self.options.debug,
-                    first=self.options.first,
-                    timeout=self.options.timeout,
-                    startport=self._getport(count),
-                    extraconfigopts=self.options.extra_config_opt,
-                    py3warnings=self.options.py3_warnings,
-                    shell=self.options.shell,
-                    hgcommand=self._hgcommand,
-                    usechg=bool(self.options.with_chg or self.options.chg),
-                    useipv6=useipv6, **kwds)
+        t = testcls(
+            refpath,
+            self._outputdir,
+            tmpdir,
+            keeptmpdir=self.options.keep_tmpdir,
+            debug=self.options.debug,
+            first=self.options.first,
+            timeout=self.options.timeout,
+            startport=self._getport(count),
+            extraconfigopts=self.options.extra_config_opt,
+            py3warnings=self.options.py3_warnings,
+            shell=self.options.shell,
+            hgcommand=self._hgcommand,
+            usechg=bool(self.options.with_chg or self.options.chg),
+            useipv6=useipv6,
+            **kwds
+        )
         t.should_reload = True
         return t
 
@@ -3018,8 +3373,10 @@
         # os.symlink() is a thing with py3 on Windows, but it requires
         # Administrator rights.
         if getattr(os, 'symlink', None) and os.name != 'nt':
-            vlog("# Making python executable in test path a symlink to '%s'" %
-                 sysexecutable)
+            vlog(
+                "# Making python executable in test path a symlink to '%s'"
+                % sysexecutable
+            )
             mypython = os.path.join(self._tmpbindir, pyexename)
             try:
                 if os.readlink(mypython) == sysexecutable:
@@ -3038,8 +3395,10 @@
                         raise
         else:
             exedir, exename = os.path.split(sysexecutable)
-            vlog("# Modifying search path to find %s as %s in '%s'" %
-                 (exename, pyexename, exedir))
+            vlog(
+                "# Modifying search path to find %s as %s in '%s'"
+                % (exename, pyexename, exedir)
+            )
             path = os.environ['PATH'].split(os.pathsep)
             while exedir in path:
                 path.remove(exedir)
@@ -3079,17 +3438,24 @@
             # least on Windows for now, deal with .pydistutils.cfg bugs
             # when they happen.
             nohome = b''
-        cmd = (b'"%(exe)s" setup.py %(pure)s clean --all'
-               b' build %(compiler)s --build-base="%(base)s"'
-               b' install --force --prefix="%(prefix)s"'
-               b' --install-lib="%(libdir)s"'
-               b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
-               % {b'exe': exe, b'pure': pure,
-                  b'compiler': compiler,
-                  b'base': os.path.join(self._hgtmp, b"build"),
-                  b'prefix': self._installdir, b'libdir': self._pythondir,
-                  b'bindir': self._bindir,
-                  b'nohome': nohome, b'logfile': installerrs})
+        cmd = (
+            b'"%(exe)s" setup.py %(pure)s clean --all'
+            b' build %(compiler)s --build-base="%(base)s"'
+            b' install --force --prefix="%(prefix)s"'
+            b' --install-lib="%(libdir)s"'
+            b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
+            % {
+                b'exe': exe,
+                b'pure': pure,
+                b'compiler': compiler,
+                b'base': os.path.join(self._hgtmp, b"build"),
+                b'prefix': self._installdir,
+                b'libdir': self._pythondir,
+                b'bindir': self._bindir,
+                b'nohome': nohome,
+                b'logfile': installerrs,
+            }
+        )
 
         # setuptools requires install directories to exist.
         def makedirs(p):
@@ -3098,6 +3464,7 @@
             except OSError as e:
                 if e.errno != errno.EEXIST:
                     raise
+
         makedirs(self._pythondir)
         makedirs(self._bindir)
 
@@ -3137,44 +3504,51 @@
             with open(hgbat, 'rb') as f:
                 data = f.read()
             if br'"%~dp0..\python" "%~dp0hg" %*' in data:
-                data = data.replace(br'"%~dp0..\python" "%~dp0hg" %*',
-                                    b'"%~dp0python" "%~dp0hg" %*')
+                data = data.replace(
+                    br'"%~dp0..\python" "%~dp0hg" %*',
+                    b'"%~dp0python" "%~dp0hg" %*',
+                )
                 with open(hgbat, 'wb') as f:
                     f.write(data)
             else:
                 print('WARNING: cannot fix hg.bat reference to python.exe')
 
         if self.options.anycoverage:
-            custom = os.path.join(self._testdir, 'sitecustomize.py')
-            target = os.path.join(self._pythondir, 'sitecustomize.py')
+            custom = os.path.join(
+                osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
+            )
+            target = os.path.join(self._pythondir, b'sitecustomize.py')
             vlog('# Installing coverage trigger to %s' % target)
             shutil.copyfile(custom, target)
-            rc = os.path.join(self._testdir, '.coveragerc')
+            rc = os.path.join(self._testdir, b'.coveragerc')
             vlog('# Installing coverage rc to %s' % rc)
-            os.environ['COVERAGE_PROCESS_START'] = rc
-            covdir = os.path.join(self._installdir, '..', 'coverage')
+            osenvironb[b'COVERAGE_PROCESS_START'] = rc
+            covdir = os.path.join(self._installdir, b'..', b'coverage')
             try:
                 os.mkdir(covdir)
             except OSError as e:
                 if e.errno != errno.EEXIST:
                     raise
 
-            os.environ['COVERAGE_DIR'] = covdir
+            osenvironb[b'COVERAGE_DIR'] = covdir
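+            # the sitecustomize.py installed above reads COVERAGE_DIR (and
+            # coverage itself honors COVERAGE_PROCESS_START), so tracing
+            # starts automatically in every child Python process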
 
     def _checkhglib(self, verb):
         """Ensure that the 'mercurial' package imported by python is
         the one we expect it to be.  If not, print a warning to stderr."""
-        if ((self._bindir == self._pythondir) and
-            (self._bindir != self._tmpbindir)):
+        if (self._bindir == self._pythondir) and (
+            self._bindir != self._tmpbindir
+        ):
             # The pythondir has been inferred from --with-hg flag.
             # We cannot expect anything sensible here.
             return
         expecthg = os.path.join(self._pythondir, b'mercurial')
         actualhg = self._gethgpath()
         if os.path.abspath(actualhg) != os.path.abspath(expecthg):
-            sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
-                             '         (expected %s)\n'
-                             % (verb, actualhg, expecthg))
+            sys.stderr.write(
+                'warning: %s with unexpected mercurial lib: %s\n'
+                '         (expected %s)\n' % (verb, actualhg, expecthg)
+            )
+
     def _gethgpath(self):
         """Return the path to the mercurial package that is actually found by
         the current Python interpreter."""
@@ -3198,14 +3572,20 @@
         vlog('# Performing temporary installation of CHG')
         assert os.path.dirname(self._bindir) == self._installdir
         assert self._hgroot, 'must be called after _installhg()'
-        cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
-               % {b'make': b'make',  # TODO: switch by option or environment?
-                  b'prefix': self._installdir})
+        cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
+            b'make': b'make',  # TODO: switch by option or environment?
+            b'prefix': self._installdir,
+        }
         cwd = os.path.join(self._hgroot, b'contrib', b'chg')
         vlog("# Running", cmd)
-        proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
-                                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
-                                stderr=subprocess.STDOUT)
+        proc = subprocess.Popen(
+            cmd,
+            shell=True,
+            cwd=cwd,
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+        )
         out, _err = proc.communicate()
         if proc.returncode != 0:
             if PYTHON3:
@@ -3217,28 +3597,32 @@
     def _outputcoverage(self):
         """Produce code coverage output."""
         import coverage
+
         coverage = coverage.coverage
 
         vlog('# Producing coverage report')
         # chdir is the easiest way to get short, relative paths in the
         # output.
         os.chdir(self._hgroot)
-        covdir = os.path.join(self._installdir, '..', 'coverage')
+        covdir = os.path.join(_strpath(self._installdir), '..', 'coverage')
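+        # coverage's API expects native str paths, hence the _strpath calls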
         cov = coverage(data_file=os.path.join(covdir, 'cov'))
 
         # Map install directory paths back to source directory.
-        cov.config.paths['srcdir'] = ['.', self._pythondir]
+        cov.config.paths['srcdir'] = ['.', _strpath(self._pythondir)]
 
         cov.combine()
 
-        omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
+        omit = [
+            _strpath(os.path.join(x, b'*'))
+            for x in [self._bindir, self._testdir]
+        ]
         cov.report(ignore_errors=True, omit=omit)
 
         if self.options.htmlcov:
-            htmldir = os.path.join(self._outputdir, 'htmlcov')
+            htmldir = os.path.join(_strpath(self._outputdir), 'htmlcov')
             cov.html_report(directory=htmldir, omit=omit)
         if self.options.annotate:
-            adir = os.path.join(self._outputdir, 'annotated')
+            adir = os.path.join(_strpath(self._outputdir), 'annotated')
             if not os.path.isdir(adir):
                 os.mkdir(adir)
             cov.annotate(directory=adir, omit=omit)
@@ -3262,8 +3646,11 @@
             if found:
                 vlog("# Found prerequisite", p, "at", found)
             else:
-                print("WARNING: Did not find prerequisite tool: %s " %
-                      p.decode("utf-8"))
+                print(
+                    "WARNING: Did not find prerequisite tool: %s "
+                    % p.decode("utf-8")
+                )
+
 
 def aggregateexceptions(path):
     exceptioncounts = collections.Counter()
@@ -3304,10 +3691,12 @@
     # impacted tests.
     combined = {}
     for key in exceptioncounts:
-        combined[key] = (exceptioncounts[key],
-                         len(testsbyfailure[key]),
-                         leastfailing[key][0],
-                         leastfailing[key][1])
+        combined[key] = (
+            exceptioncounts[key],
+            len(testsbyfailure[key]),
+            leastfailing[key][0],
+            leastfailing[key][1],
+        )
 
     return {
         'exceptioncounts': exceptioncounts,
@@ -3318,11 +3707,13 @@
         'bytest': failuresbytest,
     }
 
+
 if __name__ == '__main__':
     runner = TestRunner()
 
     try:
         import msvcrt
+
         msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
         msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
         msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
--- a/tests/seq.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/seq.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,6 +13,7 @@
 
 try:
     import msvcrt
+
     msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
     msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
     msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
--- a/tests/silenttestrunner.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/silenttestrunner.py	Mon Oct 21 11:09:48 2019 -0400
@@ -3,6 +3,7 @@
 import sys
 import unittest
 
+
 def main(modulename):
     '''run the tests found in module, printing nothing when all tests pass'''
     module = sys.modules[modulename]
@@ -20,5 +21,6 @@
             sys.stdout.write(exc)
         sys.exit(1)
 
+
 if os.environ.get('SILENT_BE_NOISY'):
     main = unittest.main
--- a/tests/simplestorerepo.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/simplestorerepo.py	Mon Oct 21 11:09:48 2019 -0400
@@ -21,9 +21,7 @@
     nullid,
     nullrev,
 )
-from mercurial.thirdparty import (
-    attr,
-)
+from mercurial.thirdparty import attr
 from mercurial import (
     ancestor,
     bundlerepo,
@@ -32,16 +30,19 @@
     localrepo,
     mdiff,
     pycompat,
-    repository,
     revlog,
     store,
     verify,
 )
+from mercurial.interfaces import (
+    repository,
+    util as interfaceutil,
+)
 from mercurial.utils import (
     cborutil,
-    interfaceutil,
     storageutil,
 )
+from mercurial.revlogutils import flagutil
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
@@ -51,6 +52,7 @@
 
 REQUIREMENT = 'testonly-simplestore'
 
+
 def validatenode(node):
     if isinstance(node, int):
         raise ValueError('expected node; got int')
@@ -58,13 +60,16 @@
     if len(node) != 20:
         raise ValueError('expected 20 byte node')
 
+
 def validaterev(rev):
     if not isinstance(rev, int):
         raise ValueError('expected int')
 
+
 class simplestoreerror(error.StorageError):
     pass
 
+
 @interfaceutil.implementer(repository.irevisiondelta)
 @attr.s(slots=True)
 class simplestorerevisiondelta(object):
@@ -78,6 +83,7 @@
     delta = attr.ib()
     linknode = attr.ib(default=None)
 
+
 @interfaceutil.implementer(repository.iverifyproblem)
 @attr.s(frozen=True)
 class simplefilestoreproblem(object):
@@ -85,6 +91,7 @@
     error = attr.ib(default=None)
     node = attr.ib(default=None)
 
+
 @interfaceutil.implementer(repository.ifilestorage)
 class filestorage(object):
     """Implements storage for a tracked path.
@@ -97,6 +104,8 @@
     Fulltext data is stored in files having names of the node.
     """
 
+    _flagserrorclass = simplestoreerror
+
     def __init__(self, svfs, path):
         self._svfs = svfs
         self._path = path
@@ -114,6 +123,8 @@
         self._index = []
         self._refreshindex()
 
+        self._flagprocessors = dict(flagutil.flagprocessors)
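+        # per-instance copy of the registered flag processors; the
+        # flagutil.processflags* helpers look this attribute up on the store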
+
     def _refreshindex(self):
         self._indexbynode.clear()
         self._indexbyrev.clear()
@@ -143,8 +154,9 @@
             p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
 
             # start, length, rawsize, chainbase, linkrev, p1, p2, node
-            self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
-                                entry[b'node']))
+            self._index.append(
+                (0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev, entry[b'node'])
+            )
 
         self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
 
@@ -252,57 +264,20 @@
         validaterev(baserev)
         validaterev(rev)
 
-        if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
-            or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
+        if (self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS) or (
+            self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS
+        ):
             return False
 
         return True
 
-    def _processflags(self, text, flags, operation, raw=False):
-        if flags == 0:
-            return text, True
-
-        if flags & ~revlog.REVIDX_KNOWN_FLAGS:
-            raise simplestoreerror(_("incompatible revision flag '%#x'") %
-                                   (flags & ~revlog.REVIDX_KNOWN_FLAGS))
-
-        validatehash = True
-        # Depending on the operation (read or write), the order might be
-        # reversed due to non-commutative transforms.
-        orderedflags = revlog.REVIDX_FLAGS_ORDER
-        if operation == 'write':
-            orderedflags = reversed(orderedflags)
-
-        for flag in orderedflags:
-            # If a flagprocessor has been registered for a known flag, apply the
-            # related operation transform and update result tuple.
-            if flag & flags:
-                vhash = True
-
-                if flag not in revlog._flagprocessors:
-                    message = _("missing processor for flag '%#x'") % (flag)
-                    raise simplestoreerror(message)
-
-                processor = revlog._flagprocessors[flag]
-                if processor is not None:
-                    readtransform, writetransform, rawtransform = processor
-
-                    if raw:
-                        vhash = rawtransform(self, text)
-                    elif operation == 'read':
-                        text, vhash = readtransform(self, text)
-                    else:  # write operation
-                        text, vhash = writetransform(self, text)
-                validatehash = validatehash and vhash
-
-        return text, validatehash
-
     def checkhash(self, text, node, p1=None, p2=None, rev=None):
         if p1 is None and p2 is None:
             p1, p2 = self.parents(node)
         if node != storageutil.hashrevisionsha1(text, p1, p2):
-            raise simplestoreerror(_("integrity check failed on %s") %
-                self._path)
+            raise simplestoreerror(
+                _("integrity check failed on %s") % self._path
+            )
 
     def revision(self, nodeorrev, raw=False):
         if isinstance(nodeorrev, int):
@@ -320,12 +295,20 @@
         path = b'/'.join([self._storepath, hex(node)])
         rawtext = self._svfs.read(path)
 
-        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
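+        # flag processing now lives in mercurial.revlogutils.flagutil: raw
+        # access only validates the stored text, while a normal read may
+        # transform it (the sidedata return value is unused by this store)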
+        if raw:
+            validatehash = flagutil.processflagsraw(self, rawtext, flags)
+            text = rawtext
+        else:
+            r = flagutil.processflagsread(self, rawtext, flags)
+            text, validatehash, sidedata = r
         if validatehash:
             self.checkhash(text, node, rev=rev)
 
         return text
 
+    def rawdata(self, nodeorrev):
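+        # fulltext exactly as stored, bypassing read-side flag transforms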
+        return self.revision(nodeorrev, raw=True)
+
     def read(self, node):
         validatenode(node)
 
@@ -335,7 +318,7 @@
             return revision
 
         start = revision.index(b'\1\n', 2)
-        return revision[start + 2:]
+        return revision[start + 2 :]
 
     def renamed(self, node):
         validatenode(node)
@@ -427,9 +410,14 @@
 
         return [b'/'.join((self._storepath, f)) for f in entries]
 
-    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
-                    revisionscount=False, trackedsize=False,
-                    storedsize=False):
+    def storageinfo(
+        self,
+        exclusivefiles=False,
+        sharedfiles=False,
+        revisionscount=False,
+        trackedsize=False,
+        storedsize=False,
+    ):
         # TODO do a real implementation of this
         return {
             'exclusivefiles': [],
@@ -447,22 +435,31 @@
                 self.revision(node)
             except Exception as e:
                 yield simplefilestoreproblem(
-                    error='unpacking %s: %s' % (node, e),
-                    node=node)
+                    error='unpacking %s: %s' % (node, e), node=node
+                )
                 state['skipread'].add(node)
 
-    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
-                      assumehaveparentrevisions=False,
-                      deltamode=repository.CG_DELTAMODE_STD):
+    def emitrevisions(
+        self,
+        nodes,
+        nodesorder=None,
+        revisiondata=False,
+        assumehaveparentrevisions=False,
+        deltamode=repository.CG_DELTAMODE_STD,
+    ):
         # TODO this will probably break on some ordering options.
         nodes = [n for n in nodes if n != nullid]
         if not nodes:
             return
         for delta in storageutil.emitrevisions(
-                self, nodes, nodesorder, simplestorerevisiondelta,
-                revisiondata=revisiondata,
-                assumehaveparentrevisions=assumehaveparentrevisions,
-                deltamode=deltamode):
+            self,
+            nodes,
+            nodesorder,
+            simplestorerevisiondelta,
+            revisiondata=revisiondata,
+            assumehaveparentrevisions=assumehaveparentrevisions,
+            deltamode=deltamode,
+        ):
             yield delta
 
     def add(self, text, meta, transaction, linkrev, p1, p2):
@@ -471,15 +468,24 @@
 
         return self.addrevision(text, transaction, linkrev, p1, p2)
 
-    def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
-                    flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
+    def addrevision(
+        self,
+        text,
+        transaction,
+        linkrev,
+        p1,
+        p2,
+        node=None,
+        flags=revlog.REVIDX_DEFAULT_FLAGS,
+        cachedelta=None,
+    ):
         validatenode(p1)
         validatenode(p2)
 
         if flags:
             node = node or storageutil.hashrevisionsha1(text, p1, p2)
 
-        rawtext, validatehash = self._processflags(text, flags, 'write')
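+        # the write-side flag transforms yield the rawtext that gets stored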
+        rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
 
         node = node or storageutil.hashrevisionsha1(text, p1, p2)
 
@@ -489,8 +495,9 @@
         if validatehash:
             self.checkhash(rawtext, node, p1=p1, p2=p2)
 
-        return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
-                                    flags)
+        return self._addrawrevision(
+            node, rawtext, transaction, linkrev, p1, p2, flags
+        )
 
     def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
         transaction.addbackup(self._indexpath)
@@ -499,13 +506,15 @@
 
         self._svfs.write(path, rawtext)
 
-        self._indexdata.append({
-            b'node': node,
-            b'p1': p1,
-            b'p2': p2,
-            b'linkrev': link,
-            b'flags': flags,
-        })
+        self._indexdata.append(
+            {
+                b'node': node,
+                b'p1': p1,
+                b'p2': p2,
+                b'linkrev': link,
+                b'flags': flags,
+            }
+        )
 
         self._reflectindexupdate()
 
@@ -513,14 +522,22 @@
 
     def _reflectindexupdate(self):
         self._refreshindex()
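+        # persist immediately: the whole index is rewritten as a single
+        # CBOR stream on every mutation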
-        self._svfs.write(self._indexpath,
-                         ''.join(cborutil.streamencode(self._indexdata)))
+        self._svfs.write(
+            self._indexpath, ''.join(cborutil.streamencode(self._indexdata))
+        )
 
-    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
-                 maybemissingparents=False):
+    def addgroup(
+        self,
+        deltas,
+        linkmapper,
+        transaction,
+        addrevisioncb=None,
+        maybemissingparents=False,
+    ):
         if maybemissingparents:
-            raise error.Abort(_('simple store does not support missing parents '
-                                'write mode'))
+            raise error.Abort(
+                _('simple store does not support missing parents write mode')
+            )
 
         nodes = []
 
@@ -541,8 +558,9 @@
             else:
                 text = mdiff.patch(self.revision(deltabase), delta)
 
-            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
-                                 flags)
+            self._addrawrevision(
+                node, text, transaction, linkrev, p1, p2, flags
+            )
 
             if addrevisioncb:
                 addrevisioncb(self, node)
@@ -557,8 +575,7 @@
             revishead[self.rev(entry[b'p1'])] = False
             revishead[self.rev(entry[b'p2'])] = False
 
-        return [rev for rev, ishead in sorted(revishead.items())
-                if ishead]
+        return [rev for rev, ishead in sorted(revishead.items()) if ishead]
 
     def heads(self, start=None, stop=None):
         # This is copied from revlog.py.
@@ -606,8 +623,12 @@
 
     def getstrippoint(self, minlink):
         return storageutil.resolvestripinfo(
-            minlink, len(self) - 1, self._headrevs(), self.linkrev,
-            self.parentrevs)
+            minlink,
+            len(self) - 1,
+            self._headrevs(),
+            self.linkrev,
+            self.parentrevs,
+        )
 
     def strip(self, minlink, transaction):
         if not len(self):
@@ -621,6 +642,7 @@
         self._indexdata[rev:] = []
         self._reflectindexupdate()
 
+
 def issimplestorefile(f, kind, st):
     if kind != stat.S_IFREG:
         return False
@@ -635,6 +657,7 @@
     # Otherwise assume it belongs to the simple store.
     return True
 
+
 class simplestore(store.encodedstore):
     def datafiles(self):
         for x in super(simplestore, self).datafiles():
@@ -651,6 +674,7 @@
 
             yield unencoded, encoded, size
 
+
 def reposetup(ui, repo):
     if not repo.local():
         return
@@ -664,9 +688,11 @@
 
     repo.__class__ = simplestorerepo
 
+
 def featuresetup(ui, supported):
     supported.add(REQUIREMENT)
 
+
 def newreporequirements(orig, ui, createopts):
     """Modifies default requirements for new repos to use the simple store."""
     requirements = orig(ui, createopts)
@@ -674,21 +700,23 @@
     # These requirements are only used to affect creation of the store
     # object. We have our own store. So we can remove them.
     # TODO do this once we feel like taking the test hit.
-    #if 'fncache' in requirements:
+    # if 'fncache' in requirements:
     #    requirements.remove('fncache')
-    #if 'dotencode' in requirements:
+    # if 'dotencode' in requirements:
     #    requirements.remove('dotencode')
 
     requirements.add(REQUIREMENT)
 
     return requirements
 
+
 def makestore(orig, requirements, path, vfstype):
     if REQUIREMENT not in requirements:
         return orig(requirements, path, vfstype)
 
     return simplestore(path, vfstype)
 
+
 def verifierinit(orig, self, *args, **kwargs):
     orig(self, *args, **kwargs)
 
@@ -696,10 +724,12 @@
     # advertised. So suppress these warnings.
     self.warnorphanstorefiles = False
 
+
 def extsetup(ui):
     localrepo.featuresetupfuncs.add(featuresetup)
 
-    extensions.wrapfunction(localrepo, 'newreporequirements',
-                            newreporequirements)
+    extensions.wrapfunction(
+        localrepo, 'newreporequirements', newreporequirements
+    )
     extensions.wrapfunction(localrepo, 'makestore', makestore)
     extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
--- a/tests/sitecustomize.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/sitecustomize.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,8 +6,9 @@
         import coverage
         import uuid
 
-        covpath = os.path.join(os.environ['COVERAGE_DIR'],
-                               'cov.%s' % uuid.uuid1())
+        covpath = os.path.join(
+            os.environ['COVERAGE_DIR'], 'cov.%s' % uuid.uuid1()
+        )
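+        # one coverage data file per process; run-tests combines them later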
         cov = coverage.coverage(data_file=covpath, auto_data=True)
         cov._warn_no_data = False
         cov._warn_unimported_source = False
--- a/tests/sshprotoext.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/sshprotoext.py	Mon Oct 21 11:09:48 2019 -0400
@@ -25,14 +25,17 @@
 configitem(b'sshpeer', b'mode', default=None)
 configitem(b'sshpeer', b'handshake-mode', default=None)
 
+
 class bannerserver(wireprotoserver.sshserver):
     """Server that sends a banner to stdout."""
+
     def serve_forever(self):
         for i in range(10):
             self._fout.write(b'banner: line %d\n' % i)
 
         super(bannerserver, self).serve_forever()
 
+
 class prehelloserver(wireprotoserver.sshserver):
     """Tests behavior when connecting to <0.9.1 servers.
 
@@ -41,6 +44,7 @@
     to SSH servers. This mock server tests behavior of the handshake
     when ``hello`` is not supported.
     """
+
     def serve_forever(self):
         l = self._fin.readline()
         assert l == b'hello\n'
@@ -48,13 +52,15 @@
         wireprotoserver._sshv1respondbytes(self._fout, b'')
         l = self._fin.readline()
         assert l == b'between\n'
-        proto = wireprotoserver.sshv1protocolhandler(self._ui, self._fin,
-                                                     self._fout)
+        proto = wireprotoserver.sshv1protocolhandler(
+            self._ui, self._fin, self._fout
+        )
         rsp = wireprotov1server.dispatch(self._repo, proto, b'between')
         wireprotoserver._sshv1respondbytes(self._fout, rsp.data)
 
         super(prehelloserver, self).serve_forever()
 
+
 def performhandshake(orig, ui, stdin, stdout, stderr):
     """Wrapped version of sshpeer._performhandshake to send extra commands."""
     mode = ui.config(b'sshpeer', b'handshake-mode')
@@ -73,8 +79,8 @@
         stdin.flush()
         return orig(ui, stdin, stdout, stderr)
     else:
-        raise error.ProgrammingError(b'unknown HANDSHAKECOMMANDMODE: %s' %
-                                     mode)
+        raise error.ProgrammingError(b'unknown HANDSHAKECOMMANDMODE: %s' % mode)
+
 
 def extsetup(ui):
     # It's easier for tests to define the server behavior via environment
--- a/tests/svnurlof.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/svnurlof.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,6 +6,7 @@
     util,
 )
 
+
 def main(argv):
     enc = util.urlreq.quote(pycompat.sysbytes(argv[1]))
     if pycompat.iswindows:
@@ -14,5 +15,6 @@
         fmt = 'file://%s'
     print(fmt % pycompat.sysstr(enc))
 
+
 if __name__ == '__main__':
     main(sys.argv)
--- a/tests/svnxml.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/svnxml.py	Mon Oct 21 11:09:48 2019 -0400
@@ -5,10 +5,10 @@
 import sys
 import xml.dom.minidom
 
+
 def xmltext(e):
-    return ''.join(c.data for c
-                   in e.childNodes
-                   if c.nodeType == c.TEXT_NODE)
+    return ''.join(c.data for c in e.childNodes if c.nodeType == c.TEXT_NODE)
+
 
 def parseentry(entry):
     e = {}
@@ -27,6 +27,7 @@
             e['paths'].append((path, action, frompath, fromrev))
     return e
 
+
 def parselog(data):
     entries = []
     doc = xml.dom.minidom.parseString(data)
@@ -34,6 +35,7 @@
         entries.append(parseentry(e))
     return entries
 
+
 def printentries(entries):
     try:
         fp = sys.stdout.buffer
@@ -49,8 +51,8 @@
             p = b' %s %s%s\n' % (action, path, frominfo)
             fp.write(p)
 
+
 if __name__ == '__main__':
     data = sys.stdin.read()
     entries = parselog(data)
     printentries(entries)
-
--- a/tests/test-absorb-filefixupstate.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-absorb-filefixupstate.py	Mon Oct 21 11:09:48 2019 -0400
@@ -4,6 +4,7 @@
 from mercurial import pycompat
 from hgext import absorb
 
+
 class simplefctx(object):
     def __init__(self, content):
         self.content = content
@@ -11,6 +12,7 @@
     def data(self):
         return self.content
 
+
 def insertreturns(x):
     # insert "\n"s after each single char
     if isinstance(x, bytes):
@@ -18,6 +20,7 @@
     else:
         return pycompat.maplist(insertreturns, x)
 
+
 def removereturns(x):
     # the revert of "insertreturns"
     if isinstance(x, bytes):
@@ -25,10 +28,14 @@
     else:
         return pycompat.maplist(removereturns, x)
 
+
 def assertlistequal(lhs, rhs, decorator=lambda x: x):
     if lhs != rhs:
-        raise RuntimeError('mismatch:\n actual:   %r\n expected: %r'
-                           % tuple(map(decorator, [lhs, rhs])))
+        raise RuntimeError(
+            'mismatch:\n actual:   %r\n expected: %r'
+            % tuple(map(decorator, [lhs, rhs]))
+        )
+
 
 def testfilefixup(oldcontents, workingcopy, expectedcontents, fixups=None):
     """([str], str, [str], [(rev, a1, a2, b1, b2)]?) -> None
@@ -43,22 +50,24 @@
     expectedcontents = insertreturns(expectedcontents)
     oldcontents = insertreturns(oldcontents)
     workingcopy = insertreturns(workingcopy)
-    state = absorb.filefixupstate(pycompat.maplist(simplefctx, oldcontents),
-                                  'path')
+    state = absorb.filefixupstate(
+        pycompat.maplist(simplefctx, oldcontents), 'path'
+    )
     state.diffwith(simplefctx(workingcopy))
     if fixups is not None:
         assertlistequal(state.fixups, fixups)
     state.apply()
     assertlistequal(state.finalcontents, expectedcontents, removereturns)
 
+
 def buildcontents(linesrevs):
     # linesrevs: [(linecontent : str, revs : [int])]
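+    # returns [b''] (the state before any revision) followed by each
+    # revision's content, oldest first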
     revs = set(itertools.chain(*[revs for line, revs in linesrevs]))
     return [b''] + [
-        b''.join([l for l, rs in linesrevs if r in rs])
-        for r in sorted(revs)
+        b''.join([l for l, rs in linesrevs if r in rs]) for r in sorted(revs)
     ]
 
+
 # input case 0: one single commit
 case0 = [b'', b'11']
 
@@ -69,11 +78,7 @@
 testfilefixup(case0, b'222', [b'', b'222'])
 
 # input case 1: 3 lines, each commit adds one line
-case1 = buildcontents([
-    (b'1', [1, 2, 3]),
-    (b'2', [   2, 3]),
-    (b'3', [      3]),
-])
+case1 = buildcontents([(b'1', [1, 2, 3]), (b'2', [2, 3]), (b'3', [3])])
 
 # 1:1 line mapping
 testfilefixup(case1, b'123', case1)
@@ -90,10 +95,10 @@
 testfilefixup(case1, b'ab', case1)
 
 # deletion
-testfilefixup(case1, b'',   [b'', b'', b'', b''])
-testfilefixup(case1, b'1',  [b'', b'1', b'1', b'1'])
-testfilefixup(case1, b'2',  [b'', b'', b'2', b'2'])
-testfilefixup(case1, b'3',  [b'', b'', b'', b'3'])
+testfilefixup(case1, b'', [b'', b'', b'', b''])
+testfilefixup(case1, b'1', [b'', b'1', b'1', b'1'])
+testfilefixup(case1, b'2', [b'', b'', b'2', b'2'])
+testfilefixup(case1, b'3', [b'', b'', b'', b'3'])
 testfilefixup(case1, b'13', [b'', b'1', b'1', b'13'])
 
 # replaces
@@ -116,15 +121,12 @@
 testfilefixup(case1, b'12b3', case1)
 
 # input case 2: delete in the middle
-case2 = buildcontents([
-    (b'11', [1, 2]),
-    (b'22', [1   ]),
-    (b'33', [1, 2]),
-])
+case2 = buildcontents([(b'11', [1, 2]), (b'22', [1]), (b'33', [1, 2])])
 
 # deletion (optimize code should make it 2 chunks)
-testfilefixup(case2, b'', [b'', b'22', b''],
-              fixups=[(4, 0, 2, 0, 0), (4, 2, 4, 0, 0)])
+testfilefixup(
+    case2, b'', [b'', b'22', b''], fixups=[(4, 0, 2, 0, 0), (4, 2, 4, 0, 0)]
+)
 
 # 1:1 line mapping
 testfilefixup(case2, b'aaaa', [b'', b'aa22aa', b'aaaa'])
@@ -134,11 +136,7 @@
 testfilefixup(case2, b'aaa', case2)
 
 # input case 3: rev 3 reverts rev 2
-case3 = buildcontents([
-    (b'1', [1, 2, 3]),
-    (b'2', [   2   ]),
-    (b'3', [1, 2, 3]),
-])
+case3 = buildcontents([(b'1', [1, 2, 3]), (b'2', [2]), (b'3', [1, 2, 3])])
 
 # 1:1 line mapping
 testfilefixup(case3, b'13', case3)
@@ -157,24 +155,26 @@
 testfilefixup(case3, b'a13c', [b'', b'a13c', b'a123c', b'a13c'])
 
 # input case 4: a slightly complex case
-case4 = buildcontents([
-    (b'1', [1, 2, 3]),
-    (b'2', [   2, 3]),
-    (b'3', [1, 2,  ]),
-    (b'4', [1,    3]),
-    (b'5', [      3]),
-    (b'6', [   2, 3]),
-    (b'7', [   2   ]),
-    (b'8', [   2, 3]),
-    (b'9', [      3]),
-])
+case4 = buildcontents(
+    [
+        (b'1', [1, 2, 3]),
+        (b'2', [2, 3]),
+        (b'3', [1, 2]),
+        (b'4', [1, 3]),
+        (b'5', [3]),
+        (b'6', [2, 3]),
+        (b'7', [2]),
+        (b'8', [2, 3]),
+        (b'9', [3]),
+    ]
+)
 
 testfilefixup(case4, b'1245689', case4)
 testfilefixup(case4, b'1a2456bbb', case4)
 testfilefixup(case4, b'1abc5689', case4)
 testfilefixup(case4, b'1ab5689', [b'', b'134', b'1a3678', b'1ab5689'])
 testfilefixup(case4, b'aa2bcd8ee', [b'', b'aa34', b'aa23d78', b'aa2bcd8ee'])
-testfilefixup(case4, b'aa2bcdd8ee',[b'', b'aa34', b'aa23678', b'aa24568ee'])
+testfilefixup(case4, b'aa2bcdd8ee', [b'', b'aa34', b'aa23678', b'aa24568ee'])
 testfilefixup(case4, b'aaaaaa', case4)
 testfilefixup(case4, b'aa258b', [b'', b'aa34', b'aa2378', b'aa258b'])
 testfilefixup(case4, b'25bb', [b'', b'34', b'23678', b'25689'])
@@ -183,11 +183,7 @@
 testfilefixup(case4, b'', [b'', b'34', b'37', b''])
 
 # input case 5: replace a small chunk which is near a deleted line
-case5 = buildcontents([
-    (b'12', [1, 2]),
-    (b'3',  [1]),
-    (b'4',  [1, 2]),
-])
+case5 = buildcontents([(b'12', [1, 2]), (b'3', [1]), (b'4', [1, 2])])
 
 testfilefixup(case5, b'1cd4', [b'', b'1cd34', b'1cd4'])
 
--- a/tests/test-acl.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-acl.t	Mon Oct 21 11:09:48 2019 -0400
@@ -131,17 +131,17 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   bundle2-input-part: total payload size 1553
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   repository tip rolled back to revision 0 (undo push)
   0:6675d58eff77
@@ -196,19 +196,19 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: changes have source "push" - skipping
   bundle2-input-part: total payload size 1553
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   repository tip rolled back to revision 0 (undo push)
   0:6675d58eff77
@@ -263,7 +263,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.branches not enabled
@@ -279,13 +278,14 @@
   bundle2-input-part: total payload size 1553
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   repository tip rolled back to revision 0 (undo push)
   0:6675d58eff77
@@ -340,7 +340,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.branches not enabled
@@ -351,7 +350,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "fred" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
   bundle2-input-part: total payload size 1553
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "fred" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
@@ -409,7 +408,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.branches not enabled
@@ -424,7 +422,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "fred" not allowed on "quux/file.py" (changeset "911600dab2ae")
   bundle2-input-part: total payload size 1553
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "fred" not allowed on "quux/file.py" (changeset "911600dab2ae")
@@ -483,7 +481,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "barney"
   acl: acl.allow.branches not enabled
@@ -494,7 +491,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "barney" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
   bundle2-input-part: total payload size 1553
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "barney" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
@@ -554,7 +551,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.branches not enabled
@@ -569,7 +565,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "fred" not allowed on "quux/file.py" (changeset "911600dab2ae")
   bundle2-input-part: total payload size 1553
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "fred" not allowed on "quux/file.py" (changeset "911600dab2ae")
@@ -630,7 +626,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.branches not enabled
@@ -643,7 +638,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
   bundle2-input-part: total payload size 1553
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
@@ -703,7 +698,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "barney"
   acl: acl.allow.branches not enabled
@@ -714,7 +708,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "barney" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
   bundle2-input-part: total payload size 1553
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "barney" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
@@ -775,7 +769,6 @@
   adding manifests
   adding file changes
   adding foo/file.txt revisions
-  added 1 changesets with 1 changes to 1 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.branches not enabled
@@ -794,13 +787,14 @@
   acl: acl.allow.bookmarks not enabled
   acl: acl.deny.bookmarks not enabled
   acl: bookmark access granted: "ef1ea85a6374b77d6da9dcda9541f498f2d17df7" on bookmark "moving-bookmark"
-  bundle2-input-bundle: 6 parts total
+  bundle2-input-bundle: 7 parts total
   updating the branch cache
+  added 1 changesets with 1 changes to 1 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   updating bookmark moving-bookmark
   listing keys for "phases"
   repository tip rolled back to revision 0 (undo push)
@@ -861,7 +855,6 @@
   adding manifests
   adding file changes
   adding foo/file.txt revisions
-  added 1 changesets with 1 changes to 1 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.branches not enabled
@@ -880,7 +873,7 @@
   acl: acl.allow.bookmarks not enabled
   acl: acl.deny.bookmarks enabled, 1 entries for user fred
   error: prepushkey.acl hook failed: acl: user "fred" denied on bookmark "moving-bookmark" (changeset "ef1ea85a6374b77d6da9dcda9541f498f2d17df7")
-  bundle2-input-bundle: 6 parts total
+  bundle2-input-bundle: 7 parts total
   transaction abort!
   rollback completed
   abort: acl: user "fred" denied on bookmark "moving-bookmark" (changeset "ef1ea85a6374b77d6da9dcda9541f498f2d17df7")
@@ -950,7 +943,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "barney"
   acl: acl.allow.branches not enabled
@@ -966,13 +958,14 @@
   bundle2-input-part: total payload size 1553
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   repository tip rolled back to revision 0 (undo push)
   0:6675d58eff77
@@ -1034,7 +1027,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "wilma"
   acl: acl.allow.branches not enabled
@@ -1049,7 +1041,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "wilma" not allowed on "quux/file.py" (changeset "911600dab2ae")
   bundle2-input-part: total payload size 1553
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "wilma" not allowed on "quux/file.py" (changeset "911600dab2ae")
@@ -1116,13 +1108,12 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "barney"
   error: pretxnchangegroup.acl hook raised an exception: [Errno *] * (glob)
   bundle2-input-part: total payload size 1553
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: $ENOENT$: '../acl.config'
@@ -1193,7 +1184,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "betty"
   acl: acl.allow.branches not enabled
@@ -1208,7 +1198,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "betty" not allowed on "quux/file.py" (changeset "911600dab2ae")
   bundle2-input-part: total payload size 1553
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "betty" not allowed on "quux/file.py" (changeset "911600dab2ae")
@@ -1281,7 +1271,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "barney"
   acl: acl.allow.branches not enabled
@@ -1297,13 +1286,14 @@
   bundle2-input-part: total payload size 1553
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   repository tip rolled back to revision 0 (undo push)
   0:6675d58eff77
@@ -1369,7 +1359,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.branches not enabled
@@ -1385,13 +1374,14 @@
   bundle2-input-part: total payload size 1553
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   repository tip rolled back to revision 0 (undo push)
   0:6675d58eff77
@@ -1453,7 +1443,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.branches not enabled
@@ -1466,7 +1455,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
   bundle2-input-part: total payload size 1553
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
@@ -1534,7 +1523,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.branches not enabled
@@ -1551,13 +1539,14 @@
   bundle2-input-part: total payload size 1553
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   repository tip rolled back to revision 0 (undo push)
   0:6675d58eff77
@@ -1619,7 +1608,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 3 changesets with 3 changes to 3 files
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.branches not enabled
@@ -1634,7 +1622,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
   bundle2-input-part: total payload size 1553
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
@@ -1743,7 +1731,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 4 changesets with 4 changes to 4 files (+1 heads)
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "astro"
   acl: acl.allow.branches not enabled
@@ -1761,13 +1748,14 @@
   bundle2-input-part: total payload size 2068
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   repository tip rolled back to revision 2 (undo push)
   2:fb35475503ef
@@ -1829,7 +1817,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 4 changesets with 4 changes to 4 files (+1 heads)
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "astro"
   acl: acl.allow.branches not enabled
@@ -1845,7 +1832,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "astro" denied on branch "foobar" (changeset "e8fc755d4d82")
   bundle2-input-part: total payload size 2068
   bundle2-input-part: total payload size 48
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "astro" denied on branch "foobar" (changeset "e8fc755d4d82")
@@ -1908,7 +1895,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 4 changesets with 4 changes to 4 files (+1 heads)
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "astro"
   acl: acl.allow.branches enabled, 0 entries for user astro
@@ -1918,7 +1904,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "astro" not allowed on branch "default" (changeset "ef1ea85a6374")
   bundle2-input-part: total payload size 2068
   bundle2-input-part: total payload size 48
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "astro" not allowed on branch "default" (changeset "ef1ea85a6374")
@@ -1983,7 +1969,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 4 changesets with 4 changes to 4 files (+1 heads)
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "astro"
   acl: acl.allow.branches enabled, 0 entries for user astro
@@ -1993,7 +1978,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "astro" not allowed on branch "default" (changeset "ef1ea85a6374")
   bundle2-input-part: total payload size 2068
   bundle2-input-part: total payload size 48
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "astro" not allowed on branch "default" (changeset "ef1ea85a6374")
@@ -2052,7 +2037,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 4 changesets with 4 changes to 4 files (+1 heads)
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "george"
   acl: acl.allow.branches enabled, 1 entries for user george
@@ -2070,13 +2054,14 @@
   bundle2-input-part: total payload size 2068
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   repository tip rolled back to revision 2 (undo push)
   2:fb35475503ef
@@ -2143,7 +2128,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 4 changesets with 4 changes to 4 files (+1 heads)
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "george"
   acl: acl.allow.branches enabled, 1 entries for user george
@@ -2161,13 +2145,14 @@
   bundle2-input-part: total payload size 2068
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   repository tip rolled back to revision 2 (undo push)
   2:fb35475503ef
@@ -2233,7 +2218,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 4 changesets with 4 changes to 4 files (+1 heads)
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "george"
   acl: acl.allow.branches not enabled
@@ -2243,7 +2227,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "george" denied on branch "default" (changeset "ef1ea85a6374")
   bundle2-input-part: total payload size 2068
   bundle2-input-part: total payload size 48
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "george" denied on branch "default" (changeset "ef1ea85a6374")
@@ -2307,7 +2291,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 4 changesets with 4 changes to 4 files (+1 heads)
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "astro"
   acl: acl.allow.branches not enabled
@@ -2325,13 +2308,14 @@
   bundle2-input-part: total payload size 2068
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   repository tip rolled back to revision 2 (undo push)
   2:fb35475503ef
@@ -2391,7 +2375,6 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  added 4 changesets with 4 changes to 4 files (+1 heads)
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "george"
   acl: acl.allow.branches not enabled
@@ -2401,7 +2384,7 @@
   error: pretxnchangegroup.acl hook failed: acl: user "george" denied on branch "default" (changeset "ef1ea85a6374")
   bundle2-input-part: total payload size 2068
   bundle2-input-part: total payload size 48
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
   abort: acl: user "george" denied on branch "default" (changeset "ef1ea85a6374")
--- a/tests/test-amend.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-amend.t	Mon Oct 21 11:09:48 2019 -0400
@@ -450,6 +450,13 @@
   abort: --date and --currentdate are mutually exclusive
   [255]
 
+Close branch
+
+  $ hg amend --secret --close-branch
+  $ hg log --limit 1 -T 'close={get(extras, "close")}\nphase={phase}\n'
+  close=1
+  phase=secret
+
   $ cd ..
 
 Corner case of amend from issue6157:
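
A note on the template in the new test above: `--close-branch` records `close=1` in the changeset's extras mapping, which `{get(extras, "close")}` reads back, while `--secret` sets the phase checked by `{phase}`. A toy illustration of the extras lookup (a plain dict, not repo code):

    # toy stand-in for a changeset's extras after `hg amend --close-branch`
    extras = {b'branch': b'default', b'close': b'1'}
    print(extras.get(b'close'))  # b'1' -- rendered as close=1 by the template
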
--- a/tests/test-ancestor.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-ancestor.py	Mon Oct 21 11:09:48 2019 -0400
@@ -22,6 +22,7 @@
     long = int
     xrange = range
 
+
 def buildgraph(rng, nodes=100, rootprob=0.05, mergeprob=0.2, prevprob=0.7):
     '''nodes: total number of nodes in the graph
     rootprob: probability that a new node (not 0) will be a root
@@ -51,6 +52,7 @@
 
     return graph
 
+
 def buildancestorsets(graph):
     ancs = [None] * len(graph)
     for i in xrange(len(graph)):
@@ -61,17 +63,21 @@
             ancs[i].update(ancs[p])
     return ancs
 
+
 class naiveincrementalmissingancestors(object):
     def __init__(self, ancs, bases):
         self.ancs = ancs
         self.bases = set(bases)
+
     def addbases(self, newbases):
         self.bases.update(newbases)
+
     def removeancestorsfrom(self, revs):
         for base in self.bases:
             if base != nullrev:
                 revs.difference_update(self.ancs[base])
         revs.discard(nullrev)
+
     def missingancestors(self, revs):
         res = set()
         for rev in revs:
@@ -82,6 +88,7 @@
                 res.difference_update(self.ancs[base])
         return sorted(res)
 
+
 def test_missingancestors(seed, rng):
     # empirically observed to take around 1 second
     graphcount = 100
@@ -138,8 +145,14 @@
                     inc.removeancestorsfrom(hrevs)
                     naiveinc.removeancestorsfrom(rrevs)
                     if hrevs != rrevs:
-                        err(seed, graph, bases, seq, sorted(hrevs),
-                            sorted(rrevs))
+                        err(
+                            seed,
+                            graph,
+                            bases,
+                            seq,
+                            sorted(hrevs),
+                            sorted(rrevs),
+                        )
                 else:
                     revs = samplerevs(graphnodes)
                     seq.append(('missingancestors', revs))
@@ -148,6 +161,7 @@
                     if h != r:
                         err(seed, graph, bases, seq, h, r)
 
+
 # graph is a dict of child->parent adjacency lists for this graph:
 # o  13
 # |
@@ -177,9 +191,23 @@
 # |
 # o  0
 
-graph = {0: [-1, -1], 1: [0, -1], 2: [1, -1], 3: [1, -1], 4: [2, -1],
-         5: [4, -1], 6: [4, -1], 7: [4, -1], 8: [-1, -1], 9: [6, 7],
-         10: [5, -1], 11: [3, 7], 12: [9, -1], 13: [8, -1]}
+graph = {
+    0: [-1, -1],
+    1: [0, -1],
+    2: [1, -1],
+    3: [1, -1],
+    4: [2, -1],
+    5: [4, -1],
+    6: [4, -1],
+    7: [4, -1],
+    8: [-1, -1],
+    9: [6, 7],
+    10: [5, -1],
+    11: [3, 7],
+    12: [9, -1],
+    13: [8, -1],
+}
+
 
 def test_missingancestors_explicit():
     """A few explicit cases, easier to check for catching errors in refactors.
@@ -187,43 +215,128 @@
     The bigger graph at the end has been produced by the random generator
     above, and we have some evidence that the other tests don't cover it.
     """
-    for i, (bases, revs) in enumerate((({1, 2, 3, 4, 7}, set(xrange(10))),
-                                       ({10}, set({11, 12, 13, 14})),
-                                       ({7}, set({1, 2, 3, 4, 5})),
-                                       )):
+    for i, (bases, revs) in enumerate(
+        (
+            ({1, 2, 3, 4, 7}, set(xrange(10))),
+            ({10}, set({11, 12, 13, 14})),
+            ({7}, set({1, 2, 3, 4, 5})),
+        )
+    ):
         print("%% removeancestorsfrom(), example %d" % (i + 1))
         missanc = ancestor.incrementalmissingancestors(graph.get, bases)
         missanc.removeancestorsfrom(revs)
         print("remaining (sorted): %s" % sorted(list(revs)))
 
-    for i, (bases, revs) in enumerate((({10}, {11}),
-                                       ({11}, {10}),
-                                       ({7}, {9, 11}),
-                                       )):
+    for i, (bases, revs) in enumerate(
+        (({10}, {11}), ({11}, {10}), ({7}, {9, 11}),)
+    ):
         print("%% missingancestors(), example %d" % (i + 1))
         missanc = ancestor.incrementalmissingancestors(graph.get, bases)
         print("return %s" % missanc.missingancestors(revs))
 
     print("% removeancestorsfrom(), bigger graph")
     vecgraph = [
-        [-1, -1], [0, -1], [1, 0], [2, 1], [3, -1], [4, -1], [5, 1],
-        [2, -1], [7, -1], [8, -1], [9, -1], [10, 1], [3, -1], [12, -1],
-        [13, -1], [14, -1], [4, -1], [16, -1], [17, -1], [18, -1],
-        [19, 11], [20, -1], [21, -1], [22, -1], [23, -1], [2, -1],
-        [3, -1], [26, 24], [27, -1], [28, -1], [12, -1], [1, -1], [1, 9],
-        [32, -1], [33, -1], [34, 31], [35, -1], [36, 26], [37, -1],
-        [38, -1], [39, -1], [40, -1], [41, -1], [42, 26], [0, -1],
-        [44, -1], [45, 4], [40, -1], [47, -1], [36, 0], [49, -1],
-        [-1, -1], [51, -1], [52, -1], [53, -1], [14, -1],
-        [55, -1], [15, -1], [23, -1], [58, -1], [59, -1], [2, -1],
-        [61, 59], [62, -1], [63, -1], [-1, -1], [65, -1],
-        [66, -1], [67, -1], [68, -1], [37, 28], [69, 25],
-        [71, -1], [72, -1], [50, 2], [74, -1], [12, -1],
-        [18, -1], [77, -1], [78, -1], [79, -1], [43, 33],
-        [81, -1], [82, -1], [83, -1], [84, 45], [85, -1],
-        [86, -1], [-1, -1], [88, -1], [-1, -1], [76, 83], [44, -1],
-        [92, -1], [93, -1], [9, -1], [95, 67], [96, -1], [97, -1],
-        [-1, -1]]
+        [-1, -1],
+        [0, -1],
+        [1, 0],
+        [2, 1],
+        [3, -1],
+        [4, -1],
+        [5, 1],
+        [2, -1],
+        [7, -1],
+        [8, -1],
+        [9, -1],
+        [10, 1],
+        [3, -1],
+        [12, -1],
+        [13, -1],
+        [14, -1],
+        [4, -1],
+        [16, -1],
+        [17, -1],
+        [18, -1],
+        [19, 11],
+        [20, -1],
+        [21, -1],
+        [22, -1],
+        [23, -1],
+        [2, -1],
+        [3, -1],
+        [26, 24],
+        [27, -1],
+        [28, -1],
+        [12, -1],
+        [1, -1],
+        [1, 9],
+        [32, -1],
+        [33, -1],
+        [34, 31],
+        [35, -1],
+        [36, 26],
+        [37, -1],
+        [38, -1],
+        [39, -1],
+        [40, -1],
+        [41, -1],
+        [42, 26],
+        [0, -1],
+        [44, -1],
+        [45, 4],
+        [40, -1],
+        [47, -1],
+        [36, 0],
+        [49, -1],
+        [-1, -1],
+        [51, -1],
+        [52, -1],
+        [53, -1],
+        [14, -1],
+        [55, -1],
+        [15, -1],
+        [23, -1],
+        [58, -1],
+        [59, -1],
+        [2, -1],
+        [61, 59],
+        [62, -1],
+        [63, -1],
+        [-1, -1],
+        [65, -1],
+        [66, -1],
+        [67, -1],
+        [68, -1],
+        [37, 28],
+        [69, 25],
+        [71, -1],
+        [72, -1],
+        [50, 2],
+        [74, -1],
+        [12, -1],
+        [18, -1],
+        [77, -1],
+        [78, -1],
+        [79, -1],
+        [43, 33],
+        [81, -1],
+        [82, -1],
+        [83, -1],
+        [84, 45],
+        [85, -1],
+        [86, -1],
+        [-1, -1],
+        [88, -1],
+        [-1, -1],
+        [76, 83],
+        [44, -1],
+        [92, -1],
+        [93, -1],
+        [9, -1],
+        [95, 67],
+        [96, -1],
+        [97, -1],
+        [-1, -1],
+    ]
     problem_rev = 28
     problem_base = 70
     # problem_rev is a parent of problem_base, but a faulty implementation
@@ -239,16 +352,24 @@
     else:
         print("Ok")
 
+
 def genlazyancestors(revs, stoprev=0, inclusive=False):
-    print(("%% lazy ancestor set for %s, stoprev = %s, inclusive = %s" %
-           (revs, stoprev, inclusive)))
-    return ancestor.lazyancestors(graph.get, revs, stoprev=stoprev,
-                                  inclusive=inclusive)
+    print(
+        (
+            "%% lazy ancestor set for %s, stoprev = %s, inclusive = %s"
+            % (revs, stoprev, inclusive)
+        )
+    )
+    return ancestor.lazyancestors(
+        graph.get, revs, stoprev=stoprev, inclusive=inclusive
+    )
+
 
 def printlazyancestors(s, l):
     print('membership: %r' % [n for n in l if n in s])
     print('iteration:  %r' % list(s))
 
+
 def test_lazyancestors():
     # Empty revs
     s = genlazyancestors([])
@@ -282,6 +403,7 @@
     s = genlazyancestors([10, 1], inclusive=True)
     printlazyancestors(s, [2, 10, 4, 5, -1, 0, 1])
 
+
 # The C gca algorithm requires a real repo. These are textual descriptions of
 # DAGs that have been known to be problematic, and, optionally, known pairs
 # of revisions and their expected ancestor list.
@@ -290,6 +412,8 @@
     (b'+3*3/*2*2/*4*4/*4/2*4/2*2', {}),
     (b'+2*2*/2*4*/4*/3*2/4', {(6, 7): [3, 5]}),
 ]
+
+
 def test_gca():
     u = uimod.ui.load()
     for i, (dag, tests) in enumerate(dagtests):
@@ -312,19 +436,21 @@
                 if (a, b) in tests:
                     expected = tests[(a, b)]
                 if cgcas != pygcas or (expected and cgcas != expected):
-                    print("test_gca: for dag %s, gcas for %d, %d:"
-                          % (dag, a, b))
+                    print(
+                        "test_gca: for dag %s, gcas for %d, %d:" % (dag, a, b)
+                    )
                     print("  C returned:      %s" % cgcas)
                     print("  Python returned: %s" % pygcas)
                     if expected:
                         print("  expected:        %s" % expected)
 
+
 def main():
     seed = None
     opts, args = getopt.getopt(sys.argv[1:], 's:', ['seed='])
     for o, a in opts:
         if o in ('-s', '--seed'):
-            seed = long(a, base=0) # accepts base 10 or 16 strings
+            seed = long(a, base=0)  # accepts base 10 or 16 strings
 
     if seed is None:
         try:
@@ -338,5 +464,6 @@
     test_lazyancestors()
     test_gca()
 
+
 if __name__ == '__main__':
     main()
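
The reformatted `graph` literal above is a child-to-parents adjacency map in which `-1` stands for the null revision and parents always precede their children. A self-contained sketch of how `buildancestorsets` (shown in an earlier hunk of this file) derives inclusive ancestor sets from such a map; the `demo` graph is invented for illustration:

    nullrev = -1

    def buildancestorsets(graph):
        # graph: {rev: [p1, p2]}; topological numbering guarantees each
        # parent's set is complete before its children are processed
        ancs = [None] * len(graph)
        for i in range(len(graph)):
            ancs[i] = {i}
            for p in graph[i]:
                if p != nullrev:
                    ancs[i].update(ancs[p])
        return ancs

    demo = {0: [-1, -1], 1: [0, -1], 2: [1, -1], 3: [1, -1], 4: [2, -1]}
    assert buildancestorsets(demo)[4] == {0, 1, 2, 4}
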
--- a/tests/test-annotate.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-annotate.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,15 +13,19 @@
     _annotatepair,
 )
 
+
 def tr(a):
-    return [annotateline(fctx, lineno, skip)
-            for fctx, lineno, skip in zip(a.fctxs, a.linenos, a.skips)]
+    return [
+        annotateline(fctx, lineno, skip)
+        for fctx, lineno, skip in zip(a.fctxs, a.linenos, a.skips)
+    ]
+
 
 class AnnotateTests(unittest.TestCase):
     """Unit tests for annotate code."""
 
     def testannotatepair(self):
-        self.maxDiff = None # camelcase-required
+        self.maxDiff = None  # camelcase-required
 
         oldfctx = b'old'
         p1fctx, p2fctx, childfctx = b'p1', b'p2', b'c'
@@ -41,70 +45,94 @@
         oldann = decorate(olddata, oldfctx)
         p1ann = decorate(p1data, p1fctx)
         p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
-        self.assertEqual(tr(p1ann), [
-            annotateline(b'old', 1),
-            annotateline(b'old', 2),
-            annotateline(b'p1', 3),
-        ])
+        self.assertEqual(
+            tr(p1ann),
+            [
+                annotateline(b'old', 1),
+                annotateline(b'old', 2),
+                annotateline(b'p1', 3),
+            ],
+        )
 
         p2ann = decorate(p2data, p2fctx)
         p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
-        self.assertEqual(tr(p2ann), [
-            annotateline(b'old', 1),
-            annotateline(b'p2', 2),
-            annotateline(b'p2', 3),
-        ])
+        self.assertEqual(
+            tr(p2ann),
+            [
+                annotateline(b'old', 1),
+                annotateline(b'p2', 2),
+                annotateline(b'p2', 3),
+            ],
+        )
 
         # Test with multiple parents (note the difference caused by ordering)
 
         childann = decorate(childdata, childfctx)
-        childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
-                                 diffopts)
-        self.assertEqual(tr(childann), [
-            annotateline(b'old', 1),
-            annotateline(b'c', 2),
-            annotateline(b'p2', 2),
-            annotateline(b'c', 4),
-            annotateline(b'p2', 3),
-        ])
+        childann = _annotatepair(
+            [p1ann, p2ann], childfctx, childann, False, diffopts
+        )
+        self.assertEqual(
+            tr(childann),
+            [
+                annotateline(b'old', 1),
+                annotateline(b'c', 2),
+                annotateline(b'p2', 2),
+                annotateline(b'c', 4),
+                annotateline(b'p2', 3),
+            ],
+        )
 
         childann = decorate(childdata, childfctx)
-        childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
-                                 diffopts)
-        self.assertEqual(tr(childann), [
-            annotateline(b'old', 1),
-            annotateline(b'c', 2),
-            annotateline(b'p1', 3),
-            annotateline(b'c', 4),
-            annotateline(b'p2', 3),
-        ])
+        childann = _annotatepair(
+            [p2ann, p1ann], childfctx, childann, False, diffopts
+        )
+        self.assertEqual(
+            tr(childann),
+            [
+                annotateline(b'old', 1),
+                annotateline(b'c', 2),
+                annotateline(b'p1', 3),
+                annotateline(b'c', 4),
+                annotateline(b'p2', 3),
+            ],
+        )
 
         # Test with skipchild (note the difference caused by ordering)
 
         childann = decorate(childdata, childfctx)
-        childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
-                                 diffopts)
-        self.assertEqual(tr(childann), [
-            annotateline(b'old', 1),
-            annotateline(b'old', 2, True),
-            # note that this line was carried over from earlier so it is *not*
-            # marked skipped
-            annotateline(b'p2', 2),
-            annotateline(b'p2', 2, True),
-            annotateline(b'p2', 3),
-        ])
+        childann = _annotatepair(
+            [p1ann, p2ann], childfctx, childann, True, diffopts
+        )
+        self.assertEqual(
+            tr(childann),
+            [
+                annotateline(b'old', 1),
+                annotateline(b'old', 2, True),
+                # note that this line was carried over from earlier so it is *not*
+                # marked skipped
+                annotateline(b'p2', 2),
+                annotateline(b'p2', 2, True),
+                annotateline(b'p2', 3),
+            ],
+        )
 
         childann = decorate(childdata, childfctx)
-        childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
-                                 diffopts)
-        self.assertEqual(tr(childann), [
-            annotateline(b'old', 1),
-            annotateline(b'old', 2, True),
-            annotateline(b'p1', 3),
-            annotateline(b'p1', 3, True),
-            annotateline(b'p2', 3),
-        ])
+        childann = _annotatepair(
+            [p2ann, p1ann], childfctx, childann, True, diffopts
+        )
+        self.assertEqual(
+            tr(childann),
+            [
+                annotateline(b'old', 1),
+                annotateline(b'old', 2, True),
+                annotateline(b'p1', 3),
+                annotateline(b'p1', 3, True),
+                annotateline(b'p2', 3),
+            ],
+        )
+
 
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
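
For readers of the assertions above: each annotation result is a list of `annotateline(fctx, lineno, skip)` records, one per line of the annotated file, naming the revision that introduced that line. A toy model using a namedtuple stand-in (not Mercurial's actual class):

    from collections import namedtuple

    annotateline = namedtuple('annotateline', 'fctx lineno skip')

    # the expected p1 annotation from the first assertion: lines 1-2
    # survive from 'old', line 3 was introduced by 'p1'
    p1ann = [annotateline(b'old', 1, False),
             annotateline(b'old', 2, False),
             annotateline(b'p1', 3, False)]
    assert [l.fctx for l in p1ann] == [b'old', b'old', b'p1']
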
--- a/tests/test-archive.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-archive.t	Mon Oct 21 11:09:48 2019 -0400
@@ -131,7 +131,8 @@
   server: testing stub value
   transfer-encoding: chunked
   
-  body: size=408, sha1=8fa06531bddecc365a9f5edb0f88b65974bfe505
+  body: size=408, sha1=8fa06531bddecc365a9f5edb0f88b65974bfe505 (no-py38 !)
+  body: size=506, sha1=70926a04cb8887d0bcccf5380488100a10222def (py38 !)
   % tar.bz2 and zip disallowed should both give 403
   403 Archive type not allowed: bz2
   content-type: text/html; charset=ascii
@@ -159,7 +160,8 @@
   server: testing stub value
   transfer-encoding: chunked
   
-  body: size=426, sha1=8d87f5aba6e14f1bfea6c232985982c278b2fb0b
+  body: size=426, sha1=8d87f5aba6e14f1bfea6c232985982c278b2fb0b (no-py38 !)
+  body: size=506, sha1=1bd1f8e8d3701704bd4385038bd9c09b81c77f4e (py38 !)
   % zip and tar.gz disallowed should both give 403
   403 Archive type not allowed: zip
   content-type: text/html; charset=ascii
@@ -218,7 +220,8 @@
   server: testing stub value
   transfer-encoding: chunked
   
-  body: size=408, sha1=8fa06531bddecc365a9f5edb0f88b65974bfe505
+  body: size=408, sha1=8fa06531bddecc365a9f5edb0f88b65974bfe505 (no-py38 !)
+  body: size=506, sha1=70926a04cb8887d0bcccf5380488100a10222def (py38 !)
   % tar.bz2 and zip disallowed should both give 403
   403 Archive type not allowed: bz2
   content-type: text/html; charset=ascii
@@ -246,7 +249,8 @@
   server: testing stub value
   transfer-encoding: chunked
   
-  body: size=426, sha1=8d87f5aba6e14f1bfea6c232985982c278b2fb0b
+  body: size=426, sha1=8d87f5aba6e14f1bfea6c232985982c278b2fb0b (no-py38 !)
+  body: size=506, sha1=1bd1f8e8d3701704bd4385038bd9c09b81c77f4e (py38 !)
   % zip and tar.gz disallowed should both give 403
   403 Archive type not allowed: zip
   content-type: text/html; charset=ascii
@@ -566,6 +570,19 @@
   *172*80*00:00*old/.hg_archival.txt (glob)
   *0*80*00:00*old/old (glob)
 
+test xz support, which is only available in Python 3
+
+#if py3
+  $ hg archive ../archive.txz
+  $ xz -l ../archive.txz | head -n1
+  Strms  Blocks   Compressed Uncompressed  Ratio  Check   Filename
+  $ rm -f ../archive.txz
+#else
+  $ hg archive ../archive.txz
+  abort: xz compression is only available in Python 3
+  [255]
+#endif
+
 show an error when a provided pattern matches no files
 
   $ hg archive -I file_that_does_not_exist.foo ../empty.zip
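
On the xz test added above: CPython 3 ships the lzma module, which `tarfile` exposes as the `'w:xz'` mode, and that is what makes `.txz` archives possible on py3 only. A minimal illustration using plain tarfile (not hg's archive code path; the file names are made up):

    import tarfile

    with open('somefile', 'w') as f:  # a file to archive
        f.write('hello\n')
    # 'w:xz' needs the py3-only lzma module, hence the #if py3 guard above
    with tarfile.open('archive.txz', mode='w:xz') as tf:
        tf.add('somefile')
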
--- a/tests/test-atomictempfile.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-atomictempfile.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,11 +11,13 @@
     pycompat,
     util,
 )
+
 atomictempfile = util.atomictempfile
 
 if pycompat.ispy3:
     xrange = range
 
+
 class testatomictempfile(unittest.TestCase):
     def setUp(self):
         self._testdir = tempfile.mkdtemp(b'atomictempfiletest')
@@ -28,15 +30,19 @@
         file = atomictempfile(self._filename)
         self.assertFalse(os.path.isfile(self._filename))
         tempfilename = file._tempname
-        self.assertTrue(tempfilename in glob.glob(
-            os.path.join(self._testdir, b'.testfilename-*')))
+        self.assertTrue(
+            tempfilename
+            in glob.glob(os.path.join(self._testdir, b'.testfilename-*'))
+        )
 
         file.write(b'argh\n')
         file.close()
 
         self.assertTrue(os.path.isfile(self._filename))
-        self.assertTrue(tempfilename not in glob.glob(
-            os.path.join(self._testdir, b'.testfilename-*')))
+        self.assertTrue(
+            tempfilename
+            not in glob.glob(os.path.join(self._testdir, b'.testfilename-*'))
+        )
 
     # discard() removes the temp file without making the write permanent
     def testdiscard(self):
@@ -84,7 +90,7 @@
 
             # st_mtime should be advanced "repetition" times, because
             # all atomicwrite() occurred at same time (in sec)
-            oldtime = (oldstat[stat.ST_MTIME] + repetition) & 0x7fffffff
+            oldtime = (oldstat[stat.ST_MTIME] + repetition) & 0x7FFFFFFF
             self.assertTrue(newstat[stat.ST_MTIME] == oldtime)
             # no more examination is needed, if assumption above is true
             break
@@ -120,6 +126,8 @@
             pass
         self.assertFalse(os.path.isfile(b'foo'))
 
+
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
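
The test above pins down `util.atomictempfile`'s contract: writes go to a dot-prefixed temp file beside the target, `close()` renames it into place, and `discard()` (or an error) removes it without touching the target. A generic sketch of that pattern, not Mercurial's implementation; `atomicwrite` is an invented name:

    import os
    import tempfile

    def atomicwrite(path, data):
        # write a sibling temp file, then rename over the target, so
        # readers never observe a half-written file
        d = os.path.dirname(path) or '.'
        fd, tmp = tempfile.mkstemp(prefix='.tmp-', dir=d)
        try:
            with os.fdopen(fd, 'wb') as f:
                f.write(data)
            os.rename(tmp, path)  # atomic on POSIX within one filesystem
        except Exception:
            os.unlink(tmp)  # "discard": drop the temp file, target untouched
            raise

    atomicwrite('target.txt', b'payload')
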
--- a/tests/test-bad-extension.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bad-extension.t	Mon Oct 21 11:09:48 2019 -0400
@@ -62,11 +62,14 @@
   *** failed to import extension badext2: No module named *badext2* (glob)
   Traceback (most recent call last):
   ImportError: No module named badext2 (no-py3 !)
-  ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
+  ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
   Traceback (most recent call last): (py3 !)
-  ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
+  ImportError: No module named 'hgext3rd.badext2' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'hgext3rd.badext2' (py36 !)
   Traceback (most recent call last): (py3 !)
-  ModuleNotFoundError: No module named 'badext2' (py3 !)
+  ImportError: No module named 'badext2' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'badext2' (py36 !)
 
 names of extensions failed to load can be accessed via extensions.notloaded()
 
@@ -108,20 +111,26 @@
   YYYY/MM/DD HH:MM:SS (PID)>     - could not import hgext.badext2 (No module named *badext2*): trying hgext3rd.badext2 (glob)
   Traceback (most recent call last):
   ImportError: No module named badext2 (no-py3 !)
-  ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
+  ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
   YYYY/MM/DD HH:MM:SS (PID)>     - could not import hgext3rd.badext2 (No module named *badext2*): trying badext2 (glob)
   Traceback (most recent call last):
   ImportError: No module named badext2 (no-py3 !)
-  ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
+  ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
   Traceback (most recent call last): (py3 !)
-  ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
+  ImportError: No module named 'hgext3rd.badext2' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'hgext3rd.badext2' (py36 !)
   *** failed to import extension badext2: No module named *badext2* (glob)
   Traceback (most recent call last):
-  ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
+  ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
   Traceback (most recent call last): (py3 !)
-  ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
+  ImportError: No module named 'hgext3rd.badext2' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'hgext3rd.badext2' (py36 !)
   Traceback (most recent call last): (py3 !)
-  ModuleNotFoundError: No module named 'badext2' (py3 !)
+  ModuleNotFoundError: No module named 'badext2' (py36 !)
+  ImportError: No module named 'badext2' (py3 no-py36 !)
   ImportError: No module named badext2 (no-py3 !)
   YYYY/MM/DD HH:MM:SS (PID)> > loaded 2 extensions, total time * (glob)
   YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
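
The `(py36 !)` / `(py3 no-py36 !)` split above exists because `ModuleNotFoundError` was introduced in Python 3.6 as a subclass of `ImportError`; 3.0 through 3.5 report a plain `ImportError` for a missing module. A quick demonstration (any nonexistent module name works):

    try:
        import badext2  # missing module, as in the test
    except ImportError as e:
        # 'ModuleNotFoundError' on Python >= 3.6, 'ImportError' before
        print(type(e).__name__)
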
--- a/tests/test-batching.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-batching.py	Mon Oct 21 11:09:48 2019 -0400
@@ -15,22 +15,27 @@
     wireprotov1peer,
 )
 
+
 def bprint(*bs):
     print(*[pycompat.sysstr(b) for b in bs])
 
+
 # equivalent of repo.repository
 class thing(object):
     def hello(self):
         return b"Ready."
 
+
 # equivalent of localrepo.localrepository
 class localthing(thing):
     def foo(self, one, two=None):
         if one:
             return b"%s and %s" % (one, two,)
         return b"Nope"
+
     def bar(self, b, a):
         return b"%s und %s" % (b, a,)
+
     def greet(self, name=None):
         return b"Hello, %s" % name
 
@@ -42,6 +47,7 @@
         finally:
             e.close()
 
+
 # usage of "thing" interface
 def use(it):
 
@@ -63,6 +69,7 @@
     bprint(fbar.result())
     bprint(fbar2.result())
 
+
 # local usage
 mylocal = localthing()
 print()
@@ -73,18 +80,24 @@
 
 # shared
 
+
 def escapearg(plain):
-    return (plain
-            .replace(b':', b'::')
-            .replace(b',', b':,')
-            .replace(b';', b':;')
-            .replace(b'=', b':='))
+    return (
+        plain.replace(b':', b'::')
+        .replace(b',', b':,')
+        .replace(b';', b':;')
+        .replace(b'=', b':=')
+    )
+
+
 def unescapearg(escaped):
-    return (escaped
-            .replace(b':=', b'=')
-            .replace(b':;', b';')
-            .replace(b':,', b',')
-            .replace(b'::', b':'))
+    return (
+        escaped.replace(b':=', b'=')
+        .replace(b':;', b';')
+        .replace(b':,', b',')
+        .replace(b'::', b':')
+    )
+
 
 # server side
 
@@ -92,9 +105,11 @@
 class server(object):
     def __init__(self, local):
         self.local = local
+
     def _call(self, name, args):
         args = dict(arg.split(b'=', 1) for arg in args)
         return getattr(self, name)(**args)
+
     def perform(self, req):
         bprint(b"REQ:", req)
         name, args = req.split(b'?', 1)
@@ -103,6 +118,7 @@
         res = getattr(self, pycompat.sysstr(name))(**pycompat.strkwargs(vals))
         bprint(b"  ->", res)
         return res
+
     def batch(self, cmds):
         res = []
         for pair in cmds.split(b';'):
@@ -112,15 +128,25 @@
                 if a:
                     n, v = a.split(b'=')
                     vals[n] = unescapearg(v)
-            res.append(escapearg(getattr(self, pycompat.sysstr(name))(
-                **pycompat.strkwargs(vals))))
+            res.append(
+                escapearg(
+                    getattr(self, pycompat.sysstr(name))(
+                        **pycompat.strkwargs(vals)
+                    )
+                )
+            )
         return b';'.join(res)
+
     def foo(self, one, two):
         return mangle(self.local.foo(unmangle(one), unmangle(two)))
+
     def bar(self, b, a):
         return mangle(self.local.bar(unmangle(b), unmangle(a)))
+
     def greet(self, name):
         return mangle(self.local.greet(unmangle(name)))
+
+
 myserver = server(mylocal)
 
 # local side
@@ -129,16 +155,21 @@
 # here we just transform the strings a bit to check we're properly en-/decoding
 def mangle(s):
     return b''.join(pycompat.bytechr(ord(c) + 1) for c in pycompat.bytestr(s))
+
+
 def unmangle(s):
     return b''.join(pycompat.bytechr(ord(c) - 1) for c in pycompat.bytestr(s))
 
+
 # equivalent of wireproto.wirerepository and something like http's wire format
 class remotething(thing):
     def __init__(self, server):
         self.server = server
+
     def _submitone(self, name, args):
         req = name + b'?' + b'&'.join([b'%s=%s' % (n, v) for n, v in args])
         return self.server.perform(req)
+
     def _submitbatch(self, cmds):
         req = []
         for name, args in cmds:
@@ -176,6 +207,7 @@
     def greet(self, name=None):
         return unmangle(self._submitone(b'greet', [(b'name', mangle(name),)]))
 
+
 # demo remote usage
 
 myproxy = remotething(myserver)
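
The escaping rewritten above is the batching wire format's quoting scheme: `:` is the escape character, so it is doubled first on encode and undone last on decode, while `,`, `;`, and `=` are the separators of the batch framing. A round-trip check using the same replacements as the code above:

    def escapearg(plain):
        return (plain.replace(b':', b'::')
                     .replace(b',', b':,')
                     .replace(b';', b':;')
                     .replace(b'=', b':='))

    def unescapearg(escaped):
        return (escaped.replace(b':=', b'=')
                       .replace(b':;', b';')
                       .replace(b':,', b',')
                       .replace(b'::', b':'))

    sample = b'k=v,with:colon;and=more'
    assert unescapearg(escapearg(sample)) == sample
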
--- a/tests/test-bdiff.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bdiff.py	Mon Oct 21 11:09:48 2019 -0400
@@ -3,25 +3,28 @@
 import struct
 import unittest
 
-from mercurial import (
-    mdiff,
-)
+from mercurial import mdiff
 
-class diffreplace(
-    collections.namedtuple('diffreplace', 'start end from_ to')):
+
+class diffreplace(collections.namedtuple('diffreplace', 'start end from_ to')):
     def __repr__(self):
         return 'diffreplace(%r, %r, %r, %r)' % self
 
+
 class BdiffTests(unittest.TestCase):
-
     def assert_bdiff_applies(self, a, b):
         d = mdiff.textdiff(a, b)
         c = a
         if d:
             c = mdiff.patches(a, [d])
         self.assertEqual(
-            c, b, ("bad diff+patch result from\n  %r to\n  "
-                   "%r: \nbdiff: %r\npatched: %r" % (a, b, d, c[:200])))
+            c,
+            b,
+            (
+                "bad diff+patch result from\n  %r to\n  "
+                "%r: \nbdiff: %r\npatched: %r" % (a, b, d, c[:200])
+            ),
+        )
 
     def assert_bdiff(self, a, b):
         self.assert_bdiff_applies(a, b)
@@ -58,11 +61,11 @@
         q = 0
         actions = []
         while pos < len(bin):
-            p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
+            p1, p2, l = struct.unpack(">lll", bin[pos : pos + 12])
             pos += 12
             if p1:
                 actions.append(a[q:p1])
-            actions.append(diffreplace(p1, p2, a[p1:p2], bin[pos:pos + l]))
+            actions.append(diffreplace(p1, p2, a[p1:p2], bin[pos : pos + l]))
             pos += l
             q = p2
         if q < len(a):
@@ -71,37 +74,55 @@
 
     def test_issue1295(self):
         cases = [
-            (b"x\n\nx\n\nx\n\nx\n\nz\n", b"x\n\nx\n\ny\n\nx\n\nx\n\nz\n",
-             [b'x\n\nx\n\n',
-              diffreplace(6, 6, b'', b'y\n\n'),
-              b'x\n\nx\n\nz\n']),
-            (b"x\n\nx\n\nx\n\nx\n\nz\n", b"x\n\nx\n\ny\n\nx\n\ny\n\nx\n\nz\n",
-             [b'x\n\nx\n\n',
-              diffreplace(6, 6, b'', b'y\n\n'),
-              b'x\n\n',
-              diffreplace(9, 9, b'', b'y\n\n'),
-              b'x\n\nz\n']),
+            (
+                b"x\n\nx\n\nx\n\nx\n\nz\n",
+                b"x\n\nx\n\ny\n\nx\n\nx\n\nz\n",
+                [
+                    b'x\n\nx\n\n',
+                    diffreplace(6, 6, b'', b'y\n\n'),
+                    b'x\n\nx\n\nz\n',
+                ],
+            ),
+            (
+                b"x\n\nx\n\nx\n\nx\n\nz\n",
+                b"x\n\nx\n\ny\n\nx\n\ny\n\nx\n\nz\n",
+                [
+                    b'x\n\nx\n\n',
+                    diffreplace(6, 6, b'', b'y\n\n'),
+                    b'x\n\n',
+                    diffreplace(9, 9, b'', b'y\n\n'),
+                    b'x\n\nz\n',
+                ],
+            ),
         ]
         for old, new, want in cases:
             self.assertEqual(self.showdiff(old, new), want)
 
     def test_issue1295_varies_on_pure(self):
-            # we should pick up abbbc. rather than bc.de as the longest match
-        got = self.showdiff(b"a\nb\nb\nb\nc\n.\nd\ne\n.\nf\n",
-                            b"a\nb\nb\na\nb\nb\nb\nc\n.\nb\nc\n.\nd\ne\nf\n")
-        want_c = [b'a\nb\nb\n',
-                  diffreplace(6, 6, b'', b'a\nb\nb\nb\nc\n.\n'),
-                  b'b\nc\n.\nd\ne\n',
-                  diffreplace(16, 18, b'.\n', b''),
-                  b'f\n']
-        want_pure = [diffreplace(0, 0, b'', b'a\nb\nb\n'),
-                     b'a\nb\nb\nb\nc\n.\n',
-                     diffreplace(12, 12, b'', b'b\nc\n.\n'),
-                     b'd\ne\n',
-                     diffreplace(16, 18, b'.\n', b''), b'f\n']
-        self.assertTrue(got in (want_c, want_pure),
-                        'got: %r, wanted either %r or %r' % (
-                            got, want_c, want_pure))
+        # we should pick up abbbc. rather than bc.de as the longest match
+        got = self.showdiff(
+            b"a\nb\nb\nb\nc\n.\nd\ne\n.\nf\n",
+            b"a\nb\nb\na\nb\nb\nb\nc\n.\nb\nc\n.\nd\ne\nf\n",
+        )
+        want_c = [
+            b'a\nb\nb\n',
+            diffreplace(6, 6, b'', b'a\nb\nb\nb\nc\n.\n'),
+            b'b\nc\n.\nd\ne\n',
+            diffreplace(16, 18, b'.\n', b''),
+            b'f\n',
+        ]
+        want_pure = [
+            diffreplace(0, 0, b'', b'a\nb\nb\n'),
+            b'a\nb\nb\nb\nc\n.\n',
+            diffreplace(12, 12, b'', b'b\nc\n.\n'),
+            b'd\ne\n',
+            diffreplace(16, 18, b'.\n', b''),
+            b'f\n',
+        ]
+        self.assertTrue(
+            got in (want_c, want_pure),
+            'got: %r, wanted either %r or %r' % (got, want_c, want_pure),
+        )
 
     def test_fixws(self):
         cases = [
@@ -113,39 +134,55 @@
         for a, b, allws in cases:
             c = mdiff.fixws(a, allws)
             self.assertEqual(
-                c, b, 'fixws(%r) want %r got %r (allws=%r)' % (a, b, c, allws))
+                c, b, 'fixws(%r) want %r got %r (allws=%r)' % (a, b, c, allws)
+            )
 
     def test_nice_diff_for_trivial_change(self):
-        self.assertEqual(self.showdiff(
-            b''.join(b'<%d\n-\n' % i for i in range(5)),
-            b''.join(b'>%d\n-\n' % i for i in range(5))),
-                         [diffreplace(0, 3, b'<0\n', b'>0\n'),
-                          b'-\n',
-                          diffreplace(5, 8, b'<1\n', b'>1\n'),
-                          b'-\n',
-                          diffreplace(10, 13, b'<2\n', b'>2\n'),
-                          b'-\n',
-                          diffreplace(15, 18, b'<3\n', b'>3\n'),
-                          b'-\n',
-                          diffreplace(20, 23, b'<4\n', b'>4\n'),
-                          b'-\n'])
+        self.assertEqual(
+            self.showdiff(
+                b''.join(b'<%d\n-\n' % i for i in range(5)),
+                b''.join(b'>%d\n-\n' % i for i in range(5)),
+            ),
+            [
+                diffreplace(0, 3, b'<0\n', b'>0\n'),
+                b'-\n',
+                diffreplace(5, 8, b'<1\n', b'>1\n'),
+                b'-\n',
+                diffreplace(10, 13, b'<2\n', b'>2\n'),
+                b'-\n',
+                diffreplace(15, 18, b'<3\n', b'>3\n'),
+                b'-\n',
+                diffreplace(20, 23, b'<4\n', b'>4\n'),
+                b'-\n',
+            ],
+        )
 
     def test_prefer_appending(self):
         # 1 line to 3 lines
-        self.assertEqual(self.showdiff(b'a\n', b'a\n' * 3),
-                         [b'a\n', diffreplace(2, 2, b'', b'a\na\n')])
+        self.assertEqual(
+            self.showdiff(b'a\n', b'a\n' * 3),
+            [b'a\n', diffreplace(2, 2, b'', b'a\na\n')],
+        )
         # 1 line to 5 lines
-        self.assertEqual(self.showdiff(b'a\n', b'a\n' * 5),
-                         [b'a\n', diffreplace(2, 2, b'', b'a\na\na\na\n')])
+        self.assertEqual(
+            self.showdiff(b'a\n', b'a\n' * 5),
+            [b'a\n', diffreplace(2, 2, b'', b'a\na\na\na\n')],
+        )
 
     def test_prefer_removing_trailing(self):
         # 3 lines to 1 line
-        self.assertEqual(self.showdiff(b'a\n' * 3, b'a\n'),
-                         [b'a\n', diffreplace(2, 6, b'a\na\n', b'')])
+        self.assertEqual(
+            self.showdiff(b'a\n' * 3, b'a\n'),
+            [b'a\n', diffreplace(2, 6, b'a\na\n', b'')],
+        )
         # 5 lines to 1 line
-        self.assertEqual(self.showdiff(b'a\n' * 5, b'a\n'),
-                         [b'a\n', diffreplace(2, 10, b'a\na\na\na\n', b'')])
+        self.assertEqual(
+            self.showdiff(b'a\n' * 5, b'a\n'),
+            [b'a\n', diffreplace(2, 10, b'a\na\na\na\n', b'')],
+        )
+
 
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
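
As a worked example of the binary format `showdiff` decodes above: each bdiff hunk is a big-endian `>lll` header -- start offset, end offset, replacement length -- followed by that many replacement bytes. Applying one hunk by hand:

    import struct

    # one hunk: replace bytes 2..6 of the source with b'XY'
    hunk = struct.pack('>lll', 2, 6, 2) + b'XY'
    p1, p2, l = struct.unpack('>lll', hunk[:12])

    a = b'0123456789'
    patched = a[:p1] + hunk[12:12 + l] + a[p2:]
    assert patched == b'01XY6789'
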
--- a/tests/test-bisect.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bisect.t	Mon Oct 21 11:09:48 2019 -0400
@@ -581,6 +581,7 @@
 ---------------------
 
   $ hg debugobsolete `hg id --debug -i -r tip`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg bisect --reset
   $ hg bisect --good 15
@@ -609,6 +610,7 @@
   $ hg commit -m 'msg 30 -- fixed'
   created new head
   $ hg debugobsolete `hg id --debug -i -r 30` `hg id --debug -i -r .`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg bisect
   The first bad revision is:
--- a/tests/test-blackbox.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-blackbox.t	Mon Oct 21 11:09:48 2019 -0400
@@ -140,7 +140,7 @@
   comparing with $TESTTMP/blackboxtest
   query 1; heads
   searching for changes
-  all local heads known remotely
+  all local changesets known remotely
   changeset:   2:d02f48003e62c24e2659d97d30f2a83abe5d5d51
   tag:         tip
   phase:       draft
--- a/tests/test-bookflow.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bookflow.t	Mon Oct 21 11:09:48 2019 -0400
@@ -242,8 +242,8 @@
   $ echo "more" >> test
   $ hg pull -u 2>&1 | fgrep -v TESTTMP| fgrep -v "searching for changes" | fgrep -v adding
   pulling from $TESTTMP/a
+  updating bookmark X
   added 1 changesets with 0 changes to 0 files (+1 heads)
-  updating bookmark X
   new changesets * (glob)
   updating to active bookmark X
   merging test
--- a/tests/test-bookmarks-corner-case.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bookmarks-corner-case.t	Mon Oct 21 11:09:48 2019 -0400
@@ -119,7 +119,7 @@
   > import atexit
   > import os
   > import time
-  > from mercurial import error, extensions, bookmarks
+  > from mercurial import bookmarks, error, extensions
   > 
   > def wait(repo):
   >     if not os.path.exists('push-A-started'):
@@ -200,8 +200,8 @@
   $ cat push-output.txt
   pushing to ssh://user@dummy/bookrace-server
   searching for changes
+  remote: setting raced push up
   remote has heads on branch 'default' that are not known locally: f26c3b5167d1
-  remote: setting raced push up
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
--- a/tests/test-bookmarks-merge.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bookmarks-merge.t	Mon Oct 21 11:09:48 2019 -0400
@@ -58,7 +58,7 @@
   (leaving bookmark e)
   $ hg merge
   abort: heads are bookmarked - please merge with an explicit rev
-  (run 'hg heads' to see all heads)
+  (run 'hg heads' to see all heads, specify rev with -r)
   [255]
 
 # our revision is bookmarked
@@ -68,7 +68,7 @@
   (activating bookmark e)
   $ hg merge
   abort: no matching bookmark to merge - please merge with an explicit rev or bookmark
-  (run 'hg heads' to see all heads)
+  (run 'hg heads' to see all heads, specify rev with -r)
   [255]
 
 # merge bookmark heads
@@ -148,5 +148,5 @@
   
   $ hg merge
   abort: heads are bookmarked - please merge with an explicit rev
-  (run 'hg heads' to see all heads)
+  (run 'hg heads' to see all heads, specify rev with -r)
   [255]
--- a/tests/test-bookmarks-pushpull.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bookmarks-pushpull.t	Mon Oct 21 11:09:48 2019 -0400
@@ -51,10 +51,10 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   adding remote bookmark X
   updating bookmark Y
   adding remote bookmark Z
+  added 1 changesets with 1 changes to 1 files
   new changesets 4e3505fd9583 (1 drafts)
   test-hook-bookmark: X:   -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
   test-hook-bookmark: Y:  0000000000000000000000000000000000000000 -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
@@ -193,7 +193,7 @@
   bundle2-input: payload chunk size: 0
   bundle2-input: part header size: 0
   bundle2-input: end of bundle2 stream
-  bundle2-input-bundle: 3 parts total
+  bundle2-input-bundle: 4 parts total
   running hook txnclose-bookmark.test: sh $TESTTMP/hook.sh
   test-hook-bookmark: W:  0000000000000000000000000000000000000000 -> 
   bundle2-output-bundle: "HG20", 1 parts total
@@ -219,7 +219,7 @@
   bundle2-input: payload chunk size: 0
   bundle2-input: part header size: 0
   bundle2-input: end of bundle2 stream
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   deleting remote bookmark W
   listing keys for "phases"
   [1]
@@ -307,7 +307,7 @@
   bundle2-input-part: total payload size 23
   bundle2-input: part header size: 0
   bundle2-input: end of bundle2 stream
-  bundle2-input-bundle: 3 parts total
+  bundle2-input-bundle: 4 parts total
   running hook txnclose-bookmark.test: sh $TESTTMP/hook.sh
   test-hook-bookmark: W:  0000000000000000000000000000000000000000 -> 
   bundle2-output-bundle: "HG20", 0 parts total
@@ -414,10 +414,10 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files (+1 heads)
   divergent bookmark @ stored as @foo
   divergent bookmark X stored as X@foo
   updating bookmark Z
+  added 1 changesets with 1 changes to 1 files (+1 heads)
   new changesets 0d2164f0ce0d (1 drafts)
   test-hook-bookmark: @foo:   -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
   test-hook-bookmark: X@foo:   -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
@@ -580,8 +580,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark Y
   added 1 changesets with 1 changes to 1 files
-  updating bookmark Y
   new changesets b0a5eff05604 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg book
@@ -629,8 +629,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark Y
   added 1 changesets with 1 changes to 1 files
-  updating bookmark Y
   new changesets 35d1ef0a8d1b (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg book
@@ -672,8 +672,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark Y
   added 1 changesets with 1 changes to 1 files
-  updating bookmark Y
   new changesets 0d60821d2197 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg book
@@ -742,6 +742,7 @@
 Unrelated marker does not alter the decision
 
   $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+  1 new obsolescence markers
   $ hg push http://localhost:$HGPORT2/
   pushing to http://localhost:$HGPORT2/
   searching for changes
@@ -763,8 +764,10 @@
   $ hg id --debug -r 5
   c922c0139ca03858f655e4a2af4dd02796a63969 tip Y
   $ hg debugobsolete f6fc62dde3c0771e29704af56ba4d8af77abcc2f cccccccccccccccccccccccccccccccccccccccc
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete cccccccccccccccccccccccccccccccccccccccc 4efff6d98829d9c824c621afd6e3f01865f5439f
+  1 new obsolescence markers
   $ hg push http://localhost:$HGPORT2/
   pushing to http://localhost:$HGPORT2/
   searching for changes
--- a/tests/test-bookmarks.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bookmarks.t	Mon Oct 21 11:09:48 2019 -0400
@@ -762,9 +762,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
   updating bookmark Y
   updating bookmark Z
+  added 2 changesets with 2 changes to 2 files (+1 heads)
   new changesets 125c9a1d6df6:9ba5f110a0b3
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
@@ -788,9 +788,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
   updating bookmark Y
   updating bookmark Z
+  added 2 changesets with 2 changes to 2 files (+1 heads)
   new changesets 125c9a1d6df6:9ba5f110a0b3
   updating to active bookmark Y
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -813,9 +813,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
   updating bookmark Y
   updating bookmark Z
+  added 2 changesets with 2 changes to 2 files (+1 heads)
   new changesets 125c9a1d6df6:9ba5f110a0b3
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg -R ../cloned-bookmarks-manual-update-with-divergence update
@@ -996,11 +996,11 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   divergent bookmark Z stored as Z@default
   adding remote bookmark foo
   adding remote bookmark four
   adding remote bookmark should-end-on-two
+  added 1 changesets with 1 changes to 1 files
   new changesets 5fb12f0f2d51
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R ../cloned-bookmarks-update parents -T "{rev}:{node|short}\n"
@@ -1023,8 +1023,8 @@
   adding changesets
   adding manifests
   adding file changes
+  divergent bookmark Z stored as Z@default
   added 1 changesets with 1 changes to 1 files
-  divergent bookmark Z stored as Z@default
   new changesets 81dcce76aa0b
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   updating bookmark Y
--- a/tests/test-bundle2-exchange.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bundle2-exchange.t	Mon Oct 21 11:09:48 2019 -0400
@@ -58,8 +58,8 @@
   adding changesets
   adding manifests
   adding file changes
+  pre-close-tip:02de42196ebe draft 
   added 8 changesets with 7 changes to 7 files (+3 heads)
-  pre-close-tip:02de42196ebe draft 
   new changesets cd010b8cd998:02de42196ebe (8 drafts)
   postclose-tip:02de42196ebe draft 
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:$ID$ HG_TXNNAME=unbundle
@@ -75,10 +75,12 @@
 
   $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
   pre-close-tip:02de42196ebe draft 
+  1 new obsolescence markers
   postclose-tip:02de42196ebe draft 
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
   $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
   pre-close-tip:02de42196ebe draft 
+  1 new obsolescence markers
   postclose-tip:02de42196ebe draft 
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
 
@@ -92,9 +94,9 @@
   adding changesets
   adding manifests
   adding file changes
+  pre-close-tip:9520eea781bc draft 
   added 2 changesets with 2 changes to 2 files
   1 new obsolescence markers
-  pre-close-tip:9520eea781bc draft 
   new changesets cd010b8cd998:9520eea781bc (1 drafts)
   postclose-tip:9520eea781bc draft 
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=9520eea781bcca16c1e15acc0ba14335a0e8e5ba HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
@@ -121,9 +123,9 @@
   adding changesets
   adding manifests
   adding file changes
+  pre-close-tip:24b6387c8c8c draft 
   added 1 changesets with 1 changes to 1 files (+1 heads)
   1 new obsolescence markers
-  pre-close-tip:24b6387c8c8c draft 
   new changesets 24b6387c8c8c (1 drafts)
   postclose-tip:24b6387c8c8c draft 
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_NODE_LAST=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
@@ -193,6 +195,7 @@
   txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
   pre-close-tip:02de42196ebe draft 
+  1 new obsolescence markers
   postclose-tip:02de42196ebe draft 
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
   $ hg -R main bookmark --rev 02de42196ebe book_02de
@@ -201,6 +204,7 @@
   txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
   pre-close-tip:02de42196ebe draft book_02de
+  1 new obsolescence markers
   postclose-tip:02de42196ebe draft book_02de
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
   $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
@@ -209,6 +213,7 @@
   txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
   pre-close-tip:02de42196ebe draft book_02de
+  1 new obsolescence markers
   postclose-tip:02de42196ebe draft book_02de
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
   $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
@@ -217,6 +222,7 @@
   txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
   pre-close-tip:02de42196ebe draft book_02de
+  1 new obsolescence markers
   postclose-tip:02de42196ebe draft book_02de
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
   $ hg -R main bookmark --rev 32af7686d403 book_32af
@@ -225,6 +231,7 @@
   txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
   pre-close-tip:02de42196ebe draft book_02de
+  1 new obsolescence markers
   postclose-tip:02de42196ebe draft book_02de
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
 
@@ -261,9 +268,9 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
+  remote: pre-close-tip:eea13746799a public book_eea1
   remote: added 1 changesets with 0 changes to 0 files (-1 heads)
   remote: 1 new obsolescence markers
-  remote: pre-close-tip:eea13746799a public book_eea1
   remote: pushkey: lock state after "bookmarks"
   remote: lock:  free
   remote: wlock: free
@@ -296,10 +303,10 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark book_02de
+  pre-close-tip:02de42196ebe draft book_02de
   added 1 changesets with 1 changes to 1 files (+1 heads)
   1 new obsolescence markers
-  updating bookmark book_02de
-  pre-close-tip:02de42196ebe draft book_02de
   new changesets 02de42196ebe (1 drafts)
   postclose-tip:02de42196ebe draft book_02de
   txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
@@ -322,10 +329,10 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark book_42cc
+  pre-close-tip:42ccdea3bb16 draft book_42cc
   added 1 changesets with 1 changes to 1 files (+1 heads)
   1 new obsolescence markers
-  updating bookmark book_42cc
-  pre-close-tip:42ccdea3bb16 draft book_42cc
   new changesets 42ccdea3bb16 (1 drafts)
   postclose-tip:42ccdea3bb16 draft book_42cc
   txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_NODE_LAST=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
@@ -347,9 +354,9 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
+  remote: pre-close-tip:5fddd98957c8 draft book_5fdd
   remote: added 1 changesets with 1 changes to 1 files
   remote: 1 new obsolescence markers
-  remote: pre-close-tip:5fddd98957c8 draft book_5fdd
   remote: pushkey: lock state after "bookmarks"
   remote: lock:  free
   remote: wlock: free
@@ -398,9 +405,9 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
+  remote: pre-close-tip:32af7686d403 public book_32af
   remote: added 1 changesets with 1 changes to 1 files
   remote: 1 new obsolescence markers
-  remote: pre-close-tip:32af7686d403 public book_32af
   remote: pushkey: lock state after "bookmarks"
   remote: lock:  free
   remote: wlock: free
@@ -624,7 +631,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: pre-close-tip:e7ec4e813ba6 draft 
   remote: You shall not pass!
   remote: transaction abort!
@@ -639,7 +645,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: pre-close-tip:e7ec4e813ba6 draft 
   remote: You shall not pass!
   remote: transaction abort!
@@ -655,7 +660,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: pre-close-tip:e7ec4e813ba6 draft 
   remote: You shall not pass!
   remote: transaction abort!
@@ -689,7 +693,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: Fail early!
   remote: transaction abort!
   remote: Cleaning up the mess...
@@ -702,7 +705,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: Fail early!
   remote: transaction abort!
   remote: Cleaning up the mess...
@@ -716,7 +718,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: Fail early!
   remote: transaction abort!
   remote: Cleaning up the mess...
@@ -740,7 +741,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   Fail early!
   transaction abort!
   Cleaning up the mess...
@@ -753,7 +753,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: Fail early!
   remote: transaction abort!
   remote: Cleaning up the mess...
@@ -767,7 +766,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: Fail early!
   remote: transaction abort!
   remote: Cleaning up the mess...
@@ -815,7 +813,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   do not push the key !
   pushkey-abort: prepushkey.failpush hook exited with status 1
   transaction abort!
@@ -829,7 +826,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: do not push the key !
   remote: pushkey-abort: prepushkey.failpush hook exited with status 1
   remote: transaction abort!
@@ -843,7 +839,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: do not push the key !
   remote: pushkey-abort: prepushkey.failpush hook exited with status 1
   remote: transaction abort!
@@ -885,7 +880,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   transaction abort!
   Cleaning up the mess...
   rollback completed
@@ -900,7 +894,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: transaction abort!
   remote: Cleaning up the mess...
   remote: rollback completed
@@ -915,7 +908,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: transaction abort!
   remote: Cleaning up the mess...
   remote: rollback completed
--- a/tests/test-bundle2-format.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bundle2-format.t	Mon Oct 21 11:09:48 2019 -0400
@@ -697,7 +697,7 @@
   bundle2-input: payload chunk size: 0
   bundle2-input: part header size: 0
   bundle2-input: end of bundle2 stream
-  bundle2-input-bundle: 6 parts total
+  bundle2-input-bundle: 7 parts total
   0 unread bytes
   3 total verses sung
 
@@ -1010,6 +1010,7 @@
 
   $ hg bundle2 --rev '8+7+5+4' --reply ../rev-rr.hg2
   $ hg unbundle2 ../rev-reply.hg2 < ../rev-rr.hg2
+  added 0 changesets with 0 changes to 3 files
   0 unread bytes
   addchangegroup return: 1
 
@@ -1021,13 +1022,11 @@
   0030: 2d 74 6f 31 72 65 74 75 72 6e 31 00 00 00 00 00 |-to1return1.....|
   0040: 00 00 1b 06 6f 75 74 70 75 74 00 00 00 01 00 01 |....output......|
   0050: 0b 01 69 6e 2d 72 65 70 6c 79 2d 74 6f 31 00 00 |..in-reply-to1..|
-  0060: 00 64 61 64 64 69 6e 67 20 63 68 61 6e 67 65 73 |.dadding changes|
+  0060: 00 37 61 64 64 69 6e 67 20 63 68 61 6e 67 65 73 |.7adding changes|
   0070: 65 74 73 0a 61 64 64 69 6e 67 20 6d 61 6e 69 66 |ets.adding manif|
   0080: 65 73 74 73 0a 61 64 64 69 6e 67 20 66 69 6c 65 |ests.adding file|
-  0090: 20 63 68 61 6e 67 65 73 0a 61 64 64 65 64 20 30 | changes.added 0|
-  00a0: 20 63 68 61 6e 67 65 73 65 74 73 20 77 69 74 68 | changesets with|
-  00b0: 20 30 20 63 68 61 6e 67 65 73 20 74 6f 20 33 20 | 0 changes to 3 |
-  00c0: 66 69 6c 65 73 0a 00 00 00 00 00 00 00 00       |files.........|
+  0090: 20 63 68 61 6e 67 65 73 0a 00 00 00 00 00 00 00 | changes........|
+  00a0: 00                                              |.|
 
 Check handling of exception during generation.
 ----------------------------------------------
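
The hex-dump change above shows the same cleanup at the wire level: the reply's output part no longer embeds the "added 0 changesets ..." line, so the four-byte payload chunk size field drops from 0x64 (100 bytes) to 0x37 (55 bytes). The difference is exactly the removed line:

    # arithmetic on values visible in the dump above
    removed = b'added 0 changesets with 0 changes to 3 files\n'
    assert 0x64 - 0x37 == len(removed) == 45
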
--- a/tests/test-bundle2-multiple-changegroups.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bundle2-multiple-changegroups.t	Mon Oct 21 11:09:48 2019 -0400
@@ -80,7 +80,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup
   HG_HOOKTYPE=pretxnchangegroup
   HG_NODE=27547f69f25460a52fff66ad004e58da7ad3fb56
@@ -96,7 +95,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup
   HG_HOOKTYPE=pretxnchangegroup
   HG_NODE=f838bfaca5c7226600ebcfd84f3c3c13a28d3757
@@ -109,6 +107,7 @@
   file:/*/$TESTTMP/repo (glob)
   HG_URL=file:$TESTTMP/repo
   
+  added 2 changesets with 2 changes to 2 files
   new changesets 27547f69f254:f838bfaca5c7
   changegroup hook: HG_HOOKNAME=changegroup
   HG_HOOKTYPE=changegroup
@@ -208,7 +207,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
   pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup
   HG_HOOKTYPE=pretxnchangegroup
   HG_NODE=b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e
@@ -224,7 +222,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 3 changes to 3 files (+1 heads)
   pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup
   HG_HOOKTYPE=pretxnchangegroup
   HG_NODE=7f219660301fe4c8a116f714df5e769695cc2b46
@@ -237,6 +234,7 @@
   file:/*/$TESTTMP/repo (glob)
   HG_URL=file:$TESTTMP/repo
   
+  added 5 changesets with 5 changes to 5 files (+2 heads)
   new changesets b3325c91a4d9:5cd59d311f65
   changegroup hook: HG_HOOKNAME=changegroup
   HG_HOOKTYPE=changegroup
@@ -365,7 +363,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 0 changes to 0 files (-1 heads)
   pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup
   HG_HOOKTYPE=pretxnchangegroup
   HG_NODE=71bd7b46de72e69a32455bf88d04757d542e6cf4
@@ -381,7 +378,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup
   HG_HOOKTYPE=pretxnchangegroup
   HG_NODE=9d18e5bd9ab09337802595d49f1dad0c98df4d84
@@ -394,6 +390,7 @@
   file:/*/$TESTTMP/repo (glob)
   HG_URL=file:$TESTTMP/repo
   
+  added 2 changesets with 1 changes to 1 files (-1 heads)
   new changesets 71bd7b46de72:9d18e5bd9ab0
   changegroup hook: HG_HOOKNAME=changegroup
   HG_HOOKTYPE=changegroup
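
test-bundle2-multiple-changegroups.t makes the new aggregation explicit: with several changegroups in one bundle, each part used to print its own summary, and now the counts are accumulated across parts so that a single combined line appears after the per-part hook output ("added 2 changesets with 2 changes to 2 files" above, and head deltas summed to "+2 heads" further down). A hypothetical sketch of that accumulation:

    # Hypothetical sketch: per-changegroup counters accumulate in shared
    # transaction state; one combined summary is printed at the end.
    changes = {'changesets': 0, 'changedfiles': 0, 'files': set()}

    def applychangegroup(nodes, filenames):
        changes['changesets'] += len(nodes)
        changes['changedfiles'] += len(filenames)
        changes['files'].update(filenames)

    # node ids from the hunk above; the filenames are made up for illustration
    applychangegroup(['27547f69f254'], ['file1'])   # first changegroup part
    applychangegroup(['f838bfaca5c7'], ['file2'])   # second changegroup part
    print('added %d changesets with %d changes to %d files'
          % (changes['changesets'], changes['changedfiles'],
             len(changes['files'])))
    # prints: added 2 changesets with 2 changes to 2 files
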
--- a/tests/test-bundle2-remote-changegroup.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-bundle2-remote-changegroup.t	Mon Oct 21 11:09:48 2019 -0400
@@ -202,12 +202,11 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
   remote: changegroup
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 2 changes to 2 files (+1 heads)
+  added 5 changesets with 4 changes to 4 files (+2 heads)
   new changesets 32af7686d403:02de42196ebe
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg -R clone log -G
@@ -252,12 +251,11 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
   remote: remote-changegroup
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 2 changes to 2 files (+1 heads)
+  added 5 changesets with 4 changes to 4 files (+2 heads)
   new changesets 32af7686d403:02de42196ebe
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg -R clone log -G
@@ -305,17 +303,15 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
   remote: remote-changegroup
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 1 changes to 1 files
   remote: changegroup
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files (+1 heads)
+  added 5 changesets with 4 changes to 4 files (+2 heads)
   new changesets 32af7686d403:02de42196ebe
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg -R clone log -G
@@ -383,7 +379,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 8 changesets with 7 changes to 7 files (+2 heads)
   transaction abort!
   rollback completed
   abort: bundle at http://localhost:$HGPORT/bundle6.hg is corrupted:
@@ -418,7 +413,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 8 changesets with 7 changes to 7 files (+2 heads)
   transaction abort!
   rollback completed
   abort: bundle at http://localhost:$HGPORT/bundle6.hg is corrupted:
@@ -434,7 +428,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 8 changesets with 7 changes to 7 files (+2 heads)
   transaction abort!
   rollback completed
   abort: bundle at http://localhost:$HGPORT/bundle6.hg is corrupted:
@@ -464,12 +457,10 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
   remote: remote-changegroup
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 1 changes to 1 files
   transaction abort!
   rollback completed
   abort: bundle at http://localhost:$HGPORT/bundle5.hg is corrupted:
@@ -534,7 +525,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
   transaction abort!
   rollback completed
   abort: bundle at http://localhost:$HGPORT/bundle4.hg is corrupted:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-byteify-strings.t	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,266 @@
+#require py3
+
+  $ byteify_strings () {
+  >   $PYTHON "$TESTDIR/../contrib/byteify-strings.py" "$@"
+  > }
+
+Test version
+
+  $ byteify_strings --version
+  Byteify strings * (glob)
+
+Test in-place
+
+  $ cat > testfile.py <<EOF
+  > obj['test'] = b"1234"
+  > mydict.iteritems()
+  > EOF
+  $ byteify_strings testfile.py -i
+  $ cat testfile.py
+  obj[b'test'] = b"1234"
+  mydict.iteritems()
+
+Test with dictiter
+
+  $ cat > testfile.py <<EOF
+  > obj['test'] = b"1234"
+  > mydict.iteritems()
+  > EOF
+  $ byteify_strings testfile.py --dictiter
+  obj[b'test'] = b"1234"
+  mydict.items()
+
+Test kwargs-like objects
+
+  $ cat > testfile.py <<EOF
+  > kwargs['test'] = "123"
+  > kwargs[test['testing']]
+  > kwargs[test[[['testing']]]]
+  > kwargs[kwargs['testing']]
+  > kwargs.get('test')
+  > kwargs.pop('test')
+  > kwargs.get('test', 'testing')
+  > kwargs.pop('test', 'testing')
+  > kwargs.setdefault('test', 'testing')
+  > 
+  > opts['test'] = "123"
+  > opts[test['testing']]
+  > opts[test[[['testing']]]]
+  > opts[opts['testing']]
+  > opts.get('test')
+  > opts.pop('test')
+  > opts.get('test', 'testing')
+  > opts.pop('test', 'testing')
+  > opts.setdefault('test', 'testing')
+  > 
+  > commitopts['test'] = "123"
+  > commitopts[test['testing']]
+  > commitopts[test[[['testing']]]]
+  > commitopts[commitopts['testing']]
+  > commitopts.get('test')
+  > commitopts.pop('test')
+  > commitopts.get('test', 'testing')
+  > commitopts.pop('test', 'testing')
+  > commitopts.setdefault('test', 'testing')
+  > EOF
+  $ byteify_strings testfile.py --treat-as-kwargs kwargs opts commitopts
+  kwargs['test'] = b"123"
+  kwargs[test[b'testing']]
+  kwargs[test[[[b'testing']]]]
+  kwargs[kwargs['testing']]
+  kwargs.get('test')
+  kwargs.pop('test')
+  kwargs.get('test', b'testing')
+  kwargs.pop('test', b'testing')
+  kwargs.setdefault('test', b'testing')
+  
+  opts['test'] = b"123"
+  opts[test[b'testing']]
+  opts[test[[[b'testing']]]]
+  opts[opts['testing']]
+  opts.get('test')
+  opts.pop('test')
+  opts.get('test', b'testing')
+  opts.pop('test', b'testing')
+  opts.setdefault('test', b'testing')
+  
+  commitopts['test'] = b"123"
+  commitopts[test[b'testing']]
+  commitopts[test[[[b'testing']]]]
+  commitopts[commitopts['testing']]
+  commitopts.get('test')
+  commitopts.pop('test')
+  commitopts.get('test', b'testing')
+  commitopts.pop('test', b'testing')
+  commitopts.setdefault('test', b'testing')
+
+Test attr*() as methods
+
+  $ cat > testfile.py <<EOF
+  > setattr(o, 'a', 1)
+  > util.setattr(o, 'ae', 1)
+  > util.getattr(o, 'alksjdf', 'default')
+  > util.addattr(o, 'asdf')
+  > util.hasattr(o, 'lksjdf', 'default')
+  > util.safehasattr(o, 'lksjdf', 'default')
+  > @eh.wrapfunction(func, 'lksjdf')
+  > def f():
+  >     pass
+  > @eh.wrapclass(klass, 'lksjdf')
+  > def f():
+  >     pass
+  > EOF
+  $ byteify_strings testfile.py --allow-attr-methods
+  setattr(o, 'a', 1)
+  util.setattr(o, 'ae', 1)
+  util.getattr(o, 'alksjdf', b'default')
+  util.addattr(o, 'asdf')
+  util.hasattr(o, 'lksjdf', b'default')
+  util.safehasattr(o, 'lksjdf', b'default')
+  @eh.wrapfunction(func, 'lksjdf')
+  def f():
+      pass
+  @eh.wrapclass(klass, 'lksjdf')
+  def f():
+      pass
+
+Test without attr*() as methods
+
+  $ cat > testfile.py <<EOF
+  > setattr(o, 'a', 1)
+  > util.setattr(o, 'ae', 1)
+  > util.getattr(o, 'alksjdf', 'default')
+  > util.addattr(o, 'asdf')
+  > util.hasattr(o, 'lksjdf', 'default')
+  > util.safehasattr(o, 'lksjdf', 'default')
+  > @eh.wrapfunction(func, 'lksjdf')
+  > def f():
+  >     pass
+  > @eh.wrapclass(klass, 'lksjdf')
+  > def f():
+  >     pass
+  > EOF
+  $ byteify_strings testfile.py
+  setattr(o, 'a', 1)
+  util.setattr(o, b'ae', 1)
+  util.getattr(o, b'alksjdf', b'default')
+  util.addattr(o, b'asdf')
+  util.hasattr(o, b'lksjdf', b'default')
+  util.safehasattr(o, b'lksjdf', b'default')
+  @eh.wrapfunction(func, b'lksjdf')
+  def f():
+      pass
+  @eh.wrapclass(klass, b'lksjdf')
+  def f():
+      pass
+
+Test ignore comments
+
+  $ cat > testfile.py <<EOF
+  > # py3-transform: off
+  > "none"
+  > "of"
+  > 'these'
+  > s = """should"""
+  > d = '''be'''
+  > # py3-transform: on
+  > "this should"
+  > 'and this also'
+  > 
+  > # no-py3-transform
+  > l = "this should be ignored"
+  > l2 = "this shouldn't"
+  > 
+  > EOF
+  $ byteify_strings testfile.py
+  # py3-transform: off
+  "none"
+  "of"
+  'these'
+  s = """should"""
+  d = '''be'''
+  # py3-transform: on
+  b"this should"
+  b'and this also'
+  
+  # no-py3-transform
+  l = "this should be ignored"
+  l2 = b"this shouldn't"
+  
+Test triple-quoted strings
+
+  $ cat > testfile.py <<EOF
+  > """This is ignored
+  > """
+  > 
+  > line = """
+  >   This should not be
+  > """
+  > line = '''
+  > Neither should this
+  > '''
+  > EOF
+  $ byteify_strings testfile.py
+  """This is ignored
+  """
+  
+  line = b"""
+    This should not be
+  """
+  line = b'''
+  Neither should this
+  '''
+
+Test prefixed strings
+
+  $ cat > testfile.py <<EOF
+  > obj['test'] = b"1234"
+  > obj[r'test'] = u"1234"
+  > EOF
+  $ byteify_strings testfile.py
+  obj[b'test'] = b"1234"
+  obj[r'test'] = u"1234"
+
+Test multi-line alignment
+
+  $ cat > testfile.py <<'EOF'
+  > def foo():
+  >     error.Abort(_("foo"
+  >                  "bar"
+  >                  "%s")
+  >                % parameter)
+  > {
+  >     'test': dict,
+  >     'test2': dict,
+  > }
+  > [
+  >    "thing",
+  >    "thing2"
+  > ]
+  > (
+  >    "tuple",
+  >    "tuple2",
+  > )
+  > {"thing",
+  >  }
+  > EOF
+  $ byteify_strings testfile.py
+  def foo():
+      error.Abort(_(b"foo"
+                    b"bar"
+                    b"%s")
+                  % parameter)
+  {
+      b'test': dict,
+      b'test2': dict,
+  }
+  [
+     b"thing",
+     b"thing2"
+  ]
+  (
+     b"tuple",
+     b"tuple2",
+  )
+  {b"thing",
+   }
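
The new test file above documents contrib/byteify-strings.py, the helper that rewrites string literals to bytes literals for Python 3, including its opt-outs (kwargs-style dictionaries, attr* helpers, the py3-transform comment markers). The core idea can be sketched with the standard tokenize module; this toy handles only the simplest case exercised above, where bare literals gain a b prefix and already-prefixed ones pass through:

    # A minimal sketch, assuming only the simplest transformation; the real
    # contrib/byteify-strings.py covers kwargs, attr helpers, comments, etc.
    import io
    import tokenize

    def byteify(source):
        tokens = []
        for tok in tokenize.generate_tokens(io.StringIO(source).readline):
            # Only prefix literals that start with a quote character, so
            # b'', r'' and u'' prefixed strings are left unchanged.
            if tok.type == tokenize.STRING and tok.string[0] in '\'"':
                tok = tok._replace(string='b' + tok.string)
            tokens.append(tok)
        return tokenize.untokenize(tokens)

    print(byteify("obj['test'] = \"1234\"\n"), end='')
    # obj[b'test'] = b"1234"
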
--- a/tests/test-cache-abuse.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-cache-abuse.t	Mon Oct 21 11:09:48 2019 -0400
@@ -24,6 +24,7 @@
   $ echo dumb > dumb
   $ hg ci -qAmdumb
   $ hg debugobsolete b1174d11b69e63cb0c5726621a43c859f0858d7f
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg phase -pr t1
--- a/tests/test-cappedreader.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-cappedreader.py	Mon Oct 21 11:09:48 2019 -0400
@@ -3,9 +3,8 @@
 import io
 import unittest
 
-from mercurial import (
-    util,
-)
+from mercurial import util
+
 
 class CappedReaderTests(unittest.TestCase):
     def testreadfull(self):
@@ -86,6 +85,8 @@
         self.assertEqual(res, b'')
         self.assertEqual(source.tell(), 100)
 
+
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
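
The changes to the Python-based tests here and below (test-cappedreader.py, test-cbor.py) are mechanical reformatting rather than behavior changes: single-name imports collapsed onto one line, two blank lines between top-level definitions, wrapped call arguments placed one per line with trailing commas, and spaced power operators (2**15 becomes 2 ** 15). None of it affects what the tests assert, for example:

    # formatting-only rewrites compare equal
    assert 2**15 == 2 ** 15
    assert (False, None, -6,) == (False, None, -6)
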
--- a/tests/test-cbor.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-cbor.py	Mon Oct 21 11:09:48 2019 -0400
@@ -5,44 +5,46 @@
 import unittest
 
 # TODO migrate to canned cbor test strings and stop using thirdparty.cbor
-tpp = os.path.normpath(os.path.join(os.path.dirname(__file__),
-                                    '..', 'mercurial', 'thirdparty'))
+tpp = os.path.normpath(
+    os.path.join(os.path.dirname(__file__), '..', 'mercurial', 'thirdparty')
+)
 if not os.path.exists(tpp):
     # skip, not in a repo
     sys.exit(80)
 sys.path[0:0] = [tpp]
 import cbor
+
 del sys.path[0]
 
-from mercurial.utils import (
-    cborutil,
-)
+from mercurial.utils import cborutil
+
 
 class TestCase(unittest.TestCase):
     if not getattr(unittest.TestCase, 'assertRaisesRegex', False):
         # Python 3.7 deprecates the regex*p* version, but 2.7 lacks
         # the regex version.
-        assertRaisesRegex = (# camelcase-required
-            unittest.TestCase.assertRaisesRegexp)
+        assertRaisesRegex = (  # camelcase-required
+            unittest.TestCase.assertRaisesRegexp
+        )
+
 
 def loadit(it):
     return cbor.loads(b''.join(it))
 
+
 class BytestringTests(TestCase):
     def testsimple(self):
         self.assertEqual(
-            list(cborutil.streamencode(b'foobar')),
-            [b'\x46', b'foobar'])
+            list(cborutil.streamencode(b'foobar')), [b'\x46', b'foobar']
+        )
+
+        self.assertEqual(loadit(cborutil.streamencode(b'foobar')), b'foobar')
+
+        self.assertEqual(cborutil.decodeall(b'\x46foobar'), [b'foobar'])
 
         self.assertEqual(
-            loadit(cborutil.streamencode(b'foobar')),
-            b'foobar')
-
-        self.assertEqual(cborutil.decodeall(b'\x46foobar'),
-                         [b'foobar'])
-
-        self.assertEqual(cborutil.decodeall(b'\x46foobar\x45fizbi'),
-                         [b'foobar', b'fizbi'])
+            cborutil.decodeall(b'\x46foobar\x45fizbi'), [b'foobar', b'fizbi']
+        )
 
     def testlong(self):
         source = b'x' * 1048576
@@ -65,19 +67,26 @@
                 b'\x43',
                 b'\xee\xff\x99',
                 b'\xff',
-            ])
+            ],
+        )
 
         self.assertEqual(
             loadit(cborutil.streamencodebytestringfromiter(source)),
-            b''.join(source))
+            b''.join(source),
+        )
 
-        self.assertEqual(cborutil.decodeall(b'\x5f\x44\xaa\xbb\xcc\xdd'
-                                            b'\x43\xee\xff\x99\xff'),
-                         [b'\xaa\xbb\xcc\xdd', b'\xee\xff\x99', b''])
+        self.assertEqual(
+            cborutil.decodeall(
+                b'\x5f\x44\xaa\xbb\xcc\xdd' b'\x43\xee\xff\x99\xff'
+            ),
+            [b'\xaa\xbb\xcc\xdd', b'\xee\xff\x99', b''],
+        )
 
         for i, chunk in enumerate(
-            cborutil.decodeall(b'\x5f\x44\xaa\xbb\xcc\xdd'
-                               b'\x43\xee\xff\x99\xff')):
+            cborutil.decodeall(
+                b'\x5f\x44\xaa\xbb\xcc\xdd' b'\x43\xee\xff\x99\xff'
+            )
+        ):
             self.assertIsInstance(chunk, cborutil.bytestringchunk)
 
             if i == 0:
@@ -95,7 +104,8 @@
 
         self.assertEqual(
             loadit(cborutil.streamencodebytestringfromiter(source)),
-            b''.join(source))
+            b''.join(source),
+        )
 
     def testindefinite(self):
         source = b'\x00\x01\x02\x03' + b'\xff' * 16384
@@ -110,8 +120,9 @@
         self.assertEqual(next(it), b'\x42')
         self.assertEqual(next(it), b'\xff\xff')
 
-        dest = b''.join(cborutil.streamencodeindefinitebytestring(
-            source, chunksize=42))
+        dest = b''.join(
+            cborutil.streamencodeindefinitebytestring(source, chunksize=42)
+        )
         self.assertEqual(cbor.loads(dest), source)
 
         self.assertEqual(b''.join(cborutil.decodeall(dest)), source)
@@ -140,27 +151,42 @@
             elif len(source) < 1048576:
                 hlen = 5
 
-            self.assertEqual(cborutil.decodeitem(encoded),
-                             (True, source, hlen + len(source),
-                              cborutil.SPECIAL_NONE))
+            self.assertEqual(
+                cborutil.decodeitem(encoded),
+                (True, source, hlen + len(source), cborutil.SPECIAL_NONE),
+            )
 
     def testpartialdecode(self):
         encoded = b''.join(cborutil.streamencode(b'foobar'))
 
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -6, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -5, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (False, None, -4, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
-                         (False, None, -3, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:7]),
-                         (True, b'foobar', 7, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -6, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -5, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (False, None, -4, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:4]),
+            (False, None, -3, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:5]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:6]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:7]),
+            (True, b'foobar', 7, cborutil.SPECIAL_NONE),
+        )
 
     def testpartialdecodevariouslengths(self):
         lens = [
@@ -192,11 +218,11 @@
         for size in lens:
             if size < 24:
                 hlen = 1
-            elif size < 2**8:
+            elif size < 2 ** 8:
                 hlen = 2
-            elif size < 2**16:
+            elif size < 2 ** 16:
                 hlen = 3
-            elif size < 2**32:
+            elif size < 2 ** 32:
                 hlen = 5
             else:
                 assert False
@@ -207,107 +233,158 @@
             res = cborutil.decodeitem(encoded[0:1])
 
             if hlen > 1:
-                self.assertEqual(res, (False, None, -(hlen - 1),
-                                       cborutil.SPECIAL_NONE))
+                self.assertEqual(
+                    res, (False, None, -(hlen - 1), cborutil.SPECIAL_NONE)
+                )
             else:
-                self.assertEqual(res, (False, None, -(size + hlen - 1),
-                                       cborutil.SPECIAL_NONE))
+                self.assertEqual(
+                    res,
+                    (False, None, -(size + hlen - 1), cborutil.SPECIAL_NONE),
+                )
 
             # Decoding partial header reports remaining header size.
             for i in range(hlen - 1):
-                self.assertEqual(cborutil.decodeitem(encoded[0:i + 1]),
-                                 (False, None, -(hlen - i - 1),
-                                  cborutil.SPECIAL_NONE))
+                self.assertEqual(
+                    cborutil.decodeitem(encoded[0 : i + 1]),
+                    (False, None, -(hlen - i - 1), cborutil.SPECIAL_NONE),
+                )
 
             # Decoding complete header reports item size.
-            self.assertEqual(cborutil.decodeitem(encoded[0:hlen]),
-                             (False, None, -size, cborutil.SPECIAL_NONE))
+            self.assertEqual(
+                cborutil.decodeitem(encoded[0:hlen]),
+                (False, None, -size, cborutil.SPECIAL_NONE),
+            )
 
             # Decoding single byte after header reports item size - 1
-            self.assertEqual(cborutil.decodeitem(encoded[0:hlen + 1]),
-                             (False, None, -(size - 1), cborutil.SPECIAL_NONE))
+            self.assertEqual(
+                cborutil.decodeitem(encoded[0 : hlen + 1]),
+                (False, None, -(size - 1), cborutil.SPECIAL_NONE),
+            )
 
             # Decoding all but the last byte reports -1 needed.
-            self.assertEqual(cborutil.decodeitem(encoded[0:hlen + size - 1]),
-                             (False, None, -1, cborutil.SPECIAL_NONE))
+            self.assertEqual(
+                cborutil.decodeitem(encoded[0 : hlen + size - 1]),
+                (False, None, -1, cborutil.SPECIAL_NONE),
+            )
 
             # Decoding last byte retrieves value.
-            self.assertEqual(cborutil.decodeitem(encoded[0:hlen + size]),
-                             (True, source, hlen + size, cborutil.SPECIAL_NONE))
+            self.assertEqual(
+                cborutil.decodeitem(encoded[0 : hlen + size]),
+                (True, source, hlen + size, cborutil.SPECIAL_NONE),
+            )
 
     def testindefinitepartialdecode(self):
-        encoded = b''.join(cborutil.streamencodebytestringfromiter(
-            [b'foobar', b'biz']))
+        encoded = b''.join(
+            cborutil.streamencodebytestringfromiter([b'foobar', b'biz'])
+        )
 
         # First item should be begin of bytestring special.
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (True, None, 1,
-                          cborutil.SPECIAL_START_INDEFINITE_BYTESTRING))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (True, None, 1, cborutil.SPECIAL_START_INDEFINITE_BYTESTRING),
+        )
 
         # Second item should be the first chunk. But only available when
         # we give it 7 bytes (1 byte header + 6 byte chunk).
-        self.assertEqual(cborutil.decodeitem(encoded[1:2]),
-                         (False, None, -6, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[1:3]),
-                         (False, None, -5, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[1:4]),
-                         (False, None, -4, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[1:5]),
-                         (False, None, -3, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[1:6]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[1:7]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[1:2]),
+            (False, None, -6, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[1:3]),
+            (False, None, -5, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[1:4]),
+            (False, None, -4, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[1:5]),
+            (False, None, -3, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[1:6]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[1:7]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
 
-        self.assertEqual(cborutil.decodeitem(encoded[1:8]),
-                         (True, b'foobar', 7, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[1:8]),
+            (True, b'foobar', 7, cborutil.SPECIAL_NONE),
+        )
 
         # Third item should be second chunk. But only available when
         # we give it 4 bytes (1 byte header + 3 byte chunk).
-        self.assertEqual(cborutil.decodeitem(encoded[8:9]),
-                         (False, None, -3, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[8:10]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[8:11]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[8:9]),
+            (False, None, -3, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[8:10]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[8:11]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
 
-        self.assertEqual(cborutil.decodeitem(encoded[8:12]),
-                         (True, b'biz', 4, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[8:12]),
+            (True, b'biz', 4, cborutil.SPECIAL_NONE),
+        )
 
         # Fourth item should be end of indefinite stream marker.
-        self.assertEqual(cborutil.decodeitem(encoded[12:13]),
-                         (True, None, 1, cborutil.SPECIAL_INDEFINITE_BREAK))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[12:13]),
+            (True, None, 1, cborutil.SPECIAL_INDEFINITE_BREAK),
+        )
 
         # Now test the behavior when going through the decoder.
 
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:1]),
-                         (False, 1, 0))
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:2]),
-                         (False, 1, 6))
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:3]),
-                         (False, 1, 5))
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:4]),
-                         (False, 1, 4))
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:5]),
-                         (False, 1, 3))
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:6]),
-                         (False, 1, 2))
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:7]),
-                         (False, 1, 1))
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:8]),
-                         (True, 8, 0))
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:1]), (False, 1, 0)
+        )
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:2]), (False, 1, 6)
+        )
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:3]), (False, 1, 5)
+        )
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:4]), (False, 1, 4)
+        )
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:5]), (False, 1, 3)
+        )
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:6]), (False, 1, 2)
+        )
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:7]), (False, 1, 1)
+        )
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:8]), (True, 8, 0)
+        )
 
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:9]),
-                         (True, 8, 3))
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:10]),
-                         (True, 8, 2))
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:11]),
-                         (True, 8, 1))
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:12]),
-                         (True, 12, 0))
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:9]), (True, 8, 3)
+        )
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:10]), (True, 8, 2)
+        )
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:11]), (True, 8, 1)
+        )
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:12]), (True, 12, 0)
+        )
 
-        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:13]),
-                         (True, 13, 0))
+        self.assertEqual(
+            cborutil.sansiodecoder().decode(encoded[0:13]), (True, 13, 0)
+        )
 
         decoder = cborutil.sansiodecoder()
         decoder.decode(encoded[0:8])
@@ -316,27 +393,28 @@
         self.assertTrue(values[0].isfirst)
         self.assertFalse(values[0].islast)
 
-        self.assertEqual(decoder.decode(encoded[8:12]),
-                         (True, 4, 0))
+        self.assertEqual(decoder.decode(encoded[8:12]), (True, 4, 0))
         values = decoder.getavailable()
         self.assertEqual(values, [b'biz'])
         self.assertFalse(values[0].isfirst)
         self.assertFalse(values[0].islast)
 
-        self.assertEqual(decoder.decode(encoded[12:]),
-                         (True, 1, 0))
+        self.assertEqual(decoder.decode(encoded[12:]), (True, 1, 0))
         values = decoder.getavailable()
         self.assertEqual(values, [b''])
         self.assertFalse(values[0].isfirst)
         self.assertTrue(values[0].islast)
 
+
 class StringTests(TestCase):
     def testdecodeforbidden(self):
         encoded = b'\x63foo'
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'string major type not supported'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'string major type not supported'
+        ):
             cborutil.decodeall(encoded)
 
+
 class IntTests(TestCase):
     def testsmall(self):
         self.assertEqual(list(cborutil.streamencode(0)), [b'\x00'])
@@ -355,8 +433,9 @@
         self.assertEqual(cborutil.decodeall(b'\x04'), [4])
 
         # Multiple value decode works.
-        self.assertEqual(cborutil.decodeall(b'\x00\x01\x02\x03\x04'),
-                         [0, 1, 2, 3, 4])
+        self.assertEqual(
+            cborutil.decodeall(b'\x00\x01\x02\x03\x04'), [0, 1, 2, 3, 4]
+        )
 
     def testnegativesmall(self):
         self.assertEqual(list(cborutil.streamencode(-1)), [b'\x20'])
@@ -375,8 +454,9 @@
         self.assertEqual(cborutil.decodeall(b'\x24'), [-5])
 
         # Multiple value decode works.
-        self.assertEqual(cborutil.decodeall(b'\x20\x21\x22\x23\x24'),
-                         [-1, -2, -3, -4, -5])
+        self.assertEqual(
+            cborutil.decodeall(b'\x20\x21\x22\x23\x24'), [-1, -2, -3, -4, -5]
+        )
 
     def testrange(self):
         for i in range(-70000, 70000, 10):
@@ -388,117 +468,196 @@
     def testdecodepartialubyte(self):
         encoded = b''.join(cborutil.streamencode(250))
 
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (True, 250, 2, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (True, 250, 2, cborutil.SPECIAL_NONE),
+        )
 
     def testdecodepartialbyte(self):
         encoded = b''.join(cborutil.streamencode(-42))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (True, -42, 2, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (True, -42, 2, cborutil.SPECIAL_NONE),
+        )
 
     def testdecodepartialushort(self):
-        encoded = b''.join(cborutil.streamencode(2**15))
+        encoded = b''.join(cborutil.streamencode(2 ** 15))
 
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
-                         (True, 2**15, 3, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:5]),
+            (True, 2 ** 15, 3, cborutil.SPECIAL_NONE),
+        )
 
     def testdecodepartialshort(self):
         encoded = b''.join(cborutil.streamencode(-1024))
 
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (True, -1024, 3, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (True, -1024, 3, cborutil.SPECIAL_NONE),
+        )
 
     def testdecodepartialulong(self):
-        encoded = b''.join(cborutil.streamencode(2**28))
+        encoded = b''.join(cborutil.streamencode(2 ** 28))
 
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -4, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -3, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
-                         (True, 2**28, 5, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -4, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -3, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:4]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:5]),
+            (True, 2 ** 28, 5, cborutil.SPECIAL_NONE),
+        )
 
     def testdecodepartiallong(self):
         encoded = b''.join(cborutil.streamencode(-1048580))
 
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -4, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -3, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
-                         (True, -1048580, 5, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -4, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -3, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:4]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:5]),
+            (True, -1048580, 5, cborutil.SPECIAL_NONE),
+        )
 
     def testdecodepartialulonglong(self):
-        encoded = b''.join(cborutil.streamencode(2**32))
+        encoded = b''.join(cborutil.streamencode(2 ** 32))
 
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -8, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -7, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (False, None, -6, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
-                         (False, None, -5, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
-                         (False, None, -4, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
-                         (False, None, -3, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:7]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:8]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:9]),
-                         (True, 2**32, 9, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -8, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -7, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (False, None, -6, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:4]),
+            (False, None, -5, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:5]),
+            (False, None, -4, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:6]),
+            (False, None, -3, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:7]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:8]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:9]),
+            (True, 2 ** 32, 9, cborutil.SPECIAL_NONE),
+        )
 
         with self.assertRaisesRegex(
-            cborutil.CBORDecodeError, 'input data not fully consumed'):
+            cborutil.CBORDecodeError, 'input data not fully consumed'
+        ):
             cborutil.decodeall(encoded[0:1])
 
         with self.assertRaisesRegex(
-            cborutil.CBORDecodeError, 'input data not fully consumed'):
+            cborutil.CBORDecodeError, 'input data not fully consumed'
+        ):
             cborutil.decodeall(encoded[0:2])
 
     def testdecodepartiallonglong(self):
         encoded = b''.join(cborutil.streamencode(-7000000000))
 
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -8, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -7, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (False, None, -6, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
-                         (False, None, -5, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
-                         (False, None, -4, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
-                         (False, None, -3, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:7]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:8]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:9]),
-                         (True, -7000000000, 9, cborutil.SPECIAL_NONE))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -8, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -7, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (False, None, -6, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:4]),
+            (False, None, -5, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:5]),
+            (False, None, -4, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:6]),
+            (False, None, -3, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:7]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:8]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:9]),
+            (True, -7000000000, 9, cborutil.SPECIAL_NONE),
+        )
+
 
 class ArrayTests(TestCase):
     def testempty(self):
@@ -510,35 +669,36 @@
     def testbasic(self):
         source = [b'foo', b'bar', 1, -10]
 
-        chunks = [
-            b'\x84', b'\x43', b'foo', b'\x43', b'bar', b'\x01', b'\x29']
+        chunks = [b'\x84', b'\x43', b'foo', b'\x43', b'bar', b'\x01', b'\x29']
 
         self.assertEqual(list(cborutil.streamencode(source)), chunks)
 
         self.assertEqual(cborutil.decodeall(b''.join(chunks)), [source])
 
     def testemptyfromiter(self):
-        self.assertEqual(b''.join(cborutil.streamencodearrayfromiter([])),
-                         b'\x9f\xff')
+        self.assertEqual(
+            b''.join(cborutil.streamencodearrayfromiter([])), b'\x9f\xff'
+        )
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'indefinite length uint not allowed'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'indefinite length uint not allowed'
+        ):
             cborutil.decodeall(b'\x9f\xff')
 
     def testfromiter1(self):
         source = [b'foo']
 
-        self.assertEqual(list(cborutil.streamencodearrayfromiter(source)), [
-            b'\x9f',
-            b'\x43', b'foo',
-            b'\xff',
-        ])
+        self.assertEqual(
+            list(cborutil.streamencodearrayfromiter(source)),
+            [b'\x9f', b'\x43', b'foo', b'\xff'],
+        )
 
         dest = b''.join(cborutil.streamencodearrayfromiter(source))
         self.assertEqual(cbor.loads(dest), source)
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'indefinite length uint not allowed'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'indefinite length uint not allowed'
+        ):
             cborutil.decodeall(dest)
 
     def testtuple(self):
@@ -552,37 +712,59 @@
     def testpartialdecode(self):
         source = list(range(4))
         encoded = b''.join(cborutil.streamencode(source))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (True, 4, 1, cborutil.SPECIAL_START_ARRAY))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (True, 4, 1, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (True, 4, 1, cborutil.SPECIAL_START_ARRAY),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (True, 4, 1, cborutil.SPECIAL_START_ARRAY),
+        )
 
         source = list(range(23))
         encoded = b''.join(cborutil.streamencode(source))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (True, 23, 1, cborutil.SPECIAL_START_ARRAY))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (True, 23, 1, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (True, 23, 1, cborutil.SPECIAL_START_ARRAY),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (True, 23, 1, cborutil.SPECIAL_START_ARRAY),
+        )
 
         source = list(range(24))
         encoded = b''.join(cborutil.streamencode(source))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (True, 24, 2, cborutil.SPECIAL_START_ARRAY))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (True, 24, 2, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (True, 24, 2, cborutil.SPECIAL_START_ARRAY),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (True, 24, 2, cborutil.SPECIAL_START_ARRAY),
+        )
 
         source = list(range(256))
         encoded = b''.join(cborutil.streamencode(source))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (True, 256, 3, cborutil.SPECIAL_START_ARRAY))
-        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
-                         (True, 256, 3, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (True, 256, 3, cborutil.SPECIAL_START_ARRAY),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:4]),
+            (True, 256, 3, cborutil.SPECIAL_START_ARRAY),
+        )
 
     def testnested(self):
         source = [[], [], [[], [], []]]
@@ -607,17 +789,18 @@
         # Single value array whose value is an empty indefinite bytestring.
         encoded = b'\x81\x5f\x40\xff'
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'indefinite length bytestrings not '
-                                    'allowed as array values'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError,
+            'indefinite length bytestrings not allowed as array values',
+        ):
             cborutil.decodeall(encoded)
 
+
 class SetTests(TestCase):
     def testempty(self):
-        self.assertEqual(list(cborutil.streamencode(set())), [
-            b'\xd9\x01\x02',
-            b'\x80',
-        ])
+        self.assertEqual(
+            list(cborutil.streamencode(set())), [b'\xd9\x01\x02', b'\x80']
+        )
 
         self.assertEqual(cborutil.decodeall(b'\xd9\x01\x02\x80'), [set()])
 
@@ -633,99 +816,135 @@
         # Must use array to encode sets.
         encoded = b'\xd9\x01\x02\xa0'
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'expected array after finite set '
-                                    'semantic tag'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError,
+            'expected array after finite set semantic tag',
+        ):
             cborutil.decodeall(encoded)
 
     def testpartialdecode(self):
         # Semantic tag item will be 3 bytes. Set header will be variable
         # depending on length.
         encoded = b''.join(cborutil.streamencode({i for i in range(23)}))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
-                         (True, 23, 4, cborutil.SPECIAL_START_SET))
-        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
-                         (True, 23, 4, cborutil.SPECIAL_START_SET))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:4]),
+            (True, 23, 4, cborutil.SPECIAL_START_SET),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:5]),
+            (True, 23, 4, cborutil.SPECIAL_START_SET),
+        )
 
         encoded = b''.join(cborutil.streamencode({i for i in range(24)}))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
-                         (True, 24, 5, cborutil.SPECIAL_START_SET))
-        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
-                         (True, 24, 5, cborutil.SPECIAL_START_SET))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:4]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:5]),
+            (True, 24, 5, cborutil.SPECIAL_START_SET),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:6]),
+            (True, 24, 5, cborutil.SPECIAL_START_SET),
+        )
 
         encoded = b''.join(cborutil.streamencode({i for i in range(256)}))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
-                         (True, 256, 6, cborutil.SPECIAL_START_SET))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:4]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:5]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:6]),
+            (True, 256, 6, cborutil.SPECIAL_START_SET),
+        )
 
     def testinvalidvalue(self):
-        encoded = b''.join([
-            b'\xd9\x01\x02', # semantic tag
-            b'\x81', # array of size 1
-            b'\x5f\x43foo\xff', # indefinite length bytestring "foo"
-        ])
+        encoded = b''.join(
+            [
+                b'\xd9\x01\x02',  # semantic tag
+                b'\x81',  # array of size 1
+                b'\x5f\x43foo\xff',  # indefinite length bytestring "foo"
+            ]
+        )
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'indefinite length bytestrings not '
-                                    'allowed as set values'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError,
+            'indefinite length bytestrings not allowed as set values',
+        ):
+            cborutil.decodeall(encoded)
+
+        encoded = b''.join([b'\xd9\x01\x02', b'\x81', b'\x80'])  # empty array
+
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'collections not allowed as set values'
+        ):
             cborutil.decodeall(encoded)
 
-        encoded = b''.join([
-            b'\xd9\x01\x02',
-            b'\x81',
-            b'\x80', # empty array
-        ])
+        encoded = b''.join([b'\xd9\x01\x02', b'\x81', b'\xa0'])  # empty map
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'collections not allowed as set values'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'collections not allowed as set values'
+        ):
             cborutil.decodeall(encoded)
 
-        encoded = b''.join([
-            b'\xd9\x01\x02',
-            b'\x81',
-            b'\xa0', # empty map
-        ])
+        encoded = b''.join(
+            [
+                b'\xd9\x01\x02',
+                b'\x81',
+                b'\xd9\x01\x02\x81\x01',  # set with integer 1
+            ]
+        )
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'collections not allowed as set values'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'collections not allowed as set values'
+        ):
             cborutil.decodeall(encoded)
 
-        encoded = b''.join([
-            b'\xd9\x01\x02',
-            b'\x81',
-            b'\xd9\x01\x02\x81\x01', # set with integer 1
-        ])
-
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'collections not allowed as set values'):
-            cborutil.decodeall(encoded)
 
 class BoolTests(TestCase):
     def testbasic(self):
-        self.assertEqual(list(cborutil.streamencode(True)),  [b'\xf5'])
+        self.assertEqual(list(cborutil.streamencode(True)), [b'\xf5'])
         self.assertEqual(list(cborutil.streamencode(False)), [b'\xf4'])
 
         self.assertIs(loadit(cborutil.streamencode(True)), True)
@@ -734,8 +953,10 @@
         self.assertEqual(cborutil.decodeall(b'\xf4'), [False])
         self.assertEqual(cborutil.decodeall(b'\xf5'), [True])
 
-        self.assertEqual(cborutil.decodeall(b'\xf4\xf5\xf5\xf4'),
-                         [False, True, True, False])
+        self.assertEqual(
+            cborutil.decodeall(b'\xf4\xf5\xf5\xf4'), [False, True, True, False]
+        )
+
 
 class NoneTests(TestCase):
     def testbasic(self):
@@ -746,6 +967,7 @@
         self.assertEqual(cborutil.decodeall(b'\xf6'), [None])
         self.assertEqual(cborutil.decodeall(b'\xf6\xf6'), [None, None])
 
+
 class MapTests(TestCase):
     def testempty(self):
         self.assertEqual(list(cborutil.streamencode({})), [b'\xa0'])
@@ -754,19 +976,23 @@
         self.assertEqual(cborutil.decodeall(b'\xa0'), [{}])
 
     def testemptyindefinite(self):
-        self.assertEqual(list(cborutil.streamencodemapfromiter([])), [
-            b'\xbf', b'\xff'])
+        self.assertEqual(
+            list(cborutil.streamencodemapfromiter([])), [b'\xbf', b'\xff']
+        )
 
         self.assertEqual(loadit(cborutil.streamencodemapfromiter([])), {})
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'indefinite length uint not allowed'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'indefinite length uint not allowed'
+        ):
             cborutil.decodeall(b'\xbf\xff')
 
     def testone(self):
         source = {b'foo': b'bar'}
-        self.assertEqual(list(cborutil.streamencode(source)), [
-            b'\xa1', b'\x43', b'foo', b'\x43', b'bar'])
+        self.assertEqual(
+            list(cborutil.streamencode(source)),
+            [b'\xa1', b'\x43', b'foo', b'\x43', b'bar'],
+        )
 
         self.assertEqual(loadit(cborutil.streamencode(source)), source)
 
@@ -781,8 +1007,8 @@
         self.assertEqual(loadit(cborutil.streamencode(source)), source)
 
         self.assertEqual(
-            loadit(cborutil.streamencodemapfromiter(source.items())),
-            source)
+            loadit(cborutil.streamencodemapfromiter(source.items())), source
+        )
 
         encoded = b''.join(cborutil.streamencode(source))
         self.assertEqual(cborutil.decodeall(encoded), [source])
@@ -793,12 +1019,11 @@
             2: -10,
         }
 
-        self.assertEqual(loadit(cborutil.streamencode(source)),
-                         source)
+        self.assertEqual(loadit(cborutil.streamencode(source)), source)
 
         self.assertEqual(
-            loadit(cborutil.streamencodemapfromiter(source.items())),
-            source)
+            loadit(cborutil.streamencodemapfromiter(source.items())), source
+        )
 
         encoded = b''.join(cborutil.streamencode(source))
         self.assertEqual(cborutil.decodeall(encoded), [source])
@@ -819,88 +1044,124 @@
         self.assertEqual(cborutil.decodeall(encoded), [source])
 
     def testillegalkey(self):
-        encoded = b''.join([
-            # map header + len 1
-            b'\xa1',
-            # indefinite length bytestring "foo" in key position
-            b'\x5f\x03foo\xff'
-        ])
+        encoded = b''.join(
+            [
+                # map header + len 1
+                b'\xa1',
+                # indefinite length bytestring "foo" in key position
+                b'\x5f\x03foo\xff',
+            ]
+        )
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'indefinite length bytestrings not '
-                                    'allowed as map keys'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError,
+            'indefinite length bytestrings not allowed as map keys',
+        ):
             cborutil.decodeall(encoded)
 
-        encoded = b''.join([
-            b'\xa1',
-            b'\x80', # empty array
-            b'\x43foo',
-        ])
+        encoded = b''.join([b'\xa1', b'\x80', b'\x43foo'])  # empty-array key
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'collections not supported as map keys'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'collections not supported as map keys'
+        ):
             cborutil.decodeall(encoded)
 
     def testillegalvalue(self):
-        encoded = b''.join([
-            b'\xa1', # map headers
-            b'\x43foo', # key
-            b'\x5f\x03bar\xff', # indefinite length value
-        ])
+        encoded = b''.join(
+            [
+                b'\xa1',  # map headers
+                b'\x43foo',  # key
+                b'\x5f\x03bar\xff',  # indefinite length value
+            ]
+        )
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'indefinite length bytestrings not '
-                                    'allowed as map values'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError,
+            'indefinite length bytestrings not allowed as map values',
+        ):
             cborutil.decodeall(encoded)
 
     def testpartialdecode(self):
         source = {b'key1': b'value1'}
         encoded = b''.join(cborutil.streamencode(source))
 
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (True, 1, 1, cborutil.SPECIAL_START_MAP))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (True, 1, 1, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (True, 1, 1, cborutil.SPECIAL_START_MAP),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (True, 1, 1, cborutil.SPECIAL_START_MAP),
+        )
 
         source = {b'key%d' % i: None for i in range(23)}
         encoded = b''.join(cborutil.streamencode(source))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (True, 23, 1, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (True, 23, 1, cborutil.SPECIAL_START_MAP),
+        )
 
         source = {b'key%d' % i: None for i in range(24)}
         encoded = b''.join(cborutil.streamencode(source))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (True, 24, 2, cborutil.SPECIAL_START_MAP))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (True, 24, 2, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (True, 24, 2, cborutil.SPECIAL_START_MAP),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (True, 24, 2, cborutil.SPECIAL_START_MAP),
+        )
 
         source = {b'key%d' % i: None for i in range(256)}
         encoded = b''.join(cborutil.streamencode(source))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (True, 256, 3, cborutil.SPECIAL_START_MAP))
-        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
-                         (True, 256, 3, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (True, 256, 3, cborutil.SPECIAL_START_MAP),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:4]),
+            (True, 256, 3, cborutil.SPECIAL_START_MAP),
+        )
 
         source = {b'key%d' % i: None for i in range(65536)}
         encoded = b''.join(cborutil.streamencode(source))
-        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                         (False, None, -4, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                         (False, None, -3, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
-                         (False, None, -2, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
-                         (False, None, -1, cborutil.SPECIAL_NONE))
-        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
-                         (True, 65536, 5, cborutil.SPECIAL_START_MAP))
-        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
-                         (True, 65536, 5, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:1]),
+            (False, None, -4, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:2]),
+            (False, None, -3, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:3]),
+            (False, None, -2, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:4]),
+            (False, None, -1, cborutil.SPECIAL_NONE),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:5]),
+            (True, 65536, 5, cborutil.SPECIAL_START_MAP),
+        )
+        self.assertEqual(
+            cborutil.decodeitem(encoded[0:6]),
+            (True, 65536, 5, cborutil.SPECIAL_START_MAP),
+        )
+
 
 class SemanticTagTests(TestCase):
     def testdecodeforbidden(self):
@@ -908,8 +1169,7 @@
             if i == cborutil.SEMANTIC_TAG_FINITE_SET:
                 continue
 
-            tag = cborutil.encodelength(cborutil.MAJOR_TYPE_SEMANTIC,
-                                        i)
+            tag = cborutil.encodelength(cborutil.MAJOR_TYPE_SEMANTIC, i)
 
             encoded = tag + cborutil.encodelength(cborutil.MAJOR_TYPE_UINT, 42)
 
@@ -917,18 +1177,26 @@
             if i < 24:
                 pass
             elif i < 256:
-                self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                                 (False, None, -1, cborutil.SPECIAL_NONE))
+                self.assertEqual(
+                    cborutil.decodeitem(encoded[0:1]),
+                    (False, None, -1, cborutil.SPECIAL_NONE),
+                )
             elif i < 65536:
-                self.assertEqual(cborutil.decodeitem(encoded[0:1]),
-                                 (False, None, -2, cborutil.SPECIAL_NONE))
-                self.assertEqual(cborutil.decodeitem(encoded[0:2]),
-                                 (False, None, -1, cborutil.SPECIAL_NONE))
+                self.assertEqual(
+                    cborutil.decodeitem(encoded[0:1]),
+                    (False, None, -2, cborutil.SPECIAL_NONE),
+                )
+                self.assertEqual(
+                    cborutil.decodeitem(encoded[0:2]),
+                    (False, None, -1, cborutil.SPECIAL_NONE),
+                )
 
-            with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                        r'semantic tag \d+ not allowed'):
+            with self.assertRaisesRegex(
+                cborutil.CBORDecodeError, r'semantic tag \d+ not allowed'
+            ):
                 cborutil.decodeitem(encoded)
 
+
 class SpecialTypesTests(TestCase):
     def testforbiddentypes(self):
         for i in range(256):
@@ -941,15 +1209,18 @@
 
             encoded = cborutil.encodelength(cborutil.MAJOR_TYPE_SPECIAL, i)
 
-            with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                        r'special type \d+ not allowed'):
+            with self.assertRaisesRegex(
+                cborutil.CBORDecodeError, r'special type \d+ not allowed'
+            ):
                 cborutil.decodeitem(encoded)
 
+
 class SansIODecoderTests(TestCase):
     def testemptyinput(self):
         decoder = cborutil.sansiodecoder()
         self.assertEqual(decoder.decode(b''), (False, 0, 0))
 
+
 class BufferingDecoderTests(TestCase):
     def testsimple(self):
         source = [
@@ -969,7 +1240,7 @@
             start = 0
 
             while start < len(encoded):
-                decoder.decode(encoded[start:start + step])
+                decoder.decode(encoded[start : start + step])
                 start += step
 
             self.assertEqual(decoder.getavailable(), [source])
@@ -982,20 +1253,23 @@
 
         self.assertEqual(decoder.getavailable(), [b'foobar'])
 
+
 class DecodeallTests(TestCase):
     def testemptyinput(self):
         self.assertEqual(cborutil.decodeall(b''), [])
 
     def testpartialinput(self):
-        encoded = b''.join([
-            b'\x82', # array of 2 elements
-            b'\x01', # integer 1
-        ])
+        encoded = b''.join(
+            [b'\x82', b'\x01']  # array of 2 elements, then integer 1
+        )
 
-        with self.assertRaisesRegex(cborutil.CBORDecodeError,
-                                    'input data not complete'):
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'input data not complete'
+        ):
             cborutil.decodeall(encoded)
 
+
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
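
The assertions above pin down the ``cborutil.decodeitem()`` contract: it
returns a ``(complete, value, readcount, special)`` tuple, and a negative
``readcount`` signals how many more bytes are needed before the item can be
decoded. As a minimal sketch of driving the same machinery incrementally
(assuming the ``mercurial.utils.cborutil`` module these tests import)::

   from mercurial.utils import cborutil

   encoded = b''.join(cborutil.streamencode([b'foo', b'bar', 1, -10]))

   # Feed arbitrarily sized chunks; values surface once fully decoded.
   decoder = cborutil.bufferingdecoder()
   for i in range(0, len(encoded), 3):
       decoder.decode(encoded[i : i + 3])

   print(decoder.getavailable())  # -> [[b'foo', b'bar', 1, -10]]
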
--- a/tests/test-check-code.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-check-code.t	Mon Oct 21 11:09:48 2019 -0400
@@ -16,9 +16,12 @@
   Skipping contrib/automation/hgautomation/aws.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/cli.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/linux.py it has no-che?k-code (glob)
+  Skipping contrib/automation/hgautomation/pypi.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/ssh.py it has no-che?k-code (glob)
+  Skipping contrib/automation/hgautomation/try_server.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/windows.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/winrm.py it has no-che?k-code (glob)
+  Skipping contrib/grey.py it has no-che?k-code (glob)
   Skipping contrib/packaging/hgpackaging/downloads.py it has no-che?k-code (glob)
   Skipping contrib/packaging/hgpackaging/inno.py it has no-che?k-code (glob)
   Skipping contrib/packaging/hgpackaging/py2exe.py it has no-che?k-code (glob)
@@ -61,6 +64,7 @@
   COPYING
   Makefile
   README.rst
+  black.toml
   hg
   hgeditor
   hgweb.cgi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-check-format.t	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,7 @@
+#require grey
+
+(this should switch to the real black as soon as possible)
+
+  $ cd $RUNTESTDIR/..
+  $ python3 contrib/grey.py --config=black.toml --check --diff `hg files 'set:**.py - hgext/fsmonitor/pywatchman/** - mercurial/thirdparty/** - "contrib/python-zstandard/**" - contrib/grey.py'`
+
--- a/tests/test-check-interfaces.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-check-interfaces.py	Mon Oct 21 11:09:48 2019 -0400
@@ -3,6 +3,7 @@
 from __future__ import absolute_import, print_function
 
 from mercurial import encoding
+
 encoding.environ[b'HGREALINTERFACES'] = b'1'
 
 import os
@@ -10,24 +11,25 @@
 import sys
 
 # Only run if tests are run in a repo
-if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
-                    'test-repo']):
+if subprocess.call(
+    ['python', '%s/hghave' % os.environ['TESTDIR'], 'test-repo']
+):
     sys.exit(80)
 
-from mercurial.thirdparty.zope import (
-    interface as zi,
+from mercurial.interfaces import (
+    dirstate as intdirstate,
+    repository,
 )
-from mercurial.thirdparty.zope.interface import (
-    verify as ziverify,
-)
+from mercurial.thirdparty.zope import interface as zi
+from mercurial.thirdparty.zope.interface import verify as ziverify
 from mercurial import (
     bundlerepo,
+    dirstate,
     filelog,
     httppeer,
     localrepo,
     manifest,
     pycompat,
-    repository,
     revlog,
     sshpeer,
     statichttprepo,
@@ -45,8 +47,10 @@
 
 sys.path[0:0] = [testdir]
 import simplestorerepo
+
 del sys.path[0]
 
+
 def checkzobject(o, allowextra=False):
     """Verify an object with a zope interface."""
     ifaces = zi.providedBy(o)
@@ -71,35 +75,45 @@
     public = {a for a in dir(o) if not a.startswith('_')}
 
     for attr in sorted(public - allowed):
-        print('public attribute not declared in interfaces: %s.%s' % (
-            o.__class__.__name__, attr))
+        print(
+            'public attribute not declared in interfaces: %s.%s'
+            % (o.__class__.__name__, attr)
+        )
+
 
 # Facilitates testing localpeer.
 class dummyrepo(object):
     def __init__(self):
         self.ui = uimod.ui()
+
     def filtered(self, name):
         pass
+
     def _restrictcapabilities(self, caps):
         pass
 
+
 class dummyopener(object):
     handlers = []
 
+
 # Facilitates testing sshpeer without requiring a server.
 class badpeer(httppeer.httppeer):
     def __init__(self):
-        super(badpeer, self).__init__(None, None, None, dummyopener(), None,
-                                      None)
+        super(badpeer, self).__init__(
+            None, None, None, dummyopener(), None, None
+        )
         self.badattribute = True
 
     def badmethod(self):
         pass
 
+
 class dummypipe(object):
     def close(self):
         pass
 
+
 def main():
     ui = uimod.ui()
     # Needed so we can open a local repo with obsstore without a warning.
@@ -113,25 +127,44 @@
     ziverify.verifyClass(repository.ipeerv2, httppeer.httpv2peer)
     checkzobject(httppeer.httpv2peer(None, b'', b'', None, None, None))
 
-    ziverify.verifyClass(repository.ipeerbase,
-                         localrepo.localpeer)
+    ziverify.verifyClass(repository.ipeerbase, localrepo.localpeer)
     checkzobject(localrepo.localpeer(dummyrepo()))
 
-    ziverify.verifyClass(repository.ipeercommandexecutor,
-                         localrepo.localcommandexecutor)
+    ziverify.verifyClass(
+        repository.ipeercommandexecutor, localrepo.localcommandexecutor
+    )
     checkzobject(localrepo.localcommandexecutor(None))
 
-    ziverify.verifyClass(repository.ipeercommandexecutor,
-                         wireprotov1peer.peerexecutor)
+    ziverify.verifyClass(
+        repository.ipeercommandexecutor, wireprotov1peer.peerexecutor
+    )
     checkzobject(wireprotov1peer.peerexecutor(None))
 
     ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv1peer)
-    checkzobject(sshpeer.sshv1peer(ui, b'ssh://localhost/foo', b'', dummypipe(),
-                                   dummypipe(), None, None))
+    checkzobject(
+        sshpeer.sshv1peer(
+            ui,
+            b'ssh://localhost/foo',
+            b'',
+            dummypipe(),
+            dummypipe(),
+            None,
+            None,
+        )
+    )
 
     ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv2peer)
-    checkzobject(sshpeer.sshv2peer(ui, b'ssh://localhost/foo', b'', dummypipe(),
-                                   dummypipe(), None, None))
+    checkzobject(
+        sshpeer.sshv2peer(
+            ui,
+            b'ssh://localhost/foo',
+            b'',
+            dummypipe(),
+            dummypipe(),
+            None,
+            None,
+        )
+    )
 
     ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
     checkzobject(bundlerepo.bundlepeer(dummyrepo()))
@@ -142,21 +175,29 @@
     ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
     checkzobject(unionrepo.unionpeer(dummyrepo()))
 
-    ziverify.verifyClass(repository.ilocalrepositorymain,
-                         localrepo.localrepository)
-    ziverify.verifyClass(repository.ilocalrepositoryfilestorage,
-                         localrepo.revlogfilestorage)
+    ziverify.verifyClass(
+        repository.ilocalrepositorymain, localrepo.localrepository
+    )
+    ziverify.verifyClass(
+        repository.ilocalrepositoryfilestorage, localrepo.revlogfilestorage
+    )
     repo = localrepo.makelocalrepository(ui, rootdir)
     checkzobject(repo)
 
-    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
-                         wireprotoserver.sshv1protocolhandler)
-    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
-                         wireprotoserver.sshv2protocolhandler)
-    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
-                         wireprotoserver.httpv1protocolhandler)
-    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
-                         wireprotov2server.httpv2protocolhandler)
+    ziverify.verifyClass(
+        wireprototypes.baseprotocolhandler, wireprotoserver.sshv1protocolhandler
+    )
+    ziverify.verifyClass(
+        wireprototypes.baseprotocolhandler, wireprotoserver.sshv2protocolhandler
+    )
+    ziverify.verifyClass(
+        wireprototypes.baseprotocolhandler,
+        wireprotoserver.httpv1protocolhandler,
+    )
+    ziverify.verifyClass(
+        wireprototypes.baseprotocolhandler,
+        wireprotov2server.httpv2protocolhandler,
+    )
 
     sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
     checkzobject(sshv1)
@@ -170,30 +211,39 @@
 
     ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
     ziverify.verifyClass(repository.imanifestdict, manifest.manifestdict)
-    ziverify.verifyClass(repository.imanifestrevisionstored,
-                         manifest.manifestctx)
-    ziverify.verifyClass(repository.imanifestrevisionwritable,
-                         manifest.memmanifestctx)
-    ziverify.verifyClass(repository.imanifestrevisionstored,
-                         manifest.treemanifestctx)
-    ziverify.verifyClass(repository.imanifestrevisionwritable,
-                         manifest.memtreemanifestctx)
+    ziverify.verifyClass(
+        repository.imanifestrevisionstored, manifest.manifestctx
+    )
+    ziverify.verifyClass(
+        repository.imanifestrevisionwritable, manifest.memmanifestctx
+    )
+    ziverify.verifyClass(
+        repository.imanifestrevisionstored, manifest.treemanifestctx
+    )
+    ziverify.verifyClass(
+        repository.imanifestrevisionwritable, manifest.memtreemanifestctx
+    )
     ziverify.verifyClass(repository.imanifestlog, manifest.manifestlog)
     ziverify.verifyClass(repository.imanifeststorage, manifest.manifestrevlog)
 
-    ziverify.verifyClass(repository.irevisiondelta,
-                         simplestorerepo.simplestorerevisiondelta)
+    ziverify.verifyClass(
+        repository.irevisiondelta, simplestorerepo.simplestorerevisiondelta
+    )
     ziverify.verifyClass(repository.ifilestorage, simplestorerepo.filestorage)
-    ziverify.verifyClass(repository.iverifyproblem,
-                         simplestorerepo.simplefilestoreproblem)
+    ziverify.verifyClass(
+        repository.iverifyproblem, simplestorerepo.simplefilestoreproblem
+    )
+
+    ziverify.verifyClass(intdirstate.idirstate, dirstate.dirstate)
 
     vfs = vfsmod.vfs(b'.')
     fl = filelog.filelog(vfs, b'dummy.i')
     checkzobject(fl, allowextra=True)
 
     # Conforms to imanifestlog.
-    ml = manifest.manifestlog(vfs, repo, manifest.manifestrevlog(repo.svfs),
-                              repo.narrowmatch())
+    ml = manifest.manifestlog(
+        vfs, repo, manifest.manifestrevlog(repo.svfs), repo.narrowmatch()
+    )
     checkzobject(ml)
     checkzobject(repo.manifestlog)
 
@@ -211,8 +261,7 @@
     mrl = manifest.manifestrevlog(vfs)
     checkzobject(mrl)
 
-    ziverify.verifyClass(repository.irevisiondelta,
-                         revlog.revlogrevisiondelta)
+    ziverify.verifyClass(repository.irevisiondelta, revlog.revlogrevisiondelta)
 
     rd = revlog.revlogrevisiondelta(
         node=b'',
@@ -223,11 +272,12 @@
         flags=b'',
         baserevisionsize=None,
         revision=b'',
-        delta=None)
+        delta=None,
+    )
     checkzobject(rd)
 
-    ziverify.verifyClass(repository.iverifyproblem,
-                         revlog.revlogproblem)
+    ziverify.verifyClass(repository.iverifyproblem, revlog.revlogproblem)
     checkzobject(revlog.revlogproblem())
 
+
 main()
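
The checks above lean on zope.interface's verification helpers:
``verifyClass()`` confirms a class declares an interface and matches its
method signatures, while ``checkzobject()`` walks ``zi.providedBy()`` to
catch undeclared public attributes. A minimal sketch of the same pattern
using the upstream ``zope.interface`` package (Mercurial vendors it under
``mercurial.thirdparty.zope``; ``IGreeter`` and ``Greeter`` are
hypothetical names)::

   from zope.interface import Interface, implementer
   from zope.interface.verify import verifyClass, verifyObject

   class IGreeter(Interface):
       def greet(name):
           """Return a greeting for name."""

   @implementer(IGreeter)
   class Greeter(object):
       def greet(self, name):
           return 'hello %s' % name

   # verifyClass checks declarations and signatures on the class;
   # verifyObject additionally checks that the instance provides them.
   verifyClass(IGreeter, Greeter)
   verifyObject(IGreeter, Greeter())
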
--- a/tests/test-check-module-imports.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-check-module-imports.t	Mon Oct 21 11:09:48 2019 -0400
@@ -20,6 +20,7 @@
   > -X setup.py \
   > -X contrib/automation/ \
   > -X contrib/debugshell.py \
+  > -X contrib/grey.py \
   > -X contrib/hgweb.fcgi \
   > -X contrib/packaging/hg-docker \
   > -X contrib/packaging/hgpackaging/ \
--- a/tests/test-check-py3-compat.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-check-py3-compat.t	Mon Oct 21 11:09:48 2019 -0400
@@ -6,6 +6,7 @@
 #if no-py3
   $ testrepohg files 'set:(**.py)' \
   > -X contrib/automation/ \
+  > -X contrib/grey.py \
   > -X contrib/packaging/hgpackaging/ \
   > -X contrib/packaging/inno/ \
   > -X contrib/packaging/wix/ \
--- a/tests/test-check-pyflakes.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-check-pyflakes.t	Mon Oct 21 11:09:48 2019 -0400
@@ -21,4 +21,7 @@
   > -X mercurial/thirdparty/zope \
   > 2>/dev/null \
   > | xargs pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py"
+  contrib/perf.py:*: undefined name 'xrange' (glob) (?)
+  mercurial/hgweb/server.py:*: undefined name 'reload' (glob) (?)
+  mercurial/util.py:*: undefined name 'file' (glob) (?)
   
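
The three newly expected warnings are all Python 2 builtins (``xrange``,
``reload``, ``file``) that no longer exist on Python 3, so pyflakes flags
them when run under py3; the ``(?)`` markers make the lines optional. The
usual runtime guard looks roughly like this (a sketch, not necessarily the
exact code in those modules)::

   try:
       xrange  # Python 2 builtin
   except NameError:
       xrange = range  # removed on Python 3
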
--- a/tests/test-chg.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-chg.t	Mon Oct 21 11:09:48 2019 -0400
@@ -127,7 +127,7 @@
   > command = registrar.command(cmdtable)
   > @command(b'crash')
   > def pagercrash(ui, repo, *pats, **opts):
-  >     ui.write('going to crash\n')
+  >     ui.write(b'going to crash\n')
   >     raise Exception('.')
   > EOF
 
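
The one-character fix above reflects a general rule: Mercurial's ``ui``
APIs traffic in bytes, and a native ``str`` crashes under Python 3. A
minimal sketch (assuming an importable ``mercurial``)::

   from mercurial import ui as uimod

   u = uimod.ui.load()
   u.write(b'going to crash\n')  # bytes, not str
   u.write(b'%d files\n' % 3)  # bytes %-formatting works on Python >= 3.5
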
--- a/tests/test-clone-uncompressed.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-clone-uncompressed.t	Mon Oct 21 11:09:48 2019 -0400
@@ -259,7 +259,7 @@
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 1 parts total
+  bundle2-input-bundle: 2 parts total
   checking for updated bookmarks
   (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
 #endif
@@ -281,7 +281,7 @@
   transferred 96.5 KB in * seconds (* */sec) (glob)
   bundle2-input-part: total payload size 112094
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
-  bundle2-input-bundle: 1 parts total
+  bundle2-input-bundle: 2 parts total
   checking for updated bookmarks
   (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
 #endif
@@ -537,6 +537,7 @@
   $ echo foo > foo
   $ hg -q commit -m 'about to be pruned'
   $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up null -q
   $ hg log -T '{rev}: {phase}\n'
--- a/tests/test-clone.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-clone.t	Mon Oct 21 11:09:48 2019 -0400
@@ -759,6 +759,7 @@
   $ echo initial2 > foo
   $ hg -q commit -A -m initial1
   $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ cd ..
 
@@ -867,9 +868,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 4 changesets with 4 changes to 1 files (+4 heads)
   adding remote bookmark head1
   adding remote bookmark head2
+  added 4 changesets with 4 changes to 1 files (+4 heads)
   new changesets 4a8dc1ab4c13:6bacf4683960
   updating working directory
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -995,9 +996,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files (+1 heads)
   adding remote bookmark head1
   adding remote bookmark head2
+  added 1 changesets with 1 changes to 1 files (+1 heads)
   new changesets 99f71071f117
   updating working directory
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-clonebundles.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-clonebundles.t	Mon Oct 21 11:09:48 2019 -0400
@@ -53,7 +53,7 @@
   $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
   $ hg clone http://localhost:$HGPORT 404-url
   applying clone bundle from http://does.not.exist/bundle.hg
-  error fetching bundle: (.* not known|(\[Errno -?\d+])? [Nn]o address associated with (host)?name) (re) (no-windows !)
+  error fetching bundle: (.* not known|(\[Errno -?\d+] )?([Nn]o address associated with (host)?name|Temporary failure in name resolution)) (re) (no-windows !)
   error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
   abort: error applying bundle
   (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
--- a/tests/test-commandserver.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-commandserver.t	Mon Oct 21 11:09:48 2019 -0400
@@ -549,6 +549,7 @@
   *** runcommand up null
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   *** runcommand phase -df tip
+  1 new obsolescence markers
   obsoleted 1 changesets
   *** runcommand log --hidden
   changeset:   1:731265503d86
--- a/tests/test-completion.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-completion.t	Mon Oct 21 11:09:48 2019 -0400
@@ -124,6 +124,7 @@
   debugrevspec
   debugserve
   debugsetparents
+  debugsidedata
   debugssl
   debugsub
   debugsuccessorssets
@@ -305,6 +306,7 @@
   debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
   debugserve: sshstdio, logiofd, logiofile
   debugsetparents: 
+  debugsidedata: changelog, manifest, dir
   debugssl: 
   debugsub: rev
   debugsuccessorssets: closest
@@ -312,7 +314,7 @@
   debuguigetpass: prompt
   debuguiprompt: prompt
   debugupdatecaches: 
-  debugupgraderepo: optimize, run, backup
+  debugupgraderepo: optimize, run, backup, changelog, manifest
   debugwalk: include, exclude
   debugwhyunstable: 
   debugwireargs: three, four, five, ssh, remotecmd, insecure
--- a/tests/test-config-env.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-config-env.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,9 +11,7 @@
     util,
 )
 
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
 
 testtmp = encoding.environ[b'TESTTMP']
 
@@ -21,6 +19,7 @@
 def join(name):
     return os.path.join(testtmp, name)
 
+
 with open(join(b'sysrc'), 'wb') as f:
     f.write(b'[ui]\neditor=e0\n[pager]\npager=p0\n')
 
@@ -31,24 +30,28 @@
 def systemrcpath():
     return [join(b'sysrc')]
 
+
 def userrcpath():
     return [join(b'userrc')]
 
+
 rcutil.systemrcpath = systemrcpath
 rcutil.userrcpath = userrcpath
-os.path.isdir = lambda x: False # hack: do not load default.d/*.rc
+os.path.isdir = lambda x: False  # hack: do not load default.d/*.rc
 
 # utility to print configs
 def printconfigs(env):
     encoding.environ = env
-    rcutil._rccomponents = None # reset cache
+    rcutil._rccomponents = None  # reset cache
     ui = uimod.ui.load()
     for section, name, value in ui.walkconfig():
         source = ui.configsource(section, name)
-        procutil.stdout.write(b'%s.%s=%s # %s\n'
-                              % (section, name, value, util.pconvert(source)))
+        procutil.stdout.write(
+            b'%s.%s=%s # %s\n' % (section, name, value, util.pconvert(source))
+        )
     procutil.stdout.write(b'\n')
 
+
 # environment variable overrides
 printconfigs({})
 printconfigs({b'EDITOR': b'e2', b'PAGER': b'p2'})
--- a/tests/test-config.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-config.t	Mon Oct 21 11:09:48 2019 -0400
@@ -57,11 +57,13 @@
   $ hg showconfig Section -Tjson
   [
    {
+    "defaultvalue": null,
     "name": "Section.KeY",
     "source": "*.hgrc:*", (glob)
     "value": "Case Sensitive"
    },
    {
+    "defaultvalue": null,
     "name": "Section.key",
     "source": "*.hgrc:*", (glob)
     "value": "lower case"
@@ -70,14 +72,15 @@
   $ hg showconfig Section.KeY -Tjson
   [
    {
+    "defaultvalue": null,
     "name": "Section.KeY",
     "source": "*.hgrc:*", (glob)
     "value": "Case Sensitive"
    }
   ]
   $ hg showconfig -Tjson | tail -7
-   },
    {
+    "defaultvalue": null,
     "name": "*", (glob)
     "source": "*", (glob)
     "value": "*" (glob)
@@ -102,6 +105,7 @@
   $ hg config empty.source -Tjson
   [
    {
+    "defaultvalue": null,
     "name": "empty.source",
     "source": "",
     "value": "value"
--- a/tests/test-context.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-context.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,6 +13,8 @@
 )
 
 print_ = print
+
+
 def print(*args, **kwargs):
     """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
 
@@ -22,11 +24,13 @@
     print_(*args, **kwargs)
     sys.stdout.flush()
 
+
 def printb(data, end=b'\n'):
     out = getattr(sys.stdout, 'buffer', sys.stdout)
     out.write(data + end)
     out.flush()
 
+
 ui = uimod.ui.load()
 
 repo = hg.repository(ui, b'test1', create=1)
@@ -49,12 +53,18 @@
 
 # test memctx with non-ASCII commit message
 
+
 def filectxfn(repo, memctx, path):
     return context.memfilectx(repo, memctx, b"foo", b"")
 
-ctx = context.memctx(repo, [b'tip', None],
-                     encoding.tolocal(b"Gr\xc3\xbcezi!"),
-                     [b"foo"], filectxfn)
+
+ctx = context.memctx(
+    repo,
+    [b'tip', None],
+    encoding.tolocal(b"Gr\xc3\xbcezi!"),
+    [b"foo"],
+    filectxfn,
+)
 ctx.commit()
 for enc in "ASCII", "Latin-1", "UTF-8":
     encoding.encoding = enc
@@ -62,17 +72,27 @@
 
 # test performing a status
 
+
 def getfilectx(repo, memctx, f):
     fctx = memctx.p1()[f]
     data, flags = fctx.data(), fctx.flags()
     if f == b'foo':
         data += b'bar\n'
     return context.memfilectx(
-        repo, memctx, f, data, b'l' in flags, b'x' in flags)
+        repo, memctx, f, data, b'l' in flags, b'x' in flags
+    )
+
 
 ctxa = repo[0]
-ctxb = context.memctx(repo, [ctxa.node(), None], b"test diff", [b"foo"],
-                      getfilectx, ctxa.user(), ctxa.date())
+ctxb = context.memctx(
+    repo,
+    [ctxa.node(), None],
+    b"test diff",
+    [b"foo"],
+    getfilectx,
+    ctxa.user(),
+    ctxa.date(),
+)
 
 print(ctxb.status(ctxa))
 
@@ -114,11 +134,13 @@
 print('wctx._status=%s' % (str(wctx._status)))
 
 print('=== with "pattern match":')
-print(actx1.status(other=wctx,
-                   match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
+print(
+    actx1.status(other=wctx, match=scmutil.matchfiles(repo, [b'bar-m', b'foo']))
+)
 print('wctx._status=%s' % (str(wctx._status)))
-print(actx2.status(other=wctx,
-                   match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
+print(
+    actx2.status(other=wctx, match=scmutil.matchfiles(repo, [b'bar-m', b'foo']))
+)
 print('wctx._status=%s' % (str(wctx._status)))
 
 print('=== with "always match" and "listclean=True":')
@@ -129,12 +151,12 @@
 
 print("== checking workingcommitctx.status:")
 
-wcctx = context.workingcommitctx(repo,
-                                 scmutil.status([b'bar-m'],
-                                                [b'bar-a'],
-                                                [],
-                                                [], [], [], []),
-                                 text=b'', date=b'0 0')
+wcctx = context.workingcommitctx(
+    repo,
+    scmutil.status([b'bar-m'], [b'bar-a'], [], [], [], [], []),
+    text=b'',
+    date=b'0 0',
+)
 print('wcctx._status=%s' % (str(wcctx._status)))
 
 print('=== with "always match":')
@@ -150,21 +172,35 @@
 print('wcctx._status=%s' % (str(wcctx._status)))
 
 print('=== with "pattern match":')
-print(actx1.status(other=wcctx,
-                   match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
+print(
+    actx1.status(
+        other=wcctx, match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])
+    )
+)
 print('wcctx._status=%s' % (str(wcctx._status)))
-print(actx2.status(other=wcctx,
-                   match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])))
+print(
+    actx2.status(
+        other=wcctx, match=scmutil.matchfiles(repo, [b'bar-m', b'foo'])
+    )
+)
 print('wcctx._status=%s' % (str(wcctx._status)))
 
 print('=== with "pattern match" and "listclean=True":')
-print(actx1.status(other=wcctx,
-                   match=scmutil.matchfiles(repo, [b'bar-r', b'foo']),
-                   listclean=True))
+print(
+    actx1.status(
+        other=wcctx,
+        match=scmutil.matchfiles(repo, [b'bar-r', b'foo']),
+        listclean=True,
+    )
+)
 print('wcctx._status=%s' % (str(wcctx._status)))
-print(actx2.status(other=wcctx,
-                   match=scmutil.matchfiles(repo, [b'bar-r', b'foo']),
-                   listclean=True))
+print(
+    actx2.status(
+        other=wcctx,
+        match=scmutil.matchfiles(repo, [b'bar-r', b'foo']),
+        listclean=True,
+    )
+)
 print('wcctx._status=%s' % (str(wcctx._status)))
 
 os.chdir('..')
@@ -180,17 +216,19 @@
     with open(i, 'wb') as f:
         f.write(i)
     status = scmutil.status([], [i], [], [], [], [], [])
-    ctx = context.workingcommitctx(repo, status, text=i, user=b'test@test.com',
-                                   date=(0, 0))
-    ctx.p1().manifest() # side effect: cache manifestctx
+    ctx = context.workingcommitctx(
+        repo, status, text=i, user=b'test@test.com', date=(0, 0)
+    )
+    ctx.p1().manifest()  # side effect: cache manifestctx
     n = repo.commitctx(ctx)
     printb(b'commit %s: %s' % (i, hex(n)))
 
     # touch 00manifest.i mtime so storecache could expire.
     # repo.__dict__['manifestlog'] is deleted by transaction releasefn.
     st = repo.svfs.stat(b'00manifest.i')
-    repo.svfs.utime(b'00manifest.i',
-                    (st[stat.ST_MTIME] + 1, st[stat.ST_MTIME] + 1))
+    repo.svfs.utime(
+        b'00manifest.i', (st[stat.ST_MTIME] + 1, st[stat.ST_MTIME] + 1)
+    )
 
     # read the file just committed
     try:
--- a/tests/test-contrib-check-commit.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-contrib-check-commit.t	Mon Oct 21 11:09:48 2019 -0400
@@ -130,6 +130,4 @@
    This has no topic and ends with a period.
   7: don't add trailing period on summary line
    This has no topic and ends with a period.
-  20: adds a function with foo_bar naming
-   + def blah_blah(x):
   [1]
--- a/tests/test-contrib-perf.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-contrib-perf.t	Mon Oct 21 11:09:48 2019 -0400
@@ -105,15 +105,15 @@
    perfctxfiles  (no help text available)
    perfdiffwd    Profile diff of working directory changes
    perfdirfoldmap
-                 (no help text available)
+                 benchmark a 'dirstate._map.dirfoldmap.get()' request
    perfdirs      (no help text available)
-   perfdirstate  (no help text available)
+   perfdirstate  benchmark the time of various dirstate operations
    perfdirstatedirs
-                 (no help text available)
+                 benchmark a 'dirstate.hasdir' call from an empty 'dirs' cache
    perfdirstatefoldmap
-                 (no help text available)
+                 benchmark a 'dirstate._map.filefoldmap.get()' request
    perfdirstatewrite
-                 (no help text available)
+                 benchmark the time it takes to write a dirstate to disk
    perfdiscovery
                  benchmark discovery between local repo and the peer at given
                  path
@@ -172,7 +172,7 @@
    perfrevrange  (no help text available)
    perfrevset    benchmark the execution time of a revset
    perfstartup   (no help text available)
-   perfstatus    (no help text available)
+   perfstatus    benchmark the performance of a single status call
    perftags      (no help text available)
    perftemplating
                  test the rendering time of a given template
@@ -205,6 +205,8 @@
   $ hg perfdirfoldmap
   $ hg perfdirs
   $ hg perfdirstate
+  $ hg perfdirstate --contains
+  $ hg perfdirstate --iteration
   $ hg perfdirstatedirs
   $ hg perfdirstatefoldmap
   $ hg perfdirstatewrite
--- a/tests/test-copies-in-changeset.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-copies-in-changeset.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1,4 +1,6 @@
+#testcases extra sidedata
 
+#if extra
   $ cat >> $HGRCPATH << EOF
   > [experimental]
   > copies.write-to=changeset-only
@@ -7,15 +9,53 @@
   > changesetcopies = log -r . -T 'files: {files}
   >   {extras % "{ifcontains("files", key, "{key}: {value}\n")}"}
   >   {extras % "{ifcontains("copies", key, "{key}: {value}\n")}"}'
+  > EOF
+#endif
+
+#if sidedata
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > exp-use-copies-side-data-changeset = yes
+  > EOF
+#endif
+
+  $ cat >> $HGRCPATH << EOF
+  > [alias]
   > showcopies = log -r . -T '{file_copies % "{source} -> {name}\n"}'
   > [extensions]
   > rebase =
+  > split =
   > EOF
 
 Check that copies are recorded correctly
 
   $ hg init repo
   $ cd repo
+#if sidedata
+  $ hg debugformat -v
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:          yes    yes      no
+  copies-sdc:        yes    yes      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zlib   zlib    zlib
+  compression-level: default default default
+#else
+  $ hg debugformat -v
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:           no     no      no
+  copies-sdc:         no     no      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zlib   zlib    zlib
+  compression-level: default default default
+#endif
   $ echo a > a
   $ hg add a
   $ hg ci -m initial
@@ -23,6 +63,9 @@
   $ hg cp a c
   $ hg cp a d
   $ hg ci -m 'copy a to b, c, and d'
+
+#if extra
+
   $ hg changesetcopies
   files: b c d
   filesadded: 0
@@ -32,29 +75,59 @@
   p1copies: 0\x00a (esc)
   1\x00a (esc)
   2\x00a (esc)
+#else
+  $ hg debugsidedata -c -v -- -1
+  2 sidedata entries
+   entry-0010 size 11
+    '0\x00a\n1\x00a\n2\x00a'
+   entry-0012 size 5
+    '0\n1\n2'
+#endif
+
   $ hg showcopies
   a -> b
   a -> c
   a -> d
+
+#if extra
+
   $ hg showcopies --config experimental.copies.read-from=compatibility
   a -> b
   a -> c
   a -> d
   $ hg showcopies --config experimental.copies.read-from=filelog-only
 
+#endif
+
 Check that renames are recorded correctly
 
   $ hg mv b b2
   $ hg ci -m 'rename b to b2'
+
+#if extra
+
   $ hg changesetcopies
   files: b b2
   filesadded: 1
   filesremoved: 0
   
   p1copies: 1\x00b (esc)
+
+#else
+  $ hg debugsidedata -c -v -- -1
+  3 sidedata entries
+   entry-0010 size 3
+    '1\x00b'
+   entry-0012 size 1
+    '1'
+   entry-0013 size 1
+    '0'
+#endif
+
   $ hg showcopies
   b -> b2
 
+
 Rename onto existing file. This should get recorded in the changeset files list and in the extras,
 even though there is no filelog entry.
 
@@ -62,20 +135,56 @@
   $ hg st --copies
   M c
     b2
+
+#if extra
+
   $ hg debugindex c
      rev linkrev nodeid       p1           p2
        0       1 b789fdd96dc2 000000000000 000000000000
+
+#else
+
+  $ hg debugindex c
+     rev linkrev nodeid       p1           p2
+       0       1 37d9b5d994ea 000000000000 000000000000
+
+#endif
+
+
   $ hg ci -m 'move b onto d'
+
+#if extra
+
   $ hg changesetcopies
   files: c
   
   p1copies: 0\x00b2 (esc)
+
+#else
+  $ hg debugsidedata -c -v -- -1
+  1 sidedata entries
+   entry-0010 size 4
+    '0\x00b2'
+#endif
+
   $ hg showcopies
   b2 -> c
+
+#if extra
+
   $ hg debugindex c
      rev linkrev nodeid       p1           p2
        0       1 b789fdd96dc2 000000000000 000000000000
 
+#else
+
+  $ hg debugindex c
+     rev linkrev nodeid       p1           p2
+       0       1 37d9b5d994ea 000000000000 000000000000
+       1       3 029625640347 000000000000 000000000000
+
+#endif
+
 Create a merge commit with copying done during merge.
 
   $ hg co 0
@@ -95,6 +204,9 @@
 File 'f' exists only in p1, so 'i' should be from p1
   $ hg cp f i
   $ hg ci -m 'merge'
+
+#if extra
+
   $ hg changesetcopies
   files: g h i
   filesadded: 0
@@ -104,6 +216,18 @@
   p1copies: 0\x00a (esc)
   2\x00f (esc)
   p2copies: 1\x00d (esc)
+
+#else
+  $ hg debugsidedata -c -v -- -1
+  3 sidedata entries
+   entry-0010 size 7
+    '0\x00a\n2\x00f'
+   entry-0011 size 3
+    '1\x00d'
+   entry-0012 size 5
+    '0\n1\n2'
+#endif
+
   $ hg showcopies
   a -> g
   d -> h
@@ -112,6 +236,7 @@
 Test writing to both changeset and filelog
 
   $ hg cp a j
+#if extra
   $ hg ci -m 'copy a to j' --config experimental.copies.write-to=compatibility
   $ hg changesetcopies
   files: j
@@ -120,6 +245,15 @@
   
   p1copies: 0\x00a (esc)
   p2copies: 
+#else
+  $ hg ci -m 'copy a to j'
+  $ hg debugsidedata -c -v -- -1
+  2 sidedata entries
+   entry-0010 size 3
+    '0\x00a'
+   entry-0012 size 1
+    '0'
+#endif
   $ hg debugdata j 0
   \x01 (esc)
   copy: a
@@ -132,9 +266,31 @@
   a -> j
   $ hg showcopies --config experimental.copies.read-from=filelog-only
   a -> j
+Existing copy information in the changeset gets removed on amend when the
+copy information is written only to the filelog
+#if extra
+  $ hg ci --amend -m 'copy a to j, v2' \
+  > --config experimental.copies.write-to=filelog-only
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-*-amend.hg (glob)
+  $ hg changesetcopies
+  files: j
+  
+#else
+  $ hg ci --amend -m 'copy a to j, v2'
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-*-amend.hg (glob)
+  $ hg debugsidedata -c -v -- -1
+  2 sidedata entries
+   entry-0010 size 3
+    '0\x00a'
+   entry-0012 size 1
+    '0'
+#endif
+  $ hg showcopies --config experimental.copies.read-from=filelog-only
+  a -> j
 The entries should be written to extras even if they're empty (so the client
 won't have to fall back to reading from filelogs)
   $ echo x >> j
+#if extra
   $ hg ci -m 'modify j' --config experimental.copies.write-to=compatibility
   $ hg changesetcopies
   files: j
@@ -143,25 +299,47 @@
   
   p1copies: 
   p2copies: 
+#else
+  $ hg ci -m 'modify j'
+  $ hg debugsidedata -c -v -- -1
+#endif
 
 Test writing only to filelog
 
   $ hg cp a k
+#if extra
   $ hg ci -m 'copy a to k' --config experimental.copies.write-to=filelog-only
+
   $ hg changesetcopies
   files: k
   
+#else
+  $ hg ci -m 'copy a to k'
+  $ hg debugsidedata -c -v -- -1
+  2 sidedata entries
+   entry-0010 size 3
+    '0\x00a'
+   entry-0012 size 1
+    '0'
+#endif
+
   $ hg debugdata k 0
   \x01 (esc)
   copy: a
   copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
   \x01 (esc)
   a
+#if extra
   $ hg showcopies
+
   $ hg showcopies --config experimental.copies.read-from=compatibility
   a -> k
   $ hg showcopies --config experimental.copies.read-from=filelog-only
   a -> k
+#else
+  $ hg showcopies
+  a -> k
+#endif
 
   $ cd ..
 
@@ -177,11 +355,139 @@
   $ hg mv a b
   $ hg ci -qm 'rename a to b'
   $ hg rebase -d 1 --config rebase.experimental.inmemory=yes
-  rebasing 2:fc7287ac5b9b "rename a to b" (tip)
+  rebasing 2:* "rename a to b" (tip) (glob)
   merging a and b to b
-  saved backup bundle to $TESTTMP/rebase-rename/.hg/strip-backup/fc7287ac5b9b-8f2a95ec-rebase.hg
+  saved backup bundle to $TESTTMP/rebase-rename/.hg/strip-backup/*-*-rebase.hg (glob)
   $ hg st --change . --copies
   A b
     a
   R a
   $ cd ..
+
+Test splitting a commit
+
+  $ hg init split
+  $ cd split
+  $ echo a > a
+  $ echo b > b
+  $ hg ci -Aqm 'add a and b'
+  $ echo a2 > a
+  $ hg mv b c
+  $ hg ci -m 'modify a, move b to c'
+  $ hg --config ui.interactive=yes split <<EOF
+  > y
+  > y
+  > n
+  > y
+  > EOF
+  diff --git a/a b/a
+  1 hunks, 1 lines changed
+  examine changes to 'a'?
+  (enter ? for help) [Ynesfdaq?] y
+  
+  @@ -1,1 +1,1 @@
+  -a
+  +a2
+  record this change to 'a'?
+  (enter ? for help) [Ynesfdaq?] y
+  
+  diff --git a/b b/c
+  rename from b
+  rename to c
+  examine changes to 'b' and 'c'?
+  (enter ? for help) [Ynesfdaq?] n
+  
+  created new head
+  diff --git a/b b/c
+  rename from b
+  rename to c
+  examine changes to 'b' and 'c'?
+  (enter ? for help) [Ynesfdaq?] y
+  
+  saved backup bundle to $TESTTMP/split/.hg/strip-backup/*-*-split.hg (glob)
+  $ cd ..
+
+Test committing half a rename
+
+  $ hg init partial
+  $ cd partial
+  $ echo a > a
+  $ hg ci -Aqm 'add a'
+  $ hg mv a b
+  $ hg ci -m 'remove a' a
+
+#if sidedata
+
+Test upgrading/downgrading to sidedata storage
+==============================================
+
+downgrading (keeping some sidedata)
+
+  $ hg debugformat -v
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:          yes    yes      no
+  copies-sdc:        yes    yes      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zlib   zlib    zlib
+  compression-level: default default default
+  $ hg debugsidedata -c -- 0
+  1 sidedata entries
+   entry-0012 size 1
+  $ hg debugsidedata -c -- 1
+  1 sidedata entries
+   entry-0013 size 1
+  $ hg debugsidedata -m -- 0
+  $ cat << EOF > .hg/hgrc
+  > [format]
+  > exp-use-side-data = yes
+  > exp-use-copies-side-data-changeset = no
+  > EOF
+  $ hg debugupgraderepo --run --quiet --no-backup > /dev/null
+  $ hg debugformat -v
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:          yes    yes      no
+  copies-sdc:         no     no      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zlib   zlib    zlib
+  compression-level: default default default
+  $ hg debugsidedata -c -- 0
+  $ hg debugsidedata -c -- 1
+  $ hg debugsidedata -m -- 0
+
+upgrading
+
+  $ cat << EOF > .hg/hgrc
+  > [format]
+  > exp-use-copies-side-data-changeset = yes
+  > EOF
+  $ hg debugupgraderepo --run --quiet --no-backup > /dev/null
+  $ hg debugformat -v
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:          yes    yes      no
+  copies-sdc:        yes    yes      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zlib   zlib    zlib
+  compression-level: default default default
+  $ hg debugsidedata -c -- 0
+  1 sidedata entries
+   entry-0012 size 1
+  $ hg debugsidedata -c -- 1
+  1 sidedata entries
+   entry-0013 size 1
+  $ hg debugsidedata -m -- 0
+
+#endif
+
+  $ cd ..
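
For reference, the upgrade/downgrade round-trip above reduces to flipping two
experimental format options and rewriting the repository. A minimal sketch
using the same option names as this test (both options are experimental and
may be renamed):

  $ cat >> .hg/hgrc << EOF
  > [format]
  > exp-use-side-data = yes
  > exp-use-copies-side-data-changeset = yes
  > EOF
  $ hg debugupgraderepo --run --quiet --no-backup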
--- a/tests/test-copies-unrelated.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-copies-unrelated.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1,4 +1,4 @@
-#testcases filelog compatibility changeset
+#testcases filelog compatibility changeset sidedata
 
   $ cat >> $HGRCPATH << EOF
   > [extensions]
@@ -22,6 +22,13 @@
   > EOF
 #endif
 
+#if sidedata
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > exp-use-copies-side-data-changeset = yes
+  > EOF
+#endif
+
   $ REPONUM=0
   $ newrepo() {
   >     cd $TESTTMP
@@ -341,7 +348,11 @@
      a
   $ hg debugpathcopies 1 5
   x -> y (no-filelog !)
-#if filelog
+#if no-filelog
+  $ hg graft -r 2
+  grafting 2:* "modify x" (glob)
+  merging y and x to y
+#else
 BROKEN: This should succeed and merge the changes from x into y
   $ hg graft -r 2
   grafting 2:* "modify x" (glob)
@@ -351,10 +362,6 @@
   abort: unresolved conflicts, can't continue
   (use 'hg resolve' and 'hg graft --continue')
   [255]
-#else
-  $ hg graft -r 2
-  grafting 2:* "modify x" (glob)
-  merging y and x to y
 #endif
   $ hg co -qC 2
 BROKEN: This should succeed and merge the changes from x into y
@@ -387,3 +394,63 @@
   $ hg debugpathcopies 2 1
   $ hg graft -r 1
   grafting 1:* "copy x to y" (glob)
+
+Copies involving a merge of multiple roots.
+
+  $ newrepo
+  $ echo a > a
+  $ hg ci -Aqm 'add a'
+  $ echo a >> a
+  $ hg ci -Aqm 'update a'
+  $ echo a >> a
+  $ hg ci -Aqm 'update a'
+
+  $ hg up null
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo b > a
+  $ hg ci -Aqm 'add a'
+  $ hg mv a b
+  $ hg ci -Aqm 'move a to b'
+  $ echo b >> b
+  $ hg ci -Aqm 'update b'
+  $ hg merge 0
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "merge with other branch"
+  $ echo a >> a
+  $ echo a >> a
+  $ echo b >> b
+  $ hg ci -Aqm 'update a and b'
+  $ hg l
+  @  7 update a and b
+  |  a b
+  o    6 merge with other branch
+  |\
+  | o  5 update b
+  | |  b
+  | o  4 move a to b
+  | |  a b
+  | o  3 add a
+  |    a
+  | o  2 update a
+  | |  a
+  | o  1 update a
+  |/   a
+  o  0 add a
+     a
+  $ hg cat a -r 7
+  a
+  a
+  a
+  $ hg cat a -r 2
+  a
+  a
+  a
+  $ hg cat a -r 0
+  a
+  $ hg debugpathcopies 7 2
+  $ hg debugpathcopies 2 7
+  $ hg merge 2
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
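The debugpathcopies calls above also show the command's contract: it prints
one 'source -> destination' line per copy detected between the two revisions,
and prints nothing (as in the merge-of-multiple-roots case) when none survive
the path between them. A schematic invocation, with illustrative revisions
and output:

  $ hg debugpathcopies 1 5
  x -> y
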
--- a/tests/test-copies.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-copies.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1,4 +1,4 @@
-#testcases filelog compatibility changeset
+#testcases filelog compatibility changeset sidedata
 
   $ cat >> $HGRCPATH << EOF
   > [extensions]
@@ -22,6 +22,13 @@
   > EOF
 #endif
 
+#if sidedata
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > exp-use-copies-side-data-changeset = yes
+  > EOF
+#endif
+
   $ REPONUM=0
   $ newrepo() {
   >     cd $TESTTMP
@@ -330,12 +337,14 @@
      x
   $ hg debugp1copies -r 2
   x -> z (changeset !)
+  x -> z (sidedata !)
   $ hg debugp2copies -r 2
-  x -> z (no-changeset !)
+  x -> z (no-changeset no-sidedata !)
   $ hg debugpathcopies 1 2
   x -> z (changeset !)
+  x -> z (sidedata !)
   $ hg debugpathcopies 0 2
-  x -> z (no-changeset !)
+  x -> z (no-changeset no-sidedata !)
 
 Copy x->y on one side of merge and copy x->z on the other side. Pathcopies from one parent
 of the merge to the merge should include the copy from the other side.
@@ -445,8 +454,7 @@
   x -> z (no-filelog !)
   $ hg debugpathcopies 0 4
   x -> z (filelog !)
-  y -> z (compatibility !)
-  y -> z (changeset !)
+  y -> z (no-filelog !)
   $ hg debugpathcopies 1 5
   y -> z (no-filelog !)
   $ hg debugpathcopies 2 5
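
A note on the '(changeset !)' and '(no-changeset no-sidedata !)' suffixes
above: this is the .t harness's conditional-output syntax. A line ending in
'(feature !)' is only required when that #testcases case (or hghave feature)
is active, which is how one test file covers the filelog, compatibility,
changeset, and sidedata storage variants at once. Roughly:

  #testcases extra sidedata
  $ hg some-command
  output expected in every case
  output expected only under sidedata (sidedata !)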
--- a/tests/test-debugcommands.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-debugcommands.t	Mon Oct 21 11:09:48 2019 -0400
@@ -546,7 +546,12 @@
   .hg/cache/rbc-revs-v1
   .hg/cache/rbc-names-v1
   .hg/cache/hgtagsfnodes1
+  .hg/cache/branch2-visible-hidden
+  .hg/cache/branch2-visible
+  .hg/cache/branch2-served.hidden
   .hg/cache/branch2-served
+  .hg/cache/branch2-immutable
+  .hg/cache/branch2-base
 
 Test debugcolor
 
--- a/tests/test-demandimport.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-demandimport.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,6 +1,7 @@
 from __future__ import absolute_import, print_function
 
 from mercurial import demandimport
+
 demandimport.enable()
 
 import os
@@ -12,8 +13,9 @@
 ispy3 = sys.version_info[0] >= 3
 
 # Only run if demandimport is allowed
-if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
-                    'demandimport']):
+if subprocess.call(
+    ['python', '%s/hghave' % os.environ['TESTDIR'], 'demandimport']
+):
     sys.exit(80)
 
 # We rely on assert, which gets optimized out.
@@ -33,8 +35,11 @@
 if os.name != 'nt':
     try:
         import distutils.msvc9compiler
-        print('distutils.msvc9compiler needs to be an immediate '
-              'importerror on non-windows platforms')
+
+        print(
+            'distutils.msvc9compiler needs to be an immediate '
+            'importerror on non-windows platforms'
+        )
         distutils.msvc9compiler
     except ImportError:
         pass
@@ -42,6 +47,8 @@
 import re
 
 rsub = re.sub
+
+
 def f(obj):
     l = repr(obj)
     l = rsub("0x[0-9a-fA-F]+", "0x?", l)
@@ -49,6 +56,7 @@
     l = rsub("'<[a-z]*>'", "'<whatever>'", l)
     return l
 
+
 demandimport.disable()
 os.environ['HGDEMANDIMPORT'] = 'disable'
 # this enable call should not actually enable demandimport!
--- a/tests/test-dispatch.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-dispatch.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,15 +1,15 @@
 from __future__ import absolute_import, print_function
 import os
 import sys
-from mercurial import (
-    dispatch,
-)
+from mercurial import dispatch
+
 
 def printb(data, end=b'\n'):
     out = getattr(sys.stdout, 'buffer', sys.stdout)
     out.write(data + end)
     out.flush()
 
+
 def testdispatch(cmd):
     """Simple wrapper around dispatch.dispatch()
 
@@ -20,6 +20,7 @@
     result = dispatch.dispatch(req)
     printb(b"result: %r" % (result,))
 
+
 testdispatch(b"init test1")
 os.chdir('test1')
 
--- a/tests/test-doctest.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-doctest.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,22 +7,30 @@
 import re
 import sys
 
-ispy3 = (sys.version_info[0] >= 3)
+ispy3 = sys.version_info[0] >= 3
 
 if 'TERM' in os.environ:
     del os.environ['TERM']
 
+
 class py3docchecker(doctest.OutputChecker):
     def check_output(self, want, got, optionflags):
         want2 = re.sub(r'''\bu(['"])(.*?)\1''', r'\1\2\1', want)  # py2: u''
         got2 = re.sub(r'''\bb(['"])(.*?)\1''', r'\1\2\1', got)  # py3: b''
         # py3: <exc.name>: b'<msg>' -> <name>: <msg>
         #      <exc.name>: <others> -> <name>: <others>
-        got2 = re.sub(r'''^mercurial\.\w+\.(\w+): (['"])(.*?)\2''', r'\1: \3',
-                      got2, re.MULTILINE)
+        got2 = re.sub(
+            r'''^mercurial\.\w+\.(\w+): (['"])(.*?)\2''',
+            r'\1: \3',
+            got2,
+            re.MULTILINE,
+        )
         got2 = re.sub(r'^mercurial\.\w+\.(\w+): ', r'\1: ', got2, re.MULTILINE)
-        return any(doctest.OutputChecker.check_output(self, w, g, optionflags)
-                   for w, g in [(want, got), (want2, got2)])
+        return any(
+            doctest.OutputChecker.check_output(self, w, g, optionflags)
+            for w, g in [(want, got), (want2, got2)]
+        )
+
 
 def testmod(name, optionflags=0, testtarget=None):
     __import__(name)
@@ -40,6 +48,7 @@
         runner.run(test)
     runner.summarize()
 
+
 testmod('mercurial.changegroup')
 testmod('mercurial.changelog')
 testmod('mercurial.cmdutil')
--- a/tests/test-duplicateoptions.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-duplicateoptions.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,7 +10,8 @@
 
 try:
     import sqlite3
-    del sqlite3 # unused, just checking that import works
+
+    del sqlite3  # unused, just checking that import works
 except ImportError:
     ignore.add(b'sqlitestore')
 
@@ -41,8 +42,9 @@
     seenshort = globalshort.copy()
     seenlong = globallong.copy()
     for option in entry[1]:
-        if ((option[0] and option[0] in seenshort) or
-            (option[1] and option[1] in seenlong)):
+        if (option[0] and option[0] in seenshort) or (
+            option[1] and option[1] in seenlong
+        ):
             print("command '" + cmd + "' has duplicate option " + str(option))
         seenshort.add(option[0])
         seenlong.add(option[1])
--- a/tests/test-encoding-func.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-encoding-func.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,9 +2,8 @@
 
 import unittest
 
-from mercurial import (
-    encoding,
-)
+from mercurial import encoding
+
 
 class IsasciistrTest(unittest.TestCase):
     asciistrs = [
@@ -28,12 +27,14 @@
                 t[i] |= 0x80
                 self.assertFalse(encoding.isasciistr(bytes(t)))
 
+
 class LocalEncodingTest(unittest.TestCase):
     def testasciifastpath(self):
         s = b'\0' * 100
         self.assertTrue(s is encoding.tolocal(s))
         self.assertTrue(s is encoding.fromlocal(s))
 
+
 class Utf8bEncodingTest(unittest.TestCase):
     def setUp(self):
         self.origencoding = encoding.encoding
@@ -75,6 +76,8 @@
         self.assertEqual(l, b'\xc5\xed')  # lossless
         self.assertEqual(s, encoding.toutf8b(l))  # convert back to utf-8
 
+
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
--- a/tests/test-eol-clone.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-eol-clone.t	Mon Oct 21 11:09:48 2019 -0400
@@ -21,7 +21,8 @@
   adding .hgeol
   adding a.txt
 
-Clone
+Test committing the removal of .hgeol, and how removing it immediately makes
+the automatic changes explicit and committable.
 
   $ cd ..
   $ hg clone repo repo-2
@@ -37,21 +38,63 @@
   second
   third
   $ hg remove .hgeol
+  $ touch a.txt *  # ensure consistent st dirtiness checks, ignoring dirstate timing
+  $ hg st -v --debug
+  M a.txt
+  R .hgeol
   $ hg commit -m 'remove eol'
+  $ hg exp
+  # HG changeset patch
+  # User test
+  # Date 0 0
+  #      Thu Jan 01 00:00:00 1970 +0000
+  # Node ID 3c20c2d90333b6ecdc8f7aa8f9b73223c7c7a608
+  # Parent  90f94e2cf4e24628afddd641688dfe4cd476d6e4
+  remove eol
+  
+  diff -r 90f94e2cf4e2 -r 3c20c2d90333 .hgeol
+  --- a/.hgeol	Thu Jan 01 00:00:00 1970 +0000
+  +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,2 +0,0 @@
+  -[patterns]
+  -**.txt = native
+  diff -r 90f94e2cf4e2 -r 3c20c2d90333 a.txt
+  --- a/a.txt	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a.txt	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,3 +1,3 @@
+  -first
+  -second
+  -third
+  +first\r (esc)
+  +second\r (esc)
+  +third\r (esc)
   $ hg push --quiet
   $ cd ..
 
-Test clone of repo with .hgeol in working dir, but no .hgeol in tip
+Test clone of a repo with .hgeol in the working dir but no .hgeol in the
+default checkout revision (tip). The clone is correctly updated to a
+consistent state, with the exact content checked out without filtering,
+ignoring the current .hgeol in the source repo:
 
-  $ hg clone repo repo-3
+  $ cat repo/.hgeol
+  [patterns]
+  **.txt = native
+  $ hg clone repo repo-3 -v --debug
+  linked 7 files
   updating to branch default
+  resolving manifests
+   branchmerge: False, force: False, partial: False
+   ancestor: 000000000000, local: 000000000000+, remote: 3c20c2d90333
+  calling hook preupdate.eol: hgext.eol.preupdate
+   a.txt: remote created -> g
+  getting a.txt
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd repo-3
 
   $ cat a.txt
-  first
-  second
-  third
+  first\r (esc)
+  second\r (esc)
+  third\r (esc)
 
 Test clone of revision with .hgeol
 
--- a/tests/test-eol-hook.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-eol-hook.t	Mon Oct 21 11:09:48 2019 -0400
@@ -39,7 +39,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   error: pretxnchangegroup hook failed: end-of-line check failed:
     a.txt in a8ee6548cd86 should not have CRLF line endings
   transaction abort!
@@ -67,7 +66,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   error: pretxnchangegroup hook failed: end-of-line check failed:
     crlf.txt in 004ba2132725 should not have LF line endings
   transaction abort!
@@ -95,7 +93,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   error: pretxnchangegroup hook failed: end-of-line check failed:
     b.txt in fbcf9b1025f5 should not have CRLF line endings
   transaction abort!
@@ -116,7 +113,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
   error: pretxnchangegroup hook failed: end-of-line check failed:
     b.txt in fbcf9b1025f5 should not have CRLF line endings
   transaction abort!
@@ -137,7 +133,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
   error: pretxnchangegroup hook failed: end-of-line check failed:
     b.txt in fbcf9b1025f5 should not have CRLF line endings
   transaction abort!
@@ -174,7 +169,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 3 changes to 2 files (+1 heads)
   error: pretxnchangegroup hook failed: end-of-line check failed:
     b.txt in fbcf9b1025f5 should not have CRLF line endings
   transaction abort!
@@ -204,7 +198,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 3 changes to 2 files (+1 heads)
   error: pretxnchangegroup hook failed: end-of-line check failed:
     b.txt in fbcf9b1025f5 should not have CRLF line endings
     d.txt in a7040e68714f should not have CRLF line endings
--- a/tests/test-eol-update.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-eol-update.t	Mon Oct 21 11:09:48 2019 -0400
@@ -26,14 +26,17 @@
   > EOF
   > 
   >     printf "first\nsecond\nthird\n" > a.txt
+  >     printf "f\r\n" > f
   >     hg commit --addremove -m 'LF commit'
   > 
   >     cat > .hgeol <<EOF
   > [patterns]
   > **.txt = CRLF
+  > f = LF
   > EOF
   > 
   >     printf "first\r\nsecond\r\nthird\r\n" > a.txt
+  >     printf "f\n" > f
   >     hg commit -m 'CRLF commit'
   > 
   >     cd ..
@@ -83,10 +86,11 @@
   % hg init
   adding .hgeol
   adding a.txt
+  adding f
   $ dotest LF
   
   % hg clone repo repo-LF
-  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   % a.txt (before)
   first\r (esc)
   second\r (esc)
@@ -104,7 +108,7 @@
    third\r (esc)
   % hg update 0
   merging a.txt
-  1 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  2 files updated, 1 files merged, 0 files removed, 0 files unresolved
   % a.txt
   first
   third
@@ -119,7 +123,7 @@
   $ dotest CRLF
   
   % hg clone repo repo-CRLF
-  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   % a.txt (before)
   first\r (esc)
   second\r (esc)
@@ -137,7 +141,7 @@
    third\r (esc)
   % hg update 0
   merging a.txt
-  1 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  2 files updated, 1 files merged, 0 files removed, 0 files unresolved
   % a.txt
   first
   third
@@ -149,4 +153,126 @@
    first
   -second
    third
+
+Test in repo using eol extension, while keeping an eye on how filters are
+applied:
+
+  $ cd repo
+
+  $ hg up -q -c -r null
+  $ cat > .hg/hgrc <<EOF
+  > [extensions]
+  > eol =
+  > EOF
+
+Updating to revision 0, which has no .hgeol, shouldn't use any filters, and
+obviously should leave things as tidy as they were before the clean update.
+
+  $ hg up -c -r 0 -v --debug
+  resolving manifests
+   branchmerge: False, force: False, partial: False
+   ancestor: 000000000000, local: 000000000000+, remote: 15cbdf8ca3db
+  calling hook preupdate.eol: hgext.eol.preupdate
+   .hgeol: remote created -> g
+  getting .hgeol
+  filtering .hgeol through isbinary
+   a.txt: remote created -> g
+  getting a.txt
+  filtering a.txt through tolf
+   f: remote created -> g
+  getting f
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg st
+
+  $ hg branch b
+  marked working directory as branch b
+  (branches are permanent and global, did you want a bookmark?)
+  $ hg ci -m b
+
+Merge changes that apply a filter to f:
+
+  $ hg merge 1
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg st
+  M .hgeol
+  M a.txt
+  M f
+  $ hg diff
+  diff --git a/.hgeol b/.hgeol
+  --- a/.hgeol
+  +++ b/.hgeol
+  @@ -1,2 +1,3 @@
+   [patterns]
+  -**.txt = LF
+  +**.txt = CRLF
+  +f = LF
+  diff --git a/a.txt b/a.txt
+  --- a/a.txt
+  +++ b/a.txt
+  @@ -1,3 +1,3 @@
+  -first
+  -second
+  -third
+  +first\r (esc)
+  +second\r (esc)
+  +third\r (esc)
+  diff --git a/f b/f
+  --- a/f
+  +++ b/f
+  @@ -1,1 +1,1 @@
+  -f\r (esc)
+  +f
+
+Abort the merge with up -C to revision 0.
+Note that files are filtered correctly for revision 0: f is not filtered, a.txt
+is filtered with tolf, and everything is left tidy.
+
+  $ touch .hgeol *  # ensure consistent dirtiness checks, ignoring dirstate
+  $ hg up -C -r 0 -v --debug
+  eol: detected change in .hgeol
+  resolving manifests
+   branchmerge: False, force: True, partial: False
+   ancestor: 1db78bdd3bd6+, local: 1db78bdd3bd6+, remote: 15cbdf8ca3db
+  calling hook preupdate.eol: hgext.eol.preupdate
+   .hgeol: remote is newer -> g
+  getting .hgeol
+  filtering .hgeol through isbinary
+   a.txt: remote is newer -> g
+  getting a.txt
+  filtering a.txt through tolf
+   f: remote is newer -> g
+  getting f
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ touch .hgeol *
+  $ hg st --debug
+  eol: detected change in .hgeol
+  filtering .hgeol through isbinary
+  filtering a.txt through tolf
+  skip updating dirstate: identity mismatch (?)
+  $ hg diff
+
+Things were clean, and updating again will not change anything:
+
+  $ touch .hgeol *
+  $ hg up -C -r 0 -v --debug
+  eol: detected change in .hgeol
+  filtering .hgeol through isbinary
+  filtering a.txt through tolf
+  resolving manifests
+   branchmerge: False, force: True, partial: False
+   ancestor: 15cbdf8ca3db+, local: 15cbdf8ca3db+, remote: 15cbdf8ca3db
+  calling hook preupdate.eol: hgext.eol.preupdate
+  starting 4 threads for background file closing (?)
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ touch .hgeol *
+  $ hg st --debug
+  eol: detected change in .hgeol
+  filtering .hgeol through isbinary
+  filtering a.txt through tolf
+
+  $ cd ..
+
   $ rm -r repo
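
As the dotest helper above illustrates, the eol extension is driven entirely
by a versioned .hgeol file; the pattern section used throughout these tests
has the shape:

  $ cat > .hgeol << EOF
  > [patterns]
  > **.txt = CRLF
  > f = LF
  > EOF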
--- a/tests/test-exchange-obsmarkers-case-A1.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-A1.t	Mon Oct 21 11:09:48 2019 -0400
@@ -53,6 +53,7 @@
   $ cd main
   $ mkcommit A
   $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'`
+  1 new obsolescence markers
   $ hg log -G
   @  f5bc6836db60 (draft): A
   |
@@ -201,6 +202,7 @@
   o  a9bdc8b26820 (public): O
   
   $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'`
+  1 new obsolescence markers
   $ inspect_obsmarkers
   obsstore content
   ================
--- a/tests/test-exchange-obsmarkers-case-A2.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-A2.t	Mon Oct 21 11:09:48 2019 -0400
@@ -56,11 +56,13 @@
   $ cd main
   $ mkcommit A
   $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'`
+  1 new obsolescence markers
   $ hg up '.~1'
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ mkcommit B
   created new head
   $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb `getid 'desc(B)'`
+  1 new obsolescence markers
   $ hg log -G
   @  35b183996678 (draft): B
   |
--- a/tests/test-exchange-obsmarkers-case-A3.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-A3.t	Mon Oct 21 11:09:48 2019 -0400
@@ -73,9 +73,11 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  f6298a8ac3a4 (draft): B1
@@ -163,9 +165,11 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  f6298a8ac3a4 (draft): B1
--- a/tests/test-exchange-obsmarkers-case-A4.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-A4.t	Mon Oct 21 11:09:48 2019 -0400
@@ -63,7 +63,9 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A0)'`
+  1 new obsolescence markers
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-A5.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-A5.t	Mon Oct 21 11:09:48 2019 -0400
@@ -65,9 +65,12 @@
   created new head
   $ mkcommit A1
   $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A0)'`
+  1 new obsolescence markers
   $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  8c0a98c83722 (draft): A1
--- a/tests/test-exchange-obsmarkers-case-A6.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-A6.t	Mon Oct 21 11:09:48 2019 -0400
@@ -64,6 +64,7 @@
 create a marker after this
 
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  e5ea8f9c7314 (draft): A1
--- a/tests/test-exchange-obsmarkers-case-A7.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-A7.t	Mon Oct 21 11:09:48 2019 -0400
@@ -51,6 +51,7 @@
   $ hg push -q ../pushdest
   $ hg push -q ../pulldest
   $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'`
+  1 new obsolescence markers
   $ hg log -G --hidden
   @  f5bc6836db60 (draft): A
   |
--- a/tests/test-exchange-obsmarkers-case-B5.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-B5.t	Mon Oct 21 11:09:48 2019 -0400
@@ -70,10 +70,13 @@
   created new head
   $ mkcommit B1
   $ hg debugobsolete --hidden `getid 'desc(A0)'` `getid 'desc(A1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   2 new orphan changesets
   $ hg debugobsolete --hidden aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(B0)'`
+  1 new obsolescence markers
   $ hg debugobsolete --hidden `getid 'desc(B0)'` `getid 'desc(B1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg prune -qd '0 0' 'desc(B1)'
   $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-B6.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-B6.t	Mon Oct 21 11:09:48 2019 -0400
@@ -57,6 +57,7 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg prune -qd '0 0' .
   $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-C2.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-C2.t	Mon Oct 21 11:09:48 2019 -0400
@@ -62,6 +62,7 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  e5ea8f9c7314 (draft): A1
--- a/tests/test-exchange-obsmarkers-case-C3.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-C3.t	Mon Oct 21 11:09:48 2019 -0400
@@ -64,6 +64,7 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg prune -qd '0 0' .
   $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-C4.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-C4.t	Mon Oct 21 11:09:48 2019 -0400
@@ -65,8 +65,10 @@
   $ mkcommit C
   created new head
   $ hg debugobsolete --hidden `getid 'desc(A)'` `getid 'desc(B)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --hidden `getid 'desc(A)'` `getid 'desc(C)'`
+  1 new obsolescence markers
   2 new content-divergent changesets
   $ hg prune -qd '0 0' .
   $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-D1.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-D1.t	Mon Oct 21 11:09:48 2019 -0400
@@ -61,9 +61,11 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg prune -d '0 0' 'desc(B)'
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg strip --hidden -q 'desc(A0)'
   $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-D2.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-D2.t	Mon Oct 21 11:09:48 2019 -0400
@@ -54,8 +54,10 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg prune --date '0 0' .
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg strip --hidden -q 'desc(A1)'
   $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-D3.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-D3.t	Mon Oct 21 11:09:48 2019 -0400
@@ -57,8 +57,10 @@
   created new head
   $ mkcommit A1
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg prune -d '0 0' .
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg strip --hidden -q 'desc(A1)'
   $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-D4.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-exchange-obsmarkers-case-D4.t	Mon Oct 21 11:09:48 2019 -0400
@@ -59,12 +59,16 @@
   created new head
   $ mkcommit B1
   $ hg debugobsolete `getid 'desc(A0)'` aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A1)'`
+  1 new obsolescence markers
   $ hg debugobsolete `getid 'desc(B0)'` bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb `getid 'desc(B1)'`
+  1 new obsolescence markers
   $ hg log -G --hidden
   @  069b05c3876d (draft): B1
   |
--- a/tests/test-extensions-wrapfunction.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-extensions-wrapfunction.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,28 +2,36 @@
 
 from mercurial import extensions
 
+
 def genwrapper(x):
     def f(orig, *args, **kwds):
         return [x] + orig(*args, **kwds)
+
     f.x = x
     return f
 
+
 def getid(wrapper):
     return getattr(wrapper, 'x', '-')
 
+
 wrappers = [genwrapper(i) for i in range(5)]
 
+
 class dummyclass(object):
     def getstack(self):
         return ['orig']
 
+
 dummy = dummyclass()
 
+
 def batchwrap(wrappers):
     for w in wrappers:
         extensions.wrapfunction(dummy, 'getstack', w)
         print('wrap %d: %s' % (getid(w), dummy.getstack()))
 
+
 def batchunwrap(wrappers):
     for w in wrappers:
         result = None
@@ -34,9 +42,14 @@
             msg = e.__class__.__name__
         print('unwrap %s: %s: %s' % (getid(w), getid(result), msg))
 
+
 batchwrap(wrappers + [wrappers[0]])
-batchunwrap([(wrappers[i] if i is not None and i >= 0 else None)
-             for i in [3, None, 0, 4, 0, 2, 1, None]])
+batchunwrap(
+    [
+        (wrappers[i] if i is not None and i >= 0 else None)
+        for i in [3, None, 0, 4, 0, 2, 1, None]
+    ]
+)
 
 wrap0 = extensions.wrappedfunction(dummy, 'getstack', wrappers[0])
 wrap1 = extensions.wrappedfunction(dummy, 'getstack', wrappers[1])
@@ -59,6 +72,8 @@
 class callableobj(object):
     def __call__(self):
         return ['orig']
+
+
 dummy.cobj = callableobj()
 extensions.wrapfunction(dummy, 'cobj', wrappers[0])
 print('wrap callable object', dummy.cobj())
--- a/tests/test-fastannotate-revmap.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-fastannotate-revmap.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,19 +13,23 @@
 if pycompat.ispy3:
     xrange = range
 
+
 def genhsh(i):
     return pycompat.bytechr(i) + b'\0' * 19
 
+
 def gettemppath():
     fd, path = tempfile.mkstemp()
     os.close(fd)
     os.unlink(path)
     return path
 
+
 def ensure(condition):
     if not condition:
         raise RuntimeError('Unexpected')
 
+
 def testbasicreadwrite():
     path = gettemppath()
 
@@ -36,7 +40,19 @@
     ensure(rm.hsh2rev(b'\0' * 20) is None)
 
     paths = [
-        b'', b'a', None, b'b', b'b', b'c', b'c', None, b'a', b'b', b'a', b'a']
+        b'',
+        b'a',
+        None,
+        b'b',
+        b'b',
+        b'c',
+        b'c',
+        None,
+        b'a',
+        b'b',
+        b'a',
+        b'a',
+    ]
     for i in xrange(1, 5):
         ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i]) == i)
 
@@ -56,8 +72,10 @@
 
     # append without calling save() explicitly
     for i in xrange(5, 12):
-        ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i],
-                         flush=True) == i)
+        ensure(
+            rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i], flush=True)
+            == i
+        )
 
     # re-load and verify
     rm = revmap.revmap(path)
@@ -85,6 +103,7 @@
     except Exception:
         pass
 
+
 def testcorruptformat():
     path = gettemppath()
 
@@ -127,12 +146,15 @@
 
     os.unlink(path)
 
+
 def testcopyfrom():
     path = gettemppath()
     rm = revmap.revmap(path)
     for i in xrange(1, 10):
-        ensure(rm.append(genhsh(i),
-                         sidebranch=(i & 1), path=(b'%d' % (i // 3))) == i)
+        ensure(
+            rm.append(genhsh(i), sidebranch=(i & 1), path=(b'%d' % (i // 3)))
+            == i
+        )
     rm.flush()
 
     # copy rm to rm2
@@ -148,6 +170,7 @@
     os.unlink(path)
     os.unlink(path2)
 
+
 class fakefctx(object):
     def __init__(self, node, path=None):
         self._node = node
@@ -159,6 +182,7 @@
     def path(self):
         return self._path
 
+
 def testcontains():
     path = gettemppath()
 
@@ -181,6 +205,7 @@
         ensure(fakefctx(genhsh(i), path=(b'%d' % (i // 2))) in rm)
         ensure(fakefctx(genhsh(i), path=b'a') not in rm)
 
+
 def testlastnode():
     path = gettemppath()
     ensure(revmap.getlastnode(path) is None)
@@ -193,6 +218,7 @@
         rm2 = revmap.revmap(path)
         ensure(rm2.rev2hsh(rm2.maxrev) == hsh)
 
+
 testbasicreadwrite()
 testcorruptformat()
 testcopyfrom()
--- a/tests/test-filecache.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-filecache.py	Mon Oct 21 11:09:48 2019 -0400
@@ -4,11 +4,14 @@
 import subprocess
 import sys
 
-if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
-                    'cacheable']):
+if subprocess.call(
+    ['python', '%s/hghave' % os.environ['TESTDIR'], 'cacheable']
+):
     sys.exit(80)
 
 print_ = print
+
+
 def print(*args, **kwargs):
     """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
 
@@ -18,6 +21,7 @@
     print_(*args, **kwargs)
     sys.stdout.flush()
 
+
 from mercurial import (
     extensions,
     hg,
@@ -31,12 +35,12 @@
 if pycompat.ispy3:
     xrange = range
 
+
 class fakerepo(object):
     def __init__(self):
         self._filecache = {}
 
     class fakevfs(object):
-
         def join(self, p):
             return p
 
@@ -60,6 +64,7 @@
             except AttributeError:
                 pass
 
+
 def basic(repo):
     print("* neither file exists")
     # calls function
@@ -137,6 +142,7 @@
     print("* both files changed inode")
     repo.cached
 
+
 def fakeuncacheable():
     def wrapcacheable(orig, *args, **kwargs):
         return False
@@ -145,8 +151,9 @@
         pass
 
     originit = extensions.wrapfunction(util.cachestat, '__init__', wrapinit)
-    origcacheable = extensions.wrapfunction(util.cachestat, 'cacheable',
-                                            wrapcacheable)
+    origcacheable = extensions.wrapfunction(
+        util.cachestat, 'cacheable', wrapcacheable
+    )
 
     for fn in ['x', 'y']:
         try:
@@ -159,6 +166,7 @@
     util.cachestat.cacheable = origcacheable
     util.cachestat.__init__ = originit
 
+
 def test_filecache_synced():
     # test old behavior that caused filecached properties to go out of sync
     os.system('hg init && echo a >> a && hg ci -qAm.')
@@ -174,6 +182,7 @@
     # it
     repo.commit(b'.')
 
+
 def setbeforeget(repo):
     os.remove('x')
     os.remove('y')
@@ -200,6 +209,7 @@
     print("* file y created")
     print(repo.cached)
 
+
 def antiambiguity():
     filename = 'ambigcheck'
 
@@ -236,11 +246,17 @@
 
         # st_mtime should be advanced "repetition * 2" times, because
         # all changes occurred at same time (in sec)
-        expected = (oldstat[stat.ST_MTIME] + repetition * 2) & 0x7fffffff
+        expected = (oldstat[stat.ST_MTIME] + repetition * 2) & 0x7FFFFFFF
         if newstat[stat.ST_MTIME] != expected:
-            print("'newstat[stat.ST_MTIME] %s is not %s (as %s + %s * 2)" %
-                  (newstat[stat.ST_MTIME], expected,
-                   oldstat[stat.ST_MTIME], repetition))
+            print(
+                "'newstat[stat.ST_MTIME] %s is not %s (as %s + %s * 2)"
+                % (
+                    newstat[stat.ST_MTIME],
+                    expected,
+                    oldstat[stat.ST_MTIME],
+                    repetition,
+                )
+            )
 
         # no more examination is needed regardless of result
         break
@@ -251,6 +267,7 @@
         # on other faster platforms can detect problems
         pass
 
+
 print('basic:')
 print()
 basic(fakerepo())
--- a/tests/test-filelog.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-filelog.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,6 +18,7 @@
 
 fl = repo.file(b'foobar')
 
+
 def addrev(text, renamed=False):
     if renamed:
         # data doesn't matter. Just make sure filelog.renamed() returns True
@@ -37,9 +38,11 @@
         if lock:
             lock.release()
 
+
 def error(text):
     print('ERROR: ' + text)
 
+
 textwith = b'\1\nfoo'
 without = b'foo'
 
@@ -49,8 +52,10 @@
 if fl.cmp(node, textwith) or not fl.cmp(node, without):
     error('filelog.cmp for data starting with \\1\\n')
 if fl.size(0) != len(textwith):
-    error('FIXME: This is a known failure of filelog.size for data starting '
-        'with \\1\\n')
+    error(
+        'FIXME: This is a known failure of filelog.size for data starting '
+        'with \\1\\n'
+    )
 
 node = addrev(textwith, renamed=True)
 if not textwith == fl.read(node):
--- a/tests/test-fix-metadata.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-fix-metadata.t	Mon Oct 21 11:09:48 2019 -0400
@@ -43,6 +43,9 @@
   > [extensions]
   > fix =
   > [fix]
+  > metadatafalse:command=cat $TESTTMP/missing
+  > metadatafalse:pattern=metadatafalse
+  > metadatafalse:metadata=false
   > missing:command=cat $TESTTMP/missing
   > missing:pattern=missing
   > missing:metadata=true
@@ -65,6 +68,7 @@
   $ hg init repo
   $ cd repo
 
+  $ printf "old content\n" > metadatafalse
   $ printf "old content\n" > invalid
   $ printf "old content\n" > missing
   $ printf "old content\n" > valid
@@ -72,15 +76,20 @@
 
   $ hg fix -w
   ignored invalid output from fixer tool: invalid
+  fixed metadatafalse in revision 2147483647 using metadatafalse
   ignored invalid output from fixer tool: missing
   fixed valid in revision 2147483647 using valid
   saw "key" 1 times
   fixed 1 files with valid
   fixed the working copy
 
-  $ cat missing invalid valid
+  $ cat metadatafalse
+  new content
+  $ cat missing
   old content
+  $ cat invalid
   old content
+  $ cat valid
   new content
 
   $ cd ..
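
The new metadatafalse tool pins down the default behavior: with
:metadata=false (or unset) the tool's entire output is taken verbatim as the
fixed file content, whereas :metadata=true makes 'hg fix' expect a
machine-readable metadata preamble and ignore output that lacks one (the
'ignored invalid output' lines above). The configuration shape, with an
illustrative tool name and command:

  $ cat >> .hg/hgrc << EOF
  > [fix]
  > mytool:command = mytool --stdin
  > mytool:pattern = glob:**.c
  > mytool:metadata = false
  > EOF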
--- a/tests/test-fix.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-fix.t	Mon Oct 21 11:09:48 2019 -0400
@@ -147,9 +147,20 @@
     {first}   The 1-based line number of the first line in the modified range
     {last}    The 1-based line number of the last line in the modified range
   
+  Deleted sections of a file will be ignored by :linerange, because there is no
+  corresponding line range in the version being fixed.
+  
+  By default, tools that set :linerange will only be executed if there is at
+  least one changed line range. This is meant to prevent accidents like running
+  a code formatter in such a way that it unexpectedly reformats the whole file.
+  If such a tool needs to operate on unchanged files, it should set the
+  :skipclean suboption to false.
+  
   The :pattern suboption determines which files will be passed through each
-  configured tool. See 'hg help patterns' for possible values. If there are file
-  arguments to 'hg fix', the intersection of these patterns is used.
+  configured tool. See 'hg help patterns' for possible values. However, all
+  patterns are relative to the repo root, even though 'hg help patterns' says
+  they are relative to the current working directory. If there are file
+  arguments to 'hg fix', the intersection of these patterns is used.
   
   There is also a configurable limit for the maximum size of file that will be
   processed by 'hg fix':
@@ -215,6 +226,13 @@
       executions that modified a file. This aggregates the same metadata
       previously passed to the "postfixfile" hook.
   
+  Fixer tools are run in the repository's root directory. This allows them to
+  read configuration files from the working copy, or even write to the working
+  copy. The working copy is not updated to match the revision being fixed. In
+  fact, several revisions may be fixed in parallel. Writes to the working copy
+  are not amended into the revision being fixed; fixer tools should always write
+  fixed file content back to stdout as documented above.
+  
   list of commands:
   
    fix           rewrite file content in changesets or working directory
@@ -439,6 +457,18 @@
   $ printf "a\nb\nc\nd\ne\nf\ng\n" > foo.changed
   $ hg commit -Aqm "foo"
   $ printf "zz\na\nc\ndd\nee\nff\nf\ngg\n" > foo.changed
+
+  $ hg fix --working-dir
+  $ cat foo.changed
+  ZZ
+  a
+  c
+  DD
+  EE
+  FF
+  f
+  GG
+
   $ hg fix --working-dir --whole
   $ cat foo.changed
   ZZ
@@ -526,6 +556,21 @@
 
   $ cd ..
 
+If we try to fix a missing file, we still fix other files.
+
+  $ hg init fixmissingfile
+  $ cd fixmissingfile
+
+  $ printf "fix me!\n" > foo.whole
+  $ hg add
+  adding foo.whole
+  $ hg fix --working-dir foo.whole bar.whole
+  bar.whole: $ENOENT$
+  $ cat *.whole
+  FIX ME!
+
+  $ cd ..
+
 Specifying a directory name should fix all its files and subdirectories.
 
   $ hg init fixdirectory
@@ -1060,6 +1105,7 @@
   $ printf "foo\n" > foo.changed
   $ hg commit -Aqm "foo"
   $ hg debugobsolete `hg parents --template '{node}'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg --hidden fix -r 0
   abort: fixing obsolete revision could cause divergence
@@ -1161,28 +1207,6 @@
 
   $ cd ..
 
-The :fileset subconfig was a misnomer, so we renamed it to :pattern. We will
-still accept :fileset by itself as if it were :pattern, but this will issue a
-warning.
-
-  $ hg init filesetispattern
-  $ cd filesetispattern
-
-  $ printf "foo\n" > foo.whole
-  $ printf "first\nsecond\n" > bar.txt
-  $ hg add -q
-  $ hg fix -w --config fix.sometool:fileset=bar.txt \
-  >           --config fix.sometool:command="sort -r"
-  the fix.tool:fileset config name is deprecated; please rename it to fix.tool:pattern
-
-  $ cat foo.whole
-  FOO
-  $ cat bar.txt
-  second
-  first
-
-  $ cd ..
-
 The execution order of tools can be controlled. This example doesn't work if
 you sort after truncating, but the config defines the correct order while the
 definitions are out of order (which might imply the incorrect order given the
@@ -1264,3 +1288,142 @@
 
   $ cd ..
 
+We run fixer tools in the repo root so they can look for config files or other
+important things in the working directory. This does NOT mean we are
+reconstructing a working copy of every revision being fixed; we're just giving
+the tool knowledge of the repo's location in case it can do something
+reasonable with that.
+
+  $ hg init subprocesscwd
+  $ cd subprocesscwd
+
+  $ cat >> .hg/hgrc <<EOF
+  > [fix]
+  > printcwd:command = "$PYTHON" -c "import os; print(os.getcwd())"
+  > printcwd:pattern = relpath:foo/bar
+  > EOF
+
+  $ mkdir foo
+  $ printf "bar\n" > foo/bar
+  $ hg commit -Aqm blah
+
+  $ hg fix -w -r . foo/bar
+  $ hg cat -r tip foo/bar
+  $TESTTMP/subprocesscwd
+  $ cat foo/bar
+  $TESTTMP/subprocesscwd
+
+  $ cd foo
+
+  $ hg fix -w -r . bar
+  $ hg cat -r tip bar
+  $TESTTMP/subprocesscwd
+  $ cat bar
+  $TESTTMP/subprocesscwd
+  $ echo modified > bar
+  $ hg fix -w bar
+  $ cat bar
+  $TESTTMP/subprocesscwd
+
+  $ cd ../..
+
+Tools configured without a pattern are ignored. It would be too dangerous to
+run them on all files, because this might happen while testing a configuration
+that also deletes all of the file content. There is no reasonable subset of the
+files to use as a default. Users should be explicit about what files are
+affected by a tool. This test also confirms that we don't crash when the
+pattern config is missing, and that we only warn about it once.
+
+  $ hg init nopatternconfigured
+  $ cd nopatternconfigured
+
+  $ printf "foo" > foo
+  $ printf "bar" > bar
+  $ hg add -q
+  $ hg fix --debug --working-dir --config "fix.nopattern:command=echo fixed"
+  fixer tool has no pattern configuration: nopattern
+  $ cat foo bar
+  foobar (no-eol)
+  $ hg fix --debug --working-dir --config "fix.nocommand:pattern=foo.bar"
+  fixer tool has no command configuration: nocommand
+
+  $ cd ..
+
+Tools can be disabled. Disabled tools do nothing but print a debug message.
+
+  $ hg init disabled
+  $ cd disabled
+
+  $ printf "foo\n" > foo
+  $ hg add -q
+  $ hg fix --debug --working-dir --config "fix.disabled:command=echo fixed" \
+  >                              --config "fix.disabled:pattern=foo" \
+  >                              --config "fix.disabled:enabled=false"
+  ignoring disabled fixer tool: disabled
+  $ cat foo
+  foo
+
+  $ cd ..
+
+Test that we can configure a fixer to affect all files regardless of the cwd.
+The way we invoke matching must not prohibit this.
+
+  $ hg init affectallfiles
+  $ cd affectallfiles
+
+  $ mkdir foo bar
+  $ printf "foo" > foo/file
+  $ printf "bar" > bar/file
+  $ printf "baz" > baz_file
+  $ hg add -q
+
+  $ cd bar
+  $ hg fix --working-dir --config "fix.cooltool:command=echo fixed" \
+  >                      --config "fix.cooltool:pattern=glob:**"
+  $ cd ..
+
+  $ cat foo/file
+  fixed
+  $ cat bar/file
+  fixed
+  $ cat baz_file
+  fixed
+
+  $ cd ..
+
+Tools should be able to run on unchanged files, even if they set :linerange.
+This includes a corner case where deleted chunks of a file are not considered
+changes.
+
+  $ hg init skipclean
+  $ cd skipclean
+
+  $ printf "a\nb\nc\n" > foo
+  $ printf "a\nb\nc\n" > bar
+  $ printf "a\nb\nc\n" > baz
+  $ hg commit -Aqm "base"
+
+  $ printf "a\nc\n" > foo
+  $ printf "a\nx\nc\n" > baz
+
+  $ cat >> print.py <<EOF
+  > import sys
+  > for a in sys.argv[1:]:
+  >    print(a)
+  > EOF
+
+  $ hg fix --working-dir foo bar baz \
+  >        --config "fix.changedlines:command=\"$PYTHON\" print.py \"Line ranges:\"" \
+  >        --config 'fix.changedlines:linerange="{first} through {last}"' \
+  >        --config 'fix.changedlines:pattern=glob:**' \
+  >        --config 'fix.changedlines:skipclean=false'
+
+  $ cat foo
+  Line ranges:
+  $ cat bar
+  Line ranges:
+  $ cat baz
+  Line ranges:
+  2 through 2
+
+  $ cd ..
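
Tying the suboptions in this file together, a complete fixer entry of the
kind exercised above might look like this (the tool name and its --lines flag
are illustrative; the suboptions and the {first}/{last} placeholders come
from the help text above):

  $ cat >> .hg/hgrc << EOF
  > [fix]
  > mysort:command = sort
  > mysort:linerange = --lines={first}-{last}
  > mysort:pattern = glob:**.txt
  > mysort:skipclean = false
  > EOF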
--- a/tests/test-flagprocessor.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-flagprocessor.t	Mon Oct 21 11:09:48 2019 -0400
@@ -204,10 +204,10 @@
     File "*/mercurial/extensions.py", line *, in _runextsetup (glob)
       extsetup(ui)
     File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
-      validatehash,
-    File "*/mercurial/revlog.py", line *, in addflagprocessor (glob)
-      _insertflagprocessor(flag, processor, _flagprocessors)
-    File "*/mercurial/revlog.py", line *, in _insertflagprocessor (glob)
+      REVIDX_NOOP, (noopdonothingread, noopdonothing, validatehash,)
+    File "*/mercurial/revlogutils/flagutil.py", line *, in addflagprocessor (glob)
+      insertflagprocessor(flag, processor, flagprocessors)
+    File "*/mercurial/revlogutils/flagutil.py", line *, in insertflagprocessor (glob)
       raise error.Abort(msg)
   mercurial.error.Abort: b"cannot register multiple processors on flag '0x8'." (py3 !)
   Abort: cannot register multiple processors on flag '0x8'. (no-py3 !)
--- a/tests/test-fuzz-targets.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-fuzz-targets.t	Mon Oct 21 11:09:48 2019 -0400
@@ -41,3 +41,6 @@
   $ ./bdiff -max_total_time 5
   $ ./mpatch -max_total_time 5
   $ ./xdiff -max_total_time 5
+
+Clean up.
+  $ $MAKE -s clean
--- a/tests/test-globalopts.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-globalopts.t	Mon Oct 21 11:09:48 2019 -0400
@@ -336,7 +336,7 @@
    cat           output the current or given revision of files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
-   grep          search revision history for a pattern in specified files
+   grep          search for a pattern in specified files
   
   Change navigation:
   
@@ -468,7 +468,7 @@
    cat           output the current or given revision of files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
-   grep          search revision history for a pattern in specified files
+   grep          search for a pattern in specified files
   
   Change navigation:
   
--- a/tests/test-glog-beautifygraph.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-glog-beautifygraph.t	Mon Oct 21 11:09:48 2019 -0400
@@ -2426,6 +2426,7 @@
   > EOF
 
   $ hg debugobsolete `hg id --debug -i -r 8`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ testlog
   []
--- a/tests/test-glog.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-glog.t	Mon Oct 21 11:09:48 2019 -0400
@@ -2276,6 +2276,7 @@
   > EOF
 
   $ hg debugobsolete `hg id --debug -i -r 8`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ testlog
   []
--- a/tests/test-grep.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-grep.t	Mon Oct 21 11:09:48 2019 -0400
@@ -27,10 +27,30 @@
   port:4:export
   port:4:vaportight
   port:4:import/export
+  port:3:export
+  port:3:vaportight
+  port:3:import/export
+  port:3:import/export
+  port:2:export
+  port:2:vaportight
+  port:2:import/export
+  port:1:import
+  port:1:export
+  port:0:import
   $ hg grep -r tip:0 port port
   port:4:export
   port:4:vaportight
   port:4:import/export
+  port:3:export
+  port:3:vaportight
+  port:3:import/export
+  port:3:import/export
+  port:2:export
+  port:2:vaportight
+  port:2:import/export
+  port:1:import
+  port:1:export
+  port:0:import
 
 simple from subdirectory
 
@@ -40,10 +60,30 @@
   port:4:export
   port:4:vaportight
   port:4:import/export
+  port:3:export
+  port:3:vaportight
+  port:3:import/export
+  port:3:import/export
+  port:2:export
+  port:2:vaportight
+  port:2:import/export
+  port:1:import
+  port:1:export
+  port:0:import
   $ hg grep -r tip:0 port --config ui.relative-paths=yes
   ../port:4:export
   ../port:4:vaportight
   ../port:4:import/export
+  ../port:3:export
+  ../port:3:vaportight
+  ../port:3:import/export
+  ../port:3:import/export
+  ../port:2:export
+  ../port:2:vaportight
+  ../port:2:import/export
+  ../port:1:import
+  ../port:1:export
+  ../port:0:import
   $ cd ..
 
 simple with color
@@ -53,6 +93,16 @@
   \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m4\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
   \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m4\x1b[0m\x1b[0;36m:\x1b[0mva\x1b[0;31;1mport\x1b[0might (esc)
   \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m4\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
+  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
+  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mva\x1b[0;31;1mport\x1b[0might (esc)
+  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
+  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
+  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m2\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
+  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m2\x1b[0m\x1b[0;36m:\x1b[0mva\x1b[0;31;1mport\x1b[0might (esc)
+  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m2\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
+  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m1\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m (esc)
+  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m1\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
+  \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m0\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m (esc)
 
 simple templated
 
@@ -61,16 +111,46 @@
   port:4:914fa752cdea:exPORT
   port:4:914fa752cdea:vaPORTight
   port:4:914fa752cdea:imPORT/exPORT
+  port:3:95040cfd017d:exPORT
+  port:3:95040cfd017d:vaPORTight
+  port:3:95040cfd017d:imPORT/exPORT
+  port:3:95040cfd017d:imPORT/exPORT
+  port:2:3b325e3481a1:exPORT
+  port:2:3b325e3481a1:vaPORTight
+  port:2:3b325e3481a1:imPORT/exPORT
+  port:1:8b20f75c1585:imPORT
+  port:1:8b20f75c1585:exPORT
+  port:0:f31323c92170:imPORT
 
   $ hg grep port -r tip:0 -T '{path}:{rev}:{texts}\n'
   port:4:export
   port:4:vaportight
   port:4:import/export
+  port:3:export
+  port:3:vaportight
+  port:3:import/export
+  port:3:import/export
+  port:2:export
+  port:2:vaportight
+  port:2:import/export
+  port:1:import
+  port:1:export
+  port:0:import
 
   $ hg grep port -r tip:0 -T '{path}:{tags}:{texts}\n'
   port:tip:export
   port:tip:vaportight
   port:tip:import/export
+  port::export
+  port::vaportight
+  port::import/export
+  port::import/export
+  port::export
+  port::vaportight
+  port::import/export
+  port::import
+  port::export
+  port::import
 
 simple JSON (no "change" field)
 
@@ -102,6 +182,96 @@
     "rev": 4,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
+   },
+   {
+    "date": [3, 0],
+    "lineno": 1,
+    "node": "95040cfd017d658c536071c6290230a613c4c2a6",
+    "path": "port",
+    "rev": 3,
+    "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
+    "user": "eggs"
+   },
+   {
+    "date": [3, 0],
+    "lineno": 2,
+    "node": "95040cfd017d658c536071c6290230a613c4c2a6",
+    "path": "port",
+    "rev": 3,
+    "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
+    "user": "eggs"
+   },
+   {
+    "date": [3, 0],
+    "lineno": 3,
+    "node": "95040cfd017d658c536071c6290230a613c4c2a6",
+    "path": "port",
+    "rev": 3,
+    "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
+    "user": "eggs"
+   },
+   {
+    "date": [3, 0],
+    "lineno": 4,
+    "node": "95040cfd017d658c536071c6290230a613c4c2a6",
+    "path": "port",
+    "rev": 3,
+    "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
+    "user": "eggs"
+   },
+   {
+    "date": [2, 0],
+    "lineno": 1,
+    "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
+    "rev": 2,
+    "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
+    "user": "spam"
+   },
+   {
+    "date": [2, 0],
+    "lineno": 2,
+    "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
+    "rev": 2,
+    "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
+    "user": "spam"
+   },
+   {
+    "date": [2, 0],
+    "lineno": 3,
+    "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
+    "rev": 2,
+    "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
+    "user": "spam"
+   },
+   {
+    "date": [1, 0],
+    "lineno": 1,
+    "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
+    "path": "port",
+    "rev": 1,
+    "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
+    "user": "eggs"
+   },
+   {
+    "date": [1, 0],
+    "lineno": 2,
+    "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
+    "path": "port",
+    "rev": 1,
+    "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
+    "user": "eggs"
+   },
+   {
+    "date": [0, 0],
+    "lineno": 1,
+    "node": "f31323c9217050ba245ee8b537c713ec2e8ab226",
+    "path": "port",
+    "rev": 0,
+    "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
+    "user": "spam"
    }
   ]
 
@@ -116,6 +286,38 @@
     "path": "port",
     "rev": 4,
     "user": "spam"
+   },
+   {
+    "date": [3, 0],
+    "lineno": 1,
+    "node": "95040cfd017d658c536071c6290230a613c4c2a6",
+    "path": "port",
+    "rev": 3,
+    "user": "eggs"
+   },
+   {
+    "date": [2, 0],
+    "lineno": 1,
+    "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
+    "rev": 2,
+    "user": "spam"
+   },
+   {
+    "date": [1, 0],
+    "lineno": 1,
+    "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
+    "path": "port",
+    "rev": 1,
+    "user": "eggs"
+   },
+   {
+    "date": [0, 0],
+    "lineno": 1,
+    "node": "f31323c9217050ba245ee8b537c713ec2e8ab226",
+    "path": "port",
+    "rev": 0,
+    "user": "spam"
    }
   ]
 
@@ -232,8 +434,17 @@
 
   $ hg grep -r tip:0 -l port port
   port:4
+  port:3
+  port:2
+  port:1
+  port:0
   $ hg grep -r tip:0 import port
   port:4:import/export
+  port:3:import/export
+  port:3:import/export
+  port:2:import/export
+  port:1:import
+  port:0:import
 
   $ hg cp port port2
   $ hg commit -m 4 -u spam -d '5 0'
@@ -241,8 +452,7 @@
 follow
 
   $ hg grep -r tip:0 --traceback -f 'import\n\Z' port2
-  port:0:import
-  
+  [1]
   $ echo deport >> port2
   $ hg commit -m 5 -u eggs -d '6 0'
   $ hg grep -f --all -nu port port2
@@ -269,6 +479,9 @@
   $ hg stat
   M port2
   $ hg grep -r 'wdir()' port
+  port:2147483647:export
+  port:2147483647:vaportight
+  port:2147483647:import/export
   port2:2147483647:export
   port2:2147483647:vaportight
   port2:2147483647:import/export
@@ -295,6 +508,7 @@
   $ hg ci -m 3
   $ hg grep -r tip:0 orange
   color:3:orange
+  color:1:orange
   $ hg grep --all orange
   color:3:+:orange
   color:2:-:orange
@@ -320,6 +534,12 @@
   [grep.filename|color][grep.sep|:][grep.rev|3][grep.sep|:][grep.match|b]lack
   [grep.filename|color][grep.sep|:][grep.rev|3][grep.sep|:][grep.match|o]range
   [grep.filename|color][grep.sep|:][grep.rev|3][grep.sep|:][grep.match|b]lue
+  [grep.filename|color][grep.sep|:][grep.rev|2][grep.sep|:][grep.match|b]lack
+  [grep.filename|color][grep.sep|:][grep.rev|1][grep.sep|:][grep.match|b]lue
+  [grep.filename|color][grep.sep|:][grep.rev|1][grep.sep|:][grep.match|b]lack
+  [grep.filename|color][grep.sep|:][grep.rev|1][grep.sep|:][grep.match|o]range
+  [grep.filename|color][grep.sep|:][grep.rev|0][grep.sep|:][grep.match|b]lue
+  [grep.filename|color][grep.sep|:][grep.rev|0][grep.sep|:][grep.match|b]lack
 
 match in last "line" without newline
 
@@ -431,39 +651,21 @@
   $ hg ci -A -m "second commit"
   adding new
   $ hg grep -r "." "unmod"
-  [1]
-  $ hg grep -r "." "unmod" --all-files
   um:1:unmod
 
-With --all-files, the working directory is searched by default
+Working directory is searched by default
 
   $ echo modified >> new
-  $ hg grep --all-files mod
+  $ hg grep mod
   new:modified
   um:unmod
 
  which can be overridden by -rREV
 
-  $ hg grep --all-files -r. mod
+  $ hg grep -r. mod
   um:1:unmod
 
-commands.all-files can be negated by --no-all-files
-
-  $ hg grep --config commands.grep.all-files=True mod
-  new:modified
-  um:unmod
-  $ hg grep --config commands.grep.all-files=True --no-all-files mod
-  um:0:unmod
-
---diff --all-files makes no sense since --diff is the option to grep history
-
-  $ hg grep --diff --all-files um
-  abort: --diff and --all-files are mutually exclusive
-  [255]
-
-but --diff should precede the commands.grep.all-files option
-
-  $ hg grep --config commands.grep.all-files=True --diff mod
+  $ hg grep --diff mod
   um:0:+:unmod
 
   $ cd ..
@@ -515,18 +717,18 @@
 
   $ cd ..
 
-test -rMULTIREV with --all-files
+test -rMULTIREV
 
   $ cd sng
   $ hg rm um
   $ hg commit -m "deletes um"
-  $ hg grep -r "0:2" "unmod" --all-files
+  $ hg grep -r "0:2" "unmod"
   um:0:unmod
   um:1:unmod
-  $ hg grep -r "0:2" "unmod" --all-files um
+  $ hg grep -r "0:2" "unmod" um
   um:0:unmod
   um:1:unmod
-  $ hg grep -r "0:2" "unmod" --all-files "glob:**/um" # Check that patterns also work
+  $ hg grep -r "0:2" "unmod" "glob:**/um" # Check that patterns also work
   um:0:unmod
   um:1:unmod
   $ cd ..
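
The large blocks of new expected output above all follow from one behavior change in this release: ``hg grep -rREV`` now reports matches from every revision in the set rather than only the newest one containing a match, the working directory is searched when no revision is given, and the now-redundant ``--all-files`` flag (plus its ``commands.grep.all-files`` knob) is gone. A small subprocess-based sketch of the new defaults, assuming an ``hg`` 5.2+ binary on PATH and a repository as the current directory::

    # Hedged sketch: driving the new `hg grep` defaults from Python.
    import subprocess

    def hg_grep(*args):
        # exit status 1 simply means "no match", as in the follow test
        proc = subprocess.run(
            ['hg', 'grep'] + list(args), capture_output=True, text=True
        )
        return proc.stdout

    # No -r: the working directory is searched (formerly --all-files).
    print(hg_grep('mod'))
    # With -r, every revision in the set is reported, not just the newest.
    print(hg_grep('-r', 'tip:0', 'port'))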
--- a/tests/test-help-hide.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-help-hide.t	Mon Oct 21 11:09:48 2019 -0400
@@ -40,7 +40,7 @@
    cat           output the current or given revision of files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
-   grep          search revision history for a pattern in specified files
+   grep          search for a pattern in specified files
   
   Change navigation:
   
@@ -176,7 +176,7 @@
    cat           output the current or given revision of files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
-   grep          search revision history for a pattern in specified files
+   grep          search for a pattern in specified files
   
   Change navigation:
   
--- a/tests/test-help.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-help.t	Mon Oct 21 11:09:48 2019 -0400
@@ -92,7 +92,7 @@
    cat           output the current or given revision of files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
-   grep          search revision history for a pattern in specified files
+   grep          search for a pattern in specified files
   
   Change navigation:
   
@@ -220,7 +220,7 @@
    cat           output the current or given revision of files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
-   grep          search revision history for a pattern in specified files
+   grep          search for a pattern in specified files
   
   Change navigation:
   
@@ -1048,6 +1048,8 @@
    debugserve    run a server with advanced settings
    debugsetparents
                  manually set the parents of the current working directory
+   debugsidedata
+                 dump the side data for a cl/manifest/file revision
    debugssl      test a secure connection to a server
    debugsub      (no help text available)
    debugsuccessorssets
@@ -2588,7 +2590,7 @@
   grep
   </a>
   </td><td>
-  search revision history for a pattern in specified files
+  search for a pattern in specified files
   </td></tr>
   <tr><td>
   <a href="/help/hashelp">
--- a/tests/test-hg-parseurl.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-hg-parseurl.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,33 +2,49 @@
 
 import unittest
 
-from mercurial import (
-    hg,
-)
+from mercurial import hg
+
 
 class ParseRequestTests(unittest.TestCase):
     def testparse(self):
 
-        self.assertEqual(hg.parseurl(b'http://example.com/no/anchor'),
-                         (b'http://example.com/no/anchor', (None, [])))
-        self.assertEqual(hg.parseurl(b'http://example.com/an/anchor#foo'),
-                         (b'http://example.com/an/anchor', (b'foo', [])))
+        self.assertEqual(
+            hg.parseurl(b'http://example.com/no/anchor'),
+            (b'http://example.com/no/anchor', (None, [])),
+        )
+        self.assertEqual(
+            hg.parseurl(b'http://example.com/an/anchor#foo'),
+            (b'http://example.com/an/anchor', (b'foo', [])),
+        )
         self.assertEqual(
             hg.parseurl(b'http://example.com/no/anchor/branches', [b'foo']),
-            (b'http://example.com/no/anchor/branches', (None, [b'foo'])))
+            (b'http://example.com/no/anchor/branches', (None, [b'foo'])),
+        )
         self.assertEqual(
             hg.parseurl(b'http://example.com/an/anchor/branches#bar', [b'foo']),
-            (b'http://example.com/an/anchor/branches', (b'bar', [b'foo'])))
-        self.assertEqual(hg.parseurl(
-            b'http://example.com/an/anchor/branches-None#foo', None),
-            (b'http://example.com/an/anchor/branches-None', (b'foo', [])))
-        self.assertEqual(hg.parseurl(b'http://example.com/'),
-                         (b'http://example.com/', (None, [])))
-        self.assertEqual(hg.parseurl(b'http://example.com'),
-                         (b'http://example.com/', (None, [])))
-        self.assertEqual(hg.parseurl(b'http://example.com#foo'),
-                         (b'http://example.com/', (b'foo', [])))
+            (b'http://example.com/an/anchor/branches', (b'bar', [b'foo'])),
+        )
+        self.assertEqual(
+            hg.parseurl(
+                b'http://example.com/an/anchor/branches-None#foo', None
+            ),
+            (b'http://example.com/an/anchor/branches-None', (b'foo', [])),
+        )
+        self.assertEqual(
+            hg.parseurl(b'http://example.com/'),
+            (b'http://example.com/', (None, [])),
+        )
+        self.assertEqual(
+            hg.parseurl(b'http://example.com'),
+            (b'http://example.com/', (None, [])),
+        )
+        self.assertEqual(
+            hg.parseurl(b'http://example.com#foo'),
+            (b'http://example.com/', (b'foo', [])),
+        )
+
 
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
--- a/tests/test-hghave.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-hghave.t	Mon Oct 21 11:09:48 2019 -0400
@@ -22,7 +22,7 @@
   > EOF
   $ ( \
   > testrepohgenv; \
-  > "$PYTHON" $TESTDIR/run-tests.py -j 1 \
+  > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j 1 \
   >    $HGTEST_RUN_TESTS_PURE test-hghaveaddon.t \
   > )
   running 1 tests using 1 parallel processes 
--- a/tests/test-hgignore.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-hgignore.t	Mon Oct 21 11:09:48 2019 -0400
@@ -176,6 +176,8 @@
   ? .hgignore
   ? a.c
   ? syntax
+  $ hg debugignore
+  <includematcher includes='.*\\.o(?:/|$)'>
 
   $ cd ..
   $ echo > .hg/testhgignorerel
@@ -222,7 +224,7 @@
   A b.o
 
   $ hg debugignore
-  <includematcher includes='(?:|.*/)[^/]*(?:/|$)'>
+  <includematcher includes='.*(?:/|$)'>
 
   $ hg debugignore b.o
   b.o is ignored
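
The debugignore output changes because the matcher now compiles a simpler regular expression for this pattern; the two forms accept the same paths. A quick property check on a few sample paths::

    # Hedged sketch: the old and new includematcher regexes above agree
    # (both accept any path, with or without a trailing slash).
    import re

    old = re.compile(r'(?:|.*/)[^/]*(?:/|$)')
    new = re.compile(r'.*(?:/|$)')
    for p in ['b.o', 'dir/b.o', 'dir/sub/', '']:
        assert bool(old.match(p)) == bool(new.match(p))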
--- a/tests/test-hgrc.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-hgrc.t	Mon Oct 21 11:09:48 2019 -0400
@@ -49,6 +49,20 @@
   paths.default=$TESTTMP/foo%bar
   $ cd ..
 
+Check %include
+
+  $ echo '[section]' > $TESTTMP/included
+  $ echo 'option = value' >> $TESTTMP/included
+  $ echo '%include $TESTTMP/included' >> $HGRC
+  $ hg showconfig section
+  section.option=value
+#if no-windows
+  $ chmod u-r $TESTTMP/included
+  $ hg showconfig section
+  hg: parse error at $TESTTMP/hgrc:2: cannot include $TESTTMP/included (Permission denied)
+  [255]
+#endif
+
 issue1829: wrong indentation
 
   $ echo '[foo]' > $HGRC
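
The new hunk above covers ``%include`` resolution in hgrc files, including the error path when the included file is unreadable. A hedged Python sketch of the same flow, using the real HGRCPATH environment variable to point hg at a scratch config (the permission case, like the test, only applies off Windows)::

    # Sketch: %include pulls another config file into an hgrc; an
    # unreadable include is a hard parse error (exit code 255).
    import os, subprocess, tempfile

    tmp = tempfile.mkdtemp()
    included = os.path.join(tmp, 'included')
    hgrc = os.path.join(tmp, 'hgrc')
    with open(included, 'w') as f:
        f.write('[section]\noption = value\n')
    with open(hgrc, 'w') as f:
        f.write('%%include %s\n' % included)

    env = dict(os.environ, HGRCPATH=hgrc)
    out = subprocess.run(['hg', 'showconfig', 'section'],
                         capture_output=True, text=True, env=env)
    print(out.stdout)        # section.option=value

    os.chmod(included, 0)    # mirror `chmod u-r` from the test (non-Windows)
    err = subprocess.run(['hg', 'showconfig', 'section'],
                         capture_output=True, text=True, env=env)
    print(err.returncode)    # 255: cannot include ... (Permission denied)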
--- a/tests/test-hgweb-auth.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-hgweb-auth.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,6 +1,8 @@
 from __future__ import absolute_import, print_function
 
-from mercurial import demandimport; demandimport.enable()
+from mercurial import demandimport
+
+demandimport.enable()
 from mercurial import (
     error,
     pycompat,
@@ -8,35 +10,39 @@
     url,
     util,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 urlerr = util.urlerr
 urlreq = util.urlreq
 
+
 class myui(uimod.ui):
     def interactive(self):
         return False
 
+
 origui = myui.load()
 
+
 def writeauth(items):
     ui = origui.copy()
     for name, value in items.items():
         ui.setconfig(b'auth', name, value)
     return ui
 
+
 def _stringifyauthinfo(ai):
     if ai is None:
         return ai
     realm, authuris, user, passwd = ai
-    return (pycompat.strurl(realm),
-            [pycompat.strurl(u) for u in authuris],
-            pycompat.strurl(user),
-            pycompat.strurl(passwd),
+    return (
+        pycompat.strurl(realm),
+        [pycompat.strurl(u) for u in authuris],
+        pycompat.strurl(user),
+        pycompat.strurl(passwd),
     )
 
+
 def test(auth, urls=None):
     print('CFG:', pycompat.sysstr(stringutil.pprint(auth, bprefix=True)))
     prefixes = set()
@@ -57,11 +63,15 @@
             u, authinfo = util.url(uri).authinfo()
             if authinfo is not None:
                 pm.add_password(*_stringifyauthinfo(authinfo))
-            print('    ', tuple(pycompat.strurl(a) for a in
-                                pm.find_user_password('test',
-                                                      pycompat.strurl(u))))
+            print(
+                '    ',
+                tuple(
+                    pycompat.strurl(a)
+                    for a in pm.find_user_password('test', pycompat.strurl(u))
+                ),
+            )
         except error.Abort:
-            print('    ','abort')
+            print('    ', 'abort')
 
     if not urls:
         urls = [
@@ -73,7 +83,7 @@
             b'https://example.org/bar',
             b'https://x@example.org/bar',
             b'https://y@example.org/bar',
-            ]
+        ]
     for u in urls:
         _test(u)
 
@@ -90,64 +100,100 @@
 test({b'x.prefix': b'example.org', b'x.schemes': b'http https'})
 
 print('\n*** Test prefix matching\n')
-test({b'x.prefix': b'http://example.org/foo',
-      b'y.prefix': b'http://example.org/bar'})
-test({b'x.prefix': b'http://example.org/foo',
-      b'y.prefix': b'http://example.org/foo/bar'})
+test(
+    {
+        b'x.prefix': b'http://example.org/foo',
+        b'y.prefix': b'http://example.org/bar',
+    }
+)
+test(
+    {
+        b'x.prefix': b'http://example.org/foo',
+        b'y.prefix': b'http://example.org/foo/bar',
+    }
+)
 test({b'x.prefix': b'*', b'y.prefix': b'https://example.org/bar'})
 
 print('\n*** Test user matching\n')
-test({b'x.prefix': b'http://example.org/foo',
-      b'x.username': None,
-      b'x.password': b'xpassword'},
-     urls=[b'http://y@example.org/foo'])
-test({b'x.prefix': b'http://example.org/foo',
-      b'x.username': None,
-      b'x.password': b'xpassword',
-      b'y.prefix': b'http://example.org/foo',
-      b'y.username': b'y',
-      b'y.password': b'ypassword'},
-     urls=[b'http://y@example.org/foo'])
-test({b'x.prefix': b'http://example.org/foo/bar',
-      b'x.username': None,
-      b'x.password': b'xpassword',
-      b'y.prefix': b'http://example.org/foo',
-      b'y.username': b'y',
-      b'y.password': b'ypassword'},
-     urls=[b'http://y@example.org/foo/bar'])
+test(
+    {
+        b'x.prefix': b'http://example.org/foo',
+        b'x.username': None,
+        b'x.password': b'xpassword',
+    },
+    urls=[b'http://y@example.org/foo'],
+)
+test(
+    {
+        b'x.prefix': b'http://example.org/foo',
+        b'x.username': None,
+        b'x.password': b'xpassword',
+        b'y.prefix': b'http://example.org/foo',
+        b'y.username': b'y',
+        b'y.password': b'ypassword',
+    },
+    urls=[b'http://y@example.org/foo'],
+)
+test(
+    {
+        b'x.prefix': b'http://example.org/foo/bar',
+        b'x.username': None,
+        b'x.password': b'xpassword',
+        b'y.prefix': b'http://example.org/foo',
+        b'y.username': b'y',
+        b'y.password': b'ypassword',
+    },
+    urls=[b'http://y@example.org/foo/bar'],
+)
 
 print('\n*** Test user matching with name in prefix\n')
 
 # prefix, username and URL have the same user
-test({b'x.prefix': b'https://example.org/foo',
-      b'x.username': None,
-      b'x.password': b'xpassword',
-      b'y.prefix': b'http://y@example.org/foo',
-      b'y.username': b'y',
-      b'y.password': b'ypassword'},
-     urls=[b'http://y@example.org/foo'])
+test(
+    {
+        b'x.prefix': b'https://example.org/foo',
+        b'x.username': None,
+        b'x.password': b'xpassword',
+        b'y.prefix': b'http://y@example.org/foo',
+        b'y.username': b'y',
+        b'y.password': b'ypassword',
+    },
+    urls=[b'http://y@example.org/foo'],
+)
 # Prefix has a different user from username and URL
-test({b'y.prefix': b'http://z@example.org/foo',
-      b'y.username': b'y',
-      b'y.password': b'ypassword'},
-     urls=[b'http://y@example.org/foo'])
+test(
+    {
+        b'y.prefix': b'http://z@example.org/foo',
+        b'y.username': b'y',
+        b'y.password': b'ypassword',
+    },
+    urls=[b'http://y@example.org/foo'],
+)
 # Prefix has a different user from URL; no username
-test({b'y.prefix': b'http://z@example.org/foo',
-      b'y.password': b'ypassword'},
-     urls=[b'http://y@example.org/foo'])
+test(
+    {b'y.prefix': b'http://z@example.org/foo', b'y.password': b'ypassword'},
+    urls=[b'http://y@example.org/foo'],
+)
 # Prefix and URL have same user, but doesn't match username
-test({b'y.prefix': b'http://y@example.org/foo',
-      b'y.username': b'z',
-      b'y.password': b'ypassword'},
-     urls=[b'http://y@example.org/foo'])
+test(
+    {
+        b'y.prefix': b'http://y@example.org/foo',
+        b'y.username': b'z',
+        b'y.password': b'ypassword',
+    },
+    urls=[b'http://y@example.org/foo'],
+)
 # Prefix and URL have the same user; no username
-test({b'y.prefix': b'http://y@example.org/foo',
-      b'y.password': b'ypassword'},
-     urls=[b'http://y@example.org/foo'])
+test(
+    {b'y.prefix': b'http://y@example.org/foo', b'y.password': b'ypassword'},
+    urls=[b'http://y@example.org/foo'],
+)
 # Prefix user, but no URL user or username
-test({b'y.prefix': b'http://y@example.org/foo',
-      b'y.password': b'ypassword'},
-     urls=[b'http://example.org/foo'])
+test(
+    {b'y.prefix': b'http://y@example.org/foo', b'y.password': b'ypassword'},
+    urls=[b'http://example.org/foo'],
+)
+
 
 def testauthinfo(fullurl, authurl):
     print('URIs:', fullurl, authurl)
@@ -156,5 +202,6 @@
     pm.add_password(*ai)
     print(pm.find_user_password('test', authurl))
 
+
 print('\n*** Test urllib2 and util.url\n')
 testauthinfo('http://user@example.com:8080/foo', 'http://example.com:8080/foo')
--- a/tests/test-hgweb-json.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-hgweb-json.t	Mon Oct 21 11:09:48 2019 -0400
@@ -2005,7 +2005,7 @@
         "topic": "graft"
       },
       {
-        "summary": "search revision history for a pattern in specified files",
+        "summary": "search for a pattern in specified files",
         "topic": "grep"
       },
       {
--- a/tests/test-hgwebdir-paths.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-hgwebdir-paths.py	Mon Oct 21 11:09:48 2019 -0400
@@ -5,9 +5,8 @@
     hg,
     ui as uimod,
 )
-from mercurial.hgweb import (
-    hgwebdir_mod,
-)
+from mercurial.hgweb import hgwebdir_mod
+
 hgwebdir = hgwebdir_mod.hgwebdir
 
 os.mkdir(b'webdir')
@@ -24,10 +23,12 @@
 hg.repository(u, b'c', create=1)
 os.chdir(b'..')
 
-paths = {b't/a/': b'%s/a' % webdir,
-         b'b': b'%s/b' % webdir,
-         b'coll': b'%s/*' % webdir,
-         b'rcoll': b'%s/**' % webdir}
+paths = {
+    b't/a/': b'%s/a' % webdir,
+    b'b': b'%s/b' % webdir,
+    b'coll': b'%s/*' % webdir,
+    b'rcoll': b'%s/**' % webdir,
+}
 
 config = os.path.join(webdir, b'hgwebdir.conf')
 configfile = open(config, 'wb')
--- a/tests/test-histedit-non-commute-abort.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-histedit-non-commute-abort.t	Mon Oct 21 11:09:48 2019 -0400
@@ -162,4 +162,26 @@
      summary:     a
   
 
+Early tree conflict doesn't leave histedit in a wedged state. Note
+that we don't specify --commands here: we catch the problem before we
+even prompt the user for rules, sidestepping any data-loss issues.
+
+  $ hg rm c
+  $ hg ci -m 'remove c'
+  $ echo collision > c
+
+  $ hg histedit e860deea161a
+  c: untracked file differs
+  abort: untracked files in working directory conflict with files in 055a42cdd887
+  [255]
+
+We should have detected the collision early enough that we're not in a
+histedit state, and p1 is unchanged.
+
+  $ hg log -r 'p1()' -T'{node}\n'
+  1b0954ff00fccb15a37b679e4a35e9b01dfe685e
+  $ hg status --config ui.tweakdefaults=yes
+  ? c
+  ? e.orig
+
   $ cd ..
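
For completeness, here is the shape of the check this new test makes, as a sketch under the test's own setup (the e860deea161a revision and the colliding file ``c`` come from the script above). It verifies that the abort happens before any histedit state is written::

    # Hedged sketch: histedit aborts on the untracked-file collision
    # before prompting for rules, so no histedit session is left behind.
    import subprocess

    proc = subprocess.run(['hg', 'histedit', 'e860deea161a'],
                          capture_output=True, text=True)
    assert proc.returncode == 255        # "untracked files ... conflict"

    # p1 is unchanged and `hg status` shows only the untracked files;
    # there is no interrupted histedit to --abort or --continue.
    p1 = subprocess.run(['hg', 'log', '-r', 'p1()', '-T', '{node}\n'],
                        capture_output=True, text=True).stdout
    print(p1)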
--- a/tests/test-hook.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-hook.t	Mon Oct 21 11:09:48 2019 -0400
@@ -720,7 +720,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   4:539e4b31b6dc
   pretxnchangegroup.forbid hook: HG_HOOKNAME=pretxnchangegroup.forbid1
   HG_HOOKTYPE=pretxnchangegroup
@@ -763,8 +762,8 @@
   adding changesets
   adding manifests
   adding file changes
+  adding remote bookmark quux
   added 1 changesets with 1 changes to 1 files
-  adding remote bookmark quux
   new changesets 539e4b31b6dc
   (run 'hg update' to get a working copy)
   $ hg rollback
@@ -969,19 +968,27 @@
   (run with --traceback for stack trace)
   [255]
 
-The second egrep is to filter out lines like '    ^', which are slightly
-different between Python 2.6 and Python 2.7.
-  $ hg pull ../a --traceback 2>&1 | egrep -v '^( +File|    [_a-zA-Z*(])' | egrep -v '^( )+(\^)?$'
+  $ hg pull ../a --traceback 2>&1 | egrep 'pulling|searching|^exception|Traceback|SyntaxError|ImportError|ModuleNotFoundError|HookLoadError|abort'
   pulling from ../a
   searching for changes
   exception from first failed import attempt:
   Traceback (most recent call last):
   SyntaxError: * (glob)
   exception from second failed import attempt:
+  Traceback (most recent call last): (py3 !)
+  SyntaxError: * (glob) (py3 !)
   Traceback (most recent call last):
-  ImportError: No module named hgext_syntaxerror
+  ImportError: No module named hgext_syntaxerror (no-py3 !)
+  ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
   Traceback (most recent call last):
-  HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
+  SyntaxError: * (glob) (py3 !)
+  Traceback (most recent call last): (py3 !)
+  ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
+  Traceback (most recent call last): (py3 !)
+  HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed (no-py3 !)
+  mercurial.error.HookLoadError: b'preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed' (py3 !)
   abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
 
   $ echo '[hooks]' > ../a/.hg/hgrc
@@ -995,8 +1002,8 @@
   adding changesets
   adding manifests
   adding file changes
+  adding remote bookmark quux
   added 1 changesets with 1 changes to 1 files
-  adding remote bookmark quux
   new changesets 539e4b31b6dc
   (run 'hg update' to get a working copy)
 
@@ -1114,7 +1121,8 @@
 
   $ hg id
   loading pre-identify.npmd hook failed:
-  abort: No module named repo!
+  abort: No module named repo! (no-py3 !)
+  abort: No module named 'repo'! (py3 !)
   [255]
 
   $ cd ../../b
@@ -1131,15 +1139,29 @@
   $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
 
   $ echo a >> a
-  $ hg --traceback commit -ma 2>&1 | egrep -v '^( +File|    [a-zA-Z(])'
+  $ hg --traceback commit -ma 2>&1 | egrep '^exception|ImportError|ModuleNotFoundError|Traceback|HookLoadError|abort'
   exception from first failed import attempt:
   Traceback (most recent call last):
-  ImportError: No module named somebogusmodule
+  ImportError: No module named somebogusmodule (no-py3 !)
+  ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
   exception from second failed import attempt:
+  Traceback (most recent call last): (py3 !)
+  ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
+  Traceback (most recent call last): (py3 !)
+  ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
+  Traceback (most recent call last): (py3 !)
+  ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
   Traceback (most recent call last):
-  ImportError: No module named hgext_importfail
+  ImportError: No module named hgext_importfail (no-py3 !)
+  ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
+  ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
   Traceback (most recent call last):
-  HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed
+  HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed (no-py3 !)
+  mercurial.error.HookLoadError: b'precommit.importfail hook is invalid: import of "importfail" failed' (py3 !)
   abort: precommit.importfail hook is invalid: import of "importfail" failed
 
 Issue1827: Hooks Update & Commit not completely post operation
@@ -1235,13 +1257,13 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   changeset:   1:9836a07b9b9d
   tag:         tip
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     b
   
+  added 1 changesets with 1 changes to 1 files
 
 pretxnclose hook failure should abort the transaction
 
--- a/tests/test-http-bad-server.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-http-bad-server.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1092,7 +1092,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   transaction abort!
   rollback completed
   abort: HTTP request error (incomplete response) (py3 !)
--- a/tests/test-http.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-http.t	Mon Oct 21 11:09:48 2019 -0400
@@ -338,13 +338,14 @@
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
   bundle2-input-part: "output" (advisory) (params: 0 advisory) supported
-  bundle2-input-part: total payload size 100
+  bundle2-input-part: total payload size 55
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
+  bundle2-input-part: "output" (advisory) supported
+  bundle2-input-part: total payload size 45
   remote: added 1 changesets with 1 changes to 1 files
-  bundle2-input-part: "output" (advisory) supported
-  bundle2-input-bundle: 2 parts total
+  bundle2-input-bundle: 3 parts total
   preparing listkeys for "phases"
   sending listkeys command
   devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
--- a/tests/test-hybridencode.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-hybridencode.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,9 +2,8 @@
 
 import unittest
 
-from mercurial import (
-    store,
-)
+from mercurial import store
+
 
 class hybridencodetests(unittest.TestCase):
     def hybridencode(self, input, want):
@@ -19,72 +18,112 @@
     def testnoencodingrequired(self):
         self.hybridencode(
             b'data/abcdefghijklmnopqrstuvwxyz0123456789 !#%&\'()+,-.;=[]^`{}',
-            b'data/abcdefghijklmnopqrstuvwxyz0123456789 !#%&\'()+,-.;=[]^`{}')
+            b'data/abcdefghijklmnopqrstuvwxyz0123456789 !#%&\'()+,-.;=[]^`{}',
+        )
 
-    def testuppercasechars(self): # uppercase char X is encoded as _x
+    def testuppercasechars(self):  # uppercase char X is encoded as _x
         self.hybridencode(
             b'data/ABCDEFGHIJKLMNOPQRSTUVWXYZ',
-            b'data/_a_b_c_d_e_f_g_h_i_j_k_l_m_n_o_p_q_r_s_t_u_v_w_x_y_z')
+            b'data/_a_b_c_d_e_f_g_h_i_j_k_l_m_n_o_p_q_r_s_t_u_v_w_x_y_z',
+        )
 
-    def testunderbar(self): # underbar is doubled
+    def testunderbar(self):  # underbar is doubled
         self.hybridencode(b'data/_', b'data/__')
 
-    def testtilde(self): # tilde is character-encoded
+    def testtilde(self):  # tilde is character-encoded
         self.hybridencode(b'data/~', b'data/~7e')
 
-    def testcontrolchars(self): # characters in ASCII code range 1..31
+    def testcontrolchars(self):  # characters in ASCII code range 1..31
         self.hybridencode(
-            (b'data/\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
-             b'\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e'
-             b'\x1f'),
-            (b'data/~01~02~03~04~05~06~07~08~09~0a~0b~0c~0d~0e~0f~10~11~12~13'
-             b'~14~15~16~17~18~19~1a~1b~1c~1d~1e~1f'))
+            (
+                b'data/\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
+                b'\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e'
+                b'\x1f'
+            ),
+            (
+                b'data/~01~02~03~04~05~06~07~08~09~0a~0b~0c~0d~0e~0f~10~11~12~13'
+                b'~14~15~16~17~18~19~1a~1b~1c~1d~1e~1f'
+            ),
+        )
 
-    def testhighascii(self):# characters in ASCII code range 126..255
+    def testhighascii(self):  # characters in ASCII code range 126..255
         self.hybridencode(
-            (b'data/~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c'
-             b'\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b'
-             b'\x9c\x9d\x9e\x9f'),
-            (b'data/~7e~7f~80~81~82~83~84~85~86~87~88~89~8a~8b~8c~8d~8e~8f~90'
-             b'~91~92~93~94~95~96~97~98~99~9a~9b~9c~9d~9e~9f'))
+            (
+                b'data/~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c'
+                b'\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b'
+                b'\x9c\x9d\x9e\x9f'
+            ),
+            (
+                b'data/~7e~7f~80~81~82~83~84~85~86~87~88~89~8a~8b~8c~8d~8e~8f~90'
+                b'~91~92~93~94~95~96~97~98~99~9a~9b~9c~9d~9e~9f'
+            ),
+        )
         self.hybridencode(
-            (b'data/\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad'
-             b'\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc'
-             b'\xbd\xbe\xbf'),
-            (b'data/~a0~a1~a2~a3~a4~a5~a6~a7~a8~a9~aa~ab~ac~ad~ae~af~b0~b1~b2'
-             b'~b3~b4~b5~b6~b7~b8~b9~ba~bb~bc~bd~be~bf'))
+            (
+                b'data/\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad'
+                b'\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc'
+                b'\xbd\xbe\xbf'
+            ),
+            (
+                b'data/~a0~a1~a2~a3~a4~a5~a6~a7~a8~a9~aa~ab~ac~ad~ae~af~b0~b1~b2'
+                b'~b3~b4~b5~b6~b7~b8~b9~ba~bb~bc~bd~be~bf'
+            ),
+        )
         self.hybridencode(
-            (b'data/\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca'
-             b'\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6'
-             b'\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf'),
-            (b'data/~c0~c1~c2~c3~c4~c5~c6~c7~c8~c9~ca~cb~cc~cd~ce~cf~d0~d1~d2'
-             b'~d3~d4~d5~d6~d7~d8~d9~da~db~dc~dd~de~df'))
+            (
+                b'data/\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca'
+                b'\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6'
+                b'\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf'
+            ),
+            (
+                b'data/~c0~c1~c2~c3~c4~c5~c6~c7~c8~c9~ca~cb~cc~cd~ce~cf~d0~d1~d2'
+                b'~d3~d4~d5~d6~d7~d8~d9~da~db~dc~dd~de~df'
+            ),
+        )
         self.hybridencode(
-            (b'data/\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed'
-             b'\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd'
-             b'\xfe\xff'),
-            (b'data/~e0~e1~e2~e3~e4~e5~e6~e7~e8~e9~ea~eb~ec~ed~ee~ef~f0~f1~f2'
-             b'~f3~f4~f5~f6~f7~f8~f9~fa~fb~fc~fd~fe~ff'))
+            (
+                b'data/\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed'
+                b'\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd'
+                b'\xfe\xff'
+            ),
+            (
+                b'data/~e0~e1~e2~e3~e4~e5~e6~e7~e8~e9~ea~eb~ec~ed~ee~ef~f0~f1~f2'
+                b'~f3~f4~f5~f6~f7~f8~f9~fa~fb~fc~fd~fe~ff'
+            ),
+        )
 
-    def testwinreserved(self): # Windows reserved characters
+    def testwinreserved(self):  # Windows reserved characters
         self.hybridencode(
-            (b'data/less <, greater >, colon :, double-quote ", backslash \\, '
-             b'pipe |, question-mark ?, asterisk *'),
-            (b'data/less ~3c, greater ~3e, colon ~3a, double-quote ~22, '
-             b'backslash ~5c, pipe ~7c, question-mark ~3f, asterisk ~2a'))
+            (
+                b'data/less <, greater >, colon :, double-quote ", backslash \\, '
+                b'pipe |, question-mark ?, asterisk *'
+            ),
+            (
+                b'data/less ~3c, greater ~3e, colon ~3a, double-quote ~22, '
+                b'backslash ~5c, pipe ~7c, question-mark ~3f, asterisk ~2a'
+            ),
+        )
 
     def testhgreserved(self):
         # encoding directories ending in .hg, .i or .d with '.hg' suffix
-        self.hybridencode(b'data/x.h.i/x.hg/x.i/x.d/foo',
-                          b'data/x.h.i.hg/x.hg.hg/x.i.hg/x.d.hg/foo')
-        self.hybridencode(b'data/a.hg/a.i/a.d/foo',
-                          b'data/a.hg.hg/a.i.hg/a.d.hg/foo')
-        self.hybridencode(b'data/au.hg/au.i/au.d/foo',
-                          b'data/au.hg.hg/au.i.hg/au.d.hg/foo')
-        self.hybridencode(b'data/aux.hg/aux.i/aux.d/foo',
-                          b'data/au~78.hg.hg/au~78.i.hg/au~78.d.hg/foo')
-        self.hybridencode(b'data/auxy.hg/auxy.i/auxy.d/foo',
-                          b'data/auxy.hg.hg/auxy.i.hg/auxy.d.hg/foo')
+        self.hybridencode(
+            b'data/x.h.i/x.hg/x.i/x.d/foo',
+            b'data/x.h.i.hg/x.hg.hg/x.i.hg/x.d.hg/foo',
+        )
+        self.hybridencode(
+            b'data/a.hg/a.i/a.d/foo', b'data/a.hg.hg/a.i.hg/a.d.hg/foo'
+        )
+        self.hybridencode(
+            b'data/au.hg/au.i/au.d/foo', b'data/au.hg.hg/au.i.hg/au.d.hg/foo'
+        )
+        self.hybridencode(
+            b'data/aux.hg/aux.i/aux.d/foo',
+            b'data/au~78.hg.hg/au~78.i.hg/au~78.d.hg/foo',
+        )
+        self.hybridencode(
+            b'data/auxy.hg/auxy.i/auxy.d/foo',
+            b'data/auxy.hg.hg/auxy.i.hg/auxy.d.hg/foo',
+        )
         # but these are not encoded on *filenames*
         self.hybridencode(b'data/foo/x.hg', b'data/foo/x.hg')
         self.hybridencode(b'data/foo/x.i', b'data/foo/x.i')
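
These reformatted cases double as documentation of the "hybrid" store encoding: uppercase letters become an underscore plus the lowercase letter, ``_`` is doubled, Windows-reserved names and special bytes are ``~xx``-escaped, and paths that would exceed the length limit are hashed into the ``dh/`` namespace. As a toy illustration, here is just the uppercase/underscore rule from the first two tests (the real encoder in mercurial.store applies all the rules at once)::

    # Minimal, illustrative re-implementation of only the uppercase and
    # underscore rules exercised by testuppercasechars/testunderbar above.
    def encode_upper(path: bytes) -> bytes:
        out = bytearray()
        for b in path:
            if b == ord('_'):
                out += b'__'                   # underbar is doubled
            elif ord('A') <= b <= ord('Z'):
                out += b'_' + bytes([b + 32])  # X -> _x
            else:
                out.append(b)
        return bytes(out)

    assert encode_upper(b'data/ABC') == b'data/_a_b_c'
    assert encode_upper(b'data/_') == b'data/__'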
@@ -103,775 +142,1293 @@
         self.hybridencode(b'data/foo/auxy.d', b'data/foo/auxy.d')
 
         # plain .hg, .i and .d directories have the leading dot encoded
-        self.hybridencode(b'data/.hg/.i/.d/foo',
-                          b'data/~2ehg.hg/~2ei.hg/~2ed.hg/foo')
+        self.hybridencode(
+            b'data/.hg/.i/.d/foo', b'data/~2ehg.hg/~2ei.hg/~2ed.hg/foo'
+        )
 
     def testmisclongcases(self):
         self.hybridencode(
-            (b'data/aux.bla/bla.aux/prn/PRN/lpt/com3/nul/'
-             b'coma/foo.NUL/normal.c.i'),
-            (b'data/au~78.bla/bla.aux/pr~6e/_p_r_n/lpt/co~6d3'
-             b'/nu~6c/coma/foo._n_u_l/normal.c.i'))
+            (
+                b'data/aux.bla/bla.aux/prn/PRN/lpt/com3/nul/'
+                b'coma/foo.NUL/normal.c.i'
+            ),
+            (
+                b'data/au~78.bla/bla.aux/pr~6e/_p_r_n/lpt/co~6d3'
+                b'/nu~6c/coma/foo._n_u_l/normal.c.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH'
-             b'/TENTH/ELEVENTH/LOREMIPSUM.TXT.i'),
-            (b'dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/'
-             b'nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i'))
+            (
+                b'data/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH'
+                b'/TENTH/ELEVENTH/LOREMIPSUM.TXT.i'
+            ),
+            (
+                b'dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/'
+                b'nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/enterprise/openesbaddons/contrib-imola/corba-bc/'
-             b'netbeansplugin/wsdlExtension/src/main/java/META-INF/services'
-             b'/org.netbeans.modules.xml.wsdl.bindingsupport.spi.'
-             b'ExtensibilityElementTemplateProvider.i'),
-            (b'dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/'
-             b'main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i'))
+            (
+                b'data/enterprise/openesbaddons/contrib-imola/corba-bc/'
+                b'netbeansplugin/wsdlExtension/src/main/java/META-INF/services'
+                b'/org.netbeans.modules.xml.wsdl.bindingsupport.spi.'
+                b'ExtensibilityElementTemplateProvider.i'
+            ),
+            (
+                b'dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/'
+                b'main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/AUX.THE-QUICK-BROWN-FOX-JU:MPS-OVER-THE-LAZY-DOG-THE-'
-             b'QUICK-BROWN-FOX-JUMPS-OVER-THE-LAZY-DOG.TXT.i'),
-            (b'dh/au~78.the-quick-brown-fox-ju~3amps-over-the-lazy-dog-the-'
-             b'quick-brown-fox-jud4dcadd033000ab2b26eb66bae1906bcb15d4a70.i'))
+            (
+                b'data/AUX.THE-QUICK-BROWN-FOX-JU:MPS-OVER-THE-LAZY-DOG-THE-'
+                b'QUICK-BROWN-FOX-JUMPS-OVER-THE-LAZY-DOG.TXT.i'
+            ),
+            (
+                b'dh/au~78.the-quick-brown-fox-ju~3amps-over-the-lazy-dog-the-'
+                b'quick-brown-fox-jud4dcadd033000ab2b26eb66bae1906bcb15d4a70.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/Project Planning/Resources/AnotherLongDirectoryName/Follow'
-             b'edbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt'),
-            (b'dh/project_/resource/anotherl/followed/andanoth/andthenanextrem'
-             b'elylongfilenaf93030515d9849cfdca52937c2204d19f83913e5.txt'))
+            (
+                b'data/Project Planning/Resources/AnotherLongDirectoryName/Follow'
+                b'edbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt'
+            ),
+            (
+                b'dh/project_/resource/anotherl/followed/andanoth/andthenanextrem'
+                b'elylongfilenaf93030515d9849cfdca52937c2204d19f83913e5.txt'
+            ),
+        )
         self.hybridencode(
-            (b'data/Project.Planning/Resources/AnotherLongDirectoryName/Follo'
-             b'wedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt'),
-            (b'dh/project_/resource/anotherl/followed/andanoth/andthenanextre'
-             b'melylongfilena0fd7c506f5c9d58204444fc67e9499006bd2d445.txt'))
+            (
+                b'data/Project.Planning/Resources/AnotherLongDirectoryName/Follo'
+                b'wedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt'
+            ),
+            (
+                b'dh/project_/resource/anotherl/followed/andanoth/andthenanextre'
+                b'melylongfilena0fd7c506f5c9d58204444fc67e9499006bd2d445.txt'
+            ),
+        )
         self.hybridencode(
             b'data/foo.../foo   / /a./_. /__/.x../    bla/.FOO/something.i',
-            (b'data/foo..~2e/foo  ~20/~20/a~2e/__.~20/____/~2ex.~2e/~20   bla/'
-             b'~2e_f_o_o/something.i'))
+            (
+                b'data/foo..~2e/foo  ~20/~20/a~2e/__.~20/____/~2ex.~2e/~20   bla/'
+                b'~2e_f_o_o/something.i'
+            ),
+        )
         self.hybridencode(
             b'data/c/co/com/com0/com1/com2/com3/com4/com5/com6/com7/com8/com9',
-            (b'data/c/co/com/com0/co~6d1/co~6d2/co~6d3/co~6d4/co~6d5/co~6d6/'
-             b'co~6d7/co~6d8/co~6d9'))
+            (
+                b'data/c/co/com/com0/co~6d1/co~6d2/co~6d3/co~6d4/co~6d5/co~6d6/'
+                b'co~6d7/co~6d8/co~6d9'
+            ),
+        )
         self.hybridencode(
             b'data/C/CO/COM/COM0/COM1/COM2/COM3/COM4/COM5/COM6/COM7/COM8/COM9',
-            (b'data/_c/_c_o/_c_o_m/_c_o_m0/_c_o_m1/_c_o_m2/_c_o_m3/_c_o_m4/'
-             b'_c_o_m5/_c_o_m6/_c_o_m7/_c_o_m8/_c_o_m9'))
+            (
+                b'data/_c/_c_o/_c_o_m/_c_o_m0/_c_o_m1/_c_o_m2/_c_o_m3/_c_o_m4/'
+                b'_c_o_m5/_c_o_m6/_c_o_m7/_c_o_m8/_c_o_m9'
+            ),
+        )
         self.hybridencode(
-            (b'data/c.x/co.x/com.x/com0.x/com1.x/com2.x/com3.x/com4.x/com5.x/'
-             b'com6.x/com7.x/com8.x/com9.x'),
-            (b'data/c.x/co.x/com.x/com0.x/co~6d1.x/co~6d2.x/co~6d3.x/co~6d4.x'
-             b'/co~6d5.x/co~6d6.x/co~6d7.x/co~6d8.x/co~6d9.x'))
+            (
+                b'data/c.x/co.x/com.x/com0.x/com1.x/com2.x/com3.x/com4.x/com5.x/'
+                b'com6.x/com7.x/com8.x/com9.x'
+            ),
+            (
+                b'data/c.x/co.x/com.x/com0.x/co~6d1.x/co~6d2.x/co~6d3.x/co~6d4.x'
+                b'/co~6d5.x/co~6d6.x/co~6d7.x/co~6d8.x/co~6d9.x'
+            ),
+        )
         self.hybridencode(
-            (b'data/x.c/x.co/x.com0/x.com1/x.com2/x.com3/x.com4/x.com5/x.com6'
-             b'/x.com7/x.com8/x.com9'),
-            (b'data/x.c/x.co/x.com0/x.com1/x.com2/x.com3/x.com4/x.com5/x.com6'
-             b'/x.com7/x.com8/x.com9'))
+            (
+                b'data/x.c/x.co/x.com0/x.com1/x.com2/x.com3/x.com4/x.com5/x.com6'
+                b'/x.com7/x.com8/x.com9'
+            ),
+            (
+                b'data/x.c/x.co/x.com0/x.com1/x.com2/x.com3/x.com4/x.com5/x.com6'
+                b'/x.com7/x.com8/x.com9'
+            ),
+        )
         self.hybridencode(
-            (b'data/cx/cox/comx/com0x/com1x/com2x/com3x/com4x/com5x/com6x/'
-             b'com7x/com8x/com9x'),
-            (b'data/cx/cox/comx/com0x/com1x/com2x/com3x/com4x/com5x/com6x/'
-             b'com7x/com8x/com9x'))
+            (
+                b'data/cx/cox/comx/com0x/com1x/com2x/com3x/com4x/com5x/com6x/'
+                b'com7x/com8x/com9x'
+            ),
+            (
+                b'data/cx/cox/comx/com0x/com1x/com2x/com3x/com4x/com5x/com6x/'
+                b'com7x/com8x/com9x'
+            ),
+        )
         self.hybridencode(
-            (b'data/xc/xco/xcom0/xcom1/xcom2/xcom3/xcom4/xcom5/xcom6/xcom7/'
-             b'xcom8/xcom9'),
-            (b'data/xc/xco/xcom0/xcom1/xcom2/xcom3/xcom4/xcom5/xcom6/xcom7/'
-             b'xcom8/xcom9'))
+            (
+                b'data/xc/xco/xcom0/xcom1/xcom2/xcom3/xcom4/xcom5/xcom6/xcom7/'
+                b'xcom8/xcom9'
+            ),
+            (
+                b'data/xc/xco/xcom0/xcom1/xcom2/xcom3/xcom4/xcom5/xcom6/xcom7/'
+                b'xcom8/xcom9'
+            ),
+        )
         self.hybridencode(
             b'data/l/lp/lpt/lpt0/lpt1/lpt2/lpt3/lpt4/lpt5/lpt6/lpt7/lpt8/lpt9',
-            (b'data/l/lp/lpt/lpt0/lp~741/lp~742/lp~743/lp~744/lp~745/lp~746/'
-             b'lp~747/lp~748/lp~749'))
+            (
+                b'data/l/lp/lpt/lpt0/lp~741/lp~742/lp~743/lp~744/lp~745/lp~746/'
+                b'lp~747/lp~748/lp~749'
+            ),
+        )
         self.hybridencode(
             b'data/L/LP/LPT/LPT0/LPT1/LPT2/LPT3/LPT4/LPT5/LPT6/LPT7/LPT8/LPT9',
-            (b'data/_l/_l_p/_l_p_t/_l_p_t0/_l_p_t1/_l_p_t2/_l_p_t3/_l_p_t4/'
-             b'_l_p_t5/_l_p_t6/_l_p_t7/_l_p_t8/_l_p_t9'))
+            (
+                b'data/_l/_l_p/_l_p_t/_l_p_t0/_l_p_t1/_l_p_t2/_l_p_t3/_l_p_t4/'
+                b'_l_p_t5/_l_p_t6/_l_p_t7/_l_p_t8/_l_p_t9'
+            ),
+        )
         self.hybridencode(
-            (b'data/l.x/lp.x/lpt.x/lpt0.x/lpt1.x/lpt2.x/lpt3.x/lpt4.x/lpt5.x/'
-             b'lpt6.x/lpt7.x/lpt8.x/lpt9.x'),
-            (b'data/l.x/lp.x/lpt.x/lpt0.x/lp~741.x/lp~742.x/lp~743.x/lp~744.x/'
-             b'lp~745.x/lp~746.x/lp~747.x/lp~748.x/lp~749.x'))
+            (
+                b'data/l.x/lp.x/lpt.x/lpt0.x/lpt1.x/lpt2.x/lpt3.x/lpt4.x/lpt5.x/'
+                b'lpt6.x/lpt7.x/lpt8.x/lpt9.x'
+            ),
+            (
+                b'data/l.x/lp.x/lpt.x/lpt0.x/lp~741.x/lp~742.x/lp~743.x/lp~744.x/'
+                b'lp~745.x/lp~746.x/lp~747.x/lp~748.x/lp~749.x'
+            ),
+        )
         self.hybridencode(
-            (b'data/x.l/x.lp/x.lpt/x.lpt0/x.lpt1/x.lpt2/x.lpt3/x.lpt4/x.lpt5/'
-             b'x.lpt6/x.lpt7/x.lpt8/x.lpt9'),
-            (b'data/x.l/x.lp/x.lpt/x.lpt0/x.lpt1/x.lpt2/x.lpt3/x.lpt4/x.lpt5'
-             b'/x.lpt6/x.lpt7/x.lpt8/x.lpt9'))
+            (
+                b'data/x.l/x.lp/x.lpt/x.lpt0/x.lpt1/x.lpt2/x.lpt3/x.lpt4/x.lpt5/'
+                b'x.lpt6/x.lpt7/x.lpt8/x.lpt9'
+            ),
+            (
+                b'data/x.l/x.lp/x.lpt/x.lpt0/x.lpt1/x.lpt2/x.lpt3/x.lpt4/x.lpt5'
+                b'/x.lpt6/x.lpt7/x.lpt8/x.lpt9'
+            ),
+        )
         self.hybridencode(
-            (b'data/lx/lpx/lptx/lpt0x/lpt1x/lpt2x/lpt3x/lpt4x/lpt5x/lpt6x/'
-             b'lpt7x/lpt8x/lpt9x'),
-            (b'data/lx/lpx/lptx/lpt0x/lpt1x/lpt2x/lpt3x/lpt4x/lpt5x/lpt6x/'
-             b'lpt7x/lpt8x/lpt9x'))
+            (
+                b'data/lx/lpx/lptx/lpt0x/lpt1x/lpt2x/lpt3x/lpt4x/lpt5x/lpt6x/'
+                b'lpt7x/lpt8x/lpt9x'
+            ),
+            (
+                b'data/lx/lpx/lptx/lpt0x/lpt1x/lpt2x/lpt3x/lpt4x/lpt5x/lpt6x/'
+                b'lpt7x/lpt8x/lpt9x'
+            ),
+        )
         self.hybridencode(
-            (b'data/xl/xlp/xlpt/xlpt0/xlpt1/xlpt2/xlpt3/xlpt4/xlpt5/xlpt6/'
-             b'xlpt7/xlpt8/xlpt9'),
-            (b'data/xl/xlp/xlpt/xlpt0/xlpt1/xlpt2/xlpt3/xlpt4/xlpt5/xlpt6/'
-             b'xlpt7/xlpt8/xlpt9'))
-        self.hybridencode(b'data/con/p/pr/prn/a/au/aux/n/nu/nul',
-                          b'data/co~6e/p/pr/pr~6e/a/au/au~78/n/nu/nu~6c')
+            (
+                b'data/xl/xlp/xlpt/xlpt0/xlpt1/xlpt2/xlpt3/xlpt4/xlpt5/xlpt6/'
+                b'xlpt7/xlpt8/xlpt9'
+            ),
+            (
+                b'data/xl/xlp/xlpt/xlpt0/xlpt1/xlpt2/xlpt3/xlpt4/xlpt5/xlpt6/'
+                b'xlpt7/xlpt8/xlpt9'
+            ),
+        )
+        self.hybridencode(
+            b'data/con/p/pr/prn/a/au/aux/n/nu/nul',
+            b'data/co~6e/p/pr/pr~6e/a/au/au~78/n/nu/nu~6c',
+        )
         self.hybridencode(
             b'data/CON/P/PR/PRN/A/AU/AUX/N/NU/NUL',
-            b'data/_c_o_n/_p/_p_r/_p_r_n/_a/_a_u/_a_u_x/_n/_n_u/_n_u_l')
+            b'data/_c_o_n/_p/_p_r/_p_r_n/_a/_a_u/_a_u_x/_n/_n_u/_n_u_l',
+        )
         self.hybridencode(
             b'data/con.x/p.x/pr.x/prn.x/a.x/au.x/aux.x/n.x/nu.x/nul.x',
-            b'data/co~6e.x/p.x/pr.x/pr~6e.x/a.x/au.x/au~78.x/n.x/nu.x/nu~6c.x')
+            b'data/co~6e.x/p.x/pr.x/pr~6e.x/a.x/au.x/au~78.x/n.x/nu.x/nu~6c.x',
+        )
         self.hybridencode(
             b'data/x.con/x.p/x.pr/x.prn/x.a/x.au/x.aux/x.n/x.nu/x.nul',
-            b'data/x.con/x.p/x.pr/x.prn/x.a/x.au/x.aux/x.n/x.nu/x.nul')
-        self.hybridencode(b'data/conx/px/prx/prnx/ax/aux/auxx/nx/nux/nulx',
-                          b'data/conx/px/prx/prnx/ax/au~78/auxx/nx/nux/nulx')
-        self.hybridencode(b'data/xcon/xp/xpr/xprn/xa/xau/xaux/xn/xnu/xnul',
-                          b'data/xcon/xp/xpr/xprn/xa/xau/xaux/xn/xnu/xnul')
-        self.hybridencode(b'data/a./au./aux./auxy./aux.',
-                          b'data/a~2e/au~2e/au~78~2e/auxy~2e/au~78~2e')
-        self.hybridencode(b'data/c./co./con./cony./con.',
-                          b'data/c~2e/co~2e/co~6e~2e/cony~2e/co~6e~2e')
-        self.hybridencode(b'data/p./pr./prn./prny./prn.',
-                          b'data/p~2e/pr~2e/pr~6e~2e/prny~2e/pr~6e~2e')
-        self.hybridencode(b'data/n./nu./nul./nuly./nul.',
-                          b'data/n~2e/nu~2e/nu~6c~2e/nuly~2e/nu~6c~2e')
+            b'data/x.con/x.p/x.pr/x.prn/x.a/x.au/x.aux/x.n/x.nu/x.nul',
+        )
+        self.hybridencode(
+            b'data/conx/px/prx/prnx/ax/aux/auxx/nx/nux/nulx',
+            b'data/conx/px/prx/prnx/ax/au~78/auxx/nx/nux/nulx',
+        )
+        self.hybridencode(
+            b'data/xcon/xp/xpr/xprn/xa/xau/xaux/xn/xnu/xnul',
+            b'data/xcon/xp/xpr/xprn/xa/xau/xaux/xn/xnu/xnul',
+        )
+        self.hybridencode(
+            b'data/a./au./aux./auxy./aux.',
+            b'data/a~2e/au~2e/au~78~2e/auxy~2e/au~78~2e',
+        )
+        self.hybridencode(
+            b'data/c./co./con./cony./con.',
+            b'data/c~2e/co~2e/co~6e~2e/cony~2e/co~6e~2e',
+        )
+        self.hybridencode(
+            b'data/p./pr./prn./prny./prn.',
+            b'data/p~2e/pr~2e/pr~6e~2e/prny~2e/pr~6e~2e',
+        )
+        self.hybridencode(
+            b'data/n./nu./nul./nuly./nul.',
+            b'data/n~2e/nu~2e/nu~6c~2e/nuly~2e/nu~6c~2e',
+        )
         self.hybridencode(
             b'data/l./lp./lpt./lpt1./lpt1y./lpt1.',
-            b'data/l~2e/lp~2e/lpt~2e/lp~741~2e/lpt1y~2e/lp~741~2e')
-        self.hybridencode(b'data/lpt9./lpt9y./lpt9.',
-                          b'data/lp~749~2e/lpt9y~2e/lp~749~2e')
-        self.hybridencode(b'data/com./com1./com1y./com1.',
-                          b'data/com~2e/co~6d1~2e/com1y~2e/co~6d1~2e')
-        self.hybridencode(b'data/com9./com9y./com9.',
-                          b'data/co~6d9~2e/com9y~2e/co~6d9~2e')
-        self.hybridencode(b'data/a /au /aux /auxy /aux ',
-                          b'data/a~20/au~20/aux~20/auxy~20/aux~20')
+            b'data/l~2e/lp~2e/lpt~2e/lp~741~2e/lpt1y~2e/lp~741~2e',
+        )
+        self.hybridencode(
+            b'data/lpt9./lpt9y./lpt9.', b'data/lp~749~2e/lpt9y~2e/lp~749~2e'
+        )
+        self.hybridencode(
+            b'data/com./com1./com1y./com1.',
+            b'data/com~2e/co~6d1~2e/com1y~2e/co~6d1~2e',
+        )
+        self.hybridencode(
+            b'data/com9./com9y./com9.', b'data/co~6d9~2e/com9y~2e/co~6d9~2e'
+        )
+        self.hybridencode(
+            b'data/a /au /aux /auxy /aux ',
+            b'data/a~20/au~20/aux~20/auxy~20/aux~20',
+        )
 
     def testhashingboundarycases(self):
         # largest unhashed path
         self.hybridencode(
-            (b'data/123456789-123456789-123456789-123456789-123456789-unhashed'
-             b'--xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'),
-            (b'data/123456789-123456789-123456789-123456789-123456789-unhashed'
-             b'--xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'))
+            (
+                b'data/123456789-123456789-123456789-123456789-123456789-unhashed'
+                b'--xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'
+            ),
+            (
+                b'data/123456789-123456789-123456789-123456789-123456789-unhashed'
+                b'--xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'
+            ),
+        )
         # shortest hashed path
         self.hybridencode(
-            (b'data/123456789-123456789-123456789-123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/123456789-123456789-123456789-123456789-123456789-hashed---'
-             b'-xxxxxxxxx-xxxxxxxe9c55002b50bf5181e7a6fc1f60b126e2a6fcf71'))
+            (
+                b'data/123456789-123456789-123456789-123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/123456789-123456789-123456789-123456789-123456789-hashed---'
+                b'-xxxxxxxxx-xxxxxxxe9c55002b50bf5181e7a6fc1f60b126e2a6fcf71'
+            ),
+        )
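
Every ``dh/`` expectation from here on shares one shape: when the encoded store path would exceed the 120-byte limit, each directory is cut to its first 8 bytes (trailing directories are dropped entirely once the shortened run would pass 68 bytes), the basename is truncated, and a 40-hex SHA-1 of the full original path is appended, with any ``.i``/``.d`` extension re-added. A sketch under those assumptions — the constants match ``_maxstorepathlen``, ``_dirprefixlen`` and ``_maxshortdirslen`` from ``mercurial/store.py``, but ``sketch_hashencode`` is illustrative and skips the character escaping the real ``_hashencode`` applies first::

    import hashlib

    def sketch_hashencode(path, maxlen=120):         # _maxstorepathlen == 120
        # Illustrative sketch of the dh/ scheme, not Mercurial's _hashencode;
        # assumes `path` components are already escaped (see the sketch above)
        # and that the 40-hex suffix is the SHA-1 of the full original path.
        ext = b''
        if path.endswith((b'.i', b'.d')):
            path, ext = path[:-2], path[-2:]
        digest = hashlib.sha1(path + ext).hexdigest().encode('ascii')
        parts = path[len(b'data/'):].split(b'/')
        shortdirs, dirslen = [], 0
        for d in (p[:8] for p in parts[:-1]):        # _dirprefixlen == 8
            newlen = dirslen + len(d) + (1 if shortdirs else 0)
            if newlen > 68:                          # _maxshortdirslen == 68
                break                                # drop this dir and the rest
            shortdirs.append(d)
            dirslen = newlen
        prefix = b'/'.join(shortdirs + [parts[-1]])
        # keep just enough of the prefix that the result is exactly maxlen
        keep = maxlen - len(b'dh/') - len(digest) - len(ext)
        return b'dh/' + prefix[:keep] + digest + ext

    # "smallest truncated 1st dir" (below): 123456789 becomes 12345678 and
    # the result is exactly 120 bytes, 40 of them the hash
    p = (b'data/123456789/123456789-123456789-123456789-123456789-hashed'
         b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456')
    enc = sketch_hashencode(p)
    assert enc.startswith(b'dh/12345678/') and len(enc) == 120
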
 
     def testhashing(self):
         # changing one char in part that's hashed away produces a different hash
         self.hybridencode(
-            (b'data/123456789-123456789-123456789-123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxy-123456789-123456'),
-            (b'dh/123456789-123456789-123456789-123456789-123456789-hashed---'
-             b'-xxxxxxxxx-xxxxxxxd24fa4455faf8a94350c18e5eace7c2bb17af706'))
+            (
+                b'data/123456789-123456789-123456789-123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxy-123456789-123456'
+            ),
+            (
+                b'dh/123456789-123456789-123456789-123456789-123456789-hashed---'
+                b'-xxxxxxxxx-xxxxxxxd24fa4455faf8a94350c18e5eace7c2bb17af706'
+            ),
+        )
         # uppercase hitting length limit due to encoding
         self.hybridencode(
-            (b'data/A23456789-123456789-123456789-123456789-123456789-'
-             b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-12345'),
-            (b'dh/a23456789-123456789-123456789-123456789-123456789-'
-             b'xxxxxxxxx-xxxxxxxxx-xxxxxxx'
-             b'cbbc657029b41b94ed510d05feb6716a5c03bc6b'))
+            (
+                b'data/A23456789-123456789-123456789-123456789-123456789-'
+                b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-12345'
+            ),
+            (
+                b'dh/a23456789-123456789-123456789-123456789-123456789-'
+                b'xxxxxxxxx-xxxxxxxxx-xxxxxxx'
+                b'cbbc657029b41b94ed510d05feb6716a5c03bc6b'
+            ),
+        )
         self.hybridencode(
-            (b'data/Z23456789-123456789-123456789-123456789-123456789-'
-             b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-12345'),
-            (b'dh/z23456789-123456789-123456789-123456789-123456789-xxxxxxxxx'
-             b'-xxxxxxxxx-xxxxxxx938f32a725c89512833fb96b6602dd9ebff51ddd'))
+            (
+                b'data/Z23456789-123456789-123456789-123456789-123456789-'
+                b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-12345'
+            ),
+            (
+                b'dh/z23456789-123456789-123456789-123456789-123456789-xxxxxxxxx'
+                b'-xxxxxxxxx-xxxxxxx938f32a725c89512833fb96b6602dd9ebff51ddd'
+            ),
+        )
         # compare with lowercase not hitting limit
         self.hybridencode(
-            (b'data/a23456789-123456789-123456789-123456789-123456789-'
-             b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
-             b'12345'),
-            (b'data/a23456789-123456789-123456789-123456789-123456789-'
-             b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
-             b'12345'))
+            (
+                b'data/a23456789-123456789-123456789-123456789-123456789-'
+                b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
+                b'12345'
+            ),
+            (
+                b'data/a23456789-123456789-123456789-123456789-123456789-'
+                b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
+                b'12345'
+            ),
+        )
         self.hybridencode(
-            (b'data/z23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789'
-             b'-12345'),
-            (b'data/z23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
-             b'12345'))
+            (
+                b'data/z23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789'
+                b'-12345'
+            ),
+            (
+                b'data/z23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
+                b'12345'
+            ),
+        )
         # not hitting limit with any of these
         self.hybridencode(
-            (b'data/abcdefghijklmnopqrstuvwxyz0123456789 !#%&\'()+,-.;=[]^`{}'
-             b'xxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'),
-            (b'data/abcdefghijklmnopqrstuvwxyz0123456789 !#%&\'()+,-.;=[]^`{}'
-             b'xxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'))
+            (
+                b'data/abcdefghijklmnopqrstuvwxyz0123456789 !#%&\'()+,-.;=[]^`{}'
+                b'xxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'
+            ),
+            (
+                b'data/abcdefghijklmnopqrstuvwxyz0123456789 !#%&\'()+,-.;=[]^`{}'
+                b'xxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'
+            ),
+        )
         # underbar hitting length limit due to encoding
         self.hybridencode(
-            (b'data/_23456789-123456789-123456789-123456789-123456789-'
-             b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
-             b'12345'),
-            (b'dh/_23456789-123456789-123456789-123456789-123456789-xxxxxxxxx-'
-             b'xxxxxxxxx-xxxxxxx9921a01af50feeabc060ce00eee4cba6efc31d2b'))
+            (
+                b'data/_23456789-123456789-123456789-123456789-123456789-'
+                b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
+                b'12345'
+            ),
+            (
+                b'dh/_23456789-123456789-123456789-123456789-123456789-xxxxxxxxx-'
+                b'xxxxxxxxx-xxxxxxx9921a01af50feeabc060ce00eee4cba6efc31d2b'
+            ),
+        )
 
         # tilde hitting length limit due to encoding
         self.hybridencode(
-            (b'data/~23456789-123456789-123456789-123456789-123456789-'
-             b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
-             b'12345'),
-            (b'dh/~7e23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'9cec6f97d569c10995f785720044ea2e4227481b'))
+            (
+                b'data/~23456789-123456789-123456789-123456789-123456789-'
+                b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
+                b'12345'
+            ),
+            (
+                b'dh/~7e23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'9cec6f97d569c10995f785720044ea2e4227481b'
+            ),
+        )
 
     def testwinreservedoverlimit(self):
         # Windows reserved characters hitting length limit
         self.hybridencode(
-            (b'data/<23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/~3c23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxee'
-             b'67d8f275876ca1ef2500fc542e63c885c4e62d'))
+            (
+                b'data/<23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/~3c23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxee'
+                b'67d8f275876ca1ef2500fc542e63c885c4e62d'
+            ),
+        )
         self.hybridencode(
-            (b'data/>23456789-123456789-123456789-123456789-123456789-'
-             b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-12345'),
-            (b'dh/~3e23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'387a85a5b1547cc9136310c974df716818458ddb'))
+            (
+                b'data/>23456789-123456789-123456789-123456789-123456789-'
+                b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-12345'
+            ),
+            (
+                b'dh/~3e23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'387a85a5b1547cc9136310c974df716818458ddb'
+            ),
+        )
         self.hybridencode(
-            (b'data/:23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-12345'),
-            (b'dh/~3a23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'2e4154fb571d13d22399c58cc4ef4858e4b75999'))
+            (
+                b'data/:23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-12345'
+            ),
+            (
+                b'dh/~3a23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'2e4154fb571d13d22399c58cc4ef4858e4b75999'
+            ),
+        )
         self.hybridencode(
-            (b'data/"23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/~2223456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'fc7e3ec7b0687ee06ed8c32fef0eb0c1980259f5'))
+            (
+                b'data/"23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/~2223456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'fc7e3ec7b0687ee06ed8c32fef0eb0c1980259f5'
+            ),
+        )
         self.hybridencode(
-            (b'data/\\23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-12345'),
-            (b'dh/~5c23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'944e1f2b7110687e116e0d151328ac648b06ab4a'))
+            (
+                b'data/\\23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-12345'
+            ),
+            (
+                b'dh/~5c23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'944e1f2b7110687e116e0d151328ac648b06ab4a'
+            ),
+        )
         self.hybridencode(
-            (b'data/|23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/~7c23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'28b23dd3fd0242946334126ab62bcd772aac32f4'))
+            (
+                b'data/|23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/~7c23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'28b23dd3fd0242946334126ab62bcd772aac32f4'
+            ),
+        )
         self.hybridencode(
-            (b'data/?23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/~3f23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'a263022d3994d2143d98f94f431eef8b5e7e0f8a'))
+            (
+                b'data/?23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/~3f23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'a263022d3994d2143d98f94f431eef8b5e7e0f8a'
+            ),
+        )
         self.hybridencode(
-            (b'data/*23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-12345'),
-            (b'dh/~2a23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'0e7e6020e3c00ba7bb7893d84ca2966fbf53e140'))
+            (
+                b'data/*23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-12345'
+            ),
+            (
+                b'dh/~2a23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'0e7e6020e3c00ba7bb7893d84ca2966fbf53e140'
+            ),
+        )
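
The ``dh/`` prefixes above all begin with the same two-hex-digit escape applied to the offending character, so the mapping is easy to check directly (the ``~20`` entry also covers the initial space in the next method)::

    # '<' '>' ':' '"' '\' '|' '?' '*' and space from the expectations above,
    # each escaped as '~' + two lowercase hex digits of the byte
    for ch, enc in [(b'<', b'~3c'), (b'>', b'~3e'), (b':', b'~3a'),
                    (b'"', b'~22'), (b'\\', b'~5c'), (b'|', b'~7c'),
                    (b'?', b'~3f'), (b'*', b'~2a'), (b' ', b'~20')]:
        assert b'~%02x' % ch[0] == enc
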
 
     def testinitialspacelenlimit(self):
         # initial space hitting length limit
         self.hybridencode(
-            (b'data/ 23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-12345'),
-            (b'dh/~2023456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'92acbc78ef8c0b796111629a02601f07d8aec4ea'))
+            (
+                b'data/ 23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-12345'
+            ),
+            (
+                b'dh/~2023456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'92acbc78ef8c0b796111629a02601f07d8aec4ea'
+            ),
+        )
 
     def testinitialdotlenlimit(self):
         # initial dot hitting length limit
         self.hybridencode(
-            (b'data/.23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/~2e23456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'dbe19cc6505b3515ab9228cebf877ad07075168f'))
+            (
+                b'data/.23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/~2e23456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'dbe19cc6505b3515ab9228cebf877ad07075168f'
+            ),
+        )
 
     def testtrailingspacelenlimit(self):
         # trailing space in filename hitting length limit
         self.hybridencode(
-            (b'data/123456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-1234 '),
-            (b'dh/123456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxx'
-             b'0025dc73e04f97426db4893e3bf67d581dc6d066'))
+            (
+                b'data/123456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-1234 '
+            ),
+            (
+                b'dh/123456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxx'
+                b'0025dc73e04f97426db4893e3bf67d581dc6d066'
+            ),
+        )
 
     def testtrailingdotlenlimit(self):
         # trailing dot in filename hitting length limit
         self.hybridencode(
-            (b'data/123456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
-             b'1234.'),
-            (b'dh/123456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxx'
-             b'85a16cf03ee7feba8a5abc626f1ba9886d01e89d'))
+            (
+                b'data/123456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
+                b'1234.'
+            ),
+            (
+                b'dh/123456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxx'
+                b'85a16cf03ee7feba8a5abc626f1ba9886d01e89d'
+            ),
+        )
 
     def testinitialspacedirlenlimit(self):
         # initial space in directory hitting length limit
         self.hybridencode(
-            (b'data/ x/456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/~20x/456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'1b3a3b712b2ac00d6af14ae8b4c14fdbf904f516'))
+            (
+                b'data/ x/456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/~20x/456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'1b3a3b712b2ac00d6af14ae8b4c14fdbf904f516'
+            ),
+        )
 
     def testinitialdotdirlenlimit(self):
         # initial dot in directory hitting length limit
         self.hybridencode(
-            (b'data/.x/456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/~2ex/456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'39dbc4c193a5643a8936fc69c3363cd7ac91ab14'))
+            (
+                b'data/.x/456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/~2ex/456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'39dbc4c193a5643a8936fc69c3363cd7ac91ab14'
+            ),
+        )
 
     def testtrailspacedirlenlimit(self):
         # trailing space in directory hitting length limit
         self.hybridencode(
-            (b'data/x /456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/x~20/456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'2253c341df0b5290790ad312cd8499850f2273e5'))
+            (
+                b'data/x /456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/x~20/456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'2253c341df0b5290790ad312cd8499850f2273e5'
+            ),
+        )
 
     def testtrailingdotdirlenlimit(self):
         # trailing dot in directory hitting length limit
         self.hybridencode(
-            (b'data/x./456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-12345'),
-            (b'dh/x~2e/456789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'cc0324d696d34562b44b5138db08ee1594ccc583'))
+            (
+                b'data/x./456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-12345'
+            ),
+            (
+                b'dh/x~2e/456789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'cc0324d696d34562b44b5138db08ee1594ccc583'
+            ),
+        )
 
     def testdirencodinglenlimit(self):
         # with directories that need direncoding, hitting length limit
         self.hybridencode(
-            (b'data/x.i/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
-             b'12345'),
-            (b'dh/x.i.hg/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxx'
-             b'a4c4399bdf81c67dbbbb7060aa0124d8dea94f74'))
+            (
+                b'data/x.i/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-'
+                b'12345'
+            ),
+            (
+                b'dh/x.i.hg/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxx'
+                b'a4c4399bdf81c67dbbbb7060aa0124d8dea94f74'
+            ),
+        )
         self.hybridencode(
-            (b'data/x.d/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/x.d.hg/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxx'
-             b'1303fa90473b230615f5b3ea7b660e881ae5270a'))
+            (
+                b'data/x.d/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/x.d.hg/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxx'
+                b'1303fa90473b230615f5b3ea7b660e881ae5270a'
+            ),
+        )
         self.hybridencode(
-            (b'data/x.hg/5789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/x.hg.hg/5789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxx'
-             b'26d724a8af68e7a4e4455e6602ea9adbd0eb801f'))
+            (
+                b'data/x.hg/5789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/x.hg.hg/5789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxx'
+                b'26d724a8af68e7a4e4455e6602ea9adbd0eb801f'
+            ),
+        )
 
     def testwinreservedfilenameslimit(self):
         # Windows reserved filenames, hitting length limit
         self.hybridencode(
-            (b'data/con/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-12345'),
-            (b'dh/co~6e/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'c0794d4f4c605a2617900eb2563d7113cf6ea7d3'))
+            (
+                b'data/con/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-12345'
+            ),
+            (
+                b'dh/co~6e/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'c0794d4f4c605a2617900eb2563d7113cf6ea7d3'
+            ),
+        )
         self.hybridencode(
-            (b'data/prn/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/pr~6e/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'64db876e1a9730e27236cb9b167aff942240e932'))
+            (
+                b'data/prn/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/pr~6e/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'64db876e1a9730e27236cb9b167aff942240e932'
+            ),
+        )
         self.hybridencode(
-            (b'data/aux/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/au~78/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'8a178558405ca6fb4bbd75446dfa186f06751a0d'))
+            (
+                b'data/aux/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/au~78/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'8a178558405ca6fb4bbd75446dfa186f06751a0d'
+            ),
+        )
         self.hybridencode(
-            (b'data/nul/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/nu~6c/56789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'c5e51b6fec1bd07bd243b053a0c3f7209855b886'))
+            (
+                b'data/nul/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/nu~6c/56789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'c5e51b6fec1bd07bd243b053a0c3f7209855b886'
+            ),
+        )
         self.hybridencode(
-            (b'data/com1/6789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/co~6d1/6789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'32f5f44ece3bb62b9327369ca84cc19c86259fcd'))
+            (
+                b'data/com1/6789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/co~6d1/6789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'32f5f44ece3bb62b9327369ca84cc19c86259fcd'
+            ),
+        )
         self.hybridencode(
-            (b'data/com9/6789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/co~6d9/6789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'734360b28c66a3230f55849fe8926206d229f990'))
+            (
+                b'data/com9/6789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/co~6d9/6789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'734360b28c66a3230f55849fe8926206d229f990'
+            ),
+        )
         self.hybridencode(
-            (b'data/lpt1/6789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/lp~741/6789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'e6f16ab4b6b0637676b2842b3345c9836df46ef7'))
+            (
+                b'data/lpt1/6789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/lp~741/6789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'e6f16ab4b6b0637676b2842b3345c9836df46ef7'
+            ),
+        )
         self.hybridencode(
-            (b'data/lpt9/6789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-12345'),
-            (b'dh/lp~749/6789-123456789-123456789-123456789-123456789'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
-             b'a475814c51acead3e44f2ff801f0c4903f986157'))
+            (
+                b'data/lpt9/6789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-12345'
+            ),
+            (
+                b'dh/lp~749/6789-123456789-123456789-123456789-123456789'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxx'
+                b'a475814c51acead3e44f2ff801f0c4903f986157'
+            ),
+        )
 
     def testnonreservednolimit(self):
         # non-reserved names, just not hitting limit
         self.hybridencode(
-            (b'data/123456789-123456789-123456789-123456789-123456789-'
-             b'/com/com0/lpt/lpt0/'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'),
-            (b'data/123456789-123456789-123456789-123456789-123456789-'
-             b'/com/com0/lpt/lpt0/'
-             b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'))
+            (
+                b'data/123456789-123456789-123456789-123456789-123456789-'
+                b'/com/com0/lpt/lpt0/'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'
+            ),
+            (
+                b'data/123456789-123456789-123456789-123456789-123456789-'
+                b'/com/com0/lpt/lpt0/'
+                b'-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12345'
+            ),
+        )
 
     def testhashedpathuntrucfirst(self):
         # hashed path with largest untruncated 1st dir
         self.hybridencode(
-            (b'data/12345678/-123456789-123456789-123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/-123456789-123456789-123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxx4e9e9e384d00929a93b6835fbf976eb32321ff3c'))
+            (
+                b'data/12345678/-123456789-123456789-123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/-123456789-123456789-123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxx4e9e9e384d00929a93b6835fbf976eb32321ff3c'
+            ),
+        )
 
     def testhashedpathsmallesttrucdir(self):
         # hashed path with smallest truncated 1st dir
         self.hybridencode(
-            (b'data/123456789/123456789-123456789-123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/123456789-123456789-123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxx1f4e4ec5f2be76e109bfaa8e31c062fe426d5490'))
+            (
+                b'data/123456789/123456789-123456789-123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/123456789-123456789-123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxx1f4e4ec5f2be76e109bfaa8e31c062fe426d5490'
+            ),
+        )
 
     def testhashedlargesttwountruc(self):
         # hashed path with largest untruncated two dirs
         self.hybridencode(
-            (b'data/12345678/12345678/9-123456789-123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/9-123456789-123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxx3332d8329d969cf835542a9f2cbcfb385b6cf39d'))
+            (
+                b'data/12345678/12345678/9-123456789-123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/9-123456789-123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxx3332d8329d969cf835542a9f2cbcfb385b6cf39d'
+            ),
+        )
 
     def testhashedpathsmallesttrunctwodirs(self):
         # hashed path with smallest truncated two dirs
         self.hybridencode(
-            (b'data/123456789/123456789/123456789-123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/123456789-123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx'
-             b'9699559798247dffa18717138859be5f8874840e'))
+            (
+                b'data/123456789/123456789/123456789-123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/123456789-123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx'
+                b'9699559798247dffa18717138859be5f8874840e'
+            ),
+        )
 
     def testhashuntruncthree(self):
         # hashed path with largest untruncated three dirs
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/89-123456789-123456789-'
-             b'hashed----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-123456'),
-            (b'dh/12345678/12345678/12345678/89-123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxf0a2b053bb1369cce02f78c217d6a7aaea18c439'))
+            (
+                b'data/12345678/12345678/12345678/89-123456789-123456789-'
+                b'hashed----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/89-123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxf0a2b053bb1369cce02f78c217d6a7aaea18c439'
+            ),
+        )
 
     def testhashtruncthree(self):
         # hashed path with smallest truncated three dirs
         self.hybridencode(
-            (b'data/123456789/123456789/123456789/123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/123456789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-'
-             b'1c6f8284967384ec13985a046d3553179d9d03cd'))
+            (
+                b'data/123456789/123456789/123456789/123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/123456789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-'
+                b'1c6f8284967384ec13985a046d3553179d9d03cd'
+            ),
+        )
 
     def testhashuntrucfour(self):
         # hashed path with largest untruncated four dirs
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/789-123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxx0d30c99049d8f0ff97b94d4ef302027e8d54c6fd'))
+            (
+                b'data/12345678/12345678/12345678/12345678/789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/789-123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxx0d30c99049d8f0ff97b94d4ef302027e8d54c6fd'
+            ),
+        )
 
     def testhashtruncfour(self):
         # hashed path with smallest truncated four dirs
         self.hybridencode(
-            (b'data/123456789/123456789/123456789/123456789/123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/123456789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-x'
-             b'46162779e1a771810b37a737f82ae7ed33771402'))
+            (
+                b'data/123456789/123456789/123456789/123456789/123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/123456789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-x'
+                b'46162779e1a771810b37a737f82ae7ed33771402'
+            ),
+        )
 
     def testhashuntruncfive(self):
         # hashed path with largest untruncated five dirs
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/6789-hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/6789-hashed'
-             b'----xxxxxxxxx-xxxxxxxbfe752ddc8b003c2790c66a9f2eb1ea75c114390'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/6789-hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/6789-hashed'
+                b'----xxxxxxxxx-xxxxxxxbfe752ddc8b003c2790c66a9f2eb1ea75c114390'
+            ),
+        )
 
     def testhashtruncfive(self):
         # hashed path with smallest truncated five dirs
         self.hybridencode(
-            (b'data/123456789/123456789/123456789/123456789/123456789/hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/hashed'
-             b'----xxxxxxxxx-xxxxxxxxx-xx'
-             b'b94c27b3532fa880cdd572b1c514785cab7b6ff2'))
+            (
+                b'data/123456789/123456789/123456789/123456789/123456789/hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/hashed'
+                b'----xxxxxxxxx-xxxxxxxxx-xx'
+                b'b94c27b3532fa880cdd572b1c514785cab7b6ff2'
+            ),
+        )
 
     def testhashuntruncsix(self):
         # hashed path with largest untruncated six dirs
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'ed----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'ed----xxxxxxxxx-xxxxxxx'
-             b'cd8cc5483a0f3be409e0e5d4bf9e36e113c59235'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'ed----xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'ed----xxxxxxxxx-xxxxxxx'
+                b'cd8cc5483a0f3be409e0e5d4bf9e36e113c59235'
+            ),
+        )
 
     def testhashtruncsix(self):
         # hashed path with smallest truncated six dirs
         self.hybridencode(
-            (b'data/123456789/123456789/123456789/123456789/123456789/'
-              b'123456789/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-              b'123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'xxxxxxxxx-xxxxxxxxx-xxx'
-             b'47dd6f616f833a142da00701b334cebbf640da06'))
+            (
+                b'data/123456789/123456789/123456789/123456789/123456789/'
+                b'123456789/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'xxxxxxxxx-xxxxxxxxx-xxx'
+                b'47dd6f616f833a142da00701b334cebbf640da06'
+            ),
+        )
 
     def testhashuntrunc7(self):
         # hashed path with largest untruncated seven dirs
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/xxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/xxxxxx-xxxxxxx'
-             b'1c8ed635229fc22efe51035feeadeb4c8a0ecb82'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/xxxxxx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/xxxxxx-xxxxxxx'
+                b'1c8ed635229fc22efe51035feeadeb4c8a0ecb82'
+            ),
+        )
 
     def testhashtrunc7(self):
         # hashed path with smallest truncated seven dirs
         self.hybridencode(
-            (b'data/123456789/123456789/123456789/123456789/123456789/'
-              b'123456789/123456789/'
-              b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/123'
-             b'45678/xxxxxxxxx-xxxx298ff7d33f8ce6db57930837ffea2fb2f48bb926'))
+            (
+                b'data/123456789/123456789/123456789/123456789/123456789/'
+                b'123456789/123456789/'
+                b'xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/123'
+                b'45678/xxxxxxxxx-xxxx298ff7d33f8ce6db57930837ffea2fb2f48bb926'
+            ),
+        )
 
     def testhashuntrunc8(self):
         # hashed path with largest untruncated eight dirs
         # (directory 8 is dropped because it hits _maxshortdirslen)
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345678/xxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1'
-             b'2345678/xxxxxxx-xxxxxxc8996ccd41b471f768057181a4d59d2febe7277d'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345678/xxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1'
+                b'2345678/xxxxxxx-xxxxxxc8996ccd41b471f768057181a4d59d2febe7277d'
+            ),
+        )
 
     def testhashtrunc8(self):
         # hashed path with smallest truncated eight dirs
         # (directory 8 is dropped because it hits _maxshortdirslen)
         self.hybridencode(
-            (b'data/123456789/123456789/123456789/123456789/123456789/'
-             b'123456789/123456789/123456789/xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/xxxxxxxxx-xxxx'
-             b'4fa04a839a6bda93e1c21c713f2edcbd16e8890d'))
+            (
+                b'data/123456789/123456789/123456789/123456789/123456789/'
+                b'123456789/123456789/123456789/xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/xxxxxxxxx-xxxx'
+                b'4fa04a839a6bda93e1c21c713f2edcbd16e8890d'
+            ),
+        )
 
     def testhashnondropped8(self):
         # hashed path with largest non-dropped directory 8
         # (just not hitting the _maxshortdirslen boundary)
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789'
-             b'-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/12345/-xxxxxxx'
-             b'4d43d1ccaa20efbfe99ec779dc063611536ff2c5'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789'
+                b'-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/12345/-xxxxxxx'
+                b'4d43d1ccaa20efbfe99ec779dc063611536ff2c5'
+            ),
+        )
         # ...adding one truncated char to dir 1..7 won't drop dir 8
         self.hybridencode(
-            (b'data/12345678x/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
-             b'5678/12345/xxxxxxxx0f9efce65189cc60fd90fe4ffd49d7b58bbe0f2e'))
+            (
+                b'data/12345678x/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
+                b'5678/12345/xxxxxxxx0f9efce65189cc60fd90fe4ffd49d7b58bbe0f2e'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678x/12345678/12345678/12345678/12345678'
-             b'/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
-             b'5678/12345/xxxxxxxx945ca395708cafdd54a94501859beabd3e243921'))
+            (
+                b'data/12345678/12345678x/12345678/12345678/12345678/12345678'
+                b'/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
+                b'5678/12345/xxxxxxxx945ca395708cafdd54a94501859beabd3e243921'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678x/12345678/12345678/12345678/12'
-             b'345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
-             b'5678/12345/xxxxxxxxac62bf6898c4fd0502146074547c11caa751a327'))
+            (
+                b'data/12345678/12345678/12345678x/12345678/12345678/12345678/12'
+                b'345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
+                b'5678/12345/xxxxxxxxac62bf6898c4fd0502146074547c11caa751a327'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678x/12345678/12345678/12'
-             b'345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
-             b'5678/12345/xxxxxxxx2ae5a2baed7983fae8974d0ca06c6bf08b9aee92'))
+            (
+                b'data/12345678/12345678/12345678/12345678x/12345678/12345678/12'
+                b'345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
+                b'5678/12345/xxxxxxxx2ae5a2baed7983fae8974d0ca06c6bf08b9aee92'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678x/12345678/'
-             b'12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
-             b'5678/12345/xxxxxxxx214aba07b6687532a43d1e9eaf6e88cfca96b68c'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678x/12345678/'
+                b'12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
+                b'5678/12345/xxxxxxxx214aba07b6687532a43d1e9eaf6e88cfca96b68c'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678x'
-             b'/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
-             b'5678/12345/xxxxxxxxe7a022ae82f0f55cf4e0498e55ba59ea4ebb55bf'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678x'
+                b'/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/1234'
+                b'5678/12345/xxxxxxxxe7a022ae82f0f55cf4e0498e55ba59ea4ebb55bf'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678x/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12345'
-             b'678/12345/xxxxxxxxb51ce61164996a80f36ce3cfe64b62d519aedae3'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678x/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12345'
+                b'678/12345/xxxxxxxxb51ce61164996a80f36ce3cfe64b62d519aedae3'
+            ),
+        )
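
This method and the next bracket the shortened-directories limit exactly. Assuming ``_dirprefixlen == 8`` and ``_maxshortdirslen == 68`` (the values in ``mercurial/store.py``), the joined directory run lands one byte either side of the boundary::

    # seven 8-byte dirs plus a 5- or 6-byte eighth dir, slash-separated
    kept = [b'12345678'] * 7 + [b'12345']
    assert len(b'/'.join(kept)) == 68       # exactly at the limit: dir 8 kept
    dropped = [b'12345678'] * 7 + [b'123456']
    assert len(b'/'.join(dropped)) == 69    # one byte over: dir 8 dropped
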
 
     def testhashedpathshortestdropped8(self):
         # hashed path with shortest dropped directory 8
         # (just hitting the _maxshortdirslen boundary)
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/123456/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/xxxxxxxxx-xxxx'
-             b'11fa9873cc6c3215eae864528b5530a04efc6cfe'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/123456/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/xxxxxxxxx-xxxx'
+                b'11fa9873cc6c3215eae864528b5530a04efc6cfe'
+            ),
+        )
 
     def testhashedpathdropsdir8fortrailingdotspace(self):
         # hashed path that drops dir 8 because the dot or space at its end
         # gets encoded, which makes the shortened run hit _maxshortdirslen
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/1234./-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/-xxxxxxxxx-xxx'
-             b'602df9b45bec564e2e1f0645d5140dddcc76ed58'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/1234./-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/-xxxxxxxxx-xxx'
+                b'602df9b45bec564e2e1f0645d5140dddcc76ed58'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/1234 /-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
-             b'123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/-xxxxxxxxx-xxx'
-             b'd99ff212bc84b4d1f70cd6b0071e3ef69d4e12ce'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/1234 /-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-'
+                b'123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/-xxxxxxxxx-xxx'
+                b'd99ff212bc84b4d1f70cd6b0071e3ef69d4e12ce'
+            ),
+        )
         # ... with dir 8 short enough for encoding
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/12./xx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
-             b'-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/12~2e/'
-             b'xx-xxxxx7baeb5ed7f14a586ee1cacecdbcbff70032d1b3c'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/12./xx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx'
+                b'-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/12~2e/'
+                b'xx-xxxxx7baeb5ed7f14a586ee1cacecdbcbff70032d1b3c'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/12 '
-             b'/xx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/12~20/'
-             b'xx-xxxxxcf79ca9795f77d7f75745da36807e5d772bd5182'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/12 '
+                b'/xx-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-123456'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/12~20/'
+                b'xx-xxxxxcf79ca9795f77d7f75745da36807e5d772bd5182'
+            ),
+        )
 
     def testextensionsreplicatedonhashedpaths(self):
         # Extensions are replicated on hashed paths. Note that
         # we only get to encode files that end in .i or .d inside the
         # store. Encoded filenames are thus bounded in length.
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'45.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxxc10ad03b5755ed524f5286aab1815dfe07729438.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'45.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxxc10ad03b5755ed524f5286aab1815dfe07729438.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'45.d'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxx9eec83381f2b39ef5ac8b4ecdf2c94f7983f57c8.d'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'45.d'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxx9eec83381f2b39ef5ac8b4ecdf2c94f7983f57c8.d'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'456.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxxb7796dc7d175cfb0bb8a7728f58f6ebec9042568.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'456.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxxb7796dc7d175cfb0bb8a7728f58f6ebec9042568.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'4567.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxxb515857a6bfeef017c4894d8df42458ac65d55b8.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'4567.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxxb515857a6bfeef017c4894d8df42458ac65d55b8.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'45678.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxxb05a0f247bc0a776211cd6a32ab714fd9cc09f2b.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'45678.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxxb05a0f247bc0a776211cd6a32ab714fd9cc09f2b.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'456789.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxxf192b48bff08d9e0e12035fb52bc58c70de72c94.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'456789.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxxf192b48bff08d9e0e12035fb52bc58c70de72c94.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'456789-.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxx435551e0ed4c7b083b9ba83cee916670e02e80ad.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'456789-.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxx435551e0ed4c7b083b9ba83cee916670e02e80ad.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'456789-1.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxxa7f74eb98d8d58b716356dfd26e2f9aaa65d6a9a.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'456789-1.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxxa7f74eb98d8d58b716356dfd26e2f9aaa65d6a9a.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'456789-12.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxxed68d9bd43b931f0b100267fee488d65a0c66f62.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'456789-12.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxxed68d9bd43b931f0b100267fee488d65a0c66f62.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'456789-123.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxx5cea44de2b642d2ba2b4a30693ffb1049644d698.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'456789-123.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxx5cea44de2b642d2ba2b4a30693ffb1049644d698.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'456789-1234.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxx68462f62a7f230b39c1b5400d73ec35920990b7e.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'456789-1234.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxx68462f62a7f230b39c1b5400d73ec35920990b7e.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'456789-12345.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxx4cb852a314c6da240a83eec94761cdd71c6ec22e.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'456789-12345.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxx4cb852a314c6da240a83eec94761cdd71c6ec22e.i'
+            ),
+        )
         self.hybridencode(
-            (b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
-             b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
-             b'456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-'
-             b'abcdefghjiklmnopqrstuvwxyz-ABCDEFGHIJKLMNOPRSTUVWXYZ'
-             b'-1234567890-xxxxxxxxx-xxxxxxxxx-xxxxxxxx'
-             b'-xxxxxxxxx-wwwwwwwww-wwwwwwwww-wwwwwwwww-wwwwwwwww'
-             b'-wwwwwwwww-wwwwwwwww-wwwwwwwww-wwwwwwwww-wwwwwwwww.i'),
-            (b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
-             b'345678/12345/-xxxxx93352aa50377751d9e5ebdf52da1e6e69a6887a6.i'))
+            (
+                b'data/12345678/12345678/12345678/12345678/12345678/12345678/'
+                b'12345678/12345/-xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3'
+                b'456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-'
+                b'abcdefghjiklmnopqrstuvwxyz-ABCDEFGHIJKLMNOPRSTUVWXYZ'
+                b'-1234567890-xxxxxxxxx-xxxxxxxxx-xxxxxxxx'
+                b'-xxxxxxxxx-wwwwwwwww-wwwwwwwww-wwwwwwwww-wwwwwwwww'
+                b'-wwwwwwwww-wwwwwwwww-wwwwwwwww-wwwwwwwww-wwwwwwwww.i'
+            ),
+            (
+                b'dh/12345678/12345678/12345678/12345678/12345678/12345678/12'
+                b'345678/12345/-xxxxx93352aa50377751d9e5ebdf52da1e6e69a6887a6.i'
+            ),
+        )
 
     def testpathsoutsidedata(self):
         # paths outside data/ can be encoded
-        self.hybridencode(b'metadata/dir/00manifest.i',
-                          b'metadata/dir/00manifest.i')
+        self.hybridencode(
+            b'metadata/dir/00manifest.i', b'metadata/dir/00manifest.i'
+        )
         self.hybridencode(
-            (b'metadata/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/12345678/00manifest.i'),
-            (b'dh/ata/12345678/12345678/12345678/12345678/12345678'
-             b'/12345678/12345678/00manife'
-             b'0a4da1f89aa2aa9eb0896eb451288419049781b4.i'))
+            (
+                b'metadata/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/12345678/00manifest.i'
+            ),
+            (
+                b'dh/ata/12345678/12345678/12345678/12345678/12345678'
+                b'/12345678/12345678/00manife'
+                b'0a4da1f89aa2aa9eb0896eb451288419049781b4.i'
+            ),
+        )
+
 
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
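
The hybridencode expectations above exercise Mercurial's hashed store-path
fallback: when the plain-encoded path of a tracked file would be too long,
the store switches to a "dh/" path that keeps a shortened directory prefix
and appends the SHA-1 of the full path, preserving any ".i"/".d" suffix so
revlog files stay recognizable. A rough sketch of that fallback follows;
the 120-byte cap and the prefix-budget arithmetic are assumptions for
illustration, not the exact rules in mercurial/store.py::

    import hashlib

    _MAXSTOREPATHLEN = 120  # assumed cap, for illustration only

    def toy_hashencode(path):
        # Illustrative only: the real hybridencode also shortens each
        # directory component, caps the directory run at _maxshortdirslen,
        # and escapes bytes such as '.' and ' ' into the ~2e / ~20 forms
        # visible in the expected outputs above.
        if len(path) <= _MAXSTOREPATHLEN:
            return path
        digest = hashlib.sha1(path).hexdigest().encode('ascii')
        ext = path[-2:] if path[-2:] in (b'.i', b'.d') else b''
        budget = _MAXSTOREPATHLEN - len(b'dh/') - len(digest) - len(ext)
        return b'dh/' + path[len(b'data/'):][:budget] + digest + ext
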
--- a/tests/test-import.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-import.t	Mon Oct 21 11:09:48 2019 -0400
@@ -34,8 +34,25 @@
   new changesets 80971e65b431
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ HGEDITOR=cat hg --cwd b import ../exported-tip.patch
+  $ HGEDITOR=cat hg --cwd b import --debug ../exported-tip.patch
   applying ../exported-tip.patch
+  Subject: 
+  
+  Content-Type: text/plain
+  found patch at byte 202
+  patch generated by hg export
+  From: someone
+  Date: 1 0
+  Node ID: 1d4bd90af0e43687763d158dfa83ff2a4b6c0c32
+  message:
+  second change
+  patching file a
+  committing files:
+  a
+  committing manifest
+  committing changelog
+  created 1d4bd90af0e4
+  updating the branch cache
 
 message and committer and date should be same
 
@@ -220,7 +237,6 @@
   [255]
   $ rm -r b
 
-
 hg -R repo import
 put the clone in a subdir - having a directory named "a"
 used to hide a bug.
@@ -379,6 +395,48 @@
   summary:     second change
   $ rm -r b
 
+hg email --plain, should read X-Mercurial-Node header
+
+  $ cat >> a/.hg/hgrc << EOF
+  > [extensions]
+  > patchbomb =
+  > [email]
+  > from = foo
+  > cc = foo
+  > to = bar
+  > EOF
+  $ hg --cwd a email -m ../tip-plain.mbox --plain --date '1970-1-1 0:1' tip
+  this patch series consists of 1 patches.
+  
+  
+  sending [PATCH] second change ...
+
+  $ hg clone -r0 a b -q
+  $ hg --cwd b import --debug ../tip-plain.mbox
+  applying ../tip-plain.mbox
+  Node ID: 1d4bd90af0e43687763d158dfa83ff2a4b6c0c32
+  Subject: second change
+  From: foo
+  Content-Type: text/plain
+  found patch at byte 0
+  message:
+  second change
+  patching file a
+  committing files:
+  a
+  committing manifest
+  committing changelog
+  created de620f6fe949
+  updating the branch cache
+  $ hg --cwd b tip
+  changeset:   1:de620f6fe949
+  tag:         tip
+  user:        foo
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     second change
+  
+  $ rm -r b
+
 
 subject: duplicate detection, removal of [PATCH]
 The '---' tests the gitsendmail handling without proper mail headers
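
The new test block feeds a patchbomb-generated mbox back through
"hg import --debug" and checks that metadata such as the
X-Mercurial-Node header survives the --plain round trip. A hedged
sketch of that kind of header extraction with the stdlib email module
(the dict keys here are illustrative; Mercurial's real mail parsing
lives in mercurial/patch.py)::

    import email

    def patch_metadata(raw):
        # Toy reader for the headers the debug output above shows
        # 'hg import' consuming; not Mercurial's actual parser.
        msg = email.message_from_bytes(raw)
        return {
            'node': msg.get('X-Mercurial-Node'),
            'subject': msg.get('Subject'),
            'sender': msg.get('From'),
        }
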
--- a/tests/test-infinitepush-bundlestore.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-infinitepush-bundlestore.t	Mon Oct 21 11:09:48 2019 -0400
@@ -168,8 +168,8 @@
   adding changesets
   adding manifests
   adding file changes
+  adding remote bookmark newbook
   added 1 changesets with 1 changes to 2 files
-  adding remote bookmark newbook
   new changesets 1de1d7d92f89 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg log -G -T '{desc} {phase} {bookmarks}'
--- a/tests/test-infinitepush.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-infinitepush.t	Mon Oct 21 11:09:48 2019 -0400
@@ -78,11 +78,10 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files (+1 heads)
+  added 2 changesets with 2 changes to 2 files (+1 heads)
   new changesets * (glob)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg log -r scratch/secondpart -T '{node}'
@@ -158,11 +157,10 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files (+1 heads)
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
+  added 2 changesets with 2 changes to 2 files (+1 heads)
   new changesets a79b6597f322:c70aee6da07d (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg log -r scratch/scratchontopofpublic -T '{phase}'
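
Both test-infinitepush.t hunks reflect the same behavior change: a pull
that applies several changegroups now prints one aggregated "added N
changesets with M changes to K files" line instead of one line per
changegroup. A toy aggregation under that reading (the dict fields are
invented for illustration; the real accounting happens in Mercurial's
bundle2/changegroup code)::

    def summarize(stats):
        # stats: one dict per applied changegroup, e.g.
        # [{'changesets': 1, 'changes': 1, 'files': {'a'}, 'heads': 1}]
        csets = sum(s['changesets'] for s in stats)
        changes = sum(s['changes'] for s in stats)
        files = len(set().union(*(s['files'] for s in stats)))
        heads = sum(s['heads'] for s in stats)
        line = 'added %d changesets with %d changes to %d files' % (
            csets, changes, files)
        if heads:
            line += ' (+%d heads)' % heads
        return line
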
--- a/tests/test-install.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-install.t	Mon Oct 21 11:09:48 2019 -0400
@@ -153,6 +153,16 @@
   1 problems detected, please check your install!
   [1]
 
+debuginstall extension support
+  $ hg debuginstall --config extensions.fsmonitor= --config fsmonitor.watchman_exe=false | grep atchman
+  fsmonitor checking for watchman binary... (false)
+   watchman binary missing or broken: warning: Watchman unavailable: watchman exited with code 1
+Verify the json works too:
+  $ hg debuginstall --config extensions.fsmonitor= --config fsmonitor.watchman_exe=false -Tjson | grep atchman
+    "fsmonitor-watchman": "false",
+    "fsmonitor-watchman-error": "warning: Watchman unavailable: watchman exited with code 1",
+
+
 #if test-repo
   $ . "$TESTDIR/helpers-testrepo.sh"
 
@@ -226,6 +236,11 @@
 
 #endif
 
+#if py3
+  $ HGALLOWPYTHON3=1
+  $ export HGALLOWPYTHON3
+#endif
+
 #if virtualenv
 
 Verify that Mercurial is installable with pip. Note that this MUST be
@@ -239,10 +254,12 @@
   $ unset PYTHONPATH
   $ "$PYTHON" -m virtualenv --no-site-packages --never-download installenv >> pip.log
   DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
+  DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
 Note: we use this weird path to run pip and hg to avoid platform differences,
 since it's bin on most platforms but Scripts on Windows.
   $ ./installenv/*/pip install --no-index $TESTDIR/.. >> pip.log
   DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
+  DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
   $ ./installenv/*/hg debuginstall || cat pip.log
   checking encoding (ascii)...
   checking Python executable (*) (glob)
--- a/tests/test-largefiles.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-largefiles.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1115,7 +1115,7 @@
   $ hg pull -v --lfrev 'heads(pulled())+min(pulled())'
   pulling from $TESTTMP/a
   searching for changes
-  all local heads known remotely
+  all local changesets known remotely
   6 changesets found
   uncompressed size of bundle content:
       1389 (changelog)
--- a/tests/test-lfconvert.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-lfconvert.t	Mon Oct 21 11:09:48 2019 -0400
@@ -332,6 +332,7 @@
   > evolution.createmarkers=True
   > EOF
   $ hg debugobsolete `hg log -r tip -T "{node}"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ cd ..
 
--- a/tests/test-lfs-pointer.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-lfs-pointer.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,10 +2,12 @@
 
 # Import something from Mercurial, so the module loader gets initialized.
 from mercurial import pycompat
+
 del pycompat  # unused for now
 
 from hgext.lfs import pointer
 
+
 def tryparse(text):
     r = {}
     try:
@@ -19,11 +21,14 @@
             print('reconstructed text differs')
     return r
 
-t = (b'version https://git-lfs.github.com/spec/v1\n'
-     b'oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1'
-     b'258daaa5e2ca24d17e2393\n'
-     b'size 12345\n'
-     b'x-foo extra-information\n')
+
+t = (
+    b'version https://git-lfs.github.com/spec/v1\n'
+    b'oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1'
+    b'258daaa5e2ca24d17e2393\n'
+    b'size 12345\n'
+    b'x-foo extra-information\n'
+)
 
 tryparse(b'')
 tryparse(t)
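
The reformatted sample above leaves the LFS pointer format itself
untouched: newline-separated "key value" pairs (version, oid, size,
plus optional x-* extensions). Decoding that shape takes only a few
lines; this sketch skips the validation and round-trip checks that
hgext/lfs/pointer.py performs and tryparse() exercises::

    def parse_pointer(text):
        # Split each b'key value' line; illustration only, with no
        # validation of required keys.
        d = {}
        for line in text.splitlines():
            key, _, value = line.partition(b' ')
            d[key] = value
        return d

    # e.g. parse_pointer(t)[b'size'] == b'12345' for the sample above
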
--- a/tests/test-lfs-serve-access.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-lfs-serve-access.t	Mon Oct 21 11:09:48 2019 -0400
@@ -105,16 +105,16 @@
   adding manifests
   adding file changes
   adding lfs.bin revisions
-  added 1 changesets with 1 changes to 1 files
   bundle2-input-part: total payload size 648
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
   bundle2-input-part: total payload size 39
-  bundle2-input-bundle: 3 parts total
+  bundle2-input-bundle: 4 parts total
   checking for updated bookmarks
   updating the branch cache
+  added 1 changesets with 1 changes to 1 files
   new changesets 525251863cad
   updating to branch default
   resolving manifests
@@ -353,7 +353,7 @@
   $LOCALIP - - [$ERRDATE$] HG error:  Traceback (most recent call last): (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      localstore.download(oid, req.bodyfh) (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      super(badstore, self).download(oid, src) (glob)
-  $LOCALIP - - [$ERRDATE$] HG error:      % oid) (glob)
+  $LOCALIP - - [$ERRDATE$] HG error:      _(b'corrupt remote lfs object: %s') % oid (glob)
   $LOCALIP - - [$ERRDATE$] HG error:  LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (glob)
   $LOCALIP - - [$ERRDATE$] HG error:   (glob)
   $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
@@ -362,9 +362,9 @@
       self.do_hgweb()
       for chunk in self.server.application(env, self._start_response):
       for r in self._runwsgi(req, res, repo):
-      rctx, req, res, self.check_perm)
+      rctx, req, res, self.check_perm
       return func(*(args + a), **kw) (no-py3 !)
-      lambda perm:
+      rctx.repo, req, res, lambda perm: checkperm(rctx, req, perm)
       res.setbodybytes(localstore.read(oid))
       blob = self._read(self.vfs, oid, verify)
       raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
@@ -375,7 +375,7 @@
   $LOCALIP - - [$ERRDATE$] HG error:      res.setbodybytes(localstore.read(oid)) (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      blob = self._read(self.vfs, oid, verify) (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      blobstore._verify(oid, b'dummy content') (glob)
-  $LOCALIP - - [$ERRDATE$] HG error:      hint=_(b'run hg verify')) (glob)
+  $LOCALIP - - [$ERRDATE$] HG error:      hint=_(b'run hg verify'), (glob)
   $LOCALIP - - [$ERRDATE$] HG error:  LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (glob)
   $LOCALIP - - [$ERRDATE$] HG error:   (glob)
 
--- a/tests/test-lfs-serve.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-lfs-serve.t	Mon Oct 21 11:09:48 2019 -0400
@@ -132,6 +132,12 @@
   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   beginning upgrade...
   repository locked and read-only
   creating temporary repository to stage migrated data: * (glob)
@@ -499,8 +505,8 @@
   adding changesets
   adding manifests
   adding file changes
+  calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
   added 6 changesets with 5 changes to 5 files (+1 heads)
-  calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
   new changesets d437e1d24fbd:d3b84d50eacb
   resolving manifests
   lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
--- a/tests/test-lfs-test-server.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-lfs-test-server.t	Mon Oct 21 11:09:48 2019 -0400
@@ -135,18 +135,18 @@
   adding manifests
   adding file changes
   adding a revisions
-  added 1 changesets with 1 changes to 1 files
   calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
   bundle2-input-part: total payload size 617
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 3 parts total
+  bundle2-input-bundle: 4 parts total
   updating the branch cache
+  added 1 changesets with 1 changes to 1 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
   $ mv .hg/store/lfs_ .hg/store/lfs
 
@@ -312,17 +312,17 @@
   adding b revisions
   adding c revisions
   adding d revisions
-  added 1 changesets with 3 changes to 3 files
   bundle2-input-part: total payload size 1315
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 5 parts total
   updating the branch cache
+  added 1 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   listing keys for "phases"
 
 Clear the cache to force a download
--- a/tests/test-lfs.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-lfs.t	Mon Oct 21 11:09:48 2019 -0400
@@ -124,8 +124,8 @@
   adding changesets
   adding manifests
   adding file changes
+  calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
   added 2 changesets with 3 changes to 3 files
-  calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
   $ grep lfs $TESTTMP/server/.hg/requires
   lfs
 
@@ -701,7 +701,7 @@
   >         if len(fl) == 0:
   >             continue
   >         sizes = [fl._revlog.rawsize(i) for i in fl]
-  >         texts = [fl.revision(i, raw=True) for i in fl]
+  >         texts = [fl.rawdata(i) for i in fl]
   >         flags = [int(fl._revlog.flags(i)) for i in fl]
   >         hashes = [hash(t) for t in texts]
   >         pycompat.stdout.write(b'  %s: rawsizes=%r flags=%r hashes=%s\n'
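
The one functional edit in test-lfs.t swaps fl.revision(i, raw=True)
for the newer filelog rawdata(i) accessor. A script that must run
against both old and new Mercurial could bridge the two like this
(a sketch; probing with hasattr is an assumption, not an advertised
compatibility contract)::

    def rawdata(fl, rev):
        # Prefer the modern API, fall back to the deprecated
        # raw=True form on older releases.
        if hasattr(fl, 'rawdata'):
            return fl.rawdata(rev)
        return fl.revision(rev, raw=True)
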
--- a/tests/test-linelog.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-linelog.py	Mon Oct 21 11:09:48 2019 -0400
@@ -6,12 +6,13 @@
 
 from mercurial import linelog
 
-vecratio = 3 # number of replacelines / number of replacelines_vec
-maxlinenum = 0xffffff
-maxb1 = 0xffffff
+vecratio = 3  # number of replacelines / number of replacelines_vec
+maxlinenum = 0xFFFFFF
+maxb1 = 0xFFFFFF
 maxdeltaa = 10
 maxdeltab = 10
 
+
 def _genedits(seed, endrev):
     lines = []
     random.seed(seed)
@@ -23,22 +24,26 @@
         b2 = random.randint(b1, b1 + maxdeltab)
         usevec = not bool(random.randint(0, vecratio))
         if usevec:
-            blines = [(random.randint(0, rev), random.randint(0, maxlinenum))
-                      for _ in range(b1, b2)]
+            blines = [
+                (random.randint(0, rev), random.randint(0, maxlinenum))
+                for _ in range(b1, b2)
+            ]
         else:
             blines = [(rev, bidx) for bidx in range(b1, b2)]
         lines[a1:a2] = blines
         yield lines, rev, a1, a2, b1, b2, blines, usevec
 
+
 class linelogtests(unittest.TestCase):
     def testlinelogencodedecode(self):
-        program = [linelog._eof(0, 0),
-                   linelog._jge(41, 42),
-                   linelog._jump(0, 43),
-                   linelog._eof(0, 0),
-                   linelog._jl(44, 45),
-                   linelog._line(46, 47),
-                   ]
+        program = [
+            linelog._eof(0, 0),
+            linelog._jge(41, 42),
+            linelog._jump(0, 43),
+            linelog._eof(0, 0),
+            linelog._jl(44, 45),
+            linelog._line(46, 47),
+        ]
         ll = linelog.linelog(program, maxrev=100)
         enc = ll.encode()
         # round-trips okay
@@ -46,89 +51,80 @@
         self.assertEqual(linelog.linelog.fromdata(enc), ll)
         # This encoding matches the encoding used by hg-experimental's
         # linelog file, or is supposed to if it doesn't.
-        self.assertEqual(enc, (b'\x00\x00\x01\x90\x00\x00\x00\x06'
-                               b'\x00\x00\x00\xa4\x00\x00\x00*'
-                               b'\x00\x00\x00\x00\x00\x00\x00+'
-                               b'\x00\x00\x00\x00\x00\x00\x00\x00'
-                               b'\x00\x00\x00\xb1\x00\x00\x00-'
-                               b'\x00\x00\x00\xba\x00\x00\x00/'))
+        self.assertEqual(
+            enc,
+            (
+                b'\x00\x00\x01\x90\x00\x00\x00\x06'
+                b'\x00\x00\x00\xa4\x00\x00\x00*'
+                b'\x00\x00\x00\x00\x00\x00\x00+'
+                b'\x00\x00\x00\x00\x00\x00\x00\x00'
+                b'\x00\x00\x00\xb1\x00\x00\x00-'
+                b'\x00\x00\x00\xba\x00\x00\x00/'
+            ),
+        )
 
     def testsimpleedits(self):
         ll = linelog.linelog()
         # Initial revision: add lines 0, 1, and 2
         ll.replacelines(1, 0, 0, 0, 3)
-        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(1)],
-                         [(1, 0),
-                          (1, 1),
-                          (1, 2),
-                         ])
+        self.assertEqual(
+            [(l.rev, l.linenum) for l in ll.annotate(1)],
+            [(1, 0), (1, 1), (1, 2),],
+        )
         # Replace line 1 with a new line
         ll.replacelines(2, 1, 2, 1, 2)
-        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(2)],
-                         [(1, 0),
-                          (2, 1),
-                          (1, 2),
-                         ])
+        self.assertEqual(
+            [(l.rev, l.linenum) for l in ll.annotate(2)],
+            [(1, 0), (2, 1), (1, 2),],
+        )
         # delete a line out of 2
         ll.replacelines(3, 1, 2, 0, 0)
-        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(3)],
-                         [(1, 0),
-                          (1, 2),
-                         ])
+        self.assertEqual(
+            [(l.rev, l.linenum) for l in ll.annotate(3)], [(1, 0), (1, 2),]
+        )
         # annotation of 1 is unchanged
-        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(1)],
-                         [(1, 0),
-                          (1, 1),
-                          (1, 2),
-                         ])
-        ll.annotate(3) # set internal state to revision 3
+        self.assertEqual(
+            [(l.rev, l.linenum) for l in ll.annotate(1)],
+            [(1, 0), (1, 1), (1, 2),],
+        )
+        ll.annotate(3)  # set internal state to revision 3
         start = ll.getoffset(0)
         end = ll.getoffset(1)
-        self.assertEqual(ll.getalllines(start, end), [
-            (1, 0),
-            (2, 1),
-            (1, 1),
-        ])
-        self.assertEqual(ll.getalllines(), [
-            (1, 0),
-            (2, 1),
-            (1, 1),
-            (1, 2),
-        ])
+        self.assertEqual(ll.getalllines(start, end), [(1, 0), (2, 1), (1, 1),])
+        self.assertEqual(ll.getalllines(), [(1, 0), (2, 1), (1, 1), (1, 2),])
 
     def testparseclinelogfile(self):
         # This data is what the replacements in testsimpleedits
         # produce when fed to the original linelog.c implementation.
-        data = (b'\x00\x00\x00\x0c\x00\x00\x00\x0f'
-                b'\x00\x00\x00\x00\x00\x00\x00\x02'
-                b'\x00\x00\x00\x05\x00\x00\x00\x06'
-                b'\x00\x00\x00\x06\x00\x00\x00\x00'
-                b'\x00\x00\x00\x00\x00\x00\x00\x07'
-                b'\x00\x00\x00\x06\x00\x00\x00\x02'
-                b'\x00\x00\x00\x00\x00\x00\x00\x00'
-                b'\x00\x00\x00\t\x00\x00\x00\t'
-                b'\x00\x00\x00\x00\x00\x00\x00\x0c'
-                b'\x00\x00\x00\x08\x00\x00\x00\x05'
-                b'\x00\x00\x00\x06\x00\x00\x00\x01'
-                b'\x00\x00\x00\x00\x00\x00\x00\x05'
-                b'\x00\x00\x00\x0c\x00\x00\x00\x05'
-                b'\x00\x00\x00\n\x00\x00\x00\x01'
-                b'\x00\x00\x00\x00\x00\x00\x00\t')
+        data = (
+            b'\x00\x00\x00\x0c\x00\x00\x00\x0f'
+            b'\x00\x00\x00\x00\x00\x00\x00\x02'
+            b'\x00\x00\x00\x05\x00\x00\x00\x06'
+            b'\x00\x00\x00\x06\x00\x00\x00\x00'
+            b'\x00\x00\x00\x00\x00\x00\x00\x07'
+            b'\x00\x00\x00\x06\x00\x00\x00\x02'
+            b'\x00\x00\x00\x00\x00\x00\x00\x00'
+            b'\x00\x00\x00\t\x00\x00\x00\t'
+            b'\x00\x00\x00\x00\x00\x00\x00\x0c'
+            b'\x00\x00\x00\x08\x00\x00\x00\x05'
+            b'\x00\x00\x00\x06\x00\x00\x00\x01'
+            b'\x00\x00\x00\x00\x00\x00\x00\x05'
+            b'\x00\x00\x00\x0c\x00\x00\x00\x05'
+            b'\x00\x00\x00\n\x00\x00\x00\x01'
+            b'\x00\x00\x00\x00\x00\x00\x00\t'
+        )
         llc = linelog.linelog.fromdata(data)
-        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(1)],
-                         [(1, 0),
-                          (1, 1),
-                          (1, 2),
-                         ])
-        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(2)],
-                         [(1, 0),
-                          (2, 1),
-                          (1, 2),
-                         ])
-        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(3)],
-                         [(1, 0),
-                          (1, 2),
-                         ])
+        self.assertEqual(
+            [(l.rev, l.linenum) for l in llc.annotate(1)],
+            [(1, 0), (1, 1), (1, 2),],
+        )
+        self.assertEqual(
+            [(l.rev, l.linenum) for l in llc.annotate(2)],
+            [(1, 0), (2, 1), (1, 2),],
+        )
+        self.assertEqual(
+            [(l.rev, l.linenum) for l in llc.annotate(3)], [(1, 0), (1, 2),]
+        )
         # Check we emit the same bytecode.
         ll = linelog.linelog()
         # Initial revision: add lines 0, 1, and 2
@@ -137,9 +133,15 @@
         ll.replacelines(2, 1, 2, 1, 2)
         # delete a line out of 2
         ll.replacelines(3, 1, 2, 0, 0)
-        diff = '\n   ' + '\n   '.join(difflib.unified_diff(
-            ll.debugstr().splitlines(), llc.debugstr().splitlines(),
-            'python', 'c', lineterm=''))
+        diff = '\n   ' + '\n   '.join(
+            difflib.unified_diff(
+                ll.debugstr().splitlines(),
+                llc.debugstr().splitlines(),
+                'python',
+                'c',
+                lineterm='',
+            )
+        )
         self.assertEqual(ll._program, llc._program, 'Program mismatch: ' + diff)
         # Done as a secondary step so we get a better result if the
         # program is where the mismatch is.
@@ -150,13 +152,12 @@
         ll = linelog.linelog()
         ll.replacelines(3, 0, 0, 0, 2)
         ll.replacelines(4, 0, 2, 0, 0)
-        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(4)],
-                         [])
-        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(3)],
-                         [(3, 0), (3, 1)])
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(4)], [])
+        self.assertEqual(
+            [(l.rev, l.linenum) for l in ll.annotate(3)], [(3, 0), (3, 1)]
+        )
         # rev 2 is empty because contents were only ever introduced in rev 3
-        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(2)],
-                         [])
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(2)], [])
 
     def testrandomedits(self):
         # Inspired by original linelog tests.
@@ -165,7 +166,8 @@
         ll = linelog.linelog()
         # Populate linelog
         for lines, rev, a1, a2, b1, b2, blines, usevec in _genedits(
-                seed, numrevs):
+            seed, numrevs
+        ):
             if usevec:
                 ll.replacelines_vec(rev, a1, a2, blines)
             else:
@@ -174,7 +176,8 @@
             self.assertEqual(ll.annotateresult, lines)
         # Verify we can get back these states by annotating each rev
         for lines, rev, a1, a2, b1, b2, blines, usevec in _genedits(
-                seed, numrevs):
+            seed, numrevs
+        ):
             ar = ll.annotate(rev)
             self.assertEqual([(l.rev, l.linenum) for l in ar], lines)
 
@@ -187,6 +190,8 @@
             # should not be an infinite loop and raise
             ll.annotate(1)
 
+
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
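
The expected encoding in testlinelogencodedecode groups cleanly into
8-byte units, suggesting pairs of big-endian 32-bit words per linelog
instruction (with a header unit carrying data such as maxrev). A
round-trip sketch of just that word layout, under the assumption that
the grouping above tells the whole story for a single instruction::

    import struct

    def encode_words(a, b):
        # One 8-byte unit as two big-endian uint32s.
        return struct.pack('>II', a, b)

    def decode_words(data):
        return struct.unpack('>II', data)

    assert decode_words(encode_words(41, 42)) == (41, 42)
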
--- a/tests/test-linerange.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-linerange.py	Mon Oct 21 11:09:48 2019 -0400
@@ -17,7 +17,9 @@
            09 at OLD
            10 at OLD
            11 at OLD
-'''[1:] # strip initial LF
+'''[
+    1:
+]  # strip initial LF
 
 text2 = b'''
 00 at NEW
@@ -32,7 +34,10 @@
 09 at NEW
 10 at NEW
 11 at NEW
-'''[1:] # strip initial LF
+'''[
+    1:
+]  # strip initial LF
+
 
 def filteredblocks(blocks, rangeb):
     """return `rangea` extracted from `blocks` coming from
@@ -42,8 +47,8 @@
     skipped = [b not in filtered for b in blocks]
     return rangea, skipped
 
+
 class blocksinrangetests(unittest.TestCase):
-
     def setUp(self):
         self.blocks = list(mdiff.allblocks(text1, text2))
         assert self.blocks == [
@@ -227,6 +232,8 @@
             else:
                 self.fail('%s not raised' % exctype.__name__)
 
+
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
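
filteredblocks() above drives mdiff.blocksinrange, which narrows the
full diff-block list down to the blocks touching a line range on the
"b" (new text) side. The interval-overlap test at its core can be
sketched as follows (simplified; the real function also derives the
matching "a" range and rejects empty input ranges)::

    def overlapping_blocks(blocks, rangeb):
        # Keep blocks whose b-side interval [b1, b2) intersects rangeb.
        bstart, bstop = rangeb
        return [
            ((a1, a2, b1, b2), btype)
            for (a1, a2, b1, b2), btype in blocks
            if b1 < bstop and b2 > bstart
        ]
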
--- a/tests/test-lock.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-lock.py	Mon Oct 21 11:09:48 2019 -0400
@@ -19,19 +19,24 @@
 
 # work around http://bugs.python.org/issue1515
 if types.MethodType not in copy._deepcopy_dispatch:
+
     def _deepcopy_method(x, memo):
         return type(x)(x.__func__, copy.deepcopy(x.__self__, memo), x.im_class)
+
     copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method
 
+
 class lockwrapper(lock.lock):
     def __init__(self, pidoffset, *args, **kwargs):
         # lock.lock.__init__() calls lock(), so the pidoffset assignment needs
         # to be earlier
         self._pidoffset = pidoffset
         super(lockwrapper, self).__init__(*args, **kwargs)
+
     def _getpid(self):
         return super(lockwrapper, self)._getpid() + self._pidoffset
 
+
 class teststate(object):
     def __init__(self, testcase, dir, pidoffset=0):
         self._testcase = testcase
@@ -42,9 +47,15 @@
         self._pidoffset = pidoffset
 
     def makelock(self, *args, **kwargs):
-        l = lockwrapper(self._pidoffset, self.vfs, testlockname,
-                        releasefn=self.releasefn, acquirefn=self.acquirefn,
-                        *args, **kwargs)
+        l = lockwrapper(
+            self._pidoffset,
+            self.vfs,
+            testlockname,
+            releasefn=self.releasefn,
+            acquirefn=self.acquirefn,
+            *args,
+            **kwargs
+        )
         l.postrelease.append(self.postreleasefn)
         return l
 
@@ -59,39 +70,42 @@
 
     def assertacquirecalled(self, called):
         self._testcase.assertEqual(
-            self._acquirecalled, called,
-            'expected acquire to be %s but was actually %s' % (
-                self._tocalled(called),
-                self._tocalled(self._acquirecalled),
-            ))
+            self._acquirecalled,
+            called,
+            'expected acquire to be %s but was actually %s'
+            % (self._tocalled(called), self._tocalled(self._acquirecalled),),
+        )
 
     def resetacquirefn(self):
         self._acquirecalled = False
 
     def assertreleasecalled(self, called):
         self._testcase.assertEqual(
-            self._releasecalled, called,
-            'expected release to be %s but was actually %s' % (
-                self._tocalled(called),
-                self._tocalled(self._releasecalled),
-            ))
+            self._releasecalled,
+            called,
+            'expected release to be %s but was actually %s'
+            % (self._tocalled(called), self._tocalled(self._releasecalled),),
+        )
 
     def assertpostreleasecalled(self, called):
         self._testcase.assertEqual(
-            self._postreleasecalled, called,
-            'expected postrelease to be %s but was actually %s' % (
+            self._postreleasecalled,
+            called,
+            'expected postrelease to be %s but was actually %s'
+            % (
                 self._tocalled(called),
                 self._tocalled(self._postreleasecalled),
-            ))
+            ),
+        )
 
     def assertlockexists(self, exists):
         actual = self.vfs.lexists(testlockname)
         self._testcase.assertEqual(
-            actual, exists,
-            'expected lock to %s but actually did %s' % (
-                self._toexists(exists),
-                self._toexists(actual),
-            ))
+            actual,
+            exists,
+            'expected lock to %s but actually did %s'
+            % (self._toexists(exists), self._toexists(actual),),
+        )
 
     def _tocalled(self, called):
         if called:
@@ -105,6 +119,7 @@
         else:
             return 'not exist'
 
+
 class testlock(unittest.TestCase):
     def testlock(self):
         state = teststate(self, tempfile.mkdtemp(dir=encoding.getcwd()))
@@ -125,12 +140,12 @@
         # recursive lock should not call acquirefn again
         state.assertacquirecalled(False)
 
-        lock.release() # brings lock refcount down from 2 to 1
+        lock.release()  # brings lock refcount down from 2 to 1
         state.assertreleasecalled(False)
         state.assertpostreleasecalled(False)
         state.assertlockexists(True)
 
-        lock.release() # releases the lock
+        lock.release()  # releases the lock
         state.assertreleasecalled(True)
         state.assertpostreleasecalled(True)
         state.assertlockexists(False)
@@ -256,8 +271,10 @@
     def testinheritcheck(self):
         d = tempfile.mkdtemp(dir=encoding.getcwd())
         state = teststate(self, d)
+
         def check():
             raise error.LockInheritanceContractViolation('check failed')
+
         lock = state.makelock(inheritchecker=check)
         state.assertacquirecalled(True)
 
@@ -279,6 +296,7 @@
 
         def emulatefrequentlock(*args):
             raise OSError(errno.EEXIST, "File exists")
+
         def emulatefrequentunlock(*args):
             raise OSError(errno.ENOENT, "No such file or directory")
 
@@ -293,5 +311,6 @@
             self.assertTrue(why.locker == b"")
             state.assertlockexists(False)
 
+
 if __name__ == '__main__':
     silenttestrunner.main(__name__)
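
The reformatted lock test still encodes the core contract: acquiring an
already-held lock only bumps a reference count, and releasefn plus the
postrelease callbacks fire only once the count returns to zero. A
minimal in-memory model of that contract (names illustrative; no
filesystem locking or pid handling)::

    class toylock(object):
        def __init__(self, releasefn=None):
            self.held = 0
            self.releasefn = releasefn
            self.postrelease = []

        def acquire(self):
            # Re-entrant: a repeat acquire must not re-run acquirefn,
            # matching assertacquirecalled(False) above.
            self.held += 1

        def release(self):
            self.held -= 1
            if self.held == 0:
                if self.releasefn:
                    self.releasefn()
                for callback in self.postrelease:
                    callback()
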
--- a/tests/test-log.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-log.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1941,6 +1941,7 @@
   1:a765632148dc55d38c35c4f247c618701886cb2f
   0:9f758d63dcde62d547ebfb08e1e7ee96535f2b05
   $ hg debugobsolete a765632148dc55d38c35c4f247c618701886cb2f
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up null -q
   $ hg log --template='{rev}:{node}\n'
@@ -1995,6 +1996,7 @@
   $ hg bookmark -d X@foo
   $ hg up null -q
   $ hg debugobsolete 9f758d63dcde62d547ebfb08e1e7ee96535f2b05
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ echo f > b
   $ hg ci -Am'b' -d '2 0'
@@ -2470,6 +2472,7 @@
   $ hg log -T '{node}\n' -r 1
   2294ae80ad8447bc78383182eeac50cb049df623
   $ hg debugobsolete 2294ae80ad8447bc78383182eeac50cb049df623
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G
   o  changeset:   4:50b9b36e9c5d
@@ -2520,6 +2523,7 @@
   $ hg log -T '{node}\n' -r 4
   50b9b36e9c5df2c6fc6dcefa8ad0da929e84aed2
   $ hg debugobsolete 50b9b36e9c5df2c6fc6dcefa8ad0da929e84aed2
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G a
   @  changeset:   3:15b2327059e5
--- a/tests/test-logexchange.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-logexchange.t	Mon Oct 21 11:09:48 2019 -0400
@@ -98,9 +98,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 9 changesets with 9 changes to 9 files (+1 heads)
   adding remote bookmark bar
   adding remote bookmark foo
+  added 9 changesets with 9 changes to 9 files (+1 heads)
   new changesets 18d04c59bb5d:3e1487808078
   (run 'hg heads' to see heads)
 
--- a/tests/test-lrucachedict.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-lrucachedict.py	Mon Oct 21 11:09:48 2019 -0400
@@ -4,9 +4,8 @@
 
 import silenttestrunner
 
-from mercurial import (
-    util,
-)
+from mercurial import util
+
 
 class testlrucachedict(unittest.TestCase):
     def testsimple(self):
@@ -363,5 +362,6 @@
         self.assertIn('d', d)
         self.assertIn('e', d)
 
+
 if __name__ == '__main__':
     silenttestrunner.main(__name__)
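
testlrucachedict checks classic LRU semantics: lookups refresh an
entry's recency, and inserting past capacity evicts the least recently
used key. The same contract fits in a few lines on top of Python 3's
OrderedDict, though mercurial.util.lrucachedict itself uses a
hand-rolled doubly linked list::

    from collections import OrderedDict

    class toylru(object):
        def __init__(self, maxsize):
            self._d = OrderedDict()
            self._maxsize = maxsize

        def __getitem__(self, key):
            self._d.move_to_end(key)  # mark as most recently used
            return self._d[key]

        def __setitem__(self, key, value):
            self._d[key] = value
            self._d.move_to_end(key)
            if len(self._d) > self._maxsize:
                self._d.popitem(last=False)  # evict the LRU entry
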
--- a/tests/test-manifest.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-manifest.py	Mon Oct 21 11:09:48 2019 -0400
@@ -20,13 +20,8 @@
 HASH_3 = b'1234567890abcdef0987654321deadbeef0fcafe'
 BIN_HASH_3 = binascii.unhexlify(HASH_3)
 A_SHORT_MANIFEST = (
-    b'bar/baz/qux.py\0%(hash2)s%(flag2)s\n'
-    b'foo\0%(hash1)s%(flag1)s\n'
-    ) % {b'hash1': HASH_1,
-         b'flag1': b'',
-         b'hash2': HASH_2,
-         b'flag2': b'l',
-         }
+    b'bar/baz/qux.py\0%(hash2)s%(flag2)s\n' b'foo\0%(hash1)s%(flag1)s\n'
+) % {b'hash1': HASH_1, b'flag1': b'', b'hash2': HASH_2, b'flag2': b'l',}
 
 A_DEEPER_MANIFEST = (
     b'a/b/c/bar.py\0%(hash3)s%(flag1)s\n'
@@ -47,12 +42,13 @@
     b'a/purple.py\0%(hash2)s%(flag1)s\n'
     b'app.py\0%(hash3)s%(flag1)s\n'
     b'readme.txt\0%(hash2)s%(flag1)s\n'
-    ) % {b'hash1': HASH_1,
-         b'flag1': b'',
-         b'hash2': HASH_2,
-         b'flag2': b'l',
-         b'hash3': HASH_3,
-         }
+) % {
+    b'hash1': HASH_1,
+    b'flag1': b'',
+    b'hash2': HASH_2,
+    b'flag2': b'l',
+    b'hash3': HASH_3,
+}
 
 HUGE_MANIFEST_ENTRIES = 200001
 
@@ -60,11 +56,17 @@
 if 'xrange' not in globals():
     xrange = range
 
-A_HUGE_MANIFEST = b''.join(sorted(
-    b'file%d\0%s%s\n' % (i, h, f) for i, h, f in
-    izip(xrange(200001),
-         itertools.cycle((HASH_1, HASH_2)),
-         itertools.cycle((b'', b'x', b'l')))))
+A_HUGE_MANIFEST = b''.join(
+    sorted(
+        b'file%d\0%s%s\n' % (i, h, f)
+        for i, h, f in izip(
+            xrange(200001),
+            itertools.cycle((HASH_1, HASH_2)),
+            itertools.cycle((b'', b'x', b'l')),
+        )
+    )
+)
+
 
 class basemanifesttests(object):
     def parsemanifest(self, text):
@@ -97,8 +99,7 @@
         m = self.parsemanifest(A_SHORT_MANIFEST)
         m[b'a'] = want
         self.assertEqual(want, m[b'a'])
-        self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST,
-                         m.text())
+        self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST, m.text())
 
     def testSetFlag(self):
         want = b'x'
@@ -115,15 +116,16 @@
         m[b'a'] = BIN_HASH_1
         m.setflag(b'a', want)
         self.assertEqual(want, m.flags(b'a'))
-        self.assertEqual(b'a\0' + HASH_1 + want + b'\n' + A_SHORT_MANIFEST,
-                         m.text())
+        self.assertEqual(
+            b'a\0' + HASH_1 + want + b'\n' + A_SHORT_MANIFEST, m.text()
+        )
 
     def testCopy(self):
         m = self.parsemanifest(A_SHORT_MANIFEST)
         m[b'a'] = BIN_HASH_1
         m2 = m.copy()
         del m
-        del m2 # make sure we don't double free() anything
+        del m2  # make sure we don't double free() anything
 
     def testCompaction(self):
         unhex = binascii.unhexlify
@@ -133,7 +135,10 @@
         m[b'beta'] = h2
         del m[b'foo']
         want = b'alpha\0%s\nbar/baz/qux.py\0%sl\nbeta\0%s\n' % (
-            HASH_1, HASH_2, HASH_2)
+            HASH_1,
+            HASH_2,
+            HASH_2,
+        )
         self.assertEqual(want, m.text())
         self.assertEqual(3, len(m))
         self.assertEqual([b'alpha', b'bar/baz/qux.py', b'beta'], list(m))
@@ -155,9 +160,10 @@
         # Merge code wants to set 21-byte fake hashes at times
         m[b'foo'] = want
         self.assertEqual(want, m[b'foo'])
-        self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
-                          (b'foo', BIN_HASH_1 + b'a')],
-                         list(m.items()))
+        self.assertEqual(
+            [(b'bar/baz/qux.py', BIN_HASH_2), (b'foo', BIN_HASH_1 + b'a')],
+            list(m.items()),
+        )
         # Sometimes it even tries a 22-byte fake hash, but we can
         # return 21 and it'll work out
         m[b'foo'] = want + b'+'
@@ -170,9 +176,9 @@
         m2 = m.copy()
         self.assertEqual(want, m2[b'foo'])
         # suffix with iteration
-        self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
-                          (b'foo', want)],
-                         list(m.items()))
+        self.assertEqual(
+            [(b'bar/baz/qux.py', BIN_HASH_2), (b'foo', want)], list(m.items())
+        )
 
         # shows up in diff
         self.assertEqual({b'foo': ((want, f), (h, b''))}, m.diff(clean))
@@ -181,10 +187,12 @@
     def testMatchException(self):
         m = self.parsemanifest(A_SHORT_MANIFEST)
         match = matchmod.match(b'', b'', [b're:.*'])
+
         def filt(path):
             if path == b'foo':
                 assert False
             return True
+
         match.matchfn = filt
         with self.assertRaises(AssertionError):
             m.matches(match)
@@ -206,28 +214,28 @@
         addl = b'z-only-in-left\0' + HASH_1 + b'\n'
         addr = b'z-only-in-right\0' + HASH_2 + b'x\n'
         left = self.parsemanifest(
-            A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl)
+            A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
+        )
         right = self.parsemanifest(A_SHORT_MANIFEST + addr)
         want = {
-            b'foo': ((BIN_HASH_3, b'x'),
-                     (BIN_HASH_1, b'')),
+            b'foo': ((BIN_HASH_3, b'x'), (BIN_HASH_1, b'')),
             b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
             b'z-only-in-right': (MISSING, (BIN_HASH_2, b'x')),
-            }
+        }
         self.assertEqual(want, left.diff(right))
 
         want = {
             b'bar/baz/qux.py': (MISSING, (BIN_HASH_2, b'l')),
             b'foo': (MISSING, (BIN_HASH_3, b'x')),
             b'z-only-in-left': (MISSING, (BIN_HASH_1, b'')),
-            }
+        }
         self.assertEqual(want, self.parsemanifest(EMTPY_MANIFEST).diff(left))
 
         want = {
             b'bar/baz/qux.py': ((BIN_HASH_2, b'l'), MISSING),
             b'foo': ((BIN_HASH_3, b'x'), MISSING),
             b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
-            }
+        }
         self.assertEqual(want, left.diff(self.parsemanifest(EMTPY_MANIFEST)))
         copy = right.copy()
         del copy[b'z-only-in-right']
@@ -235,7 +243,7 @@
         want = {
             b'foo': (MISSING, (BIN_HASH_1, b'')),
             b'z-only-in-right': ((BIN_HASH_2, b'x'), MISSING),
-            }
+        }
         self.assertEqual(want, right.diff(copy))
 
         short = self.parsemanifest(A_SHORT_MANIFEST)
@@ -243,21 +251,22 @@
         del pruned[b'foo']
         want = {
             b'foo': ((BIN_HASH_1, b''), MISSING),
-            }
+        }
         self.assertEqual(want, short.diff(pruned))
         want = {
             b'foo': (MISSING, (BIN_HASH_1, b'')),
-            }
+        }
         self.assertEqual(want, pruned.diff(short))
         want = {
             b'bar/baz/qux.py': None,
             b'foo': (MISSING, (BIN_HASH_1, b'')),
-            }
+        }
         self.assertEqual(want, pruned.diff(short, clean=True))
 
     def testReversedLines(self):
         backwards = b''.join(
-            l + b'\n' for l in reversed(A_SHORT_MANIFEST.split(b'\n')) if l)
+            l + b'\n' for l in reversed(A_SHORT_MANIFEST.split(b'\n')) if l
+        )
         try:
             self.parsemanifest(backwards)
             self.fail('Should have raised ValueError')
@@ -292,9 +301,11 @@
         match = matchmod.exact([b'file1', b'file200', b'file300'])
         m2 = m.matches(match)
 
-        w = (b'file1\0%sx\n'
-             b'file200\0%sl\n'
-             b'file300\0%s\n') % (HASH_2, HASH_1, HASH_1)
+        w = (b'file1\0%sx\n' b'file200\0%sl\n' b'file300\0%s\n') % (
+            HASH_2,
+            HASH_1,
+            HASH_1,
+        )
         self.assertEqual(w, m2.text())
 
     def testMatchesNonexistentFile(self):
@@ -303,13 +314,14 @@
         '''
         m = self.parsemanifest(A_DEEPER_MANIFEST)
 
-        match = matchmod.exact([b'a/b/c/bar.txt', b'a/b/d/qux.py',
-                                b'readme.txt', b'nonexistent'])
+        match = matchmod.exact(
+            [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent']
+        )
         m2 = m.matches(match)
 
         self.assertEqual(
-                [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'],
-                m2.keys())
+            [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'], m2.keys()
+        )
 
     def testMatchesNonexistentDirectory(self):
         '''Tests matches() for a relpath match on a directory that doesn't
@@ -349,11 +361,20 @@
         match = matchmod.match(b'/', b'', [b'a/b'], default=b'relpath')
         m2 = m.matches(match)
 
-        self.assertEqual([
-            b'a/b/c/bar.py', b'a/b/c/bar.txt', b'a/b/c/foo.py',
-            b'a/b/c/foo.txt',
-            b'a/b/d/baz.py', b'a/b/d/qux.py', b'a/b/d/ten.txt', b'a/b/dog.py',
-            b'a/b/fish.py'], m2.keys())
+        self.assertEqual(
+            [
+                b'a/b/c/bar.py',
+                b'a/b/c/bar.txt',
+                b'a/b/c/foo.py',
+                b'a/b/c/foo.txt',
+                b'a/b/d/baz.py',
+                b'a/b/d/qux.py',
+                b'a/b/d/ten.txt',
+                b'a/b/dog.py',
+                b'a/b/fish.py',
+            ],
+            m2.keys(),
+        )
 
     def testMatchesExactPath(self):
         '''Tests matches() on an exact match on a directory, which should
@@ -374,10 +395,20 @@
         match = matchmod.match(b'/', b'a/b', [b'.'], default=b'relpath')
         m2 = m.matches(match)
 
-        self.assertEqual([
-            b'a/b/c/bar.py', b'a/b/c/bar.txt', b'a/b/c/foo.py',
-            b'a/b/c/foo.txt', b'a/b/d/baz.py', b'a/b/d/qux.py',
-            b'a/b/d/ten.txt', b'a/b/dog.py', b'a/b/fish.py'], m2.keys())
+        self.assertEqual(
+            [
+                b'a/b/c/bar.py',
+                b'a/b/c/bar.txt',
+                b'a/b/c/foo.py',
+                b'a/b/c/foo.txt',
+                b'a/b/d/baz.py',
+                b'a/b/d/qux.py',
+                b'a/b/d/ten.txt',
+                b'a/b/dog.py',
+                b'a/b/fish.py',
+            ],
+            m2.keys(),
+        )
 
     def testMatchesWithPattern(self):
         '''Tests matches() for files matching a pattern that reside
@@ -388,8 +419,9 @@
         m2 = m.matches(match)
 
         self.assertEqual(
-                [b'a/b/c/bar.txt', b'a/b/c/foo.txt', b'a/b/d/ten.txt'],
-                m2.keys())
+            [b'a/b/c/bar.txt', b'a/b/c/foo.txt', b'a/b/d/ten.txt'], m2.keys()
+        )
+
 
 class testmanifestdict(unittest.TestCase, basemanifesttests):
     def parsemanifest(self, text):
@@ -414,10 +446,12 @@
             b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
             b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
             b'\x00\x00\xc0\x8aey\x1d}\x01\xd8\xe0\xb9\xf3\xde\x1b\xcf\x17'
-            b'\xac\xbe')
+            b'\xac\xbe'
+        )
         with self.assertRaises(ValueError):
             self.parsemanifest(data)
 
+
 class testtreemanifest(unittest.TestCase, basemanifesttests):
     def parsemanifest(self, text):
         return manifestmod.treemanifest(b'', text)
@@ -427,17 +461,16 @@
 
         dirs = [s._dir for s in m.walksubtrees()]
         self.assertEqual(
-            sorted([
-                b'', b'a/', b'a/c/', b'a/d/', b'a/b/', b'a/b/c/', b'a/b/d/']),
-            sorted(dirs)
+            sorted(
+                [b'', b'a/', b'a/c/', b'a/d/', b'a/b/', b'a/b/c/', b'a/b/d/']
+            ),
+            sorted(dirs),
         )
 
         match = matchmod.match(b'/', b'', [b'path:a/b/'])
         dirs = [s._dir for s in m.walksubtrees(matcher=match)]
-        self.assertEqual(
-            sorted([b'a/b/', b'a/b/c/', b'a/b/d/']),
-            sorted(dirs)
-        )
+        self.assertEqual(sorted([b'a/b/', b'a/b/c/', b'a/b/d/']), sorted(dirs))
+
 
 if __name__ == '__main__':
     silenttestrunner.main(__name__)
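The assertions above exercise manifest.matches() with an exactmatcher:
paths absent from the manifest (b'nonexistent') simply do not appear in
the result. A minimal standalone sketch of that behaviour, assuming a
manifestdict populated by hand with placeholder nodeids::

   from mercurial import manifest as manifestmod, match as matchmod

   m = manifestmod.manifestdict()
   for path in (b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'):
       m[path] = b'\x00' * 20  # placeholder 20-byte nodeid

   match = matchmod.exact([b'a/b/c/bar.txt', b'readme.txt', b'nonexistent'])
   m2 = m.matches(match)
   # b'nonexistent' is not in the manifest, so it is dropped:
   assert sorted(m2.keys()) == [b'a/b/c/bar.txt', b'readme.txt']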
--- a/tests/test-match.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-match.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,8 +9,8 @@
     util,
 )
 
+
 class BaseMatcherTests(unittest.TestCase):
-
     def testVisitdir(self):
         m = matchmod.basematcher()
         self.assertTrue(m.visitdir(b''))
@@ -21,8 +21,8 @@
         self.assertEqual(m.visitchildrenset(b''), b'this')
         self.assertEqual(m.visitchildrenset(b'dir'), b'this')
 
+
 class AlwaysMatcherTests(unittest.TestCase):
-
     def testVisitdir(self):
         m = matchmod.alwaysmatcher()
         self.assertEqual(m.visitdir(b''), b'all')
@@ -33,8 +33,8 @@
         self.assertEqual(m.visitchildrenset(b''), b'all')
         self.assertEqual(m.visitchildrenset(b'dir'), b'all')
 
+
 class NeverMatcherTests(unittest.TestCase):
-
     def testVisitdir(self):
         m = matchmod.nevermatcher()
         self.assertFalse(m.visitdir(b''))
@@ -45,6 +45,7 @@
         self.assertEqual(m.visitchildrenset(b''), set())
         self.assertEqual(m.visitchildrenset(b'dir'), set())
 
+
 class PredicateMatcherTests(unittest.TestCase):
     # predicatematcher does not currently define either of these methods, so
     # this is equivalent to BaseMatcherTests.
@@ -59,8 +60,8 @@
         self.assertEqual(m.visitchildrenset(b''), b'this')
         self.assertEqual(m.visitchildrenset(b'dir'), b'this')
 
+
 class PatternMatcherTests(unittest.TestCase):
-
     def testVisitdirPrefix(self):
         m = matchmod.match(b'x', b'', patterns=[b'path:dir/subdir'])
         assert isinstance(m, matchmod.patternmatcher)
@@ -122,8 +123,8 @@
         self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this')
         self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
 
+
 class IncludeMatcherTests(unittest.TestCase):
-
     def testVisitdirPrefix(self):
         m = matchmod.match(b'x', b'', include=[b'path:dir/subdir'])
         assert isinstance(m, matchmod.includematcher)
@@ -182,8 +183,8 @@
         self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this')
         self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
 
+
 class ExactMatcherTests(unittest.TestCase):
-
     def testVisitdir(self):
         m = matchmod.exact(files=[b'dir/subdir/foo.txt'])
         assert isinstance(m, matchmod.exactmatcher)
@@ -206,11 +207,15 @@
         self.assertEqual(m.visitchildrenset(b'folder'), set())
 
     def testVisitchildrensetFilesAndDirs(self):
-        m = matchmod.exact(files=[b'rootfile.txt',
-                                  b'a/file1.txt',
-                                  b'a/b/file2.txt',
-                                  # no file in a/b/c
-                                  b'a/b/c/d/file4.txt'])
+        m = matchmod.exact(
+            files=[
+                b'rootfile.txt',
+                b'a/file1.txt',
+                b'a/b/file2.txt',
+                # no file in a/b/c
+                b'a/b/c/d/file4.txt',
+            ]
+        )
         assert isinstance(m, matchmod.exactmatcher)
         self.assertEqual(m.visitchildrenset(b''), {b'a', b'rootfile.txt'})
         self.assertEqual(m.visitchildrenset(b'a'), {b'b', b'file1.txt'})
@@ -220,8 +225,8 @@
         self.assertEqual(m.visitchildrenset(b'a/b/c/d/e'), set())
         self.assertEqual(m.visitchildrenset(b'folder'), set())
 
+
 class DifferenceMatcherTests(unittest.TestCase):
-
     def testVisitdirM2always(self):
         m1 = matchmod.alwaysmatcher()
         m2 = matchmod.alwaysmatcher()
@@ -341,8 +346,8 @@
         self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'this')
         self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'this')
 
+
 class IntersectionMatcherTests(unittest.TestCase):
-
     def testVisitdirM2always(self):
         m1 = matchmod.alwaysmatcher()
         m2 = matchmod.alwaysmatcher()
@@ -533,8 +538,8 @@
         self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
         self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
 
+
 class UnionMatcherTests(unittest.TestCase):
-
     def testVisitdirM2always(self):
         m1 = matchmod.alwaysmatcher()
         m2 = matchmod.alwaysmatcher()
@@ -748,8 +753,8 @@
         self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
         self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
 
+
 class SubdirMatcherTests(unittest.TestCase):
-
     def testVisitdir(self):
         m = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
         sm = matchmod.subdirmatcher(b'dir', m)
@@ -772,11 +777,12 @@
         self.assertEqual(sm.visitchildrenset(b'subdir/z'), b'this')
         self.assertEqual(sm.visitchildrenset(b'foo'), set())
 
+
 class PrefixdirMatcherTests(unittest.TestCase):
-
     def testVisitdir(self):
-        m = matchmod.match(util.localpath(b'root/d'), b'e/f',
-                [b'../a.txt', b'b.txt'])
+        m = matchmod.match(
+            util.localpath(b'root/d'), b'e/f', [b'../a.txt', b'b.txt']
+        )
         pm = matchmod.prefixdirmatcher(b'd', m)
 
         # `m` elides 'd' because it's part of the root, and the rest of the
@@ -807,8 +813,9 @@
         self.assertEqual(pm.visitdir(b'd/e/f/g'), False)
 
     def testVisitchildrenset(self):
-        m = matchmod.match(util.localpath(b'root/d'), b'e/f',
-                [b'../a.txt', b'b.txt'])
+        m = matchmod.match(
+            util.localpath(b'root/d'), b'e/f', [b'../a.txt', b'b.txt']
+        )
         pm = matchmod.prefixdirmatcher(b'd', m)
 
         # OPT: visitchildrenset could possibly return {'e'} and {'f'} for these
@@ -828,5 +835,6 @@
         self.assertEqual(pm.visitchildrenset(b'd/e/f'), b'this')
         self.assertEqual(pm.visitchildrenset(b'd/e/f/g'), set())
 
+
 if __name__ == '__main__':
     silenttestrunner.main(__name__)
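The common thread in these matcher tests is the visitdir()/
visitchildrenset() protocol used to prune directory walks early:
visitchildrenset() returns b'all' (recurse freely), b'this' (visit this
directory, consulting the matcher per entry), a set of child names to
descend into, or an empty set (prune the subtree). A small sketch with
an exactmatcher, consistent with the assertions above::

   from mercurial import match as matchmod

   m = matchmod.exact(files=[b'rootfile.txt', b'a/b/file2.txt'])
   assert m.visitchildrenset(b'') == {b'a', b'rootfile.txt'}
   assert m.visitchildrenset(b'a') == {b'b'}
   assert m.visitchildrenset(b'a/b') == {b'file2.txt'}
   assert m.visitchildrenset(b'folder') == set()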
--- a/tests/test-mdiff.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-mdiff.py	Mon Oct 21 11:09:48 2019 -0400
@@ -3,22 +3,23 @@
 
 import unittest
 
-from mercurial import (
-    mdiff,
-)
+from mercurial import mdiff
+
 
 class splitnewlinesTests(unittest.TestCase):
-
     def test_splitnewlines(self):
-        cases = {b'a\nb\nc\n': [b'a\n', b'b\n', b'c\n'],
-                 b'a\nb\nc': [b'a\n', b'b\n', b'c'],
-                 b'a\nb\nc\n\n': [b'a\n', b'b\n', b'c\n', b'\n'],
-                 b'': [],
-                 b'abcabc': [b'abcabc'],
-                 }
+        cases = {
+            b'a\nb\nc\n': [b'a\n', b'b\n', b'c\n'],
+            b'a\nb\nc': [b'a\n', b'b\n', b'c'],
+            b'a\nb\nc\n\n': [b'a\n', b'b\n', b'c\n', b'\n'],
+            b'': [],
+            b'abcabc': [b'abcabc'],
+        }
         for inp, want in cases.items():
             self.assertEqual(mdiff.splitnewlines(inp), want)
 
+
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
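mdiff.splitnewlines() is a keepends line splitter over b'\n' only; it
never splits on b'\r', unlike bytes.splitlines(). A pure-Python sketch
that reproduces every case in the table above::

   def splitnewlines(text):
       '''split text into lines, each keeping its trailing newline;
       a final fragment without a newline is kept as-is'''
       lines = [l + b'\n' for l in text.split(b'\n')]
       if lines[-1] == b'\n':
           lines.pop()  # text ended with a newline: drop the empty tail
       else:
           lines[-1] = lines[-1][:-1]  # undo the newline added above
       return lines

   assert splitnewlines(b'a\nb\nc') == [b'a\n', b'b\n', b'c']
   assert splitnewlines(b'') == []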
--- a/tests/test-merge-closedheads.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-merge-closedheads.t	Mon Oct 21 11:09:48 2019 -0400
@@ -43,7 +43,7 @@
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg merge
   abort: branch 'default' has 3 heads - please merge with an explicit rev
-  (run 'hg heads .' to see heads)
+  (run 'hg heads .' to see heads, specify rev with -r)
   [255]
 
 close one of the heads
--- a/tests/test-merge-default.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-merge-default.t	Mon Oct 21 11:09:48 2019 -0400
@@ -41,7 +41,7 @@
   $ HGMERGE=internal:other; export HGMERGE
   $ hg merge
   abort: branch 'default' has 3 heads - please merge with an explicit rev
-  (run 'hg heads .' to see heads)
+  (run 'hg heads .' to see heads, specify rev with -r)
   [255]
 
 Should succeed:
@@ -117,7 +117,7 @@
 
   $ hg merge
   abort: branch 'foobranch' has one head - please merge with an explicit rev
-  (run 'hg heads' to see all heads)
+  (run 'hg heads' to see all heads, specify rev with -r)
   [255]
 
 
@@ -137,7 +137,7 @@
 
   $ hg log -r '_destmerge()'
   abort: branch 'foobranch' has one head - please merge with an explicit rev
-  (run 'hg heads' to see all heads)
+  (run 'hg heads' to see all heads, specify rev with -r)
   [255]
 
 (on a branch with a two heads)
@@ -171,5 +171,5 @@
 
   $ hg log -r '_destmerge(foobranch)'
   abort: branch 'foobranch' has one head - please merge with an explicit rev
-  (run 'hg heads' to see all heads)
+  (run 'hg heads' to see all heads, specify rev with -r)
   [255]
--- a/tests/test-merge1.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-merge1.t	Mon Oct 21 11:09:48 2019 -0400
@@ -49,7 +49,7 @@
   ? b/nonempty
   # The repository is in an unfinished *update* state.
   
-  # To continue:    hg update
+  # To continue:    hg update .
   
 
   $ rm b/nonempty
--- a/tests/test-minifileset.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-minifileset.py	Mon Oct 21 11:09:48 2019 -0400
@@ -3,6 +3,7 @@
 
 from mercurial import minifileset
 
+
 def check(text, truecases, falsecases):
     f = minifileset.compile(text)
     for args in truecases:
@@ -12,24 +13,31 @@
         if f(*args):
             print('unexpected: %r should exclude %r' % (text, args))
 
+
 check(b'all()', [(b'a.php', 123), (b'b.txt', 0)], [])
 check(b'none()', [], [(b'a.php', 123), (b'b.txt', 0)])
 check(b'!!!!((!(!!all())))', [], [(b'a.php', 123), (b'b.txt', 0)])
 
-check(b'"path:a" & (**.b | **.c)',
-      [(b'a/b.b', 0), (b'a/c.c', 0)], [(b'b/c.c', 0)])
-check(b'(path:a & **.b) | **.c',
-      [(b'a/b.b', 0), (b'a/c.c', 0), (b'b/c.c', 0)], [])
+check(
+    b'"path:a" & (**.b | **.c)', [(b'a/b.b', 0), (b'a/c.c', 0)], [(b'b/c.c', 0)]
+)
+check(
+    b'(path:a & **.b) | **.c', [(b'a/b.b', 0), (b'a/c.c', 0), (b'b/c.c', 0)], []
+)
 
-check(b'**.bin - size("<20B")',
-      [(b'b.bin', 21)], [(b'a.bin', 11), (b'b.txt', 21)])
+check(
+    b'**.bin - size("<20B")', [(b'b.bin', 21)], [(b'a.bin', 11), (b'b.txt', 21)]
+)
 
-check(b'!!**.bin or size(">20B") + "path:bin" or !size(">10")',
-      [(b'a.bin', 11), (b'b.txt', 21), (b'bin/abc', 11)],
-      [(b'a.notbin', 11), (b'b.txt', 11), (b'bin2/abc', 11)])
+check(
+    b'!!**.bin or size(">20B") + "path:bin" or !size(">10")',
+    [(b'a.bin', 11), (b'b.txt', 21), (b'bin/abc', 11)],
+    [(b'a.notbin', 11), (b'b.txt', 11), (b'bin2/abc', 11)],
+)
 
 check(
     b'(**.php and size(">10KB")) | **.zip | ("path:bin" & !"path:bin/README") '
     b' | size(">1M")',
     [(b'a.php', 15000), (b'a.zip', 0), (b'bin/a', 0), (b'bin/README', 1e7)],
-    [(b'a.php', 5000), (b'b.zip2', 0), (b't/bin/a', 0), (b'bin/README', 1)])
+    [(b'a.php', 5000), (b'b.zip2', 0), (b't/bin/a', 0), (b'bin/README', 1)],
+)
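minifileset.compile() turns a restricted fileset expression into a
predicate over (path, size) pairs, which is what every check() call
above drives. A usage sketch grounded in cases already present in this
test::

   from mercurial import minifileset

   f = minifileset.compile(b'**.bin - size("<20B")')
   assert f(b'b.bin', 21)      # matches: .bin and not under 20 bytes
   assert not f(b'a.bin', 11)  # excluded by the size clause
   assert not f(b'b.txt', 21)  # wrong extension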
--- a/tests/test-minirst.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-minirst.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,10 +1,7 @@
 from __future__ import absolute_import, print_function
-from mercurial import (
-    minirst,
-)
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial import minirst
+from mercurial.utils import stringutil
+
 
 def debugformat(text, form, **kwargs):
     blocks, pruned = minirst.parse(text, **kwargs)
@@ -23,12 +20,14 @@
     print("-" * 70)
     print()
 
+
 def debugformats(title, text, **kwargs):
     print("== %s ==" % title)
     debugformat(text, 60, **kwargs)
     debugformat(text, 30, **kwargs)
     debugformat(text, b'html', **kwargs)
 
+
 paragraphs = b"""
 This is some text in the first paragraph.
 
@@ -188,8 +187,9 @@
 debugformats('containers (normal)', containers)
 debugformats('containers (verbose)', containers, keep=[b'verbose'])
 debugformats('containers (debug)', containers, keep=[b'debug'])
-debugformats('containers (verbose debug)', containers,
-            keep=[b'verbose', b'debug'])
+debugformats(
+    'containers (verbose debug)', containers, keep=[b'verbose', b'debug']
+)
 
 roles = b"""Please see :hg:`add`."""
 debugformats('roles', roles)
@@ -245,9 +245,11 @@
 debugformats('comments', comments)
 
 
-data = [[b'a', b'b', b'c'],
-         [b'1', b'2', b'3'],
-         [b'foo', b'bar', b'baz this list is very very very long man']]
+data = [
+    [b'a', b'b', b'c'],
+    [b'1', b'2', b'3'],
+    [b'foo', b'bar', b'baz this list is very very very long man'],
+]
 
 rst = minirst.maketable(data, 2, True)
 table = b''.join(rst)
@@ -256,8 +258,10 @@
 
 debugformats('table', table)
 
-data = [[b's', b'long', b'line\ngoes on here'],
-        [b'', b'xy', b'tried to fix here\n        by indenting']]
+data = [
+    [b's', b'long', b'line\ngoes on here'],
+    [b'', b'xy', b'tried to fix here\n        by indenting'],
+]
 
 rst = minirst.maketable(data, 1, False)
 table = b''.join(rst)
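minirst.maketable() renders rows of byte strings as an RST simple table,
returning a list of byte lines (hence the b''.join calls), which
debugformats() then re-parses and re-wraps. A hedged sketch, assuming
the two positional arguments after the data are the indent and a
header-row flag, as the calls above suggest::

   from mercurial import minirst

   rows = [[b'key', b'value'], [b'a', b'1']]
   table = b''.join(minirst.maketable(rows, 2, True))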
--- a/tests/test-mv-cp-st-diff.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-mv-cp-st-diff.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1676,7 +1676,6 @@
   debug.copies: searching copies from a51f36ab1704 to 1f4aa1fd627b
   debug.copies: search mode: forward
   debug.copies:    looking into rename from a51f36ab1704 to 1f4aa1fd627b
-  debug.copies:      search limit: 3
   debug.copies:      missing files to search: 1
   debug.copies:        tracing file: renamed
   debug.copies:          rename of: f
--- a/tests/test-narrow-exchange.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-narrow-exchange.t	Mon Oct 21 11:09:48 2019 -0400
@@ -217,7 +217,7 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 0 changes to 0 files
+  remote: added 1 changesets with 0 changes to 0 files (no-lfs-on !)
   remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f.i@f59b4e021835: no match found (lfs-on !)
   remote: transaction abort! (lfs-on !)
   remote: rollback completed (lfs-on !)
--- a/tests/test-narrow-trackedcmd.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-narrow-trackedcmd.t	Mon Oct 21 11:09:48 2019 -0400
@@ -101,6 +101,8 @@
   
       --addinclude VALUE [+]       new paths to include
       --removeinclude VALUE [+]    old paths to no longer include
+      --auto-remove-includes       automatically choose unused includes to
+                                   remove
       --addexclude VALUE [+]       new paths to exclude
       --import-rules VALUE         import narrowspecs from a file
       --removeexclude VALUE [+]    old paths to no longer exclude
@@ -220,5 +222,5 @@
   $ hg init non-narrow
   $ cd non-narrow
   $ hg tracked --addinclude foobar
-  abort: the tracked command is only supported on respositories cloned with --narrow
+  abort: the tracked command is only supported on repositories cloned with --narrow
   [255]
--- a/tests/test-narrow-widen-no-ellipsis.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-narrow-widen-no-ellipsis.t	Mon Oct 21 11:09:48 2019 -0400
@@ -116,7 +116,7 @@
   query 1; heads
   sending batch command
   searching for changes
-  all local heads known remotely
+  all local changesets known remotely
   sending narrow_widen command
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "changegroup" (params: * mandatory) supported (glob)
@@ -125,9 +125,9 @@
   adding widest/ revisions (tree !)
   adding file changes
   adding widest/f revisions
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-bundle: 1 parts total
   added 0 changesets with 1 changes to 1 files
-  bundle2-input-part: total payload size * (glob)
-  bundle2-input-bundle: 0 parts total
    widest/f: narrowspec updated -> g
   getting widest/f
   $ hg tracked
--- a/tests/test-narrow-widen.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-narrow-widen.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1,6 +1,11 @@
 #testcases flat tree
   $ . "$TESTDIR/narrow-library.sh"
 
+  $ cat >> $HGRCPATH <<EOF
+  > [alias]
+  > l = log -G -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  > EOF
+
 #if tree
   $ cat << EOF >> $HGRCPATH
   > [experimental]
@@ -76,15 +81,23 @@
   $ echo 'widest v4' > widest/f
   $ hg commit -m 'update widest v4'
 
-  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
-  7: update widest v4
-  6: add outside2
-  5: update inside
-  4: update widest v3
-  3: add wider, update widest
-  2: add outside
-  1: add widest
-  0: add inside
+  $ hg l
+  @  7: update widest v4
+  |
+  o  6: add outside2
+  |
+  o  5: update inside
+  |
+  o  4: update widest v3
+  |
+  o  3: add wider, update widest
+  |
+  o  2: add outside
+  |
+  o  1: add widest
+  |
+  o  0: add inside
+  
 
   $ cd ..
 
@@ -92,6 +105,11 @@
 added upstream revisions.
 
   $ cd narrow
+  $ hg l
+  @  ...1: add outside
+  |
+  o  0: add inside
+  
   $ hg tracked --addinclude widest/f
   comparing with ssh://user@dummy/master
   searching for changes
@@ -100,6 +118,13 @@
   adding manifests
   adding file changes
   added 3 changesets with 2 changes to 2 files
+  $ hg l
+  @  ...2: add outside
+  |
+  o  1: add widest
+  |
+  o  0: add inside
+  
   $ hg tracked
   I path:inside
   I path:widest/f
@@ -130,15 +155,23 @@
   $ cat inside/f
   inside v2
 
-  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
-  7: update widest v4
-  ...6: add outside2
-  5: update inside
-  4: update widest v3
-  3: add wider, update widest
-  ...2: add outside
-  1: add widest
-  0: add inside
+  $ hg l
+  o  7: update widest v4
+  |
+  o  ...6: add outside2
+  |
+  @  5: update inside
+  |
+  o  4: update widest v3
+  |
+  o  3: add wider, update widest
+  |
+  o  ...2: add outside
+  |
+  o  1: add widest
+  |
+  o  0: add inside
+  
 
 Check that widening with a newline fails
 
@@ -180,15 +213,23 @@
   $ cat widest/f
   widest v4
 
-  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
-  7: update widest v4
-  ...6: add outside2
-  5: update inside
-  4: update widest v3
-  3: add wider, update widest
-  ...2: add outside
-  1: add widest
-  0: add inside
+  $ hg l
+  @  7: update widest v4
+  |
+  o  ...6: add outside2
+  |
+  o  5: update inside
+  |
+  o  4: update widest v3
+  |
+  o  3: add wider, update widest
+  |
+  o  ...2: add outside
+  |
+  o  1: add widest
+  |
+  o  0: add inside
+  
 
 separate suite of tests: files from 0-10 modified in changes 0-10. This allows
 more obvious precise tests tickling particular corner cases.
@@ -245,15 +286,23 @@
   crosschecking files in changesets and manifests
   checking files
   checked 8 changesets with 4 changes to 4 files
-  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
-  ...7: add d10/f
-  6: add d9/f
-  ...5: add d8/f
-  4: add d6/f
-  ...3: add d5/f
-  2: add d3/f
-  ...1: add d2/f
-  0: add d0/f
+  $ hg l
+  @  ...7: add d10/f
+  |
+  o  6: add d9/f
+  |
+  o  ...5: add d8/f
+  |
+  o  4: add d6/f
+  |
+  o  ...3: add d5/f
+  |
+  o  2: add d3/f
+  |
+  o  ...1: add d2/f
+  |
+  o  0: add d0/f
+  
   $ hg tracked --addinclude d1
   comparing with ssh://user@dummy/upstream
   searching for changes
@@ -268,16 +317,25 @@
   I path:d3
   I path:d6
   I path:d9
-  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
-  ...8: add d10/f
-  7: add d9/f
-  ...6: add d8/f
-  5: add d6/f
-  ...4: add d5/f
-  3: add d3/f
-  ...2: add d2/f
-  1: add d1/f
-  0: add d0/f
+  $ hg l
+  @  ...8: add d10/f
+  |
+  o  7: add d9/f
+  |
+  o  ...6: add d8/f
+  |
+  o  5: add d6/f
+  |
+  o  ...4: add d5/f
+  |
+  o  3: add d3/f
+  |
+  o  ...2: add d2/f
+  |
+  o  1: add d1/f
+  |
+  o  0: add d0/f
+  
 
 Verify shouldn't claim the repo is corrupt after a widen.
 
@@ -294,16 +352,42 @@
   $ cd ..
   $ hg clone -q --narrow ssh://user@dummy/upstream narrow3 --include d2 -r 2
   $ cd narrow3
-  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
-  1: add d2/f
-  ...0: add d1/f
+  $ hg l
+  @  1: add d2/f
+  |
+  o  ...0: add d1/f
+  
   $ hg pull -q -r 3
   $ hg co -q tip
   $ hg pull -q -r 4
   $ echo local > d2/f
   $ hg ci -m local
   created new head
+  $ hg l
+  @  4: local
+  |
+  | o  ...3: add d4/f
+  |/
+  o  ...2: add d3/f
+  |
+  o  1: add d2/f
+  |
+  o  ...0: add d1/f
+  
   $ hg tracked -q --addinclude d0 --addinclude d9
+  $ hg l
+  @  5: local
+  |
+  | o  ...4: add d4/f
+  |/
+  o  ...3: add d3/f
+  |
+  o  2: add d2/f
+  |
+  o  ...1: add d1/f
+  |
+  o  0: add d0/f
+  
 
 Widening preserves bookmarks
 
@@ -328,10 +412,13 @@
   $ cd interrupted
   $ echo local > d0/f
   $ hg ci -m local
-  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
-  2: local
-  ...1: add d10/f
-  0: add d0/f
+  $ hg l
+  @  2: local
+  |
+  o  ...1: add d10/f
+  |
+  o  0: add d0/f
+  
   $ hg bookmarks bookmark
   $ hg --config hooks.pretxnchangegroup.bad=false tracked --addinclude d1
   comparing with ssh://user@dummy/upstream
@@ -340,12 +427,11 @@
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 2 changes to 2 files
   transaction abort!
   rollback completed
   abort: pretxnchangegroup.bad hook exited with status 1
   [255]
-  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  $ hg l
   $ hg bookmarks
   no bookmarks set
   $ hg unbundle .hg/strip-backup/*-widen.hg
@@ -355,9 +441,12 @@
   added 3 changesets with 2 changes to 1 files
   new changesets *:* (glob)
   (run 'hg update' to get a working copy)
-  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
-  2: local
-  ...1: add d10/f
-  0: add d0/f
+  $ hg l
+  o  2: local
+  |
+  o  ...1: add d10/f
+  |
+  o  0: add d0/f
+  
   $ hg bookmarks
    * bookmark                  2:* (glob)
--- a/tests/test-narrow.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-narrow.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1,6 +1,11 @@
 #testcases flat tree
 #testcases lfs-on lfs-off
 
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution=createmarkers
+  > EOF
+
 #if lfs-on
   $ cat >> $HGRCPATH <<EOF
   > [extensions]
@@ -77,10 +82,6 @@
   updating to branch default
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd narrow-local-changes
-  $ cat >> $HGRCPATH << EOF
-  > [experimental]
-  > evolution=createmarkers
-  > EOF
   $ echo local change >> d0/f
   $ hg ci -m 'local change to d0'
   $ hg co '.^'
@@ -157,6 +158,7 @@
   $ hg co '.^'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg debugobsolete `hg log -T '{node}' -r 'desc("local change to d0")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg tracked --removeinclude d0
   comparing with ssh://user@dummy/master
@@ -445,3 +447,48 @@
   abort: local changes found
   (use --force-delete-local-changes to ignore)
   [255]
+  $ cd ..
+
+Test --auto-remove-includes
+  $ hg clone --narrow ssh://user@dummy/master narrow-auto-remove -q \
+  > --include d0 --include d1 --include d2
+  $ cd narrow-auto-remove
+  $ echo a >> d0/f
+  $ hg ci -m 'local change to d0'
+  $ hg co '.^'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo a >> d1/f
+  $ hg ci -m 'local change to d1'
+  created new head
+  $ hg debugobsolete $(hg log -T '{node}' -r 'desc("local change to d0")')
+  1 new obsolescence markers
+  obsoleted 1 changesets
+  $ echo n | hg tracked --auto-remove-includes --config ui.interactive=yes
+  comparing with ssh://user@dummy/master
+  searching for changes
+  looking for unused includes to remove
+  path:d0
+  path:d2
+  remove these unused includes (yn)? n
+  $ hg tracked --auto-remove-includes
+  comparing with ssh://user@dummy/master
+  searching for changes
+  looking for unused includes to remove
+  path:d0
+  path:d2
+  remove these unused includes (yn)? y
+  looking for local changes to affected paths
+  saved backup bundle to $TESTTMP/narrow-auto-remove/.hg/strip-backup/*-narrow.hg (glob)
+  deleting data/d0/f.i
+  deleting data/d2/f.i
+  deleting meta/d0/00manifest.i (tree !)
+  deleting meta/d2/00manifest.i (tree !)
+  $ hg tracked
+  I path:d1
+  $ hg files
+  d1/f
+  $ hg tracked --auto-remove-includes
+  comparing with ssh://user@dummy/master
+  searching for changes
+  looking for unused includes to remove
+  found no unused includes
--- a/tests/test-newbranch.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-newbranch.t	Mon Oct 21 11:09:48 2019 -0400
@@ -318,7 +318,7 @@
 
   $ hg merge
   abort: branch 'test' has one head - please merge with an explicit rev
-  (run 'hg heads' to see all heads)
+  (run 'hg heads' to see all heads, specify rev with -r)
   [255]
   $ hg up -C default
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
@@ -327,7 +327,7 @@
 
   $ hg merge
   abort: branch 'default' has 3 heads - please merge with an explicit rev
-  (run 'hg heads .' to see heads)
+  (run 'hg heads .' to see heads, specify rev with -r)
   [255]
 
 3 branch heads, explicit merge required:
--- a/tests/test-notify.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-notify.t	Mon Oct 21 11:09:48 2019 -0400
@@ -99,7 +99,13 @@
     "/long/path/repository" into "repository". Default: 0.
   
   notify.domain
-    Default email domain for sender or recipients with no explicit domain.
+    Default email domain for sender or recipients with no explicit domain. It is
+    also used for the domain part of the "Message-Id" when using
+    "notify.messageidseed".
+  
+  notify.messageidseed
+    Create deterministic "Message-Id" headers for the mails based on the seed
+    and the revision identifier of the first commit in the changeset.
   
   notify.style
     Style file to use when formatting emails.
@@ -190,7 +196,7 @@
 of the very long subject line
 pull (minimal config)
 
-  $ hg --traceback --cwd b pull ../a | "$PYTHON" $TESTTMP/filter.py
+  $ hg --traceback --cwd b --config notify.domain=example.com --config notify.messageidseed=example pull ../a | "$PYTHON" $TESTTMP/filter.py
   pulling from ../a
   searching for changes
   adding changesets
@@ -203,10 +209,10 @@
   Content-Transfer-Encoding: 7bit
   Date: * (glob)
   Subject: changeset in $TESTTMP/b: b
-  From: test
+  From: test@example.com
   X-Hg-Notification: changeset 00a13f371396
-  Message-Id: <*> (glob)
-  To: baz, foo@bar
+  Message-Id: <hg.ba3098a36bd4c297288d16788623a841f81f618ea961a0f0fd65de7eb1191b66@example.com>
+  To: baz@example.com, foo@bar
   
   changeset 00a13f371396 in $TESTTMP/b
   details: $TESTTMP/b?cmd=changeset;node=00a13f371396
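The notify test now pins notify.domain and notify.messageidseed so the
Message-Id header is deterministic rather than globbed. The idea is
that the id is derived by hashing the seed together with the revision
identifier of the first changeset and qualifying it with notify.domain.
A rough, illustrative reconstruction -- the real scheme lives in
hgext/notify, and the exact hash and input encoding here are
assumptions::

   import hashlib

   def messageid(node, seed, domain):
       # hypothetical: digest of the changeset id plus the seed
       digest = hashlib.sha256(node + b'-' + seed).hexdigest()
       return '<hg.%s@%s>' % (digest, domain)

   print(messageid(b'00a13f371396', b'example', 'example.com'))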
--- a/tests/test-obshistory.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-obshistory.t	Mon Oct 21 11:09:48 2019 -0400
@@ -88,6 +88,7 @@
      summary:     ROOT
   
   $ hg debugobsolete --record-parents `getid 'desc(B0)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg log --hidden -G
@@ -162,6 +163,7 @@
   adding b
 
   $ hg debugobsolete `getid '1'` `getid '2'` `getid '3'`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg log --hidden -G
@@ -251,6 +253,7 @@
   adding d
 
   $ hg debugobsolete `getid '1'` `getid '2'` `getid '3'` `getid '4'` `getid '5'`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg log --hidden -G
@@ -336,9 +339,11 @@
   created new head
 
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(C0)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(C0)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg log --hidden -G
@@ -507,9 +512,11 @@
   created new head
 
   $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(C0)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid 'desc(B1)'` `getid 'desc(C0)'`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg log --hidden -G
--- a/tests/test-obsmarker-template.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-obsmarker-template.t	Mon Oct 21 11:09:48 2019 -0400
@@ -387,6 +387,7 @@
   $ hg commit -A -m "A0"
   adding b
   $ hg debugobsolete `getid "1"` `getid "2"` `getid "3"`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg log --hidden -G
@@ -593,9 +594,11 @@
   adding B0
   created new head
   $ hg debugobsolete `getid "desc(A0)"` `getid "desc(C0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)"` `getid "desc(C0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg log --hidden -G
@@ -1120,9 +1123,11 @@
   adding B0
   created new head
   $ hg debugobsolete `getid "desc(A0)"` `getid "desc(C0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B1)"` `getid "desc(C0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg log --hidden -G
@@ -1601,11 +1606,14 @@
 Create the cycle
 
   $ hg debugobsolete `getid "desc(A0)"` `getid "desc(B0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)"` `getid "desc(C0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid "desc(B0)"` `getid "desc(A0)"`
+  1 new obsolescence markers
 
 Check templates
 ---------------
@@ -1854,6 +1862,7 @@
      summary:     ROOT
   
   $ hg debugobsolete `getid "4"` `getid "5"` `getid "6"` `getid "7"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G
   @  changeset:   7:ba2ed02b0c9a
@@ -2301,6 +2310,7 @@
   $ mkcommit ROOT
   $ mkcommit A0
   $ hg debugobsolete --record-parent `getid "."`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
 Check output
@@ -2330,6 +2340,7 @@
   $ mkcommit A0
   $ hg commit --amend -m "A1"
   $ hg debugobsolete --record-parent `getid "."`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg up -r "desc(A0)" --hidden
@@ -2338,6 +2349,7 @@
   (hidden revision '471f378eab4c' is pruned)
   $ hg commit --amend -m "A2"
   $ hg debugobsolete --record-parent `getid "."`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
 Check output
@@ -2481,10 +2493,12 @@
   $ hg commit -A -m "A2"
   adding b
   $ hg debugobsolete `getid "1"` `getid "2"` `getid "3"`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
 # Simulate prune
   $ hg debugobsolete --record-parent `getid "."`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg log --hidden -G
@@ -2622,6 +2636,7 @@
   > --config devel.user.obsmarker="`cat test2`"
   $ mkcommit B0
   $ HGENCODING=latin-1 hg debugobsolete -u "`cat test2`" "`getid 'desc(B0)'`"
+  1 new obsolescence markers
   obsoleted 1 changesets
 
 metadata should be stored in UTF-8, and debugobsolete doesn't decode it to
--- a/tests/test-obsolete-bundle-strip.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-obsolete-bundle-strip.t	Mon Oct 21 11:09:48 2019 -0400
@@ -126,9 +126,12 @@
   $ mkcommit 'C-A1'
   created new head
   $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'`
+  1 new obsolescence markers
   $ hg debugobsolete `getid 'desc("C-A0")'` a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 `getid 'desc("C-A1")'`
+  1 new obsolescence markers
 
   $ hg up 'desc("ROOT")'
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
@@ -272,10 +275,13 @@
   $ mkcommit 'C-A1'
   created new head
   $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'`
+  1 new obsolescence markers
   $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete --record-parents `getid 'desc("C-B0")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up 'desc("ROOT")'
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
@@ -461,9 +467,12 @@
   $ mkcommit 'C-A1'
   created new head
   $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'`
+  1 new obsolescence markers
   $ hg debugobsolete --record-parents `getid 'desc("C-A0")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'`
+  1 new obsolescence markers
   $ hg up 'desc("ROOT")'
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg log --hidden -G
@@ -606,10 +615,13 @@
   $ mkcommit 'C-A1'
   created new head
   $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'`
+  1 new obsolescence markers
   $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete --record-parents `getid 'desc("C-B0")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
 (it is annoying to create prune with parent data without the changeset, so we strip it after the fact)
@@ -688,9 +700,12 @@
   $ mkcommit 'C-A1'
   created new head
   $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'`
+  1 new obsolescence markers
   $ hg debugobsolete --record-parents `getid 'desc("C-A0")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'`
+  1 new obsolescence markers
 
 (it is annoying to create prune with parent data without the changeset, so we strip it after the fact)
 
@@ -775,20 +790,29 @@
   $ mkcommit 'C-E'
   created new head
   $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A")'`
+  1 new obsolescence markers
   $ hg debugobsolete `getid 'desc("C-A")'` `getid 'desc("C-B")'` `getid 'desc("C-C")'` # record split
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid 'desc("C-A")'` `getid 'desc("C-D")'` # other divergent
+  1 new obsolescence markers
   3 new content-divergent changesets
   $ hg debugobsolete `getid 'desc("C-A")'` b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0
+  1 new obsolescence markers
   $ hg debugobsolete b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0 `getid 'desc("C-E")'`
+  1 new obsolescence markers
   1 new content-divergent changesets
   $ hg debugobsolete `getid 'desc("C-B")'` `getid 'desc("C-E")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid 'desc("C-C")'` `getid 'desc("C-E")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid 'desc("C-D")'` `getid 'desc("C-E")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0 `getid 'desc("C-E")'`
+  1 new obsolescence markers
 
   $ hg up 'desc("ROOT")'
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
@@ -1334,6 +1358,7 @@
   $ mkcommit 'C-A'
   $ mkcommit 'C-B'
   $ hg debugobsolete --record-parent `getid 'desc("C-B")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg up 'desc("ROOT")'
--- a/tests/test-obsolete-changeset-exchange.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-obsolete-changeset-exchange.t	Mon Oct 21 11:09:48 2019 -0400
@@ -34,6 +34,7 @@
   o  base d20a80d4def38df63a4b330b7fb688f3d4cae1e3
   
   $ hg debugobsolete 9d73aac1b2ed7d53835eaeec212ed41ea47da53a f89bcc95eba5174b1ccc3e33a82e84c96e8338ee
+  1 new obsolescence markers
   obsoleted 1 changesets
 
 Push it. The bundle should not refer to the extinct changeset.
@@ -139,6 +140,7 @@
   $ echo 2b > foo
   $ hg -q commit -m 2b
   $ hg debugobsolete 6a29ed9c68defff1a139e5c6fa9696fb1a75783d bec0734cd68e84477ba7fc1d13e6cff53ab70129
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ cd ..
 
@@ -168,15 +170,15 @@
   adding manifests
   adding file changes
   adding foo revisions
-  added 1 changesets with 1 changes to 1 files (+1 heads)
   bundle2-input-part: total payload size 476
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
   bundle2-input-part: total payload size 39
-  bundle2-input-bundle: 3 parts total
+  bundle2-input-bundle: 4 parts total
   checking for updated bookmarks
   updating the branch cache
+  added 1 changesets with 1 changes to 1 files (+1 heads)
   new changesets bec0734cd68e
   (run 'hg heads' to see heads, 'hg merge' to merge)
--- a/tests/test-obsolete-checkheads.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-obsolete-checkheads.t	Mon Oct 21 11:09:48 2019 -0400
@@ -47,6 +47,7 @@
   $ mkcommit new
   created new head
   $ hg debugobsolete --flags 1 `getid old` `getid new`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  71e3228bffe1 (draft) add new
@@ -190,6 +191,7 @@
   $ mkcommit desc2
   created new head
   $ hg debugobsolete `getid old` `getid new`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  5fe37041cc2b (draft) add desc2
@@ -247,6 +249,7 @@
   $ hg  id --debug -r tip
   71e3228bffe1886550777233d6c97bb5a6b2a650 tip
   $ hg debugobsolete c70b08862e0838ea6d7c59c85da2f1ed6c8d67da 71e3228bffe1886550777233d6c97bb5a6b2a650
+  1 new obsolescence markers
   $ hg log -G --hidden
   @  71e3228bffe1 (draft) add new
   |
@@ -301,6 +304,7 @@
   $ mkcommit new-unrelated
   created new head
   $ hg debugobsolete `getid old`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  350a93b716be (draft) add new-unrelated
--- a/tests/test-obsolete-distributed.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-obsolete-distributed.t	Mon Oct 21 11:09:48 2019 -0400
@@ -50,6 +50,7 @@
   $ mkcommit c_B0
   created new head
   $ hg debugobsolete `getid 'desc("c_A0")'` `getid 'desc("c_A1")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden -v
   @  3:e5d7dda7cd28 c_B0
@@ -82,6 +83,7 @@
   $ mkcommit c_B1
   created new head
   $ hg debugobsolete `getid 'desc("c_B0")'` `getid 'desc("c_B1")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G
   @  4:391a2bf12b1b c_B1
@@ -111,6 +113,7 @@
 
   $ cd client
   $ hg debugobsolete `getid 'desc("c_A1")'` `getid 'desc("c_B0")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G
   @  3:e5d7dda7cd28 c_B0
@@ -537,6 +540,7 @@
   $ cd repo-a
   $ hg debugbuilddag ..
   $ hg debugobsolete `getid tip`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ cd ../
   $ hg clone --pull repo-a repo-b
--- a/tests/test-obsolete-divergent.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-obsolete-divergent.t	Mon Oct 21 11:09:48 2019 -0400
@@ -64,8 +64,10 @@
 
   $ newcase direct
   $ hg debugobsolete `getid A_0` `getid A_1`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid A_0` `getid A_2`
+  1 new obsolescence markers
   2 new content-divergent changesets
   $ hg log -G --hidden
   *  3:392fd25390da A_2
@@ -124,12 +126,15 @@
 
   $ newcase indirect_known
   $ hg debugobsolete `getid A_0` `getid A_1`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid A_0` `getid A_2`
+  1 new obsolescence markers
   2 new content-divergent changesets
   $ mkcommit A_3
   created new head
   $ hg debugobsolete `getid A_2` `getid A_3`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  4:01f36c5a8fda A_3
@@ -184,9 +189,12 @@
 
   $ newcase indirect_unknown
   $ hg debugobsolete `getid A_0` aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid A_1`
+  1 new obsolescence markers
   $ hg debugobsolete `getid A_0` `getid A_2`
+  1 new obsolescence markers
   2 new content-divergent changesets
   $ hg log -G --hidden
   *  3:392fd25390da A_2
@@ -234,12 +242,17 @@
 
   $ newcase final-unknown
   $ hg debugobsolete `getid A_0` `getid A_1`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid A_1` `getid A_2`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid A_0` bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+  1 new obsolescence markers
   $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb cccccccccccccccccccccccccccccccccccccccc
+  1 new obsolescence markers
   $ hg debugobsolete `getid A_1` dddddddddddddddddddddddddddddddddddddddd
+  1 new obsolescence markers
 
   $ hg debugsuccessorssets --hidden 'desc('A_0')'
   007dc284c1f8
@@ -256,14 +269,18 @@
 
   $ newcase converged_divergence
   $ hg debugobsolete `getid A_0` `getid A_1`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid A_0` `getid A_2`
+  1 new obsolescence markers
   2 new content-divergent changesets
   $ mkcommit A_3
   created new head
   $ hg debugobsolete `getid A_1` `getid A_3`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid A_2` `getid A_3`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  4:01f36c5a8fda A_3
@@ -312,6 +329,7 @@
 
   $ newcase split
   $ hg debugobsolete `getid A_0` `getid A_1` `getid A_2`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   o  3:392fd25390da A_2
@@ -354,18 +372,21 @@
   $ mkcommit A_3
   created new head
   $ hg debugobsolete `getid A_1` `getid A_3`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ mkcommit A_4
   created new head
   $ hg debugobsolete `getid A_2` `getid A_4`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ mkcommit A_5
   created new head
   $ hg debugobsolete `getid A_4` `getid A_5`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  6:e442cfc57690 A_5
@@ -426,6 +447,7 @@
   $ mkcommit B_0; hg up 0
   0 files updated, 0 files merged, 2 files removed, 0 files unresolved
   $ hg debugobsolete `getid B_0` `getid A_2`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ mkcommit A_7; hg up 0
   created new head
@@ -434,11 +456,13 @@
   created new head
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg debugobsolete `getid A_5` `getid A_7` `getid A_8`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ mkcommit A_9; hg up 0
   created new head
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg debugobsolete `getid A_5` `getid A_9`
+  1 new obsolescence markers
   4 new content-divergent changesets
   $ hg log -G --hidden
   *  10:bed64f5d2f5a A_9
@@ -547,10 +571,13 @@
   created new head
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg debugobsolete `getid A_9` `getid A_A`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid A_7` `getid A_A`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid A_8` `getid A_A`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   o  11:a139f71be9da A_A
@@ -675,8 +702,10 @@
 
   $ newcase subset
   $ hg debugobsolete `getid A_0` `getid A_2`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid A_0` `getid A_1` `getid A_2`
+  1 new obsolescence markers
   $ hg debugsuccessorssets --hidden 'desc('A_0')'
   007dc284c1f8
       82623d38b9ba 392fd25390da
--- a/tests/test-obsolete-tag-cache.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-obsolete-tag-cache.t	Mon Oct 21 11:09:48 2019 -0400
@@ -59,6 +59,7 @@
 Hiding a non-tip changeset should change filtered hash and cause tags recompute
 
   $ hg debugobsolete -d '0 0' c3cb30f2d2cd0aae008cc91a07876e3c5131fd22 -u dummyuser
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg tags
@@ -81,8 +82,10 @@
 Hiding another changeset should cause the filtered hash to change
 
   $ hg debugobsolete -d '0 0' d75775ffbc6bca1794d300f5571272879bd280da -u dummyuser
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete -d '0 0' 5f97d42da03fd56f3b228b03dfe48af5c0adf75b -u dummyuser
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg tags
--- a/tests/test-obsolete.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-obsolete.t	Mon Oct 21 11:09:48 2019 -0400
@@ -52,6 +52,7 @@
   abort: changeset references must be full hexadecimal node identifiers
   [255]
   $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete
   97b7c2d76b1845ed3eb988cd612611e72406cef0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'babar'}
@@ -88,6 +89,7 @@
   created new head
   $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
   $ hg debugobsolete --config format.obsstore-version=0 --flag 12 `getid original_c`  `getid new_c` -d '121 120'
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
   2:245bde4270cd add original_c
@@ -112,6 +114,7 @@
   $ mkcommit new_2_c
   created new head
   $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete
   245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
@@ -124,8 +127,10 @@
   $ mkcommit new_3_c
   created new head
   $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
+  1 new obsolescence markers
   $ hg debugobsolete
   245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
   cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
@@ -264,6 +269,7 @@
   $ hg ci -m 'add n3w_3_c'
   created new head
   $ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -r 'phasedivergent()'
   $ hg log -G
@@ -323,6 +329,7 @@
   $ mkcommit kill0
   $ hg up -q null
   $ hg debugobsolete `getid kill0`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ mkcommit a
   $ mkcommit b
@@ -349,6 +356,7 @@
   $ mkcommit b
   $ hg up -q null
   $ hg --config experimental.evolution.report-instabilities=false debugobsolete `getid a`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ cd ..
 
@@ -392,6 +400,7 @@
 Rollback//Transaction support
 
   $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+  1 new obsolescence markers
   $ hg debugobsolete
   1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
   245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
@@ -478,6 +487,7 @@
   $ hg init tmpe
   $ cd tmpe
   $ hg debugobsolete -d '1339 0' 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00
+  1 new obsolescence markers
   $ hg pull ../tmpb
   pulling from ../tmpb
   requesting all changes
@@ -531,6 +541,7 @@
   $ mkcommit original_d
   $ mkcommit original_e
   $ hg debugobsolete --record-parents `getid original_d` -d '0 0'
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete | grep `getid original_d`
@@ -627,6 +638,7 @@
   created new head
   $ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'` \
   > -u 'test <test@example.net>'
+  1 new obsolescence markers
   obsoleted 1 changesets
  $ hg outgoing ../tmpf # parasite hg outgoing testing
   comparing with ../tmpf
@@ -812,65 +824,125 @@
   > do
   >    hg debugobsolete $node
   > done
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up tip
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -938,9 +1010,11 @@
 Several troubles on the same changeset (create an unstable and bumped and content-divergent changeset)
 
   $ hg debugobsolete `getid obsolete_e`
+  1 new obsolescence markers
   obsoleted 1 changesets
   2 new orphan changesets
   $ hg debugobsolete `getid original_c` `getid babar`
+  1 new obsolescence markers
   1 new phase-divergent changesets
   2 new content-divergent changesets
   $ hg log --config ui.logtemplate= -r 'phasedivergent() and orphan() and contentdivergent()'
@@ -1309,6 +1383,7 @@
   grafting 1:1c9eddb02162 "content-1" (tip)
 
   $ hg debugobsolete `hg log -r1 -T'{node}'` `hg log -r2 -T'{node}'`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
@@ -1610,6 +1685,7 @@
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   saved backup bundle to $TESTTMP/tmpe/issue4845/doindexrev/.hg/strip-backup/9bc153528424-ee80edd4-backup.hg
   $ hg debugobsolete 9bc153528424ea266d13e57f9ff0d799dfe61e4b
+  1 new obsolescence markers
   $ hg unbundle ../bundle-2.hg
   adding changesets
   adding manifests
--- a/tests/test-parseindex.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-parseindex.t	Mon Oct 21 11:09:48 2019 -0400
@@ -53,6 +53,7 @@
   >     def wrapper(*a, **kwargs):
   >         f = o(*a, **kwargs)
   >         return singlebyteread(f)
+  >     wrapper.options = o.options
   >     return wrapper
   > 
   > cl = changelog.changelog(opener(b'.hg/store'))
--- a/tests/test-parseindex2.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-parseindex2.py	Mon Oct 21 11:09:48 2019 -0400
@@ -26,12 +26,15 @@
 def gettype(q):
     return int(q & 0xFFFF)
 
+
 def offset_type(offset, type):
     return int(int(offset) << 16 | type)
 
+
 indexformatng = ">Qiiiiii20s12x"
 
-def py_parseindex(data, inline) :
+
+def py_parseindex(data, inline):
     s = 64
     cache = None
     index = []
@@ -43,7 +46,7 @@
     if inline:
         cache = (0, data)
         while off <= l:
-            e = struct.unpack(indexformatng, data[off:off + s])
+            e = struct.unpack(indexformatng, data[off : off + s])
             nodemap[e[7]] = n
             append(e)
             n += 1
@@ -52,7 +55,7 @@
             off += e[1] + s
     else:
         while off <= l:
-            e = struct.unpack(indexformatng, data[off:off + s])
+            e = struct.unpack(indexformatng, data[off : off + s])
             nodemap[e[7]] = n
             append(e)
             n += 1
@@ -65,6 +68,7 @@
 
     return index, cache
 
+
 data_inlined = (
     b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x01\x8c'
     b'\x00\x00\x04\x07\x00\x00\x00\x00\x00\x00\x15\x15\xff\xff\xff'
@@ -89,7 +93,7 @@
     b'\x83\x00\x9f$z\xb8#\xa5\xb1\xdf\x98\xd9\xec\x1b\x89O\xe3Ts\x9a4'
     b'\x17m\x8b\xfc\x8f\xa5\x95\x9a\xfc\xfa\xed,\xe5|\xa1\xfe\x15\xb9'
     b'\xbc\xb2\x93\x1f\xf2\x95\xff\xdf,\x1a\xc5\xe7\x17*\x93Oz:>\x0e'
-    )
+)
 
 data_non_inlined = (
     b'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01D\x19'
@@ -108,57 +112,81 @@
     b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1'
     b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00'
     b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-    )
+)
+
 
 def parse_index2(data, inline):
     index, chunkcache = parsers.parse_index2(data, inline)
     return list(index), chunkcache
 
+
 def importparsers(hexversion):
     """Import mercurial.parsers with the given sys.hexversion."""
     # The file parsers.c inspects sys.hexversion to determine the version
     # of the currently-running Python interpreter, so we monkey-patch
     # sys.hexversion to simulate using different versions.
-    code = ("import sys; sys.hexversion=%s; "
-            "import mercurial.cext.parsers" % hexversion)
+    code = (
+        "import sys; sys.hexversion=%s; "
+        "import mercurial.cext.parsers" % hexversion
+    )
     cmd = "python -c \"%s\"" % code
     # We need to do these tests inside a subprocess because parser.c's
     # version-checking code happens inside the module init function, and
     # when using reload() to reimport an extension module, "The init function
     # of extension modules is not called a second time"
     # (from http://docs.python.org/2/library/functions.html?#reload).
-    p = subprocess.Popen(cmd, shell=True,
-                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    p = subprocess.Popen(
+        cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+    )
     return p.communicate()  # returns stdout, stderr
 
+
 def hexfailmsg(testnumber, hexversion, stdout, expected):
     try:
         hexstring = hex(hexversion)
     except TypeError:
         hexstring = None
-    return ("FAILED: version test #%s with Python %s and patched "
-            "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n" %
-            (testnumber, sys.version_info, hexversion, hexstring, expected,
-             stdout))
+    return (
+        "FAILED: version test #%s with Python %s and patched "
+        "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n"
+        % (
+            testnumber,
+            sys.version_info,
+            hexversion,
+            hexstring,
+            expected,
+            stdout,
+        )
+    )
+
 
 def makehex(major, minor, micro):
     return int("%x%02x%02x00" % (major, minor, micro), 16)
 
+
 class parseindex2tests(unittest.TestCase):
-
     def assertversionokay(self, testnumber, hexversion):
         stdout, stderr = importparsers(hexversion)
         self.assertFalse(
-            stdout, hexfailmsg(testnumber, hexversion, stdout, 'no stdout'))
+            stdout, hexfailmsg(testnumber, hexversion, stdout, 'no stdout')
+        )
 
     def assertversionfail(self, testnumber, hexversion):
         stdout, stderr = importparsers(hexversion)
         # We include versionerrortext to distinguish from other ImportErrors.
         errtext = b"ImportError: %s" % pycompat.sysbytes(
-            parsers.versionerrortext)
-        self.assertIn(errtext, stdout,
-                      hexfailmsg(testnumber, hexversion, stdout,
-                                 expected="stdout to contain %r" % errtext))
+            parsers.versionerrortext
+        )
+        self.assertIn(
+            errtext,
+            stdout,
+            hexfailmsg(
+                testnumber,
+                hexversion,
+                stdout,
+                expected="stdout to contain %r" % errtext,
+            ),
+        )
 
     def testversiondetection(self):
         """Check the version-detection logic when importing parsers."""
@@ -189,11 +217,11 @@
 
         want = py_parseindex(data_inlined, True)
         got = parse_index2(data_inlined, True)
-        self.assertEqual(want, got) # inline data
+        self.assertEqual(want, got)  # inline data
 
         want = py_parseindex(data_non_inlined, False)
         got = parse_index2(data_non_inlined, False)
-        self.assertEqual(want, got) # no inline data
+        self.assertEqual(want, got)  # no inline data
 
         ix = parsers.parse_index2(data_inlined, True)[0]
         for i, r in enumerate(ix):
@@ -201,8 +229,10 @@
                 i = -1
             try:
                 self.assertEqual(
-                    ix[r[7]], i,
-                    'Reverse lookup inconsistent for %r' % nodemod.hex(r[7]))
+                    ix[r[7]],
+                    i,
+                    'Reverse lookup inconsistent for %r' % nodemod.hex(r[7]),
+                )
             except TypeError:
                 # pure version doesn't support this
                 break
@@ -211,12 +241,14 @@
         want = (0, 0, 0, -1, -1, -1, -1, nullid)
         index, junk = parsers.parse_index2(data_inlined, True)
         got = index[-1]
-        self.assertEqual(want, got) # inline data
+        self.assertEqual(want, got)  # inline data
 
         index, junk = parsers.parse_index2(data_non_inlined, False)
         got = index[-1]
-        self.assertEqual(want, got) # no inline data
+        self.assertEqual(want, got)  # no inline data
+
 
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
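
The version-detection test above leans on two details worth restating: makehex() packs a version the way sys.hexversion does (major, minor, micro, then a zeroed release byte), and a C extension's init function only runs on its first import, so every simulated version must be probed in a fresh interpreter. A minimal standalone sketch of the same pattern, using sys.executable and an argv list instead of the test's shell string, purely for illustration::

    import subprocess
    import sys

    def makehex(major, minor, micro):
        # same layout sys.hexversion uses: 0xMMmmpp00
        return int("%x%02x%02x00" % (major, minor, micro), 16)

    def probe(hexversion):
        # parsers.c checks sys.hexversion in its module-init code, which
        # runs only on the first import, hence one subprocess per probe
        code = ("import sys; sys.hexversion=%s; "
                "import mercurial.cext.parsers" % hexversion)
        p = subprocess.Popen([sys.executable, "-c", code],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        return p.communicate()[0]

    print(probe(makehex(3, 0, 0)))  # empty output means the import succeeded
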
--- a/tests/test-patchbomb.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-patchbomb.t	Mon Oct 21 11:09:48 2019 -0400
@@ -3035,6 +3035,47 @@
   sending [PATCH] test ...
   sending mail: $TESTTMP/t2/pretendmail.sh -f test foo
 
+Shell characters in addresses
+
+  $ hg email --date '1980-1-1 0:1' -v -t '~foo/bar@example.com' -f 'me*@example.com' -r '10'
+  this patch series consists of 1 patches.
+  
+  warning: invalid patchbomb.intro value "mpmwearaclownnose"
+  (should be one of always, never, auto)
+  -f me*@example.com ~foo/bar@example.com
+  MIME-Version: 1.0
+  Content-Type: text/plain; charset="us-ascii"
+  Content-Transfer-Encoding: 7bit
+  Subject: [PATCH] dd
+  X-Mercurial-Node: 3b6f1ec9dde933a40a115a7990f8b320477231af
+  X-Mercurial-Series-Index: 1
+  X-Mercurial-Series-Total: 1
+  Message-Id: <3b6f1ec9dde933a40a11.315532860@test-hostname>
+  X-Mercurial-Series-Id: <3b6f1ec9dde933a40a11.315532860@test-hostname>
+  User-Agent: Mercurial-patchbomb/* (glob)
+  Date: Tue, 01 Jan 1980 00:01:00 +0000
+  From: me*@example.com
+  To: ~foo/bar@example.com
+  
+  # HG changeset patch
+  # User test
+  # Date 5 0
+  #      Thu Jan 01 00:00:05 1970 +0000
+  # Branch test
+  # Node ID 3b6f1ec9dde933a40a115a7990f8b320477231af
+  # Parent  2f9fa9b998c5fe3ac2bd9a2b14bfcbeecbc7c268
+  dd
+  
+  diff -r 2f9fa9b998c5 -r 3b6f1ec9dde9 d
+  --- a/d	Thu Jan 01 00:00:04 1970 +0000
+  +++ b/d	Thu Jan 01 00:00:05 1970 +0000
+  @@ -1,1 +1,2 @@
+   d
+  +d
+  
+  sending [PATCH] dd ...
+  sending mail: $TESTTMP/t2/pretendmail.sh -f 'me*@example.com' '~foo/bar@example.com'
+
 Test pull url header
 =================================
 
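
The new test pins down how addresses containing shell metacharacters (*, ~) are quoted before the mail command line is built. The standard library gives roughly the same effect; Mercurial uses its own shell-quoting helper, so shlex.quote below is only a stand-in::

    import shlex

    addrs = ['me*@example.com', '~foo/bar@example.com']
    print(' '.join(shlex.quote(a) for a in addrs))
    # 'me*@example.com' '~foo/bar@example.com'
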
--- a/tests/test-pathencode.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-pathencode.py	Mon Oct 21 11:09:48 2019 -0400
@@ -31,9 +31,12 @@
 for c in (b'\0', b'/'):
     validchars.remove(c)
 
-winreserved = (b'aux con prn nul'.split() +
-               [b'com%d' % i for i in xrange(1, 10)] +
-               [b'lpt%d' % i for i in xrange(1, 10)])
+winreserved = (
+    b'aux con prn nul'.split()
+    + [b'com%d' % i for i in xrange(1, 10)]
+    + [b'lpt%d' % i for i in xrange(1, 10)]
+)
+
 
 def casecombinations(names):
     '''Build all case-diddled combinations of names.'''
@@ -45,10 +48,11 @@
             for c in itertools.combinations(xrange(len(r)), i):
                 d = r
                 for j in c:
-                    d = b''.join((d[:j], d[j:j + 1].upper(), d[j + 1:]))
+                    d = b''.join((d[:j], d[j : j + 1].upper(), d[j + 1 :]))
                 combos.add(d)
     return sorted(combos)
 
+
 def buildprobtable(fp, cmd='hg manifest tip'):
     '''Construct and print a table of probabilities for path name
     components.  The numbers are percentages.'''
@@ -63,10 +67,11 @@
             counts[c] += 1
     for c in '\r/\n':
         counts.pop(c, None)
-    t = sum(counts.itervalues()) / 100.0
+    t = sum(pycompat.itervalues(counts)) / 100.0
     fp.write('probtable = (')
-    for i, (k, v) in enumerate(sorted(counts.items(), key=lambda x: x[1],
-                                      reverse=True)):
+    for i, (k, v) in enumerate(
+        sorted(counts.items(), key=lambda x: x[1], reverse=True)
+    ):
         if (i % 5) == 0:
             fp.write('\n    ')
         vt = v / t
@@ -75,29 +80,83 @@
         fp.write('(%r, %.03f), ' % (k, vt))
     fp.write('\n    )\n')
 
+
 # A table of character frequencies (as percentages), gleaned by
 # looking at filelog names from a real-world, very large repo.
 
 probtable = (
-    (b't', 9.828), (b'e', 9.042), (b's', 8.011), (b'a', 6.801), (b'i', 6.618),
-    (b'g', 5.053), (b'r', 5.030), (b'o', 4.887), (b'p', 4.363), (b'n', 4.258),
-    (b'l', 3.830), (b'h', 3.693), (b'_', 3.659), (b'.', 3.377), (b'm', 3.194),
-    (b'u', 2.364), (b'd', 2.296), (b'c', 2.163), (b'b', 1.739), (b'f', 1.625),
-    (b'6', 0.666), (b'j', 0.610), (b'y', 0.554), (b'x', 0.487), (b'w', 0.477),
-    (b'k', 0.476), (b'v', 0.473), (b'3', 0.336), (b'1', 0.335), (b'2', 0.326),
-    (b'4', 0.310), (b'5', 0.305), (b'9', 0.302), (b'8', 0.300), (b'7', 0.299),
-    (b'q', 0.298), (b'0', 0.250), (b'z', 0.223), (b'-', 0.118), (b'C', 0.095),
-    (b'T', 0.087), (b'F', 0.085), (b'B', 0.077), (b'S', 0.076), (b'P', 0.076),
-    (b'L', 0.059), (b'A', 0.058), (b'N', 0.051), (b'D', 0.049), (b'M', 0.046),
-    (b'E', 0.039), (b'I', 0.035), (b'R', 0.035), (b'G', 0.028), (b'U', 0.026),
-    (b'W', 0.025), (b'O', 0.017), (b'V', 0.015), (b'H', 0.013), (b'Q', 0.011),
-    (b'J', 0.007), (b'K', 0.005), (b'+', 0.004), (b'X', 0.003), (b'Y', 0.001),
-    )
+    (b't', 9.828),
+    (b'e', 9.042),
+    (b's', 8.011),
+    (b'a', 6.801),
+    (b'i', 6.618),
+    (b'g', 5.053),
+    (b'r', 5.030),
+    (b'o', 4.887),
+    (b'p', 4.363),
+    (b'n', 4.258),
+    (b'l', 3.830),
+    (b'h', 3.693),
+    (b'_', 3.659),
+    (b'.', 3.377),
+    (b'm', 3.194),
+    (b'u', 2.364),
+    (b'd', 2.296),
+    (b'c', 2.163),
+    (b'b', 1.739),
+    (b'f', 1.625),
+    (b'6', 0.666),
+    (b'j', 0.610),
+    (b'y', 0.554),
+    (b'x', 0.487),
+    (b'w', 0.477),
+    (b'k', 0.476),
+    (b'v', 0.473),
+    (b'3', 0.336),
+    (b'1', 0.335),
+    (b'2', 0.326),
+    (b'4', 0.310),
+    (b'5', 0.305),
+    (b'9', 0.302),
+    (b'8', 0.300),
+    (b'7', 0.299),
+    (b'q', 0.298),
+    (b'0', 0.250),
+    (b'z', 0.223),
+    (b'-', 0.118),
+    (b'C', 0.095),
+    (b'T', 0.087),
+    (b'F', 0.085),
+    (b'B', 0.077),
+    (b'S', 0.076),
+    (b'P', 0.076),
+    (b'L', 0.059),
+    (b'A', 0.058),
+    (b'N', 0.051),
+    (b'D', 0.049),
+    (b'M', 0.046),
+    (b'E', 0.039),
+    (b'I', 0.035),
+    (b'R', 0.035),
+    (b'G', 0.028),
+    (b'U', 0.026),
+    (b'W', 0.025),
+    (b'O', 0.017),
+    (b'V', 0.015),
+    (b'H', 0.013),
+    (b'Q', 0.011),
+    (b'J', 0.007),
+    (b'K', 0.005),
+    (b'+', 0.004),
+    (b'X', 0.003),
+    (b'Y', 0.001),
+)
 
 for c, _ in probtable:
     validchars.remove(c)
 validchars = list(validchars)
 
+
 def pickfrom(rng, table):
     c = 0
     r = rng.random() * sum(i[1] for i in table)
@@ -106,6 +165,7 @@
         if c >= r:
             return i
 
+
 reservedcombos = casecombinations(winreserved)
 
 # The first component of a name following a slash.
@@ -114,7 +174,7 @@
     (lambda rng: pickfrom(rng, probtable), 90),
     (lambda rng: rng.choice(validchars), 5),
     (lambda rng: rng.choice(reservedcombos), 5),
-    )
+)
 
 # Components of a name following the first.
 
@@ -129,7 +189,8 @@
 lasttable = resttable + (
     (lambda rng: b'', 95),
     (lambda rng: rng.choice(internalsuffixcombos), 5),
-    )
+)
+
 
 def makepart(rng, k):
     '''Construct a part of a pathname, without slashes.'''
@@ -145,29 +206,37 @@
     ps.append(pickfrom(rng, lasttable)(rng))
     return b''.join(ps)
 
+
 def makepath(rng, j, k):
     '''Construct a complete pathname.'''
 
-    return (b'data/' + b'/'.join(makepart(rng, k) for _ in xrange(j)) +
-            rng.choice([b'.d', b'.i']))
+    return (
+        b'data/'
+        + b'/'.join(makepart(rng, k) for _ in xrange(j))
+        + rng.choice([b'.d', b'.i'])
+    )
+
 
 def genpath(rng, count):
     '''Generate random pathnames with gradually increasing lengths.'''
 
     mink, maxk = 1, 4096
+
     def steps():
         for i in xrange(count):
             yield mink + int(round(math.sqrt((maxk - mink) * float(i) / count)))
+
     for k in steps():
         x = rng.randint(1, k)
         y = rng.randint(1, k)
         yield makepath(rng, x, y)
 
+
 def runtests(rng, seed, count):
     nerrs = 0
     for p in genpath(rng, count):
-        h = store._pathencode(p)    # uses C implementation, if available
-        r = store._hybridencode(p, True) # reference implementation in Python
+        h = store._pathencode(p)  # uses C implementation, if available
+        r = store._hybridencode(p, True)  # reference implementation in Python
         if h != r:
             if nerrs == 0:
                 print('seed:', hex(seed)[:-1], file=sys.stderr)
@@ -177,23 +246,27 @@
             nerrs += 1
     return nerrs
 
+
 def main():
     import getopt
 
     # Empirically observed to take about a second to run
     count = 100
     seed = None
-    opts, args = getopt.getopt(sys.argv[1:], 'c:s:',
-                               ['build', 'count=', 'seed='])
+    opts, args = getopt.getopt(
+        sys.argv[1:], 'c:s:', ['build', 'count=', 'seed=']
+    )
     for o, a in opts:
         if o in ('-c', '--count'):
             count = int(a)
         elif o in ('-s', '--seed'):
-            seed = int(a, base=0) # accepts base 10 or 16 strings
+            seed = int(a, base=0)  # accepts base 10 or 16 strings
         elif o == '--build':
-            buildprobtable(sys.stdout,
-                           'find .hg/store/data -type f && '
-                           'cat .hg/store/fncache 2>/dev/null')
+            buildprobtable(
+                sys.stdout,
+                'find .hg/store/data -type f && '
+                'cat .hg/store/fncache 2>/dev/null',
+            )
             sys.exit(0)
 
     if seed is None:
@@ -206,5 +279,6 @@
     if runtests(rng, seed, count):
         sys.exit(1)
 
+
 if __name__ == '__main__':
     main()
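
pickfrom() above is a compact weighted random choice: scale a uniform draw by the total weight, then walk the cumulative weights until the draw is covered. The same idea, isolated with a toy table::

    import random

    def pickfrom(rng, table):
        # table is a sequence of (value, weight) pairs
        c = 0
        r = rng.random() * sum(w for _, w in table)
        for value, w in table:
            c += w
            if c >= r:
                return value

    rng = random.Random(0)
    picks = [pickfrom(rng, [(b'a', 90), (b'b', 10)]) for _ in range(1000)]
    print(picks.count(b'a'))  # roughly 900
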
--- a/tests/test-phabricator.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-phabricator.t	Mon Oct 21 11:09:48 2019 -0400
@@ -20,134 +20,3 @@
   > hgphab.phabtoken = cli-hahayouwish
   > EOF
   $ VCR="$TESTDIR/phabricator"
-
-Error is handled reasonably. We override the phabtoken here so that
-when you're developing changes to phabricator.py you can edit the
-above config and have a real token in the test but not have to edit
-this test.
-  $ hg phabread --config auth.hgphab.phabtoken=cli-notavalidtoken \
-  >  --test-vcr "$VCR/phabread-conduit-error.json" D4480 | head
-  abort: Conduit Error (ERR-INVALID-AUTH): API token "cli-notavalidtoken" has the wrong length. API tokens should be 32 characters long.
-
-Basic phabread:
-  $ hg phabread --test-vcr "$VCR/phabread-4480.json" D4480 | head
-  # HG changeset patch
-  # Date 1536771503 0
-  # Parent  a5de21c9e3703f8e8eb064bd7d893ff2f703c66a
-  exchangev2: start to implement pull with wire protocol v2
-  
-  Wire protocol version 2 will take a substantially different
-  approach to exchange than version 1 (at least as far as pulling
-  is concerned).
-  
-  This commit establishes a new exchangev2 module for holding
-
-phabupdate with an accept:
-  $ hg phabupdate --accept D4564 \
-  > -m 'I think I like where this is headed. Will read rest of series later.'\
-  >  --test-vcr "$VCR/accept-4564.json"
-
-Create a differential diff:
-  $ HGENCODING=utf-8; export HGENCODING
-  $ echo alpha > alpha
-  $ hg ci --addremove -m 'create alpha for phabricator test €'
-  adding alpha
-  $ hg phabsend -r . --test-vcr "$VCR/phabsend-create-alpha.json"
-  D1190 - created - d386117f30e6: create alpha for phabricator test \xe2\x82\xac (esc)
-  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/d386117f30e6-24ffe649-phabsend.hg
-  $ echo more >> alpha
-  $ HGEDITOR=true hg ci --amend
-  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/a86ed7d85e86-b7a54f3b-amend.hg
-  $ echo beta > beta
-  $ hg ci --addremove -m 'create beta for phabricator test'
-  adding beta
-  $ hg phabsend -r ".^::" --test-vcr "$VCR/phabsend-update-alpha-create-beta.json"
-  D1190 - updated - d940d39fb603: create alpha for phabricator test \xe2\x82\xac (esc)
-  D1191 - created - 4b2486dfc8c7: create beta for phabricator test
-  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/4b2486dfc8c7-d90584fa-phabsend.hg
-  $ unset HGENCODING
-
-The amend won't explode after posting a public commit.  The local tag is left
-behind to identify it.
-
-  $ echo 'public change' > beta
-  $ hg ci -m 'create public change for phabricator testing'
-  $ hg phase --public .
-  $ echo 'draft change' > alpha
-  $ hg ci -m 'create draft change for phabricator testing'
-  $ hg phabsend --amend -r '.^::' --test-vcr "$VCR/phabsend-create-public.json"
-  D1192 - created - 24ffd6bca53a: create public change for phabricator testing
-  D1193 - created - ac331633be79: create draft change for phabricator testing
-  warning: not updating public commit 2:24ffd6bca53a
-  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/ac331633be79-719b961c-phabsend.hg
-  $ hg tags -v
-  tip                                3:a19f1434f9a5
-  D1192                              2:24ffd6bca53a local
-
-  $ hg debugcallconduit user.search --test-vcr "$VCR/phab-conduit.json" <<EOF
-  > {
-  >     "constraints": {
-  >         "isBot": true
-  >     }
-  > }
-  > EOF
-  {
-    "cursor": {
-      "after": null,
-      "before": null,
-      "limit": 100,
-      "order": null
-    },
-    "data": [],
-    "maps": {},
-    "query": {
-      "queryKey": null
-    }
-  }
-
-Template keywords
-  $ hg log -T'{rev} {phabreview|json}\n'
-  3 {"id": "D1193", "url": "https://phab.mercurial-scm.org/D1193"}
-  2 {"id": "D1192", "url": "https://phab.mercurial-scm.org/D1192"}
-  1 {"id": "D1191", "url": "https://phab.mercurial-scm.org/D1191"}
-  0 {"id": "D1190", "url": "https://phab.mercurial-scm.org/D1190"}
-
-  $ hg log -T'{rev} {if(phabreview, "{phabreview.url} {phabreview.id}")}\n'
-  3 https://phab.mercurial-scm.org/D1193 D1193
-  2 https://phab.mercurial-scm.org/D1192 D1192
-  1 https://phab.mercurial-scm.org/D1191 D1191
-  0 https://phab.mercurial-scm.org/D1190 D1190
-
-Commenting when phabsending:
-  $ echo comment > comment
-  $ hg ci --addremove -m "create comment for phabricator test"
-  adding comment
-  $ hg phabsend -r . -m "For default branch" --test-vcr "$VCR/phabsend-comment-created.json"
-  D1253 - created - a7ee4bac036a: create comment for phabricator test
-  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/a7ee4bac036a-8009b5a0-phabsend.hg
-  $ echo comment2 >> comment
-  $ hg ci --amend
-  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/81fce7de1b7d-05339e5b-amend.hg
-  $ hg phabsend -r . -m "Address review comments" --test-vcr "$VCR/phabsend-comment-updated.json"
-  D1253 - updated - 1acd4b60af38: create comment for phabricator test
-
-Phabreading a DREV with a local:commits time as a string:
-  $ hg phabread --test-vcr "$VCR/phabread-str-time.json" D1285
-  # HG changeset patch
-  # User test <test>
-  # Date 1562019844 0
-  # Branch default
-  # Node ID da5c8c6bf23a36b6e3af011bc3734460692c23ce
-  # Parent  1f634396406d03e565ed645370e5fecd062cf215
-  test string time
-  
-  Differential Revision: https://phab.mercurial-scm.org/D1285
-  diff --git a/test b/test
-  new file mode 100644
-  --- /dev/null
-  +++ b/test
-  @@ * @@ (glob)
-  +test
-  
-
-  $ cd ..
--- a/tests/test-phases.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-phases.t	Mon Oct 21 11:09:48 2019 -0400
@@ -638,6 +638,7 @@
 
 (making a changeset hidden; H in that case)
   $ hg debugobsolete `hg id --debug -r 5`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ cd ..
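
Many of the test updates below are the same mechanical change: hg debugobsolete now reports how many markers it wrote, so each invocation gains a "1 new obsolescence markers" line ahead of the existing "obsoleted ... changesets" output, as in::

  $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
  1 new obsolescence markers
  obsoleted 1 changesets
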
--- a/tests/test-propertycache.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-propertycache.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,13 +16,13 @@
     util,
 )
 
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
 
 # create some special property caches that trace their calls
 
 calllog = []
+
+
 @util.propertycache
 def testcachedfoobar(repo):
     name = repo.filtername
@@ -32,7 +32,10 @@
     calllog.append(val)
     return val
 
+
 unficalllog = []
+
+
 @localrepo.unfilteredpropertycache
 def testcachedunfifoobar(repo):
     name = repo.filtername
@@ -42,7 +45,8 @@
     unficalllog.append(val)
     return val
 
-#plug them on repo
+
+# plug them into the repo
 localrepo.localrepository.testcachedfoobar = testcachedfoobar
 localrepo.localrepository.testcachedunfifoobar = testcachedunfifoobar
 
@@ -50,8 +54,12 @@
 # Create an empty repo and instantiate it. It is important to run
 # these tests on the real object to detect regression.
 repopath = pycompat.fsencode(os.path.join(os.environ['TESTTMP'], 'repo'))
-assert subprocess.call(pycompat.rapply(procutil.tonativestr,
-                                       [b'hg', b'init', repopath])) == 0
+assert (
+    subprocess.call(
+        pycompat.rapply(procutil.tonativestr, [b'hg', b'init', repopath])
+    )
+    == 0
+)
 
 ui = uimod.ui.load()
 repo = hg.repository(ui, path=repopath).unfiltered()
@@ -61,57 +69,75 @@
 print('=== property cache ===')
 print('')
 print('calllog:', calllog)
-print('cached value (unfiltered):',
-    vars(repo).get('testcachedfoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):', vars(repo).get('testcachedfoobar', 'NOCACHE')
+)
 
 print('')
 print('= first access on unfiltered, should do a call')
 print('access:', repo.testcachedfoobar)
 print('calllog:', calllog)
-print('cached value (unfiltered):',
-    vars(repo).get('testcachedfoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):', vars(repo).get('testcachedfoobar', 'NOCACHE')
+)
 
 print('')
 print('= second access on unfiltered, should not do call')
 print('access', repo.testcachedfoobar)
 print('calllog:', calllog)
-print('cached value (unfiltered):',
-    vars(repo).get('testcachedfoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):', vars(repo).get('testcachedfoobar', 'NOCACHE')
+)
 
 print('')
 print('= first access on "visible" view, should do a call')
 visibleview = repo.filtered('visible')
-print('cached value ("visible" view):',
-    vars(visibleview).get('testcachedfoobar', 'NOCACHE'))
+print(
+    'cached value ("visible" view):',
+    vars(visibleview).get('testcachedfoobar', 'NOCACHE'),
+)
 print('access:', visibleview.testcachedfoobar)
 print('calllog:', calllog)
-print('cached value (unfiltered):',
-    vars(repo).get('testcachedfoobar', 'NOCACHE'))
-print('cached value ("visible" view):',
-    vars(visibleview).get('testcachedfoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):', vars(repo).get('testcachedfoobar', 'NOCACHE')
+)
+print(
+    'cached value ("visible" view):',
+    vars(visibleview).get('testcachedfoobar', 'NOCACHE'),
+)
 
 print('')
 print('= second access on "visible view", should not do call')
 print('access:', visibleview.testcachedfoobar)
 print('calllog:', calllog)
-print('cached value (unfiltered):',
-    vars(repo).get('testcachedfoobar', 'NOCACHE'))
-print('cached value ("visible" view):',
-    vars(visibleview).get('testcachedfoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):', vars(repo).get('testcachedfoobar', 'NOCACHE')
+)
+print(
+    'cached value ("visible" view):',
+    vars(visibleview).get('testcachedfoobar', 'NOCACHE'),
+)
 
 print('')
 print('= no effect on other view')
 immutableview = repo.filtered('immutable')
-print('cached value ("immutable" view):',
-    vars(immutableview).get('testcachedfoobar', 'NOCACHE'))
+print(
+    'cached value ("immutable" view):',
+    vars(immutableview).get('testcachedfoobar', 'NOCACHE'),
+)
 print('access:', immutableview.testcachedfoobar)
 print('calllog:', calllog)
-print('cached value (unfiltered):',
-    vars(repo).get('testcachedfoobar', 'NOCACHE'))
-print('cached value ("visible" view):',
-    vars(visibleview).get('testcachedfoobar', 'NOCACHE'))
-print('cached value ("immutable" view):',
-    vars(immutableview).get('testcachedfoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):', vars(repo).get('testcachedfoobar', 'NOCACHE')
+)
+print(
+    'cached value ("visible" view):',
+    vars(visibleview).get('testcachedfoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("immutable" view):',
+    vars(immutableview).get('testcachedfoobar', 'NOCACHE'),
+)
 
 # unfiltered property cache test
 print('')
@@ -119,26 +145,36 @@
 print('=== unfiltered property cache ===')
 print('')
 print('unficalllog:', unficalllog)
-print('cached value (unfiltered):      ',
-    vars(repo).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("visible" view):  ',
-    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("immutable" view):',
-    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):      ',
+    vars(repo).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("visible" view):  ',
+    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("immutable" view):',
+    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'),
+)
 
 print('')
 print('= first access on unfiltered, should do a call')
 print('access (unfiltered):', repo.testcachedunfifoobar)
 print('unficalllog:', unficalllog)
-print('cached value (unfiltered):      ',
-    vars(repo).get('testcachedunfifoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):      ',
+    vars(repo).get('testcachedunfifoobar', 'NOCACHE'),
+)
 
 print('')
 print('= second access on unfiltered, should not do call')
 print('access (unfiltered):', repo.testcachedunfifoobar)
 print('unficalllog:', unficalllog)
-print('cached value (unfiltered):      ',
-    vars(repo).get('testcachedunfifoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):      ',
+    vars(repo).get('testcachedunfifoobar', 'NOCACHE'),
+)
 
 print('')
 print('= access on view should use the unfiltered cache')
@@ -146,44 +182,74 @@
 print('access ("visible" view):  ', visibleview.testcachedunfifoobar)
 print('access ("immutable" view):', immutableview.testcachedunfifoobar)
 print('unficalllog:', unficalllog)
-print('cached value (unfiltered):      ',
-    vars(repo).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("visible" view):  ',
-    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("immutable" view):',
-    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):      ',
+    vars(repo).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("visible" view):  ',
+    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("immutable" view):',
+    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'),
+)
 
 print('')
 print('= even if we clear the unfiltered cache')
 del repo.__dict__['testcachedunfifoobar']
-print('cached value (unfiltered):      ',
-    vars(repo).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("visible" view):  ',
-    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("immutable" view):',
-    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):      ',
+    vars(repo).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("visible" view):  ',
+    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("immutable" view):',
+    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'),
+)
 print('unficalllog:', unficalllog)
 print('access ("visible" view):  ', visibleview.testcachedunfifoobar)
 print('unficalllog:', unficalllog)
-print('cached value (unfiltered):      ',
-    vars(repo).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("visible" view):  ',
-    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("immutable" view):',
-    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):      ',
+    vars(repo).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("visible" view):  ',
+    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("immutable" view):',
+    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'),
+)
 print('access ("immutable" view):', immutableview.testcachedunfifoobar)
 print('unficalllog:', unficalllog)
-print('cached value (unfiltered):      ',
-    vars(repo).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("visible" view):  ',
-    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("immutable" view):',
-    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):      ',
+    vars(repo).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("visible" view):  ',
+    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("immutable" view):',
+    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'),
+)
 print('access (unfiltered):      ', repo.testcachedunfifoobar)
 print('unficalllog:', unficalllog)
-print('cached value (unfiltered):      ',
-    vars(repo).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("visible" view):  ',
-    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'))
-print('cached value ("immutable" view):',
-    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'))
+print(
+    'cached value (unfiltered):      ',
+    vars(repo).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("visible" view):  ',
+    vars(visibleview).get('testcachedunfifoobar', 'NOCACHE'),
+)
+print(
+    'cached value ("immutable" view):',
+    vars(immutableview).get('testcachedunfifoobar', 'NOCACHE'),
+)
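
The behaviour this test pins down, one call per object with the result landing in the instance __dict__ so later lookups never reach the descriptor again, is the classic non-data-descriptor trick. A stripped-down sketch of the mechanism (not Mercurial's actual util.propertycache, which carries extra machinery)::

    class propertycache(object):
        '''Non-data descriptor: compute once, then cache on the instance.'''

        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, obj, objtype=None):
            value = self.func(obj)
            # the instance dict now shadows the descriptor, so later
            # attribute lookups skip __get__ entirely
            obj.__dict__[self.name] = value
            return value

    class repo(object):
        @propertycache
        def testcachedfoobar(self):
            print('computing')
            return 'value'

    r = repo()
    r.testcachedfoobar                    # prints 'computing'
    r.testcachedfoobar                    # cache hit, silent
    del r.__dict__['testcachedfoobar']    # clear, as the test does
    r.testcachedfoobar                    # prints 'computing' again
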
--- a/tests/test-pull-bundle.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-pull-bundle.t	Mon Oct 21 11:09:48 2019 -0400
@@ -101,15 +101,13 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files (+1 heads)
+  added 3 changesets with 3 changes to 3 files (+1 heads)
   new changesets bbd179dfa0a7:ed1b79f46b9a (3 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-pull-update.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-pull-update.t	Mon Oct 21 11:09:48 2019 -0400
@@ -108,8 +108,8 @@
   adding changesets
   adding manifests
   adding file changes
+  adding remote bookmark active-after-pull
   added 1 changesets with 1 changes to 1 files
-  adding remote bookmark active-after-pull
   new changesets f815b3da6163
   1 local changesets published
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -138,8 +138,8 @@
   adding changesets
   adding manifests
   adding file changes
+  adding remote bookmark active-after-pull
   added 1 changesets with 1 changes to 1 files
-  adding remote bookmark active-after-pull
   new changesets f815b3da6163
   1 local changesets published
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-partial-C1.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-partial-C1.t	Mon Oct 21 11:09:48 2019 -0400
@@ -60,6 +60,7 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-partial-C2.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-partial-C2.t	Mon Oct 21 11:09:48 2019 -0400
@@ -60,6 +60,7 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg log -G --hidden
--- a/tests/test-push-checkheads-partial-C3.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-partial-C3.t	Mon Oct 21 11:09:48 2019 -0400
@@ -60,6 +60,7 @@
   $ mkcommit C0
   created new head
   $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  0f88766e02d6 (draft): C0
--- a/tests/test-push-checkheads-partial-C4.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-partial-C4.t	Mon Oct 21 11:09:48 2019 -0400
@@ -60,6 +60,7 @@
   $ mkcommit C0
   created new head
   $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg log -G --hidden
--- a/tests/test-push-checkheads-pruned-B1.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-pruned-B1.t	Mon Oct 21 11:09:48 2019 -0400
@@ -49,6 +49,7 @@
   $ mkcommit B0
   created new head
   $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  74ff5441d343 (draft): B0
--- a/tests/test-push-checkheads-pruned-B2.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-pruned-B2.t	Mon Oct 21 11:09:48 2019 -0400
@@ -60,9 +60,11 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  f6082bc4ffef (draft): A1
--- a/tests/test-push-checkheads-pruned-B3.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-pruned-B3.t	Mon Oct 21 11:09:48 2019 -0400
@@ -60,9 +60,11 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-pruned-B4.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-pruned-B4.t	Mon Oct 21 11:09:48 2019 -0400
@@ -61,9 +61,11 @@
   $ mkcommit C0
   created new head
   $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  0f88766e02d6 (draft): C0
--- a/tests/test-push-checkheads-pruned-B5.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-pruned-B5.t	Mon Oct 21 11:09:48 2019 -0400
@@ -64,11 +64,14 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   2 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)"` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(C0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-pruned-B6.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-pruned-B6.t	Mon Oct 21 11:09:48 2019 -0400
@@ -52,8 +52,10 @@
   $ hg up 'desc(B0)'
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   x  ba93660aff8d (draft): A1
--- a/tests/test-push-checkheads-pruned-B7.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-pruned-B7.t	Mon Oct 21 11:09:48 2019 -0400
@@ -51,8 +51,10 @@
   $ hg up 'desc(B0)'
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   x  ba93660aff8d (draft): A1
--- a/tests/test-push-checkheads-pruned-B8.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-pruned-B8.t	Mon Oct 21 11:09:48 2019 -0400
@@ -67,13 +67,17 @@
   $ mkcommit A2
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid "desc(A1)" ` `getid "desc(A2)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  c1f8d089020f (draft): A2
--- a/tests/test-push-checkheads-superceed-A1.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-superceed-A1.t	Mon Oct 21 11:09:48 2019 -0400
@@ -46,6 +46,7 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  f6082bc4ffef (draft): A1
--- a/tests/test-push-checkheads-superceed-A2.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-superceed-A2.t	Mon Oct 21 11:09:48 2019 -0400
@@ -60,9 +60,11 @@
   created new head
   $ mkcommit B1
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  262c8c798096 (draft): B1
--- a/tests/test-push-checkheads-superceed-A3.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-superceed-A3.t	Mon Oct 21 11:09:48 2019 -0400
@@ -63,9 +63,11 @@
   created new head
   $ mkcommit A1
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  c1c7524e9488 (draft): A1
--- a/tests/test-push-checkheads-superceed-A4.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-superceed-A4.t	Mon Oct 21 11:09:48 2019 -0400
@@ -48,6 +48,7 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ mkcommit B0
   $ hg log -G --hidden
--- a/tests/test-push-checkheads-superceed-A5.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-superceed-A5.t	Mon Oct 21 11:09:48 2019 -0400
@@ -49,6 +49,7 @@
   created new head
   $ mkcommit A1
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  ba93660aff8d (draft): A1
--- a/tests/test-push-checkheads-superceed-A6.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-superceed-A6.t	Mon Oct 21 11:09:48 2019 -0400
@@ -69,9 +69,11 @@
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ mkcommit B1
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  d70a1f75a020 (draft): B1
--- a/tests/test-push-checkheads-superceed-A7.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-superceed-A7.t	Mon Oct 21 11:09:48 2019 -0400
@@ -69,9 +69,11 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-superceed-A8.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-superceed-A8.t	Mon Oct 21 11:09:48 2019 -0400
@@ -53,8 +53,10 @@
   $ mkcommit A2
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid "desc(A1)" ` `getid "desc(A2)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  c1f8d089020f (draft): A2
--- a/tests/test-push-checkheads-unpushed-D1.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-unpushed-D1.t	Mon Oct 21 11:09:48 2019 -0400
@@ -49,6 +49,7 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-unpushed-D2.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-unpushed-D2.t	Mon Oct 21 11:09:48 2019 -0400
@@ -64,9 +64,11 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-unpushed-D3.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-unpushed-D3.t	Mon Oct 21 11:09:48 2019 -0400
@@ -67,9 +67,11 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-unpushed-D4.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-unpushed-D4.t	Mon Oct 21 11:09:48 2019 -0400
@@ -83,9 +83,11 @@
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ mkcommit B1
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  d70a1f75a020 (draft): B1
--- a/tests/test-push-checkheads-unpushed-D5.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-unpushed-D5.t	Mon Oct 21 11:09:48 2019 -0400
@@ -72,9 +72,11 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-unpushed-D6.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-unpushed-D6.t	Mon Oct 21 11:09:48 2019 -0400
@@ -56,8 +56,10 @@
   $ mkcommit C0
   created new head
   $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  0f88766e02d6 (draft): C0
--- a/tests/test-push-checkheads-unpushed-D7.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-checkheads-unpushed-D7.t	Mon Oct 21 11:09:48 2019 -0400
@@ -65,10 +65,13 @@
   $ mkcommit C0
   created new head
   $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid "desc(A1)"` `getid "desc(A2)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(A2)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  0f88766e02d6 (draft): C0
--- a/tests/test-push-http.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-http.t	Mon Oct 21 11:09:48 2019 -0400
@@ -88,8 +88,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: adding a revisions
+  remote: updating the branch cache
   remote: added 1 changesets with 1 changes to 1 files
-  remote: updating the branch cache
   remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh
   remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b:  draft -> public
   remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh
@@ -117,8 +117,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: adding a revisions
+  remote: updating the branch cache
   remote: added 1 changesets with 1 changes to 1 files
-  remote: updating the branch cache
   remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh
   remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b:  draft -> public
   remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh
@@ -309,7 +309,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: prepushkey hook: HG_BUNDLE2=1
   remote: HG_HOOKNAME=prepushkey
   remote: HG_HOOKTYPE=prepushkey
@@ -351,7 +350,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: prepushkey hook: HG_BUNDLE2=1
   remote: HG_HOOKNAME=prepushkey
   remote: HG_HOOKTYPE=prepushkey
@@ -368,6 +366,7 @@
   remote: HG_TXNNAME=serve
   remote: HG_URL=remote:http:$LOCALIP: (glob)
   remote: 
+  remote: added 1 changesets with 1 changes to 1 files
   % serve errors
 #endif
 
@@ -410,7 +409,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: prepushkey hook: HG_BUNDLE2=1
   remote: HG_HOOKNAME=prepushkey
   remote: HG_HOOKTYPE=prepushkey
@@ -465,7 +463,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: prepushkey hook: HG_BUNDLE2=1
   remote: HG_HOOKNAME=prepushkey
   remote: HG_HOOKTYPE=prepushkey
@@ -482,6 +479,7 @@
   remote: HG_TXNNAME=serve
   remote: HG_URL=remote:http:$LOCALIP: (glob)
   remote: 
+  remote: added 1 changesets with 1 changes to 1 files
   % serve errors
 #endif
 
--- a/tests/test-push-race.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push-race.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1608,6 +1608,7 @@
   $ ID_Q=`hg -R client-racy log -T '{node}\n' -r 'desc("C-Q")'`
   $ ID_V=`hg -R client-racy log -T '{node}\n' -r 'desc("C-V")'`
   $ hg -R client-racy debugobsolete $ID_Q $ID_V
+  1 new obsolescence markers
   obsoleted 1 changesets
 
 Pushing
@@ -1800,6 +1801,7 @@
   $ ID_V=`hg -R client-other log -T '{node}\n' -r 'desc("C-V")'`
   $ ID_W=`hg -R client-other log -T '{node}\n' -r 'desc("C-W")'`
   $ hg -R client-other debugobsolete $ID_V $ID_W
+  1 new obsolescence markers
   obsoleted 1 changesets
 
 (continue the same head)
--- a/tests/test-push.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-push.t	Mon Oct 21 11:09:48 2019 -0400
@@ -287,9 +287,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   lock:  user *, process * (*s) (glob)
   wlock: free
+  added 1 changesets with 1 changes to 1 files
 
   $ hg --cwd 1 --config extensions.strip= strip tip -q
   $ hg --cwd 2 --config extensions.strip= strip tip -q
@@ -299,9 +299,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   lock:  user *, process * (*s) (glob)
   wlock: user *, process * (*s) (glob)
+  added 1 changesets with 1 changes to 1 files
 
 Test bare push with multiple race checking options
 --------------------------------------------------
@@ -348,3 +348,55 @@
   [255]
 
   $ [ ! -f owned ] || echo 'you got owned'
+
+Test `commands.push.require-revs`
+---------------------------------
+
+  $ hg clone -q test-revflag test-require-revs-source
+  $ hg init test-require-revs-dest
+  $ cd test-require-revs-source
+  $ cat >> .hg/hgrc << EOF
+  > [paths]
+  > default = ../test-require-revs-dest
+  > [commands]
+  > push.require-revs=1
+  > EOF
+  $ hg push
+  pushing to $TESTTMP/test-require-revs-dest
+  abort: no revisions specified to push
+  (did you mean "hg push -r ."?)
+  [255]
+  $ hg push -r 0
+  pushing to $TESTTMP/test-require-revs-dest
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  $ hg bookmark -r 0 push-this-bookmark
+(test that -B (bookmark) works for specifying "revs")
+  $ hg push -B push-this-bookmark
+  pushing to $TESTTMP/test-require-revs-dest
+  searching for changes
+  no changes found
+  exporting bookmark push-this-bookmark
+  [1]
+(test that -b (branch) works for specifying "revs")
+  $ hg push -b default
+  pushing to $TESTTMP/test-require-revs-dest
+  searching for changes
+  abort: push creates new remote head [0-9a-f]+! (re)
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+(demonstrate that even though we don't have anything to exchange, we're still
+showing the error)
+  $ hg push
+  pushing to $TESTTMP/test-require-revs-dest
+  abort: no revisions specified to push
+  (did you mean "hg push -r ."?)
+  [255]
+  $ hg push --config paths.default:pushrev=0
+  pushing to $TESTTMP/test-require-revs-dest
+  searching for changes
+  no changes found
+  [1]
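
commands.push.require-revs makes a bare "hg push" abort unless revisions are named explicitly (-r, -B, -b) or the path carries a default push revset. A minimal configuration combining the two knobs the test exercises, assuming a default path is already set::

  $ cat >> .hg/hgrc << 'EOF'
  > [commands]
  > push.require-revs = True
  > [paths]
  > default:pushrev = .
  > EOF

With default:pushrev set, a bare push sends "." instead of aborting, which is what the final --config paths.default:pushrev=0 invocation above demonstrates.
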
--- a/tests/test-pushvars.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-pushvars.t	Mon Oct 21 11:09:48 2019 -0400
@@ -41,9 +41,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   HG_USERVAR_BYPASS_REVIEW=true
   HG_USERVAR_DEBUG=1
+  added 1 changesets with 1 changes to 1 files
 
 Test pushing var with empty right-hand side
 
@@ -55,8 +55,8 @@
   adding changesets
   adding manifests
   adding file changes
+  HG_USERVAR_DEBUG=
   added 1 changesets with 1 changes to 1 files
-  HG_USERVAR_DEBUG=
 
 Test pushing bad vars
 
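
The HG_USERVAR_* lines in the output above come from hooks run on the server: "hg push --pushvars" ships KEY=VALUE pairs along with the push and exposes them to hooks as environment variables. A sketch of the shape of such a setup; the path and hook body are illustrative, and the server has to opt in::

  $ cat >> server/.hg/hgrc << 'EOF'
  > [push]
  > pushvars.server = True
  > [hooks]
  > pretxnchangegroup = env | grep HG_USERVAR_
  > EOF
  $ hg push --pushvars "BYPASS_REVIEW=true" --pushvars "DEBUG=1"
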
--- a/tests/test-rebase-conflicts.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-rebase-conflicts.t	Mon Oct 21 11:09:48 2019 -0400
@@ -315,14 +315,14 @@
   adding manifests
   adding file changes
   adding f1.txt revisions
-  added 2 changesets with 2 changes to 1 files
   bundle2-input-part: total payload size 1686
   bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
   bundle2-input-part: total payload size 74
   truncating cache/rbc-revs-v1 to 56
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 2 parts total
+  bundle2-input-bundle: 3 parts total
+  added 2 changesets with 2 changes to 1 files
   updating the branch cache
   invalid branch cache (served): tip differs
   invalid branch cache (served.hidden): tip differs
--- a/tests/test-rebase-inmemory.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-rebase-inmemory.t	Mon Oct 21 11:09:48 2019 -0400
@@ -506,6 +506,7 @@
   $ hg rebase -s 2 -d 7
   rebasing 2:177f92b77385 "c"
   abort: outstanding merge conflicts
+  (use 'hg resolve' to resolve)
   [255]
   $ hg resolve -l
   U e
--- a/tests/test-rebase-named-branches.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-rebase-named-branches.t	Mon Oct 21 11:09:48 2019 -0400
@@ -374,7 +374,7 @@
   
   $ hg rebase
   abort: branch 'c' has one head - please rebase to an explicit rev
-  (run 'hg heads' to see all heads)
+  (run 'hg heads' to see all heads, specify destination with -d)
   [255]
   $ hg tglog
   _  4: 8427af5d86f2 'c2 closed' c
--- a/tests/test-rebase-obsolete.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-rebase-obsolete.t	Mon Oct 21 11:09:48 2019 -0400
@@ -651,6 +651,7 @@
   $ hg commit -m J
   1 new orphan changesets
   $ hg debugobsolete `hg log --rev . -T '{node}'`
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg rebase --rev .~1::. --dest 'max(desc(D))' --traceback --config experimental.rebaseskipobsolete=off
@@ -838,6 +839,7 @@
   o  0:4a2df7238c3b A
   
   $ hg debugobsolete `hg log -r 7 -T '{node}\n'` --config experimental.evolution=true
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg rebase -d 6 -r "4::"
@@ -867,6 +869,7 @@
   $ hg commit -m nonrelevant
   created new head
   $ hg debugobsolete `hg log -r 11 -T '{node}\n'` --config experimental.evolution=true
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G
   @  11:f44da1f4954c nonrelevant (pruned)
@@ -1007,6 +1010,7 @@
   $ hg add L
   $ hg commit -m "dummy change"
   $ hg debugobsolete `hg log -r ".^" -T '{node}'` `hg log -r 18 -T '{node}'` --config experimental.evolution=true
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
 
@@ -1276,6 +1280,7 @@
   > EOF
   1 new orphan changesets
   $ hg debugobsolete `hg log -T "{node}" --hidden -r 'desc("c1")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G -r 'a': --hidden
   *  4:76be324c128b d
--- a/tests/test-rebase-parameters.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-rebase-parameters.t	Mon Oct 21 11:09:48 2019 -0400
@@ -85,7 +85,7 @@
 
   $ hg rebase --base 6
   abort: branch 'default' has 3 heads - please rebase to an explicit rev
-  (run 'hg heads .' to see heads)
+  (run 'hg heads .' to see heads, specify destination with -d)
   [255]
 
   $ hg rebase --rev '1 & !1' --dest 8
--- a/tests/test-rebase-templates.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-rebase-templates.t	Mon Oct 21 11:09:48 2019 -0400
@@ -55,3 +55,30 @@
 
   $ hg rebase -s 6 -d 4 -q -T "{nodechanges % '{oldnode}:{newnodes % ' {node} '}'}"
   d9d6773efc831c274eace04bc13e8e6412517139: f48cd65c6dc3d2acb55da54402a5b029546e546f  (no-eol)
+
+  $ hg log -G -T "{rev}:{node|short} {desc}"
+  o  7:f48cd65c6dc3 Added b
+  |
+  | @  5:df21b32134ba Added d
+  |/
+  o  4:849767420fd5 Added c
+  |
+  o  0:18d04c59bb5d Added a
+  
+
+
+  $ hg rebase -s 7 -d 5 -q --keep -T "{nodechanges % '{oldnode}:{newnodes % ' {node} '}'}"
+  f48cd65c6dc3d2acb55da54402a5b029546e546f: 6f7dda91e55e728fb798f3e44dbecf0ebaa83267  (no-eol)
+
+  $ hg log -G -T "{rev}:{node|short} {desc}"
+  o  8:6f7dda91e55e Added b
+  |
+  | o  7:f48cd65c6dc3 Added b
+  | |
+  @ |  5:df21b32134ba Added d
+  |/
+  o  4:849767420fd5 Added c
+  |
+  o  0:18d04c59bb5d Added a
+  
+
--- a/tests/test-remote-hidden.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-remote-hidden.t	Mon Oct 21 11:09:48 2019 -0400
@@ -36,6 +36,7 @@
   $ hg ci -m "c_Pruned"
   created new head
   $ hg debugobsolete --record-parents `getid 'desc("c_Pruned")'` -d '0 0'
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up ".^"
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -45,6 +46,7 @@
   $ echo 5 > a
   $ hg ci -m "c_Secret_Pruned" --secret
   $ hg debugobsolete --record-parents `getid 'desc("c_Secret_Pruned")'` -d '0 0'
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up null
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-remotefilelog-bgprefetch.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-remotefilelog-bgprefetch.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1,6 +1,12 @@
 #require no-windows
 
   $ . "$TESTDIR/remotefilelog-library.sh"
+# devel.remotefilelog.ensurestart: reduces race conditions with
+# debugwaiton{repack,prefetch}
+  $ cat >> $HGRCPATH <<EOF
+  > [devel]
+  > remotefilelog.ensurestart=True
+  > EOF
 
   $ hg init master
   $ cd master
@@ -67,8 +73,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 6b4b6f66ef8c
   (run 'hg update' to get a working copy)
   prefetching file contents
@@ -96,8 +102,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 6b4b6f66ef8c
   (run 'hg update' to get a working copy)
   prefetching file contents
@@ -111,7 +117,6 @@
   $TESTTMP/hgcache/master/packs/6e8633deba6e544e5f8edbd7b996d6e31a2c42ae.histpack
   $TESTTMP/hgcache/master/packs/8ce5ab3745465ab83bba30a7b9c295e0c8404652.dataidx
   $TESTTMP/hgcache/master/packs/8ce5ab3745465ab83bba30a7b9c295e0c8404652.datapack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # background prefetch with repack on update when wcprevset configured
@@ -148,7 +153,6 @@
   $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
   $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
   $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Ensure that file 'w' was prefetched - it was not part of the update operation and therefore
@@ -201,7 +205,6 @@
   $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
   $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
   $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Ensure that file 'w' was prefetched - it was not part of the commit operation and therefore
@@ -240,7 +243,7 @@
   $ find $CACHEDIR -type f | sort
   $ hg rebase -s temporary -d foo
   rebasing 3:58147a5b5242 "b" (temporary tip)
-  saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/58147a5b5242-c3678817-rebase.hg (glob)
+  saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/58147a5b5242-c3678817-rebase.hg
   3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
   $ sleep 1
   $ hg debugwaitonprefetch >/dev/null 2>&1
@@ -295,7 +298,6 @@
   $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
   $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
   $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Ensure that files were prefetched
@@ -340,7 +342,6 @@
   $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
   $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
   $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Ensure that files were prefetched
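
The ``devel.remotefilelog.ensurestart`` knob added above exists to make these
tests deterministic: the command only returns once the background
prefetch/repack process has actually started, so the later
``debugwaitonprefetch``/``debugwaitonrepack`` calls cannot race with process
startup. A minimal sketch of the idea (illustrative only, not Mercurial's
actual implementation)::

    import subprocess

    def runbgcommand(cmd, ensurestart=True):
        # Spawn the background worker. With ensurestart, block until the
        # child signals readiness, so a "wait on prefetch/repack" hook
        # issued right afterwards cannot observe a not-yet-started process.
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        if ensurestart:
            proc.stdout.readline()  # assumed "started" banner from the child
        return proc
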
--- a/tests/test-remotefilelog-datapack.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-remotefilelog-datapack.py	Mon Oct 21 11:09:48 2019 -0400
@@ -27,6 +27,7 @@
     datapack,
 )
 
+
 class datapacktestsbase(object):
     def __init__(self, datapackreader, paramsavailable):
         self.datapackreader = datapackreader
@@ -48,8 +49,9 @@
         return hashlib.sha1(content).digest()
 
     def getFakeHash(self):
-        return b''.join(pycompat.bytechr(random.randint(0, 255))
-                        for _ in range(20))
+        return b''.join(
+            pycompat.bytechr(random.randint(0, 255)) for _ in range(20)
+        )
 
     def createPack(self, revisions=None, packdir=None):
         if revisions is None:
@@ -80,8 +82,9 @@
         revisions = [(filename, node, nullid, content)]
         pack = self.createPack(revisions)
         if self.paramsavailable:
-            self.assertEqual(pack.params.fanoutprefix,
-                             basepack.SMALLFANOUTPREFIX)
+            self.assertEqual(
+                pack.params.fanoutprefix, basepack.SMALLFANOUTPREFIX
+            )
 
         chain = pack.getdeltachain(filename, node)
         self.assertEqual(content, chain[0][4])
@@ -171,10 +174,12 @@
             filename = b'%d.txt' % i
             content = b'put-something-here \n' * i
             node = self.getHash(content)
-            meta = {constants.METAKEYFLAG: i ** 4,
-                    constants.METAKEYSIZE: len(content),
-                    b'Z': b'random_string',
-                    b'_': b'\0' * i}
+            meta = {
+                constants.METAKEYFLAG: i ** 4,
+                constants.METAKEYSIZE: len(content),
+                b'Z': b'random_string',
+                b'_': b'\0' * i,
+            }
             revisions.append((filename, node, nullid, content, meta))
         pack = self.createPack(revisions)
         for name, node, x, content, origmeta in revisions:
@@ -201,13 +206,15 @@
         missing = pack.getmissing([(b"foo", revisions[0][1])])
         self.assertFalse(missing)
 
-        missing = pack.getmissing([(b"foo", revisions[0][1]),
-                                   (b"foo", revisions[1][1])])
+        missing = pack.getmissing(
+            [(b"foo", revisions[0][1]), (b"foo", revisions[1][1])]
+        )
         self.assertFalse(missing)
 
         fakenode = self.getFakeHash()
-        missing = pack.getmissing([(b"foo", revisions[0][1]),
-                                   (b"foo", fakenode)])
+        missing = pack.getmissing(
+            [(b"foo", revisions[0][1]), (b"foo", fakenode)]
+        )
         self.assertEqual(missing, [(b"foo", fakenode)])
 
     def testAddThrows(self):
@@ -257,8 +264,9 @@
 
         pack = self.createPack(revisions)
         if self.paramsavailable:
-            self.assertEqual(pack.params.fanoutprefix,
-                             basepack.LARGEFANOUTPREFIX)
+            self.assertEqual(
+                pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX
+            )
 
         for (filename, node), content in blobs.items():
             actualcontent = pack.getdeltachain(filename, node)[0][4]
@@ -284,7 +292,7 @@
                     b'%d' % i,
                     self.getFakeHash(),
                     revision[1],
-                    self.getFakeHash()
+                    self.getFakeHash(),
                 )
 
             self.createPack(chain, packdir)
@@ -303,8 +311,7 @@
 
             mostrecentpack = next(iter(store.packs), None)
             self.assertEqual(
-                mostrecentpack.getdeltachain(revision[0], revision[1]),
-                chain
+                mostrecentpack.getdeltachain(revision[0], revision[1]), chain
             )
 
             self.assertEqual(randomchain.index(revision) + 1, len(chain))
@@ -341,6 +348,7 @@
 
             # Perf of large multi-get
             import gc
+
             gc.disable()
             pack = self.datapackreader(path)
             for lookupsize in lookupsizes:
@@ -352,10 +360,14 @@
                 start = time.time()
                 pack.getmissing(findnodes[:lookupsize])
                 elapsed = time.time() - start
-                print ("%s pack %d lookups = %0.04f" %
-                       (('%d' % packsize).rjust(7),
+                print(
+                    "%s pack %d lookups = %0.04f"
+                    % (
+                        ('%d' % packsize).rjust(7),
                         ('%d' % lookupsize).rjust(7),
-                        elapsed))
+                        elapsed,
+                    )
+                )
 
             print("")
             gc.enable()
@@ -364,11 +376,13 @@
         # so the user sees the output.
         raise RuntimeError("perf test always fails")
 
+
 class datapacktests(datapacktestsbase, unittest.TestCase):
     def __init__(self, *args, **kwargs):
         datapacktestsbase.__init__(self, datapack.datapack, True)
         unittest.TestCase.__init__(self, *args, **kwargs)
 
+
 # TODO:
 # datapack store:
 # - getmissing
@@ -376,5 +390,5 @@
 
 if __name__ == '__main__':
     if pycompat.iswindows:
-        sys.exit(80)    # Skip on Windows
+        sys.exit(80)  # Skip on Windows
     silenttestrunner.main(__name__)
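
The Python hunks in this file are mechanical reformatting to an 80-column
style: long calls are exploded across lines with a trailing comma, and two
blank lines now separate top-level definitions. The recurring pattern, taken
from this file::

    # before: manually wrapped continuation lines
    self.assertEqual(pack.params.fanoutprefix,
                     basepack.SMALLFANOUTPREFIX)

    # after: the call is exploded, arguments on one indented line
    self.assertEqual(
        pack.params.fanoutprefix, basepack.SMALLFANOUTPREFIX
    )
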
--- a/tests/test-remotefilelog-gc.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-remotefilelog-gc.t	Mon Oct 21 11:09:48 2019 -0400
@@ -93,7 +93,6 @@
   $TESTTMP/hgcache/master/packs/320dab99b7e3f60512b97f347689625263d22cf5.datapack
   $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histidx
   $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histpack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Test that warning is displayed when there are no valid repos in repofile
--- a/tests/test-remotefilelog-histpack.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-remotefilelog-histpack.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,6 +18,7 @@
     pycompat,
     ui as uimod,
 )
+
 # Load the local remotefilelog, not the system one
 sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
 from hgext.remotefilelog import (
@@ -25,6 +26,7 @@
     historypack,
 )
 
+
 class histpacktests(unittest.TestCase):
     def setUp(self):
         self.tempdirs = []
@@ -42,8 +44,9 @@
         return hashlib.sha1(content).digest()
 
     def getFakeHash(self):
-        return b''.join(pycompat.bytechr(random.randint(0, 255))
-                        for _ in range(20))
+        return b''.join(
+            pycompat.bytechr(random.randint(0, 255)) for _ in range(20)
+        )
 
     def createPack(self, revisions=None):
         """Creates and returns a historypack containing the specified revisions.
@@ -52,12 +55,19 @@
         node, p1node, p2node, and linknode.
         """
         if revisions is None:
-            revisions = [(b"filename", self.getFakeHash(), nullid, nullid,
-                          self.getFakeHash(), None)]
+            revisions = [
+                (
+                    b"filename",
+                    self.getFakeHash(),
+                    nullid,
+                    nullid,
+                    self.getFakeHash(),
+                    None,
+                )
+            ]
 
         packdir = pycompat.fsencode(self.makeTempDir())
-        packer = historypack.mutablehistorypack(uimod.ui(), packdir,
-                                                version=2)
+        packer = historypack.mutablehistorypack(uimod.ui(), packdir, version=2)
 
         for filename, node, p1, p2, linknode, copyfrom in revisions:
             packer.add(filename, node, p1, p2, linknode, copyfrom)
@@ -163,8 +173,7 @@
         # Verify the pack contents
         for (filename, node) in allentries:
             ancestors = pack.getancestors(filename, node)
-            self.assertEqual(ancestorcounts[(filename, node)],
-                             len(ancestors))
+            self.assertEqual(ancestorcounts[(filename, node)], len(ancestors))
             for anode, (ap1, ap2, alinknode, copyfrom) in ancestors.items():
                 ep1, ep2, elinknode = allentries[(filename, anode)]
                 self.assertEqual(ap1, ep1)
@@ -208,13 +217,15 @@
         missing = pack.getmissing([(filename, revisions[0][1])])
         self.assertFalse(missing)
 
-        missing = pack.getmissing([(filename, revisions[0][1]),
-                                   (filename, revisions[1][1])])
+        missing = pack.getmissing(
+            [(filename, revisions[0][1]), (filename, revisions[1][1])]
+        )
         self.assertFalse(missing)
 
         fakenode = self.getFakeHash()
-        missing = pack.getmissing([(filename, revisions[0][1]),
-                                   (filename, fakenode)])
+        missing = pack.getmissing(
+            [(filename, revisions[0][1]), (filename, fakenode)]
+        )
         self.assertEqual(missing, [(filename, fakenode)])
 
         # Test getmissing on a non-existent filename
@@ -268,11 +279,13 @@
             self.assertEqual(p2, actual[1])
             self.assertEqual(linknode, actual[2])
             self.assertEqual(copyfrom, actual[3])
+
+
 # TODO:
 # histpack store:
 # - repack two packs into one
 
 if __name__ == '__main__':
     if pycompat.iswindows:
-        sys.exit(80)    # Skip on Windows
+        sys.exit(80)  # Skip on Windows
     silenttestrunner.main(__name__)
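
``getFakeHash`` in both pack tests fabricates a 20-byte node (the width of a
SHA-1 digest) from random bytes; the value only needs to be well-formed, not
a real hash::

    import random
    from mercurial import pycompat

    fakenode = b''.join(
        pycompat.bytechr(random.randint(0, 255)) for _ in range(20)
    )
    assert len(fakenode) == 20  # same width as hashlib.sha1(...).digest()
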
--- a/tests/test-remotefilelog-prefetch.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-remotefilelog-prefetch.t	Mon Oct 21 11:09:48 2019 -0400
@@ -94,8 +94,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 109c3a557a73
   (run 'hg update' to get a working copy)
   prefetching file contents
@@ -118,8 +118,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 109c3a557a73
   (run 'hg update' to get a working copy)
   prefetching file contents
@@ -149,8 +149,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 109c3a557a73
   1 local changesets published (?)
   (run 'hg update' to get a working copy)
--- a/tests/test-remotefilelog-repack-fast.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-remotefilelog-repack-fast.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1,10 +1,13 @@
 #require no-windows
 
   $ . "$TESTDIR/remotefilelog-library.sh"
-
+# devel.remotefilelog.ensurestart: reduce race condition with
+# waiton{repack/prefetch}
   $ cat >> $HGRCPATH <<EOF
   > [remotefilelog]
   > fastdatapack=True
+  > [devel]
+  > remotefilelog.ensurestart=True
   > EOF
 
   $ hg init master
@@ -53,7 +56,6 @@
   $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
   $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
   $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Test that the packs are readonly
@@ -62,7 +64,6 @@
   -r--r--r--     172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
   -r--r--r--    1074 b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
   -r--r--r--      72 b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
-  -rw-r--r--       0 repacklock
 
 # Test that the data in the new packs is accessible
   $ hg cat -r . x
@@ -86,7 +87,6 @@
   $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
   $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
   $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
   $ hg repack --traceback
@@ -96,7 +96,6 @@
   $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Verify all the file data is still available
@@ -116,7 +115,6 @@
   $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Run two repacks at once
@@ -140,7 +138,6 @@
   $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
   $ hg repack --background
@@ -152,7 +149,6 @@
   $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.datapack
   $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx
   $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Test debug commands
@@ -229,7 +225,6 @@
   bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack
   fb3aa57b22789ebcc45706c352e2d6af099c5816.dataidx
   fb3aa57b22789ebcc45706c352e2d6af099c5816.datapack
-  repacklock
   $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx
   
   x
--- a/tests/test-remotefilelog-repack.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-remotefilelog-repack.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1,6 +1,12 @@
 #require no-windows
 
   $ . "$TESTDIR/remotefilelog-library.sh"
+# devel.remotefilelog.ensurestart: reduce race condition with
+# waiton{repack/prefetch}
+  $ cat >> $HGRCPATH <<EOF
+  > [devel]
+  > remotefilelog.ensurestart=True
+  > EOF
 
   $ hg init master
   $ cd master
@@ -48,7 +54,6 @@
   $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
   $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
   $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Test that the packs are readonly
@@ -57,7 +62,6 @@
   -r--r--r--     172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
   -r--r--r--    1074 b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
   -r--r--r--      72 b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
-  -rw-r--r--       0 repacklock
 
 # Test that the data in the new packs is accessible
   $ hg cat -r . x
@@ -81,7 +85,6 @@
   $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
   $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
   $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # First assert that with --packsonly, the loose object will be ignored:
@@ -94,7 +97,6 @@
   $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
   $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
   $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
   $ hg repack --traceback
@@ -104,7 +106,6 @@
   $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Verify all the file data is still available
@@ -124,7 +125,6 @@
   $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Run two repacks at once
@@ -148,7 +148,6 @@
   $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
   $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
   $ hg repack --background
@@ -160,7 +159,6 @@
   $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.datapack
   $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx
   $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack
-  $TESTTMP/hgcache/master/packs/repacklock
   $TESTTMP/hgcache/repos
 
 # Test debug commands
@@ -237,7 +235,6 @@
   bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack
   fb3aa57b22789ebcc45706c352e2d6af099c5816.dataidx
   fb3aa57b22789ebcc45706c352e2d6af099c5816.datapack
-  repacklock
   $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx
   
   x
--- a/tests/test-remotefilelog-sparse.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-remotefilelog-sparse.t	Mon Oct 21 11:09:48 2019 -0400
@@ -58,8 +58,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 876b1317060d
   (run 'hg update' to get a working copy)
   prefetching file contents
--- a/tests/test-repo-compengines.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-repo-compengines.t	Mon Oct 21 11:09:48 2019 -0400
@@ -169,7 +169,7 @@
   > done
 
   $ $RUNTESTDIR/f -s zstd-*/.hg/store/data/*
-  zstd-level-1/.hg/store/data/a.i: size=4097
+  zstd-level-1/.hg/store/data/a.i: size=4114
   zstd-level-22/.hg/store/data/a.i: size=4091
   zstd-level-default/\.hg/store/data/a\.i: size=(4094|4102) (re)
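
The expected size for the level-1 revlog changed in this release; compressed
revlog sizes are sensitive to both the input and the zstd level, which is why
the test pins per-level sizes. The level effect is easy to reproduce with the
python-zstandard package Mercurial bundles under contrib/::

    import zstandard

    data = b'some repetitive revlog chunk\n' * 100
    for level in (1, 22):
        out = zstandard.ZstdCompressor(level=level).compress(data)
        print(level, len(out))  # higher levels usually emit fewer bytes
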
 
--- a/tests/test-resolve.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-resolve.t	Mon Oct 21 11:09:48 2019 -0400
@@ -210,12 +210,15 @@
   [1]
   $ hg up 0
   abort: outstanding merge conflicts
+  (use 'hg resolve' to resolve)
   [255]
   $ hg merge 2
   abort: outstanding merge conflicts
+  (use 'hg resolve' to resolve)
   [255]
   $ hg merge --force 2
   abort: outstanding merge conflicts
+  (use 'hg resolve' to resolve)
   [255]
 
 set up conflict-free merge
--- a/tests/test-revlog-ancestry.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-revlog-ancestry.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,9 +11,11 @@
 repo = hg.repository(u, b'test1', create=1)
 os.chdir('test1')
 
+
 def commit(text, time):
     repo.commit(text=text, date=b"%d 0" % time)
 
+
 def addcommit(name, time):
     f = open(name, 'wb')
     f.write(b'%s\n' % name)
@@ -21,12 +23,15 @@
     repo[None].add([name])
     commit(name, time)
 
+
 def update(rev):
     merge.update(repo, rev, branchmerge=False, force=True)
 
+
 def merge_(rev):
     merge.update(repo, rev, branchmerge=True, force=False)
 
+
 if __name__ == '__main__':
     addcommit(b"A", 0)
     addcommit(b"B", 1)
--- a/tests/test-revlog-raw.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-revlog-raw.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,24 +16,30 @@
 
 from mercurial.revlogutils import (
     deltas,
+    flagutil,
 )
 
 # TESTTMP is optional. This makes it convenient to run without run-tests.py
 tvfs = vfs.vfs(encoding.environ.get(b'TESTTMP', b'/tmp'))
 
 # Enable generaldelta otherwise revlog won't use delta as expected by the test
-tvfs.options = {b'generaldelta': True, b'revlogv1': True,
-                b'sparse-revlog': True}
+tvfs.options = {
+    b'generaldelta': True,
+    b'revlogv1': True,
+    b'sparse-revlog': True,
+}
 
 # The test wants to control whether to use delta explicitly, based on
 # "storedeltachains".
 revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self._storedeltachains
 
+
 def abort(msg):
     print('abort: %s' % msg)
     # Return 0 so run-tests.py could compare the output.
     sys.exit()
 
+
 # Register a revlog processor for flag EXTSTORED.
 #
 # It simply prepends a fixed header, and replaces '1' with 'i'. So it has
@@ -41,37 +47,45 @@
 # deltas.
 _extheader = b'E\n'
 
+
 def readprocessor(self, rawtext):
     # True: the returned text could be used to verify hash
-    text = rawtext[len(_extheader):].replace(b'i', b'1')
-    return text, True
+    text = rawtext[len(_extheader) :].replace(b'i', b'1')
+    return text, True, {}
 
-def writeprocessor(self, text):
+
+def writeprocessor(self, text, sidedata):
     # False: the returned rawtext shouldn't be used to verify hash
     rawtext = _extheader + text.replace(b'1', b'i')
     return rawtext, False
 
+
 def rawprocessor(self, rawtext):
     # False: do not verify hash. Only the content returned by "readprocessor"
     # can be used to verify hash.
     return False
 
-revlog.addflagprocessor(revlog.REVIDX_EXTSTORED,
-                        (readprocessor, writeprocessor, rawprocessor))
+
+flagutil.addflagprocessor(
+    revlog.REVIDX_EXTSTORED, (readprocessor, writeprocessor, rawprocessor)
+)
 
 # Utilities about reading and appending revlog
 
+
 def newtransaction():
     # A transaction is required to write revlogs
     report = lambda msg: None
     return transaction.transaction(report, tvfs, {'plain': tvfs}, b'journal')
 
+
 def newrevlog(name=b'_testrevlog.i', recreate=False):
     if recreate:
         tvfs.tryunlink(name)
     rlog = revlog.revlog(tvfs, name)
     return rlog
 
+
 def appendrev(rlog, text, tr, isext=False, isdelta=True):
     '''Append a revision. If isext is True, set the EXTSTORED flag so flag
     processor will be used (and rawtext is different from text). If isdelta is
@@ -95,6 +109,7 @@
         # Restore storedeltachains. It is always True, see revlog.__init__
         rlog._storedeltachains = True
 
+
 def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True):
     '''Copy revlog to destname using revlog.addgroup. Return the copied revlog.
 
@@ -108,6 +123,7 @@
     This exercises some revlog.addgroup (and revlog._addrevision(text=None))
     code path, which is not covered by "appendrev" alone.
     '''
+
     class dummychangegroup(object):
         @staticmethod
         def deltachunk(pnode):
@@ -123,10 +139,15 @@
                 deltaparent = min(0, parentrev)
             if not rlog.candelta(deltaparent, r):
                 deltaparent = -1
-            return {b'node': rlog.node(r), b'p1': pnode, b'p2': node.nullid,
-                    b'cs': rlog.node(rlog.linkrev(r)), b'flags': rlog.flags(r),
-                    b'deltabase': rlog.node(deltaparent),
-                    b'delta': rlog.revdiff(deltaparent, r)}
+            return {
+                b'node': rlog.node(r),
+                b'p1': pnode,
+                b'p2': node.nullid,
+                b'cs': rlog.node(rlog.linkrev(r)),
+                b'flags': rlog.flags(r),
+                b'deltabase': rlog.node(deltaparent),
+                b'delta': rlog.revdiff(deltaparent, r),
+            }
 
         def deltaiter(self):
             chain = None
@@ -151,6 +172,7 @@
     dlog.addgroup(dummydeltas, linkmap, tr)
     return dlog
 
+
 def lowlevelcopy(rlog, tr, destname=b'_destrevlog.i'):
     '''Like addgroupcopy, but use the low level revlog._addrevision directly.
 
@@ -161,13 +183,18 @@
         p1 = rlog.node(r - 1)
         p2 = node.nullid
         if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED):
-            text = rlog.revision(r, raw=True)
+            text = rlog.rawdata(r)
             cachedelta = None
         else:
             # deltaparent cannot have EXTSTORED flag.
-            deltaparent = max([-1] +
-                              [p for p in range(r)
-                               if rlog.flags(p) & revlog.REVIDX_EXTSTORED == 0])
+            deltaparent = max(
+                [-1]
+                + [
+                    p
+                    for p in range(r)
+                    if rlog.flags(p) & revlog.REVIDX_EXTSTORED == 0
+                ]
+            )
             text = None
             cachedelta = (deltaparent, rlog.revdiff(deltaparent, r))
         flags = rlog.flags(r)
@@ -176,8 +203,9 @@
             ifh = dlog.opener(dlog.indexfile, b'a+')
             if not dlog._inline:
                 dfh = dlog.opener(dlog.datafile, b'a+')
-            dlog._addrevision(rlog.node(r), text, tr, r, p1, p2, flags,
-                              cachedelta, ifh, dfh)
+            dlog._addrevision(
+                rlog.node(r), text, tr, r, p1, p2, flags, cachedelta, ifh, dfh
+            )
         finally:
             if dfh is not None:
                 dfh.close()
@@ -185,8 +213,10 @@
                 ifh.close()
     return dlog
 
+
 # Utilities to generate revisions for testing
 
+
 def genbits(n):
     '''Given a number n, generate (2 ** (n * 2) + 1) numbers in range(2 ** n).
     i.e. the generated numbers have a width of n bits.
@@ -218,10 +248,12 @@
         x = y
         yield x
 
+
 def gentext(rev):
     '''Given a revision number, generate dummy text'''
     return b''.join(b'%d\n' % j for j in range(-1, rev % 5))
 
+
 def writecases(rlog, tr):
     '''Write some revisions interesting to the test.
 
@@ -261,14 +293,14 @@
 
         # Verify text, rawtext, and rawsize
         if isext:
-            rawtext = writeprocessor(None, text)[0]
+            rawtext = writeprocessor(None, text, {})[0]
         else:
             rawtext = text
         if rlog.rawsize(rev) != len(rawtext):
             abort('rev %d: wrong rawsize' % rev)
         if rlog.revision(rev, raw=False) != text:
             abort('rev %d: wrong text' % rev)
-        if rlog.revision(rev, raw=True) != rawtext:
+        if rlog.rawdata(rev) != rawtext:
             abort('rev %d: wrong rawtext' % rev)
         result.append((text, rawtext))
 
@@ -280,8 +312,10 @@
             abort('rev %d: isext is ineffective' % rev)
     return result
 
+
 # Main test and checking
 
+
 def checkrevlog(rlog, expected):
     '''Check if revlog has expected contents. expected is [(text, rawtext)]'''
     # Test using different access orders. This could expose some issues
@@ -293,23 +327,28 @@
                 nlog = newrevlog()
                 for rev in revorder:
                     for raw in raworder:
-                        t = nlog.revision(rev, raw=raw)
+                        if raw:
+                            t = nlog.rawdata(rev)
+                        else:
+                            t = nlog.revision(rev)
                         if t != expected[rev][int(raw)]:
-                            abort('rev %d: corrupted %stext'
-                                  % (rev, raw and 'raw' or ''))
+                            abort(
+                                'rev %d: corrupted %stext'
+                                % (rev, raw and 'raw' or '')
+                            )
+
 
 slicingdata = [
-    ([0, 1, 2, 3, 55, 56, 58, 59, 60],
-     [[0, 1], [2], [58], [59, 60]],
-     10),
-    ([0, 1, 2, 3, 55, 56, 58, 59, 60],
-     [[0, 1], [2], [58], [59, 60]],
-     10),
-    ([-1, 0, 1, 2, 3, 55, 56, 58, 59, 60],
-     [[-1, 0, 1], [2], [58], [59, 60]],
-     10),
+    ([0, 1, 2, 3, 55, 56, 58, 59, 60], [[0, 1], [2], [58], [59, 60]], 10),
+    ([0, 1, 2, 3, 55, 56, 58, 59, 60], [[0, 1], [2], [58], [59, 60]], 10),
+    (
+        [-1, 0, 1, 2, 3, 55, 56, 58, 59, 60],
+        [[-1, 0, 1], [2], [58], [59, 60]],
+        10,
+    ),
 ]
 
+
 def slicingtest(rlog):
     oldmin = rlog._srmingapsize
     try:
@@ -329,9 +368,11 @@
     finally:
         rlog._srmingapsize = oldmin
 
+
 def md5sum(s):
     return hashlib.md5(s).digest()
 
+
 def _maketext(*coord):
     """create piece of text according to range of integers
 
@@ -344,6 +385,7 @@
         pieces.append(b'\n'.join(p))
     return b'\n'.join(pieces) + b'\n'
 
+
 data = [
     _maketext((0, 120), (456, 60)),
     _maketext((0, 120), (345, 60)),
@@ -379,13 +421,17 @@
     _maketext((0, 120), (60, 60), (618, 30), (398, 40), (158, 10)),
 ]
 
+
 def makesnapshot(tr):
     rl = newrevlog(name=b'_snaprevlog3.i', recreate=True)
     for i in data:
         appendrev(rl, i, tr)
     return rl
 
+
 snapshots = [-1, 0, 6, 8, 11, 17, 19, 21, 25, 30]
+
+
 def issnapshottest(rlog):
     result = []
     if rlog.issnapshot(-1):
@@ -398,8 +444,11 @@
         print('  expected: %s' % snapshots)
         print('  got:      %s' % result)
 
+
 snapshotmapall = {0: [6, 8, 11, 17, 19, 25], 8: [21], -1: [0, 30]}
 snapshotmap15 = {0: [17, 19, 25], 8: [21], -1: [30]}
+
+
 def findsnapshottest(rlog):
     resultall = collections.defaultdict(list)
     deltas._findsnapshots(rlog, resultall, 0)
@@ -416,6 +465,7 @@
         print('  expected: %s' % snapshotmap15)
         print('  got:      %s' % result15)
 
+
 def maintest():
     with newtransaction() as tr:
         rl = newrevlog(recreate=True)
@@ -445,6 +495,7 @@
         findsnapshottest(rl5)
         print('findsnapshot test passed')
 
+
 try:
     maintest()
 except Exception as ex:
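
These hunks track two interface changes exercised by the raw-revlog test:
flag processors are now registered through ``revlogutils.flagutil``, with
read processors returning an extra side-data mapping and write processors
accepting one, and raw revision text is fetched via ``rlog.rawdata(rev)``
rather than ``rlog.revision(rev, raw=True)``. The new processor shape,
distilled from the test itself::

    def readproc(self, rawtext):
        # (text, validatehash, sidedata); the sidedata dict is the new part
        return rawtext, True, {}

    def writeproc(self, text, sidedata):
        # now receives the sidedata mapping alongside the text
        return text, False

    def rawproc(self, rawtext):
        # whether rawtext itself can be used to verify the hash
        return False

    flagutil.addflagprocessor(
        revlog.REVIDX_EXTSTORED, (readproc, writeproc, rawproc)
    )
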
--- a/tests/test-revset.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-revset.t	Mon Oct 21 11:09:48 2019 -0400
@@ -2003,6 +2003,7 @@
   4:ffff85cff0ff78504fcdc3c0bc10de0c65379249 ffff8
   2147483647:ffffffffffffffffffffffffffffffffffffffff fffff
   $ hg debugobsolete fffbae3886c8fbb2114296380d276fd37715d571
+  1 new obsolescence markers
   obsoleted 1 changesets
 
   $ hg debugrevspec 'fff'
--- a/tests/test-run-tests.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-run-tests.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,11 +8,13 @@
 import doctest
 import os
 import re
+
 # this is a hack to make sure no escape characters are inserted into the output
 if 'TERM' in os.environ:
     del os.environ['TERM']
 run_tests = __import__('run-tests')
 
+
 def prn(ex):
     m = ex.args[0]
     if isinstance(m, str):
@@ -20,6 +22,7 @@
     else:
         print(m.decode('utf-8'))
 
+
 def lm(expected, output):
     r"""check if output matches expected
 
@@ -35,10 +38,12 @@
         ... except AssertionError as ex: prn(ex)
         single backslash or unknown char
     """
-    assert (expected.endswith(b'\n')
-            and output.endswith(b'\n')), 'missing newline'
-    assert not re.search(br'[^ \w\\/\r\n()*?]', expected + output), (
-           b'single backslash or unknown char')
+    assert expected.endswith(b'\n') and output.endswith(
+        b'\n'
+    ), 'missing newline'
+    assert not re.search(
+        br'[^ \w\\/\r\n()*?]', expected + output
+    ), b'single backslash or unknown char'
     test = run_tests.TTest(b'test-run-test.t', b'.', b'.')
     match, exact = test.linematch(expected, output)
     if isinstance(match, str):
@@ -46,7 +51,8 @@
     elif isinstance(match, bytes):
         return 'special: ' + match.decode('utf-8')
     else:
-        return bool(match) # do not return match object
+        return bool(match)  # do not return match object
+
 
 def wintests():
     r"""test matching like running on windows
@@ -77,6 +83,7 @@
     """
     pass
 
+
 def otherostests():
     r"""test matching like running on non-windows os
 
@@ -104,5 +111,6 @@
     """
     pass
 
+
 if __name__ == '__main__':
     doctest.testmod()
--- a/tests/test-run-tests.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-run-tests.t	Mon Oct 21 11:09:48 2019 -0400
@@ -403,18 +403,21 @@
   <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
     <testcase name="test-success.t" time="*"/> (glob)
     <testcase name="test-failure-unicode.t" time="*"> (glob)
-      <failure message="output changed" type="output-mismatch">
-  <![CDATA[--- $TESTTMP/test-failure-unicode.t
+      <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure-unicode.t (py38 !)
+      <failure message="output changed" type="output-mismatch"> (no-py38 !)
+  <![CDATA[--- $TESTTMP/test-failure-unicode.t (no-py38 !)
   +++ $TESTTMP/test-failure-unicode.t.err
   @@ -1,2 +1,2 @@
      $ echo babar\xce\xb1 (esc)
   -  l\xce\xb5\xce\xb5t (esc)
   +  babar\xce\xb1 (esc)
-  ]]>    </failure>
+  ]]></failure> (py38 !)
+  ]]>    </failure> (no-py38 !)
     </testcase>
     <testcase name="test-failure.t" time="*"> (glob)
-      <failure message="output changed" type="output-mismatch">
-  <![CDATA[--- $TESTTMP/test-failure.t
+      <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure.t (py38 !)
+      <failure message="output changed" type="output-mismatch"> (no-py38 !)
+  <![CDATA[--- $TESTTMP/test-failure.t (no-py38 !)
   +++ $TESTTMP/test-failure.t.err
   @@ -1,5 +1,5 @@
      $ echo babar
@@ -423,7 +426,8 @@
    This is a noop statement so that
    this test is still more bytes than success.
    pad pad pad pad............................................................
-  ]]>    </failure>
+  ]]></failure> (py38 !)
+  ]]>    </failure> (no-py38 !)
     </testcase>
   </testsuite>
 
@@ -1084,8 +1088,9 @@
   <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
     <testcase name="test-success.t" time="*"/> (glob)
     <testcase name="test-skip.t">
-      <skipped>
-  <![CDATA[missing feature: nail clipper]]>    </skipped>
+      <skipped><![CDATA[missing feature: nail clipper]]></skipped> (py38 !)
+      <skipped> (no-py38 !)
+  <![CDATA[missing feature: nail clipper]]>    </skipped> (no-py38 !)
     </testcase>
   </testsuite>
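
The new ``(py38 !)``/``(no-py38 !)`` output variants exist because the XML
report serializes differently starting with Python 3.8, which stopped adding
indentation around text and CDATA nodes in pretty-printed DOM output. The
version difference can be observed directly (illustrative, assuming the
report is built with ``xml.dom.minidom``, as the differing whitespace
suggests)::

    import xml.dom.minidom as minidom

    doc = minidom.Document()
    failure = doc.createElement('failure')
    failure.appendChild(doc.createCDATASection('--- diff here'))
    doc.appendChild(failure)
    # On 3.8+ the CDATA hugs its parent tags; older versions indent it.
    print(doc.toprettyxml(indent='  '))
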
 
--- a/tests/test-rust-ancestor.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-rust-ancestor.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,6 +9,7 @@
 
 try:
     from mercurial import rustext
+
     rustext.__name__  # trigger immediate actual import
 except ImportError:
     rustext = None
@@ -45,12 +46,14 @@
     b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1'
     b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00'
     b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-    )
+)
 
 
-@unittest.skipIf(rustext is None or cparsers is None,
-                 "rustext or the C Extension parsers module "
-                 "ancestor relies on is not available")
+@unittest.skipIf(
+    rustext is None or cparsers is None,
+    "rustext or the C Extension parsers module "
+    "ancestor relies on is not available",
+)
 class rustancestorstest(unittest.TestCase):
     """Test the correctness of binding to Rust code.
 
@@ -70,11 +73,10 @@
     def testiteratorrevlist(self):
         idx = self.parseindex()
         # checking test assumption about the index binary data:
-        self.assertEqual({i: (r[5], r[6]) for i, r in enumerate(idx)},
-                         {0: (-1, -1),
-                          1: (0, -1),
-                          2: (1, -1),
-                          3: (2, -1)})
+        self.assertEqual(
+            {i: (r[5], r[6]) for i, r in enumerate(idx)},
+            {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (2, -1)},
+        )
         ait = AncestorsIterator(idx, [3], 0, True)
         self.assertEqual([r for r in ait], [3, 2, 1, 0])
 
@@ -84,11 +86,10 @@
     def testlazyancestors(self):
         idx = self.parseindex()
         start_count = sys.getrefcount(idx)  # should be 2 (see Python doc)
-        self.assertEqual({i: (r[5], r[6]) for i, r in enumerate(idx)},
-                         {0: (-1, -1),
-                          1: (0, -1),
-                          2: (1, -1),
-                          3: (2, -1)})
+        self.assertEqual(
+            {i: (r[5], r[6]) for i, r in enumerate(idx)},
+            {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (2, -1)},
+        )
         lazy = LazyAncestors(idx, [3], 0, True)
         # we have two more references to the index:
         # - in its inner iterator for __contains__ and __bool__
@@ -148,9 +149,9 @@
         self.assertEqual(list(ait), [3, 2, 1, 0])
 
     def testgrapherror(self):
-        data = (data_non_inlined[:64 + 27] +
-                b'\xf2' +
-                data_non_inlined[64 + 28:])
+        data = (
+            data_non_inlined[: 64 + 27] + b'\xf2' + data_non_inlined[64 + 28 :]
+        )
         idx = cparsers.parse_index2(data, False)[0]
         with self.assertRaises(rustext.GraphError) as arc:
             AncestorsIterator(idx, [1], -1, False)
@@ -170,6 +171,8 @@
         idx = self.parseindex()
         self.assertEqual(dagop.headrevs(idx, [1, 2, 3]), {3})
 
+
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
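
For reference, the ancestors iteration these tests pin down on their
four-revision linear index: starting from head 3, with stop-revision 0 and
the initial revisions included (the trailing ``True``), the Rust iterator
yields every revision::

    ait = AncestorsIterator(idx, [3], 0, True)
    assert list(ait) == [3, 2, 1, 0]
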
--- a/tests/test-rust-discovery.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-rust-discovery.py	Mon Oct 21 11:09:48 2019 -0400
@@ -1,16 +1,9 @@
 from __future__ import absolute_import
 import unittest
 
-try:
-    from mercurial import rustext
-    rustext.__name__  # trigger immediate actual import
-except ImportError:
-    rustext = None
-else:
-    # this would fail already without appropriate ancestor.__package__
-    from mercurial.rustext.discovery import (
-        PartialDiscovery,
-    )
+from mercurial import policy
+
+PartialDiscovery = policy.importrust('discovery', member='PartialDiscovery')
 
 try:
     from mercurial.cext import parsers as cparsers
@@ -36,12 +29,25 @@
     b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1'
     b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00'
     b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-    )
+)
+
+
+class fakechangelog(object):
+    def __init__(self, idx):
+        self.index = idx
 
 
-@unittest.skipIf(rustext is None or cparsers is None,
-                 "rustext or the C Extension parsers module "
-                 "discovery relies on is not available")
+class fakerepo(object):
+    def __init__(self, idx):
+        """Just make so that self.changelog.index is the given idx."""
+        self.changelog = fakechangelog(idx)
+
+
+@unittest.skipIf(
+    PartialDiscovery is None or cparsers is None,
+    "rustext or the C Extension parsers module "
+    "discovery relies on is not available",
+)
 class rustdiscoverytest(unittest.TestCase):
     """Test the correctness of binding to Rust code.
 
@@ -57,18 +63,19 @@
     def parseindex(self):
         return cparsers.parse_index2(data_non_inlined, False)[0]
 
+    def repo(self):
+        return fakerepo(self.parseindex())
+
     def testindex(self):
         idx = self.parseindex()
         # checking our assumptions about the index binary data:
-        self.assertEqual({i: (r[5], r[6]) for i, r in enumerate(idx)},
-                         {0: (-1, -1),
-                          1: (0, -1),
-                          2: (1, -1),
-                          3: (2, -1)})
+        self.assertEqual(
+            {i: (r[5], r[6]) for i, r in enumerate(idx)},
+            {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (2, -1)},
+        )
 
     def testaddcommonsmissings(self):
-        idx = self.parseindex()
-        disco = PartialDiscovery(idx, [3])
+        disco = PartialDiscovery(self.repo(), [3], True)
         self.assertFalse(disco.hasinfo())
         self.assertFalse(disco.iscomplete())
 
@@ -83,29 +90,31 @@
         self.assertEqual(disco.commonheads(), {1})
 
     def testaddmissingsstats(self):
-        idx = self.parseindex()
-        disco = PartialDiscovery(idx, [3])
+        disco = PartialDiscovery(self.repo(), [3], True)
         self.assertIsNone(disco.stats()['undecided'], None)
 
         disco.addmissings([2])
         self.assertEqual(disco.stats()['undecided'], 2)
 
     def testaddinfocommonfirst(self):
-        idx = self.parseindex()
-        disco = PartialDiscovery(idx, [3])
+        disco = PartialDiscovery(self.repo(), [3], True)
         disco.addinfo([(1, True), (2, False)])
         self.assertTrue(disco.hasinfo())
         self.assertTrue(disco.iscomplete())
         self.assertEqual(disco.commonheads(), {1})
 
     def testaddinfomissingfirst(self):
-        idx = self.parseindex()
-        disco = PartialDiscovery(idx, [3])
+        disco = PartialDiscovery(self.repo(), [3], True)
         disco.addinfo([(2, False), (1, True)])
         self.assertTrue(disco.hasinfo())
         self.assertTrue(disco.iscomplete())
         self.assertEqual(disco.commonheads(), {1})
 
+    def testinitnorandom(self):
+        PartialDiscovery(self.repo(), [3], True, randomize=False)
+
+
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
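
``policy.importrust`` returns ``None`` when the Rust extension is not built,
which is why the skip condition now tests ``PartialDiscovery is None``
instead of catching an ``ImportError``. The guarded construction pattern used
by the test::

    from mercurial import policy

    PartialDiscovery = policy.importrust('discovery', member='PartialDiscovery')
    if PartialDiscovery is not None:
        # the repo shim only needs a .changelog.index (hence fakerepo, with
        # idx from parseindex); randomize=False makes sampling deterministic
        disco = PartialDiscovery(fakerepo(idx), [3], True, randomize=False)
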
--- a/tests/test-server-view.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-server-view.t	Mon Oct 21 11:09:48 2019 -0400
@@ -50,7 +50,12 @@
   $ hg -R test --config experimental.extra-filter-revs='not public()' debugupdatecache
   $ ls -1 test/.hg/cache/
   branch2-base%89c45d2fa07e
+  branch2-immutable%89c45d2fa07e
   branch2-served
+  branch2-served%89c45d2fa07e
+  branch2-served.hidden%89c45d2fa07e
+  branch2-visible%89c45d2fa07e
+  branch2-visible-hidden%89c45d2fa07e
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
--- a/tests/test-setdiscovery.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-setdiscovery.t	Mon Oct 21 11:09:48 2019 -0400
@@ -64,7 +64,7 @@
   comparing with b
   query 1; heads
   searching for changes
-  all local heads known remotely
+  all local changesets known remotely
   elapsed time:  * seconds (glob)
   heads summary:
     total common heads:          2
@@ -86,7 +86,7 @@
   comparing with b
   query 1; heads
   searching for changes
-  all local heads known remotely
+  all local changesets known remotely
   elapsed time:  * seconds (glob)
   heads summary:
     total common heads:          1
@@ -968,7 +968,7 @@
   updating to branch b
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
-  $ hg -R a debugdiscovery b --debug --verbose --config progress.debug=true
+  $ hg -R a debugdiscovery b --debug --verbose --config progress.debug=true --config devel.discovery.randomize=false
   comparing with b
   query 1; heads
   searching for changes
@@ -980,13 +980,14 @@
   query 3; still undecided: 980, sample size is: 200
   sampling from both directions
   searching: 4 queries
-  query 4; still undecided: 435, sample size is: 210 (no-py3 !)
-  query 4; still undecided: 430, sample size is: 210 (py3 !)
+  query 4; still undecided: 497, sample size is: 210
   sampling from both directions
   searching: 5 queries
-  query 5; still undecided: 185, sample size is: 185 (no-py3 !)
-  query 5; still undecided: 187, sample size is: 187 (py3 !)
-  5 total queries in *.????s (glob)
+  query 5; still undecided: 285, sample size is: 220
+  sampling from both directions
+  searching: 6 queries
+  query 6; still undecided: 63, sample size is: 63
+  6 total queries in *.????s (glob)
   elapsed time:  * seconds (glob)
   heads summary:
     total common heads:          1
@@ -1095,16 +1096,9 @@
 give 'all remote heads known locally' without checking the remaining heads -
 fixed in 86c35b7ae300:
 
-  $ cat >> $TESTTMP/unrandomsample.py << EOF
-  > import random
-  > def sample(population, k):
-  >     return sorted(population)[:k]
-  > random.sample = sample
-  > EOF
-
   $ cat >> r1/.hg/hgrc << EOF
-  > [extensions]
-  > unrandomsample = $TESTTMP/unrandomsample.py
+  > [devel]
+  > discovery.randomize = False
   > EOF
 
   $ hg -R r1 outgoing r2 -T'{rev} ' --config extensions.blackbox= \
--- a/tests/test-shelve.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-shelve.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1239,6 +1239,7 @@
   > y
   > EOF
   unshelving change 'default'
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
   rebasing shelved changes
   diff --git a/d b/d
   new file mode 100644
@@ -1250,6 +1251,10 @@
   record this change to 'd'?
   (enter ? for help) [Ynesfdaq?] y
   
+
+  $ hg status -v
+  A c
+  A d
   $ ls
   b
   c
@@ -1267,15 +1272,21 @@
   > B
   > C
   > EOF
-  $ hg shelve
+  $ echo > garbage
+  $ hg st
+  M foo
+  ? garbage
+  $ hg shelve --unknown
   shelved as default
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ cat foo
   B
   $ hg unshelve -i <<EOF
   > y
   > y
   > n
+  > y
+  > y
   > EOF
   unshelving change 'default'
   rebasing shelved changes
@@ -1287,15 +1298,28 @@
   @@ -1,1 +1,2 @@
   +A
    B
-  record change 1/2 to 'foo'?
+  record change 1/3 to 'foo'?
   (enter ? for help) [Ynesfdaq?] y
   
   @@ -1,1 +2,2 @@
    B
   +C
-  record change 2/2 to 'foo'?
+  record change 2/3 to 'foo'?
   (enter ? for help) [Ynesfdaq?] n
   
+  diff --git a/garbage b/garbage
+  new file mode 100644
+  examine changes to 'garbage'?
+  (enter ? for help) [Ynesfdaq?] y
+  
+  @@ -0,0 +1,1 @@
+  +
+  record change 3/3 to 'garbage'?
+  (enter ? for help) [Ynesfdaq?] y
+  
+  $ hg st
+  M foo
+  ? garbage
   $ cat foo
   A
   B
@@ -1347,17 +1371,44 @@
   $ hg resolve -m bar1 bar2
   (no more unresolved files)
   continue: hg unshelve --continue
+
+-- using --continue with --interactive should throw an error
+  $ hg unshelve --continue -i
+  abort: cannot use both continue and interactive
+  [255]
+
   $ cat bar1
   A
   B
   C
-  $ hg unshelve --continue -i <<EOF
+
+#if stripbased
+  $ hg log -r 3:: -G
+  @  changeset:   5:f1d5f53e397b
+  |  tag:         tip
+  |  parent:      3:e28fd7fa7938
+  |  user:        shelve@localhost
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     changes to: add A to bars
+  |
+  | @  changeset:   4:fe451a778c81
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     add C to bars
+  |
+  o  changeset:   3:e28fd7fa7938
+  |  user:        test
+  ~  date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     add A to bars
+  
+#endif
+
+  $ hg unshelve --continue <<EOF
   > y
   > y
   > y
-  > y
+  > n
   > EOF
-  unshelving change 'default-01'
   diff --git a/bar1 b/bar1
   1 hunks, 1 lines changed
   examine changes to 'bar1'?
@@ -1380,6 +1431,51 @@
   +B
    C
   record change 2/2 to 'bar2'?
+  (enter ? for help) [Ynesfdaq?] n
+  
+  unshelve of 'default-01' complete
+
+#if stripbased
+  $ hg log -r 3:: -G
+  @  changeset:   4:fe451a778c81
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     add C to bars
+  |
+  o  changeset:   3:e28fd7fa7938
+  |  user:        test
+  ~  date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     add A to bars
+  
+#endif
+
+  $ hg unshelve --continue
+  abort: no unshelve in progress
+  [255]
+
+  $ hg shelve --list
+  default-01      (*)* changes to: add A to bars (glob)
+  default         (*)* changes to: add B to foo (glob)
+  $ hg unshelve -n default-01 -i <<EOF
+  > y
+  > y
+  > EOF
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
+  rebasing shelved changes
+  diff --git a/bar2 b/bar2
+  1 hunks, 1 lines changed
+  examine changes to 'bar2'?
   (enter ? for help) [Ynesfdaq?] y
   
-  unshelve of 'default-01' complete
+  @@ -1,2 +1,3 @@
+   A
+  +B
+   C
+  record this change to 'bar2'?
+  (enter ? for help) [Ynesfdaq?] y
+  
+-- test for --interactive --keep
+  $ hg unshelve -i --keep
+  abort: --keep on --interactive is not yet supported
+  [255]
--- a/tests/test-shelve2.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-shelve2.t	Mon Oct 21 11:09:48 2019 -0400
@@ -112,6 +112,7 @@
   shelved as default
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg debugobsolete `hg log -r 0e067c57feba -T '{node}'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg unshelve
   unshelving change 'default'
--- a/tests/test-show-work.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-show-work.t	Mon Oct 21 11:09:48 2019 -0400
@@ -235,6 +235,7 @@
   > evolution=createmarkers
   > EOF
   $ hg debugobsolete `hg log -r 'desc("commit 2")' -T "{node}"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg show work --color=debug
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-sidedata.t	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,102 @@
+==========================================================
+Test file dedicated to checking side-data related behavior
+==========================================================
+
+Check data can be written/read from sidedata
+============================================
+
+  $ cat << EOF >> $HGRCPATH
+  > [extensions]
+  > testsidedata=$TESTDIR/testlib/ext-sidedata.py
+  > EOF
+
+  $ hg init test-sidedata --config format.exp-use-side-data=yes
+  $ cd test-sidedata
+  $ echo aaa > a
+  $ hg add a
+  $ hg commit -m a --traceback
+  $ echo aaa > b
+  $ hg add b
+  $ hg commit -m b
+  $ echo xxx >> a
+  $ hg commit -m aa
+
+  $ hg debugsidedata -c 0
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg debugsidedata -c 1 -v
+  2 sidedata entries
+   entry-0001 size 4
+    '\x00\x00\x006'
+   entry-0002 size 32
+    '\x98\t\xf9\xc4v\xf0\xc5P\x90\xf7wRf\xe8\xe27e\xfc\xc1\x93\xa4\x96\xd0\x1d\x97\xaaG\x1d\xd7t\xfa\xde'
+  $ hg debugsidedata -m 2
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg debugsidedata a 1
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+
+Check upgrade behavior
+======================
+
+Right now, sidedata has no upgrade support
+
+Check that we can upgrade to sidedata
+-------------------------------------
+
+  $ hg init up-no-side-data --config format.exp-use-side-data=no
+  $ hg debugformat -v -R up-no-side-data
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:           no     no      no
+  copies-sdc:         no     no      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zlib   zlib    zlib
+  compression-level: default default default
+  $ hg debugformat -v -R up-no-side-data --config format.exp-use-side-data=yes
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:           no    yes      no
+  copies-sdc:         no     no      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zlib   zlib    zlib
+  compression-level: default default default
+  $ hg debugupgraderepo -R up-no-side-data --config format.exp-use-side-data=yes > /dev/null
+
+Check that we can downgrade from sidedata
+-----------------------------------------
+
+  $ hg init up-side-data --config format.exp-use-side-data=yes
+  $ hg debugformat -v -R up-side-data
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:          yes     no      no
+  copies-sdc:         no     no      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zlib   zlib    zlib
+  compression-level: default default default
+  $ hg debugformat -v -R up-side-data --config format.exp-use-side-data=no
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:          yes     no      no
+  copies-sdc:         no     no      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zlib   zlib    zlib
+  compression-level: default default default
+  $ hg debugupgraderepo -R up-side-data --config format.exp-use-side-data=no > /dev/null
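
The two entries reported by ``hg debugsidedata`` match what the
``ext-sidedata.py`` helper stores per revision: one 4-byte value and one
32-byte value. Assuming, as the sizes and the ``'\x00\x00\x006'`` sample
suggest, that these are the revision text length and a sha256 digest, the
stored mapping is roughly::

    # hypothetical reconstruction of the test extension's side data;
    # key numbers mirror the entry-0001/entry-0002 labels above
    import hashlib
    import struct

    def sidedata_for(text):
        return {
            1: struct.pack('>I', len(text)),   # entry-0001, 4 bytes
            2: hashlib.sha256(text).digest(),  # entry-0002, 32 bytes
        }
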
--- a/tests/test-simplekeyvaluefile.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-simplekeyvaluefile.py	Mon Oct 21 11:09:48 2019 -0400
@@ -8,6 +8,7 @@
     scmutil,
 )
 
+
 class mockfile(object):
     def __init__(self, name, fs):
         self.name = name
@@ -25,6 +26,7 @@
     def read(self):
         return self.fs.contents[self.name]
 
+
 class mockvfs(object):
     def __init__(self):
         self.contents = {}
@@ -39,6 +41,7 @@
     def __call__(self, path, mode, atomictemp):
         return mockfile(path, self)
 
+
 class testsimplekeyvaluefile(unittest.TestCase):
     def setUp(self):
         self.vfs = mockvfs()
@@ -46,21 +49,25 @@
     def testbasicwritingiandreading(self):
         dw = {b'key1': b'value1', b'Key2': b'value2'}
         scmutil.simplekeyvaluefile(self.vfs, b'kvfile').write(dw)
-        self.assertEqual(sorted(self.vfs.read(b'kvfile').split(b'\n')),
-                         [b'', b'Key2=value2', b'key1=value1'])
+        self.assertEqual(
+            sorted(self.vfs.read(b'kvfile').split(b'\n')),
+            [b'', b'Key2=value2', b'key1=value1'],
+        )
         dr = scmutil.simplekeyvaluefile(self.vfs, b'kvfile').read()
         self.assertEqual(dr, dw)
 
     if not getattr(unittest.TestCase, 'assertRaisesRegex', False):
         # Python 3.7 deprecates the regex*p* version, but 2.7 lacks
         # the regex version.
-        assertRaisesRegex = (# camelcase-required
-            unittest.TestCase.assertRaisesRegexp)
+        assertRaisesRegex = (  # camelcase-required
+            unittest.TestCase.assertRaisesRegexp
+        )
 
     def testinvalidkeys(self):
         d = {b'0key1': b'value1', b'Key2': b'value2'}
-        with self.assertRaisesRegex(error.ProgrammingError,
-                                     'keys must start with a letter.*'):
+        with self.assertRaisesRegex(
+            error.ProgrammingError, 'keys must start with a letter.*'
+        ):
             scmutil.simplekeyvaluefile(self.vfs, b'kvfile').write(d)
 
         d = {b'key1@': b'value1', b'Key2': b'value2'}
@@ -69,22 +76,25 @@
 
     def testinvalidvalues(self):
         d = {b'key1': b'value1', b'Key2': b'value2\n'}
-        with self.assertRaisesRegex(error.ProgrammingError,  'invalid val.*'):
+        with self.assertRaisesRegex(error.ProgrammingError, 'invalid val.*'):
             scmutil.simplekeyvaluefile(self.vfs, b'kvfile').write(d)
 
     def testcorruptedfile(self):
         self.vfs.contents[b'badfile'] = b'ababagalamaga\n'
-        with self.assertRaisesRegex(error.CorruptedState,
-                                     'dictionary.*element.*'):
+        with self.assertRaisesRegex(
+            error.CorruptedState, 'dictionary.*element.*'
+        ):
             scmutil.simplekeyvaluefile(self.vfs, b'badfile').read()
 
     def testfirstline(self):
         dw = {b'key1': b'value1'}
         scmutil.simplekeyvaluefile(self.vfs, b'fl').write(dw, firstline=b'1.0')
         self.assertEqual(self.vfs.read(b'fl'), b'1.0\nkey1=value1\n')
-        dr = scmutil.simplekeyvaluefile(
-            self.vfs, b'fl').read(firstlinenonkeyval=True)
+        dr = scmutil.simplekeyvaluefile(self.vfs, b'fl').read(
+            firstlinenonkeyval=True
+        )
         self.assertEqual(dr, {b'__firstline': b'1.0', b'key1': b'value1'})
 
+
 if __name__ == "__main__":
     silenttestrunner.main(__name__)
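
The format under test stores one ``key=value`` pair per line, optionally
preceded by a free-form first line; the round trip exercised by
``testfirstline`` is::

    kv = scmutil.simplekeyvaluefile(vfs, b'fl')
    kv.write({b'key1': b'value1'}, firstline=b'1.0')
    # on disk: b'1.0\nkey1=value1\n'
    assert kv.read(firstlinenonkeyval=True) == {
        b'__firstline': b'1.0',
        b'key1': b'value1',
    }
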
--- a/tests/test-simplemerge.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-simplemerge.py	Mon Oct 21 11:09:48 2019 -0400
@@ -22,9 +22,7 @@
     util,
 )
 
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 TestCase = unittest.TestCase
 # bzr compatible interface, for the tests
@@ -34,25 +32,34 @@
     Given BASE, OTHER, THIS, tries to produce a combined text
     incorporating the changes from both BASE->OTHER and BASE->THIS.
     All three will typically be sequences of lines."""
+
     def __init__(self, base, a, b):
         basetext = b'\n'.join([i.strip(b'\n') for i in base] + [b''])
         atext = b'\n'.join([i.strip(b'\n') for i in a] + [b''])
         btext = b'\n'.join([i.strip(b'\n') for i in b] + [b''])
-        if (stringutil.binary(basetext) or stringutil.binary(atext)
-            or stringutil.binary(btext)):
+        if (
+            stringutil.binary(basetext)
+            or stringutil.binary(atext)
+            or stringutil.binary(btext)
+        ):
             raise error.Abort(b"don't know how to merge binary files")
-        simplemerge.Merge3Text.__init__(self, basetext, atext, btext,
-                                        base, a, b)
+        simplemerge.Merge3Text.__init__(
+            self, basetext, atext, btext, base, a, b
+        )
+
 
 CantReprocessAndShowBase = simplemerge.CantReprocessAndShowBase
 
+
 def split_lines(t):
     return util.stringio(t).readlines()
 
+
 ############################################################
 # test case data from the gnu diffutils manual
 # common base
-TZU = split_lines(b"""     The Nameless is the origin of Heaven and Earth;
+TZU = split_lines(
+    b"""     The Nameless is the origin of Heaven and Earth;
      The named is the mother of all things.
 
      Therefore let there always be non-being,
@@ -65,9 +72,11 @@
      They both may be called deep and profound.
      Deeper and more profound,
      The door of all subtleties!
-""")
+"""
+)
 
-LAO = split_lines(b"""     The Way that can be told of is not the eternal Way;
+LAO = split_lines(
+    b"""     The Way that can be told of is not the eternal Way;
      The name that can be named is not the eternal name.
      The Nameless is the origin of Heaven and Earth;
      The Named is the mother of all things.
@@ -78,10 +87,12 @@
      The two are the same,
      But after they are produced,
        they have different names.
-""")
+"""
+)
 
 
-TAO = split_lines(b"""     The Way that can be told of is not the eternal Way;
+TAO = split_lines(
+    b"""     The Way that can be told of is not the eternal Way;
      The name that can be named is not the eternal name.
      The Nameless is the origin of Heaven and Earth;
      The named is the mother of all things.
@@ -96,9 +107,11 @@
 
        -- The Way of Lao-Tzu, tr. Wing-tsit Chan
 
-""")
+"""
+)
 
-MERGED_RESULT = split_lines(b"""\
+MERGED_RESULT = split_lines(
+    b"""\
      The Way that can be told of is not the eternal Way;
      The name that can be named is not the eternal name.
      The Nameless is the origin of Heaven and Earth;
@@ -116,7 +129,9 @@
        -- The Way of Lao-Tzu, tr. Wing-tsit Chan
 \
 \n>>>>>>> TAO
-""")
+"""
+)
+
 
 class TestMerge3(TestCase):
     def log(self, msg):
@@ -124,203 +139,211 @@
 
     def test_no_changes(self):
         """No conflicts because nothing changed"""
-        m3 = Merge3([b'aaa', b'bbb'],
-                    [b'aaa', b'bbb'],
-                    [b'aaa', b'bbb'])
+        m3 = Merge3([b'aaa', b'bbb'], [b'aaa', b'bbb'], [b'aaa', b'bbb'])
 
-        self.assertEqual(m3.find_unconflicted(),
-                         [(0, 2)])
+        self.assertEqual(m3.find_unconflicted(), [(0, 2)])
 
-        self.assertEqual(list(m3.find_sync_regions()),
-                         [(0, 2,
-                           0, 2,
-                           0, 2),
-                          (2, 2,  2, 2,  2, 2)])
+        self.assertEqual(
+            list(m3.find_sync_regions()),
+            [(0, 2, 0, 2, 0, 2), (2, 2, 2, 2, 2, 2)],
+        )
 
-        self.assertEqual(list(m3.merge_regions()),
-                         [(b'unchanged', 0, 2)])
+        self.assertEqual(list(m3.merge_regions()), [(b'unchanged', 0, 2)])
 
-        self.assertEqual(list(m3.merge_groups()),
-                         [(b'unchanged', [b'aaa', b'bbb'])])
+        self.assertEqual(
+            list(m3.merge_groups()), [(b'unchanged', [b'aaa', b'bbb'])]
+        )
 
     def test_front_insert(self):
-        m3 = Merge3([b'zz'],
-                    [b'aaa', b'bbb', b'zz'],
-                    [b'zz'])
+        m3 = Merge3([b'zz'], [b'aaa', b'bbb', b'zz'], [b'zz'])
 
         # todo: should use a sentinel at end as from get_matching_blocks
         # to match without zz
-        self.assertEqual(list(m3.find_sync_regions()),
-                         [(0, 1,  2, 3,  0, 1),
-                          (1, 1,  3, 3,  1, 1)])
+        self.assertEqual(
+            list(m3.find_sync_regions()),
+            [(0, 1, 2, 3, 0, 1), (1, 1, 3, 3, 1, 1)],
+        )
 
-        self.assertEqual(list(m3.merge_regions()),
-                         [(b'a', 0, 2),
-                          (b'unchanged', 0, 1)])
+        self.assertEqual(
+            list(m3.merge_regions()), [(b'a', 0, 2), (b'unchanged', 0, 1)]
+        )
 
-        self.assertEqual(list(m3.merge_groups()),
-                         [(b'a', [b'aaa', b'bbb']),
-                          (b'unchanged', [b'zz'])])
+        self.assertEqual(
+            list(m3.merge_groups()),
+            [(b'a', [b'aaa', b'bbb']), (b'unchanged', [b'zz'])],
+        )
 
     def test_null_insert(self):
-        m3 = Merge3([],
-                    [b'aaa', b'bbb'],
-                    [])
+        m3 = Merge3([], [b'aaa', b'bbb'], [])
         # todo: should use a sentinel at end as from get_matching_blocks
         # to match without zz
-        self.assertEqual(list(m3.find_sync_regions()),
-                         [(0, 0,  2, 2,  0, 0)])
+        self.assertEqual(list(m3.find_sync_regions()), [(0, 0, 2, 2, 0, 0)])
 
-        self.assertEqual(list(m3.merge_regions()),
-                         [(b'a', 0, 2)])
+        self.assertEqual(list(m3.merge_regions()), [(b'a', 0, 2)])
 
-        self.assertEqual(list(m3.merge_lines()),
-                         [b'aaa', b'bbb'])
+        self.assertEqual(list(m3.merge_lines()), [b'aaa', b'bbb'])
 
     def test_no_conflicts(self):
         """No conflicts because only one side changed"""
-        m3 = Merge3([b'aaa', b'bbb'],
-                    [b'aaa', b'111', b'bbb'],
-                    [b'aaa', b'bbb'])
+        m3 = Merge3(
+            [b'aaa', b'bbb'], [b'aaa', b'111', b'bbb'], [b'aaa', b'bbb']
+        )
 
-        self.assertEqual(m3.find_unconflicted(),
-                         [(0, 1), (1, 2)])
+        self.assertEqual(m3.find_unconflicted(), [(0, 1), (1, 2)])
 
-        self.assertEqual(list(m3.find_sync_regions()),
-                         [(0, 1,  0, 1,  0, 1),
-                          (1, 2,  2, 3,  1, 2),
-                          (2, 2,  3, 3,  2, 2)])
+        self.assertEqual(
+            list(m3.find_sync_regions()),
+            [(0, 1, 0, 1, 0, 1), (1, 2, 2, 3, 1, 2), (2, 2, 3, 3, 2, 2)],
+        )
 
-        self.assertEqual(list(m3.merge_regions()),
-                         [(b'unchanged', 0, 1),
-                          (b'a', 1, 2),
-                          (b'unchanged', 1, 2)])
+        self.assertEqual(
+            list(m3.merge_regions()),
+            [(b'unchanged', 0, 1), (b'a', 1, 2), (b'unchanged', 1, 2)],
+        )
 
     def test_append_a(self):
-        m3 = Merge3([b'aaa\n', b'bbb\n'],
-                    [b'aaa\n', b'bbb\n', b'222\n'],
-                    [b'aaa\n', b'bbb\n'])
+        m3 = Merge3(
+            [b'aaa\n', b'bbb\n'],
+            [b'aaa\n', b'bbb\n', b'222\n'],
+            [b'aaa\n', b'bbb\n'],
+        )
 
-        self.assertEqual(b''.join(m3.merge_lines()),
-                         b'aaa\nbbb\n222\n')
+        self.assertEqual(b''.join(m3.merge_lines()), b'aaa\nbbb\n222\n')
 
     def test_append_b(self):
-        m3 = Merge3([b'aaa\n', b'bbb\n'],
-                    [b'aaa\n', b'bbb\n'],
-                    [b'aaa\n', b'bbb\n', b'222\n'])
+        m3 = Merge3(
+            [b'aaa\n', b'bbb\n'],
+            [b'aaa\n', b'bbb\n'],
+            [b'aaa\n', b'bbb\n', b'222\n'],
+        )
 
-        self.assertEqual(b''.join(m3.merge_lines()),
-                         b'aaa\nbbb\n222\n')
+        self.assertEqual(b''.join(m3.merge_lines()), b'aaa\nbbb\n222\n')
 
     def test_append_agreement(self):
-        m3 = Merge3([b'aaa\n', b'bbb\n'],
-                    [b'aaa\n', b'bbb\n', b'222\n'],
-                    [b'aaa\n', b'bbb\n', b'222\n'])
+        m3 = Merge3(
+            [b'aaa\n', b'bbb\n'],
+            [b'aaa\n', b'bbb\n', b'222\n'],
+            [b'aaa\n', b'bbb\n', b'222\n'],
+        )
 
-        self.assertEqual(b''.join(m3.merge_lines()),
-                         b'aaa\nbbb\n222\n')
+        self.assertEqual(b''.join(m3.merge_lines()), b'aaa\nbbb\n222\n')
 
     def test_append_clash(self):
-        m3 = Merge3([b'aaa\n', b'bbb\n'],
-                    [b'aaa\n', b'bbb\n', b'222\n'],
-                    [b'aaa\n', b'bbb\n', b'333\n'])
+        m3 = Merge3(
+            [b'aaa\n', b'bbb\n'],
+            [b'aaa\n', b'bbb\n', b'222\n'],
+            [b'aaa\n', b'bbb\n', b'333\n'],
+        )
 
-        ml = m3.merge_lines(name_a=b'a',
-                            name_b=b'b',
-                            start_marker=b'<<',
-                            mid_marker=b'--',
-                            end_marker=b'>>')
-        self.assertEqual(b''.join(ml),
-                         b'aaa\n'
-                         b'bbb\n'
-                         b'<< a\n'
-                         b'222\n'
-                         b'--\n'
-                         b'333\n'
-                         b'>> b\n'
-                         )
+        ml = m3.merge_lines(
+            name_a=b'a',
+            name_b=b'b',
+            start_marker=b'<<',
+            mid_marker=b'--',
+            end_marker=b'>>',
+        )
+        self.assertEqual(
+            b''.join(ml),
+            b'aaa\n' b'bbb\n' b'<< a\n' b'222\n' b'--\n' b'333\n' b'>> b\n',
+        )
 
     def test_insert_agreement(self):
-        m3 = Merge3([b'aaa\n', b'bbb\n'],
-                    [b'aaa\n', b'222\n', b'bbb\n'],
-                    [b'aaa\n', b'222\n', b'bbb\n'])
+        m3 = Merge3(
+            [b'aaa\n', b'bbb\n'],
+            [b'aaa\n', b'222\n', b'bbb\n'],
+            [b'aaa\n', b'222\n', b'bbb\n'],
+        )
 
-        ml = m3.merge_lines(name_a=b'a',
-                            name_b=b'b',
-                            start_marker=b'<<',
-                            mid_marker=b'--',
-                            end_marker=b'>>')
+        ml = m3.merge_lines(
+            name_a=b'a',
+            name_b=b'b',
+            start_marker=b'<<',
+            mid_marker=b'--',
+            end_marker=b'>>',
+        )
         self.assertEqual(b''.join(ml), b'aaa\n222\nbbb\n')
 
-
     def test_insert_clash(self):
         """Both try to insert lines in the same place."""
-        m3 = Merge3([b'aaa\n', b'bbb\n'],
-                    [b'aaa\n', b'111\n', b'bbb\n'],
-                    [b'aaa\n', b'222\n', b'bbb\n'])
+        m3 = Merge3(
+            [b'aaa\n', b'bbb\n'],
+            [b'aaa\n', b'111\n', b'bbb\n'],
+            [b'aaa\n', b'222\n', b'bbb\n'],
+        )
 
-        self.assertEqual(m3.find_unconflicted(),
-                         [(0, 1), (1, 2)])
+        self.assertEqual(m3.find_unconflicted(), [(0, 1), (1, 2)])
 
-        self.assertEqual(list(m3.find_sync_regions()),
-                         [(0, 1,  0, 1,  0, 1),
-                          (1, 2,  2, 3,  2, 3),
-                          (2, 2,  3, 3,  3, 3)])
+        self.assertEqual(
+            list(m3.find_sync_regions()),
+            [(0, 1, 0, 1, 0, 1), (1, 2, 2, 3, 2, 3), (2, 2, 3, 3, 3, 3)],
+        )
 
-        self.assertEqual(list(m3.merge_regions()),
-                         [(b'unchanged', 0, 1),
-                          (b'conflict', 1, 1,  1, 2,  1, 2),
-                          (b'unchanged', 1, 2)])
+        self.assertEqual(
+            list(m3.merge_regions()),
+            [
+                (b'unchanged', 0, 1),
+                (b'conflict', 1, 1, 1, 2, 1, 2),
+                (b'unchanged', 1, 2),
+            ],
+        )
 
-        self.assertEqual(list(m3.merge_groups()),
-                         [(b'unchanged', [b'aaa\n']),
-                          (b'conflict', [], [b'111\n'], [b'222\n']),
-                          (b'unchanged', [b'bbb\n']),
-                          ])
+        self.assertEqual(
+            list(m3.merge_groups()),
+            [
+                (b'unchanged', [b'aaa\n']),
+                (b'conflict', [], [b'111\n'], [b'222\n']),
+                (b'unchanged', [b'bbb\n']),
+            ],
+        )
 
-        ml = m3.merge_lines(name_a=b'a',
-                            name_b=b'b',
-                            start_marker=b'<<',
-                            mid_marker=b'--',
-                            end_marker=b'>>')
-        self.assertEqual(b''.join(ml),
-b'''aaa
+        ml = m3.merge_lines(
+            name_a=b'a',
+            name_b=b'b',
+            start_marker=b'<<',
+            mid_marker=b'--',
+            end_marker=b'>>',
+        )
+        self.assertEqual(
+            b''.join(ml),
+            b'''aaa
 << a
 111
 --
 222
 >> b
 bbb
-''')
+''',
+        )
 
     def test_replace_clash(self):
         """Both try to insert lines in the same place."""
-        m3 = Merge3([b'aaa', b'000', b'bbb'],
-                    [b'aaa', b'111', b'bbb'],
-                    [b'aaa', b'222', b'bbb'])
+        m3 = Merge3(
+            [b'aaa', b'000', b'bbb'],
+            [b'aaa', b'111', b'bbb'],
+            [b'aaa', b'222', b'bbb'],
+        )
 
-        self.assertEqual(m3.find_unconflicted(),
-                         [(0, 1), (2, 3)])
+        self.assertEqual(m3.find_unconflicted(), [(0, 1), (2, 3)])
 
-        self.assertEqual(list(m3.find_sync_regions()),
-                         [(0, 1,  0, 1,  0, 1),
-                           (2, 3,  2, 3,  2, 3),
-                           (3, 3,  3, 3,  3, 3)])
+        self.assertEqual(
+            list(m3.find_sync_regions()),
+            [(0, 1, 0, 1, 0, 1), (2, 3, 2, 3, 2, 3), (3, 3, 3, 3, 3, 3)],
+        )
 
     def test_replace_multi(self):
         """Replacement with regions of different size."""
-        m3 = Merge3([b'aaa', b'000', b'000', b'bbb'],
-                    [b'aaa', b'111', b'111', b'111', b'bbb'],
-                    [b'aaa', b'222', b'222', b'222', b'222', b'bbb'])
+        m3 = Merge3(
+            [b'aaa', b'000', b'000', b'bbb'],
+            [b'aaa', b'111', b'111', b'111', b'bbb'],
+            [b'aaa', b'222', b'222', b'222', b'222', b'bbb'],
+        )
 
-        self.assertEqual(m3.find_unconflicted(),
-                         [(0, 1), (3, 4)])
-
+        self.assertEqual(m3.find_unconflicted(), [(0, 1), (3, 4)])
 
-        self.assertEqual(list(m3.find_sync_regions()),
-                         [(0, 1,  0, 1,  0, 1),
-                          (3, 4,  4, 5,  5, 6),
-                          (4, 4,  5, 5,  6, 6)])
+        self.assertEqual(
+            list(m3.find_sync_regions()),
+            [(0, 1, 0, 1, 0, 1), (3, 4, 4, 5, 5, 6), (4, 4, 5, 5, 6, 6)],
+        )
 
     def test_merge_poem(self):
         """Test case from diff3 manual"""
@@ -338,28 +361,36 @@
         base_text = b'a\r\n'
         this_text = b'b\r\n'
         other_text = b'c\r\n'
-        m3 = Merge3(base_text.splitlines(True), other_text.splitlines(True),
-                    this_text.splitlines(True))
+        m3 = Merge3(
+            base_text.splitlines(True),
+            other_text.splitlines(True),
+            this_text.splitlines(True),
+        )
         m_lines = m3.merge_lines(b'OTHER', b'THIS')
-        self.assertEqual(b'<<<<<<< OTHER\r\nc\r\n=======\r\nb\r\n'
-                         b'>>>>>>> THIS\r\n'.splitlines(True), list(m_lines))
+        self.assertEqual(
+            b'<<<<<<< OTHER\r\nc\r\n=======\r\nb\r\n'
+            b'>>>>>>> THIS\r\n'.splitlines(True),
+            list(m_lines),
+        )
 
     def test_mac_text(self):
         base_text = b'a\r'
         this_text = b'b\r'
         other_text = b'c\r'
-        m3 = Merge3(base_text.splitlines(True), other_text.splitlines(True),
-                    this_text.splitlines(True))
+        m3 = Merge3(
+            base_text.splitlines(True),
+            other_text.splitlines(True),
+            this_text.splitlines(True),
+        )
         m_lines = m3.merge_lines(b'OTHER', b'THIS')
-        self.assertEqual(b'<<<<<<< OTHER\rc\r=======\rb\r'
-                         b'>>>>>>> THIS\r'.splitlines(True), list(m_lines))
+        self.assertEqual(
+            b'<<<<<<< OTHER\rc\r=======\rb\r'
+            b'>>>>>>> THIS\r'.splitlines(True),
+            list(m_lines),
+        )
+
 
 if __name__ == '__main__':
-    # hide the timer
-    import time
-    orig = time.time
-    try:
-        time.time = lambda: 0
-        unittest.main()
-    finally:
-        time.time = orig
+    import silenttestrunner
+
+    silenttestrunner.main(__name__)
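
As a quick reference for the calls asserted throughout this file, a
usage sketch built directly on simplemerge.Merge3Text (the base class
of the test's Merge3 wrapper), using the (basetext, atext, btext,
base, a, b) argument order shown in the hunk above::

    from mercurial import simplemerge

    # Mirrors test_append_a: base, this, other are sequences of lines;
    # merge_lines() yields the merged output.
    base = [b'aaa\n', b'bbb\n']
    this = [b'aaa\n', b'bbb\n', b'222\n']
    other = [b'aaa\n', b'bbb\n']
    m3 = simplemerge.Merge3Text(
        b''.join(base), b''.join(this), b''.join(other), base, this, other
    )
    assert b''.join(m3.merge_lines()) == b'aaa\nbbb\n222\n'
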
--- a/tests/test-simplemerge.py.out	Wed Oct 02 12:20:36 2019 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,5 +0,0 @@
-................
-----------------------------------------------------------------------
-Ran 16 tests in 0.000s
-
-OK
--- a/tests/test-single-head.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-single-head.t	Mon Oct 21 11:09:48 2019 -0400
@@ -71,7 +71,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files (+1 heads)
   transaction abort!
   rollback completed
   abort: rejecting multiple heads on branch "default"
@@ -114,8 +113,8 @@
   1 new obsolescence markers
   obsoleted 1 changesets
 
-Check it does to interfer with strip
-------------------------------------
+Check it does not interfere with strip
+--------------------------------------
 
 setup
 
@@ -201,3 +200,62 @@
   $ hg strip --config extensions.strip= --rev 'desc("c_dH0")'
   saved backup bundle to $TESTTMP/client/.hg/strip-backup/fe47ea669cea-a41bf5a9-backup.hg
 
+Test that closing heads are ignored by default
+-----------------------------------------------
+
+  $ hg up 'desc("c_aG0")'
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit c_aJ0
+  created new head
+
+pushing the new head should fail
+
+  $ hg push -f
+  pushing to $TESTTMP/single-head-server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  transaction abort!
+  rollback completed
+  abort: rejecting multiple heads on branch "branch_A"
+  (2 heads: 49003e504178 468bd81ccc5d)
+  [255]
+
+
+closing the head and pushing should succeed
+
+  $ mkcommit c_aK0 --close-branch
+  $ hg push -f
+  pushing to $TESTTMP/single-head-server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 4 changes to 4 files (-1 heads)
+
+
+Test that closing heads can be explicitly accounted for
+-------------------------------------------------------
+
+  $ cat <<EOF >> $TESTTMP/single-head-server/.hg/hgrc
+  > [experimental]
+  > single-head-per-branch:account-closed-heads = yes
+  > EOF
+
+  $ hg up 'desc("c_aG0")'
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit c_aL0
+  created new head
+  $ mkcommit c_aM0 --close-branch
+  $ hg push -f
+  pushing to $TESTTMP/single-head-server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  transaction abort!
+  rollback completed
+  abort: rejecting multiple heads on branch "branch_A"
+  (3 heads: 49003e504178 5254bcccab93 42b9fe70a3c1)
+  [255]
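
For context, the knob exercised above complements the base single-head
enforcement; a minimal server-side hgrc combines the two (assumption:
the base option, not shown in this hunk, is the experimental
single-head-per-branch switch)::

    [experimental]
    single-head-per-branch = yes
    single-head-per-branch:account-closed-heads = yes
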
--- a/tests/test-split.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-split.t	Mon Oct 21 11:09:48 2019 -0400
@@ -789,3 +789,190 @@
   abort: cannot split an empty revision
   [255]
 #endif
+
+Test that splitting moves works properly (issue5723)
+----------------------------------------------------
+
+  $ hg init $TESTTMP/issue5723-mv
+  $ cd $TESTTMP/issue5723-mv
+  $ printf '1\n2\n' > file
+  $ hg ci -qAm initial
+  $ hg mv file file2
+  $ printf 'a\nb\n1\n2\n3\n4\n' > file2
+  $ cat > $TESTTMP/messages <<EOF
+  > split1, keeping only the numbered lines
+  > --
+  > split2, keeping the lettered lines
+  > EOF
+  $ hg ci -m 'move and modify'
+  $ printf 'y\nn\na\na\n' | hg split
+  diff --git a/file b/file2
+  rename from file
+  rename to file2
+  2 hunks, 4 lines changed
+  examine changes to 'file' and 'file2'?
+  (enter ? for help) [Ynesfdaq?] y
+  
+  @@ -0,0 +1,2 @@
+  +a
+  +b
+  record change 1/2 to 'file2'?
+  (enter ? for help) [Ynesfdaq?] n
+  
+  @@ -2,0 +5,2 @@ 2
+  +3
+  +4
+  record change 2/2 to 'file2'?
+  (enter ? for help) [Ynesfdaq?] a
+  
+  EDITOR: HG: Splitting 8c42fa635116. Write commit message for the first split changeset.
+  EDITOR: move and modify
+  EDITOR: 
+  EDITOR: 
+  EDITOR: HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+  EDITOR: HG: Leave message empty to abort commit.
+  EDITOR: HG: --
+  EDITOR: HG: user: test
+  EDITOR: HG: branch 'default'
+  EDITOR: HG: added file2
+  EDITOR: HG: removed file
+  created new head
+  diff --git a/file2 b/file2
+  1 hunks, 2 lines changed
+  examine changes to 'file2'?
+  (enter ? for help) [Ynesfdaq?] a
+  
+  EDITOR: HG: Splitting 8c42fa635116. So far it has been split into:
+  EDITOR: HG: - 478be2a70c27: split1, keeping only the numbered lines
+  EDITOR: HG: Write commit message for the next split changeset.
+  EDITOR: move and modify
+  EDITOR: 
+  EDITOR: 
+  EDITOR: HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+  EDITOR: HG: Leave message empty to abort commit.
+  EDITOR: HG: --
+  EDITOR: HG: user: test
+  EDITOR: HG: branch 'default'
+  EDITOR: HG: changed file2
+  saved backup bundle to $TESTTMP/issue5723-mv/.hg/strip-backup/8c42fa635116-a38044d4-split.hg (obsstore-off !)
+  $ hg log -T '{desc}: {files%"{file} "}\n'
+  split2, keeping the lettered lines: file2 
+  split1, keeping only the numbered lines: file file2 
+  initial: file 
+  $ cat file2
+  a
+  b
+  1
+  2
+  3
+  4
+  $ hg cat -r ".^" file2
+  1
+  2
+  3
+  4
+  $ hg cat -r . file2
+  a
+  b
+  1
+  2
+  3
+  4
+
+
+Test that splitting copies works properly (issue5723)
+----------------------------------------------------
+
+  $ hg init $TESTTMP/issue5723-cp
+  $ cd $TESTTMP/issue5723-cp
+  $ printf '1\n2\n' > file
+  $ hg ci -qAm initial
+  $ hg cp file file2
+  $ printf 'a\nb\n1\n2\n3\n4\n' > file2
+Also modify 'file' to prove that the changes aren't being pulled in
+accidentally.
+  $ printf 'this is the new contents of "file"' > file
+  $ cat > $TESTTMP/messages <<EOF
+  > split1, keeping "file" and only the numbered lines in file2
+  > --
+  > split2, keeping the lettered lines in file2
+  > EOF
+  $ hg ci -m 'copy file->file2, modify both'
+  $ printf 'f\ny\nn\na\na\n' | hg split
+  diff --git a/file b/file
+  1 hunks, 2 lines changed
+  examine changes to 'file'?
+  (enter ? for help) [Ynesfdaq?] f
+  
+  diff --git a/file b/file2
+  copy from file
+  copy to file2
+  2 hunks, 4 lines changed
+  examine changes to 'file' and 'file2'?
+  (enter ? for help) [Ynesfdaq?] y
+  
+  @@ -0,0 +1,2 @@
+  +a
+  +b
+  record change 2/3 to 'file2'?
+  (enter ? for help) [Ynesfdaq?] n
+  
+  @@ -2,0 +5,2 @@ 2
+  +3
+  +4
+  record change 3/3 to 'file2'?
+  (enter ? for help) [Ynesfdaq?] a
+  
+  EDITOR: HG: Splitting 41c861dfa61e. Write commit message for the first split changeset.
+  EDITOR: copy file->file2, modify both
+  EDITOR: 
+  EDITOR: 
+  EDITOR: HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+  EDITOR: HG: Leave message empty to abort commit.
+  EDITOR: HG: --
+  EDITOR: HG: user: test
+  EDITOR: HG: branch 'default'
+  EDITOR: HG: added file2
+  EDITOR: HG: changed file
+  created new head
+  diff --git a/file2 b/file2
+  1 hunks, 2 lines changed
+  examine changes to 'file2'?
+  (enter ? for help) [Ynesfdaq?] a
+  
+  EDITOR: HG: Splitting 41c861dfa61e. So far it has been split into:
+  EDITOR: HG: - 4b19e06610eb: split1, keeping "file" and only the numbered lines in file2
+  EDITOR: HG: Write commit message for the next split changeset.
+  EDITOR: copy file->file2, modify both
+  EDITOR: 
+  EDITOR: 
+  EDITOR: HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+  EDITOR: HG: Leave message empty to abort commit.
+  EDITOR: HG: --
+  EDITOR: HG: user: test
+  EDITOR: HG: branch 'default'
+  EDITOR: HG: changed file2
+  saved backup bundle to $TESTTMP/issue5723-cp/.hg/strip-backup/41c861dfa61e-467e8d3c-split.hg (obsstore-off !)
+  $ hg log -T '{desc}: {files%"{file} "}\n'
+  split2, keeping the lettered lines in file2: file2 
+  split1, keeping "file" and only the numbered lines in file2: file file2 
+  initial: file 
+  $ cat file2
+  a
+  b
+  1
+  2
+  3
+  4
+  $ hg cat -r ".^" file2
+  1
+  2
+  3
+  4
+  $ hg cat -r . file2
+  a
+  b
+  1
+  2
+  3
+  4
--- a/tests/test-ssh-bundle1.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-ssh-bundle1.t	Mon Oct 21 11:09:48 2019 -0400
@@ -583,7 +583,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: hook failure!
   remote: transaction abort!
   remote: rollback completed
--- a/tests/test-ssh-proto-unbundle.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-ssh-proto-unbundle.t	Mon Oct 21 11:09:48 2019 -0400
@@ -272,11 +272,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 196:
+  e> read(-1) -> 151:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1 line\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -328,11 +327,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 196:
+  e> read(-1) -> 151:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1 line\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -398,11 +396,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 218:
+  e> read(-1) -> 173:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 2 lines 1\n
   e>     ui.write 2 lines 2\n
   e>     transaction abort!\n
@@ -455,11 +452,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 218:
+  e> read(-1) -> 173:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 2 lines 1\n
   e>     ui.write 2 lines 2\n
   e>     transaction abort!\n
@@ -526,11 +522,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 202:
+  e> read(-1) -> 157:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1 line flush\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -582,11 +577,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 202:
+  e> read(-1) -> 157:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1 line flush\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -652,11 +646,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 206:
+  e> read(-1) -> 161:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1st\n
   e>     ui.write 2nd\n
   e>     transaction abort!\n
@@ -709,11 +702,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 206:
+  e> read(-1) -> 161:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1st\n
   e>     ui.write 2nd\n
   e>     transaction abort!\n
@@ -780,11 +772,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 232:
+  e> read(-1) -> 187:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1\n
   e>     ui.write_err 1\n
   e>     ui.write 2\n
@@ -839,11 +830,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 232:
+  e> read(-1) -> 187:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1\n
   e>     ui.write_err 1\n
   e>     ui.write 2\n
@@ -912,11 +902,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 193:
+  e> read(-1) -> 148:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     printed line\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -968,11 +957,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 193:
+  e> read(-1) -> 148:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     printed line\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -1038,11 +1026,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 218:
+  e> read(-1) -> 173:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     print 1\n
   e>     ui.write 1\n
   e>     print 2\n
@@ -1097,11 +1084,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 218:
+  e> read(-1) -> 173:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     print 1\n
   e>     ui.write 1\n
   e>     print 2\n
@@ -1170,11 +1156,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 216:
+  e> read(-1) -> 171:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stderr 1\n
   e>     stdout 2\n
@@ -1229,11 +1214,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 216:
+  e> read(-1) -> 171:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stderr 1\n
   e>     stdout 2\n
@@ -1308,11 +1292,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 212:
+  e> read(-1) -> 167:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stdout 2\n
   e>     transaction abort!\n
@@ -1365,11 +1348,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 212:
+  e> read(-1) -> 167:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stdout 2\n
   e>     transaction abort!\n
@@ -1437,11 +1419,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 212:
+  e> read(-1) -> 167:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stderr 1\n
   e>     stderr 2\n
   e>     transaction abort!\n
@@ -1494,11 +1475,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 212:
+  e> read(-1) -> 167:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stderr 1\n
   e>     stderr 2\n
   e>     transaction abort!\n
@@ -1568,11 +1548,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 230:
+  e> read(-1) -> 185:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stderr 1\n
   e>     stdout 2\n
@@ -1627,11 +1606,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 230:
+  e> read(-1) -> 185:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stderr 1\n
   e>     stdout 2\n
@@ -1709,11 +1687,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 273:
+  e> read(-1) -> 228:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     shell stdout 1\n
   e>     shell stderr 1\n
   e>     shell stdout 2\n
@@ -1772,11 +1749,10 @@
   o> read(1) -> 1: 0
   result: 0
   remote output: 
-  e> read(-1) -> 273:
+  e> read(-1) -> 228:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     shell stdout 1\n
   e>     shell stderr 1\n
   e>     shell stdout 2\n
@@ -1983,11 +1959,11 @@
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1\n
   e>     ui.write_err 1\n
   e>     ui.write 2\n
   e>     ui.write_err 2\n
+  e>     added 1 changesets with 1 changes to 1 files\n
   
   testing ssh2
   creating ssh peer from handshake results
@@ -2039,8 +2015,8 @@
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1\n
   e>     ui.write_err 1\n
   e>     ui.write 2\n
   e>     ui.write_err 2\n
+  e>     added 1 changesets with 1 changes to 1 files\n
--- a/tests/test-ssh-proto.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-ssh-proto.t	Mon Oct 21 11:09:48 2019 -0400
@@ -104,6 +104,7 @@
   $ hg debugserve --sshstdio --logiofd 1 << EOF
   > hello
   > EOF
+  e> flush() -> None
   o> write(4) -> 4:
   o>     440\n
   o> write(440) -> 440:
@@ -119,6 +120,7 @@
   capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
   $ cat $TESTTMP/io
+  e> flush() -> None
   o> write(4) -> 4:
   o>     440\n
   o> write(440) -> 440:
--- a/tests/test-ssh.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-ssh.t	Mon Oct 21 11:09:48 2019 -0400
@@ -546,7 +546,7 @@
   bundle2-input-part: total payload size 45
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 72
-  bundle2-input-bundle: 2 parts total
+  bundle2-input-bundle: 3 parts total
   checking for updated bookmarks
 
   $ cd $TESTTMP
@@ -644,7 +644,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: hook failure!
   remote: transaction abort!
   remote: rollback completed
--- a/tests/test-sshserver.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-sshserver.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,37 +10,41 @@
     wireprotov1server,
 )
 
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
+
 
 class SSHServerGetArgsTests(unittest.TestCase):
     def testparseknown(self):
         tests = [
             (b'* 0\nnodes 0\n', [b'', {}]),
-            (b'* 0\nnodes 40\n1111111111111111111111111111111111111111\n',
-             [b'1111111111111111111111111111111111111111', {}]),
+            (
+                b'* 0\nnodes 40\n1111111111111111111111111111111111111111\n',
+                [b'1111111111111111111111111111111111111111', {}],
+            ),
         ]
         for input, expected in tests:
             self.assertparse(b'known', input, expected)
 
     def assertparse(self, cmd, input, expected):
         server = mockserver(input)
-        proto = wireprotoserver.sshv1protocolhandler(server._ui,
-                                                     server._fin,
-                                                     server._fout)
+        proto = wireprotoserver.sshv1protocolhandler(
+            server._ui, server._fin, server._fout
+        )
         _func, spec = wireprotov1server.commands[cmd]
         self.assertEqual(proto.getargs(spec), expected)
 
+
 def mockserver(inbytes):
     ui = mockui(inbytes)
     repo = mockrepo(ui)
     return wireprotoserver.sshserver(ui, repo)
 
+
 class mockrepo(object):
     def __init__(self, ui):
         self.ui = ui
 
+
 class mockui(object):
     def __init__(self, inbytes):
         self.fin = io.BytesIO(inbytes)
@@ -53,6 +57,7 @@
     def restorefinout(self, fin, fout):
         pass
 
+
 if __name__ == '__main__':
     # Don't call into msvcrt to set BytesIO to binary mode
     procutil.setbinary = lambda fp: True
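
For orientation, the byte strings fed to getargs() in testparseknown
follow a simple framing: a ``* <n>`` line announcing n star args, then
``<name> <byte-count>`` lines each followed by that many value bytes.
A toy reader that accepts exactly the two test vectors above (the real
parser is wireprotoserver.sshv1protocolhandler.getargs)::

    import io

    def read_args(data, names):
        fin = io.BytesIO(data)
        fin.readline()  # b'* 0\n': zero star args in these vectors
        args = {}
        for _ in names:
            name, size = fin.readline().split()
            args[name] = fin.read(int(size))
        return args

    assert read_args(b'* 0\nnodes 0\n', [b'nodes']) == {b'nodes': b''}
    assert read_args(
        b'* 0\nnodes 40\n' + b'1' * 40 + b'\n', [b'nodes']
    ) == {b'nodes': b'1' * 40}
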
--- a/tests/test-status-inprocess.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-status-inprocess.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,6 +10,8 @@
 )
 
 print_ = print
+
+
 def print(*args, **kwargs):
     """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
 
@@ -19,6 +21,7 @@
     print_(*args, **kwargs)
     sys.stdout.flush()
 
+
 u = uimod.ui.load()
 
 print('% creating repo')
--- a/tests/test-storage.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-storage.py	Mon Oct 21 11:09:48 2019 -0400
@@ -13,19 +13,16 @@
     vfs as vfsmod,
 )
 
-from mercurial.testing import (
-    storage as storagetesting,
-)
+from mercurial.testing import storage as storagetesting
 
 try:
-    from hgext import (
-        sqlitestore,
-    )
+    from hgext import sqlitestore
 except ImportError:
     sqlitestore = None
 
 try:
     import sqlite3
+
     if sqlite3.sqlite_version_info < (3, 8, 3):
         # WITH clause not supported
         sqlitestore = None
@@ -34,6 +31,7 @@
 
 try:
     from mercurial import zstd
+
     zstd.__version__
 except ImportError:
     zstd = None
@@ -44,20 +42,36 @@
     'vfs': vfsmod.vfs(b'.', realpath=True),
 }
 
+
 def makefilefn(self):
     """Factory for filelog instances."""
     fl = filelog.filelog(STATE['vfs'], b'filelog-%d' % STATE['lastindex'])
     STATE['lastindex'] += 1
     return fl
 
+
 def maketransaction(self):
     vfsmap = {b'plain': STATE['vfs'], b'store': STATE['vfs']}
 
-    return transaction.transaction(STATE['ui'].warn, STATE['vfs'], vfsmap,
-                                   b'journal', b'undo')
+    return transaction.transaction(
+        STATE['ui'].warn, STATE['vfs'], vfsmap, b'journal', b'undo'
+    )
+
 
-def addrawrevision(self, fl, tr, node, p1, p2, linkrev, rawtext=None,
-                   delta=None, censored=False, ellipsis=False, extstored=False):
+def addrawrevision(
+    self,
+    fl,
+    tr,
+    node,
+    p1,
+    p2,
+    linkrev,
+    rawtext=None,
+    delta=None,
+    censored=False,
+    ellipsis=False,
+    extstored=False,
+):
     flags = 0
 
     if censored:
@@ -70,8 +84,9 @@
     if rawtext is not None:
         fl._revlog.addrawrevision(rawtext, tr, linkrev, p1, p2, node, flags)
     elif delta is not None:
-        fl._revlog.addrawrevision(rawtext, tr, linkrev, p1, p2, node, flags,
-                                  cachedelta=delta)
+        fl._revlog.addrawrevision(
+            rawtext, tr, linkrev, p1, p2, node, flags, cachedelta=delta
+        )
     else:
         raise error.Abort('must supply rawtext or delta arguments')
 
@@ -79,17 +94,19 @@
     # bypass hash verification.
     fl._revlog.clearcaches()
 
+
 # Assigning module-level attributes that inherit from unittest.TestCase
 # is all that is needed to register tests.
-filelogindextests = storagetesting.makeifileindextests(makefilefn,
-                                                       maketransaction,
-                                                       addrawrevision)
-filelogdatatests = storagetesting.makeifiledatatests(makefilefn,
-                                                     maketransaction,
-                                                     addrawrevision)
-filelogmutationtests = storagetesting.makeifilemutationtests(makefilefn,
-                                                             maketransaction,
-                                                             addrawrevision)
+filelogindextests = storagetesting.makeifileindextests(
+    makefilefn, maketransaction, addrawrevision
+)
+filelogdatatests = storagetesting.makeifiledatatests(
+    makefilefn, maketransaction, addrawrevision
+)
+filelogmutationtests = storagetesting.makeifilemutationtests(
+    makefilefn, maketransaction, addrawrevision
+)
+
 
 def makesqlitefile(self):
     path = STATE['vfs'].join(b'db-%d.db' % STATE['lastindex'])
@@ -101,33 +118,51 @@
 
     return sqlitestore.sqlitefilestore(db, b'dummy-path', compression)
 
-def addrawrevisionsqlite(self, fl, tr, node, p1, p2, linkrev, rawtext=None,
-                         delta=None, censored=False, ellipsis=False,
-                         extstored=False):
+
+def addrawrevisionsqlite(
+    self,
+    fl,
+    tr,
+    node,
+    p1,
+    p2,
+    linkrev,
+    rawtext=None,
+    delta=None,
+    censored=False,
+    ellipsis=False,
+    extstored=False,
+):
     flags = 0
 
     if censored:
         flags |= sqlitestore.FLAG_CENSORED
 
     if ellipsis | extstored:
-        raise error.Abort(b'support for ellipsis and extstored flags not '
-                          b'supported')
+        raise error.Abort(
+            b'support for ellipsis and extstored flags not ' b'supported'
+        )
 
     if rawtext is not None:
         fl._addrawrevision(node, rawtext, tr, linkrev, p1, p2, flags=flags)
     elif delta is not None:
-        fl._addrawrevision(node, rawtext, tr, linkrev, p1, p2,
-                           storedelta=delta, flags=flags)
+        fl._addrawrevision(
+            node, rawtext, tr, linkrev, p1, p2, storedelta=delta, flags=flags
+        )
     else:
         raise error.Abort(b'must supply rawtext or delta arguments')
 
+
 if sqlitestore is not None:
     sqlitefileindextests = storagetesting.makeifileindextests(
-        makesqlitefile, maketransaction, addrawrevisionsqlite)
+        makesqlitefile, maketransaction, addrawrevisionsqlite
+    )
     sqlitefiledatatests = storagetesting.makeifiledatatests(
-        makesqlitefile, maketransaction, addrawrevisionsqlite)
+        makesqlitefile, maketransaction, addrawrevisionsqlite
+    )
     sqlitefilemutationtests = storagetesting.makeifilemutationtests(
-        makesqlitefile, maketransaction, addrawrevisionsqlite)
+        makesqlitefile, maketransaction, addrawrevisionsqlite
+    )
 
 if __name__ == '__main__':
     silenttestrunner.main(__name__)
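
The pattern noted in the hunk above (assigning module-level attributes
that inherit from unittest.TestCase is all that is needed to register
tests) deserves a standalone illustration; a minimal, self-contained
sketch of the same factory idea::

    import unittest

    def maketests(value):
        # Build a TestCase subclass closed over parameter state,
        # analogous to makeifileindextests() and friends above.
        class Tests(unittest.TestCase):
            def test_value(self):
                self.assertEqual(value, 42)
        return Tests

    # Binding the generated class to a module-level name is all the
    # default unittest loader needs to discover and run it.
    GeneratedTests = maketests(42)

    if __name__ == '__main__':
        unittest.main()
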
--- a/tests/test-stream-bundle-v2.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-stream-bundle-v2.t	Mon Oct 21 11:09:48 2019 -0400
@@ -87,7 +87,7 @@
   adding [c] rbc-revs-v1 (40 bytes)
   transferred 1.65 KB in \d\.\d seconds \(.*/sec\) (re)
   bundle2-input-part: total payload size 1840
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   updating the branch cache
   finished applying clone bundle
   query 1; heads
@@ -100,7 +100,7 @@
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 1 parts total
+  bundle2-input-bundle: 2 parts total
   checking for updated bookmarks
   updating to branch default
   resolving manifests
@@ -143,7 +143,7 @@
   adding [c] rbc-revs-v1 (40 bytes)
   transferred 1.65 KB in *.* seconds (*/sec) (glob)
   bundle2-input-part: total payload size 1840
-  bundle2-input-bundle: 0 parts total
+  bundle2-input-bundle: 1 parts total
   updating the branch cache
   finished applying clone bundle
   query 1; heads
@@ -156,7 +156,7 @@
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 1 parts total
+  bundle2-input-bundle: 2 parts total
   checking for updated bookmarks
   updating to branch default
   resolving manifests
--- a/tests/test-strip-cross.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-strip-cross.t	Mon Oct 21 11:09:48 2019 -0400
@@ -2,14 +2,13 @@
 
   $ echo '[extensions]' >> $HGRCPATH
   $ echo 'strip =' >> $HGRCPATH
-  $ hg init orig
-  $ cd orig
   $ commit()
   > {
   >     hg up -qC null
   >     count=1
   >     for i in "$@"; do
   >         for f in $i; do
+  >             mkdir -p `dirname $f`
   >             echo $count > $f
   >         done
   >         count=`expr $count + 1`
@@ -19,29 +18,22 @@
 
 2 1 0 2 0 1 2
 
+  $ mkdir files
+  $ cd files
+  $ hg init orig
+  $ cd orig
   $ commit '201 210'
   $ commit '102 120' '210'
   $ commit '021'
   $ commit '201' '021 120'
   $ commit '012 021' '102 201' '120 210'
-  $ commit 'manifest-file'
   $ commit '102 120' '012 210' '021 201'
   $ commit '201 210' '021 120' '012 102'
-  $ HGUSER=another-user; export HGUSER
-  $ commit 'manifest-file'
-  $ commit '012' 'manifest-file'
   $ cd ..
-  $ hg clone -q -U -r -1 -r -2 -r -3 -r -4 -r -6 orig crossed
+  $ hg clone -q -U -r 4 -r 5 -r 6 orig crossed
   $ cd crossed
-  $ hg debugindex --manifest
-     rev linkrev nodeid       p1           p2
-       0       0 6f105cbb914d 000000000000 000000000000
-       1       3 1b55917b3699 000000000000 000000000000
-       2       1 8f3d04e263e5 000000000000 000000000000
-       3       2 f0ef8726ac4f 000000000000 000000000000
-       4       4 0b76e38b4070 000000000000 000000000000
 
-  $ for i in 012 021 102 120 201 210 manifest-file; do
+  $ for i in 012 021 102 120 201 210; do
   >     echo $i
   >     hg debugindex $i
   >     echo
@@ -82,13 +74,8 @@
        1       1 5d9299349fc0 000000000000 000000000000
        2       0 2661d26c6496 000000000000 000000000000
   
-  manifest-file
-     rev linkrev nodeid       p1           p2
-       0       3 b8e02f643373 000000000000 000000000000
-       1       4 5d9299349fc0 000000000000 000000000000
-  
   $ cd ..
-  $ for i in 0 1 2 3 4; do
+  $ for i in 0 1 2; do
   >     hg clone -q -U --pull crossed $i
   >     echo "% Trying to strip revision $i"
   >     hg --cwd $i strip $i
@@ -97,47 +84,137 @@
   >     echo
   > done
   % Trying to strip revision 0
-  saved backup bundle to $TESTTMP/0/.hg/strip-backup/*-backup.hg (glob)
+  saved backup bundle to $TESTTMP/files/0/.hg/strip-backup/cbb8c2f0a2e3-239800b9-backup.hg
+  % Verifying
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 2 changesets with 12 changes to 6 files
+  
+  % Trying to strip revision 1
+  saved backup bundle to $TESTTMP/files/1/.hg/strip-backup/124ecc0cbec9-6104543f-backup.hg
+  % Verifying
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 2 changesets with 12 changes to 6 files
+  
+  % Trying to strip revision 2
+  saved backup bundle to $TESTTMP/files/2/.hg/strip-backup/f6439b304a1a-c6505a5f-backup.hg
   % Verifying
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  checked 4 changesets with 15 changes to 7 files
+  checked 2 changesets with 12 changes to 6 files
   
-  % Trying to strip revision 1
-  saved backup bundle to $TESTTMP/1/.hg/strip-backup/*-backup.hg (glob)
+  $ cd ..
+
+Do a similar test where the manifest revlog has unordered linkrevs
+  $ mkdir manifests
+  $ cd manifests
+  $ hg init orig
+  $ cd orig
+  $ commit 'file'
+  $ commit 'other'
+  $ commit '' 'other'
+  $ HGUSER=another-user; export HGUSER
+  $ commit 'file'
+  $ commit 'other' 'file'
+  $ cd ..
+  $ hg clone -q -U -r 1 -r 2 -r 3 -r 4 orig crossed
+  $ cd crossed
+  $ hg debugindex --manifest
+     rev linkrev nodeid       p1           p2
+       0       2 6bbc6fee55c2 000000000000 000000000000
+       1       0 1c556153fe54 000000000000 000000000000
+       2       1 1f76dba919fd 000000000000 000000000000
+       3       3 bbee06ad59d5 000000000000 000000000000
+
+  $ cd ..
+  $ for i in 2 3; do
+  >     hg clone -q -U --pull crossed $i
+  >     echo "% Trying to strip revision $i"
+  >     hg --cwd $i strip $i
+  >     echo "% Verifying"
+  >     hg --cwd $i verify
+  >     echo
+  > done
+  % Trying to strip revision 2
+  saved backup bundle to $TESTTMP/manifests/2/.hg/strip-backup/f3015ad03c03-4d98bdc2-backup.hg
   % Verifying
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  checked 4 changesets with 14 changes to 7 files
+  checked 3 changesets with 3 changes to 2 files
   
-  % Trying to strip revision 2
-  saved backup bundle to $TESTTMP/2/.hg/strip-backup/*-backup.hg (glob)
+  % Trying to strip revision 3
+  saved backup bundle to $TESTTMP/manifests/3/.hg/strip-backup/9632aa303aa4-69192e3f-backup.hg
   % Verifying
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  checked 4 changesets with 14 changes to 7 files
+  checked 3 changesets with 3 changes to 2 files
   
-  % Trying to strip revision 3
-  saved backup bundle to $TESTTMP/3/.hg/strip-backup/*-backup.hg (glob)
+  $ cd ..
+
+Now a similar test for a non-root manifest revlog
+  $ cat >> $HGRCPATH <<EOF
+  > [experimental]
+  > treemanifests = yes
+  > EOF
+  $ mkdir treemanifests
+  $ cd treemanifests
+  $ hg --config experimental.treemanifest=True init orig
+  $ cd orig
+  $ commit 'dir/file'
+  $ commit 'dir/other'
+  $ commit '' 'dir/other'
+  $ HGUSER=yet-another-user; export HGUSER
+  $ commit 'otherdir dir/file'
+  $ commit 'otherdir dir/other' 'otherdir dir/file'
+  $ cd ..
+  $ hg --config experimental.treemanifest=True clone -q -U -r 1 -r 2 -r 3 -r 4 orig crossed
+  $ cd crossed
+  $ hg debugindex --dir dir
+     rev linkrev nodeid       p1           p2
+       0       2 6bbc6fee55c2 000000000000 000000000000
+       1       0 1c556153fe54 000000000000 000000000000
+       2       1 1f76dba919fd 000000000000 000000000000
+       3       3 bbee06ad59d5 000000000000 000000000000
+
+  $ cd ..
+  $ for i in 2 3; do
+  >     hg --config experimental.treemanifest=True clone -q -U --pull crossed $i
+  >     echo "% Trying to strip revision $i"
+  >     hg --cwd $i strip $i
+  >     echo "% Verifying"
+  >     hg --cwd $i verify
+  >     echo
+  > done
+  % Trying to strip revision 2
+  saved backup bundle to $TESTTMP/treemanifests/2/.hg/strip-backup/145f5c75f9ac-a105cfbe-backup.hg
   % Verifying
   checking changesets
   checking manifests
+  checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  checked 4 changesets with 19 changes to 7 files
+  checked 3 changesets with 4 changes to 3 files
   
-  % Trying to strip revision 4
-  saved backup bundle to $TESTTMP/4/.hg/strip-backup/*-backup.hg (glob)
+  % Trying to strip revision 3
+  saved backup bundle to $TESTTMP/treemanifests/3/.hg/strip-backup/e4e3de5c3cb2-f4c70376-backup.hg
   % Verifying
   checking changesets
   checking manifests
+  checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  checked 4 changesets with 19 changes to 7 files
+  checked 3 changesets with 4 changes to 3 files
   
+  $ cd ..
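
A compact way to see what "crossed" means here: in the debugindex
output above, revlog order (rev) disagrees with changelog order
(linkrev), so a strip cannot simply truncate a tail of the revlog.
A toy illustration using those exact (rev, linkrev) pairs::

    # (rev, linkrev) pairs from the manifest debugindex above.
    index = [(0, 2), (1, 0), (2, 1), (3, 3)]
    # Stripping changelog revs >= 2 must drop every revlog rev whose
    # linkrev >= 2: a non-contiguous set, hence the backup bundles.
    doomed = [rev for rev, linkrev in index if linkrev >= 2]
    assert doomed == [0, 3]
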
--- a/tests/test-subrepo.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-subrepo.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1119,7 +1119,7 @@
 Ensure a full traceback, not just the SubrepoAbort part
 
   $ hg -R issue1852b update --traceback 2>&1 | grep 'raise error\.Abort'
-      raise error.Abort(_("default path for subrepository not found"))
+      raise error.Abort(_(b"default path for subrepository not found"))
 
 Pull -u now doesn't help
 
--- a/tests/test-symlink-os-yes-fs-no.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-symlink-os-yes-fs-no.py	Mon Oct 21 11:09:48 2019 -0400
@@ -16,7 +16,7 @@
 
 # only makes sense to test on os which supports symlinks
 if not getattr(os, "symlink", False):
-    sys.exit(80) # SKIPPED_STATUS defined in run-tests.py
+    sys.exit(80)  # SKIPPED_STATUS defined in run-tests.py
 
 u = uimod.ui.load()
 # hide outer repo
@@ -36,9 +36,15 @@
 # non-symlink file system
 def symlink_failure(src, dst):
     raise OSError(1, "Operation not permitted")
+
+
 os.symlink = symlink_failure
+
+
 def islink_failure(path):
     return False
+
+
 os.path.islink = islink_failure
 
 # dereference links as if a Samba server has exported this to a
--- a/tests/test-tag.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-tag.t	Mon Oct 21 11:09:48 2019 -0400
@@ -434,6 +434,7 @@
   abeb261f0508ecebcd345ce21e7a25112df417aa
 (mimic 'hg prune' command by obsoleting current changeset and then moving to its parent)
   $ hg debugobsolete abeb261f0508ecebcd345ce21e7a25112df417aa --record-parents
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up ".^" --quiet
   $ cat .hgtags
--- a/tests/test-template-functions.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-template-functions.t	Mon Oct 21 11:09:48 2019 -0400
@@ -370,6 +370,17 @@
   @@ -0,0 +1,1 @@
   +second
 
+  $ hg --config diff.git=true log -r 8 -T "{diff()}"
+  diff --git a/second b/fourth
+  rename from second
+  rename to fourth
+  diff --git a/third b/third
+  new file mode 100644
+  --- /dev/null
+  +++ b/third
+  @@ -0,0 +1,1 @@
+  +third
+
   $ cd ..
 
 latesttag() function:
@@ -918,10 +929,13 @@
   9:c5623987d205cd6d9d8389bfc40fff9dbb670b48
   10:c562ddd9c94164376c20b86b0b4991636a3bf84f
   $ hg debugobsolete a00be79088084cb3aff086ab799f8790e01a976b
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete c5623987d205cd6d9d8389bfc40fff9dbb670b48
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete c562ddd9c94164376c20b86b0b4991636a3bf84f
+  1 new obsolescence markers
   obsoleted 1 changesets
 
  nodes starting with '11' (we don't have the revision number '11' though)
@@ -987,6 +1001,7 @@
   1:x0
 
   $ hg debugobsolete 0cf177ba2b1dc3862a00fb81715fec90950201be
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up -q 0
   $ echo 61 > a
--- a/tests/test-template-map.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-template-map.t	Mon Oct 21 11:09:48 2019 -0400
@@ -736,6 +736,18 @@
    }
   ]
 
+  $ hg log -r . -T'cbor(rev, node|short)' | "$PYTHON" "$TESTTMP/decodecborarray.py"
+  [
+   {
+    'node': '95c24699272e',
+    'rev': 8
+   }
+  ]
+
+  $ hg log -r . -T'cbor()' | "$PYTHON" "$TESTTMP/decodecborarray.py"
+  [
+   {}
+  ]
 
 Test JSON style:
 
@@ -1101,6 +1113,98 @@
    }
   ]
 
+  $ hg log -l2 -T'json(rev, parents)'
+  [
+   {"parents": ["29114dbae42b9f078cf2714dbe3a86bba8ec7453"], "rev": 8},
+   {"parents": ["0000000000000000000000000000000000000000"], "rev": 7}
+  ]
+
+  $ hg log -qr. -T'json(rev, parents)'
+  [
+   {"parents": ["29114dbae42b9f078cf2714dbe3a86bba8ec7453"], "rev": 8}
+  ]
+
+  $ hg log -r. -T'json(diff)'
+  [
+   {"diff": "diff -r 29114dbae42b -r 95c24699272e fourth\n--- /dev/null\tThu Jan 01 00:00:00 1970 +0000\n+++ b/fourth\tWed Jan 01 10:01:00 2020 +0000\n@@ -0,0 +1,1 @@\n+second\ndiff -r 29114dbae42b -r 95c24699272e second\n--- a/second\tMon Jan 12 13:46:40 1970 +0000\n+++ /dev/null\tThu Jan 01 00:00:00 1970 +0000\n@@ -1,1 +0,0 @@\n-second\ndiff -r 29114dbae42b -r 95c24699272e third\n--- /dev/null\tThu Jan 01 00:00:00 1970 +0000\n+++ b/third\tWed Jan 01 10:01:00 2020 +0000\n@@ -0,0 +1,1 @@\n+third\n"}
+  ]
+
+  $ hg log -r. -T'json(diffstat)'
+  [
+   {"diffstat": " fourth |  1 +\n second |  1 -\n third  |  1 +\n 3 files changed, 2 insertions(+), 1 deletions(-)\n"}
+  ]
+
+  $ hg log -r. -T'json(manifest)'
+  [
+   {"manifest": "94961b75a2da554b4df6fb599e5bfc7d48de0c64"}
+  ]
+
+  $ hg log -r. -T'json(extra)'
+  [
+   {"extra": {"branch": "default"}}
+  ]
+
+  $ hg log -r3 -T'json(modified)'
+  [
+   {"modified": ["c"]}
+  ]
+
+  $ hg log -r. -T'json(added)'
+  [
+   {"added": ["fourth", "third"]}
+  ]
+
+  $ hg log -r. -T'json(removed)'
+  [
+   {"removed": ["second"]}
+  ]
+
+  $ hg log -r. -T'json(files)'
+  [
+   {"files": ["fourth", "second", "third"]}
+  ]
+
+ --copies is the exception: the copies dict is built only when the --copies
+ switch is on:
+
+  $ hg log -r'.^:' -T'json(copies)' --copies
+  [
+   {"copies": {}},
+   {"copies": {"fourth": "second"}}
+  ]
+
+  $ hg log -r. -T'json()'
+  [
+   {}
+  ]
+
+Other unsupported formatter styles:
+
+  $ hg log -qr . -Tpickle
+  abort: "pickle" not in template map
+  [255]
+  $ hg log -qr . -Tdebug
+  abort: "debug" not in template map
+  [255]
+
+Unparsable function-style references:
+
+  $ hg log -qr . -T'json(-)'
+  hg: parse error at 6: not a prefix: )
+  (json(-)
+         ^ here)
+  [255]
+
+For backward compatibility, the following examples are not parsed as
+function-style references:
+
+  $ hg log -qr . -T'cbor(rev'
+  cbor(rev (no-eol)
+  $ hg log -qr . -T'json (rev)'
+  json (rev) (no-eol)
+  $ hg log -qr . -T'json(x="{rev}")'
+  json(x="8") (no-eol)
+
 Error if style not readable:
 
 #if unix-permissions no-root
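The cbor(...) templates above are piped through a $TESTTMP/decodecborarray.py
helper whose body is not part of this patch. A minimal stand-in sketch,
assuming the third-party cbor2 package rather than Mercurial's vendored
decoder (the real helper formats its output differently):

    import sys

    import cbor2  # assumption: pip-installable decoder, not the vendored one

    # hg's cbor() template style emits a single CBOR array on stdout.
    items = cbor2.loads(sys.stdin.buffer.read())
    for item in items:
        print(item)
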
--- a/tests/test-transplant.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-transplant.t	Mon Oct 21 11:09:48 2019 -0400
@@ -1,8 +1,17 @@
+#testcases commandmode continueflag
   $ cat <<EOF >> $HGRCPATH
   > [extensions]
   > transplant=
+  > graphlog=
   > EOF
 
+#if continueflag
+  $ cat >> $HGRCPATH <<EOF
+  > [alias]
+  > continue = transplant --continue
+  > EOF
+#endif
+
   $ hg init t
   $ cd t
   $ hg transplant
@@ -11,6 +20,9 @@
   $ hg transplant --continue --all
   abort: --continue is incompatible with --branch, --all and --merge
   [255]
+  $ hg transplant --stop --all
+  abort: --stop is incompatible with --branch, --all and --merge
+  [255]
   $ hg transplant --all tip
   abort: --all requires a branch revision
   [255]
@@ -350,9 +362,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   applying a53251cdf717
   a53251cdf717 transplanted to 8d9279348abb
+  added 1 changesets with 1 changes to 1 files
   $ hg log --template '{rev} {parents} {desc}\n'
   2  b3
   1  b1
@@ -368,7 +380,8 @@
   applying 722f4667af76
   722f4667af76 transplanted to 76e321915884
 
-transplant --continue
+
+transplant --continue and --stop behaviour
 
   $ hg init ../tc
   $ cd ../tc
@@ -408,6 +421,36 @@
   $ echo foobar > foo
   $ hg ci -mfoobar
   created new head
+
+Repo log before transplant
+  $ hg glog
+  @  changeset:   4:e8643552fde5
+  |  tag:         tip
+  |  parent:      0:493149fa1541
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     foobar
+  |
+  | o  changeset:   3:1dab759070cf
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     bar2
+  | |
+  | o  changeset:   2:9d6d6b5a8275
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     bar
+  | |
+  | o  changeset:   1:46ae92138f3c
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     foo2
+  |
+  o  changeset:   0:493149fa1541
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     foo
+  
   $ hg transplant 1:3
   applying 46ae92138f3c
   patching file foo
@@ -417,6 +460,49 @@
   abort: fix up the working directory and run hg transplant --continue
   [255]
 
+  $ hg transplant --stop
+  stopped the interrupted transplant
+  working directory is now at e8643552fde5
+Repo log after stopping the transplant
+  $ hg glog
+  @  changeset:   4:e8643552fde5
+  |  tag:         tip
+  |  parent:      0:493149fa1541
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     foobar
+  |
+  | o  changeset:   3:1dab759070cf
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     bar2
+  | |
+  | o  changeset:   2:9d6d6b5a8275
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     bar
+  | |
+  | o  changeset:   1:46ae92138f3c
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     foo2
+  |
+  o  changeset:   0:493149fa1541
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     foo
+  
+  $ hg transplant 1:3
+  applying 46ae92138f3c
+  file added already exists
+  1 out of 1 hunks FAILED -- saving rejects to file added.rej
+  patching file foo
+  Hunk #1 FAILED at 0
+  1 out of 1 hunks FAILED -- saving rejects to file foo.rej
+  patch failed to apply
+  abort: fix up the working directory and run hg transplant --continue
+  [255]
+
 transplant -c shouldn't use an old changeset
 
   $ hg up -C
@@ -424,8 +510,12 @@
   updated to "e8643552fde5: foobar"
   1 other heads for branch "default"
   $ rm added
-  $ hg transplant --continue
-  abort: no transplant to continue
+  $ hg continue
+  abort: no transplant to continue (continueflag !)
+  abort: no operation in progress (no-continueflag !)
+  [255]
+  $ hg transplant --stop
+  abort: no interrupted transplant found
   [255]
   $ hg transplant 1
   applying 46ae92138f3c
@@ -480,23 +570,23 @@
   [255]
   $ hg transplant 1:3
   abort: transplant in progress
-  (use 'hg transplant --continue' or 'hg update' to abort)
+  (use 'hg transplant --continue' or 'hg transplant --stop')
   [255]
   $ hg status -v
   A bar
+  ? added.rej
   ? baz.rej
   ? foo.rej
   # The repository is in an unfinished *transplant* state.
   
   # To continue:    hg transplant --continue
-  # To abort:       hg update
+  # To stop:        hg transplant --stop
   
   $ echo fixed > baz
-  $ hg transplant --continue
+  $ hg continue
   9d6d6b5a8275 transplanted as d80c49962290
   applying 1dab759070cf
   1dab759070cf transplanted to aa0ffe6bd5ae
-
   $ cd ..
 
 Issue1111: Test transplant --merge
@@ -564,9 +654,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files
   applying a53251cdf717
   4:a53251cdf717 merged at 4831f4dc831a
+  added 2 changesets with 2 changes to 2 files
 
 test interactive transplant
 
@@ -881,7 +971,7 @@
   [255]
   $ hg status
   ? b.rej
-  $ hg transplant --continue
+  $ hg continue
   645035761929 skipped due to empty diff
 
   $ cd ..
--- a/tests/test-trusted.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-trusted.py	Mon Oct 21 11:09:48 2019 -0400
@@ -20,20 +20,33 @@
 basehgrc = f.read()
 f.close()
 
+
 def _maybesysstr(v):
     if isinstance(v, bytes):
         return pycompat.sysstr(v)
     return pycompat.sysstr(stringutil.pprint(v))
 
+
 def bprint(*args, **kwargs):
-    print(*[_maybesysstr(a) for a in args],
-          **{k: _maybesysstr(v) for k, v in kwargs.items()})
+    print(
+        *[_maybesysstr(a) for a in args],
+        **{k: _maybesysstr(v) for k, v in kwargs.items()}
+    )
     # avoid awkward interleaving with ui object's output
     sys.stdout.flush()
 
-def testui(user=b'foo', group=b'bar', tusers=(), tgroups=(),
-           cuser=b'foo', cgroup=b'bar', debug=False, silent=False,
-           report=True):
+
+def testui(
+    user=b'foo',
+    group=b'bar',
+    tusers=(),
+    tgroups=(),
+    cuser=b'foo',
+    cgroup=b'bar',
+    debug=False,
+    silent=False,
+    report=True,
+):
     # user, group => owners of the file
     # tusers, tgroups => trusted users/groups
     # cuser, cgroup => user/group of the current process
@@ -58,29 +71,33 @@
         if uid is None:
             return cuser
         return user
+
     util.username = username
 
     def groupname(gid=None):
         if gid is None:
             return b'bar'
         return group
+
     util.groupname = groupname
 
     def isowner(st):
         return user == cuser
+
     util.isowner = isowner
 
     # try to read everything
-    #print '# File belongs to user %s, group %s' % (user, group)
-    #print '# trusted users = %s; trusted groups = %s' % (tusers, tgroups)
+    # print '# File belongs to user %s, group %s' % (user, group)
+    # print '# trusted users = %s; trusted groups = %s' % (tusers, tgroups)
     kind = (b'different', b'same')
     who = (b'', b'user', b'group', b'user and the group')
-    trusted = who[(user in tusers) + 2*(group in tgroups)]
+    trusted = who[(user in tusers) + 2 * (group in tgroups)]
     if trusted:
         trusted = b', but we trust the ' + trusted
-    bprint(b'# %s user, %s group%s' % (kind[user == cuser],
-                                       kind[group == cgroup],
-                                       trusted))
+    bprint(
+        b'# %s user, %s group%s'
+        % (kind[user == cuser], kind[group == cgroup], trusted)
+    )
 
     u = uimod.ui.load()
     # disable the configuration registration warning
@@ -101,14 +118,15 @@
     bprint(b'untrusted')
     for name, path in u.configitems(b'paths', untrusted=True):
         bprint(b'.', end=b' ')
-        u.config(b'paths', name) # warning with debug=True
+        u.config(b'paths', name)  # warning with debug=True
         bprint(b'.', end=b' ')
-        u.config(b'paths', name, untrusted=True) # no warnings
+        u.config(b'paths', name, untrusted=True)  # no warnings
         bprint(name, b'=', util.pconvert(path))
     print()
 
     return u
 
+
 os.mkdir(b'repo')
 os.chdir(b'repo')
 os.mkdir(b'.hg')
@@ -117,7 +135,7 @@
 f.write(b'local = /another/path\n\n')
 f.close()
 
-#print '# Everything is run by user foo, group bar\n'
+# print '# Everything is run by user foo, group bar\n'
 
 # same user, same group
 testui()
@@ -149,12 +167,20 @@
 testui(user=b'abc', group=b'def', tusers=[b'def'], tgroups=[b'abc'])
 # ... lists of user names work
 bprint(b"# list of user names")
-testui(user=b'abc', group=b'def', tusers=[b'foo', b'xyz', b'abc', b'bleh'],
-       tgroups=[b'bar', b'baz', b'qux'])
+testui(
+    user=b'abc',
+    group=b'def',
+    tusers=[b'foo', b'xyz', b'abc', b'bleh'],
+    tgroups=[b'bar', b'baz', b'qux'],
+)
 # ... lists of group names work
 bprint(b"# list of group names")
-testui(user=b'abc', group=b'def', tusers=[b'foo', b'xyz', b'bleh'],
-       tgroups=[b'bar', b'def', b'baz', b'qux'])
+testui(
+    user=b'abc',
+    group=b'def',
+    tusers=[b'foo', b'xyz', b'bleh'],
+    tgroups=[b'bar', b'def', b'baz', b'qux'],
+)
 
 bprint(b"# Can't figure out the name of the user running this process")
 testui(user=b'abc', group=b'def', cuser=None)
@@ -190,8 +216,12 @@
 u.setconfig(b'ui', b'debug', b'on')
 u.readconfig(filename)
 u2 = u.copy()
+
+
 def username(uid=None):
     return b'foo'
+
+
 util.username = username
 u2.readconfig(b'.hg/hgrc')
 bprint(b'trusted:')
@@ -202,6 +232,7 @@
 print()
 bprint(b"# error handling")
 
+
 def assertraises(f, exc=error.Abort):
     try:
         f()
@@ -210,6 +241,7 @@
     else:
         bprint(b'no exception?!')
 
+
 bprint(b"# file doesn't exist")
 os.unlink(b'.hg/hgrc')
 assert not os.path.exists(b'.hg/hgrc')
@@ -232,6 +264,7 @@
 
     return error.ParseError(*args)
 
+
 try:
     testui(user=b'abc', group=b'def', silent=True)
 except error.ParseError as inst:
@@ -245,7 +278,8 @@
 print()
 bprint(b'# access typed information')
 with open(b'.hg/hgrc', 'wb') as f:
-    f.write(b'''\
+    f.write(
+        b'''\
 [foo]
 sub=main
 sub:one=one
@@ -255,34 +289,43 @@
 int=42
 bytes=81mb
 list=spam,ham,eggs
-''')
+'''
+    )
 u = testui(user=b'abc', group=b'def', cuser=b'foo', silent=True)
+
+
 def configpath(section, name, default=None, untrusted=False):
     path = u.configpath(section, name, default, untrusted)
     if path is None:
         return None
     return util.pconvert(path)
 
+
 bprint(b'# suboptions, trusted and untrusted')
 trusted = u.configsuboptions(b'foo', b'sub')
 untrusted = u.configsuboptions(b'foo', b'sub', untrusted=True)
 bprint(
     (trusted[0], sorted(trusted[1].items())),
-    (untrusted[0], sorted(untrusted[1].items())))
+    (untrusted[0], sorted(untrusted[1].items())),
+)
 bprint(b'# path, trusted and untrusted')
 bprint(configpath(b'foo', b'path'), configpath(b'foo', b'path', untrusted=True))
 bprint(b'# bool, trusted and untrusted')
-bprint(u.configbool(b'foo', b'bool'),
-       u.configbool(b'foo', b'bool', untrusted=True))
+bprint(
+    u.configbool(b'foo', b'bool'), u.configbool(b'foo', b'bool', untrusted=True)
+)
 bprint(b'# int, trusted and untrusted')
 bprint(
     u.configint(b'foo', b'int', 0),
-    u.configint(b'foo', b'int', 0, untrusted=True))
+    u.configint(b'foo', b'int', 0, untrusted=True),
+)
 bprint(b'# bytes, trusted and untrusted')
 bprint(
     u.configbytes(b'foo', b'bytes', 0),
-    u.configbytes(b'foo', b'bytes', 0, untrusted=True))
+    u.configbytes(b'foo', b'bytes', 0, untrusted=True),
+)
 bprint(b'# list, trusted and untrusted')
 bprint(
     u.configlist(b'foo', b'list', []),
-    u.configlist(b'foo', b'list', [], untrusted=True))
+    u.configlist(b'foo', b'list', [], untrusted=True),
+)
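The test-trusted.py hunks above (like the similar hunks in the files that
follow) are mechanical reformats: 80-column lines, exploded argument lists
with trailing commas, and string quoting left as written. A small sketch of
applying that style programmatically, assuming a black release where
black.Mode accepts these arguments and using the settings these hunks imply:

    import black

    # 80 columns; string_normalization=False leaves quotes as written.
    mode = black.Mode(line_length=80, string_normalization=False)
    src = (
        "testui(user=b'abc', group=b'def', tusers=[b'foo', b'xyz', b'abc',"
        " b'bleh'], tgroups=[b'bar', b'baz', b'qux'])\n"
    )
    print(black.format_str(src, mode=mode))
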
--- a/tests/test-ui-color.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-ui-color.py	Mon Oct 21 11:09:48 2019 -0400
@@ -5,15 +5,13 @@
     dispatch,
     ui as uimod,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 # ensure errors aren't buffered
 testui = uimod.ui()
 testui.pushbuffer()
-testui.write((b'buffered\n'))
-testui.warn((b'warning\n'))
+testui.writenoi18n(b'buffered\n')
+testui.warnnoi18n(b'warning\n')
 testui.write_err(b'error\n')
 print(stringutil.pprint(testui.popbuffer(), bprefix=True).decode('ascii'))
 
@@ -34,6 +32,7 @@
 def runcmd():
     dispatch.dispatch(dispatch.request([b'version', b'-q'], ui_))
 
+
 runcmd()
 print("colored? %s" % (ui_._colormode is not None))
 runcmd()
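The writenoi18n()/warnnoi18n() calls introduced above are byte-literal
variants of ui.write()/ui.warn() that skip the translation lookup. A minimal
sketch of the push/pop buffering pattern this test exercises, runnable from a
Mercurial checkout:

    from mercurial import ui as uimod

    u = uimod.ui()
    u.pushbuffer()                # start capturing ui.write output
    u.writenoi18n(b'buffered\n')  # literal bytes, no i18n lookup
    assert u.popbuffer() == b'buffered\n'
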
--- a/tests/test-ui-config.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-ui-config.py	Mon Oct 21 11:09:48 2019 -0400
@@ -5,9 +5,7 @@
     pycompat,
     ui as uimod,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
 
 testui = uimod.ui.load()
 
@@ -19,40 +17,45 @@
 testui.setconfig(b'devel', b'warn-config-unknown', False, b'test')
 testui.setconfig(b'devel', b'all-warnings', False, b'test')
 
-parsed = dispatch._parseconfig(testui, [
-    b'values.string=string value',
-    b'values.bool1=true',
-    b'values.bool2=false',
-    b'values.boolinvalid=foo',
-    b'values.int1=42',
-    b'values.int2=-42',
-    b'values.intinvalid=foo',
-    b'lists.list1=foo',
-    b'lists.list2=foo bar baz',
-    b'lists.list3=alice, bob',
-    b'lists.list4=foo bar baz alice, bob',
-    b'lists.list5=abc d"ef"g "hij def"',
-    b'lists.list6="hello world", "how are you?"',
-    b'lists.list7=Do"Not"Separate',
-    b'lists.list8="Do"Separate',
-    b'lists.list9="Do\\"NotSeparate"',
-    b'lists.list10=string "with extraneous" quotation mark"',
-    b'lists.list11=x, y',
-    b'lists.list12="x", "y"',
-    b'lists.list13=""" key = "x", "y" """',
-    b'lists.list14=,,,,     ',
-    b'lists.list15=" just with starting quotation',
-    b'lists.list16="longer quotation" with "no ending quotation',
-    b'lists.list17=this is \\" "not a quotation mark"',
-    b'lists.list18=\n \n\nding\ndong',
-    b'date.epoch=0 0',
-    b'date.birth=2005-04-19T00:00:00',
-    b'date.invalid=0'
-    ])
+parsed = dispatch._parseconfig(
+    testui,
+    [
+        b'values.string=string value',
+        b'values.bool1=true',
+        b'values.bool2=false',
+        b'values.boolinvalid=foo',
+        b'values.int1=42',
+        b'values.int2=-42',
+        b'values.intinvalid=foo',
+        b'lists.list1=foo',
+        b'lists.list2=foo bar baz',
+        b'lists.list3=alice, bob',
+        b'lists.list4=foo bar baz alice, bob',
+        b'lists.list5=abc d"ef"g "hij def"',
+        b'lists.list6="hello world", "how are you?"',
+        b'lists.list7=Do"Not"Separate',
+        b'lists.list8="Do"Separate',
+        b'lists.list9="Do\\"NotSeparate"',
+        b'lists.list10=string "with extraneous" quotation mark"',
+        b'lists.list11=x, y',
+        b'lists.list12="x", "y"',
+        b'lists.list13=""" key = "x", "y" """',
+        b'lists.list14=,,,,     ',
+        b'lists.list15=" just with starting quotation',
+        b'lists.list16="longer quotation" with "no ending quotation',
+        b'lists.list17=this is \\" "not a quotation mark"',
+        b'lists.list18=\n \n\nding\ndong',
+        b'date.epoch=0 0',
+        b'date.birth=2005-04-19T00:00:00',
+        b'date.invalid=0',
+    ],
+)
+
 
 def pprint(obj):
     return stringutil.pprint(obj).decode('ascii')
 
+
 print(pprint(testui.configitems(b'values')))
 print(pprint(testui.configitems(b'lists')))
 print("---")
@@ -107,9 +110,11 @@
 
 print(pprint(testui.config(b'values', b'String')))
 
+
 def function():
     pass
 
+
 # values that aren't strings should work
 testui.setconfig(b'hook', b'commit', function)
 print(function == testui.config(b'hook', b'commit'))
--- a/tests/test-ui-verbosity.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-ui-verbosity.py	Mon Oct 21 11:09:48 2019 -0400
@@ -18,12 +18,12 @@
 print('    quiet verbo debug   quiet verbo debug      quiet verbo debug')
 
 for i in xrange(64):
-    hgrc_quiet   = bool(i & 1<<0)
-    hgrc_verbose = bool(i & 1<<1)
-    hgrc_debug   = bool(i & 1<<2)
-    cmd_quiet    = bool(i & 1<<3)
-    cmd_verbose  = bool(i & 1<<4)
-    cmd_debug    = bool(i & 1<<5)
+    hgrc_quiet = bool(i & 1 << 0)
+    hgrc_verbose = bool(i & 1 << 1)
+    hgrc_debug = bool(i & 1 << 2)
+    cmd_quiet = bool(i & 1 << 3)
+    cmd_verbose = bool(i & 1 << 4)
+    cmd_debug = bool(i & 1 << 5)
 
     f = open(hgrc, 'w')
     f.write(basehgrc)
@@ -49,7 +49,21 @@
     elif u.verbose and u.quiet:
         check = ' +'
 
-    print(('%2d  %5s %5s %5s   %5s %5s %5s  ->  %5s %5s %5s%s'
-           % (i, hgrc_quiet, hgrc_verbose, hgrc_debug,
-              cmd_quiet, cmd_verbose, cmd_debug,
-              u.quiet, u.verbose, u.debugflag, check)))
+    print(
+        (
+            '%2d  %5s %5s %5s   %5s %5s %5s  ->  %5s %5s %5s%s'
+            % (
+                i,
+                hgrc_quiet,
+                hgrc_verbose,
+                hgrc_debug,
+                cmd_quiet,
+                cmd_verbose,
+                cmd_debug,
+                u.quiet,
+                u.verbose,
+                u.debugflag,
+                check,
+            )
+        )
+    )
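The loop above treats its counter as a six-bit field, one bit per
quiet/verbose/debug switch, so iterating over 64 values covers every
combination exactly once. A condensed sketch of the same idiom:

    # Bits 0-2: hgrc settings; bits 3-5: command-line flags.
    for i in range(64):
        (hgrc_quiet, hgrc_verbose, hgrc_debug,
         cmd_quiet, cmd_verbose, cmd_debug) = (
            bool(i & (1 << bit)) for bit in range(6)
        )
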
--- a/tests/test-uncommit.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-uncommit.t	Mon Oct 21 11:09:48 2019 -0400
@@ -34,10 +34,17 @@
   
   options ([+] can be repeated):
   
-      --keep                     allow an empty commit after uncommiting
+      --keep                     allow an empty commit after uncommitting
       --allow-dirty-working-copy allow uncommit with outstanding changes
+   -n --note TEXT                store a note on uncommit
    -I --include PATTERN [+]      include names matching the given patterns
    -X --exclude PATTERN [+]      exclude names matching the given patterns
+   -m --message TEXT             use text as commit message
+   -l --logfile FILE             read commit message from file
+   -d --date DATE                record the specified date as commit date
+   -u --user USER                record the specified user as committer
+   -D --currentdate              record the current date as commit date
+   -U --currentuser              record the current user as committer
   
   (some details hidden, use --verbose to show complete help)
 
@@ -531,13 +538,30 @@
   $ mkdir dir
   $ echo 1 > dir/file.txt
   $ hg ci -Aqm 'add file in directory'
-  $ hg uncommit dir
+  $ hg uncommit dir -m 'uncommit with message' -u 'different user' \
+  >                 -d 'Jun 30 12:12:12 1980 +0000'
   $ hg status
   A dir/file.txt
+  $ hg log -r .
+  changeset:   8:b4dd26dc42e0
+  tag:         tip
+  parent:      6:2278a4c24330
+  user:        different user
+  date:        Mon Jun 30 12:12:12 1980 +0000
+  summary:     uncommit with message
+  
+Bad option combinations
+
+  $ hg rollback -q --config ui.rollback=True
+  $ hg uncommit -U --user 'user'
+  abort: --user and --currentuser are mutually exclusive
+  [255]
+  $ hg uncommit -D --date today
+  abort: --date and --currentdate are mutually exclusive
+  [255]
 
 `uncommit <dir>` and `cd <dir> && uncommit .` behave the same...
 
-  $ hg rollback -q --config ui.rollback=True
   $ echo 2 > dir/file2.txt
   $ hg ci -Aqm 'add file2 in directory'
   $ hg uncommit dir
@@ -547,7 +571,7 @@
 
   $ hg rollback -q --config ui.rollback=True
   $ cd dir
-  $ hg uncommit .
+  $ hg uncommit . -n 'this is a note'
   note: keeping empty commit
   $ hg status
   A dir/file2.txt
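The "Bad option combinations" block above exercises pairs of flags that may
not be combined. This is not Mercurial's actual implementation, but the rule
those aborts enforce reduces to a check like this hypothetical helper:

    def check_exclusive(opts, a, b):
        # Abort when two mutually exclusive options are both set.
        if opts.get(a) and opts.get(b):
            raise SystemExit(
                'abort: --%s and --%s are mutually exclusive' % (a, b)
            )

    try:
        check_exclusive({'user': 'user', 'currentuser': True},
                        'user', 'currentuser')
    except SystemExit as e:
        print(e)  # abort: --user and --currentuser are mutually exclusive
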
--- a/tests/test-update-branches.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-update-branches.t	Mon Oct 21 11:09:48 2019 -0400
@@ -502,8 +502,10 @@
   $ hg id --debug -i -r 4
   d047485b3896813b2a624e86201983520f003206
   $ hg debugobsolete 6efa171f091b00a3c35edc15d48c52a498929953 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa d047485b3896813b2a624e86201983520f003206
+  1 new obsolescence markers
 
 Test that 5 is detected as a valid destination from 3 and also accepts moving
 the bookmark (issue4015)
@@ -575,6 +577,7 @@
   $ hg up --quiet 0
   $ hg up --quiet 2
   $ hg debugobsolete bd10386d478cd5a9faf2e604114c8e6da62d3889
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-upgrade-repo.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-upgrade-repo.t	Mon Oct 21 11:09:48 2019 -0400
@@ -57,6 +57,8 @@
   dotencode:         yes
   generaldelta:      yes
   sparserevlog:      yes
+  sidedata:           no
+  copies-sdc:         no
   plain-cl-delta:    yes
   compression:       zlib
   compression-level: default
@@ -66,6 +68,8 @@
   dotencode:         yes    yes     yes
   generaldelta:      yes    yes     yes
   sparserevlog:      yes    yes     yes
+  sidedata:           no     no      no
+  copies-sdc:         no     no      no
   plain-cl-delta:    yes    yes     yes
   compression:       zlib   zlib    zlib
   compression-level: default default default
@@ -75,6 +79,8 @@
   dotencode:         yes     no     yes
   generaldelta:      yes    yes     yes
   sparserevlog:      yes    yes     yes
+  sidedata:           no     no      no
+  copies-sdc:         no     no      no
   plain-cl-delta:    yes    yes     yes
   compression:       zlib   zlib    zlib
   compression-level: default default default
@@ -84,6 +90,8 @@
   [formatvariant.name.mismatchconfig|dotencode:        ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special|     no][formatvariant.default|     yes]
   [formatvariant.name.uptodate|generaldelta:     ][formatvariant.repo.uptodate| yes][formatvariant.config.default|    yes][formatvariant.default|     yes]
   [formatvariant.name.uptodate|sparserevlog:     ][formatvariant.repo.uptodate| yes][formatvariant.config.default|    yes][formatvariant.default|     yes]
+  [formatvariant.name.uptodate|sidedata:         ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
+  [formatvariant.name.uptodate|copies-sdc:       ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
   [formatvariant.name.uptodate|plain-cl-delta:   ][formatvariant.repo.uptodate| yes][formatvariant.config.default|    yes][formatvariant.default|     yes]
   [formatvariant.name.uptodate|compression:      ][formatvariant.repo.uptodate| zlib][formatvariant.config.default|   zlib][formatvariant.default|    zlib]
   [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
@@ -114,6 +122,18 @@
     "repo": true
    },
    {
+    "config": false,
+    "default": false,
+    "name": "sidedata",
+    "repo": false
+   },
+   {
+    "config": false,
+    "default": false,
+    "name": "copies-sdc",
+    "repo": false
+   },
+   {
     "config": true,
     "default": true,
     "name": "plain-cl-delta",
@@ -139,6 +159,12 @@
   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   additional optimizations are available by specifying "--optimize <name>":
   
   re-delta-parent
@@ -163,6 +189,12 @@
   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   re-delta-parent
      deltas within internal storage will choose a new base revision if needed
   
@@ -187,6 +219,12 @@
   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   re-delta-parent
      deltas within internal storage will choose a new base revision if needed
   
@@ -222,6 +260,8 @@
   dotencode:          no
   generaldelta:       no
   sparserevlog:       no
+  sidedata:           no
+  copies-sdc:         no
   plain-cl-delta:    yes
   compression:       zlib
   compression-level: default
@@ -231,6 +271,8 @@
   dotencode:          no    yes     yes
   generaldelta:       no    yes     yes
   sparserevlog:       no    yes     yes
+  sidedata:           no     no      no
+  copies-sdc:         no     no      no
   plain-cl-delta:    yes    yes     yes
   compression:       zlib   zlib    zlib
   compression-level: default default default
@@ -240,6 +282,8 @@
   dotencode:          no    yes     yes
   generaldelta:       no     no     yes
   sparserevlog:       no     no     yes
+  sidedata:           no     no      no
+  copies-sdc:         no     no      no
   plain-cl-delta:    yes    yes     yes
   compression:       zlib   zlib    zlib
   compression-level: default default default
@@ -249,6 +293,8 @@
   [formatvariant.name.mismatchconfig|dotencode:        ][formatvariant.repo.mismatchconfig|  no][formatvariant.config.default|    yes][formatvariant.default|     yes]
   [formatvariant.name.mismatchdefault|generaldelta:     ][formatvariant.repo.mismatchdefault|  no][formatvariant.config.special|     no][formatvariant.default|     yes]
   [formatvariant.name.mismatchdefault|sparserevlog:     ][formatvariant.repo.mismatchdefault|  no][formatvariant.config.special|     no][formatvariant.default|     yes]
+  [formatvariant.name.uptodate|sidedata:         ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
+  [formatvariant.name.uptodate|copies-sdc:       ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
   [formatvariant.name.uptodate|plain-cl-delta:   ][formatvariant.repo.uptodate| yes][formatvariant.config.default|    yes][formatvariant.default|     yes]
   [formatvariant.name.uptodate|compression:      ][formatvariant.repo.uptodate| zlib][formatvariant.config.default|   zlib][formatvariant.default|    zlib]
   [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
@@ -286,6 +332,12 @@
   sparserevlog
      Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   additional optimizations are available by specifying "--optimize <name>":
   
   re-delta-parent
@@ -334,6 +386,12 @@
   sparserevlog
      Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   additional optimizations are available by specifying "--optimize <name>":
   
   re-delta-parent
@@ -360,6 +418,12 @@
   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   beginning upgrade...
   repository locked and read-only
   creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
@@ -381,12 +445,26 @@
   $ cd upgradegd
   $ touch f0
   $ hg -q commit -A -m initial
-  $ touch f1
+  $ mkdir FooBarDirectory.d
+  $ touch FooBarDirectory.d/f1
   $ hg -q commit -A -m 'add f1'
   $ hg -q up -r 0
-  $ touch f2
+  >>> from __future__ import absolute_import, print_function
+  >>> import random
+  >>> random.seed(0) # have a reproducible content
+  >>> with open("f2", "wb") as f:
+  ...     for i in range(100000):
+  ...         f.write(b"%d\n" % random.randint(1000000000, 9999999999)) and None
   $ hg -q commit -A -m 'add f2'
 
+Make sure we have a .d file
+
+  $ ls -d .hg/store/data/*
+  .hg/store/data/_foo_bar_directory.d.hg
+  .hg/store/data/f0.i
+  .hg/store/data/f2.d
+  .hg/store/data/f2.i
+
   $ hg debugupgraderepo --run --config format.sparse-revlog=false
   upgrade will perform the following actions:
   
@@ -397,19 +475,25 @@
   generaldelta
      repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   beginning upgrade...
   repository locked and read-only
   creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
   (it is safe to interrupt this process any time before data migration completes)
   migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
-  migrating 917 bytes in store; 401 bytes tracked data
-  migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
+  migrating 519 KB in store; 1.05 MB tracked data
+  migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
   finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
-  migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
-  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
-  migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
+  migrating 1 manifests containing 3 revisions (384 bytes in store; 238 bytes tracked data)
+  finished migrating 3 manifest revisions across 1 manifests; change in size: -17 bytes
+  migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
   finished migrating 3 changelog revisions; change in size: 0 bytes
-  finished migrating 9 total revisions; total change in store size: 0 bytes
+  finished migrating 9 total revisions; total change in store size: -17 bytes
   copying phaseroots
   data fully migrated to temporary repository
   marking source repository as being upgraded; clients will be unable to read from repository
@@ -493,17 +577,23 @@
   sparserevlog
      Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   beginning upgrade...
   repository locked and read-only
   creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
   (it is safe to interrupt this process any time before data migration completes)
   migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
-  migrating 917 bytes in store; 401 bytes tracked data
-  migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
+  migrating 519 KB in store; 1.05 MB tracked data
+  migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
   finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
-  migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
+  migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
   finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
-  migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
+  migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
   finished migrating 3 changelog revisions; change in size: 0 bytes
   finished migrating 9 total revisions; total change in store size: 0 bytes
   copying phaseroots
@@ -518,9 +608,360 @@
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
   $ ls -1 .hg/ | grep upgradebackup
   [1]
+
+We can restrict the optimization to a subset of the revlogs:
+
+  $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+  
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
+  re-delta-parent
+     deltas within internal storage will choose a new base revision if needed
+  
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
+  migrating 519 KB in store; 1.05 MB tracked data
+  migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
+  blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
+  blindly copying data/f0.i containing 1 revisions
+  blindly copying data/f2.i containing 1 revisions
+  finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
+  migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
+  cloning 3 revisions from 00manifest.i
+  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
+  migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
+  blindly copying 00changelog.i containing 3 revisions
+  finished migrating 3 changelog revisions; change in size: 0 bytes
+  finished migrating 9 total revisions; total change in store size: 0 bytes
+  copying phaseroots
+  data fully migrated to temporary repository
+  marking source repository as being upgraded; clients will be unable to read from repository
+  starting in-place swap of repository data
+  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  replacing store...
+  store replacement complete; repository was inconsistent for *s (glob)
+  finalizing requirements file and making repository readable again
+  removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+
+Check that the repo still works fine
+
+  $ hg log -G --stat
+  @  changeset:   2:76d4395f5413 (no-py3 !)
+  @  changeset:   2:fca376863211 (py3 !)
+  |  tag:         tip
+  |  parent:      0:ba592bf28da2
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     add f2
+  |
+  |   f2 |  100000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+  |   1 files changed, 100000 insertions(+), 0 deletions(-)
+  |
+  | o  changeset:   1:2029ce2354e2
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     add f1
+  |
+  |
+  o  changeset:   0:ba592bf28da2
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     initial
+  
+  
+
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 3 changesets with 3 changes to 3 files
+
+Check that we can select negatively
+
+  $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+  
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
+  re-delta-parent
+     deltas within internal storage will choose a new base revision if needed
+  
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
+  migrating 519 KB in store; 1.05 MB tracked data
+  migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
+  cloning 1 revisions from data/FooBarDirectory.d/f1.i
+  cloning 1 revisions from data/f0.i
+  cloning 1 revisions from data/f2.i
+  finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
+  migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
+  blindly copying 00manifest.i containing 3 revisions
+  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
+  migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
+  cloning 3 revisions from 00changelog.i
+  finished migrating 3 changelog revisions; change in size: 0 bytes
+  finished migrating 9 total revisions; total change in store size: 0 bytes
+  copying phaseroots
+  data fully migrated to temporary repository
+  marking source repository as being upgraded; clients will be unable to read from repository
+  starting in-place swap of repository data
+  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  replacing store...
+  store replacement complete; repository was inconsistent for *s (glob)
+  finalizing requirements file and making repository readable again
+  removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 3 changesets with 3 changes to 3 files
+
+Check that we can select changelog only
+
+  $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+  
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
+  re-delta-parent
+     deltas within internal storage will choose a new base revision if needed
+  
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
+  migrating 519 KB in store; 1.05 MB tracked data
+  migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
+  blindly copying data/FooBarDirectory.d/f1.i containing 1 revisions
+  blindly copying data/f0.i containing 1 revisions
+  blindly copying data/f2.i containing 1 revisions
+  finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
+  migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
+  blindly copying 00manifest.i containing 3 revisions
+  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
+  migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
+  cloning 3 revisions from 00changelog.i
+  finished migrating 3 changelog revisions; change in size: 0 bytes
+  finished migrating 9 total revisions; total change in store size: 0 bytes
+  copying phaseroots
+  data fully migrated to temporary repository
+  marking source repository as being upgraded; clients will be unable to read from repository
+  starting in-place swap of repository data
+  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  replacing store...
+  store replacement complete; repository was inconsistent for *s (glob)
+  finalizing requirements file and making repository readable again
+  removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 3 changesets with 3 changes to 3 files
+
+Check that we can select filelog only
+
+  $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+  
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
+  re-delta-parent
+     deltas within internal storage will choose a new base revision if needed
+  
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
+  migrating 519 KB in store; 1.05 MB tracked data
+  migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
+  cloning 1 revisions from data/FooBarDirectory.d/f1.i
+  cloning 1 revisions from data/f0.i
+  cloning 1 revisions from data/f2.i
+  finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
+  migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
+  blindly copying 00manifest.i containing 3 revisions
+  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
+  migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
+  blindly copying 00changelog.i containing 3 revisions
+  finished migrating 3 changelog revisions; change in size: 0 bytes
+  finished migrating 9 total revisions; total change in store size: 0 bytes
+  copying phaseroots
+  data fully migrated to temporary repository
+  marking source repository as being upgraded; clients will be unable to read from repository
+  starting in-place swap of repository data
+  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  replacing store...
+  store replacement complete; repository was inconsistent for *s (glob)
+  finalizing requirements file and making repository readable again
+  removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 3 changesets with 3 changes to 3 files
+
+
+Check that the revlog clone can't be skipped during an important format downgrade
+
+  $ echo "[format]" > .hg/hgrc
+  $ echo "sparse-revlog=no" >> .hg/hgrc
+  $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
+  ignoring revlogs selection flags, format requirements change: sparserevlog
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: dotencode, fncache, generaldelta, revlogv1, store
+     removed: sparserevlog
+  
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
+  re-delta-parent
+     deltas within internal storage will choose a new base revision if needed
+  
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
+  migrating 519 KB in store; 1.05 MB tracked data
+  migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
+  cloning 1 revisions from data/FooBarDirectory.d/f1.i
+  cloning 1 revisions from data/f0.i
+  cloning 1 revisions from data/f2.i
+  finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
+  migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
+  cloning 3 revisions from 00manifest.i
+  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
+  migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
+  cloning 3 revisions from 00changelog.i
+  finished migrating 3 changelog revisions; change in size: 0 bytes
+  finished migrating 9 total revisions; total change in store size: 0 bytes
+  copying phaseroots
+  data fully migrated to temporary repository
+  marking source repository as being upgraded; clients will be unable to read from repository
+  starting in-place swap of repository data
+  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  replacing store...
+  store replacement complete; repository was inconsistent for *s (glob)
+  finalizing requirements file and making repository readable again
+  removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 3 changesets with 3 changes to 3 files
+
+Check that the revlog clone can't be skipped during an important format upgrade
+
+  $ echo "sparse-revlog=yes" >> .hg/hgrc
+  $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
+  ignoring revlogs selection flags, format requirements change: sparserevlog
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: dotencode, fncache, generaldelta, revlogv1, store
+     added: sparserevlog
+  
+  sparserevlog
+     Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
+  
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
+  re-delta-parent
+     deltas within internal storage will choose a new base revision if needed
+  
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
+  migrating 519 KB in store; 1.05 MB tracked data
+  migrating 3 filelogs containing 3 revisions (518 KB in store; 1.05 MB tracked data)
+  cloning 1 revisions from data/FooBarDirectory.d/f1.i
+  cloning 1 revisions from data/f0.i
+  cloning 1 revisions from data/f2.i
+  finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
+  migrating 1 manifests containing 3 revisions (367 bytes in store; 238 bytes tracked data)
+  cloning 3 revisions from 00manifest.i
+  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
+  migrating changelog containing 3 revisions (394 bytes in store; 199 bytes tracked data)
+  cloning 3 revisions from 00changelog.i
+  finished migrating 3 changelog revisions; change in size: 0 bytes
+  finished migrating 9 total revisions; total change in store size: 0 bytes
+  copying phaseroots
+  data fully migrated to temporary repository
+  marking source repository as being upgraded; clients will be unable to read from repository
+  starting in-place swap of repository data
+  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  replacing store...
+  store replacement complete; repository was inconsistent for *s (glob)
+  finalizing requirements file and making repository readable again
+  removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 3 changesets with 3 changes to 3 files
+
   $ cd ..
 
-
 store files with special filenames aren't encoded during copy
 
   $ hg init store-filenames
@@ -535,6 +976,12 @@
   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   beginning upgrade...
   repository locked and read-only
   creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
@@ -566,6 +1013,12 @@
   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   re-delta-fulladd
      each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
   
@@ -630,6 +1083,12 @@
   requirements
      preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   beginning upgrade...
   repository locked and read-only
   creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
@@ -683,6 +1142,12 @@
   requirements
      preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   beginning upgrade...
   repository locked and read-only
   creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
@@ -778,6 +1243,12 @@
   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
   
+  sidedata
+     Allows storage of extra data alongside a revision.
+  
+  copies-sdc
+     Allows to use more efficient algorithm to deal with copy tracing.
+  
   re-delta-all
      deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
   
@@ -869,6 +1340,8 @@
   dotencode:         yes    yes     yes
   generaldelta:      yes    yes     yes
   sparserevlog:      yes    yes     yes
+  sidedata:           no     no      no
+  copies-sdc:         no     no      no
   plain-cl-delta:    yes    yes     yes
   compression:       zstd   zlib    zlib
   compression-level: default default default
@@ -890,6 +1363,8 @@
   dotencode:         yes    yes     yes
   generaldelta:      yes    yes     yes
   sparserevlog:      yes    yes     yes
+  sidedata:           no     no      no
+  copies-sdc:         no     no      no
   plain-cl-delta:    yes    yes     yes
   compression:       zlib   zlib    zlib
   compression-level: default default default
@@ -914,6 +1389,8 @@
   dotencode:         yes    yes     yes
   generaldelta:      yes    yes     yes
   sparserevlog:      yes    yes     yes
+  sidedata:           no     no      no
+  copies-sdc:         no     no      no
   plain-cl-delta:    yes    yes     yes
   compression:       zstd   zstd    zlib
   compression-level: default default default
@@ -926,6 +1403,91 @@
   sparserevlog
   store
 
-  $ cd ..
+#endif
+
+Check upgrading to a side-data revlog
+-------------------------------------
+
+upgrade
+
+  $ hg --config format.exp-use-side-data=yes debugupgraderepo --run  --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" >/dev/null
+  $ hg debugformat -v
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:          yes     no      no
+  copies-sdc:         no     no      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zstd   zstd    zlib (zstd !)
+  compression:       zlib   zlib    zlib (no-zstd !)
+  compression-level: default default default
+  $ cat .hg/requires
+  dotencode
+  exp-sidedata-flag
+  fncache
+  generaldelta
+  revlog-compression-zstd (zstd !)
+  revlogv1
+  sparserevlog
+  store
+  $ hg debugsidedata -c 0
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+
+downgrade
 
-#endif
+  $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup > /dev/null
+  $ hg debugformat -v
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:           no     no      no
+  copies-sdc:         no     no      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zstd   zstd    zlib (zstd !)
+  compression:       zlib   zlib    zlib (no-zstd !)
+  compression-level: default default default
+  $ cat .hg/requires
+  dotencode
+  fncache
+  generaldelta
+  revlog-compression-zstd (zstd !)
+  revlogv1
+  sparserevlog
+  store
+  $ hg debugsidedata -c 0
+
+upgrade from hgrc
+
+  $ cat >> .hg/hgrc << EOF
+  > [format]
+  > exp-use-side-data=yes
+  > EOF
+  $ hg debugupgraderepo --run --no-backup > /dev/null
+  $ hg debugformat -v
+  format-variant    repo config default
+  fncache:           yes    yes     yes
+  dotencode:         yes    yes     yes
+  generaldelta:      yes    yes     yes
+  sparserevlog:      yes    yes     yes
+  sidedata:          yes    yes      no
+  copies-sdc:         no     no      no
+  plain-cl-delta:    yes    yes     yes
+  compression:       zstd   zstd    zlib (zstd !)
+  compression:       zlib   zlib    zlib (no-zstd !)
+  compression-level: default default default
+  $ cat .hg/requires
+  dotencode
+  exp-sidedata-flag
+  fncache
+  generaldelta
+  revlog-compression-zstd (zstd !)
+  revlogv1
+  sparserevlog
+  store
+  $ hg debugsidedata -c 0
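The upgrade/downgrade cycle above toggles the exp-sidedata-flag entry in
.hg/requires. A tiny hypothetical helper (not part of this patch) for
checking that flag without running hg:

    import os

    def has_sidedata(repo_root):
        # .hg/requires lists one capability per line, as shown above.
        path = os.path.join(repo_root, '.hg', 'requires')
        with open(path) as f:
            return 'exp-sidedata-flag' in f.read().split()
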
--- a/tests/test-url.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-url.py	Mon Oct 21 11:09:48 2019 -0400
@@ -4,146 +4,170 @@
 import doctest
 import os
 
+
 def check(a, b):
     if a != b:
         print((a, b))
 
+
 def cert(cn):
     return {'subject': ((('commonName', cn),),)}
 
-from mercurial import (
-    sslutil,
-)
+
+from mercurial import sslutil
 
 _verifycert = sslutil._verifycert
 # Test non-wildcard certificates
-check(_verifycert(cert('example.com'), 'example.com'),
-      None)
-check(_verifycert(cert('example.com'), 'www.example.com'),
-      b'certificate is for example.com')
-check(_verifycert(cert('www.example.com'), 'example.com'),
-      b'certificate is for www.example.com')
+check(_verifycert(cert('example.com'), 'example.com'), None)
+check(
+    _verifycert(cert('example.com'), 'www.example.com'),
+    b'certificate is for example.com',
+)
+check(
+    _verifycert(cert('www.example.com'), 'example.com'),
+    b'certificate is for www.example.com',
+)
 
 # Test wildcard certificates
-check(_verifycert(cert('*.example.com'), 'www.example.com'),
-      None)
-check(_verifycert(cert('*.example.com'), 'example.com'),
-      b'certificate is for *.example.com')
-check(_verifycert(cert('*.example.com'), 'w.w.example.com'),
-      b'certificate is for *.example.com')
+check(_verifycert(cert('*.example.com'), 'www.example.com'), None)
+check(
+    _verifycert(cert('*.example.com'), 'example.com'),
+    b'certificate is for *.example.com',
+)
+check(
+    _verifycert(cert('*.example.com'), 'w.w.example.com'),
+    b'certificate is for *.example.com',
+)
 
 # Test subjectAltName
-san_cert = {'subject': ((('commonName', 'example.com'),),),
-            'subjectAltName': (('DNS', '*.example.net'),
-                               ('DNS', 'example.net'))}
-check(_verifycert(san_cert, 'example.net'),
-      None)
-check(_verifycert(san_cert, 'foo.example.net'),
-      None)
+san_cert = {
+    'subject': ((('commonName', 'example.com'),),),
+    'subjectAltName': (('DNS', '*.example.net'), ('DNS', 'example.net')),
+}
+check(_verifycert(san_cert, 'example.net'), None)
+check(_verifycert(san_cert, 'foo.example.net'), None)
 # no fallback to subject commonName when subjectAltName has DNS
-check(_verifycert(san_cert, 'example.com'),
-      b'certificate is for *.example.net, example.net')
+check(
+    _verifycert(san_cert, 'example.com'),
+    b'certificate is for *.example.net, example.net',
+)
 # fallback to subject commonName when no DNS in subjectAltName
-san_cert = {'subject': ((('commonName', 'example.com'),),),
-            'subjectAltName': (('IP Address', '8.8.8.8'),)}
+san_cert = {
+    'subject': ((('commonName', 'example.com'),),),
+    'subjectAltName': (('IP Address', '8.8.8.8'),),
+}
 check(_verifycert(san_cert, 'example.com'), None)
 
 # Avoid some pitfalls
-check(_verifycert(cert('*.foo'), 'foo'),
-      b'certificate is for *.foo')
+check(_verifycert(cert('*.foo'), 'foo'), b'certificate is for *.foo')
 check(_verifycert(cert('*o'), 'foo'), None)
 
-check(_verifycert({'subject': ()},
-                  'example.com'),
-      b'no commonName or subjectAltName found in certificate')
-check(_verifycert(None, 'example.com'),
-      b'no certificate received')
+check(
+    _verifycert({'subject': ()}, 'example.com'),
+    b'no commonName or subjectAltName found in certificate',
+)
+check(_verifycert(None, 'example.com'), b'no certificate received')
 
 # Unicode (IDN) certname isn't supported
-check(_verifycert(cert(u'\u4f8b.jp'), 'example.jp'),
-      b'IDN in certificate not supported')
+check(
+    _verifycert(cert(u'\u4f8b.jp'), 'example.jp'),
+    b'IDN in certificate not supported',
+)
 
 # The following tests are from CPython's test_ssl.py.
 check(_verifycert(cert('example.com'), 'example.com'), None)
 check(_verifycert(cert('example.com'), 'ExAmple.cOm'), None)
-check(_verifycert(cert('example.com'), 'www.example.com'),
-      b'certificate is for example.com')
-check(_verifycert(cert('example.com'), '.example.com'),
-      b'certificate is for example.com')
-check(_verifycert(cert('example.com'), 'example.org'),
-      b'certificate is for example.com')
-check(_verifycert(cert('example.com'), 'exampleXcom'),
-      b'certificate is for example.com')
+check(
+    _verifycert(cert('example.com'), 'www.example.com'),
+    b'certificate is for example.com',
+)
+check(
+    _verifycert(cert('example.com'), '.example.com'),
+    b'certificate is for example.com',
+)
+check(
+    _verifycert(cert('example.com'), 'example.org'),
+    b'certificate is for example.com',
+)
+check(
+    _verifycert(cert('example.com'), 'exampleXcom'),
+    b'certificate is for example.com',
+)
 check(_verifycert(cert('*.a.com'), 'foo.a.com'), None)
-check(_verifycert(cert('*.a.com'), 'bar.foo.a.com'),
-      b'certificate is for *.a.com')
-check(_verifycert(cert('*.a.com'), 'a.com'),
-      b'certificate is for *.a.com')
-check(_verifycert(cert('*.a.com'), 'Xa.com'),
-      b'certificate is for *.a.com')
-check(_verifycert(cert('*.a.com'), '.a.com'),
-      b'certificate is for *.a.com')
+check(
+    _verifycert(cert('*.a.com'), 'bar.foo.a.com'), b'certificate is for *.a.com'
+)
+check(_verifycert(cert('*.a.com'), 'a.com'), b'certificate is for *.a.com')
+check(_verifycert(cert('*.a.com'), 'Xa.com'), b'certificate is for *.a.com')
+check(_verifycert(cert('*.a.com'), '.a.com'), b'certificate is for *.a.com')
 
 # only match one left-most wildcard
 check(_verifycert(cert('f*.com'), 'foo.com'), None)
 check(_verifycert(cert('f*.com'), 'f.com'), None)
-check(_verifycert(cert('f*.com'), 'bar.com'),
-      b'certificate is for f*.com')
-check(_verifycert(cert('f*.com'), 'foo.a.com'),
-      b'certificate is for f*.com')
-check(_verifycert(cert('f*.com'), 'bar.foo.com'),
-      b'certificate is for f*.com')
+check(_verifycert(cert('f*.com'), 'bar.com'), b'certificate is for f*.com')
+check(_verifycert(cert('f*.com'), 'foo.a.com'), b'certificate is for f*.com')
+check(_verifycert(cert('f*.com'), 'bar.foo.com'), b'certificate is for f*.com')
 
 # NULL bytes are bad, CVE-2013-4073
-check(_verifycert(cert('null.python.org\x00example.org'),
-                  'null.python.org\x00example.org'), None)
-check(_verifycert(cert('null.python.org\x00example.org'),
-                  'example.org'),
-      b'certificate is for null.python.org\x00example.org')
-check(_verifycert(cert('null.python.org\x00example.org'),
-                  'null.python.org'),
-      b'certificate is for null.python.org\x00example.org')
+check(
+    _verifycert(
+        cert('null.python.org\x00example.org'), 'null.python.org\x00example.org'
+    ),
+    None,
+)
+check(
+    _verifycert(cert('null.python.org\x00example.org'), 'example.org'),
+    b'certificate is for null.python.org\x00example.org',
+)
+check(
+    _verifycert(cert('null.python.org\x00example.org'), 'null.python.org'),
+    b'certificate is for null.python.org\x00example.org',
+)
 
 # error cases with wildcards
-check(_verifycert(cert('*.*.a.com'), 'bar.foo.a.com'),
-      b'certificate is for *.*.a.com')
-check(_verifycert(cert('*.*.a.com'), 'a.com'),
-      b'certificate is for *.*.a.com')
-check(_verifycert(cert('*.*.a.com'), 'Xa.com'),
-      b'certificate is for *.*.a.com')
-check(_verifycert(cert('*.*.a.com'), '.a.com'),
-      b'certificate is for *.*.a.com')
+check(
+    _verifycert(cert('*.*.a.com'), 'bar.foo.a.com'),
+    b'certificate is for *.*.a.com',
+)
+check(_verifycert(cert('*.*.a.com'), 'a.com'), b'certificate is for *.*.a.com')
+check(_verifycert(cert('*.*.a.com'), 'Xa.com'), b'certificate is for *.*.a.com')
+check(_verifycert(cert('*.*.a.com'), '.a.com'), b'certificate is for *.*.a.com')
 
-check(_verifycert(cert('a.*.com'), 'a.foo.com'),
-      b'certificate is for a.*.com')
-check(_verifycert(cert('a.*.com'), 'a..com'),
-      b'certificate is for a.*.com')
-check(_verifycert(cert('a.*.com'), 'a.com'),
-      b'certificate is for a.*.com')
+check(_verifycert(cert('a.*.com'), 'a.foo.com'), b'certificate is for a.*.com')
+check(_verifycert(cert('a.*.com'), 'a..com'), b'certificate is for a.*.com')
+check(_verifycert(cert('a.*.com'), 'a.com'), b'certificate is for a.*.com')
 
 # wildcard doesn't match IDNA prefix 'xn--'
 idna = u'püthon.python.org'.encode('idna').decode('ascii')
 check(_verifycert(cert(idna), idna), None)
-check(_verifycert(cert('x*.python.org'), idna),
-      b'certificate is for x*.python.org')
-check(_verifycert(cert('xn--p*.python.org'), idna),
-      b'certificate is for xn--p*.python.org')
+check(
+    _verifycert(cert('x*.python.org'), idna),
+    b'certificate is for x*.python.org',
+)
+check(
+    _verifycert(cert('xn--p*.python.org'), idna),
+    b'certificate is for xn--p*.python.org',
+)
 
 # wildcard in first fragment and IDNA A-labels in subsequent fragments
 # are supported.
 idna = u'www*.pythön.org'.encode('idna').decode('ascii')
-check(_verifycert(cert(idna),
-                  u'www.pythön.org'.encode('idna').decode('ascii')),
-      None)
-check(_verifycert(cert(idna),
-                  u'www1.pythön.org'.encode('idna').decode('ascii')),
-      None)
-check(_verifycert(cert(idna),
-                  u'ftp.pythön.org'.encode('idna').decode('ascii')),
-      b'certificate is for www*.xn--pythn-mua.org')
-check(_verifycert(cert(idna),
-                  u'pythön.org'.encode('idna').decode('ascii')),
-      b'certificate is for www*.xn--pythn-mua.org')
+check(
+    _verifycert(cert(idna), u'www.pythön.org'.encode('idna').decode('ascii')),
+    None,
+)
+check(
+    _verifycert(cert(idna), u'www1.pythön.org'.encode('idna').decode('ascii')),
+    None,
+)
+check(
+    _verifycert(cert(idna), u'ftp.pythön.org'.encode('idna').decode('ascii')),
+    b'certificate is for www*.xn--pythn-mua.org',
+)
+check(
+    _verifycert(cert(idna), u'pythön.org'.encode('idna').decode('ascii')),
+    b'certificate is for www*.xn--pythn-mua.org',
+)
 
 c = {
     'notAfter': 'Jun 26 21:41:46 2011 GMT',
@@ -152,16 +176,20 @@
         ('DNS', 'linuxfr.org'),
         ('DNS', 'linuxfr.com'),
         ('othername', '<unsupported>'),
-    )
+    ),
 }
 check(_verifycert(c, 'linuxfr.org'), None)
 check(_verifycert(c, 'linuxfr.com'), None)
 # Not a "DNS" entry
-check(_verifycert(c, '<unsupported>'),
-      b'certificate is for linuxfr.org, linuxfr.com')
+check(
+    _verifycert(c, '<unsupported>'),
+    b'certificate is for linuxfr.org, linuxfr.com',
+)
 # When there is a subjectAltName, commonName isn't used
-check(_verifycert(c, 'linuxfrz.org'),
-      b'certificate is for linuxfr.org, linuxfr.com')
+check(
+    _verifycert(c, 'linuxfrz.org'),
+    b'certificate is for linuxfr.org, linuxfr.com',
+)
 
 # A pristine real-world example
 c = {
@@ -190,8 +218,10 @@
         ((u'organizationName', u'Google Inc'),),
     ),
 }
-check(_verifycert(c, 'mail.google.com'),
-      b'no commonName or subjectAltName found in certificate')
+check(
+    _verifycert(c, 'mail.google.com'),
+    b'no commonName or subjectAltName found in certificate',
+)
 
 # No DNS entry in subjectAltName but a commonName
 c = {
@@ -217,8 +247,10 @@
     ),
     'subjectAltName': (('othername', 'blabla'),),
 }
-check(_verifycert(c, 'google.com'),
-      b'no commonName or subjectAltName found in certificate')
+check(
+    _verifycert(c, 'google.com'),
+    b'no commonName or subjectAltName found in certificate',
+)
 
 # Empty cert / no cert
 check(_verifycert(None, 'example.com'), b'no certificate received')
@@ -226,13 +258,19 @@
 
 # avoid denials of service by refusing more than one
 # wildcard per fragment.
-check(_verifycert({'subject': (((u'commonName', u'a*b.com'),),)},
-                  'axxb.com'), None)
-check(_verifycert({'subject': (((u'commonName', u'a*b.co*'),),)},
-                  'axxb.com'), b'certificate is for a*b.co*')
-check(_verifycert({'subject': (((u'commonName', u'a*b*.com'),),)},
-                  'axxbxxc.com'),
-      b'too many wildcards in certificate DNS name: a*b*.com')
+check(
+    _verifycert({'subject': (((u'commonName', u'a*b.com'),),)}, 'axxb.com'),
+    None,
+)
+check(
+    _verifycert({'subject': (((u'commonName', u'a*b.co*'),),)}, 'axxb.com'),
+    b'certificate is for a*b.co*',
+)
+check(
+    _verifycert({'subject': (((u'commonName', u'a*b*.com'),),)}, 'axxbxxc.com'),
+    b'too many wildcards in certificate DNS name: a*b*.com',
+)
+
 
 def test_url():
     """
@@ -418,6 +456,7 @@
     'foo/bar/baz'
     """
 
+
 if 'TERM' in os.environ:
     del os.environ['TERM']
 
--- a/tests/test-util.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-util.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,6 +7,7 @@
 
 from mercurial import pycompat, util, utils
 
+
 @contextlib.contextmanager
 def mocktimer(incr=0.1, *additional_targets):
     """Replaces util.timer and additional_targets with a mock
@@ -46,10 +47,12 @@
         for args in additional_origs:
             setattr(*args)
 
+
 # attr.s default factory for util.timedstats.start binds the timer we
 # need to mock out.
 _start_default = (util.timedcmstats.start.default, 'factory')
 
+
 @contextlib.contextmanager
 def capturestderr():
     """Replace utils.procutil.stderr with a pycompat.bytesio instance
@@ -66,6 +69,7 @@
     finally:
         utils.procutil.stderr = orig
 
+
 class timedtests(unittest.TestCase):
     def testtimedcmstatsstr(self):
         stats = util.timedcmstats()
@@ -127,11 +131,13 @@
             with capturestderr() as out:
                 testfunc(2)
 
-        self.assertEqual(out.getvalue(), (
-            b'    testfunc: 1.000 s\n'
-            b'  testfunc: 3.000 s\n'
-        ))
+        self.assertEqual(
+            out.getvalue(),
+            (b'    testfunc: 1.000 s\n' b'  testfunc: 3.000 s\n'),
+        )
+
 
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
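
The fixture reformatted above works by temporarily swapping a module-level timer for a deterministic counter. The same pattern, reduced to a self-contained sketch with generic names::

    import contextlib
    import time

    class clock:
        timer = time.monotonic  # stand-in for the patched util.timer

    @contextlib.contextmanager
    def mocktimer(incr=1.0):
        """Advance a fake clock by `incr` on every call, then restore."""
        current = [0.0]
        def fake_timer():
            current[0] += incr
            return current[0]
        orig, clock.timer = clock.timer, fake_timer
        try:
            yield
        finally:
            clock.timer = orig

    with mocktimer():
        assert clock.timer() == 1.0
        assert clock.timer() == 2.0
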
--- a/tests/test-verify-repo-operations.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-verify-repo-operations.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,8 +11,7 @@
 import sys
 
 # Only run if slow tests are allowed
-if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
-                    'slow']):
+if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'], 'slow']):
     sys.exit(80)
 
 # These tests require Hypothesis and pytz to be installed.
@@ -29,6 +28,7 @@
 # fix this problem.
 try:
     import enum
+
     assert enum  # Silence pyflakes
 except ImportError:
     sys.stderr.write("skipped: enum34 not installed" + os.linesep)
@@ -44,7 +44,11 @@
 
 from hypothesis.errors import HypothesisException
 from hypothesis.stateful import (
-    rule, RuleBasedStateMachine, Bundle, precondition)
+    rule,
+    RuleBasedStateMachine,
+    Bundle,
+    precondition,
+)
 from hypothesis import settings, note, strategies as st
 from hypothesis.configuration import set_hypothesis_home_dir
 from hypothesis.database import ExampleDatabase
@@ -76,9 +80,9 @@
 file_index = 0
 while True:
     file_index += 1
-    savefile = os.path.join(generatedtests, "test-generated-%d.t" % (
-        file_index,
-    ))
+    savefile = os.path.join(
+        generatedtests, "test-generated-%d.t" % (file_index,)
+    )
     try:
         os.close(os.open(savefile, os.O_CREAT | os.O_EXCL | os.O_WRONLY))
         break
@@ -94,17 +98,23 @@
     "[]^_`;=@{}~ !#$%&'()+,-"
 )
 
-files = st.text(filecharacters, min_size=1).map(lambda x: x.strip()).filter(
-    bool).map(lambda s: s.encode('ascii'))
-
-safetext = st.text(st.characters(
-    min_codepoint=1, max_codepoint=127,
-    blacklist_categories=('Cc', 'Cs')), min_size=1).map(
-    lambda s: s.encode('utf-8')
+files = (
+    st.text(filecharacters, min_size=1)
+    .map(lambda x: x.strip())
+    .filter(bool)
+    .map(lambda s: s.encode('ascii'))
 )
 
+safetext = st.text(
+    st.characters(
+        min_codepoint=1, max_codepoint=127, blacklist_categories=('Cc', 'Cs')
+    ),
+    min_size=1,
+).map(lambda s: s.encode('utf-8'))
+
 extensions = st.sampled_from(('shelve', 'mq', 'blackbox',))
 
+
 @contextmanager
 def acceptableerrors(*args):
     """Sometimes we know an operation we're about to perform might fail, and
@@ -118,10 +128,12 @@
             note(e.output)
             raise
 
+
 reponames = st.text("abcdefghijklmnopqrstuvwxyz01234556789", min_size=1).map(
     lambda s: s.encode('ascii')
 )
 
+
 class verifyingstatemachine(RuleBasedStateMachine):
     """This defines the set of acceptable operations on a Mercurial repository
     using Hypothesis's RuleBasedStateMachine.
@@ -188,8 +200,10 @@
             o.write(ttest + os.linesep)
         with open(os.devnull, "w") as devnull:
             rewriter = subprocess.Popen(
-                [runtests, "--local", "-i", path], stdin=subprocess.PIPE,
-                stdout=devnull, stderr=devnull,
+                [runtests, "--local", "-i", path],
+                stdin=subprocess.PIPE,
+                stdout=devnull,
+                stderr=devnull,
             )
             rewriter.communicate("yes")
             with open(path, 'r') as i:
@@ -198,29 +212,29 @@
         e = None
         if not self.failed:
             try:
-                output = subprocess.check_output([
-                    runtests, path, "--local", "--pure"
-                ], stderr=subprocess.STDOUT)
+                output = subprocess.check_output(
+                    [runtests, path, "--local", "--pure"],
+                    stderr=subprocess.STDOUT,
+                )
                 assert "Ran 1 test" in output, output
-                for ext in (
-                    self.all_extensions - self.non_skippable_extensions
-                ):
-                    tf = os.path.join(testtmp, "test-generated-no-%s.t" % (
-                        ext,
-                    ))
+                for ext in self.all_extensions - self.non_skippable_extensions:
+                    tf = os.path.join(
+                        testtmp, "test-generated-no-%s.t" % (ext,)
+                    )
                     with open(tf, 'w') as o:
                         for l in ttest.splitlines():
                             if l.startswith("  $ hg"):
                                 l = l.replace(
-                                    "--config %s=" % (
-                                        extensionconfigkey(ext),), "")
+                                    "--config %s=" % (extensionconfigkey(ext),),
+                                    "",
+                                )
                             o.write(l + os.linesep)
                     with open(tf, 'r') as r:
                         t = r.read()
                         assert ext not in t, t
-                    output = subprocess.check_output([
-                        runtests, tf, "--local",
-                    ], stderr=subprocess.STDOUT)
+                    output = subprocess.check_output(
+                        [runtests, tf, "--local",], stderr=subprocess.STDOUT
+                    )
                     assert "Ran 1 test" in output, output
             except subprocess.CalledProcessError as e:
                 note(e.output)
@@ -244,7 +258,8 @@
         if os.path.exists(path):
             return
         self.log.append(
-            "$ mkdir -p -- %s" % (pipes.quote(os.path.relpath(path)),))
+            "$ mkdir -p -- %s" % (pipes.quote(os.path.relpath(path)),)
+        )
         os.makedirs(path)
 
     def cd(self, path):
@@ -270,28 +285,29 @@
     # to use later.
     @rule(
         target=paths,
-        source=st.lists(files, min_size=1).map(lambda l: os.path.join(*l)))
+        source=st.lists(files, min_size=1).map(lambda l: os.path.join(*l)),
+    )
     def genpath(self, source):
         return source
 
     @rule(
         target=committimes,
-        when=datetimes(min_year=1970, max_year=2038) | st.none())
+        when=datetimes(min_year=1970, max_year=2038) | st.none(),
+    )
     def gentime(self, when):
         return when
 
     @rule(
         target=contents,
         content=st.one_of(
-            st.binary(),
-            st.text().map(lambda x: x.encode('utf-8'))
-        ))
+            st.binary(), st.text().map(lambda x: x.encode('utf-8'))
+        ),
+    )
     def gencontent(self, content):
         return content
 
     @rule(
-        target=branches,
-        name=safetext,
+        target=branches, name=safetext,
     )
     def genbranch(self, name):
         return name
@@ -322,12 +338,13 @@
                 return
         with open(path, 'wb') as o:
             o.write(content)
-        self.log.append((
-            "$ python -c 'import binascii; "
-            "print(binascii.unhexlify(\"%s\"))' > %s") % (
-                binascii.hexlify(content),
-                pipes.quote(path),
-            ))
+        self.log.append(
+            (
+                "$ python -c 'import binascii; "
+                "print(binascii.unhexlify(\"%s\"))' > %s"
+            )
+            % (binascii.hexlify(content), pipes.quote(path),)
+        )
 
     @rule(path=paths)
     def addpath(self, path):
@@ -337,9 +354,7 @@
     @rule(path=paths)
     def forgetpath(self, path):
         if os.path.exists(path):
-            with acceptableerrors(
-                "file is already untracked",
-            ):
+            with acceptableerrors("file is already untracked",):
                 self.hg("forget", "--", path)
 
     @rule(s=st.none() | st.integers(0, 100))
@@ -388,8 +403,9 @@
                 errors.append('negative date value')
             if when.year == 2038:
                 errors.append('exceeds 32 bits')
-            command.append("--date=%s" % (
-                when.strftime('%Y-%m-%d %H:%M:%S %z'),))
+            command.append(
+                "--date=%s" % (when.strftime('%Y-%m-%d %H:%M:%S %z'),)
+            )
 
         with acceptableerrors(*errors):
             self.hg(*command)
@@ -404,9 +420,7 @@
         return self.configperrepo.setdefault(self.currentrepo, {})
 
     @rule(
-        target=repos,
-        source=repos,
-        name=reponames,
+        target=repos, source=repos, name=reponames,
     )
     def clone(self, source, name):
         if not os.path.exists(os.path.join("..", name)):
@@ -416,8 +430,7 @@
         return name
 
     @rule(
-        target=repos,
-        name=reponames,
+        target=repos, name=reponames,
     )
     def fresh(self, name):
         if not os.path.exists(os.path.join("..", name)):
@@ -440,23 +453,19 @@
     @rule()
     def pull(self, repo=repos):
         with acceptableerrors(
-            "repository default not found",
-            "repository is unrelated",
+            "repository default not found", "repository is unrelated",
         ):
             self.hg("pull")
 
     @rule(newbranch=st.booleans())
     def push(self, newbranch):
         with acceptableerrors(
-            "default repository not configured",
-            "no changes found",
+            "default repository not configured", "no changes found",
         ):
             if newbranch:
                 self.hg("push", "--new-branch")
             else:
-                with acceptableerrors(
-                    "creates new branches"
-                ):
+                with acceptableerrors("creates new branches"):
                     self.hg("push")
 
     # Section: Simple side effect free "check" operations
@@ -498,8 +507,7 @@
     @rule(branch=branches, clean=st.booleans())
     def update(self, branch, clean):
         with acceptableerrors(
-            'unknown revision',
-            'parse error',
+            'unknown revision', 'parse error',
         ):
             if clean:
                 self.hg("update", "-C", "--", branch)
@@ -538,6 +546,7 @@
         with acceptableerrors("no shelved changes to apply"):
             self.hg("unshelve")
 
+
 class writeonlydatabase(ExampleDatabase):
     def __init__(self, underlying):
         super(ExampleDatabase, self).__init__()
@@ -555,35 +564,35 @@
     def close(self):
         self.underlying.close()
 
+
 def extensionconfigkey(extension):
     return "extensions." + extension
 
+
 settings.register_profile(
-    'default',  settings(
-        timeout=300,
-        stateful_step_count=50,
-        max_examples=10,
-    )
+    'default', settings(timeout=300, stateful_step_count=50, max_examples=10,)
 )
 
 settings.register_profile(
-    'fast',  settings(
+    'fast',
+    settings(
         timeout=10,
         stateful_step_count=20,
         max_examples=5,
         min_satisfying_examples=1,
         max_shrinks=0,
-    )
+    ),
 )
 
 settings.register_profile(
-    'continuous', settings(
+    'continuous',
+    settings(
         timeout=-1,
         stateful_step_count=1000,
         max_examples=10 ** 8,
         max_iterations=10 ** 8,
-        database=writeonlydatabase(settings.default.database)
-    )
+        database=writeonlydatabase(settings.default.database),
+    ),
 )
 
 settings.load_profile(os.getenv('HYPOTHESIS_PROFILE', 'default'))
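
For readers unfamiliar with the machinery reformatted above: Hypothesis's `RuleBasedStateMachine` generates random sequences of decorated `@rule` methods, drawing arguments from strategies and `Bundle`s. A toy machine with the same shape (requires the `hypothesis` package; the rules here are placeholders, not repository operations)::

    from hypothesis import strategies as st
    from hypothesis.stateful import Bundle, RuleBasedStateMachine, rule

    class StackMachine(RuleBasedStateMachine):
        values = Bundle('values')

        def __init__(self):
            super().__init__()
            self.stack = []

        @rule(target=values, x=st.integers())
        def genvalue(self, x):
            return x

        @rule(v=values)
        def push(self, v):
            self.stack.append(v)

        @rule()
        def pop(self):
            if self.stack:
                self.stack.pop()

    # Collected like any other unittest case:
    TestStack = StackMachine.TestCase
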
--- a/tests/test-walk.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-walk.t	Mon Oct 21 11:09:48 2019 -0400
@@ -100,7 +100,7 @@
   f  mammals/skunk  skunk
   $ hg debugwalk -v -I 'relglob:*k'
   * matcher:
-  <includematcher includes='(?:|.*/)[^/]*k(?:/|$)'>
+  <includematcher includes='.*k(?:/|$)'>
   f  beans/black    ../beans/black
   f  fenugreek      ../fenugreek
   f  mammals/skunk  skunk
@@ -108,7 +108,7 @@
   * matcher:
   <intersectionmatcher
     m1=<patternmatcher patterns='mammals(?:/|$)'>,
-    m2=<includematcher includes='(?:|.*/)[^/]*k(?:/|$)'>>
+    m2=<includematcher includes='.*k(?:/|$)'>>
   f  mammals/skunk  skunk
   $ hg debugwalk -v -I 're:.*k$'
   * matcher:
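
The matcher change above simplifies the `relglob:*k` include regex from `(?:|.*/)[^/]*k(?:/|$)` to `.*k(?:/|$)`. The two are equivalent for anchored matching, since any match of `.*k` can be split at the last `/` preceding the `k`. A quick spot check::

    import re

    old = re.compile(r'(?:|.*/)[^/]*k(?:/|$)')
    new = re.compile(r'.*k(?:/|$)')

    for path in ['', 'k', 'ka', 'skunk', 'fenugreek', 'beans/black',
                 'mammals/skunk', 'mammals/skunk/x', 'a/k/b']:
        assert bool(old.match(path)) == bool(new.match(path)), path
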
--- a/tests/test-walkrepo.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-walkrepo.py	Mon Oct 21 11:09:48 2019 -0400
@@ -31,24 +31,35 @@
     os.symlink(os.path.pardir, b'circle')
     os.symlink(pjoin(b'subsubdir', b'subsub1'), b'subsub1')
 
+
 def runtest():
     reposet = frozenset(walkrepos(b'.', followsym=True))
     if sym and (len(reposet) != 3):
         print("reposet = %r" % (reposet,))
-        print(("Found %d repositories when I should have found 3"
-               % (len(reposet),)))
+        print(
+            (
+                "Found %d repositories when I should have found 3"
+                % (len(reposet),)
+            )
+        )
     if (not sym) and (len(reposet) != 2):
         print("reposet = %r" % (reposet,))
-        print(("Found %d repositories when I should have found 2"
-               % (len(reposet),)))
-    sub1set = frozenset((pjoin(b'.', b'sub1'),
-                         pjoin(b'.', b'circle', b'subdir', b'sub1')))
+        print(
+            (
+                "Found %d repositories when I should have found 2"
+                % (len(reposet),)
+            )
+        )
+    sub1set = frozenset(
+        (pjoin(b'.', b'sub1'), pjoin(b'.', b'circle', b'subdir', b'sub1'))
+    )
     if len(sub1set & reposet) != 1:
         print("sub1set = %r" % (sub1set,))
         print("reposet = %r" % (reposet,))
         print("sub1set and reposet should have exactly one path in common.")
-    sub2set = frozenset((pjoin(b'.', b'subsub1'),
-                         pjoin(b'.', b'subsubdir', b'subsub1')))
+    sub2set = frozenset(
+        (pjoin(b'.', b'subsub1'), pjoin(b'.', b'subsubdir', b'subsub1'))
+    )
     if len(sub2set & reposet) != 1:
         print("sub2set = %r" % (sub2set,))
         print("reposet = %r" % (reposet,))
@@ -58,6 +69,7 @@
         print("reposet = %r" % (reposet,))
         print("Symbolic links are supported and %s is not in reposet" % (sub3,))
 
+
 runtest()
 if sym:
     # Simulate not having symlinks.
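
The symlink handling this test exercises hinges on remembering which directories have already been visited, so that cycles like the `circle` link terminate. A generic sketch of that technique (not walkrepos itself), keyed on `(st_dev, st_ino)`::

    import os

    def walk_followsym(root):
        # Depth-first walk that follows symlinked directories but keys
        # visited directories on (st_dev, st_ino) to break cycles.
        seen = set()
        stack = [root]
        while stack:
            path = stack.pop()
            st = os.stat(path)
            key = (st.st_dev, st.st_ino)
            if key in seen:
                continue
            seen.add(key)
            yield path
            for entry in os.scandir(path):
                if entry.is_dir(follow_symlinks=True):
                    stack.append(entry.path)
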
--- a/tests/test-websub.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-websub.t	Mon Oct 21 11:09:48 2019 -0400
@@ -11,16 +11,18 @@
   > 
   > [websub]
   > issues = s|Issue(\d+)|<a href="http://bts.example.org/issue\1">Issue\1</a>|
+  > tickets = s|ticket(\d+)|<a href="http://ticket.example.org/issue\1">Ticket\1</a>|i
   > 
   > [interhg]
   > # check that we maintain some interhg backwards compatibility...
   > # yes, 'x' is a weird delimiter...
   > markbugs = sxbugx<i class="\x">bug</i>x
+  > problems = sxPROBLEMx<i class="\x">problem</i>xi
   > EOF
 
   $ touch foo
   $ hg add foo
-  $ hg commit -d '1 0' -m 'Issue123: fixed the bug!'
+  $ hg commit -d '1 0' -m 'Issue123: fixed the bug! Ticket456 and problem789 too'
 
   $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
   $ cat hg.pid >> $DAEMON_PIDS
@@ -28,7 +30,7 @@
 log
 
   $ get-with-headers.py localhost:$HGPORT "rev/tip" | grep bts
-  <div class="description"><a href="http://bts.example.org/issue123">Issue123</a>: fixed the <i class="x">bug</i>!</div>
+  <div class="description"><a href="http://bts.example.org/issue123">Issue123</a>: fixed the <i class="x">bug</i>! <a href="http://ticket.example.org/issue456">Ticket456</a> and <i class="x">problem</i>789 too</div>
 errors
 
   $ cat errors.log
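
The new `tickets` and `problems` rules above use the sed-style `s<sep>pattern<sep>replacement<sep>flags` syntax, where the separator is whatever character follows the leading `s` (`|`, or even `x` for the interhg entries) and a trailing `i` makes the match case-insensitive. A hedged sketch of parsing such a rule, assuming the separator never occurs inside the pattern or replacement::

    import re

    def compile_websub(rule):
        # 's' + separator + pattern + separator + replacement +
        # separator + optional flags; only the 'i' flag is handled here.
        assert rule.startswith('s') and len(rule) > 4
        sep = rule[1]
        _, pattern, replacement, flags = rule.split(sep)
        reflags = re.IGNORECASE if 'i' in flags else 0
        regex = re.compile(pattern, reflags)
        return lambda text: regex.sub(replacement, text)

    sub = compile_websub(r's|ticket(\d+)|Ticket\1|i')
    assert sub('see TICKET42') == 'see Ticket42'
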
--- a/tests/test-win32text.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-win32text.t	Mon Oct 21 11:09:48 2019 -0400
@@ -56,7 +56,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 2 changes to 2 files
   attempt to commit or push text file(s) using CRLF line endings
   in bc2d09796734: g
   in b1aa5cde7ff4: f
@@ -265,7 +264,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 4 changes to 4 files
   attempt to commit or push text file(s) using CRLF line endings
   in 67ac5962ab43: d
   in 68c127d1834e: b
--- a/tests/test-wireproto-clientreactor.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-wireproto-clientreactor.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,12 +9,11 @@
     ui as uimod,
     wireprotoframing as framing,
 )
-from mercurial.utils import (
-    cborutil,
-)
+from mercurial.utils import cborutil
 
 try:
     from mercurial import zstd
+
     zstd.__version__
 except ImportError:
     zstd = None
@@ -23,18 +22,24 @@
 
 globalui = uimod.ui()
 
+
 def sendframe(reactor, frame):
     """Send a frame bytearray to a reactor."""
     header = framing.parseheader(frame)
-    payload = frame[framing.FRAME_HEADER_SIZE:]
+    payload = frame[framing.FRAME_HEADER_SIZE :]
     assert len(payload) == header.length
 
-    return reactor.onframerecv(framing.frame(header.requestid,
-                                             header.streamid,
-                                             header.streamflags,
-                                             header.typeid,
-                                             header.flags,
-                                             payload))
+    return reactor.onframerecv(
+        framing.frame(
+            header.requestid,
+            header.streamid,
+            header.streamflags,
+            header.typeid,
+            header.flags,
+            payload,
+        )
+    )
+
 
 class SingleSendTests(unittest.TestCase):
     """A reactor that can only send once rejects subsequent sends."""
@@ -42,13 +47,14 @@
     if not getattr(unittest.TestCase, 'assertRaisesRegex', False):
         # Python 3.7 deprecates the regex*p* version, but 2.7 lacks
         # the regex version.
-        assertRaisesRegex = (# camelcase-required
-            unittest.TestCase.assertRaisesRegexp)
+        assertRaisesRegex = (  # camelcase-required
+            unittest.TestCase.assertRaisesRegexp
+        )
 
     def testbasic(self):
-        reactor = framing.clientreactor(globalui,
-                                        hasmultiplesend=False,
-                                        buffersends=True)
+        reactor = framing.clientreactor(
+            globalui, hasmultiplesend=False, buffersends=True
+        )
 
         request, action, meta = reactor.callcommand(b'foo', {})
         self.assertEqual(request.state, b'pending')
@@ -62,20 +68,24 @@
 
         self.assertEqual(request.state, b'sent')
 
-        with self.assertRaisesRegex(error.ProgrammingError,
-                                     'cannot issue new commands'):
+        with self.assertRaisesRegex(
+            error.ProgrammingError, 'cannot issue new commands'
+        ):
             reactor.callcommand(b'foo', {})
 
-        with self.assertRaisesRegex(error.ProgrammingError,
-                                     'cannot issue new commands'):
+        with self.assertRaisesRegex(
+            error.ProgrammingError, 'cannot issue new commands'
+        ):
             reactor.callcommand(b'foo', {})
 
+
 class NoBufferTests(unittest.TestCase):
     """A reactor without send buffering sends requests immediately."""
+
     def testbasic(self):
-        reactor = framing.clientreactor(globalui,
-                                        hasmultiplesend=True,
-                                        buffersends=False)
+        reactor = framing.clientreactor(
+            globalui, hasmultiplesend=True, buffersends=False
+        )
 
         request, action, meta = reactor.callcommand(b'command1', {})
         self.assertEqual(request.requestid, 1)
@@ -101,29 +111,34 @@
 
         self.assertEqual(request.state, b'sent')
 
+
 class BadFrameRecvTests(unittest.TestCase):
     if not getattr(unittest.TestCase, 'assertRaisesRegex', False):
         # Python 3.7 deprecates the regex*p* version, but 2.7 lacks
         # the regex version.
-        assertRaisesRegex = (# camelcase-required
-            unittest.TestCase.assertRaisesRegexp)
+        assertRaisesRegex = (  # camelcase-required
+            unittest.TestCase.assertRaisesRegexp
+        )
 
     def testoddstream(self):
         reactor = framing.clientreactor(globalui)
 
         action, meta = sendframe(reactor, ffs(b'1 1 0 1 0 foo'))
         self.assertEqual(action, b'error')
-        self.assertEqual(meta[b'message'],
-                         b'received frame with odd numbered stream ID: 1')
+        self.assertEqual(
+            meta[b'message'], b'received frame with odd numbered stream ID: 1'
+        )
 
     def testunknownstream(self):
         reactor = framing.clientreactor(globalui)
 
         action, meta = sendframe(reactor, ffs(b'1 0 0 1 0 foo'))
         self.assertEqual(action, b'error')
-        self.assertEqual(meta[b'message'],
-                         b'received frame on unknown stream without beginning '
-                         b'of stream flag set')
+        self.assertEqual(
+            meta[b'message'],
+            b'received frame on unknown stream without beginning '
+            b'of stream flag set',
+        )
 
     def testunhandledframetype(self):
         reactor = framing.clientreactor(globalui, buffersends=False)
@@ -132,10 +147,12 @@
         for frame in meta[b'framegen']:
             pass
 
-        with self.assertRaisesRegex(error.ProgrammingError,
-                                     'unhandled frame type'):
+        with self.assertRaisesRegex(
+            error.ProgrammingError, 'unhandled frame type'
+        ):
             sendframe(reactor, ffs(b'1 0 stream-begin text-output 0 foo'))
 
+
 class StreamTests(unittest.TestCase):
     def testmultipleresponseframes(self):
         reactor = framing.clientreactor(globalui, buffersends=False)
@@ -148,15 +165,18 @@
 
         action, meta = sendframe(
             reactor,
-            ffs(b'%d 0 stream-begin command-response 0 foo' %
-                request.requestid))
+            ffs(
+                b'%d 0 stream-begin command-response 0 foo' % request.requestid
+            ),
+        )
         self.assertEqual(action, b'responsedata')
 
         action, meta = sendframe(
-            reactor,
-            ffs(b'%d 0 0 command-response eos bar' % request.requestid))
+            reactor, ffs(b'%d 0 0 command-response eos bar' % request.requestid)
+        )
         self.assertEqual(action, b'responsedata')
 
+
 class RedirectTests(unittest.TestCase):
     def testredirect(self):
         reactor = framing.clientreactor(globalui, buffersends=False)
@@ -167,18 +187,24 @@
         }
 
         request, action, meta = reactor.callcommand(
-            b'foo', {}, redirect=redirect)
+            b'foo', {}, redirect=redirect
+        )
 
         self.assertEqual(action, b'sendframes')
 
         frames = list(meta[b'framegen'])
         self.assertEqual(len(frames), 1)
 
-        self.assertEqual(frames[0],
-                         ffs(b'1 1 stream-begin command-request new '
-                             b"cbor:{b'name': b'foo', "
-                             b"b'redirect': {b'targets': [b'a', b'b'], "
-                             b"b'hashes': [b'sha256']}}"))
+        self.assertEqual(
+            frames[0],
+            ffs(
+                b'1 1 stream-begin command-request new '
+                b"cbor:{b'name': b'foo', "
+                b"b'redirect': {b'targets': [b'a', b'b'], "
+                b"b'hashes': [b'sha256']}}"
+            ),
+        )
+
 
 class StreamSettingsTests(unittest.TestCase):
     def testnoflags(self):
@@ -188,14 +214,18 @@
         for f in meta[b'framegen']:
             pass
 
-        action, meta = sendframe(reactor,
-            ffs(b'1 2 stream-begin stream-settings 0 '))
+        action, meta = sendframe(
+            reactor, ffs(b'1 2 stream-begin stream-settings 0 ')
+        )
 
         self.assertEqual(action, b'error')
-        self.assertEqual(meta, {
-            b'message': b'stream encoding settings frame must have '
-                        b'continuation or end of stream flag set',
-        })
+        self.assertEqual(
+            meta,
+            {
+                b'message': b'stream encoding settings frame must have '
+                b'continuation or end of stream flag set',
+            },
+        )
 
     def testconflictflags(self):
         reactor = framing.clientreactor(globalui, buffersends=False)
@@ -204,14 +234,18 @@
         for f in meta[b'framegen']:
             pass
 
-        action, meta = sendframe(reactor,
-            ffs(b'1 2 stream-begin stream-settings continuation|eos '))
+        action, meta = sendframe(
+            reactor, ffs(b'1 2 stream-begin stream-settings continuation|eos ')
+        )
 
         self.assertEqual(action, b'error')
-        self.assertEqual(meta, {
-            b'message': b'stream encoding settings frame cannot have both '
-                        b'continuation and end of stream flags set',
-        })
+        self.assertEqual(
+            meta,
+            {
+                b'message': b'stream encoding settings frame cannot have both '
+                b'continuation and end of stream flags set',
+            },
+        )
 
     def testemptypayload(self):
         reactor = framing.clientreactor(globalui, buffersends=False)
@@ -220,14 +254,18 @@
         for f in meta[b'framegen']:
             pass
 
-        action, meta = sendframe(reactor,
-            ffs(b'1 2 stream-begin stream-settings eos '))
+        action, meta = sendframe(
+            reactor, ffs(b'1 2 stream-begin stream-settings eos ')
+        )
 
         self.assertEqual(action, b'error')
-        self.assertEqual(meta, {
-            b'message': b'stream encoding settings frame did not contain '
-                        b'CBOR data'
-        })
+        self.assertEqual(
+            meta,
+            {
+                b'message': b'stream encoding settings frame did not contain '
+                b'CBOR data'
+            },
+        )
 
     def testbadcbor(self):
         reactor = framing.clientreactor(globalui, buffersends=False)
@@ -236,8 +274,9 @@
         for f in meta[b'framegen']:
             pass
 
-        action, meta = sendframe(reactor,
-            ffs(b'1 2 stream-begin stream-settings eos badvalue'))
+        action, meta = sendframe(
+            reactor, ffs(b'1 2 stream-begin stream-settings eos badvalue')
+        )
 
         self.assertEqual(action, b'error')
 
@@ -248,8 +287,10 @@
         for f in meta[b'framegen']:
             pass
 
-        action, meta = sendframe(reactor,
-            ffs(b'1 2 stream-begin stream-settings eos cbor:b"identity"'))
+        action, meta = sendframe(
+            reactor,
+            ffs(b'1 2 stream-begin stream-settings eos cbor:b"identity"'),
+        )
 
         self.assertEqual(action, b'noop')
         self.assertEqual(meta, {})
@@ -261,19 +302,25 @@
         for f in meta[b'framegen']:
             pass
 
-        data = b''.join([
-            b''.join(cborutil.streamencode(b'identity')),
-            b''.join(cborutil.streamencode({b'foo', b'bar'})),
-        ])
+        data = b''.join(
+            [
+                b''.join(cborutil.streamencode(b'identity')),
+                b''.join(cborutil.streamencode({b'foo', b'bar'})),
+            ]
+        )
 
-        action, meta = sendframe(reactor,
-            ffs(b'1 2 stream-begin stream-settings eos %s' % data))
+        action, meta = sendframe(
+            reactor, ffs(b'1 2 stream-begin stream-settings eos %s' % data)
+        )
 
         self.assertEqual(action, b'error')
-        self.assertEqual(meta, {
-            b'message': b'error setting stream decoder: identity decoder '
-                        b'received unexpected additional values',
-        })
+        self.assertEqual(
+            meta,
+            {
+                b'message': b'error setting stream decoder: identity decoder '
+                b'received unexpected additional values',
+            },
+        )
 
     def testmultipleframes(self):
         reactor = framing.clientreactor(globalui, buffersends=False)
@@ -284,15 +331,19 @@
 
         data = b''.join(cborutil.streamencode(b'identity'))
 
-        action, meta = sendframe(reactor,
-            ffs(b'1 2 stream-begin stream-settings continuation %s' %
-                data[0:3]))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'1 2 stream-begin stream-settings continuation %s' % data[0:3]
+            ),
+        )
 
         self.assertEqual(action, b'noop')
         self.assertEqual(meta, {})
 
-        action, meta = sendframe(reactor,
-            ffs(b'1 2 0 stream-settings eos %s' % data[3:]))
+        action, meta = sendframe(
+            reactor, ffs(b'1 2 0 stream-settings eos %s' % data[3:])
+        )
 
         self.assertEqual(action, b'noop')
         self.assertEqual(meta, {})
@@ -304,14 +355,19 @@
         for f in meta[b'framegen']:
             pass
 
-        action, meta = sendframe(reactor,
-            ffs(b'1 2 stream-begin stream-settings eos cbor:b"badvalue"'))
+        action, meta = sendframe(
+            reactor,
+            ffs(b'1 2 stream-begin stream-settings eos cbor:b"badvalue"'),
+        )
 
         self.assertEqual(action, b'error')
-        self.assertEqual(meta, {
-            b'message': b'error setting stream decoder: unknown stream '
-                        b'decoder: badvalue',
-        })
+        self.assertEqual(
+            meta,
+            {
+                b'message': b'error setting stream decoder: unknown stream '
+                b'decoder: badvalue',
+            },
+        )
 
     def testzlibencoding(self):
         reactor = framing.clientreactor(globalui, buffersends=False)
@@ -320,9 +376,13 @@
         for f in meta[b'framegen']:
             pass
 
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zlib"' %
-                request.requestid))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 stream-begin stream-settings eos cbor:b"zlib"'
+                % request.requestid
+            ),
+        )
 
         self.assertEqual(action, b'noop')
         self.assertEqual(meta, {})
@@ -335,9 +395,13 @@
         compressed = zlib.compress(encoded)
         self.assertEqual(zlib.decompress(compressed), encoded)
 
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 encoded command-response eos %s' %
-                (request.requestid, compressed)))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 encoded command-response eos %s'
+                % (request.requestid, compressed)
+            ),
+        )
 
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], encoded)
@@ -349,9 +413,13 @@
         for f in meta[b'framegen']:
             pass
 
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zlib"' %
-                request.requestid))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 stream-begin stream-settings eos cbor:b"zlib"'
+                % request.requestid
+            ),
+        )
 
         self.assertEqual(action, b'noop')
         self.assertEqual(meta, {})
@@ -367,12 +435,16 @@
         chunks = []
 
         for i in range(len(compressed)):
-            char = compressed[i:i + 1]
+            char = compressed[i : i + 1]
             if char == b'\\':
                 char = b'\\\\'
-            action, meta = sendframe(reactor,
-                ffs(b'%d 2 encoded command-response continuation %s' %
-                    (request.requestid, char)))
+            action, meta = sendframe(
+                reactor,
+                ffs(
+                    b'%d 2 encoded command-response continuation %s'
+                    % (request.requestid, char)
+                ),
+            )
 
             self.assertEqual(action, b'responsedata')
             chunks.append(meta[b'data'])
@@ -384,8 +456,10 @@
         self.assertEqual(b''.join(chunks), encoded)
 
         # End the stream for good measure.
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 stream-end command-response eos ' % request.requestid))
+        action, meta = sendframe(
+            reactor,
+            ffs(b'%d 2 stream-end command-response eos ' % request.requestid),
+        )
 
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], b'')
@@ -395,8 +469,9 @@
     def testzlibmultipleresponses(self):
         # We feed in zlib compressed data on the same stream but belonging to
         # 2 different requests. This tests our flushing behavior.
-        reactor = framing.clientreactor(globalui, buffersends=False,
-                                        hasmultiplesend=True)
+        reactor = framing.clientreactor(
+            globalui, buffersends=False, hasmultiplesend=True
+        )
 
         request1, action, meta = reactor.callcommand(b'foo', {})
         for f in meta[b'framegen']:
@@ -409,48 +484,70 @@
         outstream = framing.outputstream(2)
         outstream.setencoder(globalui, b'zlib')
 
-        response1 = b''.join(cborutil.streamencode({
-            b'status': b'ok',
-            b'extra': b'response1' * 10,
-        }))
+        response1 = b''.join(
+            cborutil.streamencode(
+                {b'status': b'ok', b'extra': b'response1' * 10,}
+            )
+        )
 
-        response2 = b''.join(cborutil.streamencode({
-            b'status': b'error',
-            b'extra': b'response2' * 10,
-        }))
+        response2 = b''.join(
+            cborutil.streamencode(
+                {b'status': b'error', b'extra': b'response2' * 10,}
+            )
+        )
 
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zlib"' %
-                request1.requestid))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 stream-begin stream-settings eos cbor:b"zlib"'
+                % request1.requestid
+            ),
+        )
 
         self.assertEqual(action, b'noop')
         self.assertEqual(meta, {})
 
         # Feeding partial data in won't get anything useful out.
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 encoded command-response continuation %s' % (
-                request1.requestid, outstream.encode(response1))))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 encoded command-response continuation %s'
+                % (request1.requestid, outstream.encode(response1))
+            ),
+        )
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], b'')
 
         # But flushing data at both ends will get our original data.
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 encoded command-response eos %s' % (
-                request1.requestid, outstream.flush())))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 encoded command-response eos %s'
+                % (request1.requestid, outstream.flush())
+            ),
+        )
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], response1)
 
         # We should be able to reuse the compressor/decompressor for the
         # 2nd response.
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 encoded command-response continuation %s' % (
-                request2.requestid, outstream.encode(response2))))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 encoded command-response continuation %s'
+                % (request2.requestid, outstream.encode(response2))
+            ),
+        )
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], b'')
 
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 encoded command-response eos %s' % (
-                request2.requestid, outstream.flush())))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 encoded command-response eos %s'
+                % (request2.requestid, outstream.flush())
+            ),
+        )
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], response2)
 
@@ -462,9 +559,13 @@
         for f in meta[b'framegen']:
             pass
 
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zstd-8mb"' %
-                request.requestid))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 stream-begin stream-settings eos cbor:b"zstd-8mb"'
+                % request.requestid
+            ),
+        )
 
         self.assertEqual(action, b'noop')
         self.assertEqual(meta, {})
@@ -476,12 +577,20 @@
 
         encoder = framing.zstd8mbencoder(globalui)
         compressed = encoder.encode(encoded) + encoder.finish()
-        self.assertEqual(zstd.ZstdDecompressor().decompress(
-            compressed, max_output_size=len(encoded)), encoded)
+        self.assertEqual(
+            zstd.ZstdDecompressor().decompress(
+                compressed, max_output_size=len(encoded)
+            ),
+            encoded,
+        )
 
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 encoded command-response eos %s' %
-                (request.requestid, compressed)))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 encoded command-response eos %s'
+                % (request.requestid, compressed)
+            ),
+        )
 
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], encoded)
@@ -494,9 +603,13 @@
         for f in meta[b'framegen']:
             pass
 
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zstd-8mb"' %
-                request.requestid))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 stream-begin stream-settings eos cbor:b"zstd-8mb"'
+                % request.requestid
+            ),
+        )
 
         self.assertEqual(action, b'noop')
         self.assertEqual(meta, {})
@@ -507,18 +620,23 @@
         encoded = b''.join(cborutil.streamencode(result))
 
         compressed = zstd.ZstdCompressor().compress(encoded)
-        self.assertEqual(zstd.ZstdDecompressor().decompress(compressed),
-                         encoded)
+        self.assertEqual(
+            zstd.ZstdDecompressor().decompress(compressed), encoded
+        )
 
         chunks = []
 
         for i in range(len(compressed)):
-            char = compressed[i:i + 1]
+            char = compressed[i : i + 1]
             if char == b'\\':
                 char = b'\\\\'
-            action, meta = sendframe(reactor,
-                ffs(b'%d 2 encoded command-response continuation %s' %
-                    (request.requestid, char)))
+            action, meta = sendframe(
+                reactor,
+                ffs(
+                    b'%d 2 encoded command-response continuation %s'
+                    % (request.requestid, char)
+                ),
+            )
 
             self.assertEqual(action, b'responsedata')
             chunks.append(meta[b'data'])
@@ -529,8 +647,10 @@
         self.assertEqual(b''.join(chunks), encoded)
 
         # End the stream for good measure.
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 stream-end command-response eos ' % request.requestid))
+        action, meta = sendframe(
+            reactor,
+            ffs(b'%d 2 stream-end command-response eos ' % request.requestid),
+        )
 
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], b'')
@@ -541,8 +661,9 @@
     def testzstd8mbmultipleresponses(self):
         # We feed in zstd compressed data on the same stream but belonging to
         # 2 different requests. This tests our flushing behavior.
-        reactor = framing.clientreactor(globalui, buffersends=False,
-                                        hasmultiplesend=True)
+        reactor = framing.clientreactor(
+            globalui, buffersends=False, hasmultiplesend=True
+        )
 
         request1, action, meta = reactor.callcommand(b'foo', {})
         for f in meta[b'framegen']:
@@ -555,51 +676,74 @@
         outstream = framing.outputstream(2)
         outstream.setencoder(globalui, b'zstd-8mb')
 
-        response1 = b''.join(cborutil.streamencode({
-            b'status': b'ok',
-            b'extra': b'response1' * 10,
-        }))
+        response1 = b''.join(
+            cborutil.streamencode(
+                {b'status': b'ok', b'extra': b'response1' * 10,}
+            )
+        )
 
-        response2 = b''.join(cborutil.streamencode({
-            b'status': b'error',
-            b'extra': b'response2' * 10,
-        }))
+        response2 = b''.join(
+            cborutil.streamencode(
+                {b'status': b'error', b'extra': b'response2' * 10,}
+            )
+        )
 
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zstd-8mb"' %
-                request1.requestid))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 stream-begin stream-settings eos cbor:b"zstd-8mb"'
+                % request1.requestid
+            ),
+        )
 
         self.assertEqual(action, b'noop')
         self.assertEqual(meta, {})
 
         # Feeding partial data in won't get anything useful out.
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 encoded command-response continuation %s' % (
-                request1.requestid, outstream.encode(response1))))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 encoded command-response continuation %s'
+                % (request1.requestid, outstream.encode(response1))
+            ),
+        )
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], b'')
 
         # But flushing data at both ends will get our original data.
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 encoded command-response eos %s' % (
-                request1.requestid, outstream.flush())))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 encoded command-response eos %s'
+                % (request1.requestid, outstream.flush())
+            ),
+        )
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], response1)
 
         # We should be able to reuse the compressor/decompressor for the
         # 2nd response.
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 encoded command-response continuation %s' % (
-                request2.requestid, outstream.encode(response2))))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 encoded command-response continuation %s'
+                % (request2.requestid, outstream.encode(response2))
+            ),
+        )
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], b'')
 
-        action, meta = sendframe(reactor,
-            ffs(b'%d 2 encoded command-response eos %s' % (
-                request2.requestid, outstream.flush())))
+        action, meta = sendframe(
+            reactor,
+            ffs(
+                b'%d 2 encoded command-response eos %s'
+                % (request2.requestid, outstream.flush())
+            ),
+        )
         self.assertEqual(action, b'responsedata')
         self.assertEqual(meta[b'data'], response2)
 
+
 if __name__ == '__main__':
     if (3, 6, 0) <= sys.version_info < (3, 6, 4):
         # Python 3.6.0 through 3.6.3 inclusive shipped with
@@ -607,4 +751,5 @@
         # tests on those specific versions of Python. Sigh.
         sys.exit(80)
     import silenttestrunner
+
     silenttestrunner.main(__name__)
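
The flushing behavior exercised by testzstd8mbmultipleresponses above can be reproduced with the standalone `zstandard` package (used here as a stand-in for Mercurial's vendored copy; the package and constants are this sketch's assumption, not anything the test itself imports): small writes stay buffered inside the compressor, and only a block flush makes them decodable on the receiving side without ending the frame, which is why the same compressor can be reused for the second response:

    import zstandard

    cctx = zstandard.ZstdCompressor()
    cobj = cctx.compressobj()

    # Small writes are typically buffered, so the peer cannot decode
    # anything useful from what has been emitted so far.
    partial = cobj.compress(b'response1' * 10)

    # COMPRESSOBJ_FLUSH_BLOCK makes everything fed in so far decodable
    # without finishing the zstd frame, so the same compressor can keep
    # serving a later response on the same stream.
    flushed = partial + cobj.flush(zstandard.COMPRESSOBJ_FLUSH_BLOCK)

    dobj = zstandard.ZstdDecompressor().decompressobj()
    assert dobj.decompress(flushed) == b'response1' * 10
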
--- a/tests/test-wireproto-exchangev2-shallow.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-wireproto-exchangev2-shallow.t	Mon Oct 21 11:09:48 2019 -0400
@@ -490,7 +490,7 @@
   received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
   received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
   searching for changes
-  all local heads known remotely
+  all local changesets known remotely
   sending 1 commands
   sending command changesetdata: {
     'fields': set([
--- a/tests/test-wireproto-exchangev2.t	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-wireproto-exchangev2.t	Mon Oct 21 11:09:48 2019 -0400
@@ -299,7 +299,7 @@
     ]
   }
   searching for changes
-  all local heads known remotely
+  all local changesets known remotely
   sending 1 commands
   sending command changesetdata: {
     'fields': set([
--- a/tests/test-wireproto-framing.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-wireproto-framing.py	Mon Oct 21 11:09:48 2019 -0400
@@ -9,186 +9,278 @@
 
 ffs = framing.makeframefromhumanstring
 
+
 class FrameHumanStringTests(unittest.TestCase):
     def testbasic(self):
-        self.assertEqual(ffs(b'1 1 0 1 0 '),
-                         b'\x00\x00\x00\x01\x00\x01\x00\x10')
+        self.assertEqual(
+            ffs(b'1 1 0 1 0 '), b'\x00\x00\x00\x01\x00\x01\x00\x10'
+        )
 
-        self.assertEqual(ffs(b'2 4 0 1 0 '),
-                         b'\x00\x00\x00\x02\x00\x04\x00\x10')
+        self.assertEqual(
+            ffs(b'2 4 0 1 0 '), b'\x00\x00\x00\x02\x00\x04\x00\x10'
+        )
 
-        self.assertEqual(ffs(b'2 4 0 1 0 foo'),
-                         b'\x03\x00\x00\x02\x00\x04\x00\x10foo')
+        self.assertEqual(
+            ffs(b'2 4 0 1 0 foo'), b'\x03\x00\x00\x02\x00\x04\x00\x10foo'
+        )
 
     def testcborint(self):
-        self.assertEqual(ffs(b'1 1 0 1 0 cbor:15'),
-                         b'\x01\x00\x00\x01\x00\x01\x00\x10\x0f')
+        self.assertEqual(
+            ffs(b'1 1 0 1 0 cbor:15'), b'\x01\x00\x00\x01\x00\x01\x00\x10\x0f'
+        )
 
-        self.assertEqual(ffs(b'1 1 0 1 0 cbor:42'),
-                         b'\x02\x00\x00\x01\x00\x01\x00\x10\x18*')
+        self.assertEqual(
+            ffs(b'1 1 0 1 0 cbor:42'), b'\x02\x00\x00\x01\x00\x01\x00\x10\x18*'
+        )
+
+        self.assertEqual(
+            ffs(b'1 1 0 1 0 cbor:1048576'),
+            b'\x05\x00\x00\x01\x00\x01\x00\x10\x1a' b'\x00\x10\x00\x00',
+        )
 
-        self.assertEqual(ffs(b'1 1 0 1 0 cbor:1048576'),
-                         b'\x05\x00\x00\x01\x00\x01\x00\x10\x1a'
-                         b'\x00\x10\x00\x00')
+        self.assertEqual(
+            ffs(b'1 1 0 1 0 cbor:0'), b'\x01\x00\x00\x01\x00\x01\x00\x10\x00'
+        )
 
-        self.assertEqual(ffs(b'1 1 0 1 0 cbor:0'),
-                         b'\x01\x00\x00\x01\x00\x01\x00\x10\x00')
+        self.assertEqual(
+            ffs(b'1 1 0 1 0 cbor:-1'), b'\x01\x00\x00\x01\x00\x01\x00\x10 '
+        )
 
-        self.assertEqual(ffs(b'1 1 0 1 0 cbor:-1'),
-                         b'\x01\x00\x00\x01\x00\x01\x00\x10 ')
-
-        self.assertEqual(ffs(b'1 1 0 1 0 cbor:-342542'),
-                         b'\x05\x00\x00\x01\x00\x01\x00\x10:\x00\x05:\r')
+        self.assertEqual(
+            ffs(b'1 1 0 1 0 cbor:-342542'),
+            b'\x05\x00\x00\x01\x00\x01\x00\x10:\x00\x05:\r',
+        )
 
     def testcborstrings(self):
-        self.assertEqual(ffs(b"1 1 0 1 0 cbor:b'foo'"),
-                         b'\x04\x00\x00\x01\x00\x01\x00\x10Cfoo')
+        self.assertEqual(
+            ffs(b"1 1 0 1 0 cbor:b'foo'"),
+            b'\x04\x00\x00\x01\x00\x01\x00\x10Cfoo',
+        )
 
     def testcborlists(self):
-        self.assertEqual(ffs(b"1 1 0 1 0 cbor:[None, True, False, 42, b'foo']"),
-                         b'\n\x00\x00\x01\x00\x01\x00\x10\x85\xf6\xf5\xf4'
-                         b'\x18*Cfoo')
+        self.assertEqual(
+            ffs(b"1 1 0 1 0 cbor:[None, True, False, 42, b'foo']"),
+            b'\n\x00\x00\x01\x00\x01\x00\x10\x85\xf6\xf5\xf4' b'\x18*Cfoo',
+        )
 
     def testcbordicts(self):
-        self.assertEqual(ffs(b"1 1 0 1 0 "
-                             b"cbor:{b'foo': b'val1', b'bar': b'val2'}"),
-                         b'\x13\x00\x00\x01\x00\x01\x00\x10\xa2'
-                         b'CbarDval2CfooDval1')
+        self.assertEqual(
+            ffs(b"1 1 0 1 0 " b"cbor:{b'foo': b'val1', b'bar': b'val2'}"),
+            b'\x13\x00\x00\x01\x00\x01\x00\x10\xa2' b'CbarDval2CfooDval1',
+        )
+
 
 class FrameTests(unittest.TestCase):
     def testdataexactframesize(self):
         data = util.bytesio(b'x' * framing.DEFAULT_MAX_FRAME_SIZE)
 
         stream = framing.stream(1)
-        frames = list(framing.createcommandframes(stream, 1, b'command',
-                                                  {}, data))
-        self.assertEqual(frames, [
-            ffs(b'1 1 stream-begin command-request new|have-data '
-                b"cbor:{b'name': b'command'}"),
-            ffs(b'1 1 0 command-data continuation %s' % data.getvalue()),
-            ffs(b'1 1 0 command-data eos ')
-        ])
+        frames = list(
+            framing.createcommandframes(stream, 1, b'command', {}, data)
+        )
+        self.assertEqual(
+            frames,
+            [
+                ffs(
+                    b'1 1 stream-begin command-request new|have-data '
+                    b"cbor:{b'name': b'command'}"
+                ),
+                ffs(b'1 1 0 command-data continuation %s' % data.getvalue()),
+                ffs(b'1 1 0 command-data eos '),
+            ],
+        )
 
     def testdatamultipleframes(self):
         data = util.bytesio(b'x' * (framing.DEFAULT_MAX_FRAME_SIZE + 1))
 
         stream = framing.stream(1)
-        frames = list(framing.createcommandframes(stream, 1, b'command', {},
-                                                  data))
-        self.assertEqual(frames, [
-            ffs(b'1 1 stream-begin command-request new|have-data '
-                b"cbor:{b'name': b'command'}"),
-            ffs(b'1 1 0 command-data continuation %s' % (
-                b'x' * framing.DEFAULT_MAX_FRAME_SIZE)),
-            ffs(b'1 1 0 command-data eos x'),
-        ])
+        frames = list(
+            framing.createcommandframes(stream, 1, b'command', {}, data)
+        )
+        self.assertEqual(
+            frames,
+            [
+                ffs(
+                    b'1 1 stream-begin command-request new|have-data '
+                    b"cbor:{b'name': b'command'}"
+                ),
+                ffs(
+                    b'1 1 0 command-data continuation %s'
+                    % (b'x' * framing.DEFAULT_MAX_FRAME_SIZE)
+                ),
+                ffs(b'1 1 0 command-data eos x'),
+            ],
+        )
 
     def testargsanddata(self):
         data = util.bytesio(b'x' * 100)
 
         stream = framing.stream(1)
-        frames = list(framing.createcommandframes(stream, 1, b'command', {
-            b'key1': b'key1value',
-            b'key2': b'key2value',
-            b'key3': b'key3value',
-        }, data))
+        frames = list(
+            framing.createcommandframes(
+                stream,
+                1,
+                b'command',
+                {
+                    b'key1': b'key1value',
+                    b'key2': b'key2value',
+                    b'key3': b'key3value',
+                },
+                data,
+            )
+        )
 
-        self.assertEqual(frames, [
-            ffs(b'1 1 stream-begin command-request new|have-data '
-                b"cbor:{b'name': b'command', b'args': {b'key1': b'key1value', "
-                b"b'key2': b'key2value', b'key3': b'key3value'}}"),
-            ffs(b'1 1 0 command-data eos %s' % data.getvalue()),
-        ])
+        self.assertEqual(
+            frames,
+            [
+                ffs(
+                    b'1 1 stream-begin command-request new|have-data '
+                    b"cbor:{b'name': b'command', b'args': {b'key1': b'key1value', "
+                    b"b'key2': b'key2value', b'key3': b'key3value'}}"
+                ),
+                ffs(b'1 1 0 command-data eos %s' % data.getvalue()),
+            ],
+        )
 
     if not getattr(unittest.TestCase, 'assertRaisesRegex', False):
         # Python 3.7 deprecates the regex*p* version, but 2.7 lacks
         # the regex version.
-        assertRaisesRegex = (# camelcase-required
-            unittest.TestCase.assertRaisesRegexp)
+        assertRaisesRegex = (  # camelcase-required
+            unittest.TestCase.assertRaisesRegexp
+        )
 
     def testtextoutputformattingstringtype(self):
         """Formatting string must be bytes."""
         with self.assertRaisesRegex(ValueError, 'must use bytes formatting '):
-            list(framing.createtextoutputframe(None, 1, [
-                (b'foo'.decode('ascii'), [], [])]))
+            list(
+                framing.createtextoutputframe(
+                    None, 1, [(b'foo'.decode('ascii'), [], [])]
+                )
+            )
 
     def testtextoutputargumentbytes(self):
         with self.assertRaisesRegex(ValueError, 'must use bytes for argument'):
-            list(framing.createtextoutputframe(None, 1, [
-                (b'foo', [b'foo'.decode('ascii')], [])]))
+            list(
+                framing.createtextoutputframe(
+                    None, 1, [(b'foo', [b'foo'.decode('ascii')], [])]
+                )
+            )
 
     def testtextoutputlabelbytes(self):
         with self.assertRaisesRegex(ValueError, 'must use bytes for labels'):
-            list(framing.createtextoutputframe(None, 1, [
-                (b'foo', [], [b'foo'.decode('ascii')])]))
+            list(
+                framing.createtextoutputframe(
+                    None, 1, [(b'foo', [], [b'foo'.decode('ascii')])]
+                )
+            )
 
     def testtextoutput1simpleatom(self):
         stream = framing.stream(1)
-        val = list(framing.createtextoutputframe(stream, 1, [
-            (b'foo', [], [])]))
+        val = list(framing.createtextoutputframe(stream, 1, [(b'foo', [], [])]))
 
-        self.assertEqual(val, [
-            ffs(b'1 1 stream-begin text-output 0 '
-                b"cbor:[{b'msg': b'foo'}]"),
-        ])
+        self.assertEqual(
+            val,
+            [
+                ffs(
+                    b'1 1 stream-begin text-output 0 '
+                    b"cbor:[{b'msg': b'foo'}]"
+                ),
+            ],
+        )
 
     def testtextoutput2simpleatoms(self):
         stream = framing.stream(1)
-        val = list(framing.createtextoutputframe(stream, 1, [
-            (b'foo', [], []),
-            (b'bar', [], []),
-        ]))
+        val = list(
+            framing.createtextoutputframe(
+                stream, 1, [(b'foo', [], []), (b'bar', [], []),]
+            )
+        )
 
-        self.assertEqual(val, [
-            ffs(b'1 1 stream-begin text-output 0 '
-                b"cbor:[{b'msg': b'foo'}, {b'msg': b'bar'}]")
-        ])
+        self.assertEqual(
+            val,
+            [
+                ffs(
+                    b'1 1 stream-begin text-output 0 '
+                    b"cbor:[{b'msg': b'foo'}, {b'msg': b'bar'}]"
+                )
+            ],
+        )
 
     def testtextoutput1arg(self):
         stream = framing.stream(1)
-        val = list(framing.createtextoutputframe(stream, 1, [
-            (b'foo %s', [b'val1'], []),
-        ]))
+        val = list(
+            framing.createtextoutputframe(
+                stream, 1, [(b'foo %s', [b'val1'], []),]
+            )
+        )
 
-        self.assertEqual(val, [
-            ffs(b'1 1 stream-begin text-output 0 '
-                b"cbor:[{b'msg': b'foo %s', b'args': [b'val1']}]")
-        ])
+        self.assertEqual(
+            val,
+            [
+                ffs(
+                    b'1 1 stream-begin text-output 0 '
+                    b"cbor:[{b'msg': b'foo %s', b'args': [b'val1']}]"
+                )
+            ],
+        )
 
     def testtextoutput2arg(self):
         stream = framing.stream(1)
-        val = list(framing.createtextoutputframe(stream, 1, [
-            (b'foo %s %s', [b'val', b'value'], []),
-        ]))
+        val = list(
+            framing.createtextoutputframe(
+                stream, 1, [(b'foo %s %s', [b'val', b'value'], []),]
+            )
+        )
 
-        self.assertEqual(val, [
-            ffs(b'1 1 stream-begin text-output 0 '
-                b"cbor:[{b'msg': b'foo %s %s', b'args': [b'val', b'value']}]")
-        ])
+        self.assertEqual(
+            val,
+            [
+                ffs(
+                    b'1 1 stream-begin text-output 0 '
+                    b"cbor:[{b'msg': b'foo %s %s', b'args': [b'val', b'value']}]"
+                )
+            ],
+        )
 
     def testtextoutput1label(self):
         stream = framing.stream(1)
-        val = list(framing.createtextoutputframe(stream, 1, [
-            (b'foo', [], [b'label']),
-        ]))
+        val = list(
+            framing.createtextoutputframe(
+                stream, 1, [(b'foo', [], [b'label']),]
+            )
+        )
 
-        self.assertEqual(val, [
-            ffs(b'1 1 stream-begin text-output 0 '
-                b"cbor:[{b'msg': b'foo', b'labels': [b'label']}]")
-        ])
+        self.assertEqual(
+            val,
+            [
+                ffs(
+                    b'1 1 stream-begin text-output 0 '
+                    b"cbor:[{b'msg': b'foo', b'labels': [b'label']}]"
+                )
+            ],
+        )
 
     def testargandlabel(self):
         stream = framing.stream(1)
-        val = list(framing.createtextoutputframe(stream, 1, [
-            (b'foo %s', [b'arg'], [b'label']),
-        ]))
+        val = list(
+            framing.createtextoutputframe(
+                stream, 1, [(b'foo %s', [b'arg'], [b'label']),]
+            )
+        )
 
-        self.assertEqual(val, [
-            ffs(b'1 1 stream-begin text-output 0 '
-                b"cbor:[{b'msg': b'foo %s', b'args': [b'arg'], "
-                b"b'labels': [b'label']}]")
-        ])
+        self.assertEqual(
+            val,
+            [
+                ffs(
+                    b'1 1 stream-begin text-output 0 '
+                    b"cbor:[{b'msg': b'foo %s', b'args': [b'arg'], "
+                    b"b'labels': [b'label']}]"
+                )
+            ],
+        )
+
 
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
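
The expected byte strings in FrameHumanStringTests pin down the frame wire format: an 8-byte header (24-bit little-endian payload length, 16-bit little-endian request ID, one byte each for stream ID and stream flags, then the frame type in the high nibble and frame flags in the low nibble) followed by the payload, with `cbor:` payloads encoded as standard CBOR. A minimal sketch distilled from those test vectors (not Mercurial's makeframefromhumanstring or cborutil implementation) reproduces them:

    import struct

    def makeframe(requestid, streamid, streamflags, typeid, flags, payload):
        header = struct.pack('<I', len(payload))[:3]  # 24-bit length
        header += struct.pack(
            '<HBBB', requestid, streamid, streamflags, (typeid << 4) | flags
        )
        return header + payload

    assert makeframe(1, 1, 0, 1, 0, b'') == b'\x00\x00\x00\x01\x00\x01\x00\x10'
    assert (
        makeframe(2, 4, 0, 1, 0, b'foo')
        == b'\x03\x00\x00\x02\x00\x04\x00\x10foo'
    )

    def encodecborint(value):
        # CBOR major type 0 is an unsigned integer; major type 1 encodes
        # (-1 - value). Values below 24 fit into the initial byte; larger
        # ones take a 1-, 2-, or 4-byte big-endian suffix.
        major = 0
        if value < 0:
            major, value = 1, -1 - value
        if value < 24:
            return struct.pack('>B', (major << 5) | value)
        for code, fmt in ((24, '>B'), (25, '>H'), (26, '>I')):
            if value < 2 ** (8 * struct.calcsize(fmt)):
                return struct.pack('>B', (major << 5) | code) + struct.pack(
                    fmt, value
                )
        raise ValueError('64-bit integers are omitted from this sketch')

    assert encodecborint(15) == b'\x0f'
    assert encodecborint(42) == b'\x18*'
    assert encodecborint(1048576) == b'\x1a\x00\x10\x00\x00'
    assert encodecborint(-1) == b' '
    assert encodecborint(-342542) == b':\x00\x05:\r'
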
--- a/tests/test-wireproto-serverreactor.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-wireproto-serverreactor.py	Mon Oct 21 11:09:48 2019 -0400
@@ -7,18 +7,18 @@
     util,
     wireprotoframing as framing,
 )
-from mercurial.utils import (
-    cborutil,
-)
+from mercurial.utils import cborutil
 
 ffs = framing.makeframefromhumanstring
 
 OK = b''.join(cborutil.streamencode({b'status': b'ok'}))
 
+
 def makereactor(deferoutput=False):
     ui = uimod.ui()
     return framing.serverreactor(ui, deferoutput=deferoutput)
 
+
 def sendframes(reactor, gen):
     """Send a generator of frame bytearray to a reactor.
 
@@ -26,21 +26,26 @@
     """
     for frame in gen:
         header = framing.parseheader(frame)
-        payload = frame[framing.FRAME_HEADER_SIZE:]
+        payload = frame[framing.FRAME_HEADER_SIZE :]
         assert len(payload) == header.length
 
-        yield reactor.onframerecv(framing.frame(header.requestid,
-                                                header.streamid,
-                                                header.streamflags,
-                                                header.typeid,
-                                                header.flags,
-                                                payload))
+        yield reactor.onframerecv(
+            framing.frame(
+                header.requestid,
+                header.streamid,
+                header.streamflags,
+                header.typeid,
+                header.flags,
+                payload,
+            )
+        )
+
 
 def sendcommandframes(reactor, stream, rid, cmd, args, datafh=None):
     """Generate frames to run a command and send them to a reactor."""
-    return sendframes(reactor,
-                      framing.createcommandframes(stream, rid, cmd, args,
-                                                  datafh))
+    return sendframes(
+        reactor, framing.createcommandframes(stream, rid, cmd, args, datafh)
+    )
 
 
 class ServerReactorTests(unittest.TestCase):
@@ -67,13 +72,16 @@
         results = list(sendcommandframes(reactor, stream, 1, b'mycommand', {}))
         self.assertEqual(len(results), 1)
         self.assertaction(results[0], b'runcommand')
-        self.assertEqual(results[0][1], {
-            b'requestid': 1,
-            b'command': b'mycommand',
-            b'args': {},
-            b'redirect': None,
-            b'data': None,
-        })
+        self.assertEqual(
+            results[0][1],
+            {
+                b'requestid': 1,
+                b'command': b'mycommand',
+                b'args': {},
+                b'redirect': None,
+                b'data': None,
+            },
+        )
 
         result = reactor.oninputeof()
         self.assertaction(result, b'noop')
@@ -81,53 +89,77 @@
     def test1argument(self):
         reactor = makereactor()
         stream = framing.stream(1)
-        results = list(sendcommandframes(reactor, stream, 41, b'mycommand',
-                                         {b'foo': b'bar'}))
+        results = list(
+            sendcommandframes(
+                reactor, stream, 41, b'mycommand', {b'foo': b'bar'}
+            )
+        )
         self.assertEqual(len(results), 1)
         self.assertaction(results[0], b'runcommand')
-        self.assertEqual(results[0][1], {
-            b'requestid': 41,
-            b'command': b'mycommand',
-            b'args': {b'foo': b'bar'},
-            b'redirect': None,
-            b'data': None,
-        })
+        self.assertEqual(
+            results[0][1],
+            {
+                b'requestid': 41,
+                b'command': b'mycommand',
+                b'args': {b'foo': b'bar'},
+                b'redirect': None,
+                b'data': None,
+            },
+        )
 
     def testmultiarguments(self):
         reactor = makereactor()
         stream = framing.stream(1)
-        results = list(sendcommandframes(reactor, stream, 1, b'mycommand',
-                                         {b'foo': b'bar', b'biz': b'baz'}))
+        results = list(
+            sendcommandframes(
+                reactor,
+                stream,
+                1,
+                b'mycommand',
+                {b'foo': b'bar', b'biz': b'baz'},
+            )
+        )
         self.assertEqual(len(results), 1)
         self.assertaction(results[0], b'runcommand')
-        self.assertEqual(results[0][1], {
-            b'requestid': 1,
-            b'command': b'mycommand',
-            b'args': {b'foo': b'bar', b'biz': b'baz'},
-            b'redirect': None,
-            b'data': None,
-        })
+        self.assertEqual(
+            results[0][1],
+            {
+                b'requestid': 1,
+                b'command': b'mycommand',
+                b'args': {b'foo': b'bar', b'biz': b'baz'},
+                b'redirect': None,
+                b'data': None,
+            },
+        )
 
     def testsimplecommanddata(self):
         reactor = makereactor()
         stream = framing.stream(1)
-        results = list(sendcommandframes(reactor, stream, 1, b'mycommand', {},
-                                         util.bytesio(b'data!')))
+        results = list(
+            sendcommandframes(
+                reactor, stream, 1, b'mycommand', {}, util.bytesio(b'data!')
+            )
+        )
         self.assertEqual(len(results), 2)
         self.assertaction(results[0], b'wantframe')
         self.assertaction(results[1], b'runcommand')
-        self.assertEqual(results[1][1], {
-            b'requestid': 1,
-            b'command': b'mycommand',
-            b'args': {},
-            b'redirect': None,
-            b'data': b'data!',
-        })
+        self.assertEqual(
+            results[1][1],
+            {
+                b'requestid': 1,
+                b'command': b'mycommand',
+                b'args': {},
+                b'redirect': None,
+                b'data': b'data!',
+            },
+        )
 
     def testmultipledataframes(self):
         frames = [
-            ffs(b'1 1 stream-begin command-request new|have-data '
-                b"cbor:{b'name': b'mycommand'}"),
+            ffs(
+                b'1 1 stream-begin command-request new|have-data '
+                b"cbor:{b'name': b'mycommand'}"
+            ),
             ffs(b'1 1 0 command-data continuation data1'),
             ffs(b'1 1 0 command-data continuation data2'),
             ffs(b'1 1 0 command-data eos data3'),
@@ -139,19 +171,24 @@
         for i in range(3):
             self.assertaction(results[i], b'wantframe')
         self.assertaction(results[3], b'runcommand')
-        self.assertEqual(results[3][1], {
-            b'requestid': 1,
-            b'command': b'mycommand',
-            b'args': {},
-            b'redirect': None,
-            b'data': b'data1data2data3',
-        })
+        self.assertEqual(
+            results[3][1],
+            {
+                b'requestid': 1,
+                b'command': b'mycommand',
+                b'args': {},
+                b'redirect': None,
+                b'data': b'data1data2data3',
+            },
+        )
 
     def testargumentanddata(self):
         frames = [
-            ffs(b'1 1 stream-begin command-request new|have-data '
+            ffs(
+                b'1 1 stream-begin command-request new|have-data '
                 b"cbor:{b'name': b'command', b'args': {b'key': b'val',"
-                b"b'foo': b'bar'}}"),
+                b"b'foo': b'bar'}}"
+            ),
             ffs(b'1 1 0 command-data continuation value1'),
             ffs(b'1 1 0 command-data eos value2'),
         ]
@@ -160,169 +197,251 @@
         results = list(sendframes(reactor, frames))
 
         self.assertaction(results[-1], b'runcommand')
-        self.assertEqual(results[-1][1], {
-            b'requestid': 1,
-            b'command': b'command',
-            b'args': {
-                b'key': b'val',
-                b'foo': b'bar',
+        self.assertEqual(
+            results[-1][1],
+            {
+                b'requestid': 1,
+                b'command': b'command',
+                b'args': {b'key': b'val', b'foo': b'bar',},
+                b'redirect': None,
+                b'data': b'value1value2',
             },
-            b'redirect': None,
-            b'data': b'value1value2',
-        })
+        )
 
     def testnewandcontinuation(self):
-        result = self._sendsingleframe(makereactor(),
-            ffs(b'1 1 stream-begin command-request new|continuation '))
+        result = self._sendsingleframe(
+            makereactor(),
+            ffs(b'1 1 stream-begin command-request new|continuation '),
+        )
         self.assertaction(result, b'error')
-        self.assertEqual(result[1], {
-            b'message': b'received command request frame with both new and '
-                        b'continuation flags set',
-        })
+        self.assertEqual(
+            result[1],
+            {
+                b'message': b'received command request frame with both new and '
+                b'continuation flags set',
+            },
+        )
 
     def testneithernewnorcontinuation(self):
-        result = self._sendsingleframe(makereactor(),
-            ffs(b'1 1 stream-begin command-request 0 '))
+        result = self._sendsingleframe(
+            makereactor(), ffs(b'1 1 stream-begin command-request 0 ')
+        )
         self.assertaction(result, b'error')
-        self.assertEqual(result[1], {
-            b'message': b'received command request frame with neither new nor '
-                        b'continuation flags set',
-        })
+        self.assertEqual(
+            result[1],
+            {
+                b'message': b'received command request frame with neither new nor '
+                b'continuation flags set',
+            },
+        )
 
     def testunexpectedcommanddata(self):
         """Command data frame when not running a command is an error."""
-        result = self._sendsingleframe(makereactor(),
-            ffs(b'1 1 stream-begin command-data 0 ignored'))
+        result = self._sendsingleframe(
+            makereactor(), ffs(b'1 1 stream-begin command-data 0 ignored')
+        )
         self.assertaction(result, b'error')
-        self.assertEqual(result[1], {
-            b'message': b'expected sender protocol settings or command request '
-                        b'frame; got 2',
-        })
+        self.assertEqual(
+            result[1],
+            {
+                b'message': b'expected sender protocol settings or command request '
+                b'frame; got 2',
+            },
+        )
 
     def testunexpectedcommanddatareceiving(self):
         """Same as above except the command is receiving."""
-        results = list(sendframes(makereactor(), [
-            ffs(b'1 1 stream-begin command-request new|more '
-                b"cbor:{b'name': b'ignored'}"),
-            ffs(b'1 1 0 command-data eos ignored'),
-        ]))
+        results = list(
+            sendframes(
+                makereactor(),
+                [
+                    ffs(
+                        b'1 1 stream-begin command-request new|more '
+                        b"cbor:{b'name': b'ignored'}"
+                    ),
+                    ffs(b'1 1 0 command-data eos ignored'),
+                ],
+            )
+        )
 
         self.assertaction(results[0], b'wantframe')
         self.assertaction(results[1], b'error')
-        self.assertEqual(results[1][1], {
-            b'message': b'received command data frame for request that is not '
-                        b'expecting data: 1',
-        })
+        self.assertEqual(
+            results[1][1],
+            {
+                b'message': b'received command data frame for request that is not '
+                b'expecting data: 1',
+            },
+        )
 
     def testconflictingrequestidallowed(self):
         """Multiple fully serviced commands with same request ID is allowed."""
         reactor = makereactor()
         results = []
         outstream = reactor.makeoutputstream()
-        results.append(self._sendsingleframe(
-            reactor, ffs(b'1 1 stream-begin command-request new '
-                         b"cbor:{b'name': b'command'}")))
+        results.append(
+            self._sendsingleframe(
+                reactor,
+                ffs(
+                    b'1 1 stream-begin command-request new '
+                    b"cbor:{b'name': b'command'}"
+                ),
+            )
+        )
         result = reactor.oncommandresponsereadyobjects(
-            outstream, 1, [b'response1'])
+            outstream, 1, [b'response1']
+        )
         self.assertaction(result, b'sendframes')
         list(result[1][b'framegen'])
-        results.append(self._sendsingleframe(
-            reactor, ffs(b'1 1 stream-begin command-request new '
-                         b"cbor:{b'name': b'command'}")))
+        results.append(
+            self._sendsingleframe(
+                reactor,
+                ffs(
+                    b'1 1 stream-begin command-request new '
+                    b"cbor:{b'name': b'command'}"
+                ),
+            )
+        )
         result = reactor.oncommandresponsereadyobjects(
-            outstream, 1, [b'response2'])
+            outstream, 1, [b'response2']
+        )
         self.assertaction(result, b'sendframes')
         list(result[1][b'framegen'])
-        results.append(self._sendsingleframe(
-            reactor, ffs(b'1 1 stream-begin command-request new '
-                         b"cbor:{b'name': b'command'}")))
+        results.append(
+            self._sendsingleframe(
+                reactor,
+                ffs(
+                    b'1 1 stream-begin command-request new '
+                    b"cbor:{b'name': b'command'}"
+                ),
+            )
+        )
         result = reactor.oncommandresponsereadyobjects(
-            outstream, 1, [b'response3'])
+            outstream, 1, [b'response3']
+        )
         self.assertaction(result, b'sendframes')
         list(result[1][b'framegen'])
 
         for i in range(3):
             self.assertaction(results[i], b'runcommand')
-            self.assertEqual(results[i][1], {
-                b'requestid': 1,
-                b'command': b'command',
-                b'args': {},
-                b'redirect': None,
-                b'data': None,
-            })
+            self.assertEqual(
+                results[i][1],
+                {
+                    b'requestid': 1,
+                    b'command': b'command',
+                    b'args': {},
+                    b'redirect': None,
+                    b'data': None,
+                },
+            )
 
     def testconflictingrequestid(self):
         """Request ID for new command matching in-flight command is illegal."""
-        results = list(sendframes(makereactor(), [
-            ffs(b'1 1 stream-begin command-request new|more '
-                b"cbor:{b'name': b'command'}"),
-            ffs(b'1 1 0 command-request new '
-                b"cbor:{b'name': b'command1'}"),
-        ]))
+        results = list(
+            sendframes(
+                makereactor(),
+                [
+                    ffs(
+                        b'1 1 stream-begin command-request new|more '
+                        b"cbor:{b'name': b'command'}"
+                    ),
+                    ffs(
+                        b'1 1 0 command-request new '
+                        b"cbor:{b'name': b'command1'}"
+                    ),
+                ],
+            )
+        )
 
         self.assertaction(results[0], b'wantframe')
         self.assertaction(results[1], b'error')
-        self.assertEqual(results[1][1], {
-            b'message': b'request with ID 1 already received',
-        })
+        self.assertEqual(
+            results[1][1], {b'message': b'request with ID 1 already received',}
+        )
 
     def testinterleavedcommands(self):
-        cbor1 = b''.join(cborutil.streamencode({
-            b'name': b'command1',
-            b'args': {
-                b'foo': b'bar',
-                b'key1': b'val',
-            }
-        }))
-        cbor3 = b''.join(cborutil.streamencode({
-            b'name': b'command3',
-            b'args': {
-                b'biz': b'baz',
-                b'key': b'val',
-            },
-        }))
-
-        results = list(sendframes(makereactor(), [
-            ffs(b'1 1 stream-begin command-request new|more %s' % cbor1[0:6]),
-            ffs(b'3 1 0 command-request new|more %s' % cbor3[0:10]),
-            ffs(b'1 1 0 command-request continuation|more %s' % cbor1[6:9]),
-            ffs(b'3 1 0 command-request continuation|more %s' % cbor3[10:13]),
-            ffs(b'3 1 0 command-request continuation %s' % cbor3[13:]),
-            ffs(b'1 1 0 command-request continuation %s' % cbor1[9:]),
-        ]))
+        cbor1 = b''.join(
+            cborutil.streamencode(
+                {
+                    b'name': b'command1',
+                    b'args': {b'foo': b'bar', b'key1': b'val',},
+                }
+            )
+        )
+        cbor3 = b''.join(
+            cborutil.streamencode(
+                {
+                    b'name': b'command3',
+                    b'args': {b'biz': b'baz', b'key': b'val',},
+                }
+            )
+        )
 
-        self.assertEqual([t[0] for t in results], [
-            b'wantframe',
-            b'wantframe',
-            b'wantframe',
-            b'wantframe',
-            b'runcommand',
-            b'runcommand',
-        ])
+        results = list(
+            sendframes(
+                makereactor(),
+                [
+                    ffs(
+                        b'1 1 stream-begin command-request new|more %s'
+                        % cbor1[0:6]
+                    ),
+                    ffs(b'3 1 0 command-request new|more %s' % cbor3[0:10]),
+                    ffs(
+                        b'1 1 0 command-request continuation|more %s'
+                        % cbor1[6:9]
+                    ),
+                    ffs(
+                        b'3 1 0 command-request continuation|more %s'
+                        % cbor3[10:13]
+                    ),
+                    ffs(b'3 1 0 command-request continuation %s' % cbor3[13:]),
+                    ffs(b'1 1 0 command-request continuation %s' % cbor1[9:]),
+                ],
+            )
+        )
 
-        self.assertEqual(results[4][1], {
-            b'requestid': 3,
-            b'command': b'command3',
-            b'args': {b'biz': b'baz', b'key': b'val'},
-            b'redirect': None,
-            b'data': None,
-        })
-        self.assertEqual(results[5][1], {
-            b'requestid': 1,
-            b'command': b'command1',
-            b'args': {b'foo': b'bar', b'key1': b'val'},
-            b'redirect': None,
-            b'data': None,
-        })
+        self.assertEqual(
+            [t[0] for t in results],
+            [
+                b'wantframe',
+                b'wantframe',
+                b'wantframe',
+                b'wantframe',
+                b'runcommand',
+                b'runcommand',
+            ],
+        )
+
+        self.assertEqual(
+            results[4][1],
+            {
+                b'requestid': 3,
+                b'command': b'command3',
+                b'args': {b'biz': b'baz', b'key': b'val'},
+                b'redirect': None,
+                b'data': None,
+            },
+        )
+        self.assertEqual(
+            results[5][1],
+            {
+                b'requestid': 1,
+                b'command': b'command1',
+                b'args': {b'foo': b'bar', b'key1': b'val'},
+                b'redirect': None,
+                b'data': None,
+            },
+        )
 
     def testmissingcommanddataframe(self):
         # The reactor doesn't currently handle partially received commands.
         # So this test is failing to do anything with request 1.
         frames = [
-            ffs(b'1 1 stream-begin command-request new|have-data '
-                b"cbor:{b'name': b'command1'}"),
-            ffs(b'3 1 0 command-request new '
-                b"cbor:{b'name': b'command2'}"),
+            ffs(
+                b'1 1 stream-begin command-request new|have-data '
+                b"cbor:{b'name': b'command1'}"
+            ),
+            ffs(b'3 1 0 command-request new ' b"cbor:{b'name': b'command2'}"),
         ]
         results = list(sendframes(makereactor(), frames))
         self.assertEqual(len(results), 2)
@@ -331,31 +450,45 @@
 
     def testmissingcommanddataframeflags(self):
         frames = [
-            ffs(b'1 1 stream-begin command-request new|have-data '
-                b"cbor:{b'name': b'command1'}"),
+            ffs(
+                b'1 1 stream-begin command-request new|have-data '
+                b"cbor:{b'name': b'command1'}"
+            ),
             ffs(b'1 1 0 command-data 0 data'),
         ]
         results = list(sendframes(makereactor(), frames))
         self.assertEqual(len(results), 2)
         self.assertaction(results[0], b'wantframe')
         self.assertaction(results[1], b'error')
-        self.assertEqual(results[1][1], {
-            b'message': b'command data frame without flags',
-        })
+        self.assertEqual(
+            results[1][1], {b'message': b'command data frame without flags',}
+        )
 
     def testframefornonreceivingrequest(self):
         """Receiving a frame for a command that is not receiving is illegal."""
-        results = list(sendframes(makereactor(), [
-            ffs(b'1 1 stream-begin command-request new '
-                b"cbor:{b'name': b'command1'}"),
-            ffs(b'3 1 0 command-request new|have-data '
-                b"cbor:{b'name': b'command3'}"),
-            ffs(b'5 1 0 command-data eos ignored'),
-        ]))
+        results = list(
+            sendframes(
+                makereactor(),
+                [
+                    ffs(
+                        b'1 1 stream-begin command-request new '
+                        b"cbor:{b'name': b'command1'}"
+                    ),
+                    ffs(
+                        b'3 1 0 command-request new|have-data '
+                        b"cbor:{b'name': b'command3'}"
+                    ),
+                    ffs(b'5 1 0 command-data eos ignored'),
+                ],
+            )
+        )
         self.assertaction(results[2], b'error')
-        self.assertEqual(results[2][1], {
-            b'message': b'received frame for request that is not receiving: 5',
-        })
+        self.assertEqual(
+            results[2][1],
+            {
+                b'message': b'received frame for request that is not receiving: 5',
+            },
+        )
 
     def testsimpleresponse(self):
         """Bytes response to command sends result frames."""
@@ -365,14 +498,18 @@
 
         outstream = reactor.makeoutputstream()
         result = reactor.oncommandresponsereadyobjects(
-            outstream, 1, [b'response'])
+            outstream, 1, [b'response']
+        )
         self.assertaction(result, b'sendframes')
-        self.assertframesequal(result[1][b'framegen'], [
-            b'1 2 stream-begin stream-settings eos cbor:b"identity"',
-            b'1 2 encoded command-response continuation %s' % OK,
-            b'1 2 encoded command-response continuation cbor:b"response"',
-            b'1 2 0 command-response eos ',
-        ])
+        self.assertframesequal(
+            result[1][b'framegen'],
+            [
+                b'1 2 stream-begin stream-settings eos cbor:b"identity"',
+                b'1 2 encoded command-response continuation %s' % OK,
+                b'1 2 encoded command-response continuation cbor:b"response"',
+                b'1 2 0 command-response eos ',
+            ],
+        )
 
     def testmultiframeresponse(self):
         """Bytes response spanning multiple frames is handled."""
@@ -385,16 +522,20 @@
 
         outstream = reactor.makeoutputstream()
         result = reactor.oncommandresponsereadyobjects(
-            outstream, 1, [first + second])
+            outstream, 1, [first + second]
+        )
         self.assertaction(result, b'sendframes')
-        self.assertframesequal(result[1][b'framegen'], [
-            b'1 2 stream-begin stream-settings eos cbor:b"identity"',
-            b'1 2 encoded command-response continuation %s' % OK,
-            b'1 2 encoded command-response continuation Y\x80d',
-            b'1 2 encoded command-response continuation %s' % first,
-            b'1 2 encoded command-response continuation %s' % second,
-            b'1 2 0 command-response eos '
-        ])
+        self.assertframesequal(
+            result[1][b'framegen'],
+            [
+                b'1 2 stream-begin stream-settings eos cbor:b"identity"',
+                b'1 2 encoded command-response continuation %s' % OK,
+                b'1 2 encoded command-response continuation Y\x80d',
+                b'1 2 encoded command-response continuation %s' % first,
+                b'1 2 encoded command-response continuation %s' % second,
+                b'1 2 0 command-response eos ',
+            ],
+        )
 
     def testservererror(self):
         reactor = makereactor()
@@ -404,33 +545,41 @@
         outstream = reactor.makeoutputstream()
         result = reactor.onservererror(outstream, 1, b'some message')
         self.assertaction(result, b'sendframes')
-        self.assertframesequal(result[1][b'framegen'], [
-            b"1 2 stream-begin error-response 0 "
-            b"cbor:{b'type': b'server', "
-            b"b'message': [{b'msg': b'some message'}]}",
-        ])
+        self.assertframesequal(
+            result[1][b'framegen'],
+            [
+                b"1 2 stream-begin error-response 0 "
+                b"cbor:{b'type': b'server', "
+                b"b'message': [{b'msg': b'some message'}]}",
+            ],
+        )
 
     def test1commanddeferresponse(self):
         """Responses when in deferred output mode are delayed until EOF."""
         reactor = makereactor(deferoutput=True)
         instream = framing.stream(1)
-        results = list(sendcommandframes(reactor, instream, 1, b'mycommand',
-                                         {}))
+        results = list(
+            sendcommandframes(reactor, instream, 1, b'mycommand', {})
+        )
         self.assertEqual(len(results), 1)
         self.assertaction(results[0], b'runcommand')
 
         outstream = reactor.makeoutputstream()
         result = reactor.oncommandresponsereadyobjects(
-            outstream, 1, [b'response'])
+            outstream, 1, [b'response']
+        )
         self.assertaction(result, b'noop')
         result = reactor.oninputeof()
         self.assertaction(result, b'sendframes')
-        self.assertframesequal(result[1][b'framegen'], [
-            b'1 2 stream-begin stream-settings eos cbor:b"identity"',
-            b'1 2 encoded command-response continuation %s' % OK,
-            b'1 2 encoded command-response continuation cbor:b"response"',
-            b'1 2 0 command-response eos ',
-        ])
+        self.assertframesequal(
+            result[1][b'framegen'],
+            [
+                b'1 2 stream-begin stream-settings eos cbor:b"identity"',
+                b'1 2 encoded command-response continuation %s' % OK,
+                b'1 2 encoded command-response continuation cbor:b"response"',
+                b'1 2 0 command-response eos ',
+            ],
+        )
 
     def testmultiplecommanddeferresponse(self):
         reactor = makereactor(deferoutput=True)
@@ -440,22 +589,27 @@
 
         outstream = reactor.makeoutputstream()
         result = reactor.oncommandresponsereadyobjects(
-            outstream, 1, [b'response1'])
+            outstream, 1, [b'response1']
+        )
         self.assertaction(result, b'noop')
         result = reactor.oncommandresponsereadyobjects(
-            outstream, 3, [b'response2'])
+            outstream, 3, [b'response2']
+        )
         self.assertaction(result, b'noop')
         result = reactor.oninputeof()
         self.assertaction(result, b'sendframes')
-        self.assertframesequal(result[1][b'framegen'], [
-            b'1 2 stream-begin stream-settings eos cbor:b"identity"',
-            b'1 2 encoded command-response continuation %s' % OK,
-            b'1 2 encoded command-response continuation cbor:b"response1"',
-            b'1 2 0 command-response eos ',
-            b'3 2 encoded command-response continuation %s' % OK,
-            b'3 2 encoded command-response continuation cbor:b"response2"',
-            b'3 2 0 command-response eos ',
-        ])
+        self.assertframesequal(
+            result[1][b'framegen'],
+            [
+                b'1 2 stream-begin stream-settings eos cbor:b"identity"',
+                b'1 2 encoded command-response continuation %s' % OK,
+                b'1 2 encoded command-response continuation cbor:b"response1"',
+                b'1 2 0 command-response eos ',
+                b'3 2 encoded command-response continuation %s' % OK,
+                b'3 2 encoded command-response continuation cbor:b"response2"',
+                b'3 2 0 command-response eos ',
+            ],
+        )
 
     def testrequestidtracking(self):
         reactor = makereactor(deferoutput=True)
@@ -472,18 +626,21 @@
 
         result = reactor.oninputeof()
         self.assertaction(result, b'sendframes')
-        self.assertframesequal(result[1][b'framegen'], [
-            b'3 2 stream-begin stream-settings eos cbor:b"identity"',
-            b'3 2 encoded command-response continuation %s' % OK,
-            b'3 2 encoded command-response continuation cbor:b"response3"',
-            b'3 2 0 command-response eos ',
-            b'1 2 encoded command-response continuation %s' % OK,
-            b'1 2 encoded command-response continuation cbor:b"response1"',
-            b'1 2 0 command-response eos ',
-            b'5 2 encoded command-response continuation %s' % OK,
-            b'5 2 encoded command-response continuation cbor:b"response5"',
-            b'5 2 0 command-response eos ',
-        ])
+        self.assertframesequal(
+            result[1][b'framegen'],
+            [
+                b'3 2 stream-begin stream-settings eos cbor:b"identity"',
+                b'3 2 encoded command-response continuation %s' % OK,
+                b'3 2 encoded command-response continuation cbor:b"response3"',
+                b'3 2 0 command-response eos ',
+                b'1 2 encoded command-response continuation %s' % OK,
+                b'1 2 encoded command-response continuation cbor:b"response1"',
+                b'1 2 0 command-response eos ',
+                b'5 2 encoded command-response continuation %s' % OK,
+                b'5 2 encoded command-response continuation cbor:b"response5"',
+                b'5 2 0 command-response eos ',
+            ],
+        )
 
     def testduplicaterequestonactivecommand(self):
         """Receiving a request ID that matches a request that isn't finished."""
@@ -493,9 +650,9 @@
         results = list(sendcommandframes(reactor, stream, 1, b'command1', {}))
 
         self.assertaction(results[0], b'error')
-        self.assertEqual(results[0][1], {
-            b'message': b'request with ID 1 is already active',
-        })
+        self.assertEqual(
+            results[0][1], {b'message': b'request with ID 1 is already active',}
+        )
 
     def testduplicaterequestonactivecommandnosend(self):
         """Same as above but we've registered a response but haven't sent it."""
@@ -510,9 +667,9 @@
 
         results = list(sendcommandframes(reactor, instream, 1, b'command1', {}))
         self.assertaction(results[0], b'error')
-        self.assertEqual(results[0][1], {
-            b'message': b'request with ID 1 is already active',
-        })
+        self.assertEqual(
+            results[0][1], {b'message': b'request with ID 1 is already active',}
+        )
 
     def testduplicaterequestaftersend(self):
         """We can use a duplicate request ID after we've sent the response."""
@@ -528,70 +685,100 @@
 
     def testprotocolsettingsnoflags(self):
         result = self._sendsingleframe(
-            makereactor(),
-            ffs(b'0 1 stream-begin sender-protocol-settings 0 '))
+            makereactor(), ffs(b'0 1 stream-begin sender-protocol-settings 0 ')
+        )
         self.assertaction(result, b'error')
-        self.assertEqual(result[1], {
-            b'message': b'sender protocol settings frame must have '
-                        b'continuation or end of stream flag set',
-        })
+        self.assertEqual(
+            result[1],
+            {
+                b'message': b'sender protocol settings frame must have '
+                b'continuation or end of stream flag set',
+            },
+        )
 
     def testprotocolsettingsconflictflags(self):
         result = self._sendsingleframe(
             makereactor(),
-            ffs(b'0 1 stream-begin sender-protocol-settings continuation|eos '))
+            ffs(b'0 1 stream-begin sender-protocol-settings continuation|eos '),
+        )
         self.assertaction(result, b'error')
-        self.assertEqual(result[1], {
-            b'message': b'sender protocol settings frame cannot have both '
-                        b'continuation and end of stream flags set',
-        })
+        self.assertEqual(
+            result[1],
+            {
+                b'message': b'sender protocol settings frame cannot have both '
+                b'continuation and end of stream flags set',
+            },
+        )
 
     def testprotocolsettingsemptypayload(self):
         result = self._sendsingleframe(
             makereactor(),
-            ffs(b'0 1 stream-begin sender-protocol-settings eos '))
+            ffs(b'0 1 stream-begin sender-protocol-settings eos '),
+        )
         self.assertaction(result, b'error')
-        self.assertEqual(result[1], {
-            b'message': b'sender protocol settings frame did not contain CBOR '
-                        b'data',
-        })
+        self.assertEqual(
+            result[1],
+            {
+                b'message': b'sender protocol settings frame did not contain CBOR '
+                b'data',
+            },
+        )
 
     def testprotocolsettingsmultipleobjects(self):
         result = self._sendsingleframe(
             makereactor(),
-            ffs(b'0 1 stream-begin sender-protocol-settings eos '
-                b'\x46foobar\x43foo'))
+            ffs(
+                b'0 1 stream-begin sender-protocol-settings eos '
+                b'\x46foobar\x43foo'
+            ),
+        )
         self.assertaction(result, b'error')
-        self.assertEqual(result[1], {
-            b'message': b'sender protocol settings frame contained multiple '
-                        b'CBOR values',
-        })
+        self.assertEqual(
+            result[1],
+            {
+                b'message': b'sender protocol settings frame contained multiple '
+                b'CBOR values',
+            },
+        )
 
     def testprotocolsettingscontentencodings(self):
         reactor = makereactor()
 
         result = self._sendsingleframe(
             reactor,
-            ffs(b'0 1 stream-begin sender-protocol-settings eos '
-                b'cbor:{b"contentencodings": [b"a", b"b"]}'))
+            ffs(
+                b'0 1 stream-begin sender-protocol-settings eos '
+                b'cbor:{b"contentencodings": [b"a", b"b"]}'
+            ),
+        )
         self.assertaction(result, b'wantframe')
 
         self.assertEqual(reactor._state, b'idle')
-        self.assertEqual(reactor._sendersettings[b'contentencodings'],
-                         [b'a', b'b'])
+        self.assertEqual(
+            reactor._sendersettings[b'contentencodings'], [b'a', b'b']
+        )
 
     def testprotocolsettingsmultipleframes(self):
         reactor = makereactor()
 
-        data = b''.join(cborutil.streamencode({
-            b'contentencodings': [b'value1', b'value2'],
-        }))
+        data = b''.join(
+            cborutil.streamencode(
+                {b'contentencodings': [b'value1', b'value2'],}
+            )
+        )
 
-        results = list(sendframes(reactor, [
-            ffs(b'0 1 stream-begin sender-protocol-settings continuation %s' %
-                data[0:5]),
-            ffs(b'0 1 0 sender-protocol-settings eos %s' % data[5:]),
-        ]))
+        results = list(
+            sendframes(
+                reactor,
+                [
+                    ffs(
+                        b'0 1 stream-begin sender-protocol-settings continuation %s'
+                        % data[0:5]
+                    ),
+                    ffs(b'0 1 0 sender-protocol-settings eos %s' % data[5:]),
+                ],
+            )
+        )
 
         self.assertEqual(len(results), 2)
 
@@ -599,13 +786,15 @@
         self.assertaction(results[1], b'wantframe')
 
         self.assertEqual(reactor._state, b'idle')
-        self.assertEqual(reactor._sendersettings[b'contentencodings'],
-                         [b'value1', b'value2'])
+        self.assertEqual(
+            reactor._sendersettings[b'contentencodings'], [b'value1', b'value2']
+        )
 
     def testprotocolsettingsbadcbor(self):
         result = self._sendsingleframe(
             makereactor(),
-            ffs(b'0 1 stream-begin sender-protocol-settings eos badvalue'))
+            ffs(b'0 1 stream-begin sender-protocol-settings eos badvalue'),
+        )
         self.assertaction(result, b'error')
 
     def testprotocolsettingsnoninitial(self):
@@ -618,13 +807,15 @@
         self.assertaction(results[0], b'runcommand')
 
         result = self._sendsingleframe(
-            reactor,
-            ffs(b'0 1 0 sender-protocol-settings eos '))
+            reactor, ffs(b'0 1 0 sender-protocol-settings eos ')
+        )
         self.assertaction(result, b'error')
-        self.assertEqual(result[1], {
-            b'message': b'expected command request frame; got 8',
-        })
+        self.assertEqual(
+            result[1], {b'message': b'expected command request frame; got 8',}
+        )
+
 
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
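
Every assertion in ServerReactorTests switches on the same (action, metadata) protocol: the reactor consumes one frame at a time and answers b'wantframe' until a command is complete, b'runcommand' with the accumulated request, or b'error' with a message. A toy reactor (hypothetical, far simpler than framing.serverreactor) shows the shape of that contract as testmultipledataframes exercises it:

    class toyreactor(object):
        """Buffers command-data payloads until the final frame arrives."""

        def __init__(self):
            self._buffer = b''

        def onframerecv(self, payload, eos=False):
            self._buffer += payload
            if not eos:
                # The command is incomplete; ask the caller for more frames.
                return b'wantframe', {}
            data, self._buffer = self._buffer, b''
            return b'runcommand', {b'data': data}

    reactor = toyreactor()
    assert reactor.onframerecv(b'data1') == (b'wantframe', {})
    assert reactor.onframerecv(b'data2') == (b'wantframe', {})
    assert reactor.onframerecv(b'data3', eos=True) == (
        b'runcommand',
        {b'data': b'data1data2data3'},
    )
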
--- a/tests/test-wireproto.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-wireproto.py	Mon Oct 21 11:09:48 2019 -0400
@@ -11,11 +11,11 @@
     wireprotov1peer,
     wireprotov1server,
 )
-from mercurial.utils import (
-    stringutil,
-)
+from mercurial.utils import stringutil
+
 stringio = util.stringio
 
+
 class proto(object):
     def __init__(self, args):
         self.args = args
@@ -30,11 +30,13 @@
     def checkperm(self, perm):
         pass
 
+
 wireprototypes.TRANSPORTS['dummyproto'] = {
     'transport': 'dummy',
     'version': 1,
 }
 
+
 class clientpeer(wireprotov1peer.wirepeer):
     def __init__(self, serverrepo, ui):
         self.serverrepo = serverrepo
@@ -77,6 +79,7 @@
         yield {b'name': mangle(name)}, f
         yield unmangle(f.value)
 
+
 class serverrepo(object):
     def __init__(self, ui):
         self.ui = ui
@@ -87,29 +90,37 @@
     def filtered(self, name):
         return self
 
+
 def mangle(s):
     return b''.join(pycompat.bytechr(ord(c) + 1) for c in pycompat.bytestr(s))
+
+
 def unmangle(s):
     return b''.join(pycompat.bytechr(ord(c) - 1) for c in pycompat.bytestr(s))
 
+
 def greet(repo, proto, name):
     return mangle(repo.greet(unmangle(name)))
 
+
 wireprotov1server.commands[b'greet'] = (greet, b'name')
 
 srv = serverrepo(uimod.ui())
 clt = clientpeer(srv, uimod.ui())
 
+
 def printb(data, end=b'\n'):
     out = getattr(sys.stdout, 'buffer', sys.stdout)
     out.write(data + end)
     out.flush()
 
+
 printb(clt.greet(b"Foobar"))
 
 with clt.commandexecutor() as e:
     fgreet1 = e.callcommand(b'greet', {b'name': b'Fo, =;:<o'})
     fgreet2 = e.callcommand(b'greet', {b'name': b'Bar'})
 
-printb(stringutil.pprint([f.result() for f in (fgreet1, fgreet2)],
-                         bprefix=True))
+printb(
+    stringutil.pprint([f.result() for f in (fgreet1, fgreet2)], bprefix=True)
+)
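
The greet round trip in this test works because mangle and unmangle are inverse byte-level shifts: every byte moves up one code point on the way out and back down on the way in. A plain Python 3 rendering (dropping the pycompat helpers the test needs only for Python 2 support) makes the symmetry explicit:

    def mangle(s):
        return bytes(c + 1 for c in s)

    def unmangle(s):
        return bytes(c - 1 for c in s)

    assert mangle(b'Foobar') == b'Gppcbs'
    assert unmangle(mangle(b'Fo, =;:<o')) == b'Fo, =;:<o'
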
--- a/tests/test-wsgirequest.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/test-wsgirequest.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,12 +2,8 @@
 
 import unittest
 
-from mercurial.hgweb import (
-    request as requestmod,
-)
-from mercurial import (
-    error,
-)
+from mercurial.hgweb import request as requestmod
+from mercurial import error
 
 DEFAULT_ENV = {
     r'REQUEST_METHOD': r'GET',
@@ -23,12 +19,15 @@
     r'wsgi.run_once': False,
 }
 
+
 def parse(env, reponame=None, altbaseurl=None, extra=None):
     env = dict(env)
     env.update(extra or {})
 
-    return requestmod.parserequestfromenv(env, reponame=reponame,
-                                          altbaseurl=altbaseurl)
+    return requestmod.parserequestfromenv(
+        env, reponame=reponame, altbaseurl=altbaseurl
+    )
+
 
 class ParseRequestTests(unittest.TestCase):
     def testdefault(self):
@@ -50,19 +49,17 @@
         self.assertEqual(len(r.headers), 0)
 
     def testcustomport(self):
-        r = parse(DEFAULT_ENV, extra={
-            r'SERVER_PORT': r'8000',
-        })
+        r = parse(DEFAULT_ENV, extra={r'SERVER_PORT': r'8000',})
 
         self.assertEqual(r.url, b'http://testserver:8000')
         self.assertEqual(r.baseurl, b'http://testserver:8000')
         self.assertEqual(r.advertisedurl, b'http://testserver:8000')
         self.assertEqual(r.advertisedbaseurl, b'http://testserver:8000')
 
-        r = parse(DEFAULT_ENV, extra={
-            r'SERVER_PORT': r'4000',
-            r'wsgi.url_scheme': r'https',
-        })
+        r = parse(
+            DEFAULT_ENV,
+            extra={r'SERVER_PORT': r'4000', r'wsgi.url_scheme': r'https',},
+        )
 
         self.assertEqual(r.url, b'https://testserver:4000')
         self.assertEqual(r.baseurl, b'https://testserver:4000')
@@ -70,9 +67,7 @@
         self.assertEqual(r.advertisedbaseurl, b'https://testserver:4000')
 
     def testhttphost(self):
-        r = parse(DEFAULT_ENV, extra={
-            r'HTTP_HOST': r'altserver',
-        })
+        r = parse(DEFAULT_ENV, extra={r'HTTP_HOST': r'altserver',})
 
         self.assertEqual(r.url, b'http://altserver')
         self.assertEqual(r.baseurl, b'http://altserver')
@@ -80,9 +75,7 @@
         self.assertEqual(r.advertisedbaseurl, b'http://testserver')
 
     def testscriptname(self):
-        r = parse(DEFAULT_ENV, extra={
-            r'SCRIPT_NAME': r'',
-        })
+        r = parse(DEFAULT_ENV, extra={r'SCRIPT_NAME': r'',})
 
         self.assertEqual(r.url, b'http://testserver')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -92,9 +85,7 @@
         self.assertEqual(r.dispatchparts, [])
         self.assertIsNone(r.dispatchpath)
 
-        r = parse(DEFAULT_ENV, extra={
-            r'SCRIPT_NAME': r'/script',
-        })
+        r = parse(DEFAULT_ENV, extra={r'SCRIPT_NAME': r'/script',})
 
         self.assertEqual(r.url, b'http://testserver/script')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -104,9 +95,7 @@
         self.assertEqual(r.dispatchparts, [])
         self.assertIsNone(r.dispatchpath)
 
-        r = parse(DEFAULT_ENV, extra={
-            r'SCRIPT_NAME': r'/multiple words',
-        })
+        r = parse(DEFAULT_ENV, extra={r'SCRIPT_NAME': r'/multiple words',})
 
         self.assertEqual(r.url, b'http://testserver/multiple%20words')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -117,9 +106,7 @@
         self.assertIsNone(r.dispatchpath)
 
     def testpathinfo(self):
-        r = parse(DEFAULT_ENV, extra={
-            r'PATH_INFO': r'',
-        })
+        r = parse(DEFAULT_ENV, extra={r'PATH_INFO': r'',})
 
         self.assertEqual(r.url, b'http://testserver')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -129,9 +116,7 @@
         self.assertEqual(r.dispatchparts, [])
         self.assertEqual(r.dispatchpath, b'')
 
-        r = parse(DEFAULT_ENV, extra={
-            r'PATH_INFO': r'/pathinfo',
-        })
+        r = parse(DEFAULT_ENV, extra={r'PATH_INFO': r'/pathinfo',})
 
         self.assertEqual(r.url, b'http://testserver/pathinfo')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -141,9 +126,7 @@
         self.assertEqual(r.dispatchparts, [b'pathinfo'])
         self.assertEqual(r.dispatchpath, b'pathinfo')
 
-        r = parse(DEFAULT_ENV, extra={
-            r'PATH_INFO': r'/one/two/',
-        })
+        r = parse(DEFAULT_ENV, extra={r'PATH_INFO': r'/one/two/',})
 
         self.assertEqual(r.url, b'http://testserver/one/two/')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -154,10 +137,10 @@
         self.assertEqual(r.dispatchpath, b'one/two')
 
     def testscriptandpathinfo(self):
-        r = parse(DEFAULT_ENV, extra={
-            r'SCRIPT_NAME': r'/script',
-            r'PATH_INFO': r'/pathinfo',
-        })
+        r = parse(
+            DEFAULT_ENV,
+            extra={r'SCRIPT_NAME': r'/script', r'PATH_INFO': r'/pathinfo',},
+        )
 
         self.assertEqual(r.url, b'http://testserver/script/pathinfo')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -167,26 +150,34 @@
         self.assertEqual(r.dispatchparts, [b'pathinfo'])
         self.assertEqual(r.dispatchpath, b'pathinfo')
 
-        r = parse(DEFAULT_ENV, extra={
-            r'SCRIPT_NAME': r'/script1/script2',
-            r'PATH_INFO': r'/path1/path2',
-        })
+        r = parse(
+            DEFAULT_ENV,
+            extra={
+                r'SCRIPT_NAME': r'/script1/script2',
+                r'PATH_INFO': r'/path1/path2',
+            },
+        )
 
-        self.assertEqual(r.url,
-                         b'http://testserver/script1/script2/path1/path2')
+        self.assertEqual(
+            r.url, b'http://testserver/script1/script2/path1/path2'
+        )
         self.assertEqual(r.baseurl, b'http://testserver')
-        self.assertEqual(r.advertisedurl,
-                         b'http://testserver/script1/script2/path1/path2')
+        self.assertEqual(
+            r.advertisedurl, b'http://testserver/script1/script2/path1/path2'
+        )
         self.assertEqual(r.advertisedbaseurl, b'http://testserver')
         self.assertEqual(r.apppath, b'/script1/script2')
         self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
         self.assertEqual(r.dispatchpath, b'path1/path2')
 
-        r = parse(DEFAULT_ENV, extra={
-            r'HTTP_HOST': r'hostserver',
-            r'SCRIPT_NAME': r'/script',
-            r'PATH_INFO': r'/pathinfo',
-        })
+        r = parse(
+            DEFAULT_ENV,
+            extra={
+                r'HTTP_HOST': r'hostserver',
+                r'SCRIPT_NAME': r'/script',
+                r'PATH_INFO': r'/pathinfo',
+            },
+        )
 
         self.assertEqual(r.url, b'http://hostserver/script/pathinfo')
         self.assertEqual(r.baseurl, b'http://hostserver')
@@ -199,32 +190,41 @@
     if not getattr(unittest.TestCase, 'assertRaisesRegex', False):
         # Python 3.7 deprecates the regex*p* version, but 2.7 lacks
         # the regex version.
-        assertRaisesRegex = (# camelcase-required
-            unittest.TestCase.assertRaisesRegexp)
+        assertRaisesRegex = (  # camelcase-required
+            unittest.TestCase.assertRaisesRegexp
+        )
 
     def testreponame(self):
         """repository path components get stripped from URL."""
 
-        with self.assertRaisesRegex(error.ProgrammingError,
-                                    'reponame requires PATH_INFO'):
+        with self.assertRaisesRegex(
+            error.ProgrammingError, 'reponame requires PATH_INFO'
+        ):
             parse(DEFAULT_ENV, reponame=b'repo')
 
-        with self.assertRaisesRegex(error.ProgrammingError,
-                                    'PATH_INFO does not begin with repo '
-                                    'name'):
-            parse(DEFAULT_ENV, reponame=b'repo', extra={
-                r'PATH_INFO': r'/pathinfo',
-            })
+        with self.assertRaisesRegex(
+            error.ProgrammingError, 'PATH_INFO does not begin with repo ' 'name'
+        ):
+            parse(
+                DEFAULT_ENV,
+                reponame=b'repo',
+                extra={r'PATH_INFO': r'/pathinfo',},
+            )
 
-        with self.assertRaisesRegex(error.ProgrammingError,
-                                    'reponame prefix of PATH_INFO'):
-            parse(DEFAULT_ENV, reponame=b'repo', extra={
-                r'PATH_INFO': r'/repoextra/path',
-            })
+        with self.assertRaisesRegex(
+            error.ProgrammingError, 'reponame prefix of PATH_INFO'
+        ):
+            parse(
+                DEFAULT_ENV,
+                reponame=b'repo',
+                extra={r'PATH_INFO': r'/repoextra/path',},
+            )
 
-        r = parse(DEFAULT_ENV, reponame=b'repo', extra={
-            r'PATH_INFO': r'/repo/path1/path2',
-        })
+        r = parse(
+            DEFAULT_ENV,
+            reponame=b'repo',
+            extra={r'PATH_INFO': r'/repo/path1/path2',},
+        )
 
         self.assertEqual(r.url, b'http://testserver/repo/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -235,14 +235,17 @@
         self.assertEqual(r.dispatchpath, b'path1/path2')
         self.assertEqual(r.reponame, b'repo')
 
-        r = parse(DEFAULT_ENV, reponame=b'prefix/repo', extra={
-            r'PATH_INFO': r'/prefix/repo/path1/path2',
-        })
+        r = parse(
+            DEFAULT_ENV,
+            reponame=b'prefix/repo',
+            extra={r'PATH_INFO': r'/prefix/repo/path1/path2',},
+        )
 
         self.assertEqual(r.url, b'http://testserver/prefix/repo/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
-        self.assertEqual(r.advertisedurl,
-                         b'http://testserver/prefix/repo/path1/path2')
+        self.assertEqual(
+            r.advertisedurl, b'http://testserver/prefix/repo/path1/path2'
+        )
         self.assertEqual(r.advertisedbaseurl, b'http://testserver')
         self.assertEqual(r.apppath, b'/prefix/repo')
         self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
@@ -301,9 +304,11 @@
         self.assertIsNone(r.reponame)
 
         # With only PATH_INFO defined.
-        r = parse(DEFAULT_ENV, altbaseurl=b'http://altserver', extra={
-            r'PATH_INFO': r'/path1/path2',
-        })
+        r = parse(
+            DEFAULT_ENV,
+            altbaseurl=b'http://altserver',
+            extra={r'PATH_INFO': r'/path1/path2',},
+        )
         self.assertEqual(r.url, b'http://testserver/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
         self.assertEqual(r.advertisedurl, b'http://altserver/path1/path2')
@@ -339,13 +344,16 @@
         self.assertIsNone(r.reponame)
 
         # PATH_INFO + path on alt URL.
-        r = parse(DEFAULT_ENV, altbaseurl=b'http://altserver/altpath', extra={
-            r'PATH_INFO': r'/path1/path2',
-        })
+        r = parse(
+            DEFAULT_ENV,
+            altbaseurl=b'http://altserver/altpath',
+            extra={r'PATH_INFO': r'/path1/path2',},
+        )
         self.assertEqual(r.url, b'http://testserver/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
-        self.assertEqual(r.advertisedurl,
-                         b'http://altserver/altpath/path1/path2')
+        self.assertEqual(
+            r.advertisedurl, b'http://altserver/altpath/path1/path2'
+        )
         self.assertEqual(r.advertisedbaseurl, b'http://altserver')
         self.assertEqual(r.urlscheme, b'http')
         self.assertEqual(r.apppath, b'/altpath')
@@ -354,13 +362,16 @@
         self.assertIsNone(r.reponame)
 
         # PATH_INFO + path on alt URL with trailing slash.
-        r = parse(DEFAULT_ENV, altbaseurl=b'http://altserver/altpath/', extra={
-            r'PATH_INFO': r'/path1/path2',
-        })
+        r = parse(
+            DEFAULT_ENV,
+            altbaseurl=b'http://altserver/altpath/',
+            extra={r'PATH_INFO': r'/path1/path2',},
+        )
         self.assertEqual(r.url, b'http://testserver/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
-        self.assertEqual(r.advertisedurl,
-                         b'http://altserver/altpath//path1/path2')
+        self.assertEqual(
+            r.advertisedurl, b'http://altserver/altpath//path1/path2'
+        )
         self.assertEqual(r.advertisedbaseurl, b'http://altserver')
         self.assertEqual(r.urlscheme, b'http')
         self.assertEqual(r.apppath, b'/altpath/')
@@ -369,10 +380,11 @@
         self.assertIsNone(r.reponame)
 
         # Local SCRIPT_NAME is ignored.
-        r = parse(DEFAULT_ENV, altbaseurl=b'http://altserver', extra={
-            r'SCRIPT_NAME': r'/script',
-            r'PATH_INFO': r'/path1/path2',
-        })
+        r = parse(
+            DEFAULT_ENV,
+            altbaseurl=b'http://altserver',
+            extra={r'SCRIPT_NAME': r'/script', r'PATH_INFO': r'/path1/path2',},
+        )
         self.assertEqual(r.url, b'http://testserver/script/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
         self.assertEqual(r.advertisedurl, b'http://altserver/path1/path2')
@@ -384,14 +396,16 @@
         self.assertIsNone(r.reponame)
 
         # Use remote's path for script name, app path
-        r = parse(DEFAULT_ENV, altbaseurl=b'http://altserver/altroot', extra={
-            r'SCRIPT_NAME': r'/script',
-            r'PATH_INFO': r'/path1/path2',
-        })
+        r = parse(
+            DEFAULT_ENV,
+            altbaseurl=b'http://altserver/altroot',
+            extra={r'SCRIPT_NAME': r'/script', r'PATH_INFO': r'/path1/path2',},
+        )
         self.assertEqual(r.url, b'http://testserver/script/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
-        self.assertEqual(r.advertisedurl,
-                         b'http://altserver/altroot/path1/path2')
+        self.assertEqual(
+            r.advertisedurl, b'http://altserver/altroot/path1/path2'
+        )
         self.assertEqual(r.advertisedbaseurl, b'http://altserver')
         self.assertEqual(r.urlscheme, b'http')
         self.assertEqual(r.apppath, b'/altroot')
@@ -400,23 +414,29 @@
         self.assertIsNone(r.reponame)
 
         # reponame is factored in properly.
-        r = parse(DEFAULT_ENV, reponame=b'repo',
-                  altbaseurl=b'http://altserver/altroot',
-                  extra={
+        r = parse(
+            DEFAULT_ENV,
+            reponame=b'repo',
+            altbaseurl=b'http://altserver/altroot',
+            extra={
                 r'SCRIPT_NAME': r'/script',
                 r'PATH_INFO': r'/repo/path1/path2',
-            })
+            },
+        )
 
         self.assertEqual(r.url, b'http://testserver/script/repo/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
-        self.assertEqual(r.advertisedurl,
-                         b'http://altserver/altroot/repo/path1/path2')
+        self.assertEqual(
+            r.advertisedurl, b'http://altserver/altroot/repo/path1/path2'
+        )
         self.assertEqual(r.advertisedbaseurl, b'http://altserver')
         self.assertEqual(r.apppath, b'/altroot/repo')
         self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
         self.assertEqual(r.dispatchpath, b'path1/path2')
         self.assertEqual(r.reponame, b'repo')
 
+
 if __name__ == '__main__':
     import silenttestrunner
+
     silenttestrunner.main(__name__)
--- a/tests/testlib/ext-phase-report.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/testlib/ext-phase-report.py	Mon Oct 21 11:09:48 2019 -0400
@@ -2,16 +2,24 @@
 
 from __future__ import absolute_import
 
+
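+# Wrap the repo class so every transaction reports the phase moves it
+# recorded, as test-debug-phase output lines.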
 def reposetup(ui, repo):
-
     def reportphasemove(tr):
         for rev, move in sorted(tr.changes[b'phases'].items()):
             if move[0] is None:
-                ui.write((b'test-debug-phase: new rev %d:  x -> %d\n'
-                          % (rev, move[1])))
+                ui.write(
+                    (
+                        b'test-debug-phase: new rev %d:  x -> %d\n'
+                        % (rev, move[1])
+                    )
+                )
             else:
-                ui.write((b'test-debug-phase: move rev %d: %d -> %d\n'
-                          % (rev, move[0], move[1])))
+                ui.write(
+                    (
+                        b'test-debug-phase: move rev %d: %d -> %d\n'
+                        % (rev, move[0], move[1])
+                    )
+                )
 
     class reportphaserepo(repo.__class__):
         def transaction(self, *args, **kwargs):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/ext-sidedata.py	Mon Oct 21 11:09:48 2019 -0400
@@ -0,0 +1,83 @@
+# ext-sidedata.py - small extension to test the sidedata logic
+#
+# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import hashlib
+import struct
+
+from mercurial import (
+    extensions,
+    localrepo,
+    node,
+    revlog,
+    upgrade,
+)
+
+from mercurial.revlogutils import sidedata
+
+
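+# Write side: attach two pieces of test sidedata to every revision stored
+# through addrevision, the text length and the sha256 of the revision text.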
+def wrapaddrevision(
+    orig, self, text, transaction, link, p1, p2, *args, **kwargs
+):
+    if kwargs.get('sidedata') is None:
+        kwargs['sidedata'] = {}
+    sd = kwargs['sidedata']
+    ## let's store some arbitrary data just for testing
+    # text length
+    sd[sidedata.SD_TEST1] = struct.pack('>I', len(text))
+    # and sha2 hashes
+    sha256 = hashlib.sha256(text).digest()
+    sd[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
+    return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
+
+
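+# Read side: check the text returned by revision() against the length and
+# sha256 recorded in sidedata, unless verification is explicitly disabled.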
+def wraprevision(orig, self, nodeorrev, *args, **kwargs):
+    text = orig(self, nodeorrev, *args, **kwargs)
+    if getattr(self, 'sidedatanocheck', False):
+        return text
+    if nodeorrev != node.nullrev and nodeorrev != node.nullid:
+        sd = self.sidedata(nodeorrev)
+        if len(text) != struct.unpack('>I', sd[sidedata.SD_TEST1])[0]:
+            raise RuntimeError('text size mismatch')
+        expected = sd[sidedata.SD_TEST2]
+        got = hashlib.sha256(text).digest()
+        if got != expected:
+            raise RuntimeError('sha256 mismatch')
+    return text
+
+
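+# Upgrade side: when the destination repo gains the sidedata requirement,
+# supply a companion computing the same test sidedata for each revision.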
+def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
+    sidedatacompanion = orig(srcrepo, dstrepo)
+    addedreqs = dstrepo.requirements - srcrepo.requirements
+    if localrepo.SIDEDATA_REQUIREMENT in addedreqs:
+        assert sidedatacompanion is None  # deal with composition later
+
+        def sidedatacompanion(revlog, rev):
+            update = {}
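+            # turn off wraprevision's verification while reading the text
+            # whose sidedata is being (re)computed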
+            revlog.sidedatanocheck = True
+            try:
+                text = revlog.revision(rev)
+            finally:
+                del revlog.sidedatanocheck
+            ## let's store some arbitrary data just for testing
+            # text length
+            update[sidedata.SD_TEST1] = struct.pack('>I', len(text))
+            # and sha2 hashes
+            sha256 = hashlib.sha256(text).digest()
+            update[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
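+            # triple consumed by the upgrade code: keep existing sidedata,
+            # remove no keys, and apply these updates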
+            return False, (), update
+
+    return sidedatacompanion
+
+
+def extsetup(ui):
+    extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
+    extensions.wrapfunction(revlog.revlog, 'revision', wraprevision)
+    extensions.wrapfunction(
+        upgrade, 'getsidedatacompanion', wrapgetsidedatacompanion
+    )
--- a/tests/testlib/obsmarker-common.sh	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/testlib/obsmarker-common.sh	Mon Oct 21 11:09:48 2019 -0400
@@ -1,7 +1,9 @@
 mkcommit() {
-   echo "$1" > "$1"
-   hg add "$1"
-   hg ci -m "$1"
+   name="$1"
+   shift
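+   # any remaining arguments are forwarded to "hg ci" below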
+   echo "$name" > "$name"
+   hg add "$name"
+   hg ci -m "$name" "$@"
 }
 
 getid() {
--- a/tests/tinyproxy.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/tinyproxy.py	Mon Oct 21 11:09:48 2019 -0400
@@ -34,12 +34,13 @@
 else:
     family = socket.AF_INET
 
-class ProxyHandler (httpserver.basehttprequesthandler):
+
+class ProxyHandler(httpserver.basehttprequesthandler):
     __base = httpserver.basehttprequesthandler
     __base_handle = __base.handle
 
     server_version = "TinyHTTPProxy/" + __version__
-    rbufsize = 0                        # self.rfile Be unbuffered
+    rbufsize = 0  # keep self.rfile unbuffered
 
     def handle(self):
         (ip, port) = self.client_address
@@ -53,9 +54,13 @@
 
     def log_request(self, code='-', size='-'):
         xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
-        self.log_message('"%s" %s %s%s',
-                         self.requestline, str(code), str(size),
-                         ''.join([' %s:%s' % h for h in sorted(xheaders)]))
+        self.log_message(
+            '"%s" %s %s%s',
+            self.requestline,
+            str(code),
+            str(size),
+            ''.join([' %s:%s' % h for h in sorted(xheaders)]),
+        )
         # Flush for Windows, so output isn't lost on TerminateProcess()
         sys.stdout.flush()
         sys.stderr.flush()
@@ -63,14 +68,17 @@
     def _connect_to(self, netloc, soc):
         i = netloc.find(':')
         if i >= 0:
-            host_port = netloc[:i], int(netloc[i + 1:])
+            host_port = netloc[:i], int(netloc[i + 1 :])
         else:
             host_port = netloc, 80
         print("\t" "connect to %s:%d" % host_port)
-        try: soc.connect(host_port)
+        try:
+            soc.connect(host_port)
         except socket.error as arg:
-            try: msg = arg[1]
-            except (IndexError, TypeError): msg = arg
+            try:
+                msg = arg[1]
+            except (IndexError, TypeError):
+                msg = arg
             self.send_error(404, msg)
             return 0
         return 1
@@ -80,10 +88,14 @@
         try:
             if self._connect_to(self.path, soc):
                 self.log_request(200)
-                self.wfile.write(pycompat.bytestr(self.protocol_version) +
-                                 b" 200 Connection established\r\n")
-                self.wfile.write(b"Proxy-agent: %s\r\n" %
-                                 pycompat.bytestr(self.version_string()))
+                self.wfile.write(
+                    pycompat.bytestr(self.protocol_version)
+                    + b" 200 Connection established\r\n"
+                )
+                self.wfile.write(
+                    b"Proxy-agent: %s\r\n"
+                    % pycompat.bytestr(self.version_string())
+                )
                 self.wfile.write(b"\r\n")
                 self._read_write(soc, 300)
         finally:
@@ -93,7 +105,8 @@
 
     def do_GET(self):
         (scm, netloc, path, params, query, fragment) = urlreq.urlparse(
-            self.path, 'http')
+            self.path, 'http'
+        )
         if scm != 'http' or fragment or not netloc:
             self.send_error(400, "bad url %s" % self.path)
             return
@@ -102,15 +115,21 @@
             if self._connect_to(netloc, soc):
                 self.log_request()
                 url = urlreq.urlunparse(('', '', path, params, query, ''))
-                soc.send(b"%s %s %s\r\n" % (
-                    pycompat.bytestr(self.command),
-                    pycompat.bytestr(url),
-                    pycompat.bytestr(self.request_version)))
+                soc.send(
+                    b"%s %s %s\r\n"
+                    % (
+                        pycompat.bytestr(self.command),
+                        pycompat.bytestr(url),
+                        pycompat.bytestr(self.request_version),
+                    )
+                )
                 self.headers['Connection'] = 'close'
                 del self.headers['Proxy-Connection']
                 for key, val in self.headers.items():
-                    soc.send(b"%s: %s\r\n" % (pycompat.bytestr(key),
-                                              pycompat.bytestr(val)))
+                    soc.send(
+                        b"%s: %s\r\n"
+                        % (pycompat.bytestr(key), pycompat.bytestr(val))
+                    )
                 soc.send(b"\r\n")
                 self._read_write(soc)
         finally:
@@ -147,17 +166,18 @@
 
     do_HEAD = do_GET
     do_POST = do_GET
-    do_PUT  = do_GET
+    do_PUT = do_GET
     do_DELETE = do_GET
 
-class ThreadingHTTPServer (socketserver.ThreadingMixIn,
-                           httpserver.httpserver):
+
+class ThreadingHTTPServer(socketserver.ThreadingMixIn, httpserver.httpserver):
     def __init__(self, *args, **kwargs):
         httpserver.httpserver.__init__(self, *args, **kwargs)
         a = open("proxy.pid", "w")
         a.write(str(os.getpid()) + "\n")
         a.close()
 
+
 def runserver(port=8000, bind=""):
     server_address = (bind, port)
     ProxyHandler.protocol_version = "HTTP/1.0"
@@ -171,6 +191,7 @@
         httpd.server_close()
         sys.exit(0)
 
+
 if __name__ == '__main__':
     argv = sys.argv
     if argv[1:] and argv[1] in ('-h', '--help'):
@@ -188,9 +209,13 @@
             print("Any clients will be served...")
 
         parser = optparse.OptionParser()
-        parser.add_option('-b', '--bind', metavar='ADDRESS',
-                          help='Specify alternate bind address '
-                               '[default: all interfaces]', default='')
+        parser.add_option(
+            '-b',
+            '--bind',
+            metavar='ADDRESS',
+            help='Specify alternate bind address ' '[default: all interfaces]',
+            default='',
+        )
         (options, args) = parser.parse_args()
         port = 8000
         if len(args) == 1:
--- a/tests/wireprotosimplecache.py	Wed Oct 02 12:20:36 2019 -0400
+++ b/tests/wireprotosimplecache.py	Mon Oct 21 11:09:48 2019 -0400
@@ -10,28 +10,25 @@
 from mercurial import (
     extensions,
     registrar,
-    repository,
     util,
     wireprotoserver,
     wireprototypes,
     wireprotov2server,
 )
-from mercurial.utils import (
-    interfaceutil,
-    stringutil,
+from mercurial.interfaces import (
+    repository,
+    util as interfaceutil,
 )
+from mercurial.utils import stringutil
 
 CACHE = None
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
-configitem(b'simplecache', b'cacheapi',
-           default=False)
-configitem(b'simplecache', b'cacheobjects',
-           default=False)
-configitem(b'simplecache', b'redirectsfile',
-           default=None)
+configitem(b'simplecache', b'cacheapi', default=False)
+configitem(b'simplecache', b'cacheobjects', default=False)
+configitem(b'simplecache', b'redirectsfile', default=None)
 
 # API handler that makes cached keys available.
 def handlecacherequest(rctx, req, res, checkperm, urlparts):
@@ -58,19 +55,23 @@
     res.headers[b'Content-Type'] = b'application/mercurial-cbor'
     res.setbodybytes(CACHE[key])
 
+
 def cachedescriptor(req, repo):
     return {}
 
+
 wireprotoserver.API_HANDLERS[b'simplecache'] = {
     b'config': (b'simplecache', b'cacheapi'),
     b'handler': handlecacherequest,
     b'apidescriptor': cachedescriptor,
 }
 
+
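+# Test-only command cacher: responses live in the module-level CACHE and may
+# be served as content redirects instead of inline cached bytes.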
 @interfaceutil.implementer(repository.iwireprotocolcommandcacher)
 class memorycacher(object):
-    def __init__(self, ui, command, encodefn, redirecttargets, redirecthashes,
-                 req):
+    def __init__(
+        self, ui, command, encodefn, redirecttargets, redirecthashes, req
+    ):
         self.ui = ui
         self.encodefn = encodefn
         self.redirecttargets = redirecttargets
@@ -129,12 +130,16 @@
 
             url = b'%s/%s' % (self.req.baseurl, b'/'.join(paths))
 
-            #url = b'http://example.com/%s' % self.key
-            self.ui.log(b'simplecache', b'sending content redirect for %s to '
-                                        b'%s\n', self.key, url)
+            # url = b'http://example.com/%s' % self.key
+            self.ui.log(
+                b'simplecache',
+                b'sending content redirect for %s to ' b'%s\n',
+                self.key,
+                url,
+            )
             response = wireprototypes.alternatelocationresponse(
-                url=url,
-                mediatype=b'application/mercurial-cbor')
+                url=url, mediatype=b'application/mercurial-cbor'
+            )
 
             return {b'objs': [response]}
 
@@ -164,10 +169,26 @@
 
         return []
 
-def makeresponsecacher(orig, repo, proto, command, args, objencoderfn,
-                       redirecttargets, redirecthashes):
-    return memorycacher(repo.ui, command, objencoderfn, redirecttargets,
-                        redirecthashes, proto._req)
+
+def makeresponsecacher(
+    orig,
+    repo,
+    proto,
+    command,
+    args,
+    objencoderfn,
+    redirecttargets,
+    redirecthashes,
+):
+    return memorycacher(
+        repo.ui,
+        command,
+        objencoderfn,
+        redirecttargets,
+        redirecthashes,
+        proto._req,
+    )
+
 
 def loadredirecttargets(ui):
     path = ui.config(b'simplecache', b'redirectsfile')
@@ -179,15 +200,21 @@
 
     return stringutil.evalpythonliteral(s)
 
+
 def getadvertisedredirecttargets(orig, repo, proto):
     return loadredirecttargets(repo.ui)
 
+
 def extsetup(ui):
     global CACHE
 
     CACHE = util.lrucachedict(10000)
 
-    extensions.wrapfunction(wireprotov2server, b'makeresponsecacher',
-                            makeresponsecacher)
-    extensions.wrapfunction(wireprotov2server, b'getadvertisedredirecttargets',
-                            getadvertisedredirecttargets)
+    extensions.wrapfunction(
+        wireprotov2server, b'makeresponsecacher', makeresponsecacher
+    )
+    extensions.wrapfunction(
+        wireprotov2server,
+        b'getadvertisedredirecttargets',
+        getadvertisedredirecttargets,
+    )